// Package db manages the PostgreSQL connection pool, the embedded schema
// migrations and basic health reporting for the service.
package db

import (
	"context"
	"database/sql"
	"embed"
	"fmt"
	"log"
	"os"
	"strconv"
	"time"

	"github.com/golang-migrate/migrate/v4"
	"github.com/golang-migrate/migrate/v4/database/postgres"
	_ "github.com/golang-migrate/migrate/v4/source/file"
	"github.com/golang-migrate/migrate/v4/source/iofs"
	"github.com/jackc/pgx/v5/pgxpool"
	_ "github.com/lib/pq"
)

var Dbpool *pgxpool.Pool
var Ctx = context.Background()

//go:embed "migrations/*.sql"
var MigrationsFs embed.FS

// InitDB opens the pgx connection pool and verifies the connection with a
// test query. It exits the process if the database is unreachable.
func InitDB(host string, port string, user string, password string, dbname string) {
	psqlInfo := fmt.Sprintf("host=%s port=%s user=%s "+
		"password=%s dbname=%s sslmode=disable",
		host, port, user, password, dbname)
	fmt.Println(psqlInfo)

	var err error
	Dbpool, err = pgxpool.New(Ctx, psqlInfo)
	if err != nil {
		_, _ = fmt.Fprintf(os.Stderr, "Unable to connect to database: %v\n", err)
		os.Exit(1)
	}

	var success string
	err = Dbpool.QueryRow(Ctx, "select 'Successfully connected!'").Scan(&success)
	if err != nil {
		_, _ = fmt.Fprintf(os.Stderr, "QueryRow failed: %v\n", err)
		os.Exit(1)
	}
	Testf()
	fmt.Println(success)
}

// CloseDb closes the connection pool.
func CloseDb() {
	fmt.Println("Closing connection to database")
	Dbpool.Close()
}

// Testf prints the game names in the game table; it serves as a smoke test
// for the connection.
func Testf() {
	rows, dbErr := Dbpool.Query(Ctx, "select game_name from game")
	if dbErr != nil {
		_, _ = fmt.Fprintf(os.Stderr, "Query failed: %v\n", dbErr)
		os.Exit(1)
	}
	defer rows.Close()

	for rows.Next() {
		var gameName string
		if dbErr = rows.Scan(&gameName); dbErr != nil {
			_, _ = fmt.Fprintf(os.Stderr, "Scan failed: %v\n", dbErr)
		}
		_, _ = fmt.Fprintf(os.Stderr, "%v\n", gameName)
	}
	if err := rows.Err(); err != nil {
		_, _ = fmt.Fprintf(os.Stderr, "Row iteration failed: %v\n", err)
	}
}

// ResetGameIdSeq realigns the game id sequence with the highest id currently
// stored in the game table.
func ResetGameIdSeq() {
	// Exec, not Query: the result set is not needed, and an unread result
	// would hold the pooled connection open.
	_, err := Dbpool.Exec(Ctx, "SELECT setval('game_id_seq', (SELECT MAX(id) FROM game)+1);")
	if err != nil {
		_, _ = fmt.Fprintf(os.Stderr, "Exec failed: %v\n", err)
	}
}

// createDb connects to the server without selecting a database and creates
// dbname. The name comes from configuration and is assumed to be trusted.
func createDb(host string, port string, user string, password string, dbname string) {
	conninfo := fmt.Sprintf("host=%s port=%s user=%s password=%s sslmode=disable",
		host, port, user, password)
	db, err := sql.Open("postgres", conninfo)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	_, err = db.Exec("create database " + dbname)
	if err != nil {
		log.Fatal(err)
	}
	log.Println("Finished creating database")
}

// Migrate_db runs the embedded SQL migrations against dbname, creating the
// database first if it does not exist yet.
func Migrate_db(host string, port string, user string, password string, dbname string) {
	migrationInfo := fmt.Sprintf("postgres://%s:%s@%s:%s/%s?sslmode=disable",
		user, password, host, port, dbname)
	fmt.Println("Migration Info: ", migrationInfo)

	db, err := sql.Open("postgres", migrationInfo)
	if err != nil {
		log.Println(err)
	}

	// Probe the target database; if the probe fails, assume the database is
	// missing, create it and reconnect.
	rows, err := db.Query("select * from game")
	if err != nil {
		log.Println(err)
		createDb(host, port, user, password, dbname)
		db, err = sql.Open("postgres", migrationInfo)
		if err != nil {
			log.Fatal(err)
		}
	} else {
		rows.Close()
	}

	driver, err := postgres.WithInstance(db, &postgres.Config{})
	if err != nil {
		log.Println(err)
	}

	files, err := iofs.New(MigrationsFs, "migrations")
	if err != nil {
		log.Fatal(err)
	}

	m, err := migrate.NewWithInstance("iofs", files, "postgres", driver)
	if err != nil {
		log.Fatal(err)
	}
	// Alternative: read migrations from disk instead of the embedded FS.
	// m, err := migrate.NewWithDatabaseInstance("file://./db/migrations/", "postgres", driver)

	version, _, err := m.Version()
	if err != nil {
		log.Println("Migration version err: ", err)
	}
	fmt.Println("Migration version before: ", version)

	// err = m.Force(1) // force the recorded version if a migration was left dirty

	// Migrate to schema version 2. Alternatives: m.Up() applies all pending
	// migrations, m.Steps(n) applies an explicit number of migrations.
	err = m.Migrate(2)
	if err != nil && err != migrate.ErrNoChange {
		log.Println("Migration err: ", err)
	}

	versionAfter, _, err := m.Version()
	if err != nil {
		log.Println("Migration version err: ", err)
	}
	fmt.Println("Migration version after: ", versionAfter)
	fmt.Println("Migration done")
	db.Close()
}

// Health checks the health of the database connection by pinging the database.
// It returns a map with keys indicating various health statistics.
func Health() map[string]string {
	ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second)
	defer cancel()

	stats := make(map[string]string)

	// Ping the database
	err := Dbpool.Ping(ctx)
	if err != nil {
		stats["status"] = "down"
		stats["error"] = fmt.Sprintf("db down: %v", err)
		log.Fatalf("db down: %v", err) // Log the error and terminate the program
		return stats
	}

	// Database is up, add more statistics
	stats["status"] = "up"
	stats["message"] = "It's healthy"

	// Get pool statistics (connections created, in use, idle, etc.). The pgx
	// Stat methods stand in for the database/sql DBStats fields these keys
	// were originally named after.
	dbStats := Dbpool.Stat()
	stats["open_connections"] = strconv.Itoa(int(dbStats.NewConnsCount()))
	stats["in_use"] = strconv.Itoa(int(dbStats.AcquiredConns()))
	stats["idle"] = strconv.Itoa(int(dbStats.IdleConns()))
	stats["wait_count"] = strconv.FormatInt(dbStats.AcquireCount(), 10)
	stats["wait_duration"] = dbStats.AcquireDuration().String()
	stats["max_idle_closed"] = strconv.FormatInt(dbStats.MaxIdleDestroyCount(), 10)
	stats["max_lifetime_closed"] = strconv.FormatInt(dbStats.MaxLifetimeDestroyCount(), 10)

	// Evaluate stats to provide a health message
	if dbStats.NewConnsCount() > 40 { // Assuming 50 is the max for this example
		stats["message"] = "The database is experiencing heavy load."
	}
	if dbStats.AcquireCount() > 1000 {
		stats["message"] = "The database has a high number of wait events, indicating potential bottlenecks."
	}
	if dbStats.MaxIdleDestroyCount() > dbStats.NewConnsCount()/2 {
		stats["message"] = "Many idle connections are being closed, consider revising the connection pool settings."
	}
	if dbStats.MaxLifetimeDestroyCount() > dbStats.NewConnsCount()/2 {
		stats["message"] = "Many connections are being closed due to max lifetime, consider increasing max lifetime or revising the connection usage pattern."
	}

	return stats
}
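
// Usage sketch (assumed wiring, not part of this package's exported behavior):
// callers would typically run the migrations before opening the pool, since
// InitDB's smoke test reads from the game table that the migrations create.
//
//	db.Migrate_db(host, port, user, password, dbname)
//	db.InitDB(host, port, user, password, dbname)
//	defer db.CloseDb()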