32 changes: 29 additions & 3 deletions cmd/hyperfleet-api/main.go
@@ -2,6 +2,7 @@ package main

import (
	"context"
	"io"
	"log/slog"
	"os"

@@ -46,11 +47,36 @@ func main() {

// initDefaultLogger initializes a default logger with INFO level
// This ensures logging works before environment/config is loaded
// Reads LOG_LEVEL, LOG_FORMAT, and LOG_OUTPUT from environment variables if set
func initDefaultLogger() {
	// Read log level from environment with default fallback
	level := slog.LevelInfo
	if levelStr := os.Getenv("LOG_LEVEL"); levelStr != "" {
		if parsed, err := logger.ParseLogLevel(levelStr); err == nil {
			level = parsed
		}
	}

	// Read log format from environment with default fallback
	format := logger.FormatJSON
	if formatStr := os.Getenv("LOG_FORMAT"); formatStr != "" {
		if parsed, err := logger.ParseLogFormat(formatStr); err == nil {
			format = parsed
		}
	}

	// Read log output from environment with default fallback
	var output io.Writer = os.Stdout
	if outputStr := os.Getenv("LOG_OUTPUT"); outputStr != "" {
		if parsed, err := logger.ParseLogOutput(outputStr); err == nil {
			output = parsed
		}
	}

	cfg := &logger.LogConfig{
		Level:     slog.LevelInfo,
		Format:    logger.FormatJSON,
		Output:    os.Stdout,
		Level:     level,
		Format:    format,
		Output:    output,
		Component: "hyperfleet-api",
		Version:   "unknown",
		Hostname:  getHostname(),
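With this change the bootstrap logger honors the logging environment variables before any configuration is loaded. A minimal illustration (binary path taken from docs/logging.md below; LOG_OUTPUT is omitted because its accepted values are defined by logger.ParseLogOutput):

```bash
# Emit human-readable DEBUG logs from the very first startup lines,
# before environments.Environment().Initialize() has run.
LOG_LEVEL=debug LOG_FORMAT=text ./bin/hyperfleet-api serve
```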
5 changes: 4 additions & 1 deletion cmd/hyperfleet-api/migrate/cmd.go
@@ -29,7 +29,10 @@ func NewMigrateCommand() *cobra.Command {
	return cmd
}

func runMigrate(_ *cobra.Command, _ []string) {
func runMigrate(cmd *cobra.Command, _ []string) {
	// Bind environment variables
	dbConfig.BindEnv(cmd.PersistentFlags())

	if err := runMigrateWithError(); err != nil {
		os.Exit(1)
	}
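Because runMigrate now binds environment variables before running, DB_DEBUG can surface the SQL executed by migrations. A hedged example (the migrate subcommand invocation is assumed from the command name; an explicitly passed --enable-db-debug flag would still win over the env var, per BindEnv in pkg/config/db.go below):

```bash
# Log every SQL statement the migration runs, regardless of LOG_LEVEL.
DB_DEBUG=true ./bin/hyperfleet-api migrate
```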
17 changes: 16 additions & 1 deletion cmd/hyperfleet-api/servecmd/cmd.go
@@ -13,6 +13,7 @@ import (
"github.com/openshift-hyperfleet/hyperfleet-api/cmd/hyperfleet-api/environments"
"github.com/openshift-hyperfleet/hyperfleet-api/cmd/hyperfleet-api/server"
"github.com/openshift-hyperfleet/hyperfleet-api/pkg/api"
"github.com/openshift-hyperfleet/hyperfleet-api/pkg/db/db_session"
"github.com/openshift-hyperfleet/hyperfleet-api/pkg/logger"
"github.com/openshift-hyperfleet/hyperfleet-api/pkg/telemetry"
)
@@ -37,13 +38,16 @@ func NewServeCommand() *cobra.Command {
func runServe(cmd *cobra.Command, args []string) {
	ctx := context.Background()

	// Bind database environment variables BEFORE Initialize (database is initialized inside)
	environments.Environment().Config.Database.BindEnv(cmd.PersistentFlags())

	err := environments.Environment().Initialize()
	if err != nil {
		logger.WithError(ctx, err).Error("Unable to initialize environment")
		os.Exit(1)
	}

	// Bind environment variables for advanced configuration (OTel, Masking)
	// Bind logging environment variables AFTER Initialize (logger is reconfigured later in initLogger)
	environments.Environment().Config.Logging.BindEnv(cmd.PersistentFlags())

	initLogger()
@@ -140,4 +144,15 @@ func initLogger() {
	// Use ReconfigureGlobalLogger instead of InitGlobalLogger because
	// InitGlobalLogger was already called in main() with default config
	logger.ReconfigureGlobalLogger(logConfig)

	// Reconfigure database logger to follow LOG_LEVEL
	dbSessionFactory := environments.Environment().Database.SessionFactory
	if dbSessionFactory != nil {
		gormLevel := environments.Environment().Config.Database.GetGormLogLevel(
			environments.Environment().Config.Logging.Level,
		)
		if reconfigurable, ok := dbSessionFactory.(db_session.LoggerReconfigurable); ok {
			reconfigurable.ReconfigureLogger(gormLevel)
		}
	}
}
131 changes: 131 additions & 0 deletions docs/logging.md
@@ -67,6 +67,10 @@ export MASKING_HEADERS="Authorization,Cookie,X-API-Key"

# JSON body fields to mask (comma-separated)
export MASKING_FIELDS="password,token,secret,api_key"

# Database debug mode (true/false)
# When true, logs all SQL queries regardless of LOG_LEVEL
export DB_DEBUG=false
```

### Configuration Struct
@@ -251,6 +255,97 @@ export LOG_LEVEL=info

No code changes are required; the logger automatically adapts the output format based on configuration.

## Database Logging

HyperFleet API automatically integrates database (GORM) logging with the application's `LOG_LEVEL` configuration while providing a `DB_DEBUG` override for database-specific debugging.

### LOG_LEVEL Integration

Database logs follow the application log level by default:

| LOG_LEVEL | GORM Behavior | What Gets Logged |
|-----------|---------------|------------------|
| `debug` | Info level | All SQL queries with parameters, duration, and row counts |
| `info` | Warn level | Only slow queries (>200ms) and errors |
| `warn` | Warn level | Only slow queries (>200ms) and errors |
| `error` | Silent | Nothing (database logging disabled) |

### DB_DEBUG Override

The `DB_DEBUG` environment variable provides database-specific debugging without changing the global `LOG_LEVEL`:

```bash
# Production environment with database debugging
export LOG_LEVEL=info # Application logs remain at INFO
export LOG_FORMAT=json # Production format
export DB_DEBUG=true # Force all SQL queries to be logged
./bin/hyperfleet-api serve
```

**Priority:**
1. If `DB_DEBUG=true`, all SQL queries are logged (GORM Info level)
2. Otherwise, follow `LOG_LEVEL` mapping (see table above)
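
For readers wiring this mapping up outside HyperFleet, the sketch below applies a computed level through GORM's stock logger. It is illustration only: this repository ships its own GORM logger (`pkg/logger/gorm_logger.go`), and the postgres driver import is an assumption.

```go
// Sketch: open a GORM session whose logger honors the level computed
// from DB_DEBUG / LOG_LEVEL, with the 200ms slow-query threshold
// documented above. Not HyperFleet's actual session-factory code.
package main

import (
	"log"
	"os"
	"time"

	"gorm.io/driver/postgres"
	"gorm.io/gorm"
	gormlogger "gorm.io/gorm/logger"
)

func openWithLevel(dsn string, level gormlogger.LogLevel) (*gorm.DB, error) {
	gl := gormlogger.New(
		log.New(os.Stdout, "", log.LstdFlags),
		gormlogger.Config{
			SlowThreshold: 200 * time.Millisecond, // slow-query threshold
			LogLevel:      level,                  // Info, Warn, or Silent
		},
	)
	return gorm.Open(postgres.Open(dsn), &gorm.Config{Logger: gl})
}
```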

### Database Log Examples

**Fast query (LOG_LEVEL=debug or DB_DEBUG=true):**

JSON format:
```json
{
  "timestamp": "2026-01-14T11:31:29.788683+08:00",
  "level": "info",
  "message": "GORM query",
  "duration_ms": 9.052167,
  "rows": 1,
  "sql": "INSERT INTO \"clusters\" (\"id\",\"created_time\",...) VALUES (...)",
  "component": "api",
  "version": "0120ac6-modified",
  "hostname": "yasun-mac",
  "request_id": "38EOuujxBDUduP0hYLxVGMm69Dq",
  "transaction_id": 1157
}
```

Text format:
```text
2026-01-14T11:34:23+08:00 INFO [api] [0120ac6-modified] [yasun-mac] GORM query request_id=38EPGnassU9SLNZ82XiXZLiWS4i duration_ms=10.135875 rows=1 sql="INSERT INTO \"clusters\" (\"id\",\"created_time\",...) VALUES (...)"
```

**Slow query (>200ms, visible at all log levels except error):**

```json
{
  "timestamp": "2026-01-14T12:00:00Z",
  "level": "warn",
  "message": "GORM query",
  "duration_ms": 250.5,
  "rows": 1000,
  "sql": "SELECT * FROM clusters WHERE ...",
  "request_id": "...",
  "transaction_id": 1234
}
```

**Database error (visible at all log levels):**

```json
{
"timestamp": "2026-01-14T12:00:00Z",
"level": "error",
"message": "GORM query error",
"error": "pq: duplicate key value violates unique constraint \"idx_clusters_name\"",
"duration_ms": 10.5,
"rows": 0,
"sql": "INSERT INTO \"clusters\" ...",
"request_id": "..."
}
```
Comment on lines +293 to +343
⚠️ Potential issue | 🟡 Minor

Update slow query example to match actual GORM logger output.

The slow query example shows an incorrect message and is missing a field:

  1. Message should be "GORM slow query" (not "GORM query") — see line 72 of pkg/logger/gorm_logger.go
  2. Missing "threshold_ms" field that is emitted at line 69
Corrected slow query example:

```json
{
  "timestamp": "2026-01-14T12:00:00Z",
  "level": "warn",
  "message": "GORM slow query",
  "duration_ms": 250.5,
  "threshold_ms": 200.0,
  "rows": 1000,
  "sql": "SELECT * FROM clusters WHERE ...",
  "request_id": "...",
  "transaction_id": 1234
}
```

### Backward Compatibility

The existing `--enable-db-debug` CLI flag and `DB_DEBUG` environment variable continue to work exactly as before. The new functionality only adds automatic integration with `LOG_LEVEL` when `DB_DEBUG` is not explicitly set.
Contributor

Should we keep the old config for backward compatibility?
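
To illustrate the precedence described above (the `--enable-db-debug` flag name comes from this PR's `pkg/config/db.go` changes; the serve invocation and the flag being a boolean are assumptions):

```bash
# BindEnv skips the env var whenever the flag was set explicitly,
# so database debug logging stays enabled here despite DB_DEBUG=false.
DB_DEBUG=false ./bin/hyperfleet-api serve --enable-db-debug
```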


## Log Output Examples

### Error Logs with Stack Traces
@@ -354,6 +449,8 @@ env().Config.Logging.Masking.Fields = append(

## Best Practices

### Application Logging

1. **Always use context**: `logger.Info(ctx, "msg")` not `slog.Info("msg")`
2. **Use WithError for errors**: `logger.WithError(ctx, err).Error(...)` not `"error", err`
3. **Use field constants**: `logger.FieldEnvironment` not `"environment"`
@@ -362,6 +459,15 @@ env().Config.Logging.Masking.Fields = append(
6. **Never log sensitive data**: Always sanitize passwords, tokens, connection strings
7. **Choose appropriate levels**: DEBUG (dev), INFO (normal), WARN (client error), ERROR (server error)

### Database Logging

1. **Use LOG_LEVEL for database logs**: Don't set `DB_DEBUG` unless specifically debugging database issues
2. **Production default**: `LOG_LEVEL=info` hides fast queries, shows slow queries (>200ms)
3. **Temporary debugging**: Use `DB_DEBUG=true` for production database troubleshooting, then disable it
4. **Development**: Use `LOG_LEVEL=debug` to see all SQL queries during development
5. **High-traffic systems**: Consider `LOG_LEVEL=warn` to minimize database log volume
6. **Monitor slow queries**: Review WARN-level GORM logs for queries exceeding 200ms threshold

## Troubleshooting

### Logs Not Appearing
@@ -392,6 +498,31 @@ mainRouter.Use(logging.RequestLoggingMiddleware)
2. Verify field names match configuration (case-insensitive)
3. Check JSON structure: Masking only works on top-level fields

### SQL Queries Not Appearing

1. Check log level: `export LOG_LEVEL=debug` (to see all SQL queries)
2. Check DB_DEBUG: `export DB_DEBUG=true` (to force SQL logging at any log level)
3. Verify queries are executing: Check if API operations complete successfully
4. Check log format: Use `LOG_FORMAT=text` for easier debugging

### Too Many SQL Queries in Logs

1. Production mode: `export LOG_LEVEL=info` (hides fast queries < 200ms)
2. Disable DB_DEBUG: `export DB_DEBUG=false` or unset it
3. Minimal mode: `export LOG_LEVEL=warn` (only slow queries and errors)
4. Silent mode: `export LOG_LEVEL=error` (no SQL queries logged)

### Only Want to See Slow Queries

Use the production default configuration:
```bash
export LOG_LEVEL=info
export LOG_FORMAT=json
export DB_DEBUG=false # or leave unset
```

This will only log SQL queries that take longer than 200ms.

## Testing

### Unit Tests
34 changes: 34 additions & 0 deletions pkg/config/db.go
@@ -2,8 +2,12 @@ package config

import (
	"fmt"
	"os"
	"strconv"
	"strings"

	"github.com/spf13/pflag"
	"gorm.io/gorm/logger"
)

type DatabaseConfig struct {
@@ -54,6 +58,19 @@ func (c *DatabaseConfig) AddFlags(fs *pflag.FlagSet) {
	fs.IntVar(&c.MaxOpenConnections, "db-max-open-connections", c.MaxOpenConnections, "Maximum open DB connections for this instance")
}

// BindEnv reads configuration from environment variables
// Priority: flags > env vars > defaults
func (c *DatabaseConfig) BindEnv(fs *pflag.FlagSet) {
	if val := os.Getenv("DB_DEBUG"); val != "" {
		if fs == nil || !fs.Changed("enable-db-debug") {
			debug, err := strconv.ParseBool(val)
			if err == nil {
				c.Debug = debug
			}
		}
	}
}

func (c *DatabaseConfig) ReadFiles() error {
	err := readFileValueString(c.HostFile, &c.Host)
	if err != nil {
@@ -117,3 +134,20 @@ func (c *DatabaseConfig) LogSafeConnectionStringWithName(name string, withSSL bo
		)
	}
}

// GetGormLogLevel returns the appropriate GORM log level based on DB_DEBUG and LOG_LEVEL.
// DB_DEBUG=true always returns Info level, otherwise follows LOG_LEVEL mapping.
func (c *DatabaseConfig) GetGormLogLevel(logLevel string) logger.LogLevel {
	if c.Debug {
		return logger.Info
	}

	switch strings.ToLower(strings.TrimSpace(logLevel)) {
	case "debug":
		return logger.Info
	case "error":
		return logger.Silent
	default:
		return logger.Warn
	}
}
2 changes: 1 addition & 1 deletion pkg/dao/generic.go
@@ -66,7 +66,7 @@ func (d *sqlGenericDao) GetInstanceDao(ctx context.Context, model interface{}) G
}

func (d *sqlGenericDao) Fetch(offset int, limit int, resourceList interface{}) error {
	return d.g2.Debug().Offset(offset).Limit(limit).Find(resourceList).Error
	return d.g2.Offset(offset).Limit(limit).Find(resourceList).Error
}

func (d *sqlGenericDao) Preload(preload string) {
11 changes: 10 additions & 1 deletion pkg/db/db_session/db_session.go
@@ -1,9 +1,18 @@
package db_session

import "sync"
import (
	"sync"

	"gorm.io/gorm/logger"
)

const (
	disable = "disable"
)

var once sync.Once

// LoggerReconfigurable allows runtime reconfiguration of the database logger
type LoggerReconfigurable interface {
	ReconfigureLogger(level logger.LogLevel)
}
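
For illustration, a minimal sketch of a session factory that satisfies this interface; the type below is hypothetical and not the repository's actual db_session implementation.

```go
// Sketch only: a hypothetical session factory implementing LoggerReconfigurable.
package db_session_example

import (
	"gorm.io/gorm"
	"gorm.io/gorm/logger"
)

type exampleSessionFactory struct {
	db *gorm.DB
}

// ReconfigureLogger swaps the GORM logger for one at the requested level,
// which is what servecmd's initLogger relies on at runtime.
func (f *exampleSessionFactory) ReconfigureLogger(level logger.LogLevel) {
	f.db.Logger = f.db.Logger.LogMode(level)
}
```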