Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

build+lnd+docs: start using slog and add commit_hash to log lines #9314

Merged
merged 13 commits into from
Dec 2, 2024
Merged
80 changes: 42 additions & 38 deletions lnd.go
Original file line number Diff line number Diff line change
Expand Up @@ -149,7 +149,7 @@ func Main(cfg *Config, lisCfg ListenerCfg, implCfg *ImplementationCfg,
interceptor signal.Interceptor) error {

defer func() {
ltndLog.Info("Shutdown complete\n")
ltndLog.Info("Shutdown complete")
err := cfg.LogRotator.Close()
if err != nil {
ltndLog.Errorf("Could not close log rotator: %v", err)
Expand Down Expand Up @@ -183,9 +183,11 @@ func Main(cfg *Config, lisCfg ListenerCfg, implCfg *ImplementationCfg,
}

// Show version at startup.
ltndLog.Infof("Version: %s commit=%s, build=%s, logging=%s, "+
"debuglevel=%s", build.Version(), build.Commit,
build.Deployment, build.LoggingType, cfg.DebugLevel)
ltndLog.InfoS(ctx, "Version Info",
slog.String("version", build.Version()),
slog.String("commit", build.Commit),
slog.Any("build", build.Deployment),
slog.String("debuglevel", cfg.DebugLevel))

var network string
switch {
Expand All @@ -205,9 +207,9 @@ func Main(cfg *Config, lisCfg ListenerCfg, implCfg *ImplementationCfg,
network = "signet"
}

ltndLog.Infof("Active chain: %v (network=%v)",
strings.Title(BitcoinChainName), network,
)
ltndLog.InfoS(ctx, "Network Info",
"active_chain", strings.Title(BitcoinChainName),
"network", network)

// Enable http profiling server if requested.
if cfg.Pprof.Profile != "" {
Expand All @@ -233,7 +235,7 @@ func Main(cfg *Config, lisCfg ListenerCfg, implCfg *ImplementationCfg,
"/debug/pprof/", http.StatusSeeOther,
))

ltndLog.Infof("Pprof listening on %v", cfg.Pprof.Profile)
ltndLog.InfoS(ctx, "Pprof listening", "addr", cfg.Pprof.Profile)

// Create the pprof server.
pprofServer := &http.Server{
Expand All @@ -244,19 +246,19 @@ func Main(cfg *Config, lisCfg ListenerCfg, implCfg *ImplementationCfg,

// Shut the server down when lnd is shutting down.
defer func() {
ltndLog.Info("Stopping pprof server...")
ltndLog.InfoS(ctx, "Stopping pprof server...")
err := pprofServer.Shutdown(ctx)
if err != nil {
ltndLog.Errorf("Stop pprof server got err: %v",
err)
ltndLog.ErrorS(ctx, "Stop pprof server", err)
}
}()

// Start the pprof server.
go func() {
err := pprofServer.ListenAndServe()
if err != nil && !errors.Is(err, http.ErrServerClosed) {
ltndLog.Errorf("Serving pprof got err: %v", err)
ltndLog.ErrorS(ctx, "Could not serve pprof "+
"server", err)
}
}()
}
Expand Down Expand Up @@ -420,8 +422,8 @@ func Main(cfg *Config, lisCfg ListenerCfg, implCfg *ImplementationCfg,
cancelElection()
}()

ltndLog.Infof("Using %v leader elector",
cfg.Cluster.LeaderElector)
ltndLog.InfoS(ctx, "Using leader elector",
"elector", cfg.Cluster.LeaderElector)

leaderElector, err = cfg.Cluster.MakeLeaderElector(
electionCtx, cfg.DB,
Expand All @@ -435,8 +437,8 @@ func Main(cfg *Config, lisCfg ListenerCfg, implCfg *ImplementationCfg,
return
}

ltndLog.Infof("Attempting to resign from leader role "+
"(%v)", cfg.Cluster.ID)
ltndLog.InfoS(ctx, "Attempting to resign from "+
"leader role", "cluster_id", cfg.Cluster.ID)

// Ensure that we don't block the shutdown process if
// the leader resigning process takes too long. The
Expand All @@ -454,21 +456,23 @@ func Main(cfg *Config, lisCfg ListenerCfg, implCfg *ImplementationCfg,
}
}()

ltndLog.Infof("Starting leadership campaign (%v)",
cfg.Cluster.ID)
ltndLog.InfoS(ctx, "Starting leadership campaign",
"cluster_id", cfg.Cluster.ID)

if err := leaderElector.Campaign(electionCtx); err != nil {
return mkErr("leadership campaign failed", err)
}

elected = true
ltndLog.Infof("Elected as leader (%v)", cfg.Cluster.ID)
ltndLog.InfoS(ctx, "Elected as leader",
"cluster_id", cfg.Cluster.ID)
}

dbs, cleanUp, err := implCfg.DatabaseBuilder.BuildDatabase(ctx)
switch {
case err == channeldb.ErrDryRunMigrationOK:
ltndLog.Infof("%v, exiting", err)
ltndLog.InfoS(ctx, "Exiting due to BuildDatabase error",
slog.Any("err", err))
return nil
case err != nil:
return mkErr("unable to open databases", err)
Expand Down Expand Up @@ -512,14 +516,14 @@ func Main(cfg *Config, lisCfg ListenerCfg, implCfg *ImplementationCfg,

if cfg.Tor.Active {
if cfg.Tor.SkipProxyForClearNetTargets {
srvrLog.Info("Onion services are accessible via Tor! " +
"NOTE: Traffic to clearnet services is not " +
"routed via Tor.")
srvrLog.InfoS(ctx, "Onion services are accessible "+
"via Tor! NOTE: Traffic to clearnet services "+
"is not routed via Tor.")
} else {
srvrLog.Infof("Proxying all network traffic via Tor "+
"(stream_isolation=%v)! NOTE: Ensure the "+
"backend node is proxying over Tor as well",
cfg.Tor.StreamIsolation)
srvrLog.InfoS(ctx, "Proxying all network traffic "+
"via Tor! NOTE: Ensure the backend node is "+
"proxying over Tor as well",
"stream_isolation", cfg.Tor.StreamIsolation)
}
}

Expand All @@ -541,8 +545,8 @@ func Main(cfg *Config, lisCfg ListenerCfg, implCfg *ImplementationCfg,
}
defer func() {
if err := torController.Stop(); err != nil {
ltndLog.Errorf("error stopping tor "+
"controller: %v", err)
ltndLog.ErrorS(ctx, "Error stopping tor "+
"controller", err)
}
}()
}
Expand Down Expand Up @@ -684,8 +688,8 @@ func Main(cfg *Config, lisCfg ListenerCfg, implCfg *ImplementationCfg,
return mkErr("unable to determine chain tip", err)
}

ltndLog.Infof("Waiting for chain backend to finish sync, "+
"start_height=%v", bestHeight)
ltndLog.InfoS(ctx, "Waiting for chain backend to finish sync",
slog.Int64("start_height", int64(bestHeight)))

type syncResult struct {
synced bool
Expand Down Expand Up @@ -715,9 +719,9 @@ func Main(cfg *Config, lisCfg ListenerCfg, implCfg *ImplementationCfg,
"is synced", res.err)
}

ltndLog.Debugf("Syncing to block timestamp: %v, is "+
"synced=%v", time.Unix(res.bestBlockTime, 0),
res.synced)
ltndLog.DebugS(ctx, "Syncing to block chain",
"best_block_time", time.Unix(res.bestBlockTime, 0),
"is_synced", res.synced)

if res.synced {
break
Expand All @@ -742,8 +746,8 @@ func Main(cfg *Config, lisCfg ListenerCfg, implCfg *ImplementationCfg,
return mkErr("unable to determine chain tip", err)
}

ltndLog.Infof("Chain backend is fully synced (end_height=%v)!",
bestHeight)
ltndLog.InfoS(ctx, "Chain backend is fully synced!",
"end_height", bestHeight)

// With all the relevant chains initialized, we can finally start the
// server itself. We start the server in an asynchronous goroutine so
Expand All @@ -757,8 +761,8 @@ func Main(cfg *Config, lisCfg ListenerCfg, implCfg *ImplementationCfg,
defer func() {
err := server.Stop()
if err != nil {
ltndLog.Warnf("Stopping the server including all "+
"its subsystems failed with %v", err)
ltndLog.WarnS(ctx, "Stopping the server including all "+
"its subsystems failed", err)
}
}()

Expand Down