remove sugar logging

Change-Id: I6b6ca9704837cb3f5f5449ba7f55661487814d9f
Kaloyan Raev 2020-04-13 12:31:17 +03:00
parent 158013a866
commit a2ce836761
30 changed files with 189 additions and 135 deletions

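For context, the pattern this commit applies throughout is replacing zap's SugaredLogger (zap.S(), log.Sugar()) and its printf-style methods with the structured Logger and typed fields. A minimal, self-contained sketch of that before/after pattern follows; the logger setup, service name, and error value are illustrative and not taken from the diff:

package main

import (
	"errors"

	"go.uber.org/zap"
)

func main() {
	// Illustrative logger; the satellite and storagenode processes build theirs elsewhere.
	log, _ := zap.NewDevelopment()
	defer func() { _ = log.Sync() }()
	zap.ReplaceGlobals(log) // make the global zap.S()/zap.L() use this logger too

	err := errors.New("connection refused")
	serviceName := "storagenode"

	// Before: sugared, printf-style formatting bakes the values into the message string.
	zap.S().Errorf("Error updating %s: %v", serviceName, err)

	// After: a fixed message plus typed fields, the pattern used throughout this commit
	// (zap.String, zap.Error, zap.Stringer, ...).
	log.Error("Error updating service.",
		zap.String("Service", serviceName),
		zap.Error(err),
	)
}
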
View File

@ -24,7 +24,7 @@ func cmdAdminRun(cmd *cobra.Command, args []string) (err error) {
identity, err := runCfg.Identity.Load()
if err != nil {
zap.S().Fatal(err)
log.Fatal("Failed to load identity.", zap.Error(err))
}
db, err := satellitedb.New(log.Named("db"), runCfg.Database, satellitedb.Options{
@ -64,16 +64,16 @@ func cmdAdminRun(cmd *cobra.Command, args []string) (err error) {
}
if err := process.InitMetricsWithCertPath(ctx, log, nil, runCfg.Identity.CertPath); err != nil {
zap.S().Warn("Failed to initialize telemetry batcher on satellite admin: ", err)
log.Warn("Failed to initialize telemetry batcher on satellite admin", zap.Error(err))
}
if err := process.InitTracingWithCertPath(ctx, log, nil, runCfg.Identity.CertPath); err != nil {
zap.S().Warn("Failed to initialize tracing collector on satellite admin: ", err)
log.Warn("Failed to initialize tracing collector on satellite admin", zap.Error(err))
}
err = db.CheckVersion(ctx)
if err != nil {
zap.S().Fatal("failed satellite database version check: ", err)
log.Fatal("Failed satellite database version check.", zap.Error(err))
return errs.New("Error checking version for satellitedb: %+v", err)
}

View File

@ -27,7 +27,7 @@ func cmdAPIRun(cmd *cobra.Command, args []string) (err error) {
identity, err := runCfg.Identity.Load()
if err != nil {
zap.S().Fatal(err)
log.Fatal("Failed to load identity.", zap.Error(err))
}
db, err := satellitedb.New(log.Named("db"), runCfg.Database, satellitedb.Options{
@ -80,15 +80,15 @@ func cmdAPIRun(cmd *cobra.Command, args []string) (err error) {
}
if err := process.InitMetricsWithHostname(ctx, log, nil); err != nil {
zap.S().Warn("Failed to initialize telemetry batcher on satellite api: ", err)
log.Warn("Failed to initialize telemetry batcher on satellite api", zap.Error(err))
}
if err := process.InitTracingWithHostname(ctx, log, nil, runCfg.Identity.CertPath); err != nil {
zap.S().Warn("Failed to initialize tracing collector on satellite api: ", err)
log.Warn("Failed to initialize tracing collector on satellite api", zap.Error(err))
}
err = db.CheckVersion(ctx)
if err != nil {
zap.S().Fatal("failed satellite database version check: ", err)
log.Fatal("Failed satellite database version check.", zap.Error(err))
return errs.New("Error checking version for satellitedb: %+v", err)
}

View File

@ -39,7 +39,7 @@ func generateInvoicesCSV(ctx context.Context, period compensation.Period, out io
defer func() { err = errs.Combine(err, db.Close()) }()
if err := db.CheckVersion(ctx); err != nil {
zap.S().Fatal("failed satellite database version check: ", err)
zap.L().Fatal("Failed satellite database version check.", zap.Error(err))
return errs.New("Error checking version for satellitedb: %+v", err)
}
@ -147,7 +147,7 @@ func recordPeriod(ctx context.Context, paystubsCSV, paymentsCSV string) (int, in
defer func() { err = errs.Combine(err, db.Close()) }()
if err := db.CheckVersion(ctx); err != nil {
zap.S().Fatal("failed satellite database version check: ", err)
zap.L().Fatal("Failed satellite database version check.", zap.Error(err))
return 0, 0, errs.New("Error checking version for satellitedb: %+v", err)
}
@ -171,7 +171,7 @@ func recordOneOffPayments(ctx context.Context, paymentsCSV string) (int, error)
defer func() { err = errs.Combine(err, db.Close()) }()
if err := db.CheckVersion(ctx); err != nil {
zap.S().Fatal("failed satellite database version check: ", err)
zap.L().Fatal("Failed satellite database version check.", zap.Error(err))
return 0, errs.New("Error checking version for satellitedb: %+v", err)
}

View File

@ -24,7 +24,7 @@ func cmdGCRun(cmd *cobra.Command, args []string) (err error) {
identity, err := runCfg.Identity.Load()
if err != nil {
zap.S().Fatal(err)
log.Fatal("Failed to load identity.", zap.Error(err))
}
db, err := satellitedb.New(log.Named("db"), runCfg.Database, satellitedb.Options{})
@ -62,16 +62,16 @@ func cmdGCRun(cmd *cobra.Command, args []string) (err error) {
}
if err := process.InitMetricsWithCertPath(ctx, log, nil, runCfg.Identity.CertPath); err != nil {
zap.S().Warn("Failed to initialize telemetry batcher on satellite GC: ", err)
log.Warn("Failed to initialize telemetry batcher on satellite GC", zap.Error(err))
}
if err := process.InitTracingWithCertPath(ctx, log, nil, runCfg.Identity.CertPath); err != nil {
zap.S().Warn("Failed to initialize tracing collector on satellite GC: ", err)
log.Warn("Failed to initialize tracing collector on satellite GC", zap.Error(err))
}
err = db.CheckVersion(ctx)
if err != nil {
zap.S().Fatal("failed satellite database version check for GC: ", err)
log.Fatal("Failed satellite database version check for GC.", zap.Error(err))
return errs.New("Error checking version for satellitedb for GC: %+v", err)
}

View File

@ -257,7 +257,7 @@ func cmdRun(cmd *cobra.Command, args []string) (err error) {
identity, err := runCfg.Identity.Load()
if err != nil {
zap.S().Fatal(err)
log.Fatal("Failed to load identity.", zap.Error(err))
}
db, err := satellitedb.New(log.Named("db"), runCfg.Database, satellitedb.Options{
@ -311,15 +311,15 @@ func cmdRun(cmd *cobra.Command, args []string) (err error) {
}
if err := process.InitMetricsWithCertPath(ctx, log, nil, runCfg.Identity.CertPath); err != nil {
zap.S().Warn("Failed to initialize telemetry batcher: ", err)
log.Warn("Failed to initialize telemetry batcher", zap.Error(err))
}
if err := process.InitTracingWithCertPath(ctx, log, nil, runCfg.Identity.CertPath); err != nil {
zap.S().Warn("Failed to initialize tracing collector: ", err)
log.Warn("Failed to initialize tracing collector", zap.Error(err))
}
err = db.CheckVersion(ctx)
if err != nil {
zap.S().Fatal("failed satellite database version check: ", err)
log.Fatal("Failed satellite database version check.", zap.Error(err))
return errs.New("Error checking version for satellitedb: %+v", err)
}
@ -415,7 +415,7 @@ func cmdVerifyGracefulExitReceipt(cmd *cobra.Command, args []string) (err error)
identity, err := runCfg.Identity.Load()
if err != nil {
zap.S().Fatal(err)
zap.L().Fatal("Failed to load identity.", zap.Error(err))
}
// Check the node ID is valid
@ -556,7 +556,10 @@ func cmdValueAttribution(cmd *cobra.Command, args []string) (err error) {
defer func() {
err = errs.Combine(err, file.Close())
if err != nil {
log.Sugar().Errorf("error closing the file %v after retrieving partner value attribution data: %+v", partnerAttribtionCfg.Output, err)
log.Error("Error closing the output file after retrieving partner value attribution data.",
zap.String("Output File", partnerAttribtionCfg.Output),
zap.Error(err),
)
}
}()

View File

@ -26,7 +26,7 @@ func cmdRepairerRun(cmd *cobra.Command, args []string) (err error) {
identity, err := runCfg.Identity.Load()
if err != nil {
zap.S().Fatal(err)
log.Fatal("Failed to load identity.", zap.Error(err))
}
db, err := satellitedb.New(log.Named("db"), runCfg.Database, satellitedb.Options{})
@ -81,16 +81,16 @@ func cmdRepairerRun(cmd *cobra.Command, args []string) (err error) {
}
if err := process.InitMetricsWithHostname(ctx, log, nil); err != nil {
zap.S().Warn("Failed to initialize telemetry batcher on repairer: ", err)
log.Warn("Failed to initialize telemetry batcher on repairer", zap.Error(err))
}
if err := process.InitTracingWithHostname(ctx, log, nil, runCfg.Identity.CertPath); err != nil {
zap.S().Warn("Failed to initialize tracing collector on repairer: ", err)
log.Warn("Failed to initialize tracing collector on repairer", zap.Error(err))
}
err = db.CheckVersion(ctx)
if err != nil {
zap.S().Fatal("failed satellite database version check: ", err)
log.Fatal("Failed satellite database version check.", zap.Error(err))
return errs.New("Error checking version for satellitedb: %+v", err)
}

View File

@ -39,7 +39,7 @@ func GenerateAttributionCSV(ctx context.Context, database string, partnerID uuid
defer func() {
err = errs.Combine(err, db.Close())
if err != nil {
log.Sugar().Errorf("error closing satellite DB connection after retrieving partner value attribution data: %+v", err)
log.Error("Error closing satellite DB connection after retrieving partner value attribution data.", zap.Error(err))
}
}()

View File

@ -139,7 +139,7 @@ func (obsvr *observer) processSegment(ctx context.Context, path metainfo.ScopedP
// isn't tracked in it.
if streamMeta.NumberOfSegments > (int64(maxNumOfSegments) + 1) {
object.skip = true
zap.S().Warn("unsupported number of segments", zap.Int64("index", streamMeta.NumberOfSegments))
zap.L().Warn("Unsupported number of segments", zap.Int64("Segments", streamMeta.NumberOfSegments))
}
object.expectedNumberOfSegments = byte(streamMeta.NumberOfSegments)
}
@ -150,7 +150,7 @@ func (obsvr *observer) processSegment(ctx context.Context, path metainfo.ScopedP
}
if segmentIndex >= int(maxNumOfSegments) {
object.skip = true
zap.S().Warn("unsupported segment index", zap.Int("index", segmentIndex))
zap.L().Warn("Unsupported segment index", zap.Int("Index", segmentIndex))
} else {
ok, err := object.segments.Has(segmentIndex)
if err != nil {

View File

@ -90,20 +90,20 @@ func init() {
func cmdRun(cmd *cobra.Command, args []string) (err error) {
err = openLog()
if err != nil {
zap.S().Errorf("Error creating new logger: %v", err)
zap.L().Error("Error creating new logger.", zap.Error(err))
}
if !fileExists(runCfg.BinaryLocation) {
zap.S().Fatal("Unable to find storage node executable binary")
zap.L().Fatal("Unable to find storage node executable binary.")
}
ident, err := runCfg.Identity.Load()
if err != nil {
zap.S().Fatalf("Error loading identity: %v", err)
zap.L().Fatal("Error loading identity.", zap.Error(err))
}
nodeID = ident.ID
if nodeID.IsZero() {
zap.S().Fatal("Empty node ID")
zap.L().Fatal("Empty node ID.")
}
var ctx context.Context
@ -121,13 +121,13 @@ func cmdRun(cmd *cobra.Command, args []string) (err error) {
loopFunc := func(ctx context.Context) (err error) {
if err := update(ctx, runCfg.BinaryLocation, runCfg.ServiceName); err != nil {
// don't finish loop in case of error just wait for another execution
zap.S().Errorf("Error updating %s: %v", runCfg.ServiceName, err)
zap.L().Error("Error updating service.", zap.String("Service", runCfg.ServiceName), zap.Error(err))
}
updaterBinName := os.Args[0]
if err := update(ctx, updaterBinName, updaterServiceName); err != nil {
// don't finish loop in case of error just wait for another execution
zap.S().Errorf("Error updating %s: %v", updaterServiceName, err)
zap.L().Error("Error updating service.", zap.String("Service", updaterServiceName), zap.Error(err))
}
return nil
}
@ -136,7 +136,10 @@ func cmdRun(cmd *cobra.Command, args []string) (err error) {
case runCfg.CheckInterval <= 0:
err = loopFunc(ctx)
case runCfg.CheckInterval < minCheckInterval:
zap.S().Errorf("Check interval below minimum: %s, setting to %s", runCfg.CheckInterval, minCheckInterval)
zap.L().Error("Check interval below minimum. Overriding it minimum.",
zap.Stringer("Check Interval", runCfg.CheckInterval),
zap.Stringer("Minimum Check Interval", minCheckInterval),
)
runCfg.CheckInterval = minCheckInterval
fallthrough
default:
@ -151,7 +154,7 @@ func cmdRun(cmd *cobra.Command, args []string) (err error) {
func update(ctx context.Context, binPath, serviceName string) (err error) {
if nodeID.IsZero() {
zap.S().Fatal("Empty node ID")
zap.L().Fatal("Empty node ID.")
}
var currentVersion version.SemVer
@ -166,7 +169,7 @@ func update(ctx context.Context, binPath, serviceName string) (err error) {
}
client := checker.New(runCfg.ClientConfig)
zap.S().Infof("Downloading versions from %s", runCfg.ServerAddress)
zap.L().Info("Downloading versions.", zap.String("Server Address", runCfg.ServerAddress))
processVersion, err := client.Process(ctx, serviceName)
if err != nil {
return errs.Wrap(err)
@ -179,12 +182,12 @@ func update(ctx context.Context, binPath, serviceName string) (err error) {
}
if currentVersion.Compare(suggestedVersion) >= 0 {
zap.S().Infof("%s version is up to date", serviceName)
zap.L().Info("Version is up to date.", zap.String("Service", serviceName))
return nil
}
if !version.ShouldUpdate(processVersion.Rollout, nodeID) {
zap.S().Infof("New %s version available but not rolled out to this nodeID yet", serviceName)
zap.L().Info("New version available but not rolled out to this nodeID yet", zap.String("Service", serviceName))
return nil
}
@ -200,12 +203,12 @@ func update(ctx context.Context, binPath, serviceName string) (err error) {
}()
downloadURL := parseDownloadURL(processVersion.Suggested.URL)
zap.S().Infof("Start downloading %s to %s", downloadURL, tempArchive.Name())
zap.L().Info("Download started.", zap.String("From", downloadURL), zap.String("To", tempArchive.Name()))
err = downloadArchive(ctx, tempArchive, downloadURL)
if err != nil {
return errs.Wrap(err)
}
zap.S().Infof("Finished downloading %s to %s", downloadURL, tempArchive.Name())
zap.L().Info("Download finished.", zap.String("From", downloadURL), zap.String("To", tempArchive.Name()))
newVersionPath := prependExtension(binPath, suggestedVersion.String())
err = unpackBinary(ctx, tempArchive.Name(), newVersionPath)
@ -241,13 +244,13 @@ func update(ctx context.Context, binPath, serviceName string) (err error) {
return errs.Wrap(err)
}
zap.S().Infof("Restarting service %s", serviceName)
zap.L().Info("Restarting service.", zap.String("Service", serviceName))
err = restartService(serviceName)
if err != nil {
// TODO: should we try to recover from this?
return errs.New("Unable to restart service: %v", err)
}
zap.S().Infof("Service %s restarted successfully", serviceName)
zap.L().Info("Service restarted successfully.", zap.String("Service", serviceName))
// TODO remove old binary ??
return nil
@ -269,7 +272,7 @@ func parseDownloadURL(template string) string {
func binaryVersion(location string) (version.SemVer, error) {
out, err := exec.Command(location, "version").CombinedOutput()
if err != nil {
zap.S().Infof("Command output: %s", out)
zap.L().Info("Command output.", zap.ByteString("Output", out))
return version.SemVer{}, err
}

View File

@ -104,10 +104,10 @@ func TestAutoUpdater(t *testing.T) {
if assert.NoError(t, logErr) {
logStr := string(logData)
t.Log(logStr)
if !assert.Contains(t, logStr, "storagenode restarted successfully") {
if !assert.Contains(t, logStr, `Service restarted successfully. {"Service": "storagenode"}`) {
t.Log(logStr)
}
if !assert.Contains(t, logStr, "storagenode-updater restarted successfully") {
if !assert.Contains(t, logStr, `Service restarted successfully. {"Service": "storagenode-updater"}`) {
t.Log(logStr)
}
} else {

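The updated assertions above rely on how zap's console-style encoder renders structured fields: the fixed message is followed by the fields serialized as a JSON object, so the test (and storj-sim's prefixWriter, changed further down) can match on that suffix instead of a formatted sentence. A minimal sketch, assuming a stock development encoder; the real binaries configure their own encoder, so the exact spacing between message and fields may differ:

package main

import (
	"os"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	// A console-encoded logger similar in spirit to what the updater test reads back.
	core := zapcore.NewCore(
		zapcore.NewConsoleEncoder(zap.NewDevelopmentEncoderConfig()),
		zapcore.AddSync(os.Stdout),
		zap.DebugLevel,
	)
	log := zap.New(core)

	// Renders roughly as:
	//   <timestamp>  INFO  Service restarted successfully.  {"Service": "storagenode"}
	// i.e. the message plus a JSON-style field suffix, which is the substring the test
	// asserts on and the string prefixWriter scans for (`{"Node ID": "`).
	log.Info("Service restarted successfully.", zap.String("Service", "storagenode"))
}
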
View File

@ -26,7 +26,7 @@ func init() {
// Check if session is interactive
interactive, err := svc.IsAnInteractiveSession()
if err != nil {
zap.S().Fatalf("Failed to determine if session is interactive: %v", err)
zap.L().Fatal("Failed to determine if session is interactive.", zap.Error(err))
}
if interactive {
@ -45,7 +45,7 @@ func init() {
// Initialize the Windows Service handler
err = svc.Run("storagenode-updater", &service{})
if err != nil {
zap.S().Fatalf("Service failed: %v", err)
zap.L().Fatal("Service failed.", zap.Error(err))
}
// avoid starting main() when service was stopped
os.Exit(0)
@ -69,13 +69,13 @@ func (m *service) Execute(args []string, r <-chan svc.ChangeRequest, changes cha
for c := range r {
switch c.Cmd {
case svc.Interrogate:
zap.S().Info("Interrogate request received.")
zap.L().Info("Interrogate request received.")
changes <- c.CurrentStatus
// Testing deadlock from https://code.google.com/p/winsvc/issues/detail?id=4
time.Sleep(100 * time.Millisecond)
changes <- c.CurrentStatus
case svc.Stop, svc.Shutdown:
zap.S().Info("Stop/Shutdown request received.")
zap.L().Info("Stop/Shutdown request received.")
changes <- svc.Status{State: svc.StopPending}
// Cancel the command's root context to cleanup resources
_, cancel := process.Ctx(runCmd)
@ -84,7 +84,7 @@ func (m *service) Execute(args []string, r <-chan svc.ChangeRequest, changes cha
// After returning the Windows Service is stopped and the process terminates
return
default:
zap.S().Infof("Unexpected control request: %d\n", c)
zap.L().Info("Unexpected control request.", zap.Uint32("Event Type", c.EventType))
}
}
return

View File

@ -52,9 +52,9 @@ func cmdDashboard(cmd *cobra.Command, args []string) (err error) {
ident, err := runCfg.Identity.Load()
if err != nil {
zap.S().Fatal(err)
zap.L().Fatal("Failed to load identity.", zap.Error(err))
} else {
zap.S().Info("Node ID: ", ident.ID)
zap.L().Info("Identity loaded.", zap.Stringer("Node ID", ident.ID))
}
client, err := dialDashboardClient(ctx, dashboardCfg.Address)
@ -63,7 +63,7 @@ func cmdDashboard(cmd *cobra.Command, args []string) (err error) {
}
defer func() {
if err := client.close(); err != nil {
zap.S().Debug("closing dashboard client failed", err)
zap.L().Debug("Closing dashboard client failed.", zap.Error(err))
}
}()

View File

@ -59,7 +59,11 @@ func mapDeprecatedConfigs(log *zap.Logger) {
reflect.ValueOf(migration.newValue).Elem().Set(reflect.ValueOf(override))
log.Sugar().Debugf("Found deprecated flag. Migrating value %v from %s to %s", reflect.ValueOf(migration.newValue).Elem(), migration.oldConfigString, migration.newConfigString)
log.Debug("Found deprecated flag. Migrating value.",
zap.Stringer("Value", reflect.ValueOf(migration.newValue).Elem()),
zap.String("From", migration.oldConfigString),
zap.String("To", migration.newConfigString),
)
}
}
}

View File

@ -57,9 +57,9 @@ func cmdGracefulExitInit(cmd *cobra.Command, args []string) error {
ident, err := runCfg.Identity.Load()
if err != nil {
zap.S().Fatal(err)
zap.L().Fatal("Failed to load identity.", zap.Error(err))
} else {
zap.S().Info("Node ID: ", ident.ID)
zap.L().Info("Identity loaded.", zap.Stringer("Node ID", ident.ID))
}
// display warning message
@ -77,7 +77,7 @@ func cmdGracefulExitInit(cmd *cobra.Command, args []string) error {
}
defer func() {
if err := client.close(); err != nil {
zap.S().Debug("closing graceful exit client failed", err)
zap.L().Debug("Closing graceful exit client failed.", zap.Error(err))
}
}()
@ -140,7 +140,7 @@ func cmdGracefulExitInit(cmd *cobra.Command, args []string) error {
}
resp, err := client.initGracefulExit(ctx, req)
if err != nil {
zap.S().Debug("initializing graceful exit failed", zap.Stringer("Satellite ID", id), zap.Error(err))
zap.L().Debug("Initializing graceful exit failed.", zap.Stringer("Satellite ID", id), zap.Error(err))
errgroup.Add(err)
continue
}
@ -167,9 +167,9 @@ func cmdGracefulExitStatus(cmd *cobra.Command, args []string) (err error) {
ident, err := runCfg.Identity.Load()
if err != nil {
zap.S().Fatal(err)
zap.L().Fatal("Failed to load identity.", zap.Error(err))
} else {
zap.S().Info("Node ID: ", ident.ID)
zap.L().Info("Identity loaded.", zap.Stringer("Node ID", ident.ID))
}
client, err := dialGracefulExitClient(ctx, diagCfg.Server.PrivateAddress)
@ -178,7 +178,7 @@ func cmdGracefulExitStatus(cmd *cobra.Command, args []string) (err error) {
}
defer func() {
if err := client.close(); err != nil {
zap.S().Debug("closing graceful exit client failed", err)
zap.L().Debug("Closing graceful exit client failed.", zap.Error(err))
}
}()

View File

@ -145,11 +145,11 @@ func cmdRun(cmd *cobra.Command, args []string) (err error) {
identity, err := runCfg.Identity.Load()
if err != nil {
zap.S().Fatal(err)
log.Fatal("Failed to load identity.", zap.Error(err))
}
if err := runCfg.Verify(log); err != nil {
log.Sugar().Error("Invalid configuration: ", err)
log.Error("Invalid configuration.", zap.Error(err))
return err
}
@ -183,11 +183,11 @@ func cmdRun(cmd *cobra.Command, args []string) (err error) {
}
if err := process.InitMetricsWithCertPath(ctx, log, nil, runCfg.Identity.CertPath); err != nil {
zap.S().Warn("Failed to initialize telemetry batcher: ", err)
log.Warn("Failed to initialize telemetry batcher.", zap.Error(err))
}
if err := process.InitTracingWithCertPath(ctx, log, nil, runCfg.Identity.CertPath); err != nil {
zap.S().Warn("Failed to initialize tracing collector: ", err)
log.Warn("Failed to initialize tracing collector.", zap.Error(err))
}
err = db.CreateTables(ctx)
if err != nil {
@ -206,7 +206,7 @@ func cmdRun(cmd *cobra.Command, args []string) (err error) {
}
if err := peer.Storage2.CacheService.Init(ctx); err != nil {
zap.S().Error("Failed to initialize CacheService: ", err)
log.Error("Failed to initialize CacheService.", zap.Error(err))
}
runError := peer.Run(ctx)

View File

@ -29,7 +29,7 @@ func init() {
// Check if session is interactive
interactive, err := svc.IsAnInteractiveSession()
if err != nil {
zap.S().Fatalf("Failed to determine if session is interactive: %v", err)
zap.L().Fatal("Failed to determine if session is interactive.", zap.Error(err))
}
if interactive {
@ -48,7 +48,7 @@ func init() {
// Initialize the Windows Service handler
err = svc.Run("storagenode", &service{})
if err != nil {
zap.S().Fatalf("Service failed: %v", err)
zap.L().Fatal("Service failed.", zap.Error(err))
}
// avoid starting main() when service was stopped
os.Exit(0)
@ -72,13 +72,13 @@ func (m *service) Execute(args []string, r <-chan svc.ChangeRequest, changes cha
for c := range r {
switch c.Cmd {
case svc.Interrogate:
zap.S().Info("Interrogate request received.")
zap.L().Info("Interrogate request received.")
changes <- c.CurrentStatus
// Testing deadlock from https://code.google.com/p/winsvc/issues/detail?id=4
time.Sleep(100 * time.Millisecond)
changes <- c.CurrentStatus
case svc.Stop, svc.Shutdown:
zap.S().Info("Stop/Shutdown request received.")
zap.L().Info("Stop/Shutdown request received.")
changes <- svc.Status{State: svc.StopPending}
// Cancel the command's root context to cleanup resources
_, cancel := process.Ctx(runCmd)
@ -87,7 +87,7 @@ func (m *service) Execute(args []string, r <-chan svc.ChangeRequest, changes cha
// After returning the Windows Service is stopped and the process terminates
return
default:
zap.S().Infof("Unexpected control request: %d\n", c)
zap.L().Info("Unexpected control request.", zap.Uint32("Event Type", c.EventType))
}
}
return

View File

@ -80,8 +80,8 @@ func (writer *prefixWriter) Write(data []byte) (int, error) {
var newID string
if writer.id == "" {
if start := bytes.Index(data, []byte("Node ")); start > 0 {
if end := bytes.Index(data[start:], []byte(" started")); end > 0 {
if start := bytes.Index(data, []byte(`Node started. {"Node ID": "`)); start > 0 {
if end := bytes.Index(data[start:], []byte(`"`)); end > 0 {
newID = string(data[start+5 : start+end])
if len(newID) > maxIDLength {
newID = newID[:maxIDLength]

View File

@ -113,24 +113,24 @@ func (service *Service) checkVersion(ctx context.Context) (latestVersion version
minimumOld, err := service.client.OldMinimum(ctx, service.service)
if err != nil {
// Log about the error, but dont crash the Service and allow further operation
service.log.Sugar().Errorf("Failed to do periodic version check: %s", err.Error())
service.log.Error("Failed to do periodic version check.", zap.Error(err))
return suggestedVersion, true
}
minimum, err = version.NewSemVer(minimumOld.String())
if err != nil {
service.log.Sugar().Errorf("failed to convert old sem version to sem version")
service.log.Error("Failed to convert old sem version to sem version.")
return suggestedVersion, true
}
service.log.Sugar().Debugf("allowed minimum version from control server is: %s", minimum.String())
service.log.Debug("Allowed minimum version from control server.", zap.Stringer("Minimum Version", minimum.Version))
if isAcceptedVersion(service.Info.Version, minimumOld) {
service.log.Sugar().Infof("running on version %s", service.Info.Version.String())
service.log.Info("Running on allowed version.", zap.Stringer("Version", service.Info.Version.Version))
return suggestedVersion, true
}
service.log.Sugar().Errorf("running on not allowed/outdated version %s", service.Info.Version.String())
service.log.Error("Running on not allowed/outdated version.", zap.Stringer("Version", service.Info.Version.Version))
return suggestedVersion, false
}

View File

@ -88,8 +88,12 @@ func NewAdmin(log *zap.Logger, full *identity.FullIdentity, db DB,
{
if !versionInfo.IsZero() {
peer.Log.Sugar().Debugf("Binary Version: %s with CommitHash %s, built at %s as Release %v",
versionInfo.Version.String(), versionInfo.CommitHash, versionInfo.Timestamp.String(), versionInfo.Release)
peer.Log.Debug("Version info",
zap.Stringer("Version", versionInfo.Version.Version),
zap.String("Commit Hash", versionInfo.CommitHash),
zap.Stringer("Build Timestamp", versionInfo.Timestamp),
zap.Bool("Release Build", versionInfo.Release),
)
}
peer.Version.Service = checker.NewService(log.Named("version"), config.Version, versionInfo, "Satellite")
peer.Version.Chore = checker.NewChore(peer.Version.Service, config.Version.CheckInterval)

View File

@ -205,8 +205,12 @@ func NewAPI(log *zap.Logger, full *identity.FullIdentity, db DB,
{
if !versionInfo.IsZero() {
peer.Log.Sugar().Debugf("Binary Version: %s with CommitHash %s, built at %s as Release %v",
versionInfo.Version.String(), versionInfo.CommitHash, versionInfo.Timestamp.String(), versionInfo.Release)
peer.Log.Debug("Version info",
zap.Stringer("Version", versionInfo.Version.Version),
zap.String("Commit Hash", versionInfo.CommitHash),
zap.Stringer("Build Timestamp", versionInfo.Timestamp),
zap.Bool("Release Build", versionInfo.Release),
)
}
peer.Version.Service = checker.NewService(log.Named("version"), config.Version, versionInfo, "Satellite")
@ -243,9 +247,9 @@ func NewAPI(log *zap.Logger, full *identity.FullIdentity, db DB,
Name: "server",
Run: func(ctx context.Context) error {
// Don't change the format of this comment, it is used to figure out the node id.
peer.Log.Sugar().Infof("Node %s started", peer.Identity.ID)
peer.Log.Sugar().Infof("Public server started on %s", peer.Addr())
peer.Log.Sugar().Infof("Private server started on %s", peer.PrivateAddr())
peer.Log.Info("Node started.", zap.Stringer("Node ID", peer.Identity.ID))
peer.Log.Info("Public server started.", zap.String("Address", peer.Addr()))
peer.Log.Info("Private server started.", zap.String("Address", peer.PrivateAddr()))
return peer.Server.Run(ctx)
},
Close: peer.Server.Close,

View File

@ -7,6 +7,7 @@ import (
"context"
"crypto/subtle"
"encoding/json"
"fmt"
"html/template"
"mime"
"net"
@ -115,7 +116,7 @@ func NewServer(logger *zap.Logger, config Config, service *console.Service, mail
stripePublicKey: stripePublicKey,
}
logger.Sugar().Debugf("Starting Satellite UI on %s...", server.listener.Addr().String())
logger.Debug("Starting Satellite UI.", zap.Stringer("Address", server.listener.Addr()))
server.cookieAuth = consolewebauth.NewCookieAuth(consolewebauth.CookieSettings{
Name: "_tokenKey",
@ -695,7 +696,7 @@ func (server *Server) grapqlHandler(w http.ResponseWriter, r *http.Request) {
return
}
server.log.Sugar().Debug(result)
server.log.Debug(fmt.Sprintf("%s", result))
}
// serveError serves error static pages.

View File

@ -178,8 +178,12 @@ func New(log *zap.Logger, full *identity.FullIdentity, db DB,
{ // setup version control
if !versionInfo.IsZero() {
peer.Log.Sugar().Debugf("Binary Version: %s with CommitHash %s, built at %s as Release %v",
versionInfo.Version.String(), versionInfo.CommitHash, versionInfo.Timestamp.String(), versionInfo.Release)
peer.Log.Debug("Version info",
zap.Stringer("Version", versionInfo.Version.Version),
zap.String("Commit Hash", versionInfo.CommitHash),
zap.Stringer("Build Timestamp", versionInfo.Timestamp),
zap.Bool("Release Build", versionInfo.Release),
)
}
peer.Version.Service = version_checker.NewService(log.Named("version"), config.Version, versionInfo, "Satellite")
peer.Version.Chore = version_checker.NewChore(peer.Version.Service, config.Version.CheckInterval)

View File

@ -104,8 +104,12 @@ func NewGarbageCollection(log *zap.Logger, full *identity.FullIdentity, db DB,
{ // setup version control
if !versionInfo.IsZero() {
peer.Log.Sugar().Debugf("Binary Version: %s with CommitHash %s, built at %s as Release %v",
versionInfo.Version.String(), versionInfo.CommitHash, versionInfo.Timestamp.String(), versionInfo.Release)
peer.Log.Debug("Version info",
zap.Stringer("Version", versionInfo.Version.Version),
zap.String("Commit Hash", versionInfo.CommitHash),
zap.Stringer("Build Timestamp", versionInfo.Timestamp),
zap.Bool("Release Build", versionInfo.Release),
)
}
peer.Version.Service = version_checker.NewService(log.Named("version"), config.Version, versionInfo, "Satellite")
peer.Version.Chore = version_checker.NewChore(peer.Version.Service, config.Version.CheckInterval)

View File

@ -72,7 +72,7 @@ func NewServer(logger *zap.Logger, config Config, rewards rewards.DB, partners *
partners: partners,
}
logger.Sugar().Debugf("Starting Marketing Admin UI on %s...", s.listener.Addr().String())
logger.Debug("Starting Marketing Admin UI.", zap.Stringer("Address", s.listener.Addr()))
fs := http.StripPrefix("/static/", http.FileServer(http.Dir(s.config.StaticDir)))
mux := mux.NewRouter()
if s.config.StaticDir != "" {

View File

@ -187,11 +187,12 @@ func (endpoint *Endpoint) CreateSegmentOld(ctx context.Context, req *pb.SegmentW
exceeded, limit, err := endpoint.projectUsage.ExceedsStorageUsage(ctx, keyInfo.ProjectID)
if err != nil {
endpoint.log.Error("retrieving project storage totals", zap.Error(err))
endpoint.log.Error("Retrieving project storage totals failed.", zap.Error(err))
}
if exceeded {
endpoint.log.Sugar().Errorf("monthly project limits are %s of storage and bandwidth usage. This limit has been exceeded for storage for projectID %s",
limit, keyInfo.ProjectID,
endpoint.log.Error("Monthly storage limit exceeded.",
zap.Stringer("Limit", limit),
zap.Stringer("Project ID", keyInfo.ProjectID),
)
return nil, rpcstatus.Error(rpcstatus.ResourceExhausted, "Exceeded Usage Limit")
}
@ -283,8 +284,9 @@ func (endpoint *Endpoint) CommitSegmentOld(ctx context.Context, req *pb.SegmentC
return nil, rpcstatus.Error(rpcstatus.Internal, err.Error())
}
if exceeded {
endpoint.log.Sugar().Errorf("monthly project limits are %s of storage and bandwidth usage. This limit has been exceeded for storage for projectID %s.",
limit, keyInfo.ProjectID,
endpoint.log.Error("Monthly storage limit exceeded.",
zap.Stringer("Limit", limit),
zap.Stringer("Project ID", keyInfo.ProjectID),
)
return nil, rpcstatus.Error(rpcstatus.ResourceExhausted, "Exceeded Usage Limit")
}
@ -302,13 +304,18 @@ func (endpoint *Endpoint) CommitSegmentOld(ctx context.Context, req *pb.SegmentC
if req.Pointer.Type == pb.Pointer_REMOTE {
//We cannot have more redundancy than total/min
if float64(totalStored) > (float64(req.Pointer.SegmentSize)/float64(req.Pointer.Remote.Redundancy.MinReq))*float64(req.Pointer.Remote.Redundancy.Total) {
endpoint.log.Sugar().Debugf("data size mismatch, got segment: %d, pieces: %d, RS Min, Total: %d,%d", req.Pointer.SegmentSize, totalStored, req.Pointer.Remote.Redundancy.MinReq, req.Pointer.Remote.Redundancy.Total)
endpoint.log.Debug("Excessive redundancy.",
zap.Int64("Segment Size", req.Pointer.SegmentSize),
zap.Int64("Actual Pieces", totalStored),
zap.Int32("Required Pieces", req.Pointer.Remote.Redundancy.MinReq),
zap.Int32("Total Pieces", req.Pointer.Remote.Redundancy.Total),
)
return nil, rpcstatus.Error(rpcstatus.InvalidArgument, "mismatched segment size and piece usage")
}
}
if err := endpoint.projectUsage.AddProjectStorageUsage(ctx, keyInfo.ProjectID, segmentSize); err != nil {
endpoint.log.Sugar().Errorf("Could not track new storage usage by project %q: %v", keyInfo.ProjectID, err)
endpoint.log.Error("Could not track new storage usage.", zap.Stringer("Project ID", keyInfo.ProjectID), zap.Error(err))
// but continue. it's most likely our own fault that we couldn't track it, and the only thing
// that will be affected is our per-project bandwidth and storage limits.
}
@ -361,11 +368,12 @@ func (endpoint *Endpoint) DownloadSegmentOld(ctx context.Context, req *pb.Segmen
exceeded, limit, err := endpoint.projectUsage.ExceedsBandwidthUsage(ctx, keyInfo.ProjectID, bucketID)
if err != nil {
endpoint.log.Error("retrieving project bandwidth total", zap.Error(err))
endpoint.log.Error("Retrieving project bandwidth total failed.", zap.Error(err))
}
if exceeded {
endpoint.log.Sugar().Errorf("monthly project limits are %s of storage and bandwidth usage. This limit has been exceeded for bandwidth for projectID %s.",
limit, keyInfo.ProjectID,
endpoint.log.Error("Monthly storage limit exceeded.",
zap.Stringer("Limit", limit),
zap.Stringer("Project ID", keyInfo.ProjectID),
)
return nil, rpcstatus.Error(rpcstatus.ResourceExhausted, "Exceeded Usage Limit")
}
@ -386,7 +394,11 @@ func (endpoint *Endpoint) DownloadSegmentOld(ctx context.Context, req *pb.Segmen
limits, privateKey, err := endpoint.orders.CreateGetOrderLimitsOld(ctx, bucketID, pointer)
if err != nil {
if orders.ErrDownloadFailedNotEnoughPieces.Has(err) {
endpoint.log.Sugar().Errorf("unable to create order limits for project id %s from api key id %s: %v.", keyInfo.ProjectID.String(), keyInfo.ID.String(), zap.Error(err))
endpoint.log.Error("Unable to create order limits.",
zap.Stringer("Project ID", keyInfo.ProjectID),
zap.Stringer("API Key ID", keyInfo.ID),
zap.Error(err),
)
}
return nil, rpcstatus.Error(rpcstatus.Internal, err.Error())
}
@ -1375,11 +1387,12 @@ func (endpoint *Endpoint) BeginSegment(ctx context.Context, req *pb.SegmentBegin
exceeded, limit, err := endpoint.projectUsage.ExceedsStorageUsage(ctx, keyInfo.ProjectID)
if err != nil {
endpoint.log.Error("retrieving project storage totals", zap.Error(err))
endpoint.log.Error("Retrieving project storage totals failed.", zap.Error(err))
}
if exceeded {
endpoint.log.Sugar().Errorf("monthly project limits are %s of storage and bandwidth usage. This limit has been exceeded for storage for projectID %s",
limit, keyInfo.ProjectID,
endpoint.log.Error("Monthly storage limit exceeded.",
zap.Stringer("Limit", limit),
zap.Stringer("Project ID", keyInfo.ProjectID),
)
return nil, rpcstatus.Error(rpcstatus.ResourceExhausted, "Exceeded Usage Limit")
}
@ -1614,14 +1627,15 @@ func (endpoint *Endpoint) makeInlineSegment(ctx context.Context, req *pb.Segment
return nil, nil, rpcstatus.Error(rpcstatus.Internal, err.Error())
}
if exceeded {
endpoint.log.Sugar().Debugf("monthly project limits are %s of storage and bandwidth usage. This limit has been exceeded for storage for projectID %s.",
limit, keyInfo.ProjectID,
endpoint.log.Error("Monthly storage limit exceeded.",
zap.Stringer("Limit", limit),
zap.Stringer("Project ID", keyInfo.ProjectID),
)
return nil, nil, rpcstatus.Error(rpcstatus.ResourceExhausted, "Exceeded Usage Limit")
}
if err := endpoint.projectUsage.AddProjectStorageUsage(ctx, keyInfo.ProjectID, inlineUsed); err != nil {
endpoint.log.Sugar().Errorf("Could not track new storage usage by project %v: %v", keyInfo.ProjectID, err)
endpoint.log.Error("Could not track new storage usage.", zap.Stringer("Project ID", keyInfo.ProjectID), zap.Error(err))
// but continue. it's most likely our own fault that we couldn't track it, and the only thing
// that will be affected is our per-project bandwidth and storage limits.
}
@ -1937,11 +1951,12 @@ func (endpoint *Endpoint) DownloadSegment(ctx context.Context, req *pb.SegmentDo
exceeded, limit, err := endpoint.projectUsage.ExceedsBandwidthUsage(ctx, keyInfo.ProjectID, bucketID)
if err != nil {
endpoint.log.Error("retrieving project bandwidth total", zap.Error(err))
endpoint.log.Error("Retrieving project bandwidth total failed.", zap.Error(err))
}
if exceeded {
endpoint.log.Sugar().Errorf("monthly project limits are %s of storage and bandwidth usage. This limit has been exceeded for bandwidth for projectID %s.",
limit, keyInfo.ProjectID,
endpoint.log.Error("Monthly bandwidth limit exceeded.",
zap.Stringer("Limit", limit),
zap.Stringer("Project ID", keyInfo.ProjectID),
)
return nil, rpcstatus.Error(rpcstatus.ResourceExhausted, "Exceeded Usage Limit")
}
@ -2002,7 +2017,11 @@ func (endpoint *Endpoint) DownloadSegment(ctx context.Context, req *pb.SegmentDo
limits, privateKey, err := endpoint.orders.CreateGetOrderLimits(ctx, bucketID, pointer)
if err != nil {
if orders.ErrDownloadFailedNotEnoughPieces.Has(err) {
endpoint.log.Sugar().Errorf("unable to create order limits for project id %s from api key id %s: %v.", keyInfo.ProjectID.String(), keyInfo.ID.String(), zap.Error(err))
endpoint.log.Error("Unable to create order limits.",
zap.Stringer("Project ID", keyInfo.ProjectID),
zap.Stringer("API Key ID", keyInfo.ID),
zap.Error(err),
)
}
return nil, rpcstatus.Error(rpcstatus.Internal, err.Error())
}

View File

@ -102,8 +102,12 @@ func NewRepairer(log *zap.Logger, full *identity.FullIdentity,
{
if !versionInfo.IsZero() {
peer.Log.Sugar().Debugf("Binary Version: %s with CommitHash %s, built at %s as Release %v",
versionInfo.Version.String(), versionInfo.CommitHash, versionInfo.Timestamp.String(), versionInfo.Release)
peer.Log.Debug("Version info",
zap.Stringer("Version", versionInfo.Version.Version),
zap.String("Commit Hash", versionInfo.CommitHash),
zap.Stringer("Build Timestamp", versionInfo.Timestamp),
zap.Bool("Release Build", versionInfo.Release),
)
}
peer.Version.Service = version_checker.NewService(log.Named("version"), config.Version, versionInfo, "Satellite")
peer.Version.Chore = version_checker.NewChore(peer.Version.Service, config.Version.CheckInterval)

View File

@ -29,9 +29,9 @@ func (c OperatorConfig) Verify(log *zap.Logger) error {
func isOperatorEmailValid(log *zap.Logger, email string) error {
if email == "" {
log.Sugar().Warn("Operator email address isn't specified.")
log.Warn("Operator email address isn't specified.")
} else {
log.Sugar().Info("Operator email: ", email)
log.Info("Operator email", zap.String("Address", email))
}
return nil
}
@ -45,6 +45,6 @@ func isOperatorWalletValid(log *zap.Logger, wallet string) error {
return fmt.Errorf("operator wallet address isn't valid")
}
log.Sugar().Info("operator wallet: ", wallet)
log.Info("Operator wallet", zap.String("Address", wallet))
return nil
}

View File

@ -278,8 +278,12 @@ func New(log *zap.Logger, full *identity.FullIdentity, db DB, revocationDB exten
{
if !versionInfo.IsZero() {
peer.Log.Sugar().Debugf("Binary Version: %s with CommitHash %s, built at %s as Release %v",
versionInfo.Version.String(), versionInfo.CommitHash, versionInfo.Timestamp.String(), versionInfo.Release)
peer.Log.Debug("Version info",
zap.Stringer("Version", versionInfo.Version.Version),
zap.String("Commit Hash", versionInfo.CommitHash),
zap.Stringer("Build Timestamp", versionInfo.Timestamp),
zap.Bool("Release Build", versionInfo.Release),
)
}
peer.Version.Service = checker.NewService(log.Named("version"), config.Version, versionInfo, "Storagenode")
@ -309,9 +313,9 @@ func New(log *zap.Logger, full *identity.FullIdentity, db DB, revocationDB exten
peer.Servers.Add(lifecycle.Item{
Name: "server",
Run: func(ctx context.Context) error {
peer.Log.Sugar().Infof("Node %s started", peer.Identity.ID)
peer.Log.Sugar().Infof("Public server started on %s", peer.Addr())
peer.Log.Sugar().Infof("Private server started on %s", peer.PrivateAddr())
peer.Log.Info("Node started.", zap.Stringer("Node ID", peer.Identity.ID))
peer.Log.Info("Public server started.", zap.String("Address", peer.Addr()))
peer.Log.Info("Private server started.", zap.String("Address", peer.PrivateAddr()))
return peer.Server.Run(ctx)
},
Close: peer.Server.Close,
@ -660,7 +664,7 @@ func (peer *Peer) Run(ctx context.Context) (err error) {
}
if err := peer.Preflight.LocalTime.Check(ctx); err != nil {
peer.Log.Fatal("failed preflight check", zap.Error(err))
peer.Log.Fatal("Failed preflight check.", zap.Error(err))
return err
}

View File

@ -764,19 +764,19 @@ func (db *DB) Migration(ctx context.Context) *migrate.Migration {
Action: migrate.Func(func(ctx context.Context, log *zap.Logger, mgdb tagsql.DB, tx tagsql.Tx) error {
err := os.RemoveAll(filepath.Join(db.dbDirectory, "blob/ukfu6bhbboxilvt7jrwlqk7y2tapb5d2r2tsmj2sjxvw5qaaaaaa")) // us-central1
if err != nil {
log.Sugar().Debug(err)
log.Debug("Error removing trash from us-central-1.", zap.Error(err))
}
err = os.RemoveAll(filepath.Join(db.dbDirectory, "blob/v4weeab67sbgvnbwd5z7tweqsqqun7qox2agpbxy44mqqaaaaaaa")) // europe-west1
if err != nil {
log.Sugar().Debug(err)
log.Debug("Error removing trash from europe-west-1.", zap.Error(err))
}
err = os.RemoveAll(filepath.Join(db.dbDirectory, "blob/qstuylguhrn2ozjv4h2c6xpxykd622gtgurhql2k7k75wqaaaaaa")) // asia-east1
if err != nil {
log.Sugar().Debug(err)
log.Debug("Error removing trash from asia-east-1.", zap.Error(err))
}
err = os.RemoveAll(filepath.Join(db.dbDirectory, "blob/abforhuxbzyd35blusvrifvdwmfx4hmocsva4vmpp3rgqaaaaaaa")) // "tothemoon (stefan)"
if err != nil {
log.Sugar().Debug(err)
log.Debug("Error removing trash from tothemoon.", zap.Error(err))
}
// To prevent the node from starting up, we just log errors and return nil
return nil
@ -789,7 +789,7 @@ func (db *DB) Migration(ctx context.Context) *migrate.Migration {
Action: migrate.Func(func(ctx context.Context, log *zap.Logger, mgdb tagsql.DB, tx tagsql.Tx) error {
err := os.RemoveAll(filepath.Join(db.dbDirectory, "tmp"))
if err != nil {
log.Sugar().Debug(err)
log.Debug("Error removing orphaned tmp data.", zap.Error(err))
}
// To prevent the node from starting up, we just log errors and return nil
return nil

View File

@ -106,12 +106,12 @@ func (peer *Peer) HandleGet(w http.ResponseWriter, r *http.Request) {
if xfor = r.Header.Get("X-Forwarded-For"); xfor == "" {
xfor = r.RemoteAddr
}
peer.Log.Sugar().Debugf("Request from: %s for %s", r.RemoteAddr, xfor)
peer.Log.Debug("Request received.", zap.String("From", r.RemoteAddr), zap.String("For", xfor))
w.Header().Set("Content-Type", "application/json")
_, err := w.Write(peer.response)
if err != nil {
peer.Log.Sugar().Errorf("error writing response to client: %v", err)
peer.Log.Error("Error writing response to client.", zap.Error(err))
}
}
@ -184,10 +184,10 @@ func New(log *zap.Logger, config *Config) (peer *Peer, err error) {
peer.response, err = json.Marshal(peer.Versions)
if err != nil {
peer.Log.Sugar().Fatalf("Error marshalling version info: %v", err)
peer.Log.Fatal("Error marshalling version info.", zap.Error(err))
}
peer.Log.Sugar().Debugf("setting version info to: %v", string(peer.response))
peer.Log.Debug("Setting version info.", zap.ByteString("Value", peer.response))
mux := http.NewServeMux()
mux.HandleFunc("/", peer.HandleGet)
@ -214,7 +214,7 @@ func (peer *Peer) Run(ctx context.Context) (err error) {
})
group.Go(func() error {
defer cancel()
peer.Log.Sugar().Infof("Versioning server started on %s", peer.Addr())
peer.Log.Info("Versioning server started.", zap.String("Address", peer.Addr()))
return errs2.IgnoreCanceled(peer.Server.Endpoint.Serve(peer.Server.Listener))
})
return group.Wait()