all: reduce number of log messages

Remove starting-up messages from peers. We expect all of them to start;
if they don't, they should return an error explaining why they failed
to start. The only informative message is when a service is disabled.

During initial database setup, logging each migration step isn't
informative, so print only a single line with the final version.

Also use shorter log scopes.

Change-Id: Ic8b61411df2eeae2a36d600a0c2fbc97a84a5b93
This commit is contained in:
Egon Elbre 2020-01-06 14:34:54 +02:00 committed by Michal Niewrzal
parent a33734bee7
commit f41d440944
11 changed files with 29 additions and 62 deletions

View File

@ -232,7 +232,7 @@ func cmdRun(cmd *cobra.Command, args []string) (err error) {
func cmdMigrationRun(cmd *cobra.Command, args []string) (err error) {
log := zap.L()
db, err := satellitedb.New(log.Named("db migration"), runCfg.Database)
db, err := satellitedb.New(log.Named("migration"), runCfg.Database)
if err != nil {
return errs.New("Error creating new master database connection for satellitedb migration: %+v", err)
}
@ -247,7 +247,7 @@ func cmdMigrationRun(cmd *cobra.Command, args []string) (err error) {
// There should be an explicit CreateTables call for the pointerdb as well.
// This is tracked in jira ticket #3337.
pdb, err := metainfo.NewStore(log.Named("db migration"), runCfg.Metainfo.DatabaseURL)
pdb, err := metainfo.NewStore(log.Named("migration"), runCfg.Metainfo.DatabaseURL)
if err != nil {
return errs.New("Error creating tables for pointer database on satellite: %+v", err)
}

View File

@ -134,7 +134,8 @@ func (migration *Migration) Run(log *zap.Logger) error {
return err
}
for _, step := range migration.Steps {
initialSetup := false
for i, step := range migration.Steps {
if step.DB == nil {
return Error.New("step.DB is nil for step %d", step.Version)
}
@ -148,13 +149,18 @@ func (migration *Migration) Run(log *zap.Logger) error {
if err != nil {
return Error.Wrap(err)
}
if i == 0 && version < 0 {
initialSetup = true
}
if step.Version <= version {
continue
}
stepLog := log.Named(strconv.Itoa(step.Version))
stepLog.Info(step.Description)
if !initialSetup {
stepLog.Info(step.Description)
}
tx, err := step.DB.Begin()
if err != nil {
@ -178,7 +184,11 @@ func (migration *Migration) Run(log *zap.Logger) error {
if len(migration.Steps) > 0 {
last := migration.Steps[len(migration.Steps)-1]
log.Info("Database Version", zap.Int("version", last.Version))
if initialSetup {
log.Info("Database Created", zap.Int("version", last.Version))
} else {
log.Info("Database Version", zap.Int("version", last.Version))
}
} else {
log.Info("No Versions")
}

View File

@ -46,7 +46,6 @@ func New(logger *zap.Logger, sdb accounting.StoragenodeAccounting, interval time
// Run the Rollup loop
func (r *Service) Run(ctx context.Context) (err error) {
defer mon.Task()(&ctx)(&err)
r.logger.Info("Rollup service starting up")
return r.Loop.Run(ctx, func(ctx context.Context) error {
err := r.Rollup(ctx)
if err != nil {

View File

@ -58,7 +58,6 @@ func New(log *zap.Logger, sdb accounting.StoragenodeAccounting, pdb accounting.P
// Run the tally service loop
func (service *Service) Run(ctx context.Context) (err error) {
defer mon.Task()(&ctx)(&err)
service.log.Info("Tally service starting up")
return service.Loop.Run(ctx, func(ctx context.Context) error {
err := service.Tally(ctx)

View File

@ -159,7 +159,6 @@ func NewAPI(log *zap.Logger, full *identity.FullIdentity, db DB, pointerDB metai
}
{ // setup listener and server
log.Debug("Satellite API Process starting listener and server")
sc := config.Server
tlsOptions, err := tlsopts.NewOptions(peer.Identity, sc.Config, revocationDB)
@ -180,7 +179,6 @@ func NewAPI(log *zap.Logger, full *identity.FullIdentity, db DB, pointerDB metai
}
{ // setup overlay
log.Debug("Satellite API Process starting overlay")
peer.Overlay.DB = overlay.NewCombinedCache(peer.DB.OverlayCache())
peer.Overlay.Service = overlay.NewService(peer.Log.Named("overlay"), peer.Overlay.DB, config.Overlay)
peer.Overlay.Inspector = overlay.NewInspector(peer.Overlay.Service)
@ -189,7 +187,6 @@ func NewAPI(log *zap.Logger, full *identity.FullIdentity, db DB, pointerDB metai
}
{ // setup contact service
log.Debug("Satellite API Process setting up contact service")
c := config.Contact
if c.ExternalAddress == "" {
c.ExternalAddress = peer.Addr()
@ -217,18 +214,15 @@ func NewAPI(log *zap.Logger, full *identity.FullIdentity, db DB, pointerDB metai
}
{ // setup vouchers
log.Debug("Satellite API Process setting up vouchers")
pb.RegisterVouchersServer(peer.Server.GRPC(), peer.Vouchers.Endpoint)
pb.DRPCRegisterVouchers(peer.Server.DRPC(), peer.Vouchers.Endpoint)
}
{ // setup live accounting
log.Debug("Satellite API Process setting up live accounting")
peer.LiveAccounting.Cache = liveAccounting
}
{ // setup accounting project usage
log.Debug("Satellite API Process setting up accounting project usage")
peer.Accounting.ProjectUsage = accounting.NewService(
peer.DB.ProjectAccounting(),
peer.LiveAccounting.Cache,
@ -237,7 +231,6 @@ func NewAPI(log *zap.Logger, full *identity.FullIdentity, db DB, pointerDB metai
}
{ // setup orders
log.Debug("Satellite API Process setting up orders endpoint")
satelliteSignee := signing.SigneeFromPeerIdentity(peer.Identity.PeerIdentity())
peer.Orders.Endpoint = orders.NewEndpoint(
peer.Log.Named("orders:endpoint"),
@ -262,8 +255,6 @@ func NewAPI(log *zap.Logger, full *identity.FullIdentity, db DB, pointerDB metai
}
{ // setup marketing portal
log.Debug("Satellite API Process setting up marketing server")
peer.Marketing.PartnersService = rewards.NewPartnersService(
peer.Log.Named("partners"),
rewards.DefaultPartnersDB,
@ -292,7 +283,6 @@ func NewAPI(log *zap.Logger, full *identity.FullIdentity, db DB, pointerDB metai
}
{ // setup metainfo
log.Debug("Satellite API Process setting up metainfo")
peer.Metainfo.Database = pointerDB
peer.Metainfo.Service = metainfo.NewService(peer.Log.Named("metainfo:service"),
peer.Metainfo.Database,
@ -318,14 +308,12 @@ func NewAPI(log *zap.Logger, full *identity.FullIdentity, db DB, pointerDB metai
}
{ // setup datarepair
log.Debug("Satellite API Process setting up datarepair inspector")
peer.Repair.Inspector = irreparable.NewInspector(peer.DB.Irreparable())
pb.RegisterIrreparableInspectorServer(peer.Server.PrivateGRPC(), peer.Repair.Inspector)
pb.DRPCRegisterIrreparableInspector(peer.Server.PrivateDRPC(), peer.Repair.Inspector)
}
{ // setup inspector
log.Debug("Satellite API Process setting up inspector")
peer.Inspector.Endpoint = inspector.NewEndpoint(
peer.Log.Named("inspector"),
peer.Overlay.Service,
@ -336,7 +324,6 @@ func NewAPI(log *zap.Logger, full *identity.FullIdentity, db DB, pointerDB metai
}
{ // setup mailservice
log.Debug("Satellite API Process setting up mail service")
// TODO(yar): test multiple satellites using same OAUTH credentials
mailConfig := config.Mail
@ -403,7 +390,6 @@ func NewAPI(log *zap.Logger, full *identity.FullIdentity, db DB, pointerDB metai
}
{ // setup payments
log.Debug("Satellite API Process setting up payments")
pc := config.Payments
switch pc.Provider {
@ -411,7 +397,7 @@ func NewAPI(log *zap.Logger, full *identity.FullIdentity, db DB, pointerDB metai
peer.Payments.Accounts = mockpayments.Accounts()
case "stripecoinpayments":
service := stripecoinpayments.NewService(
peer.Log.Named("stripecoinpayments service"),
peer.Log.Named("payments.stripe:service"),
pc.StripeCoinPayments,
peer.DB.StripeCoinPayments(),
peer.DB.Console().Projects(),
@ -424,7 +410,7 @@ func NewAPI(log *zap.Logger, full *identity.FullIdentity, db DB, pointerDB metai
peer.Payments.Inspector = stripecoinpayments.NewEndpoint(service)
peer.Payments.Version = stripecoinpayments.NewVersionService(
peer.Log.Named("stripecoinpayments version service"),
peer.Log.Named("payments.stripe:service"),
service,
pc.StripeCoinPayments.ConversionRatesCycleInterval)
@ -434,7 +420,6 @@ func NewAPI(log *zap.Logger, full *identity.FullIdentity, db DB, pointerDB metai
}
{ // setup console
log.Debug("Satellite API Process setting up console")
consoleConfig := config.Console
peer.Console.Listener, err = net.Listen("tcp", consoleConfig.Address)
if err != nil {
@ -480,7 +465,6 @@ func NewAPI(log *zap.Logger, full *identity.FullIdentity, db DB, pointerDB metai
}
{ // setup node stats endpoint
log.Debug("Satellite API Process setting up node stats endpoint")
peer.NodeStats.Endpoint = nodestats.NewEndpoint(
peer.Log.Named("nodestats:endpoint"),
peer.Overlay.DB,
@ -492,7 +476,6 @@ func NewAPI(log *zap.Logger, full *identity.FullIdentity, db DB, pointerDB metai
{ // setup graceful exit
if config.GracefulExit.Enabled {
log.Debug("Satellite API Process setting up graceful exit endpoint")
peer.GracefulExit.Endpoint = gracefulexit.NewEndpoint(
peer.Log.Named("gracefulexit:endpoint"),
signing.SignerFromFullIdentity(peer.Identity),
@ -506,6 +489,8 @@ func NewAPI(log *zap.Logger, full *identity.FullIdentity, db DB, pointerDB metai
pb.RegisterSatelliteGracefulExitServer(peer.Server.GRPC(), peer.GracefulExit.Endpoint)
pb.DRPCRegisterSatelliteGracefulExit(peer.Server.DRPC(), peer.GracefulExit.Endpoint.DRPC())
} else {
peer.Log.Named("gracefulexit").Info("disabled")
}
}

View File

@ -57,7 +57,6 @@ func NewWorker(log *zap.Logger, queue *Queue, verifier *Verifier, reporter *Repo
// Run runs audit service 2.0.
func (worker *Worker) Run(ctx context.Context) (err error) {
defer mon.Task()(&ctx)(&err)
worker.log.Debug("starting")
// Wait for all audits to run.
defer worker.limiter.Wait()

View File

@ -153,8 +153,6 @@ func New(log *zap.Logger, full *identity.FullIdentity, db DB, pointerDB metainfo
}
{ // setup contact service
log.Debug("Starting contact service")
pbVersion, err := versionInfo.Proto()
if err != nil {
return nil, errs.Combine(err, peer.Close())
@ -174,19 +172,15 @@ func New(log *zap.Logger, full *identity.FullIdentity, db DB, pointerDB metainfo
}
{ // setup overlay
log.Debug("Starting overlay")
peer.Overlay.DB = overlay.NewCombinedCache(peer.DB.OverlayCache())
peer.Overlay.Service = overlay.NewService(peer.Log.Named("overlay"), peer.Overlay.DB, config.Overlay)
}
{ // setup live accounting
log.Debug("Setting up live accounting")
peer.LiveAccounting.Cache = liveAccounting
}
{ // setup accounting project usage
log.Debug("Setting up accounting project usage")
peer.Accounting.ProjectUsage = accounting.NewService(
peer.DB.ProjectAccounting(),
peer.LiveAccounting.Cache,
@ -195,7 +189,6 @@ func New(log *zap.Logger, full *identity.FullIdentity, db DB, pointerDB metainfo
}
{ // setup orders
log.Debug("Setting up orders")
peer.Orders.Service = orders.NewService(
peer.Log.Named("orders:service"),
signing.SignerFromFullIdentity(peer.Identity),
@ -211,8 +204,6 @@ func New(log *zap.Logger, full *identity.FullIdentity, db DB, pointerDB metainfo
}
{ // setup metainfo
log.Debug("Setting up metainfo")
peer.Metainfo.Database = pointerDB // for logging: storelogger.New(peer.Log.Named("pdb"), db)
peer.Metainfo.Service = metainfo.NewService(peer.Log.Named("metainfo:service"),
peer.Metainfo.Database,
@ -222,7 +213,6 @@ func New(log *zap.Logger, full *identity.FullIdentity, db DB, pointerDB metainfo
}
{ // setup datarepair
log.Debug("Setting up datarepair")
// TODO: simplify argument list somehow
peer.Repair.Checker = checker.NewChecker(
peer.Log.Named("checker"),
@ -255,7 +245,6 @@ func New(log *zap.Logger, full *identity.FullIdentity, db DB, pointerDB metainfo
}
{ // setup audit
log.Debug("Setting up audits")
config := config.Audit
peer.Audit.Queue = &audit.Queue{}
@ -297,8 +286,6 @@ func New(log *zap.Logger, full *identity.FullIdentity, db DB, pointerDB metainfo
}
{ // setup garbage collection
log.Debug("Setting up garbage collection")
peer.GarbageCollection.Service = gc.NewService(
peer.Log.Named("garbage collection"),
config.GarbageCollection,
@ -309,19 +296,16 @@ func New(log *zap.Logger, full *identity.FullIdentity, db DB, pointerDB metainfo
}
{ // setup db cleanup
log.Debug("Setting up db cleanup")
peer.DBCleanup.Chore = dbcleanup.NewChore(peer.Log.Named("dbcleanup"), peer.DB.Orders(), config.DBCleanup)
}
{ // setup accounting
log.Debug("Setting up accounting")
peer.Accounting.Tally = tally.New(peer.Log.Named("tally"), peer.DB.StoragenodeAccounting(), peer.DB.ProjectAccounting(), peer.LiveAccounting.Cache, peer.Metainfo.Loop, config.Tally.Interval)
peer.Accounting.Rollup = rollup.New(peer.Log.Named("rollup"), peer.DB.StoragenodeAccounting(), config.Rollup.Interval, config.Rollup.DeleteTallies)
}
// TODO: remove in future, should be in API
{ // setup payments
log.Debug("Setting up payments")
pc := config.Payments
switch pc.Provider {
@ -329,7 +313,7 @@ func New(log *zap.Logger, full *identity.FullIdentity, db DB, pointerDB metainfo
peer.Payments.Accounts = mockpayments.Accounts()
case "stripecoinpayments":
service := stripecoinpayments.NewService(
peer.Log.Named("stripecoinpayments service"),
peer.Log.Named("payments.stripe:service"),
pc.StripeCoinPayments,
peer.DB.StripeCoinPayments(),
peer.DB.Console().Projects(),
@ -341,7 +325,7 @@ func New(log *zap.Logger, full *identity.FullIdentity, db DB, pointerDB metainfo
peer.Payments.Accounts = service.Accounts()
peer.Payments.Chore = stripecoinpayments.NewChore(
peer.Log.Named("stripecoinpayments clearing loop"),
peer.Log.Named("payments.stripe:clearing"),
service,
pc.StripeCoinPayments.TransactionUpdateInterval,
pc.StripeCoinPayments.AccountBalanceUpdateInterval,
@ -353,8 +337,9 @@ func New(log *zap.Logger, full *identity.FullIdentity, db DB, pointerDB metainfo
{ // setup graceful exit
if config.GracefulExit.Enabled {
log.Debug("Setting up graceful exit")
peer.GracefulExit.Chore = gracefulexit.NewChore(peer.Log.Named("graceful exit chore"), peer.DB.GracefulExit(), peer.Overlay.DB, peer.Metainfo.Loop, config.GracefulExit)
peer.GracefulExit.Chore = gracefulexit.NewChore(peer.Log.Named("gracefulexit"), peer.DB.GracefulExit(), peer.Overlay.DB, peer.Metainfo.Loop, config.GracefulExit)
} else {
peer.Log.Named("gracefulexit").Info("disabled")
}
}
@ -367,8 +352,6 @@ func New(log *zap.Logger, full *identity.FullIdentity, db DB, pointerDB metainfo
}
{ // setup downtime tracking
log.Debug("Starting downtime tracking")
peer.DowntimeTracking.Service = downtime.NewService(peer.Log.Named("downtime"), peer.Overlay.Service, peer.Contact.Service)
peer.DowntimeTracking.DetectionChore = downtime.NewDetectionChore(

View File

@ -72,17 +72,14 @@ func NewRepairer(log *zap.Logger, full *identity.FullIdentity, pointerDB metainf
}
{ // setup metainfo
log.Debug("Setting up metainfo")
peer.Metainfo = metainfo.NewService(log.Named("metainfo"), pointerDB, bucketsDB)
}
{ // setup overlay
log.Debug("Setting up overlay")
peer.Overlay = overlay.NewService(log.Named("overlay"), overlayCache, config.Overlay)
}
{ // setup orders
log.Debug("Setting up orders")
peer.Orders = orders.NewService(
log.Named("orders"),
signing.SignerFromFullIdentity(peer.Identity),
@ -98,9 +95,8 @@ func NewRepairer(log *zap.Logger, full *identity.FullIdentity, pointerDB metainf
}
{ // setup repairer
log.Debug("Setting up repairer")
peer.SegmentRepairer = repairer.NewSegmentRepairer(
log.Named("segment repairer"),
log.Named("segment-repair"),
peer.Metainfo,
peer.Orders,
peer.Overlay,

View File

@ -58,8 +58,6 @@ func NewChore(log *zap.Logger, interval time.Duration, trust *trust.Pool, dialer
// Run the contact chore on a regular interval with jitter
func (chore *Chore) Run(ctx context.Context) (err error) {
defer mon.Task()(&ctx)(&err)
chore.log.Info("Storagenode contact chore starting up")
var group errgroup.Group
if !chore.service.initialized.Wait(ctx) {

View File

@ -261,7 +261,7 @@ func New(log *zap.Logger, full *identity.FullIdentity, db DB, revocationDB exten
)
peer.Storage2.TrashChore = pieces.NewTrashChore(
log.Named("pieces:trashchore"),
log.Named("pieces:trash"),
24*time.Hour, // choreInterval: how often to run the chore
7*24*time.Hour, // trashExpiryInterval: when items in the trash should be deleted
peer.Storage2.Trust,
@ -269,7 +269,7 @@ func New(log *zap.Logger, full *identity.FullIdentity, db DB, revocationDB exten
)
peer.Storage2.CacheService = pieces.NewService(
log.Named("piecestore:cacheUpdate"),
log.Named("piecestore:cache"),
peer.Storage2.BlobsCache,
peer.Storage2.Store,
config.Storage2.CacheSyncInterval,

View File

@ -42,17 +42,15 @@ func NewTrashChore(log *zap.Logger, choreInterval, trashExpiryInterval time.Dura
func (chore *TrashChore) Run(ctx context.Context) (err error) {
defer mon.Task()(&ctx)(&err)
chore.log.Info("Storagenode TrashChore starting up")
chore.cycle = sync2.NewCycle(chore.interval)
chore.cycle.Start(ctx, &errgroup.Group{}, func(ctx context.Context) error {
chore.log.Debug("starting EmptyTrash cycle")
chore.log.Debug("starting emptying trash")
for _, satelliteID := range chore.trust.GetSatellites(ctx) {
trashedBefore := time.Now().Add(-chore.trashExpiryInterval)
err := chore.store.EmptyTrash(ctx, satelliteID, trashedBefore)
if err != nil {
chore.log.Error("EmptyTrash cycle failed", zap.Error(err))
chore.log.Error("emptying trash failed", zap.Error(err))
}
}