satellite: update log levels (#3851)

* satellite: update log levels

Change-Id: I86bc32e042d742af6dbc469a294291a2e667e81f

* log version on startup for every service

Change-Id: Ic128bb9c5ac52d4dc6d6c4cb3059fbad73f5d3de

* Use monkit for tracking failed IP resolutions (sketched after this list)

Change-Id: Ia5aa71d315515e0c5f62c98d9d115ef984cd50c2

* fix compile errors

Change-Id: Ia33c8b6e34e780bd1115120dc347a439d99e83bf

* add request limit value to storage node rpc err

Change-Id: I1ad6706a60237928e29da300d96a1bafa94156e5

* we can't track storage node IDs in monkit metrics, so let's use logging to track them for expired orders (the pattern is sketched after the graceful exit hunk below)

Change-Id: I1cc1d240b29019ae2f8c774792765df3cbeac887

* fix build errors

Change-Id: I6d0ffe058e9a38b7ed031c85a29440f3d68e8d47
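The IP-resolution change itself is not visible in the hunks below, so here is a minimal sketch of the idea under stated assumptions: monkit's v3 API, with a hypothetical package, function, and metric name.

package overlay // hypothetical package; illustrates the pattern only

import (
	"context"
	"net"

	monkit "github.com/spacemonkeygo/monkit/v3"
)

var mon = monkit.Package()

// resolveHost marks a meter on every failed lookup, so resolution failures
// surface as a rate in dashboards without emitting per-failure log noise.
func resolveHost(ctx context.Context, host string) ([]net.IPAddr, error) {
	addrs, err := net.DefaultResolver.LookupIPAddr(ctx, host)
	if err != nil {
		mon.Meter("failed_ip_resolution").Mark(1) // hypothetical metric name
		return nil, err
	}
	return addrs, nil
}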
Author: Jess G (committed by GitHub)
Date: 2020-04-15 12:32:22 -07:00
Commit: 75b9a5971e
Parent: d3ce845f82
8 changed files with 52 additions and 45 deletions


@@ -126,12 +126,13 @@ func (service *Service) checkVersion(ctx context.Context) (latestVersion version
 	service.log.Debug("Allowed minimum version from control server.", zap.Stringer("Minimum Version", minimum.Version))
 	if isAcceptedVersion(service.Info.Version, minimumOld) {
-		service.log.Info("Running on allowed version.", zap.Stringer("Version", service.Info.Version.Version))
+		service.log.Debug("Running on allowed version.", zap.Stringer("Version", service.Info.Version.Version))
 		return suggestedVersion, true
 	}
-	service.log.Error("Running on not allowed/outdated version.", zap.Stringer("Version", service.Info.Version.Version))
+	service.log.Warn("version not allowed/outdated",
+		zap.Stringer("current version", service.Info.Version.Version),
+		zap.Stringer("minimum allowed version", minimumOld),
+	)
 	return suggestedVersion, false
 }
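The practical effect of these demotions is governed by the logger's configured level: a satellite running at zap's production default (Info) stops emitting the demoted lines entirely. A short, self-contained zap sketch of that behavior:

package main

import "go.uber.org/zap"

func main() {
	cfg := zap.NewProductionConfig() // defaults to InfoLevel
	log, err := cfg.Build()
	if err != nil {
		panic(err)
	}
	defer func() { _ = log.Sync() }()

	log.Debug("Running on allowed version.") // dropped: below the Info threshold
	log.Warn("version not allowed/outdated") // still emitted
}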


@@ -205,14 +205,12 @@ func NewAPI(log *zap.Logger, full *identity.FullIdentity, db DB,
 	var err error
 	{
-		if !versionInfo.IsZero() {
-			peer.Log.Debug("Version info",
-				zap.Stringer("Version", versionInfo.Version.Version),
-				zap.String("Commit Hash", versionInfo.CommitHash),
-				zap.Stringer("Build Timestamp", versionInfo.Timestamp),
-				zap.Bool("Release Build", versionInfo.Release),
-			)
-		}
+		peer.Log.Info("Version info",
+			zap.Stringer("Version", versionInfo.Version.Version),
+			zap.String("Commit Hash", versionInfo.CommitHash),
+			zap.Stringer("Build Timestamp", versionInfo.Timestamp),
+			zap.Bool("Release Build", versionInfo.Release),
+		)
 		peer.Version.Service = checker.NewService(log.Named("version"), config.Version, versionInfo, "Satellite")
 		peer.Version.Chore = checker.NewChore(peer.Version.Service, config.Version.CheckInterval)


@@ -177,14 +177,12 @@ func New(log *zap.Logger, full *identity.FullIdentity, db DB,
 	var err error
 	{ // setup version control
-		if !versionInfo.IsZero() {
-			peer.Log.Debug("Version info",
-				zap.Stringer("Version", versionInfo.Version.Version),
-				zap.String("Commit Hash", versionInfo.CommitHash),
-				zap.Stringer("Build Timestamp", versionInfo.Timestamp),
-				zap.Bool("Release Build", versionInfo.Release),
-			)
-		}
+		peer.Log.Info("Version info",
+			zap.Stringer("Version", versionInfo.Version.Version),
+			zap.String("Commit Hash", versionInfo.CommitHash),
+			zap.Stringer("Build Timestamp", versionInfo.Timestamp),
+			zap.Bool("Release Build", versionInfo.Release),
+		)
 		peer.Version.Service = version_checker.NewService(log.Named("version"), config.Version, versionInfo, "Satellite")
 		peer.Version.Chore = version_checker.NewChore(peer.Version.Service, config.Version.CheckInterval)


@@ -103,14 +103,12 @@ func NewGarbageCollection(log *zap.Logger, full *identity.FullIdentity, db DB,
 	}
 	{ // setup version control
-		if !versionInfo.IsZero() {
-			peer.Log.Debug("Version info",
-				zap.Stringer("Version", versionInfo.Version.Version),
-				zap.String("Commit Hash", versionInfo.CommitHash),
-				zap.Stringer("Build Timestamp", versionInfo.Timestamp),
-				zap.Bool("Release Build", versionInfo.Release),
-			)
-		}
+		peer.Log.Info("Version info",
+			zap.Stringer("Version", versionInfo.Version.Version),
+			zap.String("Commit Hash", versionInfo.CommitHash),
+			zap.Stringer("Build Timestamp", versionInfo.Timestamp),
+			zap.Bool("Release Build", versionInfo.Release),
+		)
 		peer.Version.Service = version_checker.NewService(log.Named("version"), config.Version, versionInfo, "Satellite")
 		peer.Version.Chore = version_checker.NewChore(peer.Version.Service, config.Version.CheckInterval)


@@ -535,7 +535,12 @@ func (endpoint *Endpoint) handleSucceeded(ctx context.Context, stream processStr
 func (endpoint *Endpoint) handleFailed(ctx context.Context, pending *PendingMap, nodeID storj.NodeID, message *pb.StorageNodeMessage_Failed) (err error) {
 	defer mon.Task()(&ctx)(&err)
-	endpoint.log.Warn("transfer failed", zap.Stringer("Piece ID", message.Failed.OriginalPieceId), zap.Stringer("transfer error", message.Failed.GetError()))
+	endpoint.log.Warn("transfer failed",
+		zap.Stringer("Piece ID", message.Failed.OriginalPieceId),
+		zap.Stringer("nodeID", nodeID),
+		zap.Stringer("transfer error", message.Failed.GetError()),
+	)
+	mon.Meter("graceful_exit_transfer_piece_fail").Mark(1) //locked
 	pieceID := message.Failed.OriginalPieceId
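This graceful exit hunk is the template for the expired-orders bullet in the commit message: the meter stays aggregate (node IDs would explode metric cardinality) while the node ID rides along as a structured log field, where high-cardinality values are cheap. A minimal sketch of the same pattern applied to expired orders; the package, function, and metric name are hypothetical, not the satellite's actual code:

package orders // hypothetical

import (
	monkit "github.com/spacemonkeygo/monkit/v3"
	"go.uber.org/zap"

	"storj.io/common/storj"
)

var mon = monkit.Package()

// trackExpiredOrder counts expirations in aggregate for dashboards and keeps
// the per-node detail in logs.
func trackExpiredOrder(log *zap.Logger, nodeID storj.NodeID) {
	mon.Meter("expired_orders").Mark(1) // hypothetical metric name; aggregate only
	log.Warn("order expired", zap.Stringer("node ID", nodeID))
}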


@@ -127,7 +127,7 @@ func (ec *ECRepairer) Get(ctx context.Context, limits []*pb.AddressedOrderLimit,
 				NodeId: limit.GetLimit().StorageNodeId,
 			})
 		} else {
-			ec.log.Info("Failed to download pieces for repair",
+			ec.log.Debug("Failed to download pieces for repair",
 				zap.Error(err))
 		}
 		return
@@ -346,13 +346,17 @@ func (ec *ECRepairer) Repair(ctx context.Context, limits []*pb.AddressedOrderLim
 		if info.err != nil {
 			if !errs2.IsCanceled(info.err) {
 				failureCount++
+				ec.log.Warn("Repair to a storage node failed",
+					zap.Stringer("Node ID", limits[info.i].GetLimit().StorageNodeId),
+					zap.Error(info.err),
+				)
 			} else {
 				cancellationCount++
+				ec.log.Debug("Repair to storage node cancelled",
+					zap.Stringer("Node ID", limits[info.i].GetLimit().StorageNodeId),
+					zap.Error(info.err),
+				)
 			}
-			ec.log.Info("Repair to storage node failed",
-				zap.Stringer("Node ID", limits[info.i].GetLimit().StorageNodeId),
-				zap.Error(info.err),
-			)
 			continue
 		}
@@ -380,7 +384,7 @@ func (ec *ECRepairer) Repair(ctx context.Context, limits []*pb.AddressedOrderLim
 		return nil, nil, Error.New("repair to all nodes failed")
 	}
-	ec.log.Info("Successfully repaired",
+	ec.log.Debug("Successfully repaired",
 		zap.Int32("Success Count", atomic.LoadInt32(&successfulCount)),
 	)
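The new split treats long-tail cancellations as expected (Debug) and only genuine failures as Warn. errs2.IsCanceled is the helper drawing that line; at its core it is a context-cancellation test, roughly as sketched here (the real helper in storj.io/common/errs2 may also recognize RPC-level cancellation, so treat this as an approximation):

package main

import (
	"context"
	"errors"
	"fmt"
)

// isCanceled sketches the distinction the repairer draws: a canceled
// transfer is an expected long-tail outcome, not a node failure.
func isCanceled(err error) bool {
	return errors.Is(err, context.Canceled)
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	cancel()
	fmt.Println(isCanceled(ctx.Err())) // true: count as cancellation, log at Debug
}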


@@ -101,14 +101,12 @@ func NewRepairer(log *zap.Logger, full *identity.FullIdentity,
 	}
 	{
-		if !versionInfo.IsZero() {
-			peer.Log.Debug("Version info",
-				zap.Stringer("Version", versionInfo.Version.Version),
-				zap.String("Commit Hash", versionInfo.CommitHash),
-				zap.Stringer("Build Timestamp", versionInfo.Timestamp),
-				zap.Bool("Release Build", versionInfo.Release),
-			)
-		}
+		peer.Log.Info("Version info",
+			zap.Stringer("Version", versionInfo.Version.Version),
+			zap.String("Commit Hash", versionInfo.CommitHash),
+			zap.Stringer("Build Timestamp", versionInfo.Timestamp),
+			zap.Bool("Release Build", versionInfo.Release),
+		)
 		peer.Version.Service = version_checker.NewService(log.Named("version"), config.Version, versionInfo, "Satellite")
 		peer.Version.Chore = version_checker.NewChore(peer.Version.Service, config.Version.CheckInterval)


@@ -5,6 +5,7 @@ package piecestore
 import (
 	"context"
+	"fmt"
 	"io"
 	"os"
 	"sync/atomic"
@@ -234,8 +235,12 @@ func (endpoint *Endpoint) doUpload(stream uploadStream, requestLimit int) (err e
 	endpoint.pingStats.WasPinged(time.Now())
 	if requestLimit > 0 && int(liveRequests) > requestLimit {
-		endpoint.log.Error("upload rejected, too many requests", zap.Int32("live requests", liveRequests))
-		return rpcstatus.Error(rpcstatus.Unavailable, "storage node overloaded")
+		endpoint.log.Error("upload rejected, too many requests",
+			zap.Int32("live requests", liveRequests),
+			zap.Int("requestLimit", requestLimit),
+		)
+		errMsg := fmt.Sprintf("storage node overloaded, request limit: %d", requestLimit)
+		return rpcstatus.Error(rpcstatus.Unavailable, errMsg)
 	}
 	startTime := time.Now().UTC()
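With the limit embedded in the error text, a client that hits Unavailable can now see the node's configured ceiling. A client-side sketch, assuming rpcstatus.Code from storj.io/common/rpc/rpcstatus and a hypothetical doUpload callback:

package main // client-side sketch; doUpload stands in for the real upload call

import (
	"context"

	"go.uber.org/zap"

	"storj.io/common/rpc/rpcstatus"
)

func uploadWithBackoff(ctx context.Context, log *zap.Logger, doUpload func(context.Context) error) error {
	err := doUpload(ctx)
	if err != nil && rpcstatus.Code(err) == rpcstatus.Unavailable {
		// the message now carries the ceiling: "storage node overloaded, request limit: N"
		log.Warn("node overloaded, backing off", zap.Error(err))
	}
	return err
}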