satellite: reduce logging of node status

Change-Id: I6618cf4bf31b856acd7a28b54011a943c03ab22a
stefanbenten 2020-01-18 18:16:39 +01:00 committed by Egon Elbre
parent 76a6b28b33
commit f4097d518c
6 changed files with 37 additions and 11 deletions

@@ -336,6 +336,7 @@ func (planet *Planet) newSatellites(count int) ([]*SatelliteSystem, error) {
 	SettlementBatchSize: 10,
 	FlushBatchSize: 10,
 	FlushInterval: defaultInterval,
+	NodeStatusLogging: true,
 },
 Checker: checker.Config{
 	Interval: defaultInterval,

@@ -258,6 +258,7 @@ func NewAPI(log *zap.Logger, full *identity.FullIdentity, db DB, pointerDB metai
 		Address: config.Contact.ExternalAddress,
 	},
 	config.Repairer.MaxExcessRateOptimalThreshold,
+	config.Orders.NodeStatusLogging,
 )
 pb.RegisterOrdersServer(peer.Server.GRPC(), peer.Orders.Endpoint)
 pb.DRPCRegisterOrders(peer.Server.DRPC(), peer.Orders.Endpoint.DRPC())

@@ -208,6 +208,7 @@ func New(log *zap.Logger, full *identity.FullIdentity, db DB, pointerDB metainfo
 		Address: config.Contact.ExternalAddress,
 	},
 	config.Repairer.MaxExcessRateOptimalThreshold,
+	config.Orders.NodeStatusLogging,
 )
 }

@@ -30,6 +30,7 @@ type Config struct {
 	FlushBatchSize int `help:"how many items in the rollups write cache before they are flushed to the database" devDefault:"20" releaseDefault:"10000"`
 	FlushInterval time.Duration `help:"how often to flush the rollups write cache to the database" devDefault:"30s" releaseDefault:"1m"`
 	ReportedRollupsReadBatchSize int `help:"how many records to read in a single transaction when calculating billable bandwidth" default:"1000"`
+	NodeStatusLogging bool `help:"log the offline/disqualification status of nodes" default:"false"`
 }

 // Service for creating order limits.
@@ -43,13 +44,14 @@ type Service struct {
 	satelliteAddress *pb.NodeAddress
 	orderExpiration time.Duration
 	repairMaxExcessRateOptimalThreshold float64
+	nodeStatusLogging bool
 }

 // NewService creates new service for creating order limits.
 func NewService(
 	log *zap.Logger, satellite signing.Signer, overlay *overlay.Service,
 	orders DB, orderExpiration time.Duration, satelliteAddress *pb.NodeAddress,
-	repairMaxExcessRateOptimalThreshold float64,
+	repairMaxExcessRateOptimalThreshold float64, nodeStatusLogging bool,
 ) *Service {
 	return &Service{
 		log: log,
@@ -59,6 +61,7 @@ func NewService(
 		satelliteAddress: satelliteAddress,
 		orderExpiration: orderExpiration,
 		repairMaxExcessRateOptimalThreshold: repairMaxExcessRateOptimalThreshold,
+		nodeStatusLogging: nodeStatusLogging,
 	}
 }
@@ -70,11 +73,11 @@ func (service *Service) VerifyOrderLimitSignature(ctx context.Context, signed *p
 func (service *Service) createSerial(ctx context.Context) (_ storj.SerialNumber, err error) {
 	defer mon.Task()(&ctx)(&err)
-	uuid, err := uuid.New()
+	id, err := uuid.New()
 	if err != nil {
 		return storj.SerialNumber{}, Error.Wrap(err)
 	}
-	return storj.SerialNumber(*uuid), nil
+	return storj.SerialNumber(*id), nil
 }

 func (service *Service) saveSerial(ctx context.Context, serialNumber storj.SerialNumber, bucketID []byte, expiresAt time.Time) (err error) {
@@ -147,13 +150,17 @@ func (service *Service) CreateGetOrderLimits(ctx context.Context, bucketID []byt
 		}

 		if node.Disqualified != nil {
-			service.log.Debug("node is disqualified", zap.Stringer("ID", node.Id))
+			if service.nodeStatusLogging {
+				service.log.Debug("node is disqualified", zap.Stringer("ID", node.Id))
+			}
 			combinedErrs = errs.Combine(combinedErrs, overlay.ErrNodeDisqualified.New("%v", node.Id))
 			continue
 		}

 		if !service.overlay.IsOnline(node) {
-			service.log.Debug("node is offline", zap.Stringer("ID", node.Id))
+			if service.nodeStatusLogging {
+				service.log.Debug("node is offline", zap.Stringer("ID", node.Id))
+			}
 			combinedErrs = errs.Combine(combinedErrs, overlay.ErrNodeOffline.New("%v", node.Id))
 			continue
 		}
@@ -292,13 +299,17 @@ func (service *Service) CreateDeleteOrderLimits(ctx context.Context, bucketID []
 		}

 		if node.Disqualified != nil {
-			service.log.Debug("node is disqualified", zap.Stringer("ID", node.Id))
+			if service.nodeStatusLogging {
+				service.log.Debug("node is disqualified", zap.Stringer("ID", node.Id))
+			}
 			combinedErrs = errs.Combine(combinedErrs, overlay.ErrNodeDisqualified.New("%v", node.Id))
 			continue
 		}

 		if !service.overlay.IsOnline(node) {
-			service.log.Debug("node is offline", zap.Stringer("ID", node.Id))
+			if service.nodeStatusLogging {
+				service.log.Debug("node is offline", zap.Stringer("ID", node.Id))
+			}
 			combinedErrs = errs.Combine(combinedErrs, overlay.ErrNodeOffline.New("%v", node.Id))
 			continue
 		}
@@ -376,13 +387,17 @@ func (service *Service) CreateAuditOrderLimits(ctx context.Context, bucketID []b
 		}

 		if node.Disqualified != nil {
-			service.log.Debug("node is disqualified", zap.Stringer("ID", node.Id))
+			if service.nodeStatusLogging {
+				service.log.Debug("node is disqualified", zap.Stringer("ID", node.Id))
+			}
 			combinedErrs = errs.Combine(combinedErrs, overlay.ErrNodeDisqualified.New("%v", node.Id))
 			continue
 		}

 		if !service.overlay.IsOnline(node) {
-			service.log.Debug("node is offline", zap.Stringer("ID", node.Id))
+			if service.nodeStatusLogging {
+				service.log.Debug("node is offline", zap.Stringer("ID", node.Id))
+			}
 			combinedErrs = errs.Combine(combinedErrs, overlay.ErrNodeOffline.New("%v", node.Id))
 			continue
 		}
@@ -541,13 +556,17 @@ func (service *Service) CreateGetRepairOrderLimits(ctx context.Context, bucketID
 		}

 		if node.Disqualified != nil {
-			service.log.Debug("node is disqualified", zap.Stringer("ID", node.Id))
+			if service.nodeStatusLogging {
+				service.log.Debug("node is disqualified", zap.Stringer("ID", node.Id))
+			}
 			combinedErrs = errs.Combine(combinedErrs, overlay.ErrNodeDisqualified.New("%v", node.Id))
 			continue
 		}

 		if !service.overlay.IsOnline(node) {
-			service.log.Debug("node is offline", zap.Stringer("ID", node.Id))
+			if service.nodeStatusLogging {
+				service.log.Debug("node is offline", zap.Stringer("ID", node.Id))
+			}
 			combinedErrs = errs.Combine(combinedErrs, overlay.ErrNodeOffline.New("%v", node.Id))
 			continue
 		}
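The orders-service hunks above are the substance of the commit: the per-node "disqualified"/"offline" debug lines, which previously fired on every order-limit request, are now emitted only when nodeStatusLogging is enabled, while the error accumulation and the continue behaviour are unchanged. A minimal, self-contained sketch of that gating pattern (illustrative only; the service type, reportOffline helper, and node IDs below are invented, not code from this commit):

// gating_sketch.go — illustrative sketch of conditional node-status logging.
package main

import "go.uber.org/zap"

type service struct {
	log               *zap.Logger
	nodeStatusLogging bool
}

// reportOffline mirrors the pattern above: the debug log is emitted only
// when node-status logging is enabled; the caller's error handling would
// proceed either way.
func (s *service) reportOffline(nodeID string) {
	if s.nodeStatusLogging {
		s.log.Debug("node is offline", zap.String("ID", nodeID))
	}
}

func main() {
	log, _ := zap.NewDevelopment()
	defer log.Sync()

	quiet := &service{log: log, nodeStatusLogging: false}
	quiet.reportOffline("node-a") // prints nothing

	verbose := &service{log: log, nodeStatusLogging: true}
	verbose.reportOffline("node-b") // prints a debug line
}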

@@ -98,6 +98,7 @@ func NewRepairer(log *zap.Logger, full *identity.FullIdentity, pointerDB metainf
 		Address: config.Contact.ExternalAddress,
 	},
 	config.Repairer.MaxExcessRateOptimalThreshold,
+	config.Orders.NodeStatusLogging,
 )
 }

@@ -304,6 +304,9 @@ identity.key-path: /root/.local/share/storj/identity/satellite/identity.key
 # how often to flush the rollups write cache to the database
 # orders.flush-interval: 1m0s

+# log the offline/disqualification status of nodes
+# orders.node-status-logging: false
+
 # how many records to read in a single transaction when calculating billable bandwidth
 # orders.reported-rollups-read-batch-size: 1000
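The new option defaults to false, so these node-status debug lines are now opt-in. An operator who still wants them would set the key in the satellite config file, for example:

orders.node-status-logging: true

(or, assuming the usual mapping of config keys to command-line flags, pass the equivalent --orders.node-status-logging=true).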