Use zap.Stringer instead of zap.String (#2223)

Kaloyan Raev 2019-06-18 02:37:44 +03:00 committed by littleskunk
parent b1e5cf1200
commit 8e29ef8a6b
11 changed files with 44 additions and 44 deletions
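
Why the change matters: zap.String evaluates its argument eagerly, so every call site pays for NodeID.String() and its allocation even when the Debug level is disabled, while zap.Stringer stores the fmt.Stringer and calls String() only if the entry is actually encoded. A minimal before/after sketch, not code from this commit; the nodeID type and its hex formatting are hypothetical stand-ins for storj.NodeID:

package main

import (
	"encoding/hex"

	"go.uber.org/zap"
)

// nodeID is a hypothetical stand-in for storj.NodeID; any type with a
// String() method (i.e. any fmt.Stringer) behaves the same way.
type nodeID [4]byte

func (id nodeID) String() string { return hex.EncodeToString(id[:]) }

func main() {
	log, _ := zap.NewProduction() // Info level: Debug entries are dropped
	defer log.Sync()

	id := nodeID{0xde, 0xad, 0xbe, 0xef}

	// Before: id.String() runs and allocates on every call, even though
	// this entry is discarded by the level check.
	log.Debug("node is offline", zap.String("Node ID", id.String()))

	// After: the Stringer itself is stored in the field; String() is
	// called only if the entry is actually encoded.
	log.Debug("node is offline", zap.Stringer("Node ID", id))
}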

View File

@@ -109,7 +109,7 @@ func (verifier *Verifier) Verify(ctx context.Context, stripe *Stripe, skip map[s
}) {
// dial timeout
offlineNodes = append(offlineNodes, share.NodeID)
verifier.log.Debug("Verify: dial timeout (offline)", zap.String("Node ID", share.NodeID.String()), zap.Error(share.Error))
verifier.log.Debug("Verify: dial timeout (offline)", zap.Stringer("Node ID", share.NodeID), zap.Error(share.Error))
continue
}
if errs.IsFunc(share.Error, func(err error) bool {
@@ -117,12 +117,12 @@ func (verifier *Verifier) Verify(ctx context.Context, stripe *Stripe, skip map[s
}) {
// dial failed -- offline node
offlineNodes = append(offlineNodes, share.NodeID)
verifier.log.Debug("Verify: dial failed (offline)", zap.String("Node ID", share.NodeID.String()), zap.Error(share.Error))
verifier.log.Debug("Verify: dial failed (offline)", zap.Stringer("Node ID", share.NodeID), zap.Error(share.Error))
continue
}
// unknown transport error
containedNodes[pieceNum] = share.NodeID
verifier.log.Debug("Verify: unknown transport error (contained)", zap.String("Node ID", share.NodeID.String()), zap.Error(share.Error))
verifier.log.Debug("Verify: unknown transport error (contained)", zap.Stringer("Node ID", share.NodeID), zap.Error(share.Error))
}
if errs.IsFunc(share.Error, func(err error) bool {
@@ -130,7 +130,7 @@ func (verifier *Verifier) Verify(ctx context.Context, stripe *Stripe, skip map[s
}) {
// missing share
failedNodes = append(failedNodes, share.NodeID)
verifier.log.Debug("Verify: piece not found (audit failed)", zap.String("Node ID", share.NodeID.String()), zap.Error(share.Error))
verifier.log.Debug("Verify: piece not found (audit failed)", zap.Stringer("Node ID", share.NodeID), zap.Error(share.Error))
continue
}
@@ -139,13 +139,13 @@ func (verifier *Verifier) Verify(ctx context.Context, stripe *Stripe, skip map[s
}) {
// dial successful, but download timed out
containedNodes[pieceNum] = share.NodeID
verifier.log.Debug("Verify: download timeout (contained)", zap.String("Node ID", share.NodeID.String()), zap.Error(share.Error))
verifier.log.Debug("Verify: download timeout (contained)", zap.Stringer("Node ID", share.NodeID), zap.Error(share.Error))
continue
}
// unknown error
containedNodes[pieceNum] = share.NodeID
verifier.log.Debug("Verify: unknown error (contained)", zap.String("Node ID", share.NodeID.String()), zap.Error(share.Error))
verifier.log.Debug("Verify: unknown error (contained)", zap.Stringer("Node ID", share.NodeID), zap.Error(share.Error))
}
required := int(pointer.Remote.Redundancy.GetMinReq())
@@ -294,7 +294,7 @@ func (verifier *Verifier) Reverify(ctx context.Context, stripe *Stripe) (report
continue
}
ch <- result{nodeID: piece.NodeId, status: erred, err: err}
verifier.log.Debug("Reverify: error getting from containment db", zap.String("Node ID", piece.NodeId.String()), zap.Error(err))
verifier.log.Debug("Reverify: error getting from containment db", zap.Stringer("Node ID", piece.NodeId), zap.Error(err))
continue
}
@@ -303,11 +303,11 @@ func (verifier *Verifier) Reverify(ctx context.Context, stripe *Stripe) (report
if err != nil {
if overlay.ErrNodeOffline.Has(err) {
ch <- result{nodeID: piece.NodeId, status: offline}
verifier.log.Debug("Reverify: order limit not created (offline)", zap.String("Node ID", piece.NodeId.String()))
verifier.log.Debug("Reverify: order limit not created (offline)", zap.Stringer("Node ID", piece.NodeId))
return
}
ch <- result{nodeID: piece.NodeId, status: erred, err: err}
verifier.log.Debug("Reverify: error creating order limit", zap.String("Node ID", piece.NodeId.String()), zap.Error(err))
verifier.log.Debug("Reverify: error creating order limit", zap.Stringer("Node ID", piece.NodeId), zap.Error(err))
return
}
@@ -319,20 +319,20 @@ func (verifier *Verifier) Reverify(ctx context.Context, stripe *Stripe) (report
}) {
// dial timeout
ch <- result{nodeID: piece.NodeId, status: offline}
verifier.log.Debug("Reverify: dial timeout (offline)", zap.String("Node ID", piece.NodeId.String()), zap.Error(err))
verifier.log.Debug("Reverify: dial timeout (offline)", zap.Stringer("Node ID", piece.NodeId), zap.Error(err))
return
}
if errs.IsFunc(err, func(err error) bool {
return status.Code(err) == codes.Unknown
}) {
// dial failed -- offline node
verifier.log.Debug("Reverify: dial failed (offline)", zap.String("Node ID", piece.NodeId.String()), zap.Error(err))
verifier.log.Debug("Reverify: dial failed (offline)", zap.Stringer("Node ID", piece.NodeId), zap.Error(err))
ch <- result{nodeID: piece.NodeId, status: offline}
return
}
// unknown transport error
ch <- result{nodeID: piece.NodeId, status: contained, pendingAudit: pending}
verifier.log.Debug("Reverify: unknown transport error (contained)", zap.String("Node ID", piece.NodeId.String()), zap.Error(err))
verifier.log.Debug("Reverify: unknown transport error (contained)", zap.Stringer("Node ID", piece.NodeId), zap.Error(err))
return
}
@@ -341,7 +341,7 @@ func (verifier *Verifier) Reverify(ctx context.Context, stripe *Stripe) (report
}) {
// missing share
ch <- result{nodeID: piece.NodeId, status: failed}
verifier.log.Debug("Reverify: piece not found (audit failed)", zap.String("Node ID", piece.NodeId.String()), zap.Error(err))
verifier.log.Debug("Reverify: piece not found (audit failed)", zap.Stringer("Node ID", piece.NodeId), zap.Error(err))
return
}
@@ -350,23 +350,23 @@ func (verifier *Verifier) Reverify(ctx context.Context, stripe *Stripe) (report
}) {
// dial successful, but download timed out
ch <- result{nodeID: piece.NodeId, status: contained, pendingAudit: pending}
verifier.log.Debug("Reverify: download timeout (contained)", zap.String("Node ID", piece.NodeId.String()), zap.Error(err))
verifier.log.Debug("Reverify: download timeout (contained)", zap.Stringer("Node ID", piece.NodeId), zap.Error(err))
return
}
// unknown error
ch <- result{nodeID: piece.NodeId, status: contained, pendingAudit: pending}
verifier.log.Debug("Reverify: unknown error (contained)", zap.String("Node ID", piece.NodeId.String()), zap.Error(err))
verifier.log.Debug("Reverify: unknown error (contained)", zap.Stringer("Node ID", piece.NodeId), zap.Error(err))
return
}
downloadedHash := pkcrypto.SHA256Hash(share.Data)
if bytes.Equal(downloadedHash, pending.ExpectedShareHash) {
ch <- result{nodeID: piece.NodeId, status: success}
verifier.log.Debug("Reverify: hashes match (audit success)", zap.String("Node ID", piece.NodeId.String()))
verifier.log.Debug("Reverify: hashes match (audit success)", zap.Stringer("Node ID", piece.NodeId))
} else {
ch <- result{nodeID: piece.NodeId, status: failed}
verifier.log.Debug("Reverify: hashes mismatch (audit failed)", zap.String("Node ID", piece.NodeId.String()),
verifier.log.Debug("Reverify: hashes mismatch (audit failed)", zap.Stringer("Node ID", piece.NodeId),
zap.Binary("expected hash", pending.ExpectedShareHash), zap.Binary("downloaded hash", downloadedHash))
}
}(pending, piece)

View File

@@ -252,9 +252,9 @@ func (c CertificateSigner) Sign(ctx context.Context, req *pb.SigningRequest) (_
Token: *token,
}
c.log.Info("certificate successfully signed",
zap.String("node ID", peerIdent.ID.String()),
zap.Stringer("node ID", peerIdent.ID),
zap.Uint16("difficulty", difficulty),
zap.String("truncated token", tokenFormatter.String()),
zap.Stringer("truncated token", tokenFormatter),
)
return &pb.SigningResponse{

View File

@@ -120,7 +120,7 @@ func (c CertServerConfig) Run(ctx context.Context, srv *server.Server) (err erro
certSrv.log.Info(
"Certificate signing server running",
zap.String("address", srv.Addr().String()),
zap.Stringer("address", srv.Addr()),
)
ctx, cancel := context.WithCancel(ctx)

View File

@@ -118,10 +118,10 @@ func (discovery *Discovery) refresh(ctx context.Context) (err error) {
ping, err := discovery.kad.Ping(ctx, node.Node)
if err != nil {
discovery.log.Info("could not ping node", zap.String("ID", node.Id.String()), zap.Error(err))
discovery.log.Info("could not ping node", zap.Stringer("ID", node.Id), zap.Error(err))
_, err := discovery.cache.UpdateUptime(ctx, node.Id, false)
if err != nil {
discovery.log.Error("could not update node uptime in cache", zap.String("ID", node.Id.String()), zap.Error(err))
discovery.log.Error("could not update node uptime in cache", zap.Stringer("ID", node.Id), zap.Error(err))
}
continue
}
@@ -132,19 +132,19 @@ func (discovery *Discovery) refresh(ctx context.Context) (err error) {
_, err = discovery.cache.UpdateUptime(ctx, ping.Id, true)
if err != nil {
discovery.log.Error("could not update node uptime in cache", zap.String("ID", ping.Id.String()), zap.Error(err))
discovery.log.Error("could not update node uptime in cache", zap.Stringer("ID", ping.Id), zap.Error(err))
}
// update node info
info, err := discovery.kad.FetchInfo(ctx, node.Node)
if err != nil {
discovery.log.Warn("could not fetch node info", zap.String("ID", ping.GetAddress().String()))
discovery.log.Warn("could not fetch node info", zap.Stringer("ID", ping.GetAddress()))
continue
}
_, err = discovery.cache.UpdateNodeInfo(ctx, ping.Id, info)
if err != nil {
discovery.log.Warn("could not update node info", zap.String("ID", ping.GetAddress().String()))
discovery.log.Warn("could not update node info", zap.Stringer("ID", ping.GetAddress()))
}
}

View File

@@ -56,7 +56,7 @@ func (endpoint *Endpoint) pingback(ctx context.Context, target *pb.Node) {
defer mon.Task()(&ctx)(&err)
_, err = endpoint.service.Ping(ctx, *target)
if err != nil {
endpoint.log.Debug("connection to node failed", zap.Error(err), zap.String("nodeID", target.Id.String()))
endpoint.log.Debug("connection to node failed", zap.Error(err), zap.Stringer("nodeID", target.Id))
err = endpoint.routingTable.ConnectionFailed(ctx, target)
if err != nil {
endpoint.log.Error("could not respond to connection failed", zap.Error(err))

View File

@@ -94,7 +94,7 @@ func rootMutation(log *zap.Logger, service *console.Service, mailService *mailse
token, err := service.GenerateActivationToken(p.Context, user.ID, user.Email)
if err != nil {
log.Error("register: failed to generate activation token",
zap.String("id", user.ID.String()),
zap.Stringer("id", user.ID),
zap.String("email", user.Email),
zap.Error(err))

View File

@@ -174,9 +174,9 @@ func (s *Server) bucketUsageReportHandler(w http.ResponseWriter, req *http.Reque
before = time.Unix(beforeStamp, 0)
s.log.Debug("querying bucket usage report",
zap.String("projectID", projectID.String()),
zap.String("since", since.String()),
zap.String("before", before.String()))
zap.Stringer("projectID", projectID),
zap.Stringer("since", since),
zap.Stringer("before", before))
ctx = console.WithAuth(ctx, auth)
bucketRollups, err := s.service.GetBucketUsageRollups(ctx, *projectID, since, before)
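
time.Time has a value-receiver String() method, so it satisfies fmt.Stringer and the since/before values can switch over the same way; zap.Time, which records the value natively, would be an alternative, but the commit keeps the existing string rendering. A small sketch under those assumptions:

package main

import (
	"time"

	"go.uber.org/zap"
)

func main() {
	log, _ := zap.NewDevelopment() // Debug enabled in the development config
	defer log.Sync()

	before := time.Now()
	since := before.Add(-24 * time.Hour)

	// time.Time's value-receiver String() satisfies fmt.Stringer, so the
	// formatting is deferred exactly as for NodeID above.
	log.Debug("querying bucket usage report",
		zap.Stringer("since", since),
		zap.Stringer("before", before))
}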

View File

@@ -138,8 +138,8 @@ func (service *Service) SendRendered(ctx context.Context, to []post.Address, msg
if err != nil {
service.log.Error("fail sending email",
zap.String("error", err.Error()),
zap.Strings("recipients", recipients))
zap.Strings("recipients", recipients),
zap.Error(err))
} else {
service.log.Info("email sent successfully",
zap.Strings("recipients", recipients))

View File

@@ -179,7 +179,7 @@ func (endpoint *Endpoint) Settlement(stream pb.Orders_SettlementServer) (err err
return nil
}()
if rejectErr != err {
log.Debug("order limit/order verification failed", zap.String("serial", orderLimit.SerialNumber.String()), zap.Error(err))
log.Debug("order limit/order verification failed", zap.Stringer("serial", orderLimit.SerialNumber), zap.Error(err))
err := monitoredSettlementStreamSend(ctx, stream, &pb.SettlementResponse{
SerialNumber: orderLimit.SerialNumber,
Status: pb.SettlementResponse_REJECTED,

View File

@@ -149,7 +149,7 @@ func (service *Service) CreateGetOrderLimits(ctx context.Context, uplink *identi
}
if !service.cache.IsOnline(node) {
service.log.Debug("node is offline", zap.String("ID", node.Id.String()))
service.log.Debug("node is offline", zap.Stringer("ID", node.Id))
combinedErrs = errs.Combine(combinedErrs, Error.New("node is offline: %s", node.Id.String()))
continue
}
@@ -288,7 +288,7 @@ func (service *Service) CreateDeleteOrderLimits(ctx context.Context, uplink *ide
}
if !service.cache.IsOnline(node) {
service.log.Debug("node is offline", zap.String("ID", node.Id.String()))
service.log.Debug("node is offline", zap.Stringer("ID", node.Id))
combinedErrs = errs.Combine(combinedErrs, Error.New("node is offline: %s", node.Id.String()))
continue
}
@@ -373,7 +373,7 @@ func (service *Service) CreateAuditOrderLimits(ctx context.Context, auditor *ide
}
if !service.cache.IsOnline(node) {
service.log.Debug("node is offline", zap.String("ID", node.Id.String()))
service.log.Debug("node is offline", zap.Stringer("ID", node.Id))
combinedErrs = errs.Combine(combinedErrs, Error.New("node is offline: %s", node.Id.String()))
continue
}
@@ -516,7 +516,7 @@ func (service *Service) CreateGetRepairOrderLimits(ctx context.Context, repairer
}
if !service.cache.IsOnline(node) {
service.log.Debug("node is offline", zap.String("ID", node.Id.String()))
service.log.Debug("node is offline", zap.Stringer("ID", node.Id))
combinedErrs = errs.Combine(combinedErrs, Error.New("node is offline: %s", node.Id.String()))
continue
}

View File

@@ -34,14 +34,14 @@ func New(log *zap.Logger, store storage.KeyValueStore) *Logger {
// Put adds a value to store
func (store *Logger) Put(ctx context.Context, key storage.Key, value storage.Value) (err error) {
defer mon.Task()(&ctx)(&err)
store.log.Debug("Put", zap.String("key", string(key)), zap.Int("value length", len(value)), zap.Binary("truncated value", truncate(value)))
store.log.Debug("Put", zap.ByteString("key", key), zap.Int("value length", len(value)), zap.Binary("truncated value", truncate(value)))
return store.store.Put(ctx, key, value)
}
// Get gets a value to store
func (store *Logger) Get(ctx context.Context, key storage.Key) (_ storage.Value, err error) {
defer mon.Task()(&ctx)(&err)
store.log.Debug("Get", zap.String("key", string(key)))
store.log.Debug("Get", zap.ByteString("key", key))
return store.store.Get(ctx, key)
}
@@ -55,7 +55,7 @@ func (store *Logger) GetAll(ctx context.Context, keys storage.Keys) (_ storage.V
// Delete deletes key and the value
func (store *Logger) Delete(ctx context.Context, key storage.Key) (err error) {
defer mon.Task()(&ctx)(&err)
store.log.Debug("Delete", zap.String("key", string(key)))
store.log.Debug("Delete", zap.ByteString("key", key))
return store.store.Delete(ctx, key)
}
@@ -63,7 +63,7 @@ func (store *Logger) Delete(ctx context.Context, key storage.Key) (err error) {
func (store *Logger) List(ctx context.Context, first storage.Key, limit int) (_ storage.Keys, err error) {
defer mon.Task()(&ctx)(&err)
keys, err := store.store.List(ctx, first, limit)
store.log.Debug("List", zap.String("first", string(first)), zap.Int("limit", limit), zap.Any("keys", keys.Strings()))
store.log.Debug("List", zap.ByteString("first", first), zap.Int("limit", limit), zap.Strings("keys", keys.Strings()))
return keys, err
}
@@ -71,8 +71,8 @@ func (store *Logger) List(ctx context.Context, first storage.Key, limit int) (_
func (store *Logger) Iterate(ctx context.Context, opts storage.IterateOptions, fn func(context.Context, storage.Iterator) error) (err error) {
defer mon.Task()(&ctx)(&err)
store.log.Debug("Iterate",
zap.String("prefix", string(opts.Prefix)),
zap.String("first", string(opts.First)),
zap.ByteString("prefix", opts.Prefix),
zap.ByteString("first", opts.First),
zap.Bool("recurse", opts.Recurse),
zap.Bool("reverse", opts.Reverse),
)
@@ -81,7 +81,7 @@ func (store *Logger) Iterate(ctx context.Context, opts storage.IterateOptions, f
ok := it.Next(ctx, item)
if ok {
store.log.Debug(" ",
zap.String("key", string(item.Key)),
zap.ByteString("key", item.Key),
zap.Int("value length", len(item.Value)),
zap.Binary("truncated value", truncate(item.Value)),
)
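
The storage logger hunks swap two more constructors: zap.ByteString encodes a UTF-8 []byte directly, avoiding the string(key) conversion copy at each call site, and zap.Strings replaces reflection-based zap.Any with a typed string-array field. A minimal sketch; the key contents are illustrative and storage.Key is assumed to be a []byte alias:

package main

import "go.uber.org/zap"

func main() {
	log, _ := zap.NewDevelopment()
	defer log.Sync()

	key := []byte("bucket/object-key") // illustrative value
	keys := []string{"a", "b"}

	// Before: string(key) copies the bytes before zap ever sees them, and
	// zap.Any resorts to reflection to type the slice.
	log.Debug("List", zap.String("key", string(key)), zap.Any("keys", keys))

	// After: the bytes are encoded directly as UTF-8 text, and the slice
	// becomes a typed string-array field.
	log.Debug("List", zap.ByteString("key", key), zap.Strings("keys", keys))
}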