storagenode/piecestore: return node certificate chain at upload conclusion

Uplinks currently get the node's certificate chain over TLS. Once Noise
is in use, uplinks will no longer be able to do this. We should start
having the upload request return the certificate chain in the same
release that starts supporting Noise.

Change-Id: I619b23cb8e25691bcc62d760f884403a4ccd64a0
This commit is contained in:
JT Olio 2023-01-30 15:44:45 -05:00 committed by JT Olio
parent 33bd929308
commit ae9ea22193
3 changed files with 9 additions and 8 deletions

View File

@ -23,7 +23,6 @@ import (
"storj.io/common/peertls/extensions"
"storj.io/common/peertls/tlsopts"
"storj.io/common/rpc"
"storj.io/common/signing"
"storj.io/common/storj"
"storj.io/private/debug"
"storj.io/private/version"
@ -541,7 +540,7 @@ func New(log *zap.Logger, full *identity.FullIdentity, db DB, revocationDB exten
peer.Storage2.Endpoint, err = piecestore.NewEndpoint(
peer.Log.Named("piecestore"),
signing.SignerFromFullIdentity(peer.Identity),
peer.Identity,
peer.Storage2.Trust,
peer.Storage2.Monitor,
peer.Storage2.RetainService,

View File

@ -93,7 +93,7 @@ type Endpoint struct {
log *zap.Logger
config Config
signer signing.Signer
ident *identity.FullIdentity
trust *trust.Pool
monitor *monitor.Service
retain *retain.Service
@ -110,12 +110,12 @@ type Endpoint struct {
}
// NewEndpoint creates a new piecestore endpoint.
func NewEndpoint(log *zap.Logger, signer signing.Signer, trust *trust.Pool, monitor *monitor.Service, retain *retain.Service, pingStats pingStatsSource, store *pieces.Store, trashChore *pieces.TrashChore, pieceDeleter *pieces.Deleter, ordersStore *orders.FileStore, usage bandwidth.DB, usedSerials *usedserials.Table, config Config) (*Endpoint, error) {
func NewEndpoint(log *zap.Logger, ident *identity.FullIdentity, trust *trust.Pool, monitor *monitor.Service, retain *retain.Service, pingStats pingStatsSource, store *pieces.Store, trashChore *pieces.TrashChore, pieceDeleter *pieces.Deleter, ordersStore *orders.FileStore, usage bandwidth.DB, usedSerials *usedserials.Table, config Config) (*Endpoint, error) {
return &Endpoint{
log: log,
config: config,
signer: signer,
ident: ident,
trust: trust,
monitor: monitor,
retain: retain,
@ -498,7 +498,7 @@ func (endpoint *Endpoint) Upload(stream pb.DRPCPiecestore_UploadStream) (err err
}
}
storageNodeHash, err := signing.SignPieceHash(ctx, endpoint.signer, &pb.PieceHash{
storageNodeHash, err := signing.SignPieceHash(ctx, signing.SignerFromFullIdentity(endpoint.ident), &pb.PieceHash{
PieceId: limit.PieceId,
Hash: calculatedHash,
HashAlgorithm: hashAlgorithm,
@ -510,7 +510,9 @@ func (endpoint *Endpoint) Upload(stream pb.DRPCPiecestore_UploadStream) (err err
}
closeErr := rpctimeout.Run(ctx, endpoint.config.StreamOperationTimeout, func(_ context.Context) (err error) {
return stream.SendAndClose(&pb.PieceUploadResponse{Done: storageNodeHash})
return stream.SendAndClose(&pb.PieceUploadResponse{
Done: storageNodeHash,
NodeCertchain: identity.EncodePeerIdentity(endpoint.ident.PeerIdentity())})
})
if errs.Is(closeErr, io.EOF) {
closeErr = nil

View File

@ -35,7 +35,7 @@ func (endpoint *Endpoint) verifyOrderLimit(ctx context.Context, limit *pb.OrderL
switch {
case limit.Limit < 0:
return rpcstatus.Error(rpcstatus.InvalidArgument, "order limit is negative")
case endpoint.signer.ID() != limit.StorageNodeId:
case endpoint.ident.ID != limit.StorageNodeId:
return rpcstatus.Errorf(rpcstatus.InvalidArgument, "order intended for other storagenode: %v", limit.StorageNodeId)
case endpoint.IsExpired(limit.PieceExpiration):
return rpcstatus.Errorf(rpcstatus.InvalidArgument, "piece expired: %v", limit.PieceExpiration)