satellite/metainfo: prevent internal DB errors in Public API
Resolves https://github.com/storj/storj/issues/6081

Change-Id: I0e530db39947138dcafc1b6bd1710ff1ca96b8c5
parent e21978f11a
commit 89d682f49f
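The diff below repeats one pattern across the metainfo endpoints: log the detailed error on the satellite, and return only a generic message over the public RPC, so internal DB details never reach clients. A minimal sketch of that pattern follows (the package and function names and the simulated DB error are hypothetical; rpcstatus and zap are the packages the real handlers use):

package example

import (
	"context"
	"errors"

	"go.uber.org/zap"

	"storj.io/common/rpc/rpcstatus"
)

// lookupAttribution is a hypothetical stand-in for the endpoint methods
// touched by this commit; the DB error below is simulated.
func lookupAttribution(ctx context.Context, log *zap.Logger) error {
	err := errors.New("pq: duplicate key value violates unique constraint")
	if err != nil {
		// Keep the DB detail in the satellite log only...
		log.Error("error while getting attribution from DB", zap.Error(err))
		// ...and hand the client a generic Internal status instead of err.Error().
		return rpcstatus.Error(rpcstatus.Internal, "unable to get bucket attribution")
	}
	return nil
}

The same shape repeats below for bucket lookups, stream IDs, order limits, and segment IDs.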
@@ -131,7 +131,7 @@ func (endpoint *Endpoint) tryUpdateBucketAttribution(ctx context.Context, header
 		}
 	} else if !attribution.ErrBucketNotAttributed.Has(err) {
 		endpoint.log.Error("error while getting attribution from DB", zap.Error(err))
-		return rpcstatus.Error(rpcstatus.Internal, err.Error())
+		return rpcstatus.Error(rpcstatus.Internal, "unable to get bucket attribution")
 	}

 	// checks if bucket exists before updates it or makes a new entry
@@ -171,7 +171,7 @@ func (endpoint *Endpoint) tryUpdateBucketAttribution(ctx context.Context, header
 	})
 	if err != nil {
 		endpoint.log.Error("error while inserting attribution to DB", zap.Error(err))
-		return rpcstatus.Error(rpcstatus.Internal, err.Error())
+		return rpcstatus.Error(rpcstatus.Internal, "unable to set bucket attribution")
 	}
 }

@@ -329,7 +329,7 @@ func (endpoint *Endpoint) convertMetabaseErr(err error) error {
 		return rpcstatus.Error(rpcstatus.PermissionDenied, err.Error())
 	default:
 		endpoint.log.Error("internal", zap.Error(err))
-		return rpcstatus.Error(rpcstatus.Internal, err.Error())
+		return rpcstatus.Error(rpcstatus.Internal, "internal error")
 	}
 }

@@ -41,7 +41,7 @@ func (endpoint *Endpoint) GetBucket(ctx context.Context, req *pb.BucketGetReques
 			return nil, rpcstatus.Error(rpcstatus.NotFound, err.Error())
 		}
 		endpoint.log.Error("internal", zap.Error(err))
-		return nil, rpcstatus.Error(rpcstatus.Internal, err.Error())
+		return nil, rpcstatus.Error(rpcstatus.Internal, "unable to get bucket metadata")
 	}

 	// override RS to fit satellite settings
@@ -111,7 +111,7 @@ func (endpoint *Endpoint) CreateBucket(ctx context.Context, req *pb.BucketCreate
 	exists, err := endpoint.buckets.HasBucket(ctx, req.GetName(), keyInfo.ProjectID)
 	if err != nil {
 		endpoint.log.Error("internal", zap.Error(err))
-		return nil, rpcstatus.Error(rpcstatus.Internal, err.Error())
+		return nil, rpcstatus.Error(rpcstatus.Internal, "unable to check if bucket exists")
 	} else if exists {
 		// When the bucket exists, try to set the attribution.
 		if err := endpoint.ensureAttribution(ctx, req.Header, keyInfo, req.GetName(), nil, true); err != nil {
@@ -263,7 +263,7 @@ func (endpoint *Endpoint) DeleteBucket(ctx context.Context, req *pb.BucketDelete
 			return &pb.BucketDeleteResponse{Bucket: convBucket}, nil
 		}
 		endpoint.log.Error("internal", zap.Error(err))
-		return nil, rpcstatus.Error(rpcstatus.Internal, err.Error())
+		return nil, rpcstatus.Error(rpcstatus.Internal, "unable to delete bucket")
 	}

 	return &pb.BucketDeleteResponse{Bucket: convBucket}, nil

@@ -113,7 +113,7 @@ func (endpoint *Endpoint) BeginObject(ctx context.Context, req *pb.ObjectBeginRe
 			return nil, rpcstatus.Errorf(rpcstatus.NotFound, "bucket not found: %s", req.Bucket)
 		}
 		endpoint.log.Error("unable to check bucket", zap.Error(err))
-		return nil, rpcstatus.Error(rpcstatus.Internal, err.Error())
+		return nil, rpcstatus.Error(rpcstatus.Internal, "unable to get bucket placement")
 	}

 	if err := endpoint.ensureAttribution(ctx, req.Header, keyInfo, req.Bucket, nil, false); err != nil {
@@ -123,7 +123,7 @@ func (endpoint *Endpoint) BeginObject(ctx context.Context, req *pb.ObjectBeginRe
 	streamID, err := uuid.New()
 	if err != nil {
 		endpoint.log.Error("internal", zap.Error(err))
-		return nil, rpcstatus.Error(rpcstatus.Internal, err.Error())
+		return nil, rpcstatus.Error(rpcstatus.Internal, "unable to create stream id")
 	}

 	// TODO this will work only with newest uplink
@@ -180,7 +180,7 @@ func (endpoint *Endpoint) BeginObject(ctx context.Context, req *pb.ObjectBeginRe
 	})
 	if err != nil {
 		endpoint.log.Error("internal", zap.Error(err))
-		return nil, rpcstatus.Error(rpcstatus.Internal, err.Error())
+		return nil, rpcstatus.Error(rpcstatus.Internal, "unable to create stream id")
 	}

 	endpoint.log.Info("Object Upload", zap.Stringer("Project ID", keyInfo.ProjectID), zap.String("operation", "put"), zap.String("type", "object"))
@@ -268,7 +268,7 @@ func (endpoint *Endpoint) CommitObject(ctx context.Context, req *pb.ObjectCommit
 	id, err := uuid.FromBytes(streamID.StreamId)
 	if err != nil {
 		endpoint.log.Error("internal", zap.Error(err))
-		return nil, rpcstatus.Error(rpcstatus.Internal, err.Error())
+		return nil, rpcstatus.Error(rpcstatus.Internal, "unable to parse stream id")
 	}

 	// for old uplinks get Encryption from StreamMeta
@@ -435,7 +435,7 @@ func (endpoint *Endpoint) GetObject(ctx context.Context, req *pb.ObjectGetReques
 	object, err := endpoint.objectToProto(ctx, mbObject, segmentRS)
 	if err != nil {
 		endpoint.log.Error("internal", zap.Error(err))
-		return nil, rpcstatus.Error(rpcstatus.Internal, err.Error())
+		return nil, rpcstatus.Error(rpcstatus.Internal, "internal error")
 	}

 	endpoint.log.Info("Object Get", zap.Stringer("Project ID", keyInfo.ProjectID), zap.String("operation", "get"), zap.String("type", "object"))
@@ -562,14 +562,14 @@ func (endpoint *Endpoint) DownloadObject(ctx context.Context, req *pb.ObjectDown
 	encryptedKeyNonce, err := storj.NonceFromBytes(segment.EncryptedKeyNonce)
 	if err != nil {
 		endpoint.log.Error("unable to get encryption key nonce from metadata", zap.Error(err))
-		return nil, rpcstatus.Error(rpcstatus.Internal, err.Error())
+		return nil, rpcstatus.Error(rpcstatus.Internal, "unable to get encryption key nonce from metadata")
 	}

 	if segment.Inline() {
 		err := endpoint.orders.UpdateGetInlineOrder(ctx, object.Location().Bucket(), downloadSizes.plainSize)
 		if err != nil {
 			endpoint.log.Error("internal", zap.Error(err))
-			return nil, rpcstatus.Error(rpcstatus.Internal, err.Error())
+			return nil, rpcstatus.Error(rpcstatus.Internal, "unable to update GET inline order")
 		}

 		// TODO we may think about fallback to encrypted size
@@ -609,7 +609,7 @@ func (endpoint *Endpoint) DownloadObject(ctx context.Context, req *pb.ObjectDown
 			)
 		}
 		endpoint.log.Error("internal", zap.Error(err))
-		return nil, rpcstatus.Error(rpcstatus.Internal, err.Error())
+		return nil, rpcstatus.Error(rpcstatus.Internal, "unable to create order limits")
 	}

 	// TODO we may think about fallback to encrypted size
@@ -656,13 +656,13 @@ func (endpoint *Endpoint) DownloadObject(ctx context.Context, req *pb.ObjectDown
 	protoObject, err := endpoint.objectToProto(ctx, object, nil)
 	if err != nil {
 		endpoint.log.Error("unable to convert object to proto", zap.Error(err))
-		return nil, rpcstatus.Error(rpcstatus.Internal, err.Error())
+		return nil, rpcstatus.Error(rpcstatus.Internal, "internal error")
 	}

 	segmentList, err := convertSegmentListResults(segments)
 	if err != nil {
 		endpoint.log.Error("unable to convert stream list", zap.Error(err))
-		return nil, rpcstatus.Error(rpcstatus.Internal, err.Error())
+		return nil, rpcstatus.Error(rpcstatus.Internal, "unable to convert stream list")
 	}

 	endpoint.log.Info("Object Download", zap.Stringer("Project ID", keyInfo.ProjectID), zap.String("operation", "download"), zap.String("type", "object"))
@@ -869,7 +869,7 @@ func (endpoint *Endpoint) ListObjects(ctx context.Context, req *pb.ObjectListReq
 			return nil, rpcstatus.Errorf(rpcstatus.NotFound, "bucket not found: %s", req.Bucket)
 		}
 		endpoint.log.Error("unable to check bucket", zap.Error(err))
-		return nil, rpcstatus.Error(rpcstatus.Internal, err.Error())
+		return nil, rpcstatus.Error(rpcstatus.Internal, "unable to get bucket placement")
 	}

 	limit := int(req.Limit)
@@ -1120,7 +1120,7 @@ func (endpoint *Endpoint) ListPendingObjectStreams(ctx context.Context, req *pb.
 			return nil, rpcstatus.Errorf(rpcstatus.NotFound, "bucket not found: %s", req.Bucket)
 		}
 		endpoint.log.Error("unable to check bucket", zap.Error(err))
-		return nil, rpcstatus.Error(rpcstatus.Internal, err.Error())
+		return nil, rpcstatus.Error(rpcstatus.Internal, "unable to get bucket placement")
 	}

 	cursor := metabase.StreamIDCursor{}
@@ -1132,7 +1132,7 @@ func (endpoint *Endpoint) ListPendingObjectStreams(ctx context.Context, req *pb.
 		cursor.StreamID, err = uuid.FromBytes(streamID.StreamId)
 		if err != nil {
 			endpoint.log.Error("internal", zap.Error(err))
-			return nil, rpcstatus.Error(rpcstatus.Internal, err.Error())
+			return nil, rpcstatus.Error(rpcstatus.Internal, "unable to parse stream id")
 		}
 	}

@@ -1389,7 +1389,7 @@ func (endpoint *Endpoint) GetObjectIPs(ctx context.Context, req *pb.ObjectGetIPs
 	nodeIPMap, err := endpoint.overlay.GetNodeIPsFromPlacement(ctx, nodeIDs, placement)
 	if err != nil {
 		endpoint.log.Error("internal", zap.Error(err))
-		return nil, rpcstatus.Error(rpcstatus.Internal, err.Error())
+		return nil, rpcstatus.Error(rpcstatus.Internal, "unable to get node IPs from placement")
 	}

 	nodeIPs := make([][]byte, 0, len(nodeIPMap))
@@ -1450,7 +1450,7 @@ func (endpoint *Endpoint) UpdateObjectMetadata(ctx context.Context, req *pb.Obje
 	id, err := uuid.FromBytes(streamID.StreamId)
 	if err != nil {
 		endpoint.log.Error("internal", zap.Error(err))
-		return nil, rpcstatus.Error(rpcstatus.Internal, err.Error())
+		return nil, rpcstatus.Error(rpcstatus.Internal, "unable to parse stream id")
 	}

 	var encryptedMetadataNonce []byte
@@ -1873,7 +1873,7 @@ func (endpoint *Endpoint) BeginMoveObject(ctx context.Context, req *pb.ObjectBeg
 			return nil, rpcstatus.Errorf(rpcstatus.NotFound, "bucket not found: %s", req.Bucket)
 		}
 		endpoint.log.Error("unable to check bucket", zap.Error(err))
-		return nil, rpcstatus.Error(rpcstatus.Internal, err.Error())
+		return nil, rpcstatus.Error(rpcstatus.Internal, "unable to get bucket placement")
 	}
 	newBucketPlacement, err := endpoint.buckets.GetBucketPlacement(ctx, req.NewBucket, keyInfo.ProjectID)
 	if err != nil {
@@ -1881,7 +1881,7 @@ func (endpoint *Endpoint) BeginMoveObject(ctx context.Context, req *pb.ObjectBeg
 			return nil, rpcstatus.Errorf(rpcstatus.NotFound, "bucket not found: %s", req.NewBucket)
 		}
 		endpoint.log.Error("unable to check bucket", zap.Error(err))
-		return nil, rpcstatus.Error(rpcstatus.Internal, err.Error())
+		return nil, rpcstatus.Error(rpcstatus.Internal, "unable to get bucket placement")
 	}
 	if oldBucketPlacement != newBucketPlacement {
 		return nil, rpcstatus.Error(rpcstatus.InvalidArgument, "copying object to bucket with different placement policy is not (yet) supported")
@@ -1902,7 +1902,7 @@ func (endpoint *Endpoint) BeginMoveObject(ctx context.Context, req *pb.ObjectBeg
 	response, err := convertBeginMoveObjectResults(result)
 	if err != nil {
 		endpoint.log.Error("internal", zap.Error(err))
-		return nil, rpcstatus.Error(rpcstatus.Internal, err.Error())
+		return nil, rpcstatus.Error(rpcstatus.Internal, "internal error")
 	}

 	satStreamID, err := endpoint.packStreamID(ctx, &internalpb.StreamID{
@@ -1917,7 +1917,7 @@ func (endpoint *Endpoint) BeginMoveObject(ctx context.Context, req *pb.ObjectBeg
 	})
 	if err != nil {
 		endpoint.log.Error("internal", zap.Error(err))
-		return nil, rpcstatus.Error(rpcstatus.Internal, err.Error())
+		return nil, rpcstatus.Error(rpcstatus.Internal, "unable to create stream id")
 	}

 	endpoint.log.Info("Object Move Begins", zap.Stringer("Project ID", keyInfo.ProjectID), zap.String("operation", "move"), zap.String("type", "object"))
@@ -2012,7 +2012,7 @@ func (endpoint *Endpoint) FinishMoveObject(ctx context.Context, req *pb.ObjectFi
 	exists, err := endpoint.buckets.HasBucket(ctx, req.NewBucket, keyInfo.ProjectID)
 	if err != nil {
 		endpoint.log.Error("unable to check bucket", zap.Error(err))
-		return nil, rpcstatus.Error(rpcstatus.Internal, err.Error())
+		return nil, rpcstatus.Error(rpcstatus.Internal, "unable to check bucket")
 	} else if !exists {
 		return nil, rpcstatus.Errorf(rpcstatus.NotFound, "target bucket not found: %s", req.NewBucket)
 	}
@@ -2098,7 +2098,7 @@ func (endpoint *Endpoint) BeginCopyObject(ctx context.Context, req *pb.ObjectBeg
 			return nil, rpcstatus.Errorf(rpcstatus.NotFound, "bucket not found: %s", req.Bucket)
 		}
 		endpoint.log.Error("unable to check bucket", zap.Error(err))
-		return nil, rpcstatus.Error(rpcstatus.Internal, err.Error())
+		return nil, rpcstatus.Error(rpcstatus.Internal, "unable to get bucket placement")
 	}
 	newBucketPlacement, err := endpoint.buckets.GetBucketPlacement(ctx, req.NewBucket, keyInfo.ProjectID)
 	if err != nil {
@@ -2106,7 +2106,7 @@ func (endpoint *Endpoint) BeginCopyObject(ctx context.Context, req *pb.ObjectBeg
 			return nil, rpcstatus.Errorf(rpcstatus.NotFound, "bucket not found: %s", req.NewBucket)
 		}
 		endpoint.log.Error("unable to check bucket", zap.Error(err))
-		return nil, rpcstatus.Error(rpcstatus.Internal, err.Error())
+		return nil, rpcstatus.Error(rpcstatus.Internal, "unable to get bucket placement")
 	}
 	if oldBucketPlacement != newBucketPlacement {
 		return nil, rpcstatus.Error(rpcstatus.InvalidArgument, "copying object to bucket with different placement policy is not (yet) supported")
@@ -2130,7 +2130,7 @@ func (endpoint *Endpoint) BeginCopyObject(ctx context.Context, req *pb.ObjectBeg
 	response, err := convertBeginCopyObjectResults(result)
 	if err != nil {
 		endpoint.log.Error("internal", zap.Error(err))
-		return nil, rpcstatus.Error(rpcstatus.Internal, err.Error())
+		return nil, rpcstatus.Error(rpcstatus.Internal, "internal error")
 	}

 	satStreamID, err := endpoint.packStreamID(ctx, &internalpb.StreamID{
@@ -2145,7 +2145,7 @@ func (endpoint *Endpoint) BeginCopyObject(ctx context.Context, req *pb.ObjectBeg
 	})
 	if err != nil {
 		endpoint.log.Error("internal", zap.Error(err))
-		return nil, rpcstatus.Error(rpcstatus.Internal, err.Error())
+		return nil, rpcstatus.Error(rpcstatus.Internal, "unable to create stream ID")
 	}

 	endpoint.log.Info("Object Copy Begins", zap.Stringer("Project ID", keyInfo.ProjectID), zap.String("operation", "copy"), zap.String("type", "object"))
@@ -2207,7 +2207,7 @@ func (endpoint *Endpoint) FinishCopyObject(ctx context.Context, req *pb.ObjectFi
 	exists, err := endpoint.buckets.HasBucket(ctx, req.NewBucket, keyInfo.ProjectID)
 	if err != nil {
 		endpoint.log.Error("unable to check bucket", zap.Error(err))
-		return nil, rpcstatus.Error(rpcstatus.Internal, err.Error())
+		return nil, rpcstatus.Error(rpcstatus.Internal, "unable to check bucket")
 	} else if !exists {
 		return nil, rpcstatus.Errorf(rpcstatus.NotFound, "target bucket not found: %s", req.NewBucket)
 	}
@@ -2251,7 +2251,7 @@ func (endpoint *Endpoint) FinishCopyObject(ctx context.Context, req *pb.ObjectFi
 	protoObject, err := endpoint.objectToProto(ctx, object, nil)
 	if err != nil {
 		endpoint.log.Error("internal", zap.Error(err))
-		return nil, rpcstatus.Error(rpcstatus.Internal, err.Error())
+		return nil, rpcstatus.Error(rpcstatus.Internal, "internal error")
 	}

 	endpoint.log.Info("Object Copy Finished", zap.Stringer("Project ID", keyInfo.ProjectID), zap.String("operation", "copy"), zap.String("type", "object"))

@@ -82,21 +82,24 @@ func (endpoint *Endpoint) BeginSegment(ctx context.Context, req *pb.SegmentBegin
 	}
 	nodes, err := endpoint.overlay.FindStorageNodesForUpload(ctx, request)
 	if err != nil {
+		if overlay.ErrNotEnoughNodes.Has(err) {
+			return nil, rpcstatus.Error(rpcstatus.FailedPrecondition, err.Error())
+		}
 		endpoint.log.Error("internal", zap.Error(err))
-		return nil, rpcstatus.Error(rpcstatus.Internal, err.Error())
+		return nil, rpcstatus.Error(rpcstatus.Internal, "internal error")
 	}

 	bucket := metabase.BucketLocation{ProjectID: keyInfo.ProjectID, BucketName: string(streamID.Bucket)}
 	rootPieceID, addressedLimits, piecePrivateKey, err := endpoint.orders.CreatePutOrderLimits(ctx, bucket, nodes, streamID.ExpirationDate, maxPieceSize)
 	if err != nil {
 		endpoint.log.Error("internal", zap.Error(err))
-		return nil, rpcstatus.Error(rpcstatus.Internal, err.Error())
+		return nil, rpcstatus.Error(rpcstatus.Internal, "unable to create order limits")
 	}

 	id, err := uuid.FromBytes(streamID.StreamId)
 	if err != nil {
 		endpoint.log.Error("internal", zap.Error(err))
-		return nil, rpcstatus.Error(rpcstatus.Internal, err.Error())
+		return nil, rpcstatus.Error(rpcstatus.Internal, "unable to parse stream id")
 	}

 	pieces := metabase.Pieces{}
@@ -137,7 +140,7 @@ func (endpoint *Endpoint) BeginSegment(ctx context.Context, req *pb.SegmentBegin
 	})
 	if err != nil {
 		endpoint.log.Error("internal", zap.Error(err))
-		return nil, rpcstatus.Error(rpcstatus.Internal, err.Error())
+		return nil, rpcstatus.Error(rpcstatus.Internal, "unable to create segment id")
 	}

 	endpoint.log.Info("Segment Upload", zap.Stringer("Project ID", keyInfo.ProjectID), zap.String("operation", "put"), zap.String("type", "remote"))
@@ -214,6 +217,9 @@ func (endpoint *Endpoint) RetryBeginSegmentPieces(ctx context.Context, req *pb.R
 		ExcludedIDs: excludedIDs,
 	})
 	if err != nil {
+		if overlay.ErrNotEnoughNodes.Has(err) {
+			return nil, rpcstatus.Error(rpcstatus.FailedPrecondition, err.Error())
+		}
 		endpoint.log.Error("internal", zap.Error(err))
 		return nil, rpcstatus.Error(rpcstatus.Internal, err.Error())
 	}
@@ -221,7 +227,7 @@ func (endpoint *Endpoint) RetryBeginSegmentPieces(ctx context.Context, req *pb.R
 	addressedLimits, err := endpoint.orders.ReplacePutOrderLimits(ctx, segmentID.RootPieceId, segmentID.OriginalOrderLimits, nodes, req.RetryPieceNumbers)
 	if err != nil {
 		endpoint.log.Error("internal", zap.Error(err))
-		return nil, rpcstatus.Error(rpcstatus.Internal, err.Error())
+		return nil, rpcstatus.Error(rpcstatus.Internal, "internal error")
 	}

 	segmentID.OriginalOrderLimits = addressedLimits
@@ -229,7 +235,7 @@ func (endpoint *Endpoint) RetryBeginSegmentPieces(ctx context.Context, req *pb.R
 	amendedSegmentID, err := endpoint.packSegmentID(ctx, segmentID)
 	if err != nil {
 		endpoint.log.Error("internal", zap.Error(err))
-		return nil, rpcstatus.Error(rpcstatus.Internal, err.Error())
+		return nil, rpcstatus.Error(rpcstatus.Internal, "unable to create segment id")
 	}

 	endpoint.log.Info("Segment Upload Piece Retry", zap.Stringer("Project ID", keyInfo.ProjectID), zap.String("operation", "put"), zap.String("type", "remote"))
@@ -340,7 +346,7 @@ func (endpoint *Endpoint) CommitSegment(ctx context.Context, req *pb.SegmentComm
 	id, err := uuid.FromBytes(streamID.StreamId)
 	if err != nil {
 		endpoint.log.Error("internal", zap.Error(err))
-		return nil, rpcstatus.Error(rpcstatus.Internal, err.Error())
+		return nil, rpcstatus.Error(rpcstatus.Internal, "unable to parse stream id")
 	}

 	var expiresAt *time.Time
@@ -457,7 +463,7 @@ func (endpoint *Endpoint) MakeInlineSegment(ctx context.Context, req *pb.Segment
 	id, err := uuid.FromBytes(streamID.StreamId)
 	if err != nil {
 		endpoint.log.Error("internal", zap.Error(err))
-		return nil, rpcstatus.Error(rpcstatus.Internal, err.Error())
+		return nil, rpcstatus.Error(rpcstatus.Internal, "unable to parse stream id")
 	}

 	if err := endpoint.checkUploadLimits(ctx, keyInfo.ProjectID); err != nil {
@@ -501,7 +507,7 @@ func (endpoint *Endpoint) MakeInlineSegment(ctx context.Context, req *pb.Segment
 	err = endpoint.orders.UpdatePutInlineOrder(ctx, bucket, inlineUsed)
 	if err != nil {
 		endpoint.log.Error("internal", zap.Error(err))
-		return nil, rpcstatus.Error(rpcstatus.Internal, err.Error())
+		return nil, rpcstatus.Error(rpcstatus.Internal, "unable to update PUT inline order")
 	}

 	if err := endpoint.addSegmentToUploadLimits(ctx, keyInfo.ProjectID, inlineUsed); err != nil {
@@ -546,7 +552,7 @@ func (endpoint *Endpoint) ListSegments(ctx context.Context, req *pb.SegmentListR
 	id, err := uuid.FromBytes(streamID.StreamId)
 	if err != nil {
 		endpoint.log.Error("internal", zap.Error(err))
-		return nil, rpcstatus.Error(rpcstatus.Internal, err.Error())
+		return nil, rpcstatus.Error(rpcstatus.Internal, "unable to parse stream id")
 	}

 	result, err := endpoint.metabase.ListStreamPositions(ctx, metabase.ListStreamPositions{
@@ -564,7 +570,7 @@ func (endpoint *Endpoint) ListSegments(ctx context.Context, req *pb.SegmentListR
 	response, err := convertStreamListResults(result)
 	if err != nil {
 		endpoint.log.Error("unable to convert stream list", zap.Error(err))
-		return nil, rpcstatus.Error(rpcstatus.Internal, err.Error())
+		return nil, rpcstatus.Error(rpcstatus.Internal, "unable to convert stream list")
 	}
 	response.EncryptionParameters = streamID.EncryptionParameters

@@ -650,7 +656,7 @@ func (endpoint *Endpoint) DownloadSegment(ctx context.Context, req *pb.SegmentDo
 	id, err := uuid.FromBytes(streamID.StreamId)
 	if err != nil {
 		endpoint.log.Error("internal", zap.Error(err))
-		return nil, rpcstatus.Error(rpcstatus.Internal, err.Error())
+		return nil, rpcstatus.Error(rpcstatus.Internal, "unable to parse stream id")
 	}

 	var segment metabase.Segment
@@ -698,14 +704,14 @@ func (endpoint *Endpoint) DownloadSegment(ctx context.Context, req *pb.SegmentDo
 	encryptedKeyNonce, err := storj.NonceFromBytes(segment.EncryptedKeyNonce)
 	if err != nil {
 		endpoint.log.Error("unable to get encryption key nonce from metadata", zap.Error(err))
-		return nil, rpcstatus.Error(rpcstatus.Internal, err.Error())
+		return nil, rpcstatus.Error(rpcstatus.Internal, "unable to get encryption key nonce from metadata")
 	}

 	if segment.Inline() {
 		err := endpoint.orders.UpdateGetInlineOrder(ctx, bucket, int64(len(segment.InlineData)))
 		if err != nil {
 			endpoint.log.Error("internal", zap.Error(err))
-			return nil, rpcstatus.Error(rpcstatus.Internal, err.Error())
+			return nil, rpcstatus.Error(rpcstatus.Internal, "unable to update GET inline order")
 		}

 		endpoint.versionCollector.collectTransferStats(req.Header.UserAgent, download, len(segment.InlineData))
@@ -739,7 +745,7 @@ func (endpoint *Endpoint) DownloadSegment(ctx context.Context, req *pb.SegmentDo
 			)
 		}
 		endpoint.log.Error("internal", zap.Error(err))
-		return nil, rpcstatus.Error(rpcstatus.Internal, err.Error())
+		return nil, rpcstatus.Error(rpcstatus.Internal, "unable to create order limits")
 	}

 	endpoint.versionCollector.collectTransferStats(req.Header.UserAgent, download, int(segment.EncryptedSize))

@@ -594,7 +594,7 @@ func TestRetryBeginSegmentPieces(t *testing.T) {
 		SegmentID: beginSegmentResp.SegmentID,
 		RetryPieceNumbers: []int{0, 1, 2, 3, 4, 5, 6},
 	})
-	rpctest.RequireStatus(t, err, rpcstatus.Internal, "metaclient: not enough nodes: not enough nodes: requested from cache 7, found 2")
+	rpctest.RequireStatus(t, err, rpcstatus.FailedPrecondition, "metaclient: not enough nodes: not enough nodes: requested from cache 7, found 2")

 	// This exchange should succeed.
 	exchangeSegmentPieceOrdersResp, err := metainfoClient.RetryBeginSegmentPieces(ctx, metaclient.RetryBeginSegmentPiecesParams{
@@ -18,6 +18,7 @@ import (
 	"unicode"

+	"github.com/jackc/pgx/v5/pgconn"

 	"storj.io/private/tagsql"
 )
