metainfo: Batch request (#2694)

This commit is contained in:
Michal Niewrzal 2019-08-06 16:56:23 +02:00 committed by GitHub
parent c32fc283c3
commit de7dddbe59
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
8 changed files with 2988 additions and 350 deletions

File diff suppressed because it is too large Load Diff

View File

@ -37,6 +37,8 @@ service Metainfo {
rpc ListSegments(SegmentListRequest) returns (SegmentListResponse); rpc ListSegments(SegmentListRequest) returns (SegmentListResponse);
rpc DownloadSegment(SegmentDownloadRequest) returns (SegmentDownloadResponse); rpc DownloadSegment(SegmentDownloadRequest) returns (SegmentDownloadResponse);
rpc Batch(BatchRequest) returns (BatchResponse);
rpc CreateSegmentOld(SegmentWriteRequestOld) returns (SegmentWriteResponseOld); rpc CreateSegmentOld(SegmentWriteRequestOld) returns (SegmentWriteResponseOld);
rpc CommitSegmentOld(SegmentCommitRequestOld) returns (SegmentCommitResponseOld); rpc CommitSegmentOld(SegmentCommitRequestOld) returns (SegmentCommitResponseOld);
rpc SegmentInfoOld(SegmentInfoRequestOld) returns (SegmentInfoResponseOld); rpc SegmentInfoOld(SegmentInfoRequestOld) returns (SegmentInfoResponseOld);
@ -501,3 +503,65 @@ message SegmentDownloadResponse {
SegmentPosition next = 8; // can be nil SegmentPosition next = 8; // can be nil
} }
// BatchRequest bundles several metainfo requests so a client can send
// them to the satellite in a single round trip.
message BatchRequest {
  // requests are executed sequentially, in the order given here.
  repeated BatchRequestItem requests = 1;
}
// BatchRequestItem wraps exactly one of the supported metainfo request
// messages; the satellite dispatches on which field of the oneof is set.
message BatchRequestItem {
  oneof Request {
    BucketCreateRequest bucket_create = 1;
    BucketGetRequest bucket_get = 2;
    BucketDeleteRequest bucket_delete = 3;
    BucketListRequest bucket_list = 4;
    BucketSetAttributionRequest bucket_set_attribution = 5;
    ObjectBeginRequest object_begin = 6;
    ObjectCommitRequest object_commit = 7;
    ObjectGetRequest object_get = 8;
    ObjectListRequest object_list = 9;
    ObjectBeginDeleteRequest object_begin_delete = 10;
    ObjectFinishDeleteRequest object_finish_delete = 11;
    SegmentBeginRequest segment_begin = 12;
    SegmentCommitRequest segment_commit = 13;
    SegmentMakeInlineRequest segment_make_inline = 14;
    SegmentBeginDeleteRequest segment_begin_delete = 15;
    SegmentFinishDeleteRequest segment_finish_delete = 16;
    SegmentListRequest segment_list = 17;
    SegmentDownloadRequest segment_download = 18;
  }
}
// BatchResponse carries one response per executed request of the
// corresponding BatchRequest, in the same order.
message BatchResponse {
  repeated BatchResponseItem responses = 1;
}
// BatchResponseItem wraps exactly one of the supported metainfo response
// messages; the set field mirrors the request kind it answers.
// Field numbers match the request oneof in BatchRequestItem.
message BatchResponseItem {
  oneof Response {
    BucketCreateResponse bucket_create = 1;
    BucketGetResponse bucket_get = 2;
    BucketDeleteResponse bucket_delete = 3;
    BucketListResponse bucket_list = 4;
    BucketSetAttributionResponse bucket_set_attribution = 5;
    ObjectBeginResponse object_begin = 6;
    ObjectCommitResponse object_commit = 7;
    ObjectGetResponse object_get = 8;
    ObjectListResponse object_list = 9;
    ObjectBeginDeleteResponse object_begin_delete = 10;
    ObjectFinishDeleteResponse object_finish_delete = 11;
    SegmentBeginResponse segment_begin = 12;
    SegmentCommitResponse segment_commit = 13;
    SegmentMakeInlineResponse segment_make_inline = 14;
    SegmentBeginDeleteResponse segment_begin_delete = 15;
    SegmentFinishDeleteResponse segment_finish_delete = 16;
    SegmentListResponse segment_list = 17;
    SegmentDownloadResponse segment_download = 18;
  }
}

View File

@ -3379,6 +3379,218 @@
"type": "SegmentPosition" "type": "SegmentPosition"
} }
] ]
},
{
"name": "BatchRequest",
"fields": [
{
"id": 1,
"name": "requests",
"type": "BatchRequestItem",
"is_repeated": true
}
]
},
{
"name": "BatchRequestItem",
"fields": [
{
"id": 1,
"name": "bucket_create",
"type": "BucketCreateRequest"
},
{
"id": 2,
"name": "bucket_get",
"type": "BucketGetRequest"
},
{
"id": 3,
"name": "bucket_delete",
"type": "BucketDeleteRequest"
},
{
"id": 4,
"name": "bucket_list",
"type": "BucketListRequest"
},
{
"id": 5,
"name": "bucket_set_attribution",
"type": "BucketSetAttributionRequest"
},
{
"id": 6,
"name": "object_begin",
"type": "ObjectBeginRequest"
},
{
"id": 7,
"name": "object_commit",
"type": "ObjectCommitRequest"
},
{
"id": 8,
"name": "object_get",
"type": "ObjectGetRequest"
},
{
"id": 9,
"name": "object_list",
"type": "ObjectListRequest"
},
{
"id": 10,
"name": "object_begin_delete",
"type": "ObjectBeginDeleteRequest"
},
{
"id": 11,
"name": "object_finish_delete",
"type": "ObjectFinishDeleteRequest"
},
{
"id": 12,
"name": "segment_begin",
"type": "SegmentBeginRequest"
},
{
"id": 13,
"name": "segment_commit",
"type": "SegmentCommitRequest"
},
{
"id": 14,
"name": "segment_make_inline",
"type": "SegmentMakeInlineRequest"
},
{
"id": 15,
"name": "segment_begin_delete",
"type": "SegmentBeginDeleteRequest"
},
{
"id": 16,
"name": "segment_finish_delete",
"type": "SegmentFinishDeleteRequest"
},
{
"id": 17,
"name": "segment_list",
"type": "SegmentListRequest"
},
{
"id": 18,
"name": "segment_download",
"type": "SegmentDownloadRequest"
}
]
},
{
"name": "BatchResponse",
"fields": [
{
"id": 1,
"name": "responses",
"type": "BatchResponseItem",
"is_repeated": true
}
]
},
{
"name": "BatchResponseItem",
"fields": [
{
"id": 1,
"name": "bucket_create",
"type": "BucketCreateResponse"
},
{
"id": 2,
"name": "bucket_get",
"type": "BucketGetResponse"
},
{
"id": 3,
"name": "bucket_delete",
"type": "BucketDeleteResponse"
},
{
"id": 4,
"name": "bucket_list",
"type": "BucketListResponse"
},
{
"id": 5,
"name": "bucket_set_attribution",
"type": "BucketSetAttributionResponse"
},
{
"id": 6,
"name": "object_begin",
"type": "ObjectBeginResponse"
},
{
"id": 7,
"name": "object_commit",
"type": "ObjectCommitResponse"
},
{
"id": 8,
"name": "object_get",
"type": "ObjectGetResponse"
},
{
"id": 9,
"name": "object_list",
"type": "ObjectListResponse"
},
{
"id": 10,
"name": "object_begin_delete",
"type": "ObjectBeginDeleteResponse"
},
{
"id": 11,
"name": "object_finish_delete",
"type": "ObjectFinishDeleteResponse"
},
{
"id": 12,
"name": "segment_begin",
"type": "SegmentBeginResponse"
},
{
"id": 13,
"name": "segment_commit",
"type": "SegmentCommitResponse"
},
{
"id": 14,
"name": "segment_make_inline",
"type": "SegmentMakeInlineResponse"
},
{
"id": 15,
"name": "segment_begin_delete",
"type": "SegmentBeginDeleteResponse"
},
{
"id": 16,
"name": "segment_finish_delete",
"type": "SegmentFinishDeleteResponse"
},
{
"id": 17,
"name": "segment_list",
"type": "SegmentListResponse"
},
{
"id": 18,
"name": "segment_download",
"type": "SegmentDownloadResponse"
}
]
} }
], ],
"services": [ "services": [
@ -3475,6 +3687,11 @@
"in_type": "SegmentDownloadRequest", "in_type": "SegmentDownloadRequest",
"out_type": "SegmentDownloadResponse" "out_type": "SegmentDownloadResponse"
}, },
{
"name": "Batch",
"in_type": "BatchRequest",
"out_type": "BatchResponse"
},
{ {
"name": "CreateSegmentOld", "name": "CreateSegmentOld",
"in_type": "SegmentWriteRequestOld", "in_type": "SegmentWriteRequestOld",

215
satellite/metainfo/batch.go Normal file
View File

@ -0,0 +1,215 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.
package metainfo
import (
"context"
"github.com/zeebo/errs"
"storj.io/storj/pkg/pb"
)
// Batch executes every request of a BatchRequest sequentially, in order,
// by delegating each item to the matching single-request endpoint method,
// and collects the results into one BatchResponse.
//
// Execution stops at the first failing request: the error is returned
// together with the responses accumulated so far, so the caller can tell
// how far the batch progressed. An unrecognized request type aborts the
// whole batch with a nil response.
func (endpoint *Endpoint) Batch(ctx context.Context, req *pb.BatchRequest) (resp *pb.BatchResponse, err error) {
	defer mon.Task()(&ctx)(&err)

	resp = &pb.BatchResponse{}

	// Pre-size for the success case: one response per request.
	resp.Responses = make([]*pb.BatchResponseItem, 0, len(req.Requests))

	// TODO find a way to pass some parameters between request -> response > request
	// TODO maybe use reflection to shrink code
	for _, request := range req.Requests {
		// Dispatch on the concrete oneof wrapper type of the request item.
		switch singleRequest := request.Request.(type) {
		// BUCKET
		case *pb.BatchRequestItem_BucketCreate:
			response, err := endpoint.CreateBucket(ctx, singleRequest.BucketCreate)
			if err != nil {
				return resp, err
			}
			resp.Responses = append(resp.Responses, &pb.BatchResponseItem{
				Response: &pb.BatchResponseItem_BucketCreate{
					BucketCreate: response,
				},
			})
		case *pb.BatchRequestItem_BucketGet:
			response, err := endpoint.GetBucket(ctx, singleRequest.BucketGet)
			if err != nil {
				return resp, err
			}
			resp.Responses = append(resp.Responses, &pb.BatchResponseItem{
				Response: &pb.BatchResponseItem_BucketGet{
					BucketGet: response,
				},
			})
		case *pb.BatchRequestItem_BucketDelete:
			response, err := endpoint.DeleteBucket(ctx, singleRequest.BucketDelete)
			if err != nil {
				return resp, err
			}
			resp.Responses = append(resp.Responses, &pb.BatchResponseItem{
				Response: &pb.BatchResponseItem_BucketDelete{
					BucketDelete: response,
				},
			})
		case *pb.BatchRequestItem_BucketList:
			response, err := endpoint.ListBuckets(ctx, singleRequest.BucketList)
			if err != nil {
				return resp, err
			}
			resp.Responses = append(resp.Responses, &pb.BatchResponseItem{
				Response: &pb.BatchResponseItem_BucketList{
					BucketList: response,
				},
			})
		case *pb.BatchRequestItem_BucketSetAttribution:
			response, err := endpoint.SetBucketAttribution(ctx, singleRequest.BucketSetAttribution)
			if err != nil {
				return resp, err
			}
			resp.Responses = append(resp.Responses, &pb.BatchResponseItem{
				Response: &pb.BatchResponseItem_BucketSetAttribution{
					BucketSetAttribution: response,
				},
			})
		//OBJECT
		case *pb.BatchRequestItem_ObjectBegin:
			response, err := endpoint.BeginObject(ctx, singleRequest.ObjectBegin)
			if err != nil {
				return resp, err
			}
			resp.Responses = append(resp.Responses, &pb.BatchResponseItem{
				Response: &pb.BatchResponseItem_ObjectBegin{
					ObjectBegin: response,
				},
			})
		case *pb.BatchRequestItem_ObjectCommit:
			response, err := endpoint.CommitObject(ctx, singleRequest.ObjectCommit)
			if err != nil {
				return resp, err
			}
			resp.Responses = append(resp.Responses, &pb.BatchResponseItem{
				Response: &pb.BatchResponseItem_ObjectCommit{
					ObjectCommit: response,
				},
			})
		case *pb.BatchRequestItem_ObjectGet:
			response, err := endpoint.GetObject(ctx, singleRequest.ObjectGet)
			if err != nil {
				return resp, err
			}
			resp.Responses = append(resp.Responses, &pb.BatchResponseItem{
				Response: &pb.BatchResponseItem_ObjectGet{
					ObjectGet: response,
				},
			})
		case *pb.BatchRequestItem_ObjectList:
			response, err := endpoint.ListObjects(ctx, singleRequest.ObjectList)
			if err != nil {
				return resp, err
			}
			resp.Responses = append(resp.Responses, &pb.BatchResponseItem{
				Response: &pb.BatchResponseItem_ObjectList{
					ObjectList: response,
				},
			})
		case *pb.BatchRequestItem_ObjectBeginDelete:
			response, err := endpoint.BeginDeleteObject(ctx, singleRequest.ObjectBeginDelete)
			if err != nil {
				return resp, err
			}
			resp.Responses = append(resp.Responses, &pb.BatchResponseItem{
				Response: &pb.BatchResponseItem_ObjectBeginDelete{
					ObjectBeginDelete: response,
				},
			})
		case *pb.BatchRequestItem_ObjectFinishDelete:
			response, err := endpoint.FinishDeleteObject(ctx, singleRequest.ObjectFinishDelete)
			if err != nil {
				return resp, err
			}
			resp.Responses = append(resp.Responses, &pb.BatchResponseItem{
				Response: &pb.BatchResponseItem_ObjectFinishDelete{
					ObjectFinishDelete: response,
				},
			})
		// SEGMENT
		case *pb.BatchRequestItem_SegmentBegin:
			response, err := endpoint.BeginSegment(ctx, singleRequest.SegmentBegin)
			if err != nil {
				return resp, err
			}
			resp.Responses = append(resp.Responses, &pb.BatchResponseItem{
				Response: &pb.BatchResponseItem_SegmentBegin{
					SegmentBegin: response,
				},
			})
		case *pb.BatchRequestItem_SegmentCommit:
			response, err := endpoint.CommitSegment(ctx, singleRequest.SegmentCommit)
			if err != nil {
				return resp, err
			}
			resp.Responses = append(resp.Responses, &pb.BatchResponseItem{
				Response: &pb.BatchResponseItem_SegmentCommit{
					SegmentCommit: response,
				},
			})
		case *pb.BatchRequestItem_SegmentList:
			response, err := endpoint.ListSegments(ctx, singleRequest.SegmentList)
			if err != nil {
				return resp, err
			}
			resp.Responses = append(resp.Responses, &pb.BatchResponseItem{
				Response: &pb.BatchResponseItem_SegmentList{
					SegmentList: response,
				},
			})
		case *pb.BatchRequestItem_SegmentMakeInline:
			response, err := endpoint.MakeInlineSegment(ctx, singleRequest.SegmentMakeInline)
			if err != nil {
				return resp, err
			}
			resp.Responses = append(resp.Responses, &pb.BatchResponseItem{
				Response: &pb.BatchResponseItem_SegmentMakeInline{
					SegmentMakeInline: response,
				},
			})
		case *pb.BatchRequestItem_SegmentDownload:
			response, err := endpoint.DownloadSegment(ctx, singleRequest.SegmentDownload)
			if err != nil {
				return resp, err
			}
			resp.Responses = append(resp.Responses, &pb.BatchResponseItem{
				Response: &pb.BatchResponseItem_SegmentDownload{
					SegmentDownload: response,
				},
			})
		case *pb.BatchRequestItem_SegmentBeginDelete:
			response, err := endpoint.BeginDeleteSegment(ctx, singleRequest.SegmentBeginDelete)
			if err != nil {
				return resp, err
			}
			resp.Responses = append(resp.Responses, &pb.BatchResponseItem{
				Response: &pb.BatchResponseItem_SegmentBeginDelete{
					SegmentBeginDelete: response,
				},
			})
		case *pb.BatchRequestItem_SegmentFinishDelete:
			response, err := endpoint.FinishDeleteSegment(ctx, singleRequest.SegmentFinishDelete)
			if err != nil {
				return resp, err
			}
			resp.Responses = append(resp.Responses, &pb.BatchResponseItem{
				Response: &pb.BatchResponseItem_SegmentFinishDelete{
					SegmentFinishDelete: response,
				},
			})
		default:
			// Unknown oneof variant: abort the whole batch.
			return nil, errs.New("unsupported request type")
		}
	}

	return resp, nil
}

View File

@ -1206,7 +1206,9 @@ func TestInlineSegment(t *testing.T) {
require.NoError(t, err) require.NoError(t, err)
require.Empty(t, items) require.Empty(t, items)
err = metainfoClient.FinishDeleteObject(ctx, streamID) err = metainfoClient.FinishDeleteObject(ctx, metainfo.FinishDeleteObjectParams{
StreamID: streamID,
})
require.NoError(t, err) require.NoError(t, err)
} }
}) })
@ -1293,7 +1295,9 @@ func TestRemoteSegment(t *testing.T) {
require.NoError(t, err) require.NoError(t, err)
} }
err = metainfoClient.FinishDeleteObject(ctx, streamID) err = metainfoClient.FinishDeleteObject(ctx, metainfo.FinishDeleteObjectParams{
StreamID: streamID,
})
require.NoError(t, err) require.NoError(t, err)
items, _, err = metainfoClient.ListObjects(ctx, metainfo.ListObjectsParams{ items, _, err = metainfoClient.ListObjects(ctx, metainfo.ListObjectsParams{
@ -1368,3 +1372,117 @@ func TestIDs(t *testing.T) {
} }
}) })
} }
// TestBatch is an integration test for the metainfo Batch RPC. It spins up a
// one-satellite testplanet and exercises two scenarios: (1) creating several
// buckets plus a bucket listing in one batch, and (2) creating an object with
// inline segments, committing and listing it in one batch, then downloading
// all inline segments in a second batch.
func TestBatch(t *testing.T) {
	testplanet.Run(t, testplanet.Config{
		SatelliteCount: 1, StorageNodeCount: 0, UplinkCount: 1,
	}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
		apiKey := planet.Uplinks[0].APIKey[planet.Satellites[0].ID()]
		metainfoClient, err := planet.Uplinks[0].DialMetainfo(ctx, planet.Satellites[0], apiKey)
		require.NoError(t, err)
		defer ctx.Check(metainfoClient.Close)

		{ // create few buckets and list them in one batch
			requests := make([]metainfo.BatchItem, 0)
			numOfBuckets := 5
			for i := 0; i < numOfBuckets; i++ {
				requests = append(requests, &metainfo.CreateBucketParams{
					Name:                []byte("test-bucket-" + strconv.Itoa(i)),
					PathCipher:          storj.EncAESGCM,
					DefaultSegmentsSize: memory.MiB.Int64(),
				})
			}
			// Final item lists the buckets that the preceding items created.
			requests = append(requests, &metainfo.ListBucketsParams{
				ListOpts: storj.BucketListOptions{
					Cursor:    "",
					Direction: storj.After,
				},
			})
			responses, err := metainfoClient.Batch(ctx, requests...)
			require.NoError(t, err)
			require.Equal(t, numOfBuckets+1, len(responses))

			for i := 0; i < numOfBuckets; i++ {
				response, err := responses[i].CreateBucket()
				require.NoError(t, err)
				require.Equal(t, "test-bucket-"+strconv.Itoa(i), response.Bucket.Name)

				// Accessing the same response with the wrong accessor must fail.
				_, err = responses[i].GetBucket()
				require.Error(t, err)
			}

			bucketsListResp, err := responses[numOfBuckets].ListBuckets()
			require.NoError(t, err)
			require.Equal(t, numOfBuckets, len(bucketsListResp.BucketList.Items))
		}
		{ // create bucket, object, upload inline segments in batch, download inline segments in batch
			err := planet.Uplinks[0].CreateBucket(ctx, planet.Satellites[0], "second-test-bucket")
			require.NoError(t, err)

			streamID, err := metainfoClient.BeginObject(ctx, metainfo.BeginObjectParams{
				Bucket:        []byte("second-test-bucket"),
				EncryptedPath: []byte("encrypted-path"),
			})
			require.NoError(t, err)
			requests := make([]metainfo.BatchItem, 0)
			numOfSegments := 10
			expectedData := make([][]byte, numOfSegments)
			for i := 0; i < numOfSegments; i++ {
				expectedData[i] = testrand.Bytes(memory.KiB)

				requests = append(requests, &metainfo.MakeInlineSegmentParams{
					StreamID: streamID,
					Position: storj.SegmentPosition{
						Index: int32(i),
					},
					EncryptedInlineData: expectedData[i],
				})
			}

			// After uploading the segments: commit, list segments, fetch object info.
			requests = append(requests, &metainfo.CommitObjectParams{
				StreamID: streamID,
			})
			requests = append(requests, &metainfo.ListSegmentsParams{
				StreamID: streamID,
			})
			requests = append(requests, &metainfo.GetObjectParams{
				Bucket:        []byte("second-test-bucket"),
				EncryptedPath: []byte("encrypted-path"),
			})

			responses, err := metainfoClient.Batch(ctx, requests...)
			require.NoError(t, err)
			require.Equal(t, numOfSegments+3, len(responses))

			listResponse, err := responses[numOfSegments+1].ListSegment()
			require.NoError(t, err)
			require.Equal(t, numOfSegments, len(listResponse.Items))

			getResponse, err := responses[numOfSegments+2].GetObject()
			require.NoError(t, err)

			// Second batch: download every listed segment and compare payloads.
			requests = make([]metainfo.BatchItem, 0)
			for _, segment := range listResponse.Items {
				requests = append(requests, &metainfo.DownloadSegmentParams{
					StreamID: getResponse.Info.StreamID,
					Position: segment.Position,
				})
			}
			responses, err = metainfoClient.Batch(ctx, requests...)
			require.NoError(t, err)
			require.Equal(t, len(listResponse.Items), len(responses))

			for i, response := range responses {
				downloadResponse, err := response.DownloadSegment()
				require.NoError(t, err)

				require.Equal(t, expectedData[i], downloadResponse.Info.EncryptedInlineData)
			}
		}
	})
}

142
uplink/metainfo/batch.go Normal file
View File

@ -0,0 +1,142 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.
package metainfo
import (
"github.com/zeebo/errs"
"storj.io/storj/pkg/pb"
)
var (
	// ErrInvalidType error for invalid response type casting.
	ErrInvalidType = errs.New("invalid response type")
)

// BatchItem represents single request in batch.
type BatchItem interface {
	// BatchItem converts the request parameters into the protobuf
	// wrapper that can be placed into a pb.BatchRequest.
	BatchItem() *pb.BatchRequestItem
}

// BatchResponse single response from batch call.
type BatchResponse struct {
	pbRequest  interface{} // the pb.BatchRequestItem_* wrapper that produced this response
	pbResponse interface{} // the pb.BatchResponseItem_* wrapper received from the satellite
}
// CreateBucket interprets this batch entry as the result of a CreateBucket
// request. It returns ErrInvalidType when the entry holds a different
// response kind.
func (resp *BatchResponse) CreateBucket() (CreateBucketResponse, error) {
	if wrapped, ok := resp.pbResponse.(*pb.BatchResponseItem_BucketCreate); ok {
		return newCreateBucketResponse(wrapped.BucketCreate)
	}
	return CreateBucketResponse{}, ErrInvalidType
}
// GetBucket interprets this batch entry as the result of a GetBucket
// request. It returns ErrInvalidType when the entry holds a different
// response kind.
func (resp *BatchResponse) GetBucket() (GetBucketResponse, error) {
	if wrapped, ok := resp.pbResponse.(*pb.BatchResponseItem_BucketGet); ok {
		return newGetBucketResponse(wrapped.BucketGet)
	}
	return GetBucketResponse{}, ErrInvalidType
}
// ListBuckets interprets this batch entry as the result of a ListBuckets
// request. It returns ErrInvalidType when the entry holds a different
// response kind.
func (resp *BatchResponse) ListBuckets() (ListBucketsResponse, error) {
	if wrapped, ok := resp.pbResponse.(*pb.BatchResponseItem_BucketList); ok {
		return newListBucketsResponse(wrapped.BucketList), nil
	}
	return ListBucketsResponse{}, ErrInvalidType
}
// BeginObject interprets this batch entry as the result of a BeginObject
// request. It returns ErrInvalidType when the entry holds a different
// response kind.
func (resp *BatchResponse) BeginObject() (BeginObjectResponse, error) {
	if wrapped, ok := resp.pbResponse.(*pb.BatchResponseItem_ObjectBegin); ok {
		return newBeginObjectResponse(wrapped.ObjectBegin), nil
	}
	return BeginObjectResponse{}, ErrInvalidType
}
// BeginDeleteObject interprets this batch entry as the result of a
// BeginDeleteObject request. It returns ErrInvalidType when the entry
// holds a different response kind.
func (resp *BatchResponse) BeginDeleteObject() (BeginDeleteObjectResponse, error) {
	if wrapped, ok := resp.pbResponse.(*pb.BatchResponseItem_ObjectBeginDelete); ok {
		return newBeginDeleteObjectResponse(wrapped.ObjectBeginDelete), nil
	}
	return BeginDeleteObjectResponse{}, ErrInvalidType
}
// GetObject interprets this batch entry as the result of a GetObject
// request. It returns ErrInvalidType when the entry holds a different
// response kind.
func (resp *BatchResponse) GetObject() (GetObjectResponse, error) {
	if wrapped, ok := resp.pbResponse.(*pb.BatchResponseItem_ObjectGet); ok {
		return newGetObjectResponse(wrapped.ObjectGet), nil
	}
	return GetObjectResponse{}, ErrInvalidType
}
// ListObjects interprets this batch entry as the result of a ListObjects
// request. Both the stored response and the originating request must be of
// the object-list kind (the request supplies the prefix/recursive flags
// needed to decode the listing); otherwise ErrInvalidType is returned.
func (resp *BatchResponse) ListObjects() (ListObjectsResponse, error) {
	response, ok := resp.pbResponse.(*pb.BatchResponseItem_ObjectList)
	if !ok {
		return ListObjectsResponse{}, ErrInvalidType
	}

	request, ok := resp.pbRequest.(*pb.BatchRequestItem_ObjectList)
	if !ok {
		return ListObjectsResponse{}, ErrInvalidType
	}

	return newListObjectsResponse(response.ObjectList, request.ObjectList.EncryptedPrefix, request.ObjectList.Recursive), nil
}
// BeginSegment interprets this batch entry as the result of a BeginSegment
// request. It returns ErrInvalidType when the entry holds a different
// response kind.
func (resp *BatchResponse) BeginSegment() (BeginSegmentResponse, error) {
	if wrapped, ok := resp.pbResponse.(*pb.BatchResponseItem_SegmentBegin); ok {
		return newBeginSegmentResponse(wrapped.SegmentBegin), nil
	}
	return BeginSegmentResponse{}, ErrInvalidType
}
// BeginDeleteSegment interprets this batch entry as the result of a
// BeginDeleteSegment request. It returns ErrInvalidType when the entry
// holds a different response kind.
func (resp *BatchResponse) BeginDeleteSegment() (BeginDeleteSegmentResponse, error) {
	if wrapped, ok := resp.pbResponse.(*pb.BatchResponseItem_SegmentBeginDelete); ok {
		return newBeginDeleteSegmentResponse(wrapped.SegmentBeginDelete), nil
	}
	return BeginDeleteSegmentResponse{}, ErrInvalidType
}
// ListSegment interprets this batch entry as the result of a ListSegments
// request. It returns ErrInvalidType when the entry holds a different
// response kind.
func (resp *BatchResponse) ListSegment() (ListSegmentsResponse, error) {
	if wrapped, ok := resp.pbResponse.(*pb.BatchResponseItem_SegmentList); ok {
		return newListSegmentsResponse(wrapped.SegmentList), nil
	}
	return ListSegmentsResponse{}, ErrInvalidType
}
// DownloadSegment interprets this batch entry as the result of a
// DownloadSegment request. It returns ErrInvalidType when the entry
// holds a different response kind.
func (resp *BatchResponse) DownloadSegment() (DownloadSegmentResponse, error) {
	if wrapped, ok := resp.pbResponse.(*pb.BatchResponseItem_SegmentDownload); ok {
		return newDownloadSegmentResponse(wrapped.SegmentDownload), nil
	}
	return DownloadSegmentResponse{}, ErrInvalidType
}

View File

@ -235,29 +235,120 @@ func (client *Client) GetProjectInfo(ctx context.Context) (resp *pb.ProjectInfoR
return client.client.ProjectInfo(ctx, &pb.ProjectInfoRequest{}) return client.client.ProjectInfo(ctx, &pb.ProjectInfoRequest{})
} }
// CreateBucket creates a new bucket // CreateBucketParams parameters for CreateBucket method
func (client *Client) CreateBucket(ctx context.Context, bucket storj.Bucket) (respBucket storj.Bucket, err error) { type CreateBucketParams struct {
defer mon.Task()(&ctx)(&err) Name []byte
req, err := convertBucketToProtoRequest(bucket) PathCipher storj.CipherSuite
if err != nil { PartnerID []byte
return respBucket, Error.Wrap(err) DefaultSegmentsSize int64
DefaultRedundancyScheme storj.RedundancyScheme
DefaultEncryptionParameters storj.EncryptionParameters
}
func (params *CreateBucketParams) toRequest() *pb.BucketCreateRequest {
defaultRS := params.DefaultRedundancyScheme
defaultEP := params.DefaultEncryptionParameters
return &pb.BucketCreateRequest{
Name: params.Name,
PathCipher: pb.CipherSuite(params.PathCipher),
PartnerId: params.PartnerID,
DefaultSegmentSize: params.DefaultSegmentsSize,
DefaultRedundancyScheme: &pb.RedundancyScheme{
Type: pb.RedundancyScheme_SchemeType(defaultRS.Algorithm),
MinReq: int32(defaultRS.RequiredShares),
Total: int32(defaultRS.TotalShares),
RepairThreshold: int32(defaultRS.RepairShares),
SuccessThreshold: int32(defaultRS.OptimalShares),
ErasureShareSize: defaultRS.ShareSize,
},
DefaultEncryptionParameters: &pb.EncryptionParameters{
CipherSuite: pb.CipherSuite(defaultEP.CipherSuite),
BlockSize: int64(defaultEP.BlockSize),
},
} }
resp, err := client.client.CreateBucket(ctx, &req) }
// BatchItem returns single item for batch request
func (params *CreateBucketParams) BatchItem() *pb.BatchRequestItem {
return &pb.BatchRequestItem{
Request: &pb.BatchRequestItem_BucketCreate{
BucketCreate: params.toRequest(),
},
}
}
// TODO potential names *Response/*Out/*Result
// CreateBucketResponse response for CreateBucket request
type CreateBucketResponse struct {
Bucket storj.Bucket
}
func newCreateBucketResponse(response *pb.BucketCreateResponse) (CreateBucketResponse, error) {
bucket, err := convertProtoToBucket(response.Bucket)
if err != nil {
return CreateBucketResponse{}, err
}
return CreateBucketResponse{
Bucket: bucket,
}, nil
}
// CreateBucket creates a new bucket
func (client *Client) CreateBucket(ctx context.Context, params CreateBucketParams) (respBucket storj.Bucket, err error) {
defer mon.Task()(&ctx)(&err)
response, err := client.client.CreateBucket(ctx, params.toRequest())
if err != nil { if err != nil {
return storj.Bucket{}, Error.Wrap(err) return storj.Bucket{}, Error.Wrap(err)
} }
respBucket, err = convertProtoToBucket(resp.Bucket) respBucket, err = convertProtoToBucket(response.Bucket)
if err != nil { if err != nil {
return respBucket, Error.Wrap(err) return storj.Bucket{}, Error.Wrap(err)
} }
return respBucket, nil return respBucket, nil
} }
// GetBucketParams parameters for GetBucket method
type GetBucketParams struct {
Name []byte
}
func (params *GetBucketParams) toRequest() *pb.BucketGetRequest {
return &pb.BucketGetRequest{Name: params.Name}
}
// BatchItem returns single item for batch request
func (params *GetBucketParams) BatchItem() *pb.BatchRequestItem {
return &pb.BatchRequestItem{
Request: &pb.BatchRequestItem_BucketGet{
BucketGet: params.toRequest(),
},
}
}
// GetBucketResponse response for GetBucket request
type GetBucketResponse struct {
Bucket storj.Bucket
}
func newGetBucketResponse(response *pb.BucketGetResponse) (GetBucketResponse, error) {
bucket, err := convertProtoToBucket(response.Bucket)
if err != nil {
return GetBucketResponse{}, err
}
return GetBucketResponse{
Bucket: bucket,
}, nil
}
// GetBucket returns a bucket // GetBucket returns a bucket
func (client *Client) GetBucket(ctx context.Context, bucketName string) (respBucket storj.Bucket, err error) { func (client *Client) GetBucket(ctx context.Context, params GetBucketParams) (respBucket storj.Bucket, err error) {
defer mon.Task()(&ctx)(&err) defer mon.Task()(&ctx)(&err)
resp, err := client.client.GetBucket(ctx, &pb.BucketGetRequest{Name: []byte(bucketName)})
resp, err := client.client.GetBucket(ctx, params.toRequest())
if err != nil { if err != nil {
if status.Code(err) == codes.NotFound { if status.Code(err) == codes.NotFound {
return storj.Bucket{}, storj.ErrBucketNotFound.Wrap(err) return storj.Bucket{}, storj.ErrBucketNotFound.Wrap(err)
@ -267,15 +358,33 @@ func (client *Client) GetBucket(ctx context.Context, bucketName string) (respBuc
respBucket, err = convertProtoToBucket(resp.Bucket) respBucket, err = convertProtoToBucket(resp.Bucket)
if err != nil { if err != nil {
return respBucket, Error.Wrap(err) return storj.Bucket{}, Error.Wrap(err)
} }
return respBucket, nil return respBucket, nil
} }
// DeleteBucketParams parameters for DeleteBucket method
type DeleteBucketParams struct {
Name []byte
}
func (params *DeleteBucketParams) toRequest() *pb.BucketDeleteRequest {
return &pb.BucketDeleteRequest{Name: params.Name}
}
// BatchItem returns single item for batch request
func (params *DeleteBucketParams) BatchItem() *pb.BatchRequestItem {
return &pb.BatchRequestItem{
Request: &pb.BatchRequestItem_BucketDelete{
BucketDelete: params.toRequest(),
},
}
}
// DeleteBucket deletes a bucket // DeleteBucket deletes a bucket
func (client *Client) DeleteBucket(ctx context.Context, bucketName string) (err error) { func (client *Client) DeleteBucket(ctx context.Context, params DeleteBucketParams) (err error) {
defer mon.Task()(&ctx)(&err) defer mon.Task()(&ctx)(&err)
_, err = client.client.DeleteBucket(ctx, &pb.BucketDeleteRequest{Name: []byte(bucketName)}) _, err = client.client.DeleteBucket(ctx, params.toRequest())
if err != nil { if err != nil {
if status.Code(err) == codes.NotFound { if status.Code(err) == codes.NotFound {
return storj.ErrBucketNotFound.Wrap(err) return storj.ErrBucketNotFound.Wrap(err)
@ -285,15 +394,54 @@ func (client *Client) DeleteBucket(ctx context.Context, bucketName string) (err
return nil return nil
} }
// ListBuckets lists buckets // ListBucketsParams parmaters for ListBucketsParams method
func (client *Client) ListBuckets(ctx context.Context, listOpts storj.BucketListOptions) (_ storj.BucketList, err error) { type ListBucketsParams struct {
defer mon.Task()(&ctx)(&err) ListOpts storj.BucketListOptions
req := &pb.BucketListRequest{ }
Cursor: []byte(listOpts.Cursor),
Limit: int32(listOpts.Limit), func (params *ListBucketsParams) toRequest() *pb.BucketListRequest {
Direction: int32(listOpts.Direction), return &pb.BucketListRequest{
Cursor: []byte(params.ListOpts.Cursor),
Limit: int32(params.ListOpts.Limit),
Direction: int32(params.ListOpts.Direction),
} }
resp, err := client.client.ListBuckets(ctx, req) }
// BatchItem returns single item for batch request
func (params *ListBucketsParams) BatchItem() *pb.BatchRequestItem {
return &pb.BatchRequestItem{
Request: &pb.BatchRequestItem_BucketList{
BucketList: params.toRequest(),
},
}
}
// ListBucketsResponse response for ListBucket request
type ListBucketsResponse struct {
BucketList storj.BucketList
}
func newListBucketsResponse(response *pb.BucketListResponse) ListBucketsResponse {
bucketList := storj.BucketList{
More: response.More,
}
bucketList.Items = make([]storj.Bucket, len(response.Items))
for i, item := range response.GetItems() {
bucketList.Items[i] = storj.Bucket{
Name: string(item.Name),
Created: item.CreatedAt,
}
}
return ListBucketsResponse{
BucketList: bucketList,
}
}
// ListBuckets lists buckets
func (client *Client) ListBuckets(ctx context.Context, params ListBucketsParams) (_ storj.BucketList, err error) {
defer mon.Task()(&ctx)(&err)
resp, err := client.client.ListBuckets(ctx, params.toRequest())
if err != nil { if err != nil {
return storj.BucketList{}, Error.Wrap(err) return storj.BucketList{}, Error.Wrap(err)
} }
@ -310,32 +458,6 @@ func (client *Client) ListBuckets(ctx context.Context, listOpts storj.BucketList
return resultBucketList, nil return resultBucketList, nil
} }
func convertBucketToProtoRequest(bucket storj.Bucket) (bucketReq pb.BucketCreateRequest, err error) {
rs := bucket.DefaultRedundancyScheme
partnerID, err := bucket.PartnerID.MarshalJSON()
if err != nil {
return bucketReq, Error.Wrap(err)
}
return pb.BucketCreateRequest{
Name: []byte(bucket.Name),
PathCipher: pb.CipherSuite(bucket.PathCipher),
PartnerId: partnerID,
DefaultSegmentSize: bucket.DefaultSegmentsSize,
DefaultRedundancyScheme: &pb.RedundancyScheme{
Type: pb.RedundancyScheme_SchemeType(rs.Algorithm),
MinReq: int32(rs.RequiredShares),
Total: int32(rs.TotalShares),
RepairThreshold: int32(rs.RepairShares),
SuccessThreshold: int32(rs.OptimalShares),
ErasureShareSize: rs.ShareSize,
},
DefaultEncryptionParameters: &pb.EncryptionParameters{
CipherSuite: pb.CipherSuite(bucket.DefaultEncryptionParameters.CipherSuite),
BlockSize: int64(bucket.DefaultEncryptionParameters.BlockSize),
},
}, nil
}
func convertProtoToBucket(pbBucket *pb.Bucket) (bucket storj.Bucket, err error) { func convertProtoToBucket(pbBucket *pb.Bucket) (bucket storj.Bucket, err error) {
defaultRS := pbBucket.GetDefaultRedundancyScheme() defaultRS := pbBucket.GetDefaultRedundancyScheme()
defaultEP := pbBucket.GetDefaultEncryptionParameters() defaultEP := pbBucket.GetDefaultEncryptionParameters()
@ -365,6 +487,37 @@ func convertProtoToBucket(pbBucket *pb.Bucket) (bucket storj.Bucket, err error)
}, nil }, nil
} }
// SetBucketAttributionParams parameters for SetBucketAttribution method
type SetBucketAttributionParams struct {
Bucket string
PartnerID uuid.UUID
}
func (params *SetBucketAttributionParams) toRequest() *pb.BucketSetAttributionRequest {
return &pb.BucketSetAttributionRequest{
Name: []byte(params.Bucket),
PartnerId: params.PartnerID[:],
}
}
// BatchItem returns single item for batch request
func (params *SetBucketAttributionParams) BatchItem() *pb.BatchRequestItem {
return &pb.BatchRequestItem{
Request: &pb.BatchRequestItem_BucketSetAttribution{
BucketSetAttribution: params.toRequest(),
},
}
}
// SetBucketAttribution tries to set the attribution information on the bucket.
func (client *Client) SetBucketAttribution(ctx context.Context, params SetBucketAttributionParams) (err error) {
defer mon.Task()(&ctx)(&err)
_, err = client.client.SetBucketAttribution(ctx, params.toRequest())
return Error.Wrap(err)
}
// BeginObjectParams parmaters for BeginObject method // BeginObjectParams parmaters for BeginObject method
type BeginObjectParams struct { type BeginObjectParams struct {
Bucket []byte Bucket []byte
@ -375,11 +528,8 @@ type BeginObjectParams struct {
ExpiresAt time.Time ExpiresAt time.Time
} }
// BeginObject begins object creation func (params *BeginObjectParams) toRequest() *pb.ObjectBeginRequest {
func (client *Client) BeginObject(ctx context.Context, params BeginObjectParams) (_ storj.StreamID, err error) { return &pb.ObjectBeginRequest{
defer mon.Task()(&ctx)(&err)
response, err := client.client.BeginObject(ctx, &pb.ObjectBeginRequest{
Bucket: params.Bucket, Bucket: params.Bucket,
EncryptedPath: params.EncryptedPath, EncryptedPath: params.EncryptedPath,
Version: params.Version, Version: params.Version,
@ -396,7 +546,34 @@ func (client *Client) BeginObject(ctx context.Context, params BeginObjectParams)
CipherSuite: pb.CipherSuite(params.EncryptionParameters.CipherSuite), CipherSuite: pb.CipherSuite(params.EncryptionParameters.CipherSuite),
BlockSize: int64(params.EncryptionParameters.BlockSize), BlockSize: int64(params.EncryptionParameters.BlockSize),
}, },
}) }
}
// BatchItem returns single item for batch request.
func (params *BeginObjectParams) BatchItem() *pb.BatchRequestItem {
	wrapped := &pb.BatchRequestItem_ObjectBegin{
		ObjectBegin: params.toRequest(),
	}
	return &pb.BatchRequestItem{Request: wrapped}
}
// BeginObjectResponse response for BeginObject request.
type BeginObjectResponse struct {
	StreamID storj.StreamID
}

// newBeginObjectResponse converts the protobuf response into its typed wrapper.
func newBeginObjectResponse(response *pb.ObjectBeginResponse) BeginObjectResponse {
	var resp BeginObjectResponse
	resp.StreamID = response.StreamId
	return resp
}
// BeginObject begins object creation
func (client *Client) BeginObject(ctx context.Context, params BeginObjectParams) (_ storj.StreamID, err error) {
defer mon.Task()(&ctx)(&err)
response, err := client.client.BeginObject(ctx, params.toRequest())
if err != nil { if err != nil {
return nil, Error.Wrap(err) return nil, Error.Wrap(err)
} }
@ -412,15 +589,29 @@ type CommitObjectParams struct {
EncryptedMetadata []byte EncryptedMetadata []byte
} }
// toRequest translates the parameters into the wire-level protobuf request.
func (params *CommitObjectParams) toRequest() *pb.ObjectCommitRequest {
	req := &pb.ObjectCommitRequest{
		StreamId:               params.StreamID,
		EncryptedMetadataNonce: params.EncryptedMetadataNonce,
		EncryptedMetadata:      params.EncryptedMetadata,
	}
	return req
}

// BatchItem returns single item for batch request.
func (params *CommitObjectParams) BatchItem() *pb.BatchRequestItem {
	wrapped := &pb.BatchRequestItem_ObjectCommit{
		ObjectCommit: params.toRequest(),
	}
	return &pb.BatchRequestItem{Request: wrapped}
}
// CommitObject commits created object // CommitObject commits created object
func (client *Client) CommitObject(ctx context.Context, params CommitObjectParams) (err error) { func (client *Client) CommitObject(ctx context.Context, params CommitObjectParams) (err error) {
defer mon.Task()(&ctx)(&err) defer mon.Task()(&ctx)(&err)
_, err = client.client.CommitObject(ctx, &pb.ObjectCommitRequest{ _, err = client.client.CommitObject(ctx, params.toRequest())
StreamId: params.StreamID,
EncryptedMetadataNonce: params.EncryptedMetadataNonce,
EncryptedMetadata: params.EncryptedMetadata,
})
return Error.Wrap(err) return Error.Wrap(err)
} }
@ -431,23 +622,29 @@ type GetObjectParams struct {
Version int32 Version int32
} }
// GetObject gets single object func (params *GetObjectParams) toRequest() *pb.ObjectGetRequest {
func (client *Client) GetObject(ctx context.Context, params GetObjectParams) (_ storj.ObjectInfo, err error) { return &pb.ObjectGetRequest{
defer mon.Task()(&ctx)(&err)
response, err := client.client.GetObject(ctx, &pb.ObjectGetRequest{
Bucket: params.Bucket, Bucket: params.Bucket,
EncryptedPath: params.EncryptedPath, EncryptedPath: params.EncryptedPath,
Version: params.Version, Version: params.Version,
})
if err != nil {
if status.Code(err) == codes.NotFound {
return storj.ObjectInfo{}, storj.ErrObjectNotFound.Wrap(err)
}
return storj.ObjectInfo{}, Error.Wrap(err)
} }
}
// BatchItem returns single item for batch request.
func (params *GetObjectParams) BatchItem() *pb.BatchRequestItem {
	wrapped := &pb.BatchRequestItem_ObjectGet{
		ObjectGet: params.toRequest(),
	}
	return &pb.BatchRequestItem{Request: wrapped}
}

// GetObjectResponse response for GetObject request.
type GetObjectResponse struct {
	Info storj.ObjectInfo
}
func newGetObjectResponse(response *pb.ObjectGetResponse) GetObjectResponse {
object := storj.ObjectInfo{ object := storj.ObjectInfo{
Bucket: string(response.Object.Bucket), Bucket: string(response.Object.Bucket),
Path: storj.Path(response.Object.EncryptedPath), Path: storj.Path(response.Object.EncryptedPath),
@ -478,8 +675,26 @@ func (client *Client) GetObject(ctx context.Context, params GetObjectParams) (_
TotalShares: int16(pbRS.Total), TotalShares: int16(pbRS.Total),
} }
} }
return GetObjectResponse{
Info: object,
}
}
return object, nil // GetObject gets single object
func (client *Client) GetObject(ctx context.Context, params GetObjectParams) (_ storj.ObjectInfo, err error) {
defer mon.Task()(&ctx)(&err)
response, err := client.client.GetObject(ctx, params.toRequest())
if err != nil {
if status.Code(err) == codes.NotFound {
return storj.ObjectInfo{}, storj.ErrObjectNotFound.Wrap(err)
}
return storj.ObjectInfo{}, Error.Wrap(err)
}
getResponse := newGetObjectResponse(response)
return getResponse.Info, nil
} }
// BeginDeleteObjectParams parameters for BeginDeleteObject method // BeginDeleteObjectParams parameters for BeginDeleteObject method
@ -489,15 +704,39 @@ type BeginDeleteObjectParams struct {
Version int32 Version int32
} }
// toRequest translates the parameters into the wire-level protobuf request.
func (params *BeginDeleteObjectParams) toRequest() *pb.ObjectBeginDeleteRequest {
	req := &pb.ObjectBeginDeleteRequest{
		Bucket:        params.Bucket,
		EncryptedPath: params.EncryptedPath,
		Version:       params.Version,
	}
	return req
}

// BatchItem returns single item for batch request.
func (params *BeginDeleteObjectParams) BatchItem() *pb.BatchRequestItem {
	wrapped := &pb.BatchRequestItem_ObjectBeginDelete{
		ObjectBeginDelete: params.toRequest(),
	}
	return &pb.BatchRequestItem{Request: wrapped}
}

// BeginDeleteObjectResponse response for BeginDeleteObject request.
type BeginDeleteObjectResponse struct {
	StreamID storj.StreamID
}

// newBeginDeleteObjectResponse converts the protobuf response into its typed wrapper.
func newBeginDeleteObjectResponse(response *pb.ObjectBeginDeleteResponse) BeginDeleteObjectResponse {
	var resp BeginDeleteObjectResponse
	resp.StreamID = response.StreamId
	return resp
}
// BeginDeleteObject begins object deletion process // BeginDeleteObject begins object deletion process
func (client *Client) BeginDeleteObject(ctx context.Context, params BeginDeleteObjectParams) (_ storj.StreamID, err error) { func (client *Client) BeginDeleteObject(ctx context.Context, params BeginDeleteObjectParams) (_ storj.StreamID, err error) {
defer mon.Task()(&ctx)(&err) defer mon.Task()(&ctx)(&err)
response, err := client.client.BeginDeleteObject(ctx, &pb.ObjectBeginDeleteRequest{ response, err := client.client.BeginDeleteObject(ctx, params.toRequest())
Bucket: params.Bucket,
EncryptedPath: params.EncryptedPath,
Version: params.Version,
})
if err != nil { if err != nil {
if status.Code(err) == codes.NotFound { if status.Code(err) == codes.NotFound {
return storj.StreamID{}, storj.ErrObjectNotFound.Wrap(err) return storj.StreamID{}, storj.ErrObjectNotFound.Wrap(err)
@ -508,13 +747,32 @@ func (client *Client) BeginDeleteObject(ctx context.Context, params BeginDeleteO
return response.StreamId, nil return response.StreamId, nil
} }
// FinishDeleteObjectParams parameters for FinishDeleteObject method.
type FinishDeleteObjectParams struct {
	StreamID storj.StreamID
}

// toRequest translates the parameters into the wire-level protobuf request.
func (params *FinishDeleteObjectParams) toRequest() *pb.ObjectFinishDeleteRequest {
	req := &pb.ObjectFinishDeleteRequest{
		StreamId: params.StreamID,
	}
	return req
}

// BatchItem returns single item for batch request.
func (params *FinishDeleteObjectParams) BatchItem() *pb.BatchRequestItem {
	wrapped := &pb.BatchRequestItem_ObjectFinishDelete{
		ObjectFinishDelete: params.toRequest(),
	}
	return &pb.BatchRequestItem{Request: wrapped}
}
// FinishDeleteObject finishes object deletion process // FinishDeleteObject finishes object deletion process
func (client *Client) FinishDeleteObject(ctx context.Context, streamID storj.StreamID) (err error) { func (client *Client) FinishDeleteObject(ctx context.Context, params FinishDeleteObjectParams) (err error) {
defer mon.Task()(&ctx)(&err) defer mon.Task()(&ctx)(&err)
_, err = client.client.FinishDeleteObject(ctx, &pb.ObjectFinishDeleteRequest{ _, err = client.client.FinishDeleteObject(ctx, params.toRequest())
StreamId: streamID,
})
return Error.Wrap(err) return Error.Wrap(err)
} }
@ -528,11 +786,8 @@ type ListObjectsParams struct {
Recursive bool Recursive bool
} }
// ListObjects lists objects according to specific parameters func (params *ListObjectsParams) toRequest() *pb.ObjectListRequest {
func (client *Client) ListObjects(ctx context.Context, params ListObjectsParams) (_ []storj.ObjectListItem, more bool, err error) { return &pb.ObjectListRequest{
defer mon.Task()(&ctx)(&err)
response, err := client.client.ListObjects(ctx, &pb.ObjectListRequest{
Bucket: params.Bucket, Bucket: params.Bucket,
EncryptedPrefix: params.EncryptedPrefix, EncryptedPrefix: params.EncryptedPrefix,
EncryptedCursor: params.EncryptedCursor, EncryptedCursor: params.EncryptedCursor,
@ -541,16 +796,30 @@ func (client *Client) ListObjects(ctx context.Context, params ListObjectsParams)
Metadata: params.IncludeMetadata, Metadata: params.IncludeMetadata,
}, },
Recursive: params.Recursive, Recursive: params.Recursive,
})
if err != nil {
return []storj.ObjectListItem{}, false, Error.Wrap(err)
} }
}
// BatchItem returns single item for batch request.
func (params *ListObjectsParams) BatchItem() *pb.BatchRequestItem {
	wrapped := &pb.BatchRequestItem_ObjectList{
		ObjectList: params.toRequest(),
	}
	return &pb.BatchRequestItem{Request: wrapped}
}

// ListObjectsResponse response for ListObjects request.
type ListObjectsResponse struct {
	Items []storj.ObjectListItem
	More  bool
}
func newListObjectsResponse(response *pb.ObjectListResponse, encryptedPrefix []byte, recursive bool) ListObjectsResponse {
objects := make([]storj.ObjectListItem, len(response.Items)) objects := make([]storj.ObjectListItem, len(response.Items))
for i, object := range response.Items { for i, object := range response.Items {
encryptedPath := object.EncryptedPath encryptedPath := object.EncryptedPath
isPrefix := false isPrefix := false
if !params.Recursive && len(encryptedPath) != 0 && encryptedPath[len(encryptedPath)-1] == '/' && !bytes.Equal(encryptedPath, params.EncryptedPrefix) { if !recursive && len(encryptedPath) != 0 && encryptedPath[len(encryptedPath)-1] == '/' && !bytes.Equal(encryptedPath, encryptedPrefix) {
isPrefix = true isPrefix = true
} }
@ -568,7 +837,23 @@ func (client *Client) ListObjects(ctx context.Context, params ListObjectsParams)
} }
} }
return objects, response.More, Error.Wrap(err) return ListObjectsResponse{
Items: objects,
More: response.More,
}
}
// ListObjects lists objects according to specific parameters
func (client *Client) ListObjects(ctx context.Context, params ListObjectsParams) (_ []storj.ObjectListItem, more bool, err error) {
defer mon.Task()(&ctx)(&err)
response, err := client.client.ListObjects(ctx, params.toRequest())
if err != nil {
return []storj.ObjectListItem{}, false, Error.Wrap(err)
}
listResponse := newListObjectsResponse(response, params.EncryptedPrefix, params.Recursive)
return listResponse.Items, listResponse.More, Error.Wrap(err)
} }
// BeginSegmentParams parameters for BeginSegment method // BeginSegmentParams parameters for BeginSegment method
@ -578,18 +863,46 @@ type BeginSegmentParams struct {
MaxOderLimit int64 MaxOderLimit int64
} }
// BeginSegment begins segment upload func (params *BeginSegmentParams) toRequest() *pb.SegmentBeginRequest {
func (client *Client) BeginSegment(ctx context.Context, params BeginSegmentParams) (_ storj.SegmentID, limits []*pb.AddressedOrderLimit, piecePrivateKey storj.PiecePrivateKey, err error) { return &pb.SegmentBeginRequest{
defer mon.Task()(&ctx)(&err)
response, err := client.client.BeginSegment(ctx, &pb.SegmentBeginRequest{
StreamId: params.StreamID, StreamId: params.StreamID,
Position: &pb.SegmentPosition{ Position: &pb.SegmentPosition{
PartNumber: params.Position.PartNumber, PartNumber: params.Position.PartNumber,
Index: params.Position.Index, Index: params.Position.Index,
}, },
MaxOrderLimit: params.MaxOderLimit, MaxOrderLimit: params.MaxOderLimit,
}) }
}
// BatchItem returns single item for batch request.
func (params *BeginSegmentParams) BatchItem() *pb.BatchRequestItem {
	wrapped := &pb.BatchRequestItem_SegmentBegin{
		SegmentBegin: params.toRequest(),
	}
	return &pb.BatchRequestItem{Request: wrapped}
}

// BeginSegmentResponse response for BeginSegment request.
type BeginSegmentResponse struct {
	SegmentID       storj.SegmentID
	Limits          []*pb.AddressedOrderLimit
	PiecePrivateKey storj.PiecePrivateKey
}

// newBeginSegmentResponse converts the protobuf response into its typed wrapper.
func newBeginSegmentResponse(response *pb.SegmentBeginResponse) BeginSegmentResponse {
	var resp BeginSegmentResponse
	resp.SegmentID = response.SegmentId
	resp.Limits = response.AddressedLimits
	resp.PiecePrivateKey = response.PrivateKey
	return resp
}
// BeginSegment begins segment upload
func (client *Client) BeginSegment(ctx context.Context, params BeginSegmentParams) (_ storj.SegmentID, limits []*pb.AddressedOrderLimit, piecePrivateKey storj.PiecePrivateKey, err error) {
defer mon.Task()(&ctx)(&err)
response, err := client.client.BeginSegment(ctx, params.toRequest())
if err != nil { if err != nil {
return storj.SegmentID{}, nil, storj.PiecePrivateKey{}, Error.Wrap(err) return storj.SegmentID{}, nil, storj.PiecePrivateKey{}, Error.Wrap(err)
} }
@ -606,23 +919,33 @@ type CommitSegmentParams struct {
UploadResult []*pb.SegmentPieceUploadResult UploadResult []*pb.SegmentPieceUploadResult
} }
// CommitSegmentNew commits segment after upload func (params *CommitSegmentParams) toRequest() *pb.SegmentCommitRequest {
func (client *Client) CommitSegmentNew(ctx context.Context, params CommitSegmentParams) (err error) { return &pb.SegmentCommitRequest{
defer mon.Task()(&ctx)(&err)
_, err = client.client.CommitSegment(ctx, &pb.SegmentCommitRequest{
SegmentId: params.SegmentID, SegmentId: params.SegmentID,
EncryptedKeyNonce: params.Encryption.EncryptedKeyNonce, EncryptedKeyNonce: params.Encryption.EncryptedKeyNonce,
EncryptedKey: params.Encryption.EncryptedKey, EncryptedKey: params.Encryption.EncryptedKey,
SizeEncryptedData: params.SizeEncryptedData, SizeEncryptedData: params.SizeEncryptedData,
UploadResult: params.UploadResult, UploadResult: params.UploadResult,
})
if err != nil {
return Error.Wrap(err)
} }
}
return nil // BatchItem returns single item for batch request
// BatchItem returns single item for batch request.
func (params *CommitSegmentParams) BatchItem() *pb.BatchRequestItem {
	wrapped := &pb.BatchRequestItem_SegmentCommit{
		SegmentCommit: params.toRequest(),
	}
	return &pb.BatchRequestItem{Request: wrapped}
}
// CommitSegmentNew commits segment after upload
func (client *Client) CommitSegmentNew(ctx context.Context, params CommitSegmentParams) (err error) {
defer mon.Task()(&ctx)(&err)
_, err = client.client.CommitSegment(ctx, params.toRequest())
return Error.Wrap(err)
} }
// MakeInlineSegmentParams parameters for MakeInlineSegment method // MakeInlineSegmentParams parameters for MakeInlineSegment method
@ -633,11 +956,8 @@ type MakeInlineSegmentParams struct {
EncryptedInlineData []byte EncryptedInlineData []byte
} }
// MakeInlineSegment commits segment after upload func (params *MakeInlineSegmentParams) toRequest() *pb.SegmentMakeInlineRequest {
func (client *Client) MakeInlineSegment(ctx context.Context, params MakeInlineSegmentParams) (err error) { return &pb.SegmentMakeInlineRequest{
defer mon.Task()(&ctx)(&err)
_, err = client.client.MakeInlineSegment(ctx, &pb.SegmentMakeInlineRequest{
StreamId: params.StreamID, StreamId: params.StreamID,
Position: &pb.SegmentPosition{ Position: &pb.SegmentPosition{
PartNumber: params.Position.PartNumber, PartNumber: params.Position.PartNumber,
@ -646,12 +966,25 @@ func (client *Client) MakeInlineSegment(ctx context.Context, params MakeInlineSe
EncryptedKeyNonce: params.Encryption.EncryptedKeyNonce, EncryptedKeyNonce: params.Encryption.EncryptedKeyNonce,
EncryptedKey: params.Encryption.EncryptedKey, EncryptedKey: params.Encryption.EncryptedKey,
EncryptedInlineData: params.EncryptedInlineData, EncryptedInlineData: params.EncryptedInlineData,
})
if err != nil {
return Error.Wrap(err)
} }
}
return nil // BatchItem returns single item for batch request
// BatchItem returns single item for batch request.
func (params *MakeInlineSegmentParams) BatchItem() *pb.BatchRequestItem {
	wrapped := &pb.BatchRequestItem_SegmentMakeInline{
		SegmentMakeInline: params.toRequest(),
	}
	return &pb.BatchRequestItem{Request: wrapped}
}
// MakeInlineSegment commits segment after upload
func (client *Client) MakeInlineSegment(ctx context.Context, params MakeInlineSegmentParams) (err error) {
defer mon.Task()(&ctx)(&err)
_, err = client.client.MakeInlineSegment(ctx, params.toRequest())
return Error.Wrap(err)
} }
// BeginDeleteSegmentParams parameters for BeginDeleteSegment method // BeginDeleteSegmentParams parameters for BeginDeleteSegment method
@ -660,17 +993,45 @@ type BeginDeleteSegmentParams struct {
Position storj.SegmentPosition Position storj.SegmentPosition
} }
// BeginDeleteSegment begins segment upload process func (params *BeginDeleteSegmentParams) toRequest() *pb.SegmentBeginDeleteRequest {
func (client *Client) BeginDeleteSegment(ctx context.Context, params BeginDeleteSegmentParams) (_ storj.SegmentID, limits []*pb.AddressedOrderLimit, _ storj.PiecePrivateKey, err error) { return &pb.SegmentBeginDeleteRequest{
defer mon.Task()(&ctx)(&err)
response, err := client.client.BeginDeleteSegment(ctx, &pb.SegmentBeginDeleteRequest{
StreamId: params.StreamID, StreamId: params.StreamID,
Position: &pb.SegmentPosition{ Position: &pb.SegmentPosition{
PartNumber: params.Position.PartNumber, PartNumber: params.Position.PartNumber,
Index: params.Position.Index, Index: params.Position.Index,
}, },
}) }
}
// BatchItem returns single item for batch request.
func (params *BeginDeleteSegmentParams) BatchItem() *pb.BatchRequestItem {
	wrapped := &pb.BatchRequestItem_SegmentBeginDelete{
		SegmentBeginDelete: params.toRequest(),
	}
	return &pb.BatchRequestItem{Request: wrapped}
}

// BeginDeleteSegmentResponse response for BeginDeleteSegment request.
type BeginDeleteSegmentResponse struct {
	SegmentID       storj.SegmentID
	Limits          []*pb.AddressedOrderLimit
	PiecePrivateKey storj.PiecePrivateKey
}

// newBeginDeleteSegmentResponse converts the protobuf response into its typed wrapper.
func newBeginDeleteSegmentResponse(response *pb.SegmentBeginDeleteResponse) BeginDeleteSegmentResponse {
	var resp BeginDeleteSegmentResponse
	resp.SegmentID = response.SegmentId
	resp.Limits = response.AddressedLimits
	resp.PiecePrivateKey = response.PrivateKey
	return resp
}
// BeginDeleteSegment begins segment upload process
func (client *Client) BeginDeleteSegment(ctx context.Context, params BeginDeleteSegmentParams) (_ storj.SegmentID, limits []*pb.AddressedOrderLimit, _ storj.PiecePrivateKey, err error) {
defer mon.Task()(&ctx)(&err)
response, err := client.client.BeginDeleteSegment(ctx, params.toRequest())
if err != nil { if err != nil {
return storj.SegmentID{}, nil, storj.PiecePrivateKey{}, Error.Wrap(err) return storj.SegmentID{}, nil, storj.PiecePrivateKey{}, Error.Wrap(err)
} }
@ -685,14 +1046,28 @@ type FinishDeleteSegmentParams struct {
DeleteResults []*pb.SegmentPieceDeleteResult DeleteResults []*pb.SegmentPieceDeleteResult
} }
// toRequest translates the parameters into the wire-level protobuf request.
func (params *FinishDeleteSegmentParams) toRequest() *pb.SegmentFinishDeleteRequest {
	req := &pb.SegmentFinishDeleteRequest{
		SegmentId: params.SegmentID,
		Results:   params.DeleteResults,
	}
	return req
}

// BatchItem returns single item for batch request.
func (params *FinishDeleteSegmentParams) BatchItem() *pb.BatchRequestItem {
	wrapped := &pb.BatchRequestItem_SegmentFinishDelete{
		SegmentFinishDelete: params.toRequest(),
	}
	return &pb.BatchRequestItem{Request: wrapped}
}
// FinishDeleteSegment finishes segment upload process // FinishDeleteSegment finishes segment upload process
func (client *Client) FinishDeleteSegment(ctx context.Context, params FinishDeleteSegmentParams) (err error) { func (client *Client) FinishDeleteSegment(ctx context.Context, params FinishDeleteSegmentParams) (err error) {
defer mon.Task()(&ctx)(&err) defer mon.Task()(&ctx)(&err)
_, err = client.client.FinishDeleteSegment(ctx, &pb.SegmentFinishDeleteRequest{ _, err = client.client.FinishDeleteSegment(ctx, params.toRequest())
SegmentId: params.SegmentID,
Results: params.DeleteResults,
})
return Error.Wrap(err) return Error.Wrap(err)
} }
@ -702,21 +1077,33 @@ type DownloadSegmentParams struct {
Position storj.SegmentPosition Position storj.SegmentPosition
} }
// DownloadSegment gets info for downloading remote segment or data from inline segment func (params *DownloadSegmentParams) toRequest() *pb.SegmentDownloadRequest {
func (client *Client) DownloadSegment(ctx context.Context, params DownloadSegmentParams) (_ storj.SegmentDownloadInfo, _ []*pb.AddressedOrderLimit, err error) { return &pb.SegmentDownloadRequest{
defer mon.Task()(&ctx)(&err)
response, err := client.client.DownloadSegment(ctx, &pb.SegmentDownloadRequest{
StreamId: params.StreamID, StreamId: params.StreamID,
CursorPosition: &pb.SegmentPosition{ CursorPosition: &pb.SegmentPosition{
PartNumber: params.Position.PartNumber, PartNumber: params.Position.PartNumber,
Index: params.Position.Index, Index: params.Position.Index,
}, },
})
if err != nil {
return storj.SegmentDownloadInfo{}, nil, Error.Wrap(err)
} }
}
// BatchItem returns single item for batch request.
func (params *DownloadSegmentParams) BatchItem() *pb.BatchRequestItem {
	wrapped := &pb.BatchRequestItem_SegmentDownload{
		SegmentDownload: params.toRequest(),
	}
	return &pb.BatchRequestItem{Request: wrapped}
}

// DownloadSegmentResponse response for DownloadSegment request.
type DownloadSegmentResponse struct {
	Info   storj.SegmentDownloadInfo
	Limits []*pb.AddressedOrderLimit
}
func newDownloadSegmentResponse(response *pb.SegmentDownloadResponse) DownloadSegmentResponse {
info := storj.SegmentDownloadInfo{ info := storj.SegmentDownloadInfo{
SegmentID: response.SegmentId, SegmentID: response.SegmentId,
Size: response.SegmentSize, Size: response.SegmentSize,
@ -739,8 +1126,23 @@ func (client *Client) DownloadSegment(ctx context.Context, params DownloadSegmen
response.AddressedLimits[i] = nil response.AddressedLimits[i] = nil
} }
} }
return DownloadSegmentResponse{
Info: info,
Limits: response.AddressedLimits,
}
}
return info, response.AddressedLimits, nil // DownloadSegment gets info for downloading remote segment or data from inline segment
func (client *Client) DownloadSegment(ctx context.Context, params DownloadSegmentParams) (_ storj.SegmentDownloadInfo, _ []*pb.AddressedOrderLimit, err error) {
defer mon.Task()(&ctx)(&err)
response, err := client.client.DownloadSegment(ctx, params.toRequest())
if err != nil {
return storj.SegmentDownloadInfo{}, nil, Error.Wrap(err)
}
downloadResponse := newDownloadSegmentResponse(response)
return downloadResponse.Info, downloadResponse.Limits, nil
} }
// ListSegmentsParams parameters for ListSegment method // ListSegmentsParams parameters for ListSegment method
@ -750,22 +1152,33 @@ type ListSegmentsParams struct {
Limit int32 Limit int32
} }
// ListSegmentsNew lists object segments // ListSegmentsResponse response for ListSegments request
func (client *Client) ListSegmentsNew(ctx context.Context, params ListSegmentsParams) (_ []storj.SegmentListItem, more bool, err error) { type ListSegmentsResponse struct {
defer mon.Task()(&ctx)(&err) Items []storj.SegmentListItem
More bool
}
response, err := client.client.ListSegments(ctx, &pb.SegmentListRequest{ func (params *ListSegmentsParams) toRequest() *pb.SegmentListRequest {
return &pb.SegmentListRequest{
StreamId: params.StreamID, StreamId: params.StreamID,
CursorPosition: &pb.SegmentPosition{ CursorPosition: &pb.SegmentPosition{
PartNumber: params.CursorPosition.PartNumber, PartNumber: params.CursorPosition.PartNumber,
Index: params.CursorPosition.Index, Index: params.CursorPosition.Index,
}, },
Limit: params.Limit, Limit: params.Limit,
})
if err != nil {
return []storj.SegmentListItem{}, false, Error.Wrap(err)
} }
}
// BatchItem returns single item for batch request.
func (params *ListSegmentsParams) BatchItem() *pb.BatchRequestItem {
	wrapped := &pb.BatchRequestItem_SegmentList{
		SegmentList: params.toRequest(),
	}
	return &pb.BatchRequestItem{Request: wrapped}
}
func newListSegmentsResponse(response *pb.SegmentListResponse) ListSegmentsResponse {
items := make([]storj.SegmentListItem, len(response.Items)) items := make([]storj.SegmentListItem, len(response.Items))
for i, responseItem := range response.Items { for i, responseItem := range response.Items {
items[i] = storj.SegmentListItem{ items[i] = storj.SegmentListItem{
@ -775,23 +1188,47 @@ func (client *Client) ListSegmentsNew(ctx context.Context, params ListSegmentsPa
}, },
} }
} }
return items, response.More, Error.Wrap(err) return ListSegmentsResponse{
Items: items,
More: response.More,
}
} }
// SetBucketAttributionParams parameters for SetBucketAttribution method // ListSegmentsNew lists object segments
type SetBucketAttributionParams struct { func (client *Client) ListSegmentsNew(ctx context.Context, params ListSegmentsParams) (_ []storj.SegmentListItem, more bool, err error) {
Bucket string
PartnerID uuid.UUID
}
// SetBucketAttribution tries to set the attribution information on the bucket.
func (client *Client) SetBucketAttribution(ctx context.Context, params SetBucketAttributionParams) (err error) {
defer mon.Task()(&ctx)(&err) defer mon.Task()(&ctx)(&err)
_, err = client.client.SetBucketAttribution(ctx, &pb.BucketSetAttributionRequest{ response, err := client.client.ListSegments(ctx, params.toRequest())
PartnerId: params.PartnerID[:], // TODO: implement storj.UUID that can be sent using pb if err != nil {
Name: []byte(params.Bucket), return []storj.SegmentListItem{}, false, Error.Wrap(err)
}) }
return Error.Wrap(err) listResponse := newListSegmentsResponse(response)
return listResponse.Items, listResponse.More, Error.Wrap(err)
}
// Batch sends multiple requests in one batch
func (client *Client) Batch(ctx context.Context, requests ...BatchItem) (resp []BatchResponse, err error) {
defer mon.Task()(&ctx)(&err)
batchItems := make([]*pb.BatchRequestItem, len(requests))
for i, request := range requests {
batchItems[i] = request.BatchItem()
}
response, err := client.client.Batch(ctx, &pb.BatchRequest{
Requests: batchItems,
})
if err != nil {
return []BatchResponse{}, err
}
resp = make([]BatchResponse, len(response.Responses))
for i, response := range response.Responses {
resp[i] = BatchResponse{
pbRequest: batchItems[i].Request,
pbResponse: response.Response,
}
}
return resp, nil
} }

View File

@ -38,23 +38,39 @@ func NewStore(metainfoClient metainfo.Client) *BucketStore {
// Create creates a bucket // Create creates a bucket
func (store *BucketStore) Create(ctx context.Context, bucket storj.Bucket) (_ storj.Bucket, err error) { func (store *BucketStore) Create(ctx context.Context, bucket storj.Bucket) (_ storj.Bucket, err error) {
defer mon.Task()(&ctx)(&err) defer mon.Task()(&ctx)(&err)
return store.metainfoClient.CreateBucket(ctx, bucket)
// uuid MarshalJSON implementation always returns err == nil
partnerID, _ := bucket.PartnerID.MarshalJSON()
return store.metainfoClient.CreateBucket(ctx, metainfo.CreateBucketParams{
Name: []byte(bucket.Name),
PathCipher: bucket.PathCipher,
PartnerID: partnerID,
DefaultSegmentsSize: bucket.DefaultSegmentsSize,
DefaultRedundancyScheme: bucket.DefaultRedundancyScheme,
DefaultEncryptionParameters: bucket.DefaultEncryptionParameters,
})
} }
// Get returns a bucket // Get returns a bucket
func (store *BucketStore) Get(ctx context.Context, bucketName string) (_ storj.Bucket, err error) { func (store *BucketStore) Get(ctx context.Context, bucketName string) (_ storj.Bucket, err error) {
defer mon.Task()(&ctx)(&err) defer mon.Task()(&ctx)(&err)
return store.metainfoClient.GetBucket(ctx, bucketName) return store.metainfoClient.GetBucket(ctx, metainfo.GetBucketParams{
Name: []byte(bucketName),
})
} }
// Delete deletes a bucket // Delete deletes a bucket
func (store *BucketStore) Delete(ctx context.Context, bucketName string) (err error) { func (store *BucketStore) Delete(ctx context.Context, bucketName string) (err error) {
defer mon.Task()(&ctx)(&err) defer mon.Task()(&ctx)(&err)
return store.metainfoClient.DeleteBucket(ctx, bucketName) return store.metainfoClient.DeleteBucket(ctx, metainfo.DeleteBucketParams{
Name: []byte(bucketName),
})
} }
// List returns a list of buckets // List returns a list of buckets
func (store *BucketStore) List(ctx context.Context, listOpts storj.BucketListOptions) (_ storj.BucketList, err error) { func (store *BucketStore) List(ctx context.Context, listOpts storj.BucketListOptions) (_ storj.BucketList, err error) {
defer mon.Task()(&ctx)(&err) defer mon.Task()(&ctx)(&err)
return store.metainfoClient.ListBuckets(ctx, listOpts) return store.metainfoClient.ListBuckets(ctx, metainfo.ListBucketsParams{
ListOpts: listOpts,
})
} }