2019-03-18 10:55:06 +00:00
|
|
|
// Copyright (C) 2019 Storj Labs, Inc.
|
|
|
|
// See LICENSE for copying information.
|
|
|
|
|
|
|
|
package metainfo
|
|
|
|
|
|
|
|
import (
|
2019-08-01 10:04:31 +01:00
|
|
|
"bytes"
|
2019-03-18 10:55:06 +00:00
|
|
|
"context"
|
|
|
|
"time"
|
|
|
|
|
2019-06-19 13:02:37 +01:00
|
|
|
"github.com/skyrings/skyring-common/tools/uuid"
|
2019-03-18 10:55:06 +00:00
|
|
|
"github.com/zeebo/errs"
|
2019-07-09 22:54:00 +01:00
|
|
|
"gopkg.in/spacemonkeygo/monkit.v2"
|
2019-03-18 10:55:06 +00:00
|
|
|
|
2019-09-19 17:19:29 +01:00
|
|
|
"storj.io/storj/pkg/macaroon"
|
2019-03-18 10:55:06 +00:00
|
|
|
"storj.io/storj/pkg/pb"
|
2019-09-19 05:46:39 +01:00
|
|
|
"storj.io/storj/pkg/rpc"
|
|
|
|
"storj.io/storj/pkg/rpc/rpcstatus"
|
2019-03-18 10:55:06 +00:00
|
|
|
"storj.io/storj/pkg/storj"
|
2019-11-14 19:46:15 +00:00
|
|
|
"storj.io/storj/private/errs2"
|
2019-03-18 10:55:06 +00:00
|
|
|
)
|
|
|
|
|
|
|
|
var (
	// mon collects monkit metrics for functions in this package.
	mon = monkit.Package()

	// Error is the errs class of standard metainfo errors
	Error = errs.Class("metainfo error")
)
|
|
|
|
|
2019-06-25 16:36:23 +01:00
|
|
|
// Client is a metainfo RPC client wrapping an rpc.MetainfoClient.
// When constructed via Dial it also owns the underlying connection.
type Client struct {
	conn   *rpc.Conn          // owned connection; nil when built with New
	client rpc.MetainfoClient // RPC client used for every request

	apiKeyRaw []byte // raw serialized API key attached to every request header

	userAgent string // user agent string attached to every request header
}

// ListItem is a single item in a listing
type ListItem struct {
	Path     storj.Path
	Pointer  *pb.Pointer
	IsPrefix bool // true when the entry represents a prefix rather than an object
}
|
|
|
|
|
2019-06-25 16:36:23 +01:00
|
|
|
// New wraps an existing metainfo RPC client with the given api key and
// user agent. The returned Client does not own a connection, so Close
// is a no-op for it.
func New(client rpc.MetainfoClient, apiKey *macaroon.APIKey, userAgent string) *Client {
	return &Client{
		client:    client,
		apiKeyRaw: apiKey.SerializeRaw(),
		userAgent: userAgent,
	}
}
|
|
|
|
|
2019-06-25 16:36:23 +01:00
|
|
|
// Dial dials to metainfo endpoint with the specified api key.
// The returned Client owns the connection; callers must Close it.
func Dial(ctx context.Context, dialer rpc.Dialer, address string, apiKey *macaroon.APIKey, userAgent string) (*Client, error) {
	// Best-effort insecure dial — see rpc.Dialer for the exact security
	// semantics of DialAddressInsecureBestEffort.
	conn, err := dialer.DialAddressInsecureBestEffort(ctx, address)
	if err != nil {
		return nil, Error.Wrap(err)
	}

	return &Client{
		conn:      conn,
		client:    conn.MetainfoClient(),
		apiKeyRaw: apiKey.SerializeRaw(),
		userAgent: userAgent,
	}, nil
}
|
|
|
|
|
|
|
|
// Close closes the dialed connection.
|
|
|
|
func (client *Client) Close() error {
|
|
|
|
if client.conn != nil {
|
|
|
|
return Error.Wrap(client.conn.Close())
|
|
|
|
}
|
|
|
|
return nil
|
2019-03-18 10:55:06 +00:00
|
|
|
}
|
|
|
|
|
2019-09-19 17:19:29 +01:00
|
|
|
// header builds the request header carrying the raw api key and user
// agent that accompany every metainfo request.
func (client *Client) header() *pb.RequestHeader {
	return &pb.RequestHeader{
		ApiKey:    client.apiKeyRaw,
		UserAgent: []byte(client.userAgent),
	}
}
|
|
|
|
|
2019-06-27 18:36:51 +01:00
|
|
|
// GetProjectInfo gets the ProjectInfo for the api key associated with the metainfo client.
// Note: the RPC error, if any, is returned unwrapped.
func (client *Client) GetProjectInfo(ctx context.Context) (resp *pb.ProjectInfoResponse, err error) {
	defer mon.Task()(&ctx)(&err)

	return client.client.ProjectInfo(ctx, &pb.ProjectInfoRequest{
		Header: client.header(),
	})
}
|
2019-07-12 13:57:02 +01:00
|
|
|
|
2019-08-06 15:56:23 +01:00
|
|
|
// CreateBucketParams parameters for CreateBucket method
type CreateBucketParams struct {
	Name       []byte
	PathCipher storj.CipherSuite
	PartnerID  []byte

	DefaultSegmentsSize         int64
	DefaultRedundancyScheme     storj.RedundancyScheme
	DefaultEncryptionParameters storj.EncryptionParameters
}

// toRequest translates the parameters into the wire-level request,
// attaching the given header (nil when used inside a batch).
func (params *CreateBucketParams) toRequest(header *pb.RequestHeader) *pb.BucketCreateRequest {
	defaultRS := params.DefaultRedundancyScheme
	defaultEP := params.DefaultEncryptionParameters

	return &pb.BucketCreateRequest{
		Header:             header,
		Name:               params.Name,
		PathCipher:         pb.CipherSuite(params.PathCipher),
		PartnerId:          params.PartnerID,
		DefaultSegmentSize: params.DefaultSegmentsSize,
		DefaultRedundancyScheme: &pb.RedundancyScheme{
			Type:             pb.RedundancyScheme_SchemeType(defaultRS.Algorithm),
			MinReq:           int32(defaultRS.RequiredShares),
			Total:            int32(defaultRS.TotalShares),
			RepairThreshold:  int32(defaultRS.RepairShares),
			SuccessThreshold: int32(defaultRS.OptimalShares),
			ErasureShareSize: defaultRS.ShareSize,
		},
		DefaultEncryptionParameters: &pb.EncryptionParameters{
			CipherSuite: pb.CipherSuite(defaultEP.CipherSuite),
			BlockSize:   int64(defaultEP.BlockSize),
		},
	}
}

// BatchItem returns single item for batch request
func (params *CreateBucketParams) BatchItem() *pb.BatchRequestItem {
	return &pb.BatchRequestItem{
		Request: &pb.BatchRequestItem_BucketCreate{
			// Header is nil here — presumably the batch request carries a
			// single header for all items; confirm with the batch sender.
			BucketCreate: params.toRequest(nil),
		},
	}
}
|
|
|
|
|
|
|
|
// TODO potential names *Response/*Out/*Result

// CreateBucketResponse response for CreateBucket request
type CreateBucketResponse struct {
	Bucket storj.Bucket
}

// newCreateBucketResponse converts the proto response, failing when the
// contained bucket cannot be converted.
func newCreateBucketResponse(response *pb.BucketCreateResponse) (CreateBucketResponse, error) {
	bucket, err := convertProtoToBucket(response.Bucket)
	if err != nil {
		return CreateBucketResponse{}, err
	}
	return CreateBucketResponse{
		Bucket: bucket,
	}, nil
}
|
|
|
|
|
2019-07-12 13:57:02 +01:00
|
|
|
// CreateBucket creates a new bucket
// and returns the created bucket converted to storj.Bucket.
func (client *Client) CreateBucket(ctx context.Context, params CreateBucketParams) (respBucket storj.Bucket, err error) {
	defer mon.Task()(&ctx)(&err)

	response, err := client.client.CreateBucket(ctx, params.toRequest(client.header()))
	if err != nil {
		return storj.Bucket{}, Error.Wrap(err)
	}

	respBucket, err = convertProtoToBucket(response.Bucket)
	if err != nil {
		return storj.Bucket{}, Error.Wrap(err)
	}
	return respBucket, nil
}
|
2019-07-12 13:57:02 +01:00
|
|
|
|
2019-08-06 15:56:23 +01:00
|
|
|
// GetBucketParams parameters for GetBucket method
type GetBucketParams struct {
	Name []byte
}

// toRequest translates the parameters into the wire-level request.
func (params *GetBucketParams) toRequest(header *pb.RequestHeader) *pb.BucketGetRequest {
	return &pb.BucketGetRequest{
		Header: header,
		Name:   params.Name,
	}
}

// BatchItem returns single item for batch request
func (params *GetBucketParams) BatchItem() *pb.BatchRequestItem {
	return &pb.BatchRequestItem{
		Request: &pb.BatchRequestItem_BucketGet{
			BucketGet: params.toRequest(nil),
		},
	}
}
|
|
|
|
|
|
|
|
// GetBucketResponse response for GetBucket request
type GetBucketResponse struct {
	Bucket storj.Bucket
}

// newGetBucketResponse converts the proto response, failing when the
// contained bucket cannot be converted.
func newGetBucketResponse(response *pb.BucketGetResponse) (GetBucketResponse, error) {
	bucket, err := convertProtoToBucket(response.Bucket)
	if err != nil {
		return GetBucketResponse{}, err
	}
	return GetBucketResponse{
		Bucket: bucket,
	}, nil
}
|
|
|
|
|
|
|
|
// GetBucket returns a bucket
// translating the RPC not-found status into storj.ErrBucketNotFound.
func (client *Client) GetBucket(ctx context.Context, params GetBucketParams) (respBucket storj.Bucket, err error) {
	defer mon.Task()(&ctx)(&err)

	resp, err := client.client.GetBucket(ctx, params.toRequest(client.header()))
	if err != nil {
		if errs2.IsRPC(err, rpcstatus.NotFound) {
			return storj.Bucket{}, storj.ErrBucketNotFound.Wrap(err)
		}
		return storj.Bucket{}, Error.Wrap(err)
	}

	respBucket, err = convertProtoToBucket(resp.Bucket)
	if err != nil {
		return storj.Bucket{}, Error.Wrap(err)
	}
	return respBucket, nil
}
|
|
|
|
|
2019-08-06 15:56:23 +01:00
|
|
|
// DeleteBucketParams parameters for DeleteBucket method
type DeleteBucketParams struct {
	Name []byte
}

// toRequest translates the parameters into the wire-level request.
func (params *DeleteBucketParams) toRequest(header *pb.RequestHeader) *pb.BucketDeleteRequest {
	return &pb.BucketDeleteRequest{
		Header: header,
		Name:   params.Name,
	}
}

// BatchItem returns single item for batch request
func (params *DeleteBucketParams) BatchItem() *pb.BatchRequestItem {
	return &pb.BatchRequestItem{
		Request: &pb.BatchRequestItem_BucketDelete{
			BucketDelete: params.toRequest(nil),
		},
	}
}
|
|
|
|
|
2019-07-12 13:57:02 +01:00
|
|
|
// DeleteBucket deletes a bucket
// translating the RPC not-found status into storj.ErrBucketNotFound.
func (client *Client) DeleteBucket(ctx context.Context, params DeleteBucketParams) (err error) {
	defer mon.Task()(&ctx)(&err)
	_, err = client.client.DeleteBucket(ctx, params.toRequest(client.header()))
	if err != nil {
		if errs2.IsRPC(err, rpcstatus.NotFound) {
			return storj.ErrBucketNotFound.Wrap(err)
		}
		return Error.Wrap(err)
	}
	return nil
}
|
|
|
|
|
2019-08-06 15:56:23 +01:00
|
|
|
// ListBucketsParams parameters for ListBuckets method
type ListBucketsParams struct {
	ListOpts storj.BucketListOptions
}

// toRequest translates the listing options into the wire-level request.
func (params *ListBucketsParams) toRequest(header *pb.RequestHeader) *pb.BucketListRequest {
	return &pb.BucketListRequest{
		Header:    header,
		Cursor:    []byte(params.ListOpts.Cursor),
		Limit:     int32(params.ListOpts.Limit),
		Direction: int32(params.ListOpts.Direction),
	}
}

// BatchItem returns single item for batch request
func (params *ListBucketsParams) BatchItem() *pb.BatchRequestItem {
	return &pb.BatchRequestItem{
		Request: &pb.BatchRequestItem_BucketList{
			BucketList: params.toRequest(nil),
		},
	}
}
|
|
|
|
|
|
|
|
// ListBucketsResponse response for ListBucket request
type ListBucketsResponse struct {
	BucketList storj.BucketList
}

// newListBucketsResponse converts the proto listing into storj types.
// Only Name and CreatedAt are carried over for each bucket.
func newListBucketsResponse(response *pb.BucketListResponse) ListBucketsResponse {
	bucketList := storj.BucketList{
		More: response.More,
	}
	bucketList.Items = make([]storj.Bucket, len(response.Items))
	for i, item := range response.GetItems() {
		bucketList.Items[i] = storj.Bucket{
			Name:    string(item.Name),
			Created: item.CreatedAt,
		}
	}
	return ListBucketsResponse{
		BucketList: bucketList,
	}
}
|
|
|
|
|
2019-07-12 13:57:02 +01:00
|
|
|
// ListBuckets lists buckets
|
2019-08-06 15:56:23 +01:00
|
|
|
func (client *Client) ListBuckets(ctx context.Context, params ListBucketsParams) (_ storj.BucketList, err error) {
|
2019-07-12 13:57:02 +01:00
|
|
|
defer mon.Task()(&ctx)(&err)
|
2019-08-06 15:56:23 +01:00
|
|
|
|
2019-09-19 17:19:29 +01:00
|
|
|
resp, err := client.client.ListBuckets(ctx, params.toRequest(client.header()))
|
2019-07-12 13:57:02 +01:00
|
|
|
if err != nil {
|
|
|
|
return storj.BucketList{}, Error.Wrap(err)
|
|
|
|
}
|
|
|
|
resultBucketList := storj.BucketList{
|
|
|
|
More: resp.GetMore(),
|
|
|
|
}
|
|
|
|
resultBucketList.Items = make([]storj.Bucket, len(resp.GetItems()))
|
|
|
|
for i, item := range resp.GetItems() {
|
|
|
|
resultBucketList.Items[i] = storj.Bucket{
|
|
|
|
Name: string(item.GetName()),
|
|
|
|
Created: item.GetCreatedAt(),
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return resultBucketList, nil
|
|
|
|
}
|
|
|
|
|
2019-07-19 16:17:34 +01:00
|
|
|
// convertProtoToBucket translates a wire-level bucket into storj.Bucket.
func convertProtoToBucket(pbBucket *pb.Bucket) (bucket storj.Bucket, err error) {
	defaultRS := pbBucket.GetDefaultRedundancyScheme()
	defaultEP := pbBucket.GetDefaultEncryptionParameters()
	var partnerID uuid.UUID
	// NOTE(review): the raw partner id bytes are parsed with UnmarshalJSON,
	// and a parse failure is only fatal when partnerID is non-zero —
	// confirm this lenient handling of empty/absent partner ids is intended.
	err = partnerID.UnmarshalJSON(pbBucket.GetPartnerId())
	if err != nil && !partnerID.IsZero() {
		return bucket, errs.New("Invalid uuid")
	}
	return storj.Bucket{
		Name:                string(pbBucket.GetName()),
		PartnerID:           partnerID,
		PathCipher:          storj.CipherSuite(pbBucket.GetPathCipher()),
		Created:             pbBucket.GetCreatedAt(),
		DefaultSegmentsSize: pbBucket.GetDefaultSegmentSize(),
		DefaultRedundancyScheme: storj.RedundancyScheme{
			Algorithm:      storj.RedundancyAlgorithm(defaultRS.GetType()),
			ShareSize:      defaultRS.GetErasureShareSize(),
			RequiredShares: int16(defaultRS.GetMinReq()),
			RepairShares:   int16(defaultRS.GetRepairThreshold()),
			OptimalShares:  int16(defaultRS.GetSuccessThreshold()),
			TotalShares:    int16(defaultRS.GetTotal()),
		},
		DefaultEncryptionParameters: storj.EncryptionParameters{
			CipherSuite: storj.CipherSuite(defaultEP.CipherSuite),
			BlockSize:   int32(defaultEP.BlockSize),
		},
	}, nil
}
|
2019-07-16 11:39:23 +01:00
|
|
|
|
2019-08-06 15:56:23 +01:00
|
|
|
// SetBucketAttributionParams parameters for SetBucketAttribution method
|
|
|
|
type SetBucketAttributionParams struct {
|
|
|
|
Bucket string
|
|
|
|
PartnerID uuid.UUID
|
|
|
|
}
|
|
|
|
|
2019-09-19 17:19:29 +01:00
|
|
|
func (params *SetBucketAttributionParams) toRequest(header *pb.RequestHeader) *pb.BucketSetAttributionRequest {
|
2019-11-26 11:12:37 +00:00
|
|
|
var bytes []byte
|
|
|
|
if !params.PartnerID.IsZero() {
|
|
|
|
bytes = params.PartnerID[:]
|
|
|
|
}
|
|
|
|
|
2019-08-06 15:56:23 +01:00
|
|
|
return &pb.BucketSetAttributionRequest{
|
2019-09-19 17:19:29 +01:00
|
|
|
Header: header,
|
2019-08-06 15:56:23 +01:00
|
|
|
Name: []byte(params.Bucket),
|
2019-11-26 11:12:37 +00:00
|
|
|
PartnerId: bytes,
|
2019-08-06 15:56:23 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// BatchItem returns single item for batch request
|
|
|
|
func (params *SetBucketAttributionParams) BatchItem() *pb.BatchRequestItem {
|
|
|
|
return &pb.BatchRequestItem{
|
|
|
|
Request: &pb.BatchRequestItem_BucketSetAttribution{
|
2019-09-19 17:19:29 +01:00
|
|
|
BucketSetAttribution: params.toRequest(nil),
|
2019-08-06 15:56:23 +01:00
|
|
|
},
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// SetBucketAttribution tries to set the attribution information on the bucket.
// A nil error from the RPC wraps to nil, so success returns nil.
func (client *Client) SetBucketAttribution(ctx context.Context, params SetBucketAttributionParams) (err error) {
	defer mon.Task()(&ctx)(&err)

	_, err = client.client.SetBucketAttribution(ctx, params.toRequest(client.header()))

	return Error.Wrap(err)
}
|
|
|
|
|
2019-07-24 12:33:23 +01:00
|
|
|
// BeginObjectParams parameters for BeginObject method
type BeginObjectParams struct {
	Bucket               []byte
	EncryptedPath        []byte
	Version              int32
	Redundancy           storj.RedundancyScheme
	EncryptionParameters storj.EncryptionParameters
	ExpiresAt            time.Time
}

// toRequest translates the parameters into the wire-level request,
// expanding redundancy and encryption settings into their proto forms.
func (params *BeginObjectParams) toRequest(header *pb.RequestHeader) *pb.ObjectBeginRequest {
	return &pb.ObjectBeginRequest{
		Header:        header,
		Bucket:        params.Bucket,
		EncryptedPath: params.EncryptedPath,
		Version:       params.Version,
		ExpiresAt:     params.ExpiresAt,
		RedundancyScheme: &pb.RedundancyScheme{
			Type:             pb.RedundancyScheme_SchemeType(params.Redundancy.Algorithm),
			ErasureShareSize: params.Redundancy.ShareSize,
			MinReq:           int32(params.Redundancy.RequiredShares),
			RepairThreshold:  int32(params.Redundancy.RepairShares),
			SuccessThreshold: int32(params.Redundancy.OptimalShares),
			Total:            int32(params.Redundancy.TotalShares),
		},
		EncryptionParameters: &pb.EncryptionParameters{
			CipherSuite: pb.CipherSuite(params.EncryptionParameters.CipherSuite),
			BlockSize:   int64(params.EncryptionParameters.BlockSize),
		},
	}
}

// BatchItem returns single item for batch request
func (params *BeginObjectParams) BatchItem() *pb.BatchRequestItem {
	return &pb.BatchRequestItem{
		Request: &pb.BatchRequestItem_ObjectBegin{
			ObjectBegin: params.toRequest(nil),
		},
	}
}
|
|
|
|
|
|
|
|
// BeginObjectResponse response for BeginObject request
type BeginObjectResponse struct {
	StreamID storj.StreamID
}

// newBeginObjectResponse wraps the stream id from the proto response.
func newBeginObjectResponse(response *pb.ObjectBeginResponse) BeginObjectResponse {
	return BeginObjectResponse{
		StreamID: response.StreamId,
	}
}
|
|
|
|
|
|
|
|
// BeginObject begins object creation
|
|
|
|
func (client *Client) BeginObject(ctx context.Context, params BeginObjectParams) (_ storj.StreamID, err error) {
|
|
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
|
2019-09-19 17:19:29 +01:00
|
|
|
response, err := client.client.BeginObject(ctx, params.toRequest(client.header()))
|
2019-07-16 11:39:23 +01:00
|
|
|
if err != nil {
|
|
|
|
return nil, Error.Wrap(err)
|
|
|
|
}
|
|
|
|
|
|
|
|
return response.StreamId, nil
|
|
|
|
}
|
|
|
|
|
2019-08-01 10:04:31 +01:00
|
|
|
// CommitObjectParams parameters for CommitObject method
type CommitObjectParams struct {
	StreamID storj.StreamID

	EncryptedMetadataNonce storj.Nonce
	EncryptedMetadata      []byte
}

// toRequest translates the parameters into the wire-level request.
func (params *CommitObjectParams) toRequest(header *pb.RequestHeader) *pb.ObjectCommitRequest {
	return &pb.ObjectCommitRequest{
		Header:                 header,
		StreamId:               params.StreamID,
		EncryptedMetadataNonce: params.EncryptedMetadataNonce,
		EncryptedMetadata:      params.EncryptedMetadata,
	}
}

// BatchItem returns single item for batch request
func (params *CommitObjectParams) BatchItem() *pb.BatchRequestItem {
	return &pb.BatchRequestItem{
		Request: &pb.BatchRequestItem_ObjectCommit{
			ObjectCommit: params.toRequest(nil),
		},
	}
}
|
|
|
|
|
2019-07-16 11:39:23 +01:00
|
|
|
// CommitObject commits created object
// A nil error from the RPC wraps to nil, so success returns nil.
func (client *Client) CommitObject(ctx context.Context, params CommitObjectParams) (err error) {
	defer mon.Task()(&ctx)(&err)

	_, err = client.client.CommitObject(ctx, params.toRequest(client.header()))

	return Error.Wrap(err)
}
|
|
|
|
|
2019-07-24 12:33:23 +01:00
|
|
|
// GetObjectParams parameters for GetObject method
type GetObjectParams struct {
	Bucket        []byte
	EncryptedPath []byte
	Version       int32
}

// toRequest translates the parameters into the wire-level request.
func (params *GetObjectParams) toRequest(header *pb.RequestHeader) *pb.ObjectGetRequest {
	return &pb.ObjectGetRequest{
		Header:        header,
		Bucket:        params.Bucket,
		EncryptedPath: params.EncryptedPath,
		Version:       params.Version,
	}
}

// BatchItem returns single item for batch request
func (params *GetObjectParams) BatchItem() *pb.BatchRequestItem {
	return &pb.BatchRequestItem{
		Request: &pb.BatchRequestItem_ObjectGet{
			ObjectGet: params.toRequest(nil),
		},
	}
}
|
2019-07-23 12:09:12 +01:00
|
|
|
|
2019-08-06 15:56:23 +01:00
|
|
|
// GetObjectResponse response for GetObject request
type GetObjectResponse struct {
	Info storj.ObjectInfo
}

// newGetObjectResponse converts the proto object into storj.ObjectInfo,
// including its encryption parameters and (when present) redundancy scheme.
func newGetObjectResponse(response *pb.ObjectGetResponse) GetObjectResponse {
	object := storj.ObjectInfo{
		Bucket: string(response.Object.Bucket),
		Path:   storj.Path(response.Object.EncryptedPath),

		StreamID: response.Object.StreamId,

		Created: response.Object.CreatedAt,
		// Modified mirrors CreatedAt — presumably objects are immutable
		// once committed; confirm before relying on Modified.
		Modified: response.Object.CreatedAt,
		Expires:  response.Object.ExpiresAt,
		Metadata: response.Object.EncryptedMetadata,
		Stream: storj.Stream{
			Size: response.Object.TotalSize,
			EncryptionParameters: storj.EncryptionParameters{
				CipherSuite: storj.CipherSuite(response.Object.EncryptionParameters.CipherSuite),
				BlockSize:   int32(response.Object.EncryptionParameters.BlockSize),
			},
		},
	}

	// RedundancyScheme is optional on the wire; only populate it when set.
	pbRS := response.Object.RedundancyScheme
	if pbRS != nil {
		object.Stream.RedundancyScheme = storj.RedundancyScheme{
			Algorithm:      storj.RedundancyAlgorithm(pbRS.Type),
			ShareSize:      pbRS.ErasureShareSize,
			RequiredShares: int16(pbRS.MinReq),
			RepairShares:   int16(pbRS.RepairThreshold),
			OptimalShares:  int16(pbRS.SuccessThreshold),
			TotalShares:    int16(pbRS.Total),
		}
	}
	return GetObjectResponse{
		Info: object,
	}
}
|
|
|
|
|
|
|
|
// GetObject gets single object
// translating the RPC not-found status into storj.ErrObjectNotFound.
func (client *Client) GetObject(ctx context.Context, params GetObjectParams) (_ storj.ObjectInfo, err error) {
	defer mon.Task()(&ctx)(&err)

	response, err := client.client.GetObject(ctx, params.toRequest(client.header()))

	if err != nil {
		if errs2.IsRPC(err, rpcstatus.NotFound) {
			return storj.ObjectInfo{}, storj.ErrObjectNotFound.Wrap(err)
		}
		return storj.ObjectInfo{}, Error.Wrap(err)
	}

	getResponse := newGetObjectResponse(response)
	return getResponse.Info, nil
}
|
|
|
|
|
2019-07-24 12:33:23 +01:00
|
|
|
// BeginDeleteObjectParams parameters for BeginDeleteObject method
type BeginDeleteObjectParams struct {
	Bucket        []byte
	EncryptedPath []byte
	Version       int32
}

// toRequest translates the parameters into the wire-level request.
func (params *BeginDeleteObjectParams) toRequest(header *pb.RequestHeader) *pb.ObjectBeginDeleteRequest {
	return &pb.ObjectBeginDeleteRequest{
		Header:        header,
		Bucket:        params.Bucket,
		EncryptedPath: params.EncryptedPath,
		Version:       params.Version,
	}
}

// BatchItem returns single item for batch request
func (params *BeginDeleteObjectParams) BatchItem() *pb.BatchRequestItem {
	return &pb.BatchRequestItem{
		Request: &pb.BatchRequestItem_ObjectBeginDelete{
			ObjectBeginDelete: params.toRequest(nil),
		},
	}
}

// BeginDeleteObjectResponse response for BeginDeleteObject request
type BeginDeleteObjectResponse struct {
	StreamID storj.StreamID
}

// newBeginDeleteObjectResponse wraps the stream id from the proto response.
func newBeginDeleteObjectResponse(response *pb.ObjectBeginDeleteResponse) BeginDeleteObjectResponse {
	return BeginDeleteObjectResponse{
		StreamID: response.StreamId,
	}
}
|
|
|
|
|
2019-07-16 11:39:23 +01:00
|
|
|
// BeginDeleteObject begins object deletion process
// translating the RPC not-found status into storj.ErrObjectNotFound.
func (client *Client) BeginDeleteObject(ctx context.Context, params BeginDeleteObjectParams) (_ storj.StreamID, err error) {
	defer mon.Task()(&ctx)(&err)

	response, err := client.client.BeginDeleteObject(ctx, params.toRequest(client.header()))
	if err != nil {
		if errs2.IsRPC(err, rpcstatus.NotFound) {
			return storj.StreamID{}, storj.ErrObjectNotFound.Wrap(err)
		}
		return storj.StreamID{}, Error.Wrap(err)
	}

	return response.StreamId, nil
}
|
|
|
|
|
2019-08-06 15:56:23 +01:00
|
|
|
// FinishDeleteObjectParams parameters for FinishDeleteObject method
type FinishDeleteObjectParams struct {
	StreamID storj.StreamID
}

// toRequest translates the parameters into the wire-level request.
func (params *FinishDeleteObjectParams) toRequest(header *pb.RequestHeader) *pb.ObjectFinishDeleteRequest {
	return &pb.ObjectFinishDeleteRequest{
		Header:   header,
		StreamId: params.StreamID,
	}
}

// BatchItem returns single item for batch request
func (params *FinishDeleteObjectParams) BatchItem() *pb.BatchRequestItem {
	return &pb.BatchRequestItem{
		Request: &pb.BatchRequestItem_ObjectFinishDelete{
			ObjectFinishDelete: params.toRequest(nil),
		},
	}
}
|
|
|
|
|
2019-07-16 11:39:23 +01:00
|
|
|
// FinishDeleteObject finishes object deletion process
// A nil error from the RPC wraps to nil, so success returns nil.
func (client *Client) FinishDeleteObject(ctx context.Context, params FinishDeleteObjectParams) (err error) {
	defer mon.Task()(&ctx)(&err)

	_, err = client.client.FinishDeleteObject(ctx, params.toRequest(client.header()))

	return Error.Wrap(err)
}
|
|
|
|
|
2019-07-24 12:33:23 +01:00
|
|
|
// ListObjectsParams parameters for ListObjects method
type ListObjectsParams struct {
	Bucket          []byte
	EncryptedPrefix []byte
	EncryptedCursor []byte
	Limit           int32
	IncludeMetadata bool
	Recursive       bool
}

// toRequest translates the listing parameters into the wire-level request.
func (params *ListObjectsParams) toRequest(header *pb.RequestHeader) *pb.ObjectListRequest {
	return &pb.ObjectListRequest{
		Header:          header,
		Bucket:          params.Bucket,
		EncryptedPrefix: params.EncryptedPrefix,
		EncryptedCursor: params.EncryptedCursor,
		Limit:           params.Limit,
		ObjectIncludes: &pb.ObjectListItemIncludes{
			Metadata: params.IncludeMetadata,
		},
		Recursive: params.Recursive,
	}
}

// BatchItem returns single item for batch request
func (params *ListObjectsParams) BatchItem() *pb.BatchRequestItem {
	return &pb.BatchRequestItem{
		Request: &pb.BatchRequestItem_ObjectList{
			ObjectList: params.toRequest(nil),
		},
	}
}

// ListObjectsResponse response for ListObjects request
type ListObjectsResponse struct {
	Items []storj.ObjectListItem
	More  bool
}
|
2019-07-16 11:39:23 +01:00
|
|
|
|
2019-08-06 15:56:23 +01:00
|
|
|
// newListObjectsResponse converts the proto listing into storj list items,
// marking directory-style prefixes in non-recursive listings.
func newListObjectsResponse(response *pb.ObjectListResponse, encryptedPrefix []byte, recursive bool) ListObjectsResponse {
	objects := make([]storj.ObjectListItem, len(response.Items))
	for i, object := range response.Items {
		encryptedPath := object.EncryptedPath
		isPrefix := false
		// In a non-recursive listing, an entry ending in '/' that is not
		// the listed prefix itself represents a sub-prefix ("directory").
		if !recursive && len(encryptedPath) != 0 && encryptedPath[len(encryptedPath)-1] == '/' && !bytes.Equal(encryptedPath, encryptedPrefix) {
			isPrefix = true
		}

		objects[i] = storj.ObjectListItem{
			EncryptedPath:          object.EncryptedPath,
			Version:                object.Version,
			Status:                 int32(object.Status),
			StatusAt:               object.StatusAt,
			CreatedAt:              object.CreatedAt,
			ExpiresAt:              object.ExpiresAt,
			EncryptedMetadataNonce: object.EncryptedMetadataNonce,
			EncryptedMetadata:      object.EncryptedMetadata,

			IsPrefix: isPrefix,
		}
	}

	return ListObjectsResponse{
		Items: objects,
		More:  response.More,
	}
}
|
|
|
|
|
|
|
|
// ListObjects lists objects according to specific parameters
|
|
|
|
func (client *Client) ListObjects(ctx context.Context, params ListObjectsParams) (_ []storj.ObjectListItem, more bool, err error) {
|
|
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
|
2019-09-19 17:19:29 +01:00
|
|
|
response, err := client.client.ListObjects(ctx, params.toRequest(client.header()))
|
2019-08-06 15:56:23 +01:00
|
|
|
if err != nil {
|
|
|
|
return []storj.ObjectListItem{}, false, Error.Wrap(err)
|
|
|
|
}
|
|
|
|
|
|
|
|
listResponse := newListObjectsResponse(response, params.EncryptedPrefix, params.Recursive)
|
|
|
|
return listResponse.Items, listResponse.More, Error.Wrap(err)
|
2019-07-16 11:39:23 +01:00
|
|
|
}
|
2019-07-24 12:33:23 +01:00
|
|
|
|
|
|
|
// BeginSegmentParams parameters for BeginSegment method
type BeginSegmentParams struct {
	// StreamID identifies the stream the new segment belongs to.
	StreamID storj.StreamID
	// Position is the part number and index of the segment within the stream.
	Position storj.SegmentPosition
	// MaxOrderLimit is forwarded as the request's MaxOrderLimit field;
	// presumably the upper bound for the issued order limits — confirm with
	// the satellite-side handler.
	MaxOrderLimit int64
}
|
|
|
|
|
2019-09-19 17:19:29 +01:00
|
|
|
func (params *BeginSegmentParams) toRequest(header *pb.RequestHeader) *pb.SegmentBeginRequest {
|
2019-08-06 15:56:23 +01:00
|
|
|
return &pb.SegmentBeginRequest{
|
2019-09-19 17:19:29 +01:00
|
|
|
Header: header,
|
2019-07-24 12:33:23 +01:00
|
|
|
StreamId: params.StreamID,
|
|
|
|
Position: &pb.SegmentPosition{
|
|
|
|
PartNumber: params.Position.PartNumber,
|
|
|
|
Index: params.Position.Index,
|
|
|
|
},
|
2019-09-10 16:39:47 +01:00
|
|
|
MaxOrderLimit: params.MaxOrderLimit,
|
2019-08-06 15:56:23 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// BatchItem returns single item for batch request
|
|
|
|
func (params *BeginSegmentParams) BatchItem() *pb.BatchRequestItem {
|
|
|
|
return &pb.BatchRequestItem{
|
|
|
|
Request: &pb.BatchRequestItem_SegmentBegin{
|
2019-09-19 17:19:29 +01:00
|
|
|
SegmentBegin: params.toRequest(nil),
|
2019-08-06 15:56:23 +01:00
|
|
|
},
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// BeginSegmentResponse response for BeginSegment request
type BeginSegmentResponse struct {
	// SegmentID identifies the segment being uploaded.
	SegmentID storj.SegmentID
	// Limits are the per-node addressed order limits for uploading pieces.
	Limits []*pb.AddressedOrderLimit
	// PiecePrivateKey is the private key used to sign piece uploads.
	PiecePrivateKey storj.PiecePrivateKey
}
|
|
|
|
|
|
|
|
func newBeginSegmentResponse(response *pb.SegmentBeginResponse) BeginSegmentResponse {
|
|
|
|
return BeginSegmentResponse{
|
|
|
|
SegmentID: response.SegmentId,
|
|
|
|
Limits: response.AddressedLimits,
|
|
|
|
PiecePrivateKey: response.PrivateKey,
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// BeginSegment begins segment upload
|
|
|
|
func (client *Client) BeginSegment(ctx context.Context, params BeginSegmentParams) (_ storj.SegmentID, limits []*pb.AddressedOrderLimit, piecePrivateKey storj.PiecePrivateKey, err error) {
|
|
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
|
2019-09-19 17:19:29 +01:00
|
|
|
response, err := client.client.BeginSegment(ctx, params.toRequest(client.header()))
|
2019-07-24 12:33:23 +01:00
|
|
|
if err != nil {
|
|
|
|
return storj.SegmentID{}, nil, storj.PiecePrivateKey{}, Error.Wrap(err)
|
|
|
|
}
|
|
|
|
|
|
|
|
return response.SegmentId, response.AddressedLimits, response.PrivateKey, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// CommitSegmentParams parameters for CommitSegment method
type CommitSegmentParams struct {
	// SegmentID identifies the segment being committed.
	SegmentID storj.SegmentID
	// Encryption carries the encrypted key and nonce for the segment.
	Encryption storj.SegmentEncryption
	// SizeEncryptedData is the total size of the encrypted segment data.
	SizeEncryptedData int64

	// UploadResult holds the per-piece upload results reported by storage nodes.
	UploadResult []*pb.SegmentPieceUploadResult
}
|
|
|
|
|
2019-09-19 17:19:29 +01:00
|
|
|
func (params *CommitSegmentParams) toRequest(header *pb.RequestHeader) *pb.SegmentCommitRequest {
|
2019-08-06 15:56:23 +01:00
|
|
|
return &pb.SegmentCommitRequest{
|
2019-09-19 17:19:29 +01:00
|
|
|
Header: header,
|
2019-08-01 10:04:31 +01:00
|
|
|
SegmentId: params.SegmentID,
|
|
|
|
|
|
|
|
EncryptedKeyNonce: params.Encryption.EncryptedKeyNonce,
|
|
|
|
EncryptedKey: params.Encryption.EncryptedKey,
|
2019-07-24 12:33:23 +01:00
|
|
|
SizeEncryptedData: params.SizeEncryptedData,
|
|
|
|
UploadResult: params.UploadResult,
|
|
|
|
}
|
2019-08-06 15:56:23 +01:00
|
|
|
}
|
2019-07-24 12:33:23 +01:00
|
|
|
|
2019-08-06 15:56:23 +01:00
|
|
|
// BatchItem returns single item for batch request
|
|
|
|
func (params *CommitSegmentParams) BatchItem() *pb.BatchRequestItem {
|
|
|
|
return &pb.BatchRequestItem{
|
|
|
|
Request: &pb.BatchRequestItem_SegmentCommit{
|
2019-09-19 17:19:29 +01:00
|
|
|
SegmentCommit: params.toRequest(nil),
|
2019-08-06 15:56:23 +01:00
|
|
|
},
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-10-23 07:59:56 +01:00
|
|
|
// CommitSegment commits segment after upload
|
|
|
|
func (client *Client) CommitSegment(ctx context.Context, params CommitSegmentParams) (err error) {
|
2019-08-06 15:56:23 +01:00
|
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
|
2019-09-19 17:19:29 +01:00
|
|
|
_, err = client.client.CommitSegment(ctx, params.toRequest(client.header()))
|
2019-08-06 15:56:23 +01:00
|
|
|
|
|
|
|
return Error.Wrap(err)
|
2019-07-24 12:33:23 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
// MakeInlineSegmentParams parameters for MakeInlineSegment method
type MakeInlineSegmentParams struct {
	// StreamID identifies the stream the inline segment belongs to.
	StreamID storj.StreamID
	// Position is the part number and index of the segment within the stream.
	Position storj.SegmentPosition
	// Encryption carries the encrypted key and nonce for the segment.
	Encryption storj.SegmentEncryption
	// EncryptedInlineData is the already-encrypted segment payload stored
	// inline in metainfo instead of on storage nodes.
	EncryptedInlineData []byte
}
|
|
|
|
|
2019-09-19 17:19:29 +01:00
|
|
|
func (params *MakeInlineSegmentParams) toRequest(header *pb.RequestHeader) *pb.SegmentMakeInlineRequest {
|
2019-08-06 15:56:23 +01:00
|
|
|
return &pb.SegmentMakeInlineRequest{
|
2019-09-19 17:19:29 +01:00
|
|
|
Header: header,
|
2019-07-24 12:33:23 +01:00
|
|
|
StreamId: params.StreamID,
|
|
|
|
Position: &pb.SegmentPosition{
|
|
|
|
PartNumber: params.Position.PartNumber,
|
|
|
|
Index: params.Position.Index,
|
|
|
|
},
|
2019-08-01 10:04:31 +01:00
|
|
|
EncryptedKeyNonce: params.Encryption.EncryptedKeyNonce,
|
|
|
|
EncryptedKey: params.Encryption.EncryptedKey,
|
2019-07-24 12:33:23 +01:00
|
|
|
EncryptedInlineData: params.EncryptedInlineData,
|
|
|
|
}
|
2019-08-06 15:56:23 +01:00
|
|
|
}
|
2019-07-24 12:33:23 +01:00
|
|
|
|
2019-08-06 15:56:23 +01:00
|
|
|
// BatchItem returns single item for batch request
|
|
|
|
func (params *MakeInlineSegmentParams) BatchItem() *pb.BatchRequestItem {
|
|
|
|
return &pb.BatchRequestItem{
|
|
|
|
Request: &pb.BatchRequestItem_SegmentMakeInline{
|
2019-09-19 17:19:29 +01:00
|
|
|
SegmentMakeInline: params.toRequest(nil),
|
2019-08-06 15:56:23 +01:00
|
|
|
},
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// MakeInlineSegment creates an inline segment whose data is stored in
// metainfo rather than on storage nodes.
// NOTE(review): the previous comment ("commits segment after upload") was
// copied from CommitSegment and did not describe this method.
func (client *Client) MakeInlineSegment(ctx context.Context, params MakeInlineSegmentParams) (err error) {
	defer mon.Task()(&ctx)(&err)

	// Error.Wrap(nil) yields nil, so success falls through unchanged.
	_, err = client.client.MakeInlineSegment(ctx, params.toRequest(client.header()))

	return Error.Wrap(err)
}
|
|
|
|
|
|
|
|
// BeginDeleteSegmentParams parameters for BeginDeleteSegment method
type BeginDeleteSegmentParams struct {
	// StreamID identifies the stream containing the segment to delete.
	StreamID storj.StreamID
	// Position is the part number and index of the segment within the stream.
	Position storj.SegmentPosition
}
|
|
|
|
|
2019-09-19 17:19:29 +01:00
|
|
|
func (params *BeginDeleteSegmentParams) toRequest(header *pb.RequestHeader) *pb.SegmentBeginDeleteRequest {
|
2019-08-06 15:56:23 +01:00
|
|
|
return &pb.SegmentBeginDeleteRequest{
|
2019-09-19 17:19:29 +01:00
|
|
|
Header: header,
|
2019-07-24 12:33:23 +01:00
|
|
|
StreamId: params.StreamID,
|
|
|
|
Position: &pb.SegmentPosition{
|
|
|
|
PartNumber: params.Position.PartNumber,
|
|
|
|
Index: params.Position.Index,
|
|
|
|
},
|
2019-08-06 15:56:23 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// BatchItem returns single item for batch request
|
|
|
|
func (params *BeginDeleteSegmentParams) BatchItem() *pb.BatchRequestItem {
|
|
|
|
return &pb.BatchRequestItem{
|
|
|
|
Request: &pb.BatchRequestItem_SegmentBeginDelete{
|
2019-09-19 17:19:29 +01:00
|
|
|
SegmentBeginDelete: params.toRequest(nil),
|
2019-08-06 15:56:23 +01:00
|
|
|
},
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// BeginDeleteSegmentResponse response for BeginDeleteSegment request
type BeginDeleteSegmentResponse struct {
	// SegmentID identifies the segment being deleted.
	SegmentID storj.SegmentID
	// Limits are the per-node addressed order limits for deleting pieces.
	Limits []*pb.AddressedOrderLimit
	// PiecePrivateKey is the private key used to sign piece deletions.
	PiecePrivateKey storj.PiecePrivateKey
}
|
|
|
|
|
|
|
|
func newBeginDeleteSegmentResponse(response *pb.SegmentBeginDeleteResponse) BeginDeleteSegmentResponse {
|
|
|
|
return BeginDeleteSegmentResponse{
|
|
|
|
SegmentID: response.SegmentId,
|
|
|
|
Limits: response.AddressedLimits,
|
|
|
|
PiecePrivateKey: response.PrivateKey,
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// BeginDeleteSegment begins segment upload process
|
|
|
|
func (client *Client) BeginDeleteSegment(ctx context.Context, params BeginDeleteSegmentParams) (_ storj.SegmentID, limits []*pb.AddressedOrderLimit, _ storj.PiecePrivateKey, err error) {
|
|
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
|
2019-09-19 17:19:29 +01:00
|
|
|
response, err := client.client.BeginDeleteSegment(ctx, params.toRequest(client.header()))
|
2019-07-24 12:33:23 +01:00
|
|
|
if err != nil {
|
2019-08-01 10:04:31 +01:00
|
|
|
return storj.SegmentID{}, nil, storj.PiecePrivateKey{}, Error.Wrap(err)
|
2019-07-24 12:33:23 +01:00
|
|
|
}
|
|
|
|
|
2019-08-01 10:04:31 +01:00
|
|
|
return response.SegmentId, response.AddressedLimits, response.PrivateKey, nil
|
2019-07-24 12:33:23 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
// FinishDeleteSegmentParams parameters for FinishDeleteSegment method
type FinishDeleteSegmentParams struct {
	// SegmentID identifies the segment whose deletion is being finished.
	SegmentID storj.SegmentID

	// DeleteResults holds the per-piece delete results reported by storage nodes.
	DeleteResults []*pb.SegmentPieceDeleteResult
}
|
|
|
|
|
2019-09-19 17:19:29 +01:00
|
|
|
func (params *FinishDeleteSegmentParams) toRequest(header *pb.RequestHeader) *pb.SegmentFinishDeleteRequest {
|
2019-08-06 15:56:23 +01:00
|
|
|
return &pb.SegmentFinishDeleteRequest{
|
2019-09-19 17:19:29 +01:00
|
|
|
Header: header,
|
2019-08-06 15:56:23 +01:00
|
|
|
SegmentId: params.SegmentID,
|
|
|
|
Results: params.DeleteResults,
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// BatchItem returns single item for batch request
|
|
|
|
func (params *FinishDeleteSegmentParams) BatchItem() *pb.BatchRequestItem {
|
|
|
|
return &pb.BatchRequestItem{
|
|
|
|
Request: &pb.BatchRequestItem_SegmentFinishDelete{
|
2019-09-19 17:19:29 +01:00
|
|
|
SegmentFinishDelete: params.toRequest(nil),
|
2019-08-06 15:56:23 +01:00
|
|
|
},
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-07-24 12:33:23 +01:00
|
|
|
// FinishDeleteSegment finishes the segment delete process by reporting the
// per-piece delete results to the satellite.
// NOTE(review): the previous comment said "finishes segment upload process",
// a copy-paste error — this method clearly finishes a delete.
func (client *Client) FinishDeleteSegment(ctx context.Context, params FinishDeleteSegmentParams) (err error) {
	defer mon.Task()(&ctx)(&err)

	// Error.Wrap(nil) yields nil, so success falls through unchanged.
	_, err = client.client.FinishDeleteSegment(ctx, params.toRequest(client.header()))

	return Error.Wrap(err)
}
|
|
|
|
|
|
|
|
// DownloadSegmentParams parameters for DownloadSegment method
type DownloadSegmentParams struct {
	// StreamID identifies the stream containing the segment to download.
	StreamID storj.StreamID
	// Position is the part number and index of the segment within the stream.
	Position storj.SegmentPosition
}
|
|
|
|
|
2019-09-19 17:19:29 +01:00
|
|
|
func (params *DownloadSegmentParams) toRequest(header *pb.RequestHeader) *pb.SegmentDownloadRequest {
|
2019-08-06 15:56:23 +01:00
|
|
|
return &pb.SegmentDownloadRequest{
|
2019-09-19 17:19:29 +01:00
|
|
|
Header: header,
|
2019-07-24 12:33:23 +01:00
|
|
|
StreamId: params.StreamID,
|
|
|
|
CursorPosition: &pb.SegmentPosition{
|
|
|
|
PartNumber: params.Position.PartNumber,
|
|
|
|
Index: params.Position.Index,
|
|
|
|
},
|
|
|
|
}
|
2019-08-06 15:56:23 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
// BatchItem returns single item for batch request
|
|
|
|
func (params *DownloadSegmentParams) BatchItem() *pb.BatchRequestItem {
|
|
|
|
return &pb.BatchRequestItem{
|
|
|
|
Request: &pb.BatchRequestItem_SegmentDownload{
|
2019-09-19 17:19:29 +01:00
|
|
|
SegmentDownload: params.toRequest(nil),
|
2019-08-06 15:56:23 +01:00
|
|
|
},
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// DownloadSegmentResponse response for DownloadSegment request
type DownloadSegmentResponse struct {
	// Info describes the segment: ID, size, encryption, inline data, and the
	// position of the next segment if any.
	Info storj.SegmentDownloadInfo

	// Limits are the per-node addressed order limits for downloading pieces;
	// entries may be nil for pieces without a usable limit.
	Limits []*pb.AddressedOrderLimit
}
|
|
|
|
|
|
|
|
// newDownloadSegmentResponse converts the protobuf download response into a
// DownloadSegmentResponse, normalizing addressed limits so that entries
// lacking an inner Limit become nil.
func newDownloadSegmentResponse(response *pb.SegmentDownloadResponse) DownloadSegmentResponse {
	info := storj.SegmentDownloadInfo{
		SegmentID:           response.SegmentId,
		Size:                response.SegmentSize,
		EncryptedInlineData: response.EncryptedInlineData,
		PiecePrivateKey:     response.PrivateKey,
		SegmentEncryption: storj.SegmentEncryption{
			EncryptedKeyNonce: response.EncryptedKeyNonce,
			EncryptedKey:      response.EncryptedKey,
		},
	}
	// Next is optional in the protobuf; only set the position when present.
	if response.Next != nil {
		info.Next = storj.SegmentPosition{
			PartNumber: response.Next.PartNumber,
			Index:      response.Next.Index,
		}
	}

	// NOTE: this mutates response.AddressedLimits in place — positional
	// alignment with pieces is preserved by nil-ing entries rather than
	// removing them.
	for i := range response.AddressedLimits {
		if response.AddressedLimits[i].Limit == nil {
			response.AddressedLimits[i] = nil
		}
	}
	return DownloadSegmentResponse{
		Info:   info,
		Limits: response.AddressedLimits,
	}
}
|
|
|
|
|
|
|
|
// DownloadSegment gets info for downloading remote segment or data from inline segment
|
|
|
|
func (client *Client) DownloadSegment(ctx context.Context, params DownloadSegmentParams) (_ storj.SegmentDownloadInfo, _ []*pb.AddressedOrderLimit, err error) {
|
|
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
|
2019-09-19 17:19:29 +01:00
|
|
|
response, err := client.client.DownloadSegment(ctx, params.toRequest(client.header()))
|
2019-08-06 15:56:23 +01:00
|
|
|
if err != nil {
|
2019-11-19 12:58:26 +00:00
|
|
|
if errs2.IsRPC(err, rpcstatus.NotFound) {
|
|
|
|
return storj.SegmentDownloadInfo{}, nil, storj.ErrObjectNotFound.Wrap(err)
|
|
|
|
}
|
2019-08-06 15:56:23 +01:00
|
|
|
return storj.SegmentDownloadInfo{}, nil, Error.Wrap(err)
|
|
|
|
}
|
2019-08-01 10:04:31 +01:00
|
|
|
|
2019-08-06 15:56:23 +01:00
|
|
|
downloadResponse := newDownloadSegmentResponse(response)
|
|
|
|
return downloadResponse.Info, downloadResponse.Limits, nil
|
2019-07-24 12:33:23 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
// ListSegmentsParams parameters for ListSegment method
type ListSegmentsParams struct {
	// StreamID identifies the stream whose segments are listed.
	StreamID storj.StreamID
	// CursorPosition is the position to continue listing from.
	CursorPosition storj.SegmentPosition
	// Limit caps the number of items returned per page.
	Limit int32
}
|
|
|
|
|
2019-08-06 15:56:23 +01:00
|
|
|
// ListSegmentsResponse response for ListSegments request
type ListSegmentsResponse struct {
	// Items is the page of segment listing entries.
	Items []storj.SegmentListItem
	// More reports whether further entries exist beyond this page.
	More bool
}
|
2019-07-24 12:33:23 +01:00
|
|
|
|
2019-09-19 17:19:29 +01:00
|
|
|
func (params *ListSegmentsParams) toRequest(header *pb.RequestHeader) *pb.SegmentListRequest {
|
2019-08-06 15:56:23 +01:00
|
|
|
return &pb.SegmentListRequest{
|
2019-09-19 17:19:29 +01:00
|
|
|
Header: header,
|
2019-07-24 12:33:23 +01:00
|
|
|
StreamId: params.StreamID,
|
|
|
|
CursorPosition: &pb.SegmentPosition{
|
|
|
|
PartNumber: params.CursorPosition.PartNumber,
|
|
|
|
Index: params.CursorPosition.Index,
|
|
|
|
},
|
|
|
|
Limit: params.Limit,
|
|
|
|
}
|
2019-08-06 15:56:23 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
// BatchItem returns single item for batch request
|
|
|
|
func (params *ListSegmentsParams) BatchItem() *pb.BatchRequestItem {
|
|
|
|
return &pb.BatchRequestItem{
|
|
|
|
Request: &pb.BatchRequestItem_SegmentList{
|
2019-09-19 17:19:29 +01:00
|
|
|
SegmentList: params.toRequest(nil),
|
2019-08-06 15:56:23 +01:00
|
|
|
},
|
|
|
|
}
|
|
|
|
}
|
2019-07-24 12:33:23 +01:00
|
|
|
|
2019-08-06 15:56:23 +01:00
|
|
|
func newListSegmentsResponse(response *pb.SegmentListResponse) ListSegmentsResponse {
|
2019-07-24 12:33:23 +01:00
|
|
|
items := make([]storj.SegmentListItem, len(response.Items))
|
|
|
|
for i, responseItem := range response.Items {
|
|
|
|
items[i] = storj.SegmentListItem{
|
|
|
|
Position: storj.SegmentPosition{
|
|
|
|
PartNumber: responseItem.Position.PartNumber,
|
|
|
|
Index: responseItem.Position.Index,
|
|
|
|
},
|
|
|
|
}
|
|
|
|
}
|
2019-08-06 15:56:23 +01:00
|
|
|
return ListSegmentsResponse{
|
|
|
|
Items: items,
|
|
|
|
More: response.More,
|
|
|
|
}
|
2019-07-24 12:33:23 +01:00
|
|
|
}
|
2019-08-05 08:07:40 +01:00
|
|
|
|
2019-10-23 07:59:56 +01:00
|
|
|
// ListSegments lists object segments
|
|
|
|
func (client *Client) ListSegments(ctx context.Context, params ListSegmentsParams) (_ []storj.SegmentListItem, more bool, err error) {
|
2019-08-06 15:56:23 +01:00
|
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
|
2019-09-19 17:19:29 +01:00
|
|
|
response, err := client.client.ListSegments(ctx, params.toRequest(client.header()))
|
2019-08-06 15:56:23 +01:00
|
|
|
if err != nil {
|
2019-09-19 05:46:39 +01:00
|
|
|
if errs2.IsRPC(err, rpcstatus.NotFound) {
|
2019-08-30 22:30:18 +01:00
|
|
|
return []storj.SegmentListItem{}, false, storj.ErrObjectNotFound.Wrap(err)
|
|
|
|
}
|
2019-08-06 15:56:23 +01:00
|
|
|
return []storj.SegmentListItem{}, false, Error.Wrap(err)
|
|
|
|
}
|
|
|
|
|
|
|
|
listResponse := newListSegmentsResponse(response)
|
|
|
|
return listResponse.Items, listResponse.More, Error.Wrap(err)
|
2019-08-05 08:07:40 +01:00
|
|
|
}
|
|
|
|
|
2019-08-06 15:56:23 +01:00
|
|
|
// Batch sends multiple requests in one batch
|
|
|
|
func (client *Client) Batch(ctx context.Context, requests ...BatchItem) (resp []BatchResponse, err error) {
|
2019-08-05 08:07:40 +01:00
|
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
|
2019-08-06 15:56:23 +01:00
|
|
|
batchItems := make([]*pb.BatchRequestItem, len(requests))
|
|
|
|
for i, request := range requests {
|
|
|
|
batchItems[i] = request.BatchItem()
|
|
|
|
}
|
|
|
|
response, err := client.client.Batch(ctx, &pb.BatchRequest{
|
2019-09-19 17:19:29 +01:00
|
|
|
Header: client.header(),
|
2019-08-06 15:56:23 +01:00
|
|
|
Requests: batchItems,
|
2019-08-05 08:07:40 +01:00
|
|
|
})
|
2019-08-06 15:56:23 +01:00
|
|
|
if err != nil {
|
2019-10-24 22:18:48 +01:00
|
|
|
if errs2.IsRPC(err, rpcstatus.NotFound) {
|
|
|
|
return []BatchResponse{}, storj.ErrObjectNotFound.Wrap(err)
|
|
|
|
}
|
|
|
|
|
|
|
|
return []BatchResponse{}, Error.Wrap(err)
|
2019-08-06 15:56:23 +01:00
|
|
|
}
|
2019-08-05 08:07:40 +01:00
|
|
|
|
2019-08-06 15:56:23 +01:00
|
|
|
resp = make([]BatchResponse, len(response.Responses))
|
|
|
|
for i, response := range response.Responses {
|
|
|
|
resp[i] = BatchResponse{
|
|
|
|
pbRequest: batchItems[i].Request,
|
|
|
|
pbResponse: response.Response,
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return resp, nil
|
2019-08-05 08:07:40 +01:00
|
|
|
}
|
2019-09-19 17:19:29 +01:00
|
|
|
|
|
|
|
// SetRawAPIKey sets the client's raw API key. Mainly used for testing.
// The key is used as-is; no validation or serialization is performed.
func (client *Client) SetRawAPIKey(key []byte) {
	client.apiKeyRaw = key
}
|