metainfo: move api keys to part of the request (#3069)

What: we move API keys out of the gRPC connection-level metadata on the client side and into the request protobufs directly. The server side still supports both mechanisms for backwards compatibility.

Why: dRPC won't support connection-level metadata, and the only thing we currently use connection-level metadata for is API keys, so we need to move all information a request needs into the request protobuf itself to support dRPC. Check out the .proto changes for the main details.
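
In practice, the client-side pattern changes roughly like this. A minimal sketch, assuming a hypothetical getBucket helper; the real wiring lives in the metainfo client's Dial and header() changes below:

import (
	"context"

	"storj.io/storj/pkg/macaroon"
	"storj.io/storj/pkg/pb"
	"storj.io/storj/pkg/transport"
)

// getBucket is a hypothetical helper showing the new flow: nothing is
// attached to the connection at dial time, and each request instead
// carries the raw API key in its RequestHeader (field 15).
func getBucket(ctx context.Context, tc transport.Client, address string, apiKey *macaroon.APIKey, name []byte) (*pb.BucketGetResponse, error) {
	conn, err := tc.DialAddress(ctx, address) // no grpc.WithPerRPCCredentials anymore
	if err != nil {
		return nil, err
	}
	defer func() { _ = conn.Close() }()

	client := pb.NewMetainfoClient(conn)
	return client.GetBucket(ctx, &pb.BucketGetRequest{
		Header: &pb.RequestHeader{ApiKey: apiKey.SerializeRaw()},
		Name:   name,
	})
}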

One fun side-fact: did you know that protobuf fields 1-15 are special, in that the field number and wire type together fit in a single tag byte? And did you know we don't use field 15 anywhere yet? So the new request header uses field 15, and all request protobufs should use field 15 for it going forward.
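
For the curious: a field's tag on the wire is the varint (field_number << 3) | wire_type, and one varint byte holds 7 bits, so any field number from 1 through 15 encodes in a single byte. A standalone arithmetic sketch (not code from this change) showing field 15 squeaking in under the limit:

package main

import "fmt"

// tagSize returns how many bytes the varint-encoded tag (field number
// plus wire type) of a protobuf field occupies on the wire.
func tagSize(fieldNumber, wireType uint64) int {
	key := fieldNumber<<3 | wireType
	n := 1
	for key >= 0x80 { // each varint byte carries 7 bits of the key
		key >>= 7
		n++
	}
	return n
}

func main() {
	fmt.Println(tagSize(15, 2)) // 1 -- field 15, length-delimited (e.g. an embedded RequestHeader)
	fmt.Println(tagSize(16, 2)) // 2 -- field 16 would already need a second tag byte
}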

Please describe the tests: all existing tests should pass

Please describe the performance impact: none
JT Olio authored on 2019-09-19 10:19:29 -06:00, committed by GitHub
parent d22987ea1d
commit 946ec201e2
22 changed files with 1078 additions and 534 deletions

View File

@ -39,7 +39,7 @@ type Uplink struct {
Transport transport.Client
StorageNodeCount int
APIKey map[storj.NodeID]string
APIKey map[storj.NodeID]*macaroon.APIKey
ProjectID map[storj.NodeID]uuid.UUID
}
@ -75,7 +75,7 @@ func (planet *Planet) newUplink(name string, storageNodeCount int) (*Uplink, err
Log: planet.log.Named(name),
Identity: identity,
StorageNodeCount: storageNodeCount,
APIKey: map[storj.NodeID]string{},
APIKey: map[storj.NodeID]*macaroon.APIKey{},
ProjectID: map[storj.NodeID]uuid.UUID{},
}
@ -126,7 +126,7 @@ func (planet *Planet) newUplink(name string, storageNodeCount int) (*Uplink, err
return nil, err
}
uplink.APIKey[satellite.ID()] = key.Serialize()
uplink.APIKey[satellite.ID()] = key
uplink.ProjectID[satellite.ID()] = project.ID
}
@ -148,7 +148,7 @@ func (client *Uplink) Local() pb.Node { return client.Info }
func (client *Uplink) Shutdown() error { return nil }
// DialMetainfo dials destination with apikey and returns metainfo Client
func (client *Uplink) DialMetainfo(ctx context.Context, destination Peer, apikey string) (*metainfo.Client, error) {
func (client *Uplink) DialMetainfo(ctx context.Context, destination Peer, apikey *macaroon.APIKey) (*metainfo.Client, error) {
return metainfo.Dial(ctx, client.Transport, destination.Addr(), apikey)
}
@ -338,7 +338,9 @@ func (client *Uplink) CreateBucket(ctx context.Context, satellite *SatelliteSyst
func (client *Uplink) GetConfig(satellite *SatelliteSystem) uplink.Config {
config := getDefaultConfig()
apiKey, err := libuplink.ParseAPIKey(client.APIKey[satellite.ID()])
// client.APIKey[satellite.ID()] is a *macaroon.APIKey, but we want a
// *libuplink.APIKey, so, serialize and parse for now
apiKey, err := libuplink.ParseAPIKey(client.APIKey[satellite.ID()].Serialize())
if err != nil {
panic(err)
}

View File

@ -266,9 +266,9 @@ func TestDeleteWithOfflineStoragenode(t *testing.T) {
err = planet.Uplinks[0].Delete(ctx, planet.Satellites[0], "test-bucket", "test-file")
require.Error(t, err)
apiKey := planet.Uplinks[0].APIKey[planet.Satellites[0].ID()]
key := planet.Uplinks[0].APIKey[planet.Satellites[0].ID()]
metainfoClient, err := planet.Uplinks[0].DialMetainfo(ctx, planet.Satellites[0], apiKey)
metainfoClient, err := planet.Uplinks[0].DialMetainfo(ctx, planet.Satellites[0], key)
require.NoError(t, err)
defer ctx.Check(metainfoClient.Close)

View File

@ -36,7 +36,7 @@ func testPlanetWithLibUplink(t *testing.T, cfg testConfig,
satellite := planet.Satellites[0]
cfg.uplinkCfg.Volatile.TLS.SkipPeerCAWhitelist = true
apiKey, err := uplink.ParseAPIKey(testUplink.APIKey[satellite.ID()])
apiKey, err := uplink.ParseAPIKey(testUplink.APIKey[satellite.ID()].Serialize())
if err != nil {
t.Fatalf("could not parse API key from testplanet: %v", err)
}
@ -66,7 +66,7 @@ func TestPartnerBucketAttrs(t *testing.T) {
SatelliteCount: 1, StorageNodeCount: 5, UplinkCount: 1,
}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
satellite := planet.Satellites[0]
apikey, err := uplink.ParseAPIKey(planet.Uplinks[0].APIKey[satellite.ID()])
apikey, err := uplink.ParseAPIKey(planet.Uplinks[0].APIKey[satellite.ID()].Serialize())
require.NoError(t, err)
partnerID := testrand.UUID()

View File

@ -26,7 +26,7 @@ func TestBucketExamples(t *testing.T) {
cfg.Volatile.TLS.SkipPeerCAWhitelist = true
satelliteAddr := planet.Satellites[0].Local().Address.Address
apiKey := planet.Uplinks[0].APIKey[planet.Satellites[0].ID()]
apiKey := planet.Uplinks[0].APIKey[planet.Satellites[0].ID()].Serialize()
out := bytes.NewBuffer(nil)
err := ListBucketsExample(ctx, satelliteAddr, apiKey, &cfg, out)

View File

@ -46,7 +46,7 @@ func TestPutGetList(t *testing.T) {
StorageNodeCount: 1,
UplinkCount: 1},
func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
apiKey := planet.Uplinks[0].APIKey[planet.Satellites[0].ID()]
apiKey := planet.Uplinks[0].APIKey[planet.Satellites[0].ID()].Serialize()
satelliteAddr := planet.Satellites[0].Local().Address.Address
tests := []putGetListTest{

View File

@ -161,7 +161,7 @@ func NewUplink(ctx context.Context, cfg *Config) (_ *Uplink, err error) {
func (u *Uplink) OpenProject(ctx context.Context, satelliteAddr string, apiKey APIKey) (p *Project, err error) {
defer mon.Task()(&ctx)(&err)
m, err := metainfo.Dial(ctx, u.tc, satelliteAddr, apiKey.Serialize())
m, err := metainfo.Dial(ctx, u.tc, satelliteAddr, apiKey.key)
if err != nil {
return nil, err
}

View File

@ -80,7 +80,7 @@ func TestC(t *testing.T) {
cmd.Dir = filepath.Dir(testexe)
cmd.Env = append(os.Environ(),
"SATELLITE_0_ADDR="+planet.Satellites[0].Addr(),
"GATEWAY_0_API_KEY="+planet.Uplinks[0].APIKey[planet.Satellites[0].ID()],
"GATEWAY_0_API_KEY="+planet.Uplinks[0].APIKey[planet.Satellites[0].ID()].Serialize(),
)
out, err := cmd.CombinedOutput()

View File

@ -122,7 +122,7 @@ func testHandlerRequests(t *testing.T, ctx *testcontext.Context, planet *testpla
err := planet.Uplinks[0].Upload(ctx, planet.Satellites[0], "testbucket", "test/foo", []byte("FOO"))
require.NoError(t, err)
apiKey, err := uplink.ParseAPIKey(planet.Uplinks[0].APIKey[planet.Satellites[0].ID()])
apiKey, err := uplink.ParseAPIKey(planet.Uplinks[0].APIKey[planet.Satellites[0].ID()].Serialize())
require.NoError(t, err)
scope, err := (&uplink.Scope{

View File

@ -32,25 +32,25 @@ func InterceptAPIKey(ctx context.Context, req interface{}, info *grpc.UnaryServe
return handler(auth.WithAPIKey(ctx, []byte(apikeys[0])), req)
}
// APIKeyCredentials implements grpc/credentials.PerRPCCredentials
// for authenticating with the grpc server.
type APIKeyCredentials struct {
// DeprecatedAPIKeyCredentials implements grpc/credentials.PerRPCCredentials
// for authenticating with the grpc server. This does not work with drpc.
type DeprecatedAPIKeyCredentials struct {
value string
}
// NewAPIKeyCredentials returns a new APIKeyCredentials
func NewAPIKeyCredentials(apikey string) *APIKeyCredentials {
return &APIKeyCredentials{apikey}
// NewDeprecatedAPIKeyCredentials returns a new DeprecatedAPIKeyCredentials
func NewDeprecatedAPIKeyCredentials(apikey string) *DeprecatedAPIKeyCredentials {
return &DeprecatedAPIKeyCredentials{apikey}
}
// GetRequestMetadata gets the current request metadata, refreshing tokens if required.
func (creds *APIKeyCredentials) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) {
func (creds *DeprecatedAPIKeyCredentials) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) {
return map[string]string{
"apikey": creds.value,
}, nil
}
// RequireTransportSecurity indicates whether the credentials requires transport security.
func (creds *APIKeyCredentials) RequireTransportSecurity() bool {
return false
func (creds *DeprecatedAPIKeyCredentials) RequireTransportSecurity() bool {
return false // Deprecated anyway, but how was this the right choice?
}

View File

@ -52,7 +52,7 @@ func TestAPIKey(t *testing.T) {
{"good key", codes.OK},
} {
conn, err := grpc.DialContext(ctx, listener.Addr().String(),
grpc.WithPerRPCCredentials(grpcauth.NewAPIKeyCredentials(test.apikey)),
grpc.WithPerRPCCredentials(grpcauth.NewDeprecatedAPIKeyCredentials(test.apikey)),
grpc.WithBlock(),
grpc.WithInsecure(),
)

View File

@ -19,6 +19,10 @@ const (
fieldSignature fieldType = 6
)
const (
version byte = 2
)
type packet struct {
fieldType fieldType
data []byte
@ -29,7 +33,7 @@ func (m *Macaroon) Serialize() (data []byte) {
ctx := context.TODO()
defer mon.Task()(&ctx)(nil)
// Start data from version int
data = append(data, 2)
data = append(data, version)
// Serilize Identity
data = serializePacket(data, packet{
@ -78,6 +82,12 @@ func appendVarint(data []byte, x int) []byte {
func ParseMacaroon(data []byte) (_ *Macaroon, err error) {
ctx := context.TODO()
defer mon.Task()(&ctx)(&err)
if len(data) < 2 {
return nil, errors.New("empty macaroon")
}
if data[0] != version {
return nil, errors.New("invalid macaroon version")
}
// skip version
data = data[1:]
// Parse Location

View File

@ -684,7 +684,7 @@ func initEnv(ctx context.Context, t *testing.T, planet *testplanet.Planet) (mini
return nil, nil, nil, err
}
m, err := planet.Uplinks[0].DialMetainfo(ctx, planet.Satellites[0], apiKey.Serialize())
m, err := planet.Uplinks[0].DialMetainfo(ctx, planet.Satellites[0], apiKey)
if err != nil {
return nil, nil, nil, err
}

File diff suppressed because it is too large

View File

@ -46,10 +46,14 @@ service Metainfo {
rpc DeleteSegmentOld(SegmentDeleteRequestOld) returns (SegmentDeleteResponseOld);
rpc ListSegmentsOld(ListSegmentsRequestOld) returns (ListSegmentsResponseOld);
rpc SetAttributionOld(SetAttributionRequestOld) returns (SetAttributionResponseOld);
rpc ProjectInfo(ProjectInfoRequest) returns (ProjectInfoResponse);
}
message RequestHeader {
bytes api_key = 1;
}
message Bucket {
bytes name = 1;
encryption.CipherSuite path_cipher = 2;
@ -69,6 +73,8 @@ message BucketListItem {
}
message BucketCreateRequest {
RequestHeader header = 15;
bytes name = 1;
encryption.CipherSuite path_cipher = 2;
@ -83,6 +89,8 @@ message BucketCreateResponse {
}
message BucketGetRequest {
RequestHeader header = 15;
bytes name = 1;
}
@ -91,6 +99,8 @@ message BucketGetResponse {
}
message BucketDeleteRequest {
RequestHeader header = 15;
bytes name = 1;
}
@ -98,6 +108,8 @@ message BucketDeleteResponse {
}
message BucketListRequest {
RequestHeader header = 15;
bytes cursor = 1;
int32 limit = 2;
int32 direction = 3;
@ -109,6 +121,8 @@ message BucketListResponse {
}
message BucketSetAttributionRequest {
RequestHeader header = 15;
bytes name = 1;
bytes partner_id = 2;
}
@ -122,6 +136,8 @@ message AddressedOrderLimit {
}
message SegmentWriteRequestOld {
RequestHeader header = 15;
bytes bucket = 1;
bytes path = 2;
int64 segment = 3;
@ -137,6 +153,8 @@ message SegmentWriteResponseOld {
}
message SegmentCommitRequestOld {
RequestHeader header = 15;
bytes bucket = 1;
bytes path = 2;
int64 segment = 3;
@ -149,6 +167,8 @@ message SegmentCommitResponseOld {
}
message SegmentDownloadRequestOld {
RequestHeader header = 15;
bytes bucket = 1;
bytes path = 2;
int64 segment = 3;
@ -161,6 +181,8 @@ message SegmentDownloadResponseOld {
}
message SegmentInfoRequestOld {
RequestHeader header = 15;
bytes bucket = 1;
bytes path = 2;
int64 segment = 3;
@ -171,6 +193,8 @@ message SegmentInfoResponseOld {
}
message SegmentDeleteRequestOld {
RequestHeader header = 15;
bytes bucket = 1;
bytes path = 2;
int64 segment = 3;
@ -182,6 +206,8 @@ message SegmentDeleteResponseOld {
}
message ListSegmentsRequestOld {
RequestHeader header = 15;
bytes bucket = 1;
bytes prefix = 2;
bytes start_after = 3;
@ -203,6 +229,8 @@ message ListSegmentsResponseOld {
}
message SetAttributionRequestOld {
RequestHeader header = 15;
bytes bucket_name = 1;
bytes partner_id = 2 ;
}
@ -211,6 +239,7 @@ message SetAttributionResponseOld {
}
message ProjectInfoRequest {
RequestHeader header = 15;
}
message ProjectInfoResponse {
@ -254,6 +283,8 @@ message Object {
}
message ObjectBeginRequest {
RequestHeader header = 15;
bytes bucket = 1;
bytes encrypted_path = 2;
int32 version = 3;
@ -276,6 +307,8 @@ message ObjectBeginResponse {
}
message ObjectCommitRequest {
RequestHeader header = 15;
bytes stream_id = 1 [(gogoproto.customtype) = "StreamID", (gogoproto.nullable) = false];
bytes encrypted_metadata_nonce = 2 [(gogoproto.customtype) = "Nonce", (gogoproto.nullable) = false];
@ -286,6 +319,8 @@ message ObjectCommitResponse {
}
message ObjectGetRequest {
RequestHeader header = 15;
bytes bucket = 1;
bytes encrypted_path = 2;
int32 version = 3;
@ -296,6 +331,8 @@ message ObjectGetResponse {
}
message ObjectListRequest {
RequestHeader header = 15;
bytes bucket = 1;
bytes encrypted_prefix = 2;
bytes encrypted_cursor = 3;
@ -303,8 +340,6 @@ message ObjectListRequest {
int32 limit = 5;
ObjectListItemIncludes object_includes = 6;
}
message ObjectListResponse {
@ -330,6 +365,8 @@ message ObjectListItemIncludes {
}
message ObjectBeginDeleteRequest {
RequestHeader header = 15;
bytes bucket = 1;
bytes encrypted_path = 2;
int32 version = 3;
@ -340,6 +377,8 @@ message ObjectBeginDeleteResponse {
}
message ObjectFinishDeleteRequest {
RequestHeader header = 15;
bytes stream_id = 1 [(gogoproto.customtype) = "StreamID", (gogoproto.nullable) = false];
}
@ -388,6 +427,8 @@ message SegmentPosition {
}
message SegmentBeginRequest {
RequestHeader header = 15;
bytes stream_id = 1 [(gogoproto.customtype) = "StreamID", (gogoproto.nullable) = false];
SegmentPosition position = 2;
@ -401,6 +442,8 @@ message SegmentBeginResponse {
}
message SegmentCommitRequest {
RequestHeader header = 15;
bytes segment_id = 1 [(gogoproto.customtype) = "SegmentID", (gogoproto.nullable) = false];
bytes encrypted_key_nonce = 2 [(gogoproto.customtype) = "Nonce", (gogoproto.nullable) = false];
@ -437,6 +480,8 @@ message SegmentCommitResponse {
}
message SegmentMakeInlineRequest {
RequestHeader header = 15;
bytes stream_id = 1 [(gogoproto.customtype) = "StreamID", (gogoproto.nullable) = false];
SegmentPosition position = 2;
@ -449,6 +494,8 @@ message SegmentMakeInlineRequest {
message SegmentMakeInlineResponse {}
message SegmentBeginDeleteRequest {
RequestHeader header = 15;
bytes stream_id = 1;
SegmentPosition position = 2;
}
@ -460,6 +507,8 @@ message SegmentBeginDeleteResponse {
}
message SegmentFinishDeleteRequest {
RequestHeader header = 15;
bytes segment_id = 1 [(gogoproto.customtype) = "SegmentID", (gogoproto.nullable) = false];
repeated SegmentPieceDeleteResult results = 2;
}
@ -473,6 +522,8 @@ message SegmentPieceDeleteResult {
message SegmentFinishDeleteResponse {}
message SegmentListRequest {
RequestHeader header = 15;
bytes stream_id = 1;
SegmentPosition cursor_position = 2;
int32 limit = 3;
@ -488,6 +539,8 @@ message SegmentListItem {
}
message SegmentDownloadRequest {
RequestHeader header = 15;
bytes stream_id = 1 [(gogoproto.customtype) = "StreamID", (gogoproto.nullable) = false];
SegmentPosition cursor_position = 2;
}
@ -507,6 +560,9 @@ message SegmentDownloadResponse {
}
message BatchRequest {
RequestHeader header = 15; // the only header that matters in a batch.
// headers for specific BatchRequestItems are ignored entirely
repeated BatchRequestItem requests = 1;
}
@ -566,4 +622,4 @@ message BatchResponseItem {
SegmentListResponse segment_list = 17;
SegmentDownloadResponse segment_download = 18;
}
}
}

View File

@ -1701,6 +1701,16 @@
}
],
"messages": [
{
"name": "RequestHeader",
"fields": [
{
"id": 1,
"name": "api_key",
"type": "bytes"
}
]
},
{
"name": "Bucket",
"fields": [
@ -1779,6 +1789,11 @@
{
"name": "BucketCreateRequest",
"fields": [
{
"id": 15,
"name": "header",
"type": "RequestHeader"
},
{
"id": 1,
"name": "name",
@ -1824,6 +1839,11 @@
{
"name": "BucketGetRequest",
"fields": [
{
"id": 15,
"name": "header",
"type": "RequestHeader"
},
{
"id": 1,
"name": "name",
@ -1844,6 +1864,11 @@
{
"name": "BucketDeleteRequest",
"fields": [
{
"id": 15,
"name": "header",
"type": "RequestHeader"
},
{
"id": 1,
"name": "name",
@ -1857,6 +1882,11 @@
{
"name": "BucketListRequest",
"fields": [
{
"id": 15,
"name": "header",
"type": "RequestHeader"
},
{
"id": 1,
"name": "cursor",
@ -1893,6 +1923,11 @@
{
"name": "BucketSetAttributionRequest",
"fields": [
{
"id": 15,
"name": "header",
"type": "RequestHeader"
},
{
"id": 1,
"name": "name",
@ -1926,6 +1961,11 @@
{
"name": "SegmentWriteRequestOld",
"fields": [
{
"id": 15,
"name": "header",
"type": "RequestHeader"
},
{
"id": 1,
"name": "bucket",
@ -2012,6 +2052,11 @@
{
"name": "SegmentCommitRequestOld",
"fields": [
{
"id": 15,
"name": "header",
"type": "RequestHeader"
},
{
"id": 1,
"name": "bucket",
@ -2053,6 +2098,11 @@
{
"name": "SegmentDownloadRequestOld",
"fields": [
{
"id": 15,
"name": "header",
"type": "RequestHeader"
},
{
"id": 1,
"name": "bucket",
@ -2104,6 +2154,11 @@
{
"name": "SegmentInfoRequestOld",
"fields": [
{
"id": 15,
"name": "header",
"type": "RequestHeader"
},
{
"id": 1,
"name": "bucket",
@ -2134,6 +2189,11 @@
{
"name": "SegmentDeleteRequestOld",
"fields": [
{
"id": 15,
"name": "header",
"type": "RequestHeader"
},
{
"id": 1,
"name": "bucket",
@ -2180,6 +2240,11 @@
{
"name": "ListSegmentsRequestOld",
"fields": [
{
"id": 15,
"name": "header",
"type": "RequestHeader"
},
{
"id": 1,
"name": "bucket",
@ -2258,6 +2323,11 @@
{
"name": "SetAttributionRequestOld",
"fields": [
{
"id": 15,
"name": "header",
"type": "RequestHeader"
},
{
"id": 1,
"name": "bucket_name",
@ -2274,7 +2344,14 @@
"name": "SetAttributionResponseOld"
},
{
"name": "ProjectInfoRequest"
"name": "ProjectInfoRequest",
"fields": [
{
"id": 15,
"name": "header",
"type": "RequestHeader"
}
]
},
{
"name": "ProjectInfoResponse",
@ -2424,6 +2501,11 @@
{
"name": "ObjectBeginRequest",
"fields": [
{
"id": 15,
"name": "header",
"type": "RequestHeader"
},
{
"id": 1,
"name": "bucket",
@ -2514,6 +2596,11 @@
{
"name": "ObjectCommitRequest",
"fields": [
{
"id": 15,
"name": "header",
"type": "RequestHeader"
},
{
"id": 1,
"name": "stream_id",
@ -2557,6 +2644,11 @@
{
"name": "ObjectGetRequest",
"fields": [
{
"id": 15,
"name": "header",
"type": "RequestHeader"
},
{
"id": 1,
"name": "bucket",
@ -2587,6 +2679,11 @@
{
"name": "ObjectListRequest",
"fields": [
{
"id": 15,
"name": "header",
"type": "RequestHeader"
},
{
"id": 1,
"name": "bucket",
@ -2733,6 +2830,11 @@
{
"name": "ObjectBeginDeleteRequest",
"fields": [
{
"id": 15,
"name": "header",
"type": "RequestHeader"
},
{
"id": 1,
"name": "bucket",
@ -2773,6 +2875,11 @@
{
"name": "ObjectFinishDeleteRequest",
"fields": [
{
"id": 15,
"name": "header",
"type": "RequestHeader"
},
{
"id": 1,
"name": "stream_id",
@ -2957,6 +3064,11 @@
{
"name": "SegmentBeginRequest",
"fields": [
{
"id": 15,
"name": "header",
"type": "RequestHeader"
},
{
"id": 1,
"name": "stream_id",
@ -3028,6 +3140,11 @@
{
"name": "SegmentCommitRequest",
"fields": [
{
"id": 15,
"name": "header",
"type": "RequestHeader"
},
{
"id": 1,
"name": "segment_id",
@ -3180,6 +3297,11 @@
{
"name": "SegmentMakeInlineRequest",
"fields": [
{
"id": 15,
"name": "header",
"type": "RequestHeader"
},
{
"id": 1,
"name": "stream_id",
@ -3233,6 +3355,11 @@
{
"name": "SegmentBeginDeleteRequest",
"fields": [
{
"id": 15,
"name": "header",
"type": "RequestHeader"
},
{
"id": 1,
"name": "stream_id",
@ -3289,6 +3416,11 @@
{
"name": "SegmentFinishDeleteRequest",
"fields": [
{
"id": 15,
"name": "header",
"type": "RequestHeader"
},
{
"id": 1,
"name": "segment_id",
@ -3348,6 +3480,11 @@
{
"name": "SegmentListRequest",
"fields": [
{
"id": 15,
"name": "header",
"type": "RequestHeader"
},
{
"id": 1,
"name": "stream_id",
@ -3394,6 +3531,11 @@
{
"name": "SegmentDownloadRequest",
"fields": [
{
"id": 15,
"name": "header",
"type": "RequestHeader"
},
{
"id": 1,
"name": "stream_id",
@ -3495,6 +3637,11 @@
{
"name": "BatchRequest",
"fields": [
{
"id": 15,
"name": "header",
"type": "RequestHeader"
},
{
"id": 1,
"name": "requests",

View File

@ -25,6 +25,7 @@ func (endpoint *Endpoint) Batch(ctx context.Context, req *pb.BatchRequest) (resp
switch singleRequest := request.Request.(type) {
// BUCKET
case *pb.BatchRequestItem_BucketCreate:
singleRequest.BucketCreate.Header = req.Header
response, err := endpoint.CreateBucket(ctx, singleRequest.BucketCreate)
if err != nil {
return resp, err
@ -35,6 +36,7 @@ func (endpoint *Endpoint) Batch(ctx context.Context, req *pb.BatchRequest) (resp
},
})
case *pb.BatchRequestItem_BucketGet:
singleRequest.BucketGet.Header = req.Header
response, err := endpoint.GetBucket(ctx, singleRequest.BucketGet)
if err != nil {
return resp, err
@ -45,6 +47,7 @@ func (endpoint *Endpoint) Batch(ctx context.Context, req *pb.BatchRequest) (resp
},
})
case *pb.BatchRequestItem_BucketDelete:
singleRequest.BucketDelete.Header = req.Header
response, err := endpoint.DeleteBucket(ctx, singleRequest.BucketDelete)
if err != nil {
return resp, err
@ -55,6 +58,7 @@ func (endpoint *Endpoint) Batch(ctx context.Context, req *pb.BatchRequest) (resp
},
})
case *pb.BatchRequestItem_BucketList:
singleRequest.BucketList.Header = req.Header
response, err := endpoint.ListBuckets(ctx, singleRequest.BucketList)
if err != nil {
return resp, err
@ -65,6 +69,7 @@ func (endpoint *Endpoint) Batch(ctx context.Context, req *pb.BatchRequest) (resp
},
})
case *pb.BatchRequestItem_BucketSetAttribution:
singleRequest.BucketSetAttribution.Header = req.Header
response, err := endpoint.SetBucketAttribution(ctx, singleRequest.BucketSetAttribution)
if err != nil {
return resp, err
@ -76,6 +81,7 @@ func (endpoint *Endpoint) Batch(ctx context.Context, req *pb.BatchRequest) (resp
})
//OBJECT
case *pb.BatchRequestItem_ObjectBegin:
singleRequest.ObjectBegin.Header = req.Header
response, err := endpoint.BeginObject(ctx, singleRequest.ObjectBegin)
if err != nil {
return resp, err
@ -86,6 +92,7 @@ func (endpoint *Endpoint) Batch(ctx context.Context, req *pb.BatchRequest) (resp
},
})
case *pb.BatchRequestItem_ObjectCommit:
singleRequest.ObjectCommit.Header = req.Header
response, err := endpoint.CommitObject(ctx, singleRequest.ObjectCommit)
if err != nil {
return resp, err
@ -96,6 +103,7 @@ func (endpoint *Endpoint) Batch(ctx context.Context, req *pb.BatchRequest) (resp
},
})
case *pb.BatchRequestItem_ObjectGet:
singleRequest.ObjectGet.Header = req.Header
response, err := endpoint.GetObject(ctx, singleRequest.ObjectGet)
if err != nil {
return resp, err
@ -106,6 +114,7 @@ func (endpoint *Endpoint) Batch(ctx context.Context, req *pb.BatchRequest) (resp
},
})
case *pb.BatchRequestItem_ObjectList:
singleRequest.ObjectList.Header = req.Header
response, err := endpoint.ListObjects(ctx, singleRequest.ObjectList)
if err != nil {
return resp, err
@ -116,6 +125,7 @@ func (endpoint *Endpoint) Batch(ctx context.Context, req *pb.BatchRequest) (resp
},
})
case *pb.BatchRequestItem_ObjectBeginDelete:
singleRequest.ObjectBeginDelete.Header = req.Header
response, err := endpoint.BeginDeleteObject(ctx, singleRequest.ObjectBeginDelete)
if err != nil {
return resp, err
@ -126,6 +136,7 @@ func (endpoint *Endpoint) Batch(ctx context.Context, req *pb.BatchRequest) (resp
},
})
case *pb.BatchRequestItem_ObjectFinishDelete:
singleRequest.ObjectFinishDelete.Header = req.Header
response, err := endpoint.FinishDeleteObject(ctx, singleRequest.ObjectFinishDelete)
if err != nil {
return resp, err
@ -137,6 +148,7 @@ func (endpoint *Endpoint) Batch(ctx context.Context, req *pb.BatchRequest) (resp
})
// SEGMENT
case *pb.BatchRequestItem_SegmentBegin:
singleRequest.SegmentBegin.Header = req.Header
response, err := endpoint.BeginSegment(ctx, singleRequest.SegmentBegin)
if err != nil {
return resp, err
@ -147,6 +159,7 @@ func (endpoint *Endpoint) Batch(ctx context.Context, req *pb.BatchRequest) (resp
},
})
case *pb.BatchRequestItem_SegmentCommit:
singleRequest.SegmentCommit.Header = req.Header
response, err := endpoint.CommitSegment(ctx, singleRequest.SegmentCommit)
if err != nil {
return resp, err
@ -157,6 +170,7 @@ func (endpoint *Endpoint) Batch(ctx context.Context, req *pb.BatchRequest) (resp
},
})
case *pb.BatchRequestItem_SegmentList:
singleRequest.SegmentList.Header = req.Header
response, err := endpoint.ListSegments(ctx, singleRequest.SegmentList)
if err != nil {
return resp, err
@ -167,6 +181,7 @@ func (endpoint *Endpoint) Batch(ctx context.Context, req *pb.BatchRequest) (resp
},
})
case *pb.BatchRequestItem_SegmentMakeInline:
singleRequest.SegmentMakeInline.Header = req.Header
response, err := endpoint.MakeInlineSegment(ctx, singleRequest.SegmentMakeInline)
if err != nil {
return resp, err
@ -177,6 +192,7 @@ func (endpoint *Endpoint) Batch(ctx context.Context, req *pb.BatchRequest) (resp
},
})
case *pb.BatchRequestItem_SegmentDownload:
singleRequest.SegmentDownload.Header = req.Header
response, err := endpoint.DownloadSegment(ctx, singleRequest.SegmentDownload)
if err != nil {
return resp, err
@ -187,6 +203,7 @@ func (endpoint *Endpoint) Batch(ctx context.Context, req *pb.BatchRequest) (resp
},
})
case *pb.BatchRequestItem_SegmentBeginDelete:
singleRequest.SegmentBeginDelete.Header = req.Header
response, err := endpoint.BeginDeleteSegment(ctx, singleRequest.SegmentBeginDelete)
if err != nil {
return resp, err
@ -197,6 +214,7 @@ func (endpoint *Endpoint) Batch(ctx context.Context, req *pb.BatchRequest) (resp
},
})
case *pb.BatchRequestItem_SegmentFinishDelete:
singleRequest.SegmentFinishDelete.Header = req.Header
response, err := endpoint.FinishDeleteSegment(ctx, singleRequest.SegmentFinishDelete)
if err != nil {
return resp, err

View File

@ -18,7 +18,6 @@ import (
"google.golang.org/grpc/status"
monkit "gopkg.in/spacemonkeygo/monkit.v2"
"storj.io/storj/pkg/auth"
"storj.io/storj/pkg/identity"
"storj.io/storj/pkg/macaroon"
"storj.io/storj/pkg/pb"
@ -113,7 +112,7 @@ func (endpoint *Endpoint) Close() error { return nil }
func (endpoint *Endpoint) SegmentInfoOld(ctx context.Context, req *pb.SegmentInfoRequestOld) (resp *pb.SegmentInfoResponseOld, err error) {
defer mon.Task()(&ctx)(&err)
keyInfo, err := endpoint.validateAuth(ctx, macaroon.Action{
keyInfo, err := endpoint.validateAuth(ctx, req.Header, macaroon.Action{
Op: macaroon.ActionRead,
Bucket: req.Bucket,
EncryptedPath: req.Path,
@ -140,7 +139,7 @@ func (endpoint *Endpoint) SegmentInfoOld(ctx context.Context, req *pb.SegmentInf
func (endpoint *Endpoint) CreateSegmentOld(ctx context.Context, req *pb.SegmentWriteRequestOld) (resp *pb.SegmentWriteResponseOld, err error) {
defer mon.Task()(&ctx)(&err)
keyInfo, err := endpoint.validateAuth(ctx, macaroon.Action{
keyInfo, err := endpoint.validateAuth(ctx, req.Header, macaroon.Action{
Op: macaroon.ActionWrite,
Bucket: req.Bucket,
EncryptedPath: req.Path,
@ -228,7 +227,7 @@ func calculateSpaceUsed(ptr *pb.Pointer) (inlineSpace, remoteSpace int64) {
func (endpoint *Endpoint) CommitSegmentOld(ctx context.Context, req *pb.SegmentCommitRequestOld) (resp *pb.SegmentCommitResponseOld, err error) {
defer mon.Task()(&ctx)(&err)
keyInfo, err := endpoint.validateAuth(ctx, macaroon.Action{
keyInfo, err := endpoint.validateAuth(ctx, req.Header, macaroon.Action{
Op: macaroon.ActionWrite,
Bucket: req.Bucket,
EncryptedPath: req.Path,
@ -322,7 +321,7 @@ func (endpoint *Endpoint) CommitSegmentOld(ctx context.Context, req *pb.SegmentC
func (endpoint *Endpoint) DownloadSegmentOld(ctx context.Context, req *pb.SegmentDownloadRequestOld) (resp *pb.SegmentDownloadResponseOld, err error) {
defer mon.Task()(&ctx)(&err)
keyInfo, err := endpoint.validateAuth(ctx, macaroon.Action{
keyInfo, err := endpoint.validateAuth(ctx, req.Header, macaroon.Action{
Op: macaroon.ActionRead,
Bucket: req.Bucket,
EncryptedPath: req.Path,
@ -377,7 +376,7 @@ func (endpoint *Endpoint) DownloadSegmentOld(ctx context.Context, req *pb.Segmen
func (endpoint *Endpoint) DeleteSegmentOld(ctx context.Context, req *pb.SegmentDeleteRequestOld) (resp *pb.SegmentDeleteResponseOld, err error) {
defer mon.Task()(&ctx)(&err)
keyInfo, err := endpoint.validateAuth(ctx, macaroon.Action{
keyInfo, err := endpoint.validateAuth(ctx, req.Header, macaroon.Action{
Op: macaroon.ActionDelete,
Bucket: req.Bucket,
EncryptedPath: req.Path,
@ -436,7 +435,7 @@ func (endpoint *Endpoint) DeleteSegmentOld(ctx context.Context, req *pb.SegmentD
func (endpoint *Endpoint) ListSegmentsOld(ctx context.Context, req *pb.ListSegmentsRequestOld) (resp *pb.ListSegmentsResponseOld, err error) {
defer mon.Task()(&ctx)(&err)
keyInfo, err := endpoint.validateAuth(ctx, macaroon.Action{
keyInfo, err := endpoint.validateAuth(ctx, req.Header, macaroon.Action{
Op: macaroon.ActionList,
Bucket: req.Bucket,
EncryptedPath: req.Prefix,
@ -594,7 +593,7 @@ func CreatePath(ctx context.Context, projectID uuid.UUID, segmentIndex int64, bu
func (endpoint *Endpoint) SetAttributionOld(ctx context.Context, req *pb.SetAttributionRequestOld) (_ *pb.SetAttributionResponseOld, err error) {
defer mon.Task()(&ctx)(&err)
err = endpoint.setBucketAttribution(ctx, req.BucketName, req.PartnerId)
err = endpoint.setBucketAttribution(ctx, req.Header, req.BucketName, req.PartnerId)
return &pb.SetAttributionResponseOld{}, err
}
@ -615,7 +614,7 @@ func bytesToUUID(data []byte) (uuid.UUID, error) {
func (endpoint *Endpoint) ProjectInfo(ctx context.Context, req *pb.ProjectInfoRequest) (_ *pb.ProjectInfoResponse, err error) {
defer mon.Task()(&ctx)(&err)
keyInfo, err := endpoint.validateAuth(ctx, macaroon.Action{
keyInfo, err := endpoint.validateAuth(ctx, req.Header, macaroon.Action{
Op: macaroon.ActionProjectInfo,
Time: time.Now(),
})
@ -634,7 +633,7 @@ func (endpoint *Endpoint) ProjectInfo(ctx context.Context, req *pb.ProjectInfoRe
func (endpoint *Endpoint) GetBucket(ctx context.Context, req *pb.BucketGetRequest) (resp *pb.BucketGetResponse, err error) {
defer mon.Task()(&ctx)(&err)
keyInfo, err := endpoint.validateAuth(ctx, macaroon.Action{
keyInfo, err := endpoint.validateAuth(ctx, req.Header, macaroon.Action{
Op: macaroon.ActionRead,
Bucket: req.Name,
Time: time.Now(),
@ -665,7 +664,7 @@ func (endpoint *Endpoint) GetBucket(ctx context.Context, req *pb.BucketGetReques
func (endpoint *Endpoint) CreateBucket(ctx context.Context, req *pb.BucketCreateRequest) (resp *pb.BucketCreateResponse, err error) {
defer mon.Task()(&ctx)(&err)
keyInfo, err := endpoint.validateAuth(ctx, macaroon.Action{
keyInfo, err := endpoint.validateAuth(ctx, req.Header, macaroon.Action{
Op: macaroon.ActionWrite,
Bucket: req.Name,
Time: time.Now(),
@ -742,7 +741,7 @@ func (endpoint *Endpoint) CreateBucket(ctx context.Context, req *pb.BucketCreate
func (endpoint *Endpoint) DeleteBucket(ctx context.Context, req *pb.BucketDeleteRequest) (resp *pb.BucketDeleteResponse, err error) {
defer mon.Task()(&ctx)(&err)
keyInfo, err := endpoint.validateAuth(ctx, macaroon.Action{
keyInfo, err := endpoint.validateAuth(ctx, req.Header, macaroon.Action{
Op: macaroon.ActionDelete,
Bucket: req.Name,
Time: time.Now(),
@ -771,12 +770,12 @@ func (endpoint *Endpoint) ListBuckets(ctx context.Context, req *pb.BucketListReq
Op: macaroon.ActionRead,
Time: time.Now(),
}
keyInfo, err := endpoint.validateAuth(ctx, action)
keyInfo, err := endpoint.validateAuth(ctx, req.Header, action)
if err != nil {
return nil, status.Error(codes.Unauthenticated, err.Error())
}
allowedBuckets, err := getAllowedBuckets(ctx, action)
allowedBuckets, err := getAllowedBuckets(ctx, req.Header, action)
if err != nil {
return nil, err
}
@ -805,12 +804,8 @@ func (endpoint *Endpoint) ListBuckets(ctx context.Context, req *pb.BucketListReq
}, nil
}
func getAllowedBuckets(ctx context.Context, action macaroon.Action) (_ macaroon.AllowedBuckets, err error) {
keyData, ok := auth.GetAPIKey(ctx)
if !ok {
return macaroon.AllowedBuckets{}, status.Errorf(codes.Unauthenticated, "Missing API credentials: %v", err)
}
key, err := macaroon.ParseAPIKey(string(keyData))
func getAllowedBuckets(ctx context.Context, header *pb.RequestHeader, action macaroon.Action) (_ macaroon.AllowedBuckets, err error) {
key, err := getAPIKey(ctx, header)
if err != nil {
return macaroon.AllowedBuckets{}, status.Errorf(codes.InvalidArgument, "Invalid API credentials: %v", err)
}
@ -825,13 +820,13 @@ func getAllowedBuckets(ctx context.Context, action macaroon.Action) (_ macaroon.
func (endpoint *Endpoint) SetBucketAttribution(ctx context.Context, req *pb.BucketSetAttributionRequest) (resp *pb.BucketSetAttributionResponse, err error) {
defer mon.Task()(&ctx)(&err)
err = endpoint.setBucketAttribution(ctx, req.Name, req.PartnerId)
err = endpoint.setBucketAttribution(ctx, req.Header, req.Name, req.PartnerId)
return &pb.BucketSetAttributionResponse{}, err
}
func (endpoint *Endpoint) setBucketAttribution(ctx context.Context, bucketName []byte, parterID []byte) error {
keyInfo, err := endpoint.validateAuth(ctx, macaroon.Action{
func (endpoint *Endpoint) setBucketAttribution(ctx context.Context, header *pb.RequestHeader, bucketName []byte, parterID []byte) error {
keyInfo, err := endpoint.validateAuth(ctx, header, macaroon.Action{
Op: macaroon.ActionList,
Bucket: bucketName,
EncryptedPath: []byte(""),
@ -957,7 +952,7 @@ func convertBucketToProto(ctx context.Context, bucket storj.Bucket) (pbBucket *p
func (endpoint *Endpoint) BeginObject(ctx context.Context, req *pb.ObjectBeginRequest) (resp *pb.ObjectBeginResponse, err error) {
defer mon.Task()(&ctx)(&err)
keyInfo, err := endpoint.validateAuth(ctx, macaroon.Action{
keyInfo, err := endpoint.validateAuth(ctx, req.Header, macaroon.Action{
Op: macaroon.ActionWrite,
Bucket: req.Bucket,
EncryptedPath: req.EncryptedPath,
@ -1047,7 +1042,7 @@ func (endpoint *Endpoint) CommitObject(ctx context.Context, req *pb.ObjectCommit
return nil, status.Error(codes.InvalidArgument, "stream ID expired")
}
keyInfo, err := endpoint.validateAuth(ctx, macaroon.Action{
keyInfo, err := endpoint.validateAuth(ctx, req.Header, macaroon.Action{
Op: macaroon.ActionWrite,
Bucket: streamID.Bucket,
EncryptedPath: streamID.EncryptedPath,
@ -1111,7 +1106,7 @@ func (endpoint *Endpoint) CommitObject(ctx context.Context, req *pb.ObjectCommit
func (endpoint *Endpoint) GetObject(ctx context.Context, req *pb.ObjectGetRequest) (resp *pb.ObjectGetResponse, err error) {
defer mon.Task()(&ctx)(&err)
keyInfo, err := endpoint.validateAuth(ctx, macaroon.Action{
keyInfo, err := endpoint.validateAuth(ctx, req.Header, macaroon.Action{
Op: macaroon.ActionRead,
Bucket: req.Bucket,
EncryptedPath: req.EncryptedPath,
@ -1208,7 +1203,7 @@ func (endpoint *Endpoint) GetObject(ctx context.Context, req *pb.ObjectGetReques
func (endpoint *Endpoint) ListObjects(ctx context.Context, req *pb.ObjectListRequest) (resp *pb.ObjectListResponse, err error) {
defer mon.Task()(&ctx)(&err)
keyInfo, err := endpoint.validateAuth(ctx, macaroon.Action{
keyInfo, err := endpoint.validateAuth(ctx, req.Header, macaroon.Action{
Op: macaroon.ActionList,
Bucket: req.Bucket,
EncryptedPath: []byte{},
@ -1258,7 +1253,7 @@ func (endpoint *Endpoint) ListObjects(ctx context.Context, req *pb.ObjectListReq
func (endpoint *Endpoint) BeginDeleteObject(ctx context.Context, req *pb.ObjectBeginDeleteRequest) (resp *pb.ObjectBeginDeleteResponse, err error) {
defer mon.Task()(&ctx)(&err)
keyInfo, err := endpoint.validateAuth(ctx, macaroon.Action{
keyInfo, err := endpoint.validateAuth(ctx, req.Header, macaroon.Action{
Op: macaroon.ActionDelete,
Bucket: req.Bucket,
EncryptedPath: req.EncryptedPath,
@ -1324,7 +1319,7 @@ func (endpoint *Endpoint) FinishDeleteObject(ctx context.Context, req *pb.Object
return nil, status.Error(codes.InvalidArgument, "stream ID expired")
}
_, err = endpoint.validateAuth(ctx, macaroon.Action{
_, err = endpoint.validateAuth(ctx, req.Header, macaroon.Action{
Op: macaroon.ActionDelete,
Bucket: streamID.Bucket,
EncryptedPath: streamID.EncryptedPath,
@ -1348,7 +1343,7 @@ func (endpoint *Endpoint) BeginSegment(ctx context.Context, req *pb.SegmentBegin
return nil, status.Error(codes.InvalidArgument, err.Error())
}
keyInfo, err := endpoint.validateAuth(ctx, macaroon.Action{
keyInfo, err := endpoint.validateAuth(ctx, req.Header, macaroon.Action{
Op: macaroon.ActionWrite,
Bucket: streamID.Bucket,
EncryptedPath: streamID.EncryptedPath,
@ -1424,7 +1419,7 @@ func (endpoint *Endpoint) CommitSegment(ctx context.Context, req *pb.SegmentComm
streamID := segmentID.StreamId
keyInfo, err := endpoint.validateAuth(ctx, macaroon.Action{
keyInfo, err := endpoint.validateAuth(ctx, req.Header, macaroon.Action{
Op: macaroon.ActionWrite,
Bucket: streamID.Bucket,
EncryptedPath: streamID.EncryptedPath,
@ -1542,7 +1537,7 @@ func (endpoint *Endpoint) MakeInlineSegment(ctx context.Context, req *pb.Segment
return nil, status.Error(codes.InvalidArgument, err.Error())
}
keyInfo, err := endpoint.validateAuth(ctx, macaroon.Action{
keyInfo, err := endpoint.validateAuth(ctx, req.Header, macaroon.Action{
Op: macaroon.ActionWrite,
Bucket: streamID.Bucket,
EncryptedPath: streamID.EncryptedPath,
@ -1616,7 +1611,7 @@ func (endpoint *Endpoint) BeginDeleteSegment(ctx context.Context, req *pb.Segmen
return nil, status.Error(codes.InvalidArgument, err.Error())
}
keyInfo, err := endpoint.validateAuth(ctx, macaroon.Action{
keyInfo, err := endpoint.validateAuth(ctx, req.Header, macaroon.Action{
Op: macaroon.ActionDelete,
Bucket: streamID.Bucket,
EncryptedPath: streamID.EncryptedPath,
@ -1666,7 +1661,7 @@ func (endpoint *Endpoint) FinishDeleteSegment(ctx context.Context, req *pb.Segme
streamID := segmentID.StreamId
keyInfo, err := endpoint.validateAuth(ctx, macaroon.Action{
keyInfo, err := endpoint.validateAuth(ctx, req.Header, macaroon.Action{
Op: macaroon.ActionDelete,
Bucket: streamID.Bucket,
EncryptedPath: streamID.EncryptedPath,
@ -1705,7 +1700,7 @@ func (endpoint *Endpoint) ListSegments(ctx context.Context, req *pb.SegmentListR
return nil, status.Error(codes.InvalidArgument, err.Error())
}
keyInfo, err := endpoint.validateAuth(ctx, macaroon.Action{
keyInfo, err := endpoint.validateAuth(ctx, req.Header, macaroon.Action{
Op: macaroon.ActionList,
Bucket: streamID.Bucket,
EncryptedPath: streamID.EncryptedPath,
@ -1843,7 +1838,7 @@ func (endpoint *Endpoint) DownloadSegment(ctx context.Context, req *pb.SegmentDo
return nil, status.Error(codes.InvalidArgument, err.Error())
}
keyInfo, err := endpoint.validateAuth(ctx, macaroon.Action{
keyInfo, err := endpoint.validateAuth(ctx, req.Header, macaroon.Action{
Op: macaroon.ActionRead,
Bucket: streamID.Bucket,
EncryptedPath: streamID.EncryptedPath,

View File

@ -41,13 +41,18 @@ func TestInvalidAPIKey(t *testing.T) {
require.NoError(t, err)
defer ctx.Check(planet.Shutdown)
throwawayKey, err := macaroon.NewAPIKey([]byte("secret"))
require.NoError(t, err)
planet.Start(ctx)
for _, invalidAPIKey := range []string{"", "invalid", "testKey"} {
client, err := planet.Uplinks[0].DialMetainfo(ctx, planet.Satellites[0], invalidAPIKey)
client, err := planet.Uplinks[0].DialMetainfo(ctx, planet.Satellites[0], throwawayKey)
require.NoError(t, err)
defer ctx.Check(client.Close)
client.SetRawAPIKey([]byte(invalidAPIKey))
_, _, _, err = client.CreateSegment(ctx, "hello", "world", 1, &pb.RedundancyScheme{}, 123, time.Now().Add(time.Hour))
assertUnauthenticated(t, err, false)
@ -78,8 +83,7 @@ func TestRestrictedAPIKey(t *testing.T) {
planet.Start(ctx)
key, err := macaroon.ParseAPIKey(planet.Uplinks[0].APIKey[planet.Satellites[0].ID()])
require.NoError(t, err)
key := planet.Uplinks[0].APIKey[planet.Satellites[0].ID()]
tests := []struct {
Caveat macaroon.Caveat
@ -158,7 +162,7 @@ func TestRestrictedAPIKey(t *testing.T) {
restrictedKey, err := key.Restrict(test.Caveat)
require.NoError(t, err)
client, err := planet.Uplinks[0].DialMetainfo(ctx, planet.Satellites[0], restrictedKey.Serialize())
client, err := planet.Uplinks[0].DialMetainfo(ctx, planet.Satellites[0], restrictedKey)
require.NoError(t, err)
defer ctx.Check(client.Close)

View File

@ -121,15 +121,24 @@ func (requests *createRequests) cleanup() {
}
}
func (endpoint *Endpoint) validateAuth(ctx context.Context, action macaroon.Action) (_ *console.APIKeyInfo, err error) {
func getAPIKey(ctx context.Context, header *pb.RequestHeader) (key *macaroon.APIKey, err error) {
defer mon.Task()(&ctx)(&err)
keyData, ok := auth.GetAPIKey(ctx)
if !ok {
endpoint.log.Debug("unauthorized request")
return nil, status.Error(codes.Unauthenticated, "Missing API credentials")
if header != nil {
return macaroon.ParseRawAPIKey(header.ApiKey)
}
key, err := macaroon.ParseAPIKey(string(keyData))
keyData, ok := auth.GetAPIKey(ctx)
if !ok {
return nil, errs.New("missing credentials")
}
return macaroon.ParseAPIKey(string(keyData))
}
func (endpoint *Endpoint) validateAuth(ctx context.Context, header *pb.RequestHeader, action macaroon.Action) (_ *console.APIKeyInfo, err error) {
defer mon.Task()(&ctx)(&err)
key, err := getAPIKey(ctx, header)
if err != nil {
endpoint.log.Debug("invalid request", zap.Error(err))
return nil, status.Error(codes.InvalidArgument, "Invalid API credentials")

View File

@ -15,7 +15,7 @@ import (
"google.golang.org/grpc/status"
"gopkg.in/spacemonkeygo/monkit.v2"
"storj.io/storj/pkg/auth/grpcauth"
"storj.io/storj/pkg/macaroon"
"storj.io/storj/pkg/pb"
"storj.io/storj/pkg/storj"
"storj.io/storj/pkg/transport"
@ -31,8 +31,9 @@ var (
// Client creates a grpcClient
type Client struct {
client pb.MetainfoClient
conn *grpc.ClientConn
client pb.MetainfoClient
conn *grpc.ClientConn
apiKeyRaw []byte
}
// ListItem is a single item in a listing
@ -43,26 +44,24 @@ type ListItem struct {
}
// New used as a public function
func New(client pb.MetainfoClient) *Client {
func New(client pb.MetainfoClient, apiKey *macaroon.APIKey) *Client {
return &Client{
client: client,
client: client,
apiKeyRaw: apiKey.SerializeRaw(),
}
}
// Dial dials to metainfo endpoint with the specified api key.
func Dial(ctx context.Context, tc transport.Client, address string, apikey string) (*Client, error) {
conn, err := tc.DialAddress(
ctx,
address,
grpc.WithPerRPCCredentials(grpcauth.NewAPIKeyCredentials(apikey)),
)
func Dial(ctx context.Context, tc transport.Client, address string, apiKey *macaroon.APIKey) (*Client, error) {
conn, err := tc.DialAddress(ctx, address)
if err != nil {
return nil, Error.Wrap(err)
}
return &Client{
client: pb.NewMetainfoClient(conn),
conn: conn,
client: pb.NewMetainfoClient(conn),
conn: conn,
apiKeyRaw: apiKey.SerializeRaw(),
}, nil
}
@ -74,11 +73,18 @@ func (client *Client) Close() error {
return nil
}
func (client *Client) header() *pb.RequestHeader {
return &pb.RequestHeader{
ApiKey: client.apiKeyRaw,
}
}
// CreateSegment requests the order limits for creating a new segment
func (client *Client) CreateSegment(ctx context.Context, bucket string, path storj.Path, segmentIndex int64, redundancy *pb.RedundancyScheme, maxEncryptedSegmentSize int64, expiration time.Time) (limits []*pb.AddressedOrderLimit, rootPieceID storj.PieceID, piecePrivateKey storj.PiecePrivateKey, err error) {
defer mon.Task()(&ctx)(&err)
response, err := client.client.CreateSegmentOld(ctx, &pb.SegmentWriteRequestOld{
Header: client.header(),
Bucket: []byte(bucket),
Path: []byte(path),
Segment: segmentIndex,
@ -98,6 +104,7 @@ func (client *Client) CommitSegment(ctx context.Context, bucket string, path sto
defer mon.Task()(&ctx)(&err)
response, err := client.client.CommitSegmentOld(ctx, &pb.SegmentCommitRequestOld{
Header: client.header(),
Bucket: []byte(bucket),
Path: []byte(path),
Segment: segmentIndex,
@ -116,6 +123,7 @@ func (client *Client) SegmentInfo(ctx context.Context, bucket string, path storj
defer mon.Task()(&ctx)(&err)
response, err := client.client.SegmentInfoOld(ctx, &pb.SegmentInfoRequestOld{
Header: client.header(),
Bucket: []byte(bucket),
Path: []byte(path),
Segment: segmentIndex,
@ -135,6 +143,7 @@ func (client *Client) ReadSegment(ctx context.Context, bucket string, path storj
defer mon.Task()(&ctx)(&err)
response, err := client.client.DownloadSegmentOld(ctx, &pb.SegmentDownloadRequestOld{
Header: client.header(),
Bucket: []byte(bucket),
Path: []byte(path),
Segment: segmentIndex,
@ -172,6 +181,7 @@ func (client *Client) DeleteSegment(ctx context.Context, bucket string, path sto
defer mon.Task()(&ctx)(&err)
response, err := client.client.DeleteSegmentOld(ctx, &pb.SegmentDeleteRequestOld{
Header: client.header(),
Bucket: []byte(bucket),
Path: []byte(path),
Segment: segmentIndex,
@ -191,6 +201,7 @@ func (client *Client) ListSegments(ctx context.Context, bucket string, prefix, s
defer mon.Task()(&ctx)(&err)
response, err := client.client.ListSegmentsOld(ctx, &pb.ListSegmentsRequestOld{
Header: client.header(),
Bucket: []byte(bucket),
Prefix: []byte(prefix),
StartAfter: []byte(startAfter),
@ -221,6 +232,7 @@ func (client *Client) SetAttribution(ctx context.Context, bucket string, partner
defer mon.Task()(&ctx)(&err)
_, err = client.client.SetAttributionOld(ctx, &pb.SetAttributionRequestOld{
Header: client.header(),
PartnerId: partnerID[:], // TODO: implement storj.UUID that can be sent using pb
BucketName: []byte(bucket),
})
@ -232,7 +244,9 @@ func (client *Client) SetAttribution(ctx context.Context, bucket string, partner
func (client *Client) GetProjectInfo(ctx context.Context) (resp *pb.ProjectInfoResponse, err error) {
defer mon.Task()(&ctx)(&err)
return client.client.ProjectInfo(ctx, &pb.ProjectInfoRequest{})
return client.client.ProjectInfo(ctx, &pb.ProjectInfoRequest{
Header: client.header(),
})
}
// CreateBucketParams parameters for CreateBucket method
@ -245,11 +259,12 @@ type CreateBucketParams struct {
DefaultEncryptionParameters storj.EncryptionParameters
}
func (params *CreateBucketParams) toRequest() *pb.BucketCreateRequest {
func (params *CreateBucketParams) toRequest(header *pb.RequestHeader) *pb.BucketCreateRequest {
defaultRS := params.DefaultRedundancyScheme
defaultEP := params.DefaultEncryptionParameters
return &pb.BucketCreateRequest{
Header: header,
Name: params.Name,
PathCipher: pb.CipherSuite(params.PathCipher),
PartnerId: params.PartnerID,
@ -273,7 +288,7 @@ func (params *CreateBucketParams) toRequest() *pb.BucketCreateRequest {
func (params *CreateBucketParams) BatchItem() *pb.BatchRequestItem {
return &pb.BatchRequestItem{
Request: &pb.BatchRequestItem_BucketCreate{
BucketCreate: params.toRequest(),
BucketCreate: params.toRequest(nil),
},
}
}
@ -299,7 +314,7 @@ func newCreateBucketResponse(response *pb.BucketCreateResponse) (CreateBucketRes
func (client *Client) CreateBucket(ctx context.Context, params CreateBucketParams) (respBucket storj.Bucket, err error) {
defer mon.Task()(&ctx)(&err)
response, err := client.client.CreateBucket(ctx, params.toRequest())
response, err := client.client.CreateBucket(ctx, params.toRequest(client.header()))
if err != nil {
return storj.Bucket{}, Error.Wrap(err)
}
@ -316,15 +331,18 @@ type GetBucketParams struct {
Name []byte
}
func (params *GetBucketParams) toRequest() *pb.BucketGetRequest {
return &pb.BucketGetRequest{Name: params.Name}
func (params *GetBucketParams) toRequest(header *pb.RequestHeader) *pb.BucketGetRequest {
return &pb.BucketGetRequest{
Header: header,
Name: params.Name,
}
}
// BatchItem returns single item for batch request
func (params *GetBucketParams) BatchItem() *pb.BatchRequestItem {
return &pb.BatchRequestItem{
Request: &pb.BatchRequestItem_BucketGet{
BucketGet: params.toRequest(),
BucketGet: params.toRequest(nil),
},
}
}
@ -348,7 +366,7 @@ func newGetBucketResponse(response *pb.BucketGetResponse) (GetBucketResponse, er
func (client *Client) GetBucket(ctx context.Context, params GetBucketParams) (respBucket storj.Bucket, err error) {
defer mon.Task()(&ctx)(&err)
resp, err := client.client.GetBucket(ctx, params.toRequest())
resp, err := client.client.GetBucket(ctx, params.toRequest(client.header()))
if err != nil {
if status.Code(err) == codes.NotFound {
return storj.Bucket{}, storj.ErrBucketNotFound.Wrap(err)
@ -368,15 +386,18 @@ type DeleteBucketParams struct {
Name []byte
}
func (params *DeleteBucketParams) toRequest() *pb.BucketDeleteRequest {
return &pb.BucketDeleteRequest{Name: params.Name}
func (params *DeleteBucketParams) toRequest(header *pb.RequestHeader) *pb.BucketDeleteRequest {
return &pb.BucketDeleteRequest{
Header: header,
Name: params.Name,
}
}
// BatchItem returns single item for batch request
func (params *DeleteBucketParams) BatchItem() *pb.BatchRequestItem {
return &pb.BatchRequestItem{
Request: &pb.BatchRequestItem_BucketDelete{
BucketDelete: params.toRequest(),
BucketDelete: params.toRequest(nil),
},
}
}
@ -384,7 +405,7 @@ func (params *DeleteBucketParams) BatchItem() *pb.BatchRequestItem {
// DeleteBucket deletes a bucket
func (client *Client) DeleteBucket(ctx context.Context, params DeleteBucketParams) (err error) {
defer mon.Task()(&ctx)(&err)
_, err = client.client.DeleteBucket(ctx, params.toRequest())
_, err = client.client.DeleteBucket(ctx, params.toRequest(client.header()))
if err != nil {
if status.Code(err) == codes.NotFound {
return storj.ErrBucketNotFound.Wrap(err)
@ -399,8 +420,9 @@ type ListBucketsParams struct {
ListOpts storj.BucketListOptions
}
func (params *ListBucketsParams) toRequest() *pb.BucketListRequest {
func (params *ListBucketsParams) toRequest(header *pb.RequestHeader) *pb.BucketListRequest {
return &pb.BucketListRequest{
Header: header,
Cursor: []byte(params.ListOpts.Cursor),
Limit: int32(params.ListOpts.Limit),
Direction: int32(params.ListOpts.Direction),
@ -411,7 +433,7 @@ func (params *ListBucketsParams) toRequest() *pb.BucketListRequest {
func (params *ListBucketsParams) BatchItem() *pb.BatchRequestItem {
return &pb.BatchRequestItem{
Request: &pb.BatchRequestItem_BucketList{
BucketList: params.toRequest(),
BucketList: params.toRequest(nil),
},
}
}
@ -441,7 +463,7 @@ func newListBucketsResponse(response *pb.BucketListResponse) ListBucketsResponse
func (client *Client) ListBuckets(ctx context.Context, params ListBucketsParams) (_ storj.BucketList, err error) {
defer mon.Task()(&ctx)(&err)
resp, err := client.client.ListBuckets(ctx, params.toRequest())
resp, err := client.client.ListBuckets(ctx, params.toRequest(client.header()))
if err != nil {
return storj.BucketList{}, Error.Wrap(err)
}
@ -493,8 +515,9 @@ type SetBucketAttributionParams struct {
PartnerID uuid.UUID
}
func (params *SetBucketAttributionParams) toRequest() *pb.BucketSetAttributionRequest {
func (params *SetBucketAttributionParams) toRequest(header *pb.RequestHeader) *pb.BucketSetAttributionRequest {
return &pb.BucketSetAttributionRequest{
Header: header,
Name: []byte(params.Bucket),
PartnerId: params.PartnerID[:],
}
@ -504,7 +527,7 @@ func (params *SetBucketAttributionParams) toRequest() *pb.BucketSetAttributionRe
func (params *SetBucketAttributionParams) BatchItem() *pb.BatchRequestItem {
return &pb.BatchRequestItem{
Request: &pb.BatchRequestItem_BucketSetAttribution{
BucketSetAttribution: params.toRequest(),
BucketSetAttribution: params.toRequest(nil),
},
}
}
@ -513,7 +536,7 @@ func (params *SetBucketAttributionParams) BatchItem() *pb.BatchRequestItem {
func (client *Client) SetBucketAttribution(ctx context.Context, params SetBucketAttributionParams) (err error) {
defer mon.Task()(&ctx)(&err)
_, err = client.client.SetBucketAttribution(ctx, params.toRequest())
_, err = client.client.SetBucketAttribution(ctx, params.toRequest(client.header()))
return Error.Wrap(err)
}
@ -528,8 +551,9 @@ type BeginObjectParams struct {
ExpiresAt time.Time
}
func (params *BeginObjectParams) toRequest() *pb.ObjectBeginRequest {
func (params *BeginObjectParams) toRequest(header *pb.RequestHeader) *pb.ObjectBeginRequest {
return &pb.ObjectBeginRequest{
Header: header,
Bucket: params.Bucket,
EncryptedPath: params.EncryptedPath,
Version: params.Version,
@ -553,7 +577,7 @@ func (params *BeginObjectParams) toRequest() *pb.ObjectBeginRequest {
func (params *BeginObjectParams) BatchItem() *pb.BatchRequestItem {
return &pb.BatchRequestItem{
Request: &pb.BatchRequestItem_ObjectBegin{
ObjectBegin: params.toRequest(),
ObjectBegin: params.toRequest(nil),
},
}
}
@ -573,7 +597,7 @@ func newBeginObjectResponse(response *pb.ObjectBeginResponse) BeginObjectRespons
func (client *Client) BeginObject(ctx context.Context, params BeginObjectParams) (_ storj.StreamID, err error) {
defer mon.Task()(&ctx)(&err)
response, err := client.client.BeginObject(ctx, params.toRequest())
response, err := client.client.BeginObject(ctx, params.toRequest(client.header()))
if err != nil {
return nil, Error.Wrap(err)
}
@ -589,8 +613,9 @@ type CommitObjectParams struct {
EncryptedMetadata []byte
}
func (params *CommitObjectParams) toRequest() *pb.ObjectCommitRequest {
func (params *CommitObjectParams) toRequest(header *pb.RequestHeader) *pb.ObjectCommitRequest {
return &pb.ObjectCommitRequest{
Header: header,
StreamId: params.StreamID,
EncryptedMetadataNonce: params.EncryptedMetadataNonce,
EncryptedMetadata: params.EncryptedMetadata,
@ -601,7 +626,7 @@ func (params *CommitObjectParams) toRequest() *pb.ObjectCommitRequest {
func (params *CommitObjectParams) BatchItem() *pb.BatchRequestItem {
return &pb.BatchRequestItem{
Request: &pb.BatchRequestItem_ObjectCommit{
ObjectCommit: params.toRequest(),
ObjectCommit: params.toRequest(nil),
},
}
}
@ -610,7 +635,7 @@ func (params *CommitObjectParams) BatchItem() *pb.BatchRequestItem {
func (client *Client) CommitObject(ctx context.Context, params CommitObjectParams) (err error) {
defer mon.Task()(&ctx)(&err)
_, err = client.client.CommitObject(ctx, params.toRequest())
_, err = client.client.CommitObject(ctx, params.toRequest(client.header()))
return Error.Wrap(err)
}
@ -622,8 +647,9 @@ type GetObjectParams struct {
Version int32
}
func (params *GetObjectParams) toRequest() *pb.ObjectGetRequest {
func (params *GetObjectParams) toRequest(header *pb.RequestHeader) *pb.ObjectGetRequest {
return &pb.ObjectGetRequest{
Header: header,
Bucket: params.Bucket,
EncryptedPath: params.EncryptedPath,
Version: params.Version,
@ -634,7 +660,7 @@ func (params *GetObjectParams) toRequest() *pb.ObjectGetRequest {
func (params *GetObjectParams) BatchItem() *pb.BatchRequestItem {
return &pb.BatchRequestItem{
Request: &pb.BatchRequestItem_ObjectGet{
ObjectGet: params.toRequest(),
ObjectGet: params.toRequest(nil),
},
}
}
@ -684,7 +710,7 @@ func newGetObjectResponse(response *pb.ObjectGetResponse) GetObjectResponse {
func (client *Client) GetObject(ctx context.Context, params GetObjectParams) (_ storj.ObjectInfo, err error) {
defer mon.Task()(&ctx)(&err)
response, err := client.client.GetObject(ctx, params.toRequest())
response, err := client.client.GetObject(ctx, params.toRequest(client.header()))
if err != nil {
if status.Code(err) == codes.NotFound {
@ -704,8 +730,9 @@ type BeginDeleteObjectParams struct {
Version int32
}
func (params *BeginDeleteObjectParams) toRequest() *pb.ObjectBeginDeleteRequest {
func (params *BeginDeleteObjectParams) toRequest(header *pb.RequestHeader) *pb.ObjectBeginDeleteRequest {
return &pb.ObjectBeginDeleteRequest{
Header: header,
Bucket: params.Bucket,
EncryptedPath: params.EncryptedPath,
Version: params.Version,
@ -716,7 +743,7 @@ func (params *BeginDeleteObjectParams) toRequest() *pb.ObjectBeginDeleteRequest
func (params *BeginDeleteObjectParams) BatchItem() *pb.BatchRequestItem {
return &pb.BatchRequestItem{
Request: &pb.BatchRequestItem_ObjectBeginDelete{
ObjectBeginDelete: params.toRequest(),
ObjectBeginDelete: params.toRequest(nil),
},
}
}
@ -736,7 +763,7 @@ func newBeginDeleteObjectResponse(response *pb.ObjectBeginDeleteResponse) BeginD
func (client *Client) BeginDeleteObject(ctx context.Context, params BeginDeleteObjectParams) (_ storj.StreamID, err error) {
defer mon.Task()(&ctx)(&err)
response, err := client.client.BeginDeleteObject(ctx, params.toRequest())
response, err := client.client.BeginDeleteObject(ctx, params.toRequest(client.header()))
if err != nil {
if status.Code(err) == codes.NotFound {
return storj.StreamID{}, storj.ErrObjectNotFound.Wrap(err)
@ -752,8 +779,9 @@ type FinishDeleteObjectParams struct {
StreamID storj.StreamID
}
func (params *FinishDeleteObjectParams) toRequest() *pb.ObjectFinishDeleteRequest {
func (params *FinishDeleteObjectParams) toRequest(header *pb.RequestHeader) *pb.ObjectFinishDeleteRequest {
return &pb.ObjectFinishDeleteRequest{
Header: header,
StreamId: params.StreamID,
}
}
@ -762,7 +790,7 @@ func (params *FinishDeleteObjectParams) toRequest() *pb.ObjectFinishDeleteReques
func (params *FinishDeleteObjectParams) BatchItem() *pb.BatchRequestItem {
return &pb.BatchRequestItem{
Request: &pb.BatchRequestItem_ObjectFinishDelete{
ObjectFinishDelete: params.toRequest(),
ObjectFinishDelete: params.toRequest(nil),
},
}
}
@ -771,7 +799,7 @@ func (params *FinishDeleteObjectParams) BatchItem() *pb.BatchRequestItem {
func (client *Client) FinishDeleteObject(ctx context.Context, params FinishDeleteObjectParams) (err error) {
defer mon.Task()(&ctx)(&err)
_, err = client.client.FinishDeleteObject(ctx, params.toRequest())
_, err = client.client.FinishDeleteObject(ctx, params.toRequest(client.header()))
return Error.Wrap(err)
}
@ -786,8 +814,9 @@ type ListObjectsParams struct {
Recursive bool
}
func (params *ListObjectsParams) toRequest() *pb.ObjectListRequest {
func (params *ListObjectsParams) toRequest(header *pb.RequestHeader) *pb.ObjectListRequest {
return &pb.ObjectListRequest{
Header: header,
Bucket: params.Bucket,
EncryptedPrefix: params.EncryptedPrefix,
EncryptedCursor: params.EncryptedCursor,
@@ -803,7 +832,7 @@ func (params *ListObjectsParams) toRequest() *pb.ObjectListRequest {
func (params *ListObjectsParams) BatchItem() *pb.BatchRequestItem {
return &pb.BatchRequestItem{
Request: &pb.BatchRequestItem_ObjectList{
ObjectList: params.toRequest(),
ObjectList: params.toRequest(nil),
},
}
}
@@ -847,7 +876,7 @@ func newListObjectsResponse(response *pb.ObjectListResponse, encryptedPrefix []b
func (client *Client) ListObjects(ctx context.Context, params ListObjectsParams) (_ []storj.ObjectListItem, more bool, err error) {
defer mon.Task()(&ctx)(&err)
response, err := client.client.ListObjects(ctx, params.toRequest())
response, err := client.client.ListObjects(ctx, params.toRequest(client.header()))
if err != nil {
return []storj.ObjectListItem{}, false, Error.Wrap(err)
}
@@ -863,8 +892,9 @@ type BeginSegmentParams struct {
MaxOrderLimit int64
}
func (params *BeginSegmentParams) toRequest() *pb.SegmentBeginRequest {
func (params *BeginSegmentParams) toRequest(header *pb.RequestHeader) *pb.SegmentBeginRequest {
return &pb.SegmentBeginRequest{
Header: header,
StreamId: params.StreamID,
Position: &pb.SegmentPosition{
PartNumber: params.Position.PartNumber,
@@ -878,7 +908,7 @@ func (params *BeginSegmentParams) toRequest() *pb.SegmentBeginRequest {
func (params *BeginSegmentParams) BatchItem() *pb.BatchRequestItem {
return &pb.BatchRequestItem{
Request: &pb.BatchRequestItem_SegmentBegin{
SegmentBegin: params.toRequest(),
SegmentBegin: params.toRequest(nil),
},
}
}
@@ -902,7 +932,7 @@ func newBeginSegmentResponse(response *pb.SegmentBeginResponse) BeginSegmentResp
func (client *Client) BeginSegment(ctx context.Context, params BeginSegmentParams) (_ storj.SegmentID, limits []*pb.AddressedOrderLimit, piecePrivateKey storj.PiecePrivateKey, err error) {
defer mon.Task()(&ctx)(&err)
response, err := client.client.BeginSegment(ctx, params.toRequest())
response, err := client.client.BeginSegment(ctx, params.toRequest(client.header()))
if err != nil {
return storj.SegmentID{}, nil, storj.PiecePrivateKey{}, Error.Wrap(err)
}
@@ -919,8 +949,9 @@ type CommitSegmentParams struct {
UploadResult []*pb.SegmentPieceUploadResult
}
func (params *CommitSegmentParams) toRequest() *pb.SegmentCommitRequest {
func (params *CommitSegmentParams) toRequest(header *pb.RequestHeader) *pb.SegmentCommitRequest {
return &pb.SegmentCommitRequest{
Header: header,
SegmentId: params.SegmentID,
EncryptedKeyNonce: params.Encryption.EncryptedKeyNonce,
@@ -934,7 +965,7 @@ func (params *CommitSegmentParams) toRequest() *pb.SegmentCommitRequest {
func (params *CommitSegmentParams) BatchItem() *pb.BatchRequestItem {
return &pb.BatchRequestItem{
Request: &pb.BatchRequestItem_SegmentCommit{
SegmentCommit: params.toRequest(),
SegmentCommit: params.toRequest(nil),
},
}
}
@@ -943,7 +974,7 @@ func (params *CommitSegmentParams) BatchItem() *pb.BatchRequestItem {
func (client *Client) CommitSegmentNew(ctx context.Context, params CommitSegmentParams) (err error) {
defer mon.Task()(&ctx)(&err)
_, err = client.client.CommitSegment(ctx, params.toRequest())
_, err = client.client.CommitSegment(ctx, params.toRequest(client.header()))
return Error.Wrap(err)
}
@@ -956,8 +987,9 @@ type MakeInlineSegmentParams struct {
EncryptedInlineData []byte
}
func (params *MakeInlineSegmentParams) toRequest() *pb.SegmentMakeInlineRequest {
func (params *MakeInlineSegmentParams) toRequest(header *pb.RequestHeader) *pb.SegmentMakeInlineRequest {
return &pb.SegmentMakeInlineRequest{
Header: header,
StreamId: params.StreamID,
Position: &pb.SegmentPosition{
PartNumber: params.Position.PartNumber,
@@ -973,7 +1005,7 @@ func (params *MakeInlineSegmentParams) toRequest() *pb.SegmentMakeInlineRequest
func (params *MakeInlineSegmentParams) BatchItem() *pb.BatchRequestItem {
return &pb.BatchRequestItem{
Request: &pb.BatchRequestItem_SegmentMakeInline{
SegmentMakeInline: params.toRequest(),
SegmentMakeInline: params.toRequest(nil),
},
}
}
@@ -982,7 +1014,7 @@ func (params *MakeInlineSegmentParams) BatchItem() *pb.BatchRequestItem {
func (client *Client) MakeInlineSegment(ctx context.Context, params MakeInlineSegmentParams) (err error) {
defer mon.Task()(&ctx)(&err)
_, err = client.client.MakeInlineSegment(ctx, params.toRequest())
_, err = client.client.MakeInlineSegment(ctx, params.toRequest(client.header()))
return Error.Wrap(err)
}
@@ -993,8 +1025,9 @@ type BeginDeleteSegmentParams struct {
Position storj.SegmentPosition
}
func (params *BeginDeleteSegmentParams) toRequest() *pb.SegmentBeginDeleteRequest {
func (params *BeginDeleteSegmentParams) toRequest(header *pb.RequestHeader) *pb.SegmentBeginDeleteRequest {
return &pb.SegmentBeginDeleteRequest{
Header: header,
StreamId: params.StreamID,
Position: &pb.SegmentPosition{
PartNumber: params.Position.PartNumber,
@@ -1007,7 +1040,7 @@ func (params *BeginDeleteSegmentParams) BatchItem() *pb.BatchRequestItem {
func (params *BeginDeleteSegmentParams) BatchItem() *pb.BatchRequestItem {
return &pb.BatchRequestItem{
Request: &pb.BatchRequestItem_SegmentBeginDelete{
SegmentBeginDelete: params.toRequest(),
SegmentBeginDelete: params.toRequest(nil),
},
}
}
@@ -1031,7 +1064,7 @@ func newBeginDeleteSegmentResponse(response *pb.SegmentBeginDeleteResponse) Begi
func (client *Client) BeginDeleteSegment(ctx context.Context, params BeginDeleteSegmentParams) (_ storj.SegmentID, limits []*pb.AddressedOrderLimit, _ storj.PiecePrivateKey, err error) {
defer mon.Task()(&ctx)(&err)
response, err := client.client.BeginDeleteSegment(ctx, params.toRequest())
response, err := client.client.BeginDeleteSegment(ctx, params.toRequest(client.header()))
if err != nil {
return storj.SegmentID{}, nil, storj.PiecePrivateKey{}, Error.Wrap(err)
}
@@ -1046,8 +1079,9 @@ type FinishDeleteSegmentParams struct {
DeleteResults []*pb.SegmentPieceDeleteResult
}
func (params *FinishDeleteSegmentParams) toRequest() *pb.SegmentFinishDeleteRequest {
func (params *FinishDeleteSegmentParams) toRequest(header *pb.RequestHeader) *pb.SegmentFinishDeleteRequest {
return &pb.SegmentFinishDeleteRequest{
Header: header,
SegmentId: params.SegmentID,
Results: params.DeleteResults,
}
@@ -1057,7 +1091,7 @@ func (params *FinishDeleteSegmentParams) toRequest() *pb.SegmentFinishDeleteRequ
func (params *FinishDeleteSegmentParams) BatchItem() *pb.BatchRequestItem {
return &pb.BatchRequestItem{
Request: &pb.BatchRequestItem_SegmentFinishDelete{
SegmentFinishDelete: params.toRequest(),
SegmentFinishDelete: params.toRequest(nil),
},
}
}
@@ -1066,7 +1100,7 @@ func (params *FinishDeleteSegmentParams) BatchItem() *pb.BatchRequestItem {
func (client *Client) FinishDeleteSegment(ctx context.Context, params FinishDeleteSegmentParams) (err error) {
defer mon.Task()(&ctx)(&err)
_, err = client.client.FinishDeleteSegment(ctx, params.toRequest())
_, err = client.client.FinishDeleteSegment(ctx, params.toRequest(client.header()))
return Error.Wrap(err)
}
@@ -1077,8 +1111,9 @@ type DownloadSegmentParams struct {
Position storj.SegmentPosition
}
func (params *DownloadSegmentParams) toRequest() *pb.SegmentDownloadRequest {
func (params *DownloadSegmentParams) toRequest(header *pb.RequestHeader) *pb.SegmentDownloadRequest {
return &pb.SegmentDownloadRequest{
Header: header,
StreamId: params.StreamID,
CursorPosition: &pb.SegmentPosition{
PartNumber: params.Position.PartNumber,
@@ -1091,7 +1126,7 @@ func (params *DownloadSegmentParams) toRequest() *pb.SegmentDownloadRequest {
func (params *DownloadSegmentParams) BatchItem() *pb.BatchRequestItem {
return &pb.BatchRequestItem{
Request: &pb.BatchRequestItem_SegmentDownload{
SegmentDownload: params.toRequest(),
SegmentDownload: params.toRequest(nil),
},
}
}
@@ -1136,7 +1171,7 @@ func newDownloadSegmentResponse(response *pb.SegmentDownloadResponse) DownloadSe
func (client *Client) DownloadSegment(ctx context.Context, params DownloadSegmentParams) (_ storj.SegmentDownloadInfo, _ []*pb.AddressedOrderLimit, err error) {
defer mon.Task()(&ctx)(&err)
response, err := client.client.DownloadSegment(ctx, params.toRequest())
response, err := client.client.DownloadSegment(ctx, params.toRequest(client.header()))
if err != nil {
return storj.SegmentDownloadInfo{}, nil, Error.Wrap(err)
}
@@ -1158,8 +1193,9 @@ type ListSegmentsResponse struct {
More bool
}
func (params *ListSegmentsParams) toRequest() *pb.SegmentListRequest {
func (params *ListSegmentsParams) toRequest(header *pb.RequestHeader) *pb.SegmentListRequest {
return &pb.SegmentListRequest{
Header: header,
StreamId: params.StreamID,
CursorPosition: &pb.SegmentPosition{
PartNumber: params.CursorPosition.PartNumber,
@@ -1173,7 +1209,7 @@ func (params *ListSegmentsParams) toRequest() *pb.SegmentListRequest {
func (params *ListSegmentsParams) BatchItem() *pb.BatchRequestItem {
return &pb.BatchRequestItem{
Request: &pb.BatchRequestItem_SegmentList{
SegmentList: params.toRequest(),
SegmentList: params.toRequest(nil),
},
}
}
@@ -1198,7 +1234,7 @@ func newListSegmentsResponse(response *pb.SegmentListResponse) ListSegmentsRespo
func (client *Client) ListSegmentsNew(ctx context.Context, params ListSegmentsParams) (_ []storj.SegmentListItem, more bool, err error) {
defer mon.Task()(&ctx)(&err)
response, err := client.client.ListSegments(ctx, params.toRequest())
response, err := client.client.ListSegments(ctx, params.toRequest(client.header()))
if err != nil {
if status.Code(err) == codes.NotFound {
return []storj.SegmentListItem{}, false, storj.ErrObjectNotFound.Wrap(err)
@@ -1219,6 +1255,7 @@ func (client *Client) Batch(ctx context.Context, requests ...BatchItem) (resp []
batchItems[i] = request.BatchItem()
}
response, err := client.client.Batch(ctx, &pb.BatchRequest{
Header: client.header(),
Requests: batchItems,
})
if err != nil {
@@ -1235,3 +1272,8 @@ func (client *Client) Batch(ctx context.Context, requests ...BatchItem) (resp []
return resp, nil
}
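From the caller's side the batch path therefore needs no per-item header handling; Batch stamps client.header() onto the BatchRequest exactly once, as shown above. A hedged usage sketch, where bucket and path are placeholder []byte values rather than anything taken from this diff:
// Illustrative only; not part of the change itself.
func exampleBatchCall(ctx context.Context, client *Client, bucket, path []byte) error {
	resp, err := client.Batch(ctx,
		&BeginDeleteObjectParams{Bucket: bucket, EncryptedPath: path},
		&ListObjectsParams{Bucket: bucket},
	)
	if err != nil {
		return Error.Wrap(err)
	}
	_ = resp // responses ignored in this sketch
	return nil
}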
// SetRawAPIKey sets the client's raw API key. Mainly used for testing.
func (client *Client) SetRawAPIKey(key []byte) {
client.apiKeyRaw = key
}
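The diff does not show client.header() itself. A plausible sketch of the helper, assuming the client keeps the serialized key in apiKeyRaw (as SetRawAPIKey above suggests) and that pb.RequestHeader carries the key as a bytes field; the ApiKey field name is an assumption, not confirmed by these hunks:
// Sketch only: what the header() helper used throughout this file might look like.
func (client *Client) header() *pb.RequestHeader {
	return &pb.RequestHeader{
		ApiKey: client.apiKeyRaw, // ApiKey field name assumed
	}
}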

View File

@@ -250,7 +250,7 @@ func newMetainfoParts(planet *testplanet.Planet) (*kvmetainfo.DB, streams.Store,
return nil, nil, err
}
metainfo, err := planet.Uplinks[0].DialMetainfo(context.Background(), planet.Satellites[0], apiKey.Serialize())
metainfo, err := planet.Uplinks[0].DialMetainfo(context.Background(), planet.Satellites[0], apiKey)
if err != nil {
return nil, nil, err
}

View File

@@ -270,7 +270,7 @@ func storeTestSetup(t *testing.T, ctx *testcontext.Context, planet *testplanet.P
_, err = planet.Satellites[0].DB.Console().APIKeys().Create(context.Background(), apiKey.Head(), apiKeyInfo)
require.NoError(t, err)
TestAPIKey := apiKey.Serialize()
TestAPIKey := apiKey
metainfo, err := planet.Uplinks[0].DialMetainfo(context.Background(), planet.Satellites[0], TestAPIKey)
require.NoError(t, err)
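Both test fixtures above now thread the parsed key through unchanged; serialization only happens where an API genuinely wants the string form. A short usage sketch of the new DialMetainfo shape, assuming the testplanet and testcontext fixtures these tests already construct; names mirror the surrounding code and are not additions to the diff:
// Sketch only: dialing metainfo with the parsed *macaroon.APIKey.
apiKey := TestAPIKey // the key registered with the satellite in the setup above
metainfoClient, err := planet.Uplinks[0].DialMetainfo(context.Background(), planet.Satellites[0], apiKey)
require.NoError(t, err)
defer ctx.Check(metainfoClient.Close)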