storj/satellite/metainfo/metainfo_test.go

// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.

package metainfo_test

import (
"context"
"sort"
"strconv"
"testing"
"time"
"github.com/gogo/protobuf/proto"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/zeebo/errs"
"go.uber.org/zap"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"storj.io/storj/internal/memory"
"storj.io/storj/internal/testcontext"
"storj.io/storj/internal/testplanet"
"storj.io/storj/internal/testrand"
"storj.io/storj/pkg/macaroon"
"storj.io/storj/pkg/pb"
"storj.io/storj/pkg/signing"
"storj.io/storj/pkg/storj"
"storj.io/storj/satellite"
"storj.io/storj/satellite/console"
"storj.io/storj/uplink/eestream"
"storj.io/storj/uplink/metainfo"
)
// mockAPIKeys is a mock for the API keys store of pointerdb
type mockAPIKeys struct {
info console.APIKeyInfo
err error
}
// GetByKey returns the API key info for the given key
func (keys *mockAPIKeys) GetByKey(ctx context.Context, key macaroon.APIKey) (*console.APIKeyInfo, error) {
return &keys.info, keys.err
}
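
// TestInvalidAPIKey verifies that every metainfo operation rejects
// empty and malformed API keys with an Unauthenticated error.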
func TestInvalidAPIKey(t *testing.T) {
ctx := testcontext.New(t)
defer ctx.Cleanup()
planet, err := testplanet.New(t, 1, 0, 1)
require.NoError(t, err)
defer ctx.Check(planet.Shutdown)
planet.Start(ctx)
for _, invalidAPIKey := range []string{"", "invalid", "testKey"} {
client, err := planet.Uplinks[0].DialMetainfo(ctx, planet.Satellites[0], invalidAPIKey)
require.NoError(t, err)
defer ctx.Check(client.Close)
_, _, _, err = client.CreateSegment(ctx, "hello", "world", 1, &pb.RedundancyScheme{}, 123, time.Now().Add(time.Hour))
assertUnauthenticated(t, err, false)
_, err = client.CommitSegment(ctx, "testbucket", "testpath", 0, &pb.Pointer{}, nil)
assertUnauthenticated(t, err, false)
_, err = client.SegmentInfo(ctx, "testbucket", "testpath", 0)
assertUnauthenticated(t, err, false)
_, _, _, err = client.ReadSegment(ctx, "testbucket", "testpath", 0)
assertUnauthenticated(t, err, false)
_, _, err = client.DeleteSegment(ctx, "testbucket", "testpath", 0)
assertUnauthenticated(t, err, false)
_, _, err = client.ListSegments(ctx, "testbucket", "", "", "", true, 1, 0)
assertUnauthenticated(t, err, false)
}
}
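
// TestRestrictedAPIKey verifies that macaroon caveats (disallowed
// reads/writes/lists/deletes, bucket and path restrictions, and
// not-before/not-after time windows) are enforced per operation.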
func TestRestrictedAPIKey(t *testing.T) {
ctx := testcontext.New(t)
defer ctx.Cleanup()
planet, err := testplanet.New(t, 1, 0, 1)
require.NoError(t, err)
defer ctx.Check(planet.Shutdown)
planet.Start(ctx)
key, err := macaroon.ParseAPIKey(planet.Uplinks[0].APIKey[planet.Satellites[0].ID()])
require.NoError(t, err)
tests := []struct {
Caveat macaroon.Caveat
CreateSegmentAllowed bool
CommitSegmentAllowed bool
SegmentInfoAllowed bool
ReadSegmentAllowed bool
DeleteSegmentAllowed bool
ListSegmentsAllowed bool
ReadBucketAllowed bool
}{
{ // Everything disallowed
Caveat: macaroon.Caveat{
DisallowReads: true,
DisallowWrites: true,
DisallowLists: true,
DisallowDeletes: true,
},
ReadBucketAllowed: true,
},
{ // Read only
Caveat: macaroon.Caveat{
DisallowWrites: true,
DisallowDeletes: true,
},
SegmentInfoAllowed: true,
ReadSegmentAllowed: true,
ListSegmentsAllowed: true,
ReadBucketAllowed: true,
},
{ // Write only
Caveat: macaroon.Caveat{
DisallowReads: true,
DisallowLists: true,
},
CreateSegmentAllowed: true,
CommitSegmentAllowed: true,
DeleteSegmentAllowed: true,
ReadBucketAllowed: true,
},
{ // Bucket restriction
Caveat: macaroon.Caveat{
AllowedPaths: []*macaroon.Caveat_Path{{
Bucket: []byte("otherbucket"),
}},
},
},
{ // Path restriction
Caveat: macaroon.Caveat{
AllowedPaths: []*macaroon.Caveat_Path{{
Bucket: []byte("testbucket"),
EncryptedPathPrefix: []byte("otherpath"),
}},
},
ReadBucketAllowed: true,
},
{ // Time restriction after
Caveat: macaroon.Caveat{
NotAfter: func(x time.Time) *time.Time { return &x }(time.Now()),
},
},
{ // Time restriction before
Caveat: macaroon.Caveat{
NotBefore: func(x time.Time) *time.Time { return &x }(time.Now().Add(time.Hour)),
},
},
}
for _, test := range tests {
restrictedKey, err := key.Restrict(test.Caveat)
require.NoError(t, err)
client, err := planet.Uplinks[0].DialMetainfo(ctx, planet.Satellites[0], restrictedKey.Serialize())
require.NoError(t, err)
defer ctx.Check(client.Close)
_, _, _, err = client.CreateSegment(ctx, "testbucket", "testpath", 1, &pb.RedundancyScheme{}, 123, time.Now().Add(time.Hour))
assertUnauthenticated(t, err, test.CreateSegmentAllowed)
_, err = client.CommitSegment(ctx, "testbucket", "testpath", 0, &pb.Pointer{}, nil)
assertUnauthenticated(t, err, test.CommitSegmentAllowed)
_, err = client.SegmentInfo(ctx, "testbucket", "testpath", 0)
assertUnauthenticated(t, err, test.SegmentInfoAllowed)
_, _, _, err = client.ReadSegment(ctx, "testbucket", "testpath", 0)
assertUnauthenticated(t, err, test.ReadSegmentAllowed)
_, _, err = client.DeleteSegment(ctx, "testbucket", "testpath", 0)
assertUnauthenticated(t, err, test.DeleteSegmentAllowed)
_, _, err = client.ListSegments(ctx, "testbucket", "testpath", "", "", true, 1, 0)
assertUnauthenticated(t, err, test.ListSegmentsAllowed)
_, _, _, err = client.ReadSegment(ctx, "testbucket", "", -1)
assertUnauthenticated(t, err, test.ReadBucketAllowed)
}
}
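
// assertUnauthenticated checks that err is an Unauthenticated gRPC
// status exactly when the operation is expected to be disallowed.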
func assertUnauthenticated(t *testing.T, err error, allowed bool) {
t.Helper()
// If it's allowed, we allow any non-unauthenticated error because
// some calls error after authentication checks.
if err, ok := status.FromError(errs.Unwrap(err)); ok {
assert.Equal(t, codes.Unauthenticated == err.Code(), !allowed)
} else if !allowed {
assert.Fail(t, "got unexpected error", "%T", err)
}
}
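
// TestServiceList uploads objects with non-ASCII keys and verifies
// recursive and non-recursive listings, including prefix entries.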
func TestServiceList(t *testing.T) {
ctx := testcontext.New(t)
defer ctx.Cleanup()
planet, err := testplanet.New(t, 1, 0, 1)
require.NoError(t, err)
defer ctx.Check(planet.Shutdown)
planet.Start(ctx)
items := []struct {
Key string
Value []byte
}{
{Key: "sample.😶", Value: []byte{1}},
{Key: "müsic", Value: []byte{2}},
{Key: "müsic/söng1.mp3", Value: []byte{3}},
{Key: "müsic/söng2.mp3", Value: []byte{4}},
{Key: "müsic/album/söng3.mp3", Value: []byte{5}},
{Key: "müsic/söng4.mp3", Value: []byte{6}},
{Key: "ビデオ/movie.mkv", Value: []byte{7}},
}
for _, item := range items {
err := planet.Uplinks[0].Upload(ctx, planet.Satellites[0], "testbucket", item.Key, item.Value)
assert.NoError(t, err)
}
type Test struct {
Request storj.ListOptions
Expected storj.ObjectList // objects are partial
}
config := planet.Uplinks[0].GetConfig(planet.Satellites[0])
project, bucket, err := planet.Uplinks[0].GetProjectAndBucket(ctx, planet.Satellites[0], "testbucket", config)
require.NoError(t, err)
defer ctx.Check(bucket.Close)
defer ctx.Check(project.Close)
list, err := bucket.ListObjects(ctx, &storj.ListOptions{Recursive: true, Direction: storj.After})
require.NoError(t, err)
expected := []storj.Object{
{Path: "müsic"},
{Path: "müsic/album/söng3.mp3"},
{Path: "müsic/söng1.mp3"},
{Path: "müsic/söng2.mp3"},
{Path: "müsic/söng4.mp3"},
{Path: "sample.😶"},
{Path: "ビデオ/movie.mkv"},
}
require.Equal(t, len(expected), len(list.Items))
sort.Slice(list.Items, func(i, k int) bool {
return list.Items[i].Path < list.Items[k].Path
})
for i, item := range expected {
require.Equal(t, item.Path, list.Items[i].Path)
require.Equal(t, item.IsPrefix, list.Items[i].IsPrefix)
}
list, err = bucket.ListObjects(ctx, &storj.ListOptions{Recursive: false, Direction: storj.After})
require.NoError(t, err)
expected = []storj.Object{
{Path: "müsic"},
{Path: "müsic/", IsPrefix: true},
{Path: "sample.😶"},
{Path: "ビデオ/", IsPrefix: true},
}
require.Equal(t, len(expected), len(list.Items))
sort.Slice(list.Items, func(i, k int) bool {
return list.Items[i].Path < list.Items[k].Path
})
for i, item := range expected {
t.Log(item.Path, list.Items[i].Path)
require.Equal(t, item.Path, list.Items[i].Path)
require.Equal(t, item.IsPrefix, list.Items[i].IsPrefix)
}
}
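
// TestCommitSegment verifies that CommitSegment rejects a nil pointer
// and pointers whose piece counts fall below the repair or success
// thresholds.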
func TestCommitSegment(t *testing.T) {
testplanet.Run(t, testplanet.Config{
SatelliteCount: 1, StorageNodeCount: 6, UplinkCount: 1,
}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
apiKey := planet.Uplinks[0].APIKey[planet.Satellites[0].ID()]
metainfo, err := planet.Uplinks[0].DialMetainfo(ctx, planet.Satellites[0], apiKey)
require.NoError(t, err)
defer ctx.Check(metainfo.Close)
{
// error if pointer is nil
_, err = metainfo.CommitSegment(ctx, "bucket", "path", -1, nil, []*pb.OrderLimit{})
require.Error(t, err)
}
{
// error if number of remote pieces is lower than repair threshold
redundancy := &pb.RedundancyScheme{
MinReq: 1,
RepairThreshold: 2,
SuccessThreshold: 3,
Total: 4,
ErasureShareSize: 256,
}
expirationDate := time.Now().Add(time.Hour)
addressedLimits, rootPieceID, _, err := metainfo.CreateSegment(ctx, "bucket", "path", -1, redundancy, 1000, expirationDate)
require.NoError(t, err)
// create fewer pieces than the repair threshold
usedForPieces := addressedLimits[:redundancy.RepairThreshold-1]
pieces := make([]*pb.RemotePiece, len(usedForPieces))
for i, limit := range usedForPieces {
pieces[i] = &pb.RemotePiece{
PieceNum: int32(i),
NodeId: limit.Limit.StorageNodeId,
Hash: &pb.PieceHash{
PieceId: limit.Limit.PieceId,
PieceSize: 256,
Timestamp: time.Now(),
},
}
}
pointer := &pb.Pointer{
CreationDate: time.Now(),
Type: pb.Pointer_REMOTE,
SegmentSize: 10,
Remote: &pb.RemoteSegment{
RootPieceId: rootPieceID,
Redundancy: redundancy,
RemotePieces: pieces,
},
ExpirationDate: expirationDate,
}
limits := make([]*pb.OrderLimit, len(addressedLimits))
for i, addressedLimit := range addressedLimits {
limits[i] = addressedLimit.Limit
}
_, err = metainfo.CommitSegment(ctx, "bucket", "path", -1, pointer, limits)
require.Error(t, err)
require.Contains(t, err.Error(), "is less than or equal to the repair threshold")
}
{
// error if number of remote pieces is lower than success threshold
redundancy := &pb.RedundancyScheme{
MinReq: 1,
RepairThreshold: 2,
SuccessThreshold: 5,
Total: 6,
ErasureShareSize: 256,
}
expirationDate := time.Now().Add(time.Hour)
addressedLimits, rootPieceID, _, err := metainfo.CreateSegment(ctx, "bucket", "path", -1, redundancy, 1000, expirationDate)
require.NoError(t, err)
// create fewer pieces than the success threshold
usedForPieces := addressedLimits[:redundancy.SuccessThreshold-1]
pieces := make([]*pb.RemotePiece, len(usedForPieces))
for i, limit := range usedForPieces {
pieces[i] = &pb.RemotePiece{
PieceNum: int32(i),
NodeId: limit.Limit.StorageNodeId,
Hash: &pb.PieceHash{
PieceId: limit.Limit.PieceId,
PieceSize: 256,
Timestamp: time.Now(),
},
}
}
pointer := &pb.Pointer{
CreationDate: time.Now(),
Type: pb.Pointer_REMOTE,
SegmentSize: 10,
Remote: &pb.RemoteSegment{
RootPieceId: rootPieceID,
Redundancy: redundancy,
RemotePieces: pieces,
},
ExpirationDate: expirationDate,
}
limits := make([]*pb.OrderLimit, len(addressedLimits))
for i, addressedLimit := range addressedLimits {
limits[i] = addressedLimit.Limit
}
_, err = metainfo.CommitSegment(ctx, "bucket", "path", -1, pointer, limits)
require.Error(t, err)
require.Contains(t, err.Error(), "is less than the success threshold")
}
})
}
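
// TestCreateSegment verifies redundancy scheme validation: non-positive
// erasure share sizes, negative values, and invalid threshold orderings
// are rejected, while a consistent scheme is accepted.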
func TestCreateSegment(t *testing.T) {
testplanet.Run(t, testplanet.Config{
SatelliteCount: 1, StorageNodeCount: 6, UplinkCount: 1,
Reconfigure: testplanet.Reconfigure{
Satellite: func(log *zap.Logger, index int, config *satellite.Config) {
config.Metainfo.RS.Validate = true
},
},
}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
apiKey := planet.Uplinks[0].APIKey[planet.Satellites[0].ID()]
metainfo, err := planet.Uplinks[0].DialMetainfo(ctx, planet.Satellites[0], apiKey)
require.NoError(t, err)
defer ctx.Check(metainfo.Close)
for _, r := range []struct {
rs *pb.RedundancyScheme
fail bool
}{
{ // error - ErasureShareSize <= 0
rs: &pb.RedundancyScheme{
MinReq: 1,
RepairThreshold: 2,
SuccessThreshold: 3,
Total: 4,
ErasureShareSize: -1,
},
fail: true,
},
{ // error - any of the values are negative
rs: &pb.RedundancyScheme{
MinReq: 1,
RepairThreshold: -2,
SuccessThreshold: 3,
Total: -4,
ErasureShareSize: 10,
},
fail: true,
},
{ // error - MinReq >= RepairThreshold
rs: &pb.RedundancyScheme{
MinReq: 10,
RepairThreshold: 2,
SuccessThreshold: 3,
Total: 4,
ErasureShareSize: 10,
},
fail: true,
},
{ // error - MinReq == RepairThreshold
rs: &pb.RedundancyScheme{
MinReq: 2,
RepairThreshold: 2,
SuccessThreshold: 3,
Total: 4,
ErasureShareSize: 10,
},
fail: true,
},
{ // error - RepairThreshold >= SuccessThreshold
rs: &pb.RedundancyScheme{
MinReq: 1,
RepairThreshold: 3,
SuccessThreshold: 3,
Total: 4,
ErasureShareSize: 10,
},
fail: true,
},
{ // error - SuccessThreshold >= Total
rs: &pb.RedundancyScheme{
MinReq: 1,
RepairThreshold: 2,
SuccessThreshold: 4,
Total: 4,
ErasureShareSize: 10,
},
fail: true,
},
{ // ok - valid RS parameters
rs: &pb.RedundancyScheme{
MinReq: 1,
RepairThreshold: 2,
SuccessThreshold: 3,
Total: 4,
ErasureShareSize: 256,
},
fail: false,
},
} {
_, _, _, err := metainfo.CreateSegment(ctx, "bucket", "path", -1, r.rs, 1000, time.Now().Add(time.Hour))
if r.fail {
require.Error(t, err)
} else {
require.NoError(t, err)
}
}
})
}
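
// TestExpirationTimeSegment verifies that CreateSegment accepts an unset
// or future expiration time and rejects one that is current or past.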
func TestExpirationTimeSegment(t *testing.T) {
testplanet.Run(t, testplanet.Config{
SatelliteCount: 1, StorageNodeCount: 1, UplinkCount: 1,
}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
apiKey := planet.Uplinks[0].APIKey[planet.Satellites[0].ID()]
metainfo, err := planet.Uplinks[0].DialMetainfo(ctx, planet.Satellites[0], apiKey)
require.NoError(t, err)
defer ctx.Check(metainfo.Close)
rs := &pb.RedundancyScheme{
MinReq: 1,
RepairThreshold: 1,
SuccessThreshold: 1,
Total: 1,
ErasureShareSize: 1024,
Type: pb.RedundancyScheme_RS,
}
for _, r := range []struct {
expirationDate time.Time
errFlag bool
}{
{ // expiration time not set
time.Time{},
false,
},
{ // 10 days into future
time.Now().AddDate(0, 0, 10),
false,
},
{ // current time
time.Now(),
true,
},
{ // 10 days into past
time.Now().AddDate(0, 0, -10),
true,
},
} {
_, _, _, err := metainfo.CreateSegment(ctx, "my-bucket-name", "file/path", -1, rs, memory.MiB.Int64(), r.expirationDate)
if err != nil {
assert.True(t, r.errFlag)
} else {
assert.False(t, r.errFlag)
}
}
})
}
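
// TestDoubleCommitSegment verifies that a segment can be committed only
// once per create request.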
func TestDoubleCommitSegment(t *testing.T) {
testplanet.Run(t, testplanet.Config{
SatelliteCount: 1, StorageNodeCount: 4, UplinkCount: 1,
}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
apiKey := planet.Uplinks[0].APIKey[planet.Satellites[0].ID()]
metainfo, err := planet.Uplinks[0].DialMetainfo(ctx, planet.Satellites[0], apiKey)
require.NoError(t, err)
defer ctx.Check(metainfo.Close)
pointer, limits := runCreateSegment(ctx, t, metainfo)
_, err = metainfo.CommitSegment(ctx, "my-bucket-name", "file/path", -1, pointer, limits)
require.NoError(t, err)
_, err = metainfo.CommitSegment(ctx, "my-bucket-name", "file/path", -1, pointer, limits)
require.Error(t, err)
require.Contains(t, err.Error(), "missing create request or request expired")
})
}
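
// TestCommitSegmentPointer verifies that CommitSegment rejects pointers
// that were tampered with after CreateSegment.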
func TestCommitSegmentPointer(t *testing.T) {
// every test case must produce an error
tests := []struct {
// defines how to modify the pointer before CommitSegment
Modify func(pointer *pb.Pointer)
ErrorMessage string
}{
{
Modify: func(pointer *pb.Pointer) {
pointer.ExpirationDate = pointer.ExpirationDate.Add(time.Second * 100)
},
ErrorMessage: "pointer expiration date does not match requested one",
},
{
Modify: func(pointer *pb.Pointer) {
pointer.Remote.Redundancy.MinReq += 100
},
ErrorMessage: "pointer redundancy scheme date does not match requested one",
},
{
Modify: func(pointer *pb.Pointer) {
pointer.Remote.Redundancy.RepairThreshold += 100
},
ErrorMessage: "pointer redundancy scheme date does not match requested one",
},
{
Modify: func(pointer *pb.Pointer) {
pointer.Remote.Redundancy.SuccessThreshold += 100
},
ErrorMessage: "pointer redundancy scheme date does not match requested one",
},
{
Modify: func(pointer *pb.Pointer) {
pointer.Remote.Redundancy.Total += 100
},
// this error is triggered earlier than the Create/Commit RS comparison
ErrorMessage: "invalid no order limit for piece",
},
{
Modify: func(pointer *pb.Pointer) {
pointer.Remote.Redundancy.ErasureShareSize += 100
},
ErrorMessage: "pointer redundancy scheme date does not match requested one",
},
{
Modify: func(pointer *pb.Pointer) {
pointer.Remote.Redundancy.Type = 100
},
ErrorMessage: "pointer redundancy scheme date does not match requested one",
},
{
Modify: func(pointer *pb.Pointer) {
pointer.Type = pb.Pointer_INLINE
},
ErrorMessage: "pointer type is INLINE but remote segment is set",
},
{
// a missing piece hash drops the piece from the pointer, leaving too few pieces for a successful upload
Modify: func(pointer *pb.Pointer) {
pointer.Remote.RemotePieces[0].Hash = nil
},
ErrorMessage: "Number of valid pieces (2) is less than the success threshold (3)",
},
{
// an invalid timestamp drops the piece from the pointer, leaving too few pieces for a successful upload
Modify: func(pointer *pb.Pointer) {
pointer.Remote.RemotePieces[0].Hash.Timestamp = time.Now().Add(-24 * time.Hour)
},
ErrorMessage: "Number of valid pieces (2) is less than the success threshold (3)",
},
{
// an invalid hash PieceID drops the piece from the pointer, leaving too few pieces for a successful upload
Modify: func(pointer *pb.Pointer) {
pointer.Remote.RemotePieces[0].Hash.PieceId = storj.PieceID{1}
},
ErrorMessage: "Number of valid pieces (2) is less than the success threshold (3)",
},
{
Modify: func(pointer *pb.Pointer) {
pointer.Remote.RemotePieces[0].Hash.PieceSize = 1
},
ErrorMessage: "all pieces needs to have the same size",
},
{
Modify: func(pointer *pb.Pointer) {
pointer.SegmentSize = 100
},
ErrorMessage: "expected piece size is different from provided",
},
}
testplanet.Run(t, testplanet.Config{
SatelliteCount: 1, StorageNodeCount: 4, UplinkCount: 1,
}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
apiKey := planet.Uplinks[0].APIKey[planet.Satellites[0].ID()]
metainfo, err := planet.Uplinks[0].DialMetainfo(ctx, planet.Satellites[0], apiKey)
require.NoError(t, err)
defer ctx.Check(metainfo.Close)
for i, test := range tests {
pointer, limits := runCreateSegment(ctx, t, metainfo)
test.Modify(pointer)
_, err = metainfo.CommitSegment(ctx, "my-bucket-name", "file/path", -1, pointer, limits)
require.Error(t, err, "Case #%v", i)
require.Contains(t, err.Error(), test.ErrorMessage, "Case #%v", i)
}
})
}
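
// TestSetBucketAttribution verifies that partner attribution can be set
// on empty or nonexistent buckets, but not on an unattributed bucket
// that already contains objects.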
func TestSetBucketAttribution(t *testing.T) {
testplanet.Run(t, testplanet.Config{
SatelliteCount: 1, StorageNodeCount: 0, UplinkCount: 1,
}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
apiKey := planet.Uplinks[0].APIKey[planet.Satellites[0].ID()]
uplink := planet.Uplinks[0]
err := uplink.CreateBucket(ctx, planet.Satellites[0], "alpha")
require.NoError(t, err)
metainfoClient, err := planet.Uplinks[0].DialMetainfo(ctx, planet.Satellites[0], apiKey)
require.NoError(t, err)
defer ctx.Check(metainfoClient.Close)
partnerID := testrand.UUID()
{
// bucket with no items
err = metainfoClient.SetBucketAttribution(ctx, metainfo.SetBucketAttributionParams{
Bucket: "alpha",
PartnerID: partnerID,
})
require.NoError(t, err)
// bucket does not exist
err = metainfoClient.SetBucketAttribution(ctx, metainfo.SetBucketAttributionParams{
Bucket: "beta",
PartnerID: partnerID,
})
require.NoError(t, err)
}
{
// already attributed bucket, adding files
err = planet.Uplinks[0].Upload(ctx, planet.Satellites[0], "alpha", "path", []byte{1, 2, 3})
assert.NoError(t, err)
// bucket with items
err = metainfoClient.SetBucketAttribution(ctx, metainfo.SetBucketAttributionParams{
Bucket: "beta",
PartnerID: partnerID,
})
require.NoError(t, err)
}
{
// non-attributed bucket, adding files
err = planet.Uplinks[0].Upload(ctx, planet.Satellites[0], "alpha-new", "path", []byte{1, 2, 3})
assert.NoError(t, err)
// bucket with items
err = metainfoClient.SetBucketAttribution(ctx, metainfo.SetBucketAttributionParams{
Bucket: "alpha-new",
PartnerID: partnerID,
})
require.Error(t, err)
}
})
}
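
// TestGetProjectInfo verifies that GetProjectInfo returns a project salt
// and that different projects receive different salts.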
func TestGetProjectInfo(t *testing.T) {
testplanet.Run(t, testplanet.Config{
SatelliteCount: 1, StorageNodeCount: 0, UplinkCount: 2,
}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
apiKey0 := planet.Uplinks[0].APIKey[planet.Satellites[0].ID()]
apiKey1 := planet.Uplinks[1].APIKey[planet.Satellites[0].ID()]
metainfo0, err := planet.Uplinks[0].DialMetainfo(ctx, planet.Satellites[0], apiKey0)
require.NoError(t, err)
defer ctx.Check(metainfo0.Close)
metainfo1, err := planet.Uplinks[0].DialMetainfo(ctx, planet.Satellites[0], apiKey1)
require.NoError(t, err)
defer ctx.Check(metainfo1.Close)
info0, err := metainfo0.GetProjectInfo(ctx)
require.NoError(t, err)
require.NotNil(t, info0.ProjectSalt)
info1, err := metainfo1.GetProjectInfo(ctx)
require.NoError(t, err)
require.NotNil(t, info1.ProjectSalt)
// Different projects should have different salts
require.NotEqual(t, info0.ProjectSalt, info1.ProjectSalt)
})
}
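
// runCreateSegment creates a test segment via CreateSegment and wires
// the returned order limits into the pointer's remote pieces.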
func runCreateSegment(ctx context.Context, t *testing.T, metainfo *metainfo.Client) (*pb.Pointer, []*pb.OrderLimit) {
pointer := createTestPointer(t)
addressedLimits, rootPieceID, _, err := metainfo.CreateSegment(ctx, "my-bucket-name", "file/path", -1, pointer.Remote.Redundancy, memory.MiB.Int64(), pointer.ExpirationDate)
require.NoError(t, err)
pointer.Remote.RootPieceId = rootPieceID
limits := make([]*pb.OrderLimit, len(addressedLimits))
for i, addressedLimit := range addressedLimits {
limits[i] = addressedLimit.Limit
if len(pointer.Remote.RemotePieces) > i {
pointer.Remote.RemotePieces[i].NodeId = addressedLimits[i].Limit.StorageNodeId
pointer.Remote.RemotePieces[i].Hash.PieceId = addressedLimits[i].Limit.PieceId
}
}
return pointer, limits
}
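
// createTestPointer builds a remote pointer with three pieces sized to
// match the test redundancy scheme.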
func createTestPointer(t *testing.T) *pb.Pointer {
rs := &pb.RedundancyScheme{
MinReq: 1,
RepairThreshold: 1,
SuccessThreshold: 3,
Total: 4,
ErasureShareSize: 1024,
Type: pb.RedundancyScheme_RS,
}
redundancy, err := eestream.NewRedundancyStrategyFromProto(rs)
require.NoError(t, err)
segmentSize := 4 * memory.KiB.Int64()
pieceSize := eestream.CalcPieceSize(segmentSize, redundancy)
timestamp := time.Now().Add(time.Hour)
pointer := &pb.Pointer{
CreationDate: time.Now(),
Type: pb.Pointer_REMOTE,
SegmentSize: segmentSize,
Remote: &pb.RemoteSegment{
Redundancy: rs,
RemotePieces: []*pb.RemotePiece{
{
PieceNum: 0,
Hash: &pb.PieceHash{
PieceSize: pieceSize,
Timestamp: timestamp,
},
},
{
PieceNum: 1,
Hash: &pb.PieceHash{
PieceSize: pieceSize,
Timestamp: timestamp,
},
},
{
PieceNum: 2,
Hash: &pb.PieceHash{
PieceSize: pieceSize,
Timestamp: timestamp,
},
},
},
},
ExpirationDate: timestamp,
}
return pointer
}
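
// TestBucketNameValidation verifies which bucket names CreateSegment
// accepts and which it rejects.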
func TestBucketNameValidation(t *testing.T) {
testplanet.Run(t, testplanet.Config{
SatelliteCount: 1, StorageNodeCount: 4, UplinkCount: 1,
}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
apiKey := planet.Uplinks[0].APIKey[planet.Satellites[0].ID()]
metainfo, err := planet.Uplinks[0].DialMetainfo(ctx, planet.Satellites[0], apiKey)
require.NoError(t, err)
defer ctx.Check(metainfo.Close)
rs := &pb.RedundancyScheme{
MinReq: 1,
RepairThreshold: 1,
SuccessThreshold: 3,
Total: 4,
ErasureShareSize: 1024,
Type: pb.RedundancyScheme_RS,
}
validNames := []string{
"tes", "testbucket",
"test-bucket", "testbucket9",
"9testbucket", "a.b",
"test.bucket", "test-one.bucket-one",
"test.bucket.one",
"testbucket-63-0123456789012345678901234567890123456789012345abc",
}
for _, name := range validNames {
_, _, _, err = metainfo.CreateSegment(ctx, name, "", -1, rs, 1, time.Now().Add(time.Hour))
require.NoError(t, err, "bucket name: %v", name)
}
invalidNames := []string{
"", "t", "te", "-testbucket",
"testbucket-", "-testbucket-",
"a.b.", "test.bucket-.one",
"test.-bucket.one", "1.2.3.4",
"192.168.1.234", "testBUCKET",
"test/bucket",
"testbucket-64-0123456789012345678901234567890123456789012345abcd",
}
for _, name := range invalidNames {
_, _, _, err = metainfo.CreateSegment(ctx, name, "", -1, rs, 1, time.Now().Add(time.Hour))
require.Error(t, err, "bucket name: %v", name)
}
})
}
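
// TestListGetObjects uploads several objects and verifies ListObjects
// results, per-object GetObject lookups, and the listing limit.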
func TestListGetObjects(t *testing.T) {
testplanet.Run(t, testplanet.Config{
SatelliteCount: 1, StorageNodeCount: 0, UplinkCount: 1,
}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
apiKey := planet.Uplinks[0].APIKey[planet.Satellites[0].ID()]
uplink := planet.Uplinks[0]
files := make([]string, 10)
data := testrand.Bytes(1 * memory.KiB)
for i := 0; i < len(files); i++ {
files[i] = "path" + strconv.Itoa(i)
err := uplink.Upload(ctx, planet.Satellites[0], "testbucket", files[i], data)
require.NoError(t, err)
}
metainfoClient, err := planet.Uplinks[0].DialMetainfo(ctx, planet.Satellites[0], apiKey)
require.NoError(t, err)
defer ctx.Check(metainfoClient.Close)
expectedBucketName := "testbucket"
items, _, err := metainfoClient.ListObjects(ctx, metainfo.ListObjectsParams{
Bucket: []byte(expectedBucketName),
})
require.NoError(t, err)
require.Equal(t, len(files), len(items))
for _, item := range items {
require.NotEmpty(t, item.EncryptedPath)
require.True(t, item.CreatedAt.Before(time.Now()))
object, err := metainfoClient.GetObject(ctx, metainfo.GetObjectParams{
Bucket: []byte(expectedBucketName),
EncryptedPath: item.EncryptedPath,
})
require.NoError(t, err)
require.Equal(t, item.EncryptedPath, []byte(object.Path))
require.NotEmpty(t, object.StreamID)
}
items, _, err = metainfoClient.ListObjects(ctx, metainfo.ListObjectsParams{
Bucket: []byte(expectedBucketName),
Limit: 3,
})
require.NoError(t, err)
require.Equal(t, 3, len(items))
})
}
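
// TestBeginCommitListSegment walks the BeginObject / BeginSegment /
// CommitSegment / CommitObject flow and verifies that the committed
// object and its segment are listable.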
func TestBeginCommitListSegment(t *testing.T) {
testplanet.Run(t, testplanet.Config{
SatelliteCount: 1, StorageNodeCount: 4, UplinkCount: 1,
}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
apiKey := planet.Uplinks[0].APIKey[planet.Satellites[0].ID()]
uplink := planet.Uplinks[0]
config := uplink.GetConfig(planet.Satellites[0])
metainfoService := planet.Satellites[0].Metainfo.Service
projects, err := planet.Satellites[0].DB.Console().Projects().GetAll(ctx)
require.NoError(t, err)
projectID := projects[0].ID
bucket := storj.Bucket{
Name: "initial-bucket",
ProjectID: projectID,
PathCipher: config.GetEncryptionParameters().CipherSuite,
}
_, err = metainfoService.CreateBucket(ctx, bucket)
require.NoError(t, err)
metainfoClient, err := planet.Uplinks[0].DialMetainfo(ctx, planet.Satellites[0], apiKey)
require.NoError(t, err)
defer ctx.Check(metainfoClient.Close)
params := metainfo.BeginObjectParams{
Bucket: []byte(bucket.Name),
EncryptedPath: []byte("encrypted-path"),
Redundancy: storj.RedundancyScheme{
Algorithm: storj.ReedSolomon,
ShareSize: 256,
RequiredShares: 1,
RepairShares: 1,
OptimalShares: 3,
TotalShares: 4,
},
EncryptionParameters: storj.EncryptionParameters{},
ExpiresAt: time.Now().UTC().Add(24 * time.Hour),
}
streamID, err := metainfoClient.BeginObject(ctx, params)
require.NoError(t, err)
segmentID, limits, _, err := metainfoClient.BeginSegment(ctx, metainfo.BeginSegmentParams{
StreamID: streamID,
Position: storj.SegmentPosition{
Index: 0,
},
MaxOrderLimit: memory.MiB.Int64(),
})
require.NoError(t, err)
makeResult := func(num int32) *pb.SegmentPieceUploadResult {
return &pb.SegmentPieceUploadResult{
PieceNum: num,
NodeId: limits[num].Limit.StorageNodeId,
Hash: &pb.PieceHash{
PieceId: limits[num].Limit.PieceId,
PieceSize: 1048832,
Timestamp: time.Now(),
// TODO we are still not verifying the signature in metainfo
},
}
}
err = metainfoClient.CommitSegmentNew(ctx, metainfo.CommitSegmentParams{
SegmentID: segmentID,
SizeEncryptedData: memory.MiB.Int64(),
UploadResult: []*pb.SegmentPieceUploadResult{
makeResult(0),
makeResult(1),
makeResult(2),
},
})
require.NoError(t, err)
err = metainfoClient.CommitObject(ctx, metainfo.CommitObjectParams{
StreamID: streamID,
})
require.NoError(t, err)
objects, _, err := metainfoClient.ListObjects(ctx, metainfo.ListObjectsParams{
Bucket: []byte(bucket.Name),
})
require.NoError(t, err)
require.Len(t, objects, 1)
require.Equal(t, params.EncryptedPath, objects[0].EncryptedPath)
require.Equal(t, params.ExpiresAt, objects[0].ExpiresAt)
object, err := metainfoClient.GetObject(ctx, metainfo.GetObjectParams{
Bucket: []byte(bucket.Name),
EncryptedPath: objects[0].EncryptedPath,
})
require.NoError(t, err)
segments, _, err := metainfoClient.ListSegmentsNew(ctx, metainfo.ListSegmentsParams{
StreamID: object.StreamID,
})
require.NoError(t, err)
require.Len(t, segments, 1)
})
}
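
// TestInlineSegment exercises the inline segment lifecycle: create,
// commit, list with cursors and limits, download, and delete.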
func TestInlineSegment(t *testing.T) {
testplanet.Run(t, testplanet.Config{
SatelliteCount: 1, StorageNodeCount: 0, UplinkCount: 1,
}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
apiKey := planet.Uplinks[0].APIKey[planet.Satellites[0].ID()]
uplink := planet.Uplinks[0]
config := uplink.GetConfig(planet.Satellites[0])
metainfoService := planet.Satellites[0].Metainfo.Service
projects, err := planet.Satellites[0].DB.Console().Projects().GetAll(ctx)
require.NoError(t, err)
projectID := projects[0].ID
// TODO maybe split into separate cases
// Test:
// * create bucket
// * begin object
// * send several inline segments
// * commit object
// * list created object
// * list object segments
// * download segments
// * delete segments and object
bucket := storj.Bucket{
Name: "inline-segments-bucket",
ProjectID: projectID,
PathCipher: config.GetEncryptionParameters().CipherSuite,
}
_, err = metainfoService.CreateBucket(ctx, bucket)
require.NoError(t, err)
metainfoClient, err := planet.Uplinks[0].DialMetainfo(ctx, planet.Satellites[0], apiKey)
require.NoError(t, err)
defer ctx.Check(metainfoClient.Close)
params := metainfo.BeginObjectParams{
Bucket: []byte(bucket.Name),
EncryptedPath: []byte("encrypted-path"),
Redundancy: storj.RedundancyScheme{
Algorithm: storj.ReedSolomon,
ShareSize: 256,
RequiredShares: 1,
RepairShares: 1,
OptimalShares: 3,
TotalShares: 4,
},
EncryptionParameters: storj.EncryptionParameters{},
ExpiresAt: time.Now().UTC().Add(24 * time.Hour),
}
streamID, err := metainfoClient.BeginObject(ctx, params)
require.NoError(t, err)
segments := []int32{0, 1, 2, 3, 4, 5, 6}
segmentsData := make([][]byte, len(segments))
for i, segment := range segments {
segmentsData[i] = testrand.Bytes(memory.KiB)
err = metainfoClient.MakeInlineSegment(ctx, metainfo.MakeInlineSegmentParams{
StreamID: streamID,
Position: storj.SegmentPosition{
Index: segment,
},
EncryptedInlineData: segmentsData[i],
})
require.NoError(t, err)
}
err = metainfoClient.CommitObject(ctx, metainfo.CommitObjectParams{
StreamID: streamID,
})
require.NoError(t, err)
objects, _, err := metainfoClient.ListObjects(ctx, metainfo.ListObjectsParams{
Bucket: []byte(bucket.Name),
})
require.NoError(t, err)
require.Len(t, objects, 1)
require.Equal(t, params.EncryptedPath, objects[0].EncryptedPath)
require.Equal(t, params.ExpiresAt, objects[0].ExpiresAt)
object, err := metainfoClient.GetObject(ctx, metainfo.GetObjectParams{
Bucket: params.Bucket,
EncryptedPath: params.EncryptedPath,
})
require.NoError(t, err)
{ // test listing inline segments
for _, test := range []struct {
Index int32
Limit int
Result int
More bool
}{
{Index: 0, Result: len(segments), More: false},
{Index: 2, Result: len(segments) - 2, More: false},
{Index: 0, Result: 3, More: true, Limit: 3},
{Index: 0, Result: len(segments), More: false, Limit: len(segments)},
{Index: 0, Result: len(segments) - 1, More: true, Limit: len(segments) - 1},
} {
items, more, err := metainfoClient.ListSegmentsNew(ctx, metainfo.ListSegmentsParams{
StreamID: object.StreamID,
CursorPosition: storj.SegmentPosition{
Index: test.Index,
},
Limit: int32(test.Limit),
})
require.NoError(t, err)
require.Equal(t, test.Result, len(items))
require.Equal(t, test.More, more)
}
}
{ // test download inline segments
items, _, err := metainfoClient.ListSegmentsNew(ctx, metainfo.ListSegmentsParams{
StreamID: object.StreamID,
})
require.NoError(t, err)
require.Equal(t, len(segments), len(items))
for i, item := range items {
info, limits, err := metainfoClient.DownloadSegment(ctx, metainfo.DownloadSegmentParams{
StreamID: object.StreamID,
Position: storj.SegmentPosition{
Index: item.Position.Index,
},
})
require.NoError(t, err)
require.Nil(t, limits)
require.Equal(t, segmentsData[i], info.EncryptedInlineData)
}
}
{ // test deleting segments
streamID, err = metainfoClient.BeginDeleteObject(ctx, metainfo.BeginDeleteObjectParams{
Bucket: params.Bucket,
EncryptedPath: params.EncryptedPath,
})
require.NoError(t, err)
items, _, err := metainfoClient.ListSegmentsNew(ctx, metainfo.ListSegmentsParams{
StreamID: streamID,
})
require.NoError(t, err)
for _, item := range items {
segmentID, limits, _, err := metainfoClient.BeginDeleteSegment(ctx, metainfo.BeginDeleteSegmentParams{
StreamID: streamID,
Position: storj.SegmentPosition{
Index: item.Position.Index,
},
})
require.NoError(t, err)
require.Nil(t, limits)
err = metainfoClient.FinishDeleteSegment(ctx, metainfo.FinishDeleteSegmentParams{
SegmentID: segmentID,
})
require.NoError(t, err)
}
items, _, err = metainfoClient.ListSegmentsNew(ctx, metainfo.ListSegmentsParams{
StreamID: streamID,
})
require.NoError(t, err)
require.Empty(t, items)
err = metainfoClient.FinishDeleteObject(ctx, metainfo.FinishDeleteObjectParams{
StreamID: streamID,
})
require.NoError(t, err)
}
})
}
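
// TestRemoteSegment verifies downloading and deleting remote segments,
// including the order limits returned for each operation.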
func TestRemoteSegment(t *testing.T) {
testplanet.Run(t, testplanet.Config{
SatelliteCount: 1, StorageNodeCount: 4, UplinkCount: 1,
}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
apiKey := planet.Uplinks[0].APIKey[planet.Satellites[0].ID()]
uplink := planet.Uplinks[0]
expectedBucketName := "remote-segments-bucket"
err := uplink.Upload(ctx, planet.Satellites[0], expectedBucketName, "file-object", testrand.Bytes(10*memory.KiB))
require.NoError(t, err)
metainfoClient, err := planet.Uplinks[0].DialMetainfo(ctx, planet.Satellites[0], apiKey)
require.NoError(t, err)
defer ctx.Check(metainfoClient.Close)
items, _, err := metainfoClient.ListObjects(ctx, metainfo.ListObjectsParams{
Bucket: []byte(expectedBucketName),
})
require.NoError(t, err)
require.Len(t, items, 1)
{
// Get object
// List segments
// Download segment
object, err := metainfoClient.GetObject(ctx, metainfo.GetObjectParams{
Bucket: []byte(expectedBucketName),
EncryptedPath: items[0].EncryptedPath,
})
require.NoError(t, err)
segments, _, err := metainfoClient.ListSegmentsNew(ctx, metainfo.ListSegmentsParams{
StreamID: object.StreamID,
})
require.NoError(t, err)
require.Len(t, segments, 1)
_, limits, err := metainfoClient.DownloadSegment(ctx, metainfo.DownloadSegmentParams{
StreamID: object.StreamID,
Position: storj.SegmentPosition{
Index: segments[0].Position.Index,
},
})
require.NoError(t, err)
require.NotEmpty(t, limits)
}
{
// Begin deleting object
// List segments
// Begin/Finish deleting segment
// List objects
streamID, err := metainfoClient.BeginDeleteObject(ctx, metainfo.BeginDeleteObjectParams{
Bucket: []byte(expectedBucketName),
EncryptedPath: items[0].EncryptedPath,
})
require.NoError(t, err)
segments, _, err := metainfoClient.ListSegmentsNew(ctx, metainfo.ListSegmentsParams{
StreamID: streamID,
})
require.NoError(t, err)
for _, segment := range segments {
segmentID, limits, _, err := metainfoClient.BeginDeleteSegment(ctx, metainfo.BeginDeleteSegmentParams{
StreamID: streamID,
Position: storj.SegmentPosition{
Index: segment.Position.Index,
},
})
require.NoError(t, err)
require.NotEmpty(t, limits)
err = metainfoClient.FinishDeleteSegment(ctx, metainfo.FinishDeleteSegmentParams{
SegmentID: segmentID,
})
require.NoError(t, err)
}
err = metainfoClient.FinishDeleteObject(ctx, metainfo.FinishDeleteObjectParams{
StreamID: streamID,
})
require.NoError(t, err)
items, _, err = metainfoClient.ListObjects(ctx, metainfo.ListObjectsParams{
Bucket: []byte(expectedBucketName),
})
require.NoError(t, err)
require.Len(t, items, 0)
}
})
}
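
// TestIDs verifies that random or expired stream and segment IDs are
// rejected.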
func TestIDs(t *testing.T) {
testplanet.Run(t, testplanet.Config{
SatelliteCount: 1, StorageNodeCount: 0, UplinkCount: 1,
}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
apiKey := planet.Uplinks[0].APIKey[planet.Satellites[0].ID()]
metainfoClient, err := planet.Uplinks[0].DialMetainfo(ctx, planet.Satellites[0], apiKey)
require.NoError(t, err)
defer ctx.Check(metainfoClient.Close)
{
streamID := testrand.StreamID(256)
err = metainfoClient.CommitObject(ctx, metainfo.CommitObjectParams{
StreamID: streamID,
})
require.Error(t, err) // invalid streamID
segmentID := testrand.SegmentID(512)
err = metainfoClient.CommitSegmentNew(ctx, metainfo.CommitSegmentParams{
SegmentID: segmentID,
})
require.Error(t, err) // invalid segmentID
}
satellitePeer := signing.SignerFromFullIdentity(planet.Satellites[0].Identity)
{ // streamID expired
signedStreamID, err := signing.SignStreamID(ctx, satellitePeer, &pb.SatStreamID{
CreationDate: time.Now().Add(-24 * time.Hour),
})
require.NoError(t, err)
encodedStreamID, err := proto.Marshal(signedStreamID)
require.NoError(t, err)
streamID, err := storj.StreamIDFromBytes(encodedStreamID)
require.NoError(t, err)
err = metainfoClient.CommitObject(ctx, metainfo.CommitObjectParams{
StreamID: streamID,
})
require.Error(t, err)
}
{ // segmentID expired
signedSegmentID, err := signing.SignSegmentID(ctx, satellitePeer, &pb.SatSegmentID{
CreationDate: time.Now().Add(-24 * time.Hour),
})
require.NoError(t, err)
encodedSegmentID, err := proto.Marshal(signedSegmentID)
require.NoError(t, err)
segmentID, err := storj.SegmentIDFromBytes(encodedSegmentID)
require.NoError(t, err)
err = metainfoClient.CommitSegmentNew(ctx, metainfo.CommitSegmentParams{
SegmentID: segmentID,
})
require.Error(t, err)
}
})
}
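
// TestBatch verifies that bucket, object, and segment requests can be
// combined into single batch calls.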
func TestBatch(t *testing.T) {
testplanet.Run(t, testplanet.Config{
SatelliteCount: 1, StorageNodeCount: 0, UplinkCount: 1,
}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
apiKey := planet.Uplinks[0].APIKey[planet.Satellites[0].ID()]
metainfoClient, err := planet.Uplinks[0].DialMetainfo(ctx, planet.Satellites[0], apiKey)
require.NoError(t, err)
defer ctx.Check(metainfoClient.Close)
{ // create a few buckets and list them in one batch
requests := make([]metainfo.BatchItem, 0)
numOfBuckets := 5
for i := 0; i < numOfBuckets; i++ {
requests = append(requests, &metainfo.CreateBucketParams{
Name: []byte("test-bucket-" + strconv.Itoa(i)),
PathCipher: storj.EncAESGCM,
DefaultSegmentsSize: memory.MiB.Int64(),
})
}
requests = append(requests, &metainfo.ListBucketsParams{
ListOpts: storj.BucketListOptions{
Cursor: "",
Direction: storj.After,
},
})
responses, err := metainfoClient.Batch(ctx, requests...)
require.NoError(t, err)
require.Equal(t, numOfBuckets+1, len(responses))
for i := 0; i < numOfBuckets; i++ {
response, err := responses[i].CreateBucket()
require.NoError(t, err)
require.Equal(t, "test-bucket-"+strconv.Itoa(i), response.Bucket.Name)
_, err = responses[i].GetBucket()
require.Error(t, err)
}
bucketsListResp, err := responses[numOfBuckets].ListBuckets()
require.NoError(t, err)
require.Equal(t, numOfBuckets, len(bucketsListResp.BucketList.Items))
}
{ // create bucket, object, upload inline segments in batch, download inline segments in batch
err := planet.Uplinks[0].CreateBucket(ctx, planet.Satellites[0], "second-test-bucket")
require.NoError(t, err)
streamID, err := metainfoClient.BeginObject(ctx, metainfo.BeginObjectParams{
Bucket: []byte("second-test-bucket"),
EncryptedPath: []byte("encrypted-path"),
})
require.NoError(t, err)
requests := make([]metainfo.BatchItem, 0)
numOfSegments := 10
expectedData := make([][]byte, numOfSegments)
for i := 0; i < numOfSegments; i++ {
expectedData[i] = testrand.Bytes(memory.KiB)
requests = append(requests, &metainfo.MakeInlineSegmentParams{
StreamID: streamID,
Position: storj.SegmentPosition{
Index: int32(i),
},
EncryptedInlineData: expectedData[i],
})
}
requests = append(requests, &metainfo.CommitObjectParams{
StreamID: streamID,
})
requests = append(requests, &metainfo.ListSegmentsParams{
StreamID: streamID,
})
requests = append(requests, &metainfo.GetObjectParams{
Bucket: []byte("second-test-bucket"),
EncryptedPath: []byte("encrypted-path"),
})
responses, err := metainfoClient.Batch(ctx, requests...)
require.NoError(t, err)
require.Equal(t, numOfSegments+3, len(responses))
listResponse, err := responses[numOfSegments+1].ListSegment()
require.NoError(t, err)
require.Equal(t, numOfSegments, len(listResponse.Items))
getResponse, err := responses[numOfSegments+2].GetObject()
require.NoError(t, err)
requests = make([]metainfo.BatchItem, 0)
for _, segment := range listResponse.Items {
requests = append(requests, &metainfo.DownloadSegmentParams{
StreamID: getResponse.Info.StreamID,
Position: segment.Position,
})
}
responses, err = metainfoClient.Batch(ctx, requests...)
require.NoError(t, err)
require.Equal(t, len(listResponse.Items), len(responses))
for i, response := range responses {
downloadResponse, err := response.DownloadSegment()
require.NoError(t, err)
require.Equal(t, expectedData[i], downloadResponse.Info.EncryptedInlineData)
}
}
})
}