// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.

package metainfo_test

import (
	"context"
	"sort"
	"strconv"
	"testing"
	"time"

	"github.com/gogo/protobuf/proto"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	"go.uber.org/zap"

	"storj.io/storj/internal/errs2"
	"storj.io/storj/internal/memory"
	"storj.io/storj/internal/testcontext"
	"storj.io/storj/internal/testidentity"
	"storj.io/storj/internal/testplanet"
	"storj.io/storj/internal/testrand"
	"storj.io/storj/pkg/identity"
	"storj.io/storj/pkg/macaroon"
	"storj.io/storj/pkg/pb"
	"storj.io/storj/pkg/rpc/rpcstatus"
	"storj.io/storj/pkg/signing"
	"storj.io/storj/pkg/storj"
	"storj.io/storj/satellite"
	"storj.io/storj/uplink"
	"storj.io/storj/uplink/eestream"
	"storj.io/storj/uplink/metainfo"
)

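// TestInvalidAPIKey checks that every Old metainfo call made with a
// malformed raw API key fails with an Unauthenticated RPC error.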
func TestInvalidAPIKey(t *testing.T) {
	ctx := testcontext.New(t)
	defer ctx.Cleanup()

	planet, err := testplanet.New(t, 1, 0, 1)
	require.NoError(t, err)
	defer ctx.Check(planet.Shutdown)

	throwawayKey, err := macaroon.NewAPIKey([]byte("secret"))
	require.NoError(t, err)

	planet.Start(ctx)

	for _, invalidAPIKey := range []string{"", "invalid", "testKey"} {
		client, err := planet.Uplinks[0].DialMetainfo(ctx, planet.Satellites[0], throwawayKey)
		require.NoError(t, err)
		defer ctx.Check(client.Close)

		client.SetRawAPIKey([]byte(invalidAPIKey))

		_, _, _, err = client.CreateSegmentOld(ctx, "hello", "world", 1, &pb.RedundancyScheme{}, 123, time.Now().Add(time.Hour))
		assertUnauthenticated(t, err, false)

		_, err = client.CommitSegmentOld(ctx, "testbucket", "testpath", 0, &pb.Pointer{}, nil)
		assertUnauthenticated(t, err, false)

		_, err = client.SegmentInfoOld(ctx, "testbucket", "testpath", 0)
		assertUnauthenticated(t, err, false)

		_, _, _, err = client.ReadSegmentOld(ctx, "testbucket", "testpath", 0)
		assertUnauthenticated(t, err, false)

		_, _, err = client.DeleteSegmentOld(ctx, "testbucket", "testpath", 0)
		assertUnauthenticated(t, err, false)

		_, _, err = client.ListSegmentsOld(ctx, "testbucket", "", "", "", true, 1, 0)
		assertUnauthenticated(t, err, false)
	}
}

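// TestRestrictedAPIKey checks that macaroon caveats (disallowed reads,
// writes, lists, deletes, plus path and time restrictions) gate each
// metainfo call as declared in the test table.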
func TestRestrictedAPIKey(t *testing.T) {
	ctx := testcontext.New(t)
	defer ctx.Cleanup()

	planet, err := testplanet.New(t, 1, 0, 1)
	require.NoError(t, err)
	defer ctx.Check(planet.Shutdown)

	planet.Start(ctx)

	key := planet.Uplinks[0].APIKey[planet.Satellites[0].ID()]

	tests := []struct {
		Caveat               macaroon.Caveat
		CreateSegmentAllowed bool
		CommitSegmentAllowed bool
		SegmentInfoAllowed   bool
		ReadSegmentAllowed   bool
		DeleteSegmentAllowed bool
		ListSegmentsAllowed  bool
		ReadBucketAllowed    bool
	}{
		{ // Everything disallowed
			Caveat: macaroon.Caveat{
				DisallowReads:   true,
				DisallowWrites:  true,
				DisallowLists:   true,
				DisallowDeletes: true,
			},
			ReadBucketAllowed: true,
		},

		{ // Read only
			Caveat: macaroon.Caveat{
				DisallowWrites:  true,
				DisallowDeletes: true,
			},
			SegmentInfoAllowed:  true,
			ReadSegmentAllowed:  true,
			ListSegmentsAllowed: true,
			ReadBucketAllowed:   true,
		},

		{ // Write only
			Caveat: macaroon.Caveat{
				DisallowReads: true,
				DisallowLists: true,
			},
			CreateSegmentAllowed: true,
			CommitSegmentAllowed: true,
			DeleteSegmentAllowed: true,
			ReadBucketAllowed:    true,
		},

		{ // Bucket restriction
			Caveat: macaroon.Caveat{
				AllowedPaths: []*macaroon.Caveat_Path{{
					Bucket: []byte("otherbucket"),
				}},
			},
		},

		{ // Path restriction
			Caveat: macaroon.Caveat{
				AllowedPaths: []*macaroon.Caveat_Path{{
					Bucket:              []byte("testbucket"),
					EncryptedPathPrefix: []byte("otherpath"),
				}},
			},
			ReadBucketAllowed: true,
		},

		{ // Time restriction after
			Caveat: macaroon.Caveat{
				NotAfter: func(x time.Time) *time.Time { return &x }(time.Now()),
			},
		},

		{ // Time restriction before
			Caveat: macaroon.Caveat{
				NotBefore: func(x time.Time) *time.Time { return &x }(time.Now().Add(time.Hour)),
			},
		},
	}

	for _, test := range tests {
		restrictedKey, err := key.Restrict(test.Caveat)
		require.NoError(t, err)

		client, err := planet.Uplinks[0].DialMetainfo(ctx, planet.Satellites[0], restrictedKey)
		require.NoError(t, err)
		defer ctx.Check(client.Close)

		_, _, _, err = client.CreateSegmentOld(ctx, "testbucket", "testpath", 1, &pb.RedundancyScheme{}, 123, time.Now().Add(time.Hour))
		assertUnauthenticated(t, err, test.CreateSegmentAllowed)

		_, err = client.CommitSegmentOld(ctx, "testbucket", "testpath", 0, &pb.Pointer{}, nil)
		assertUnauthenticated(t, err, test.CommitSegmentAllowed)

		_, err = client.SegmentInfoOld(ctx, "testbucket", "testpath", 0)
		assertUnauthenticated(t, err, test.SegmentInfoAllowed)

		_, _, _, err = client.ReadSegmentOld(ctx, "testbucket", "testpath", 0)
		assertUnauthenticated(t, err, test.ReadSegmentAllowed)

		_, _, err = client.DeleteSegmentOld(ctx, "testbucket", "testpath", 0)
		assertUnauthenticated(t, err, test.DeleteSegmentAllowed)

		_, _, err = client.ListSegmentsOld(ctx, "testbucket", "testpath", "", "", true, 1, 0)
		assertUnauthenticated(t, err, test.ListSegmentsAllowed)

		_, _, _, err = client.ReadSegmentOld(ctx, "testbucket", "", -1)
		assertUnauthenticated(t, err, test.ReadBucketAllowed)
	}
}

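// assertUnauthenticated asserts that err is an Unauthenticated RPC error
// whenever the call was expected to be disallowed.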
func assertUnauthenticated(t *testing.T, err error, allowed bool) {
	t.Helper()

	// If it's allowed, we allow any non-unauthenticated error because
	// some calls error after authentication checks.
	if !allowed {
		assert.True(t, errs2.IsRPC(err, rpcstatus.Unauthenticated))
	}
}

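// TestServiceList uploads objects with non-ASCII keys and verifies both
// recursive and non-recursive listings, including prefix entries.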
func TestServiceList(t *testing.T) {
	ctx := testcontext.New(t)
	defer ctx.Cleanup()

	planet, err := testplanet.New(t, 1, 0, 1)
	require.NoError(t, err)
	defer ctx.Check(planet.Shutdown)

	planet.Start(ctx)

	items := []struct {
		Key   string
		Value []byte
	}{
		{Key: "sample.😶", Value: []byte{1}},
		{Key: "müsic", Value: []byte{2}},
		{Key: "müsic/söng1.mp3", Value: []byte{3}},
		{Key: "müsic/söng2.mp3", Value: []byte{4}},
		{Key: "müsic/album/söng3.mp3", Value: []byte{5}},
		{Key: "müsic/söng4.mp3", Value: []byte{6}},
		{Key: "ビデオ/movie.mkv", Value: []byte{7}},
	}

	for _, item := range items {
		err := planet.Uplinks[0].Upload(ctx, planet.Satellites[0], "testbucket", item.Key, item.Value)
		assert.NoError(t, err)
	}

	config := planet.Uplinks[0].GetConfig(planet.Satellites[0])
	project, bucket, err := planet.Uplinks[0].GetProjectAndBucket(ctx, planet.Satellites[0], "testbucket", config)
	require.NoError(t, err)
	defer ctx.Check(bucket.Close)
	defer ctx.Check(project.Close)
	list, err := bucket.ListObjects(ctx, &storj.ListOptions{Recursive: true, Direction: storj.After})
	require.NoError(t, err)

	expected := []storj.Object{
		{Path: "müsic"},
		{Path: "müsic/album/söng3.mp3"},
		{Path: "müsic/söng1.mp3"},
		{Path: "müsic/söng2.mp3"},
		{Path: "müsic/söng4.mp3"},
		{Path: "sample.😶"},
		{Path: "ビデオ/movie.mkv"},
	}

	require.Equal(t, len(expected), len(list.Items))
	sort.Slice(list.Items, func(i, k int) bool {
		return list.Items[i].Path < list.Items[k].Path
	})
	for i, item := range expected {
		require.Equal(t, item.Path, list.Items[i].Path)
		require.Equal(t, item.IsPrefix, list.Items[i].IsPrefix)
	}

	list, err = bucket.ListObjects(ctx, &storj.ListOptions{Recursive: false, Direction: storj.After})
	require.NoError(t, err)

	expected = []storj.Object{
		{Path: "müsic"},
		{Path: "müsic/", IsPrefix: true},
		{Path: "sample.😶"},
		{Path: "ビデオ/", IsPrefix: true},
	}

	require.Equal(t, len(expected), len(list.Items))
	sort.Slice(list.Items, func(i, k int) bool {
		return list.Items[i].Path < list.Items[k].Path
	})
	for i, item := range expected {
		t.Log(item.Path, list.Items[i].Path)
		require.Equal(t, item.Path, list.Items[i].Path)
		require.Equal(t, item.IsPrefix, list.Items[i].IsPrefix)
	}
}

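// TestCommitSegment exercises CommitSegmentOld failure cases: a nil
// pointer, and piece counts below the repair and success thresholds.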
func TestCommitSegment(t *testing.T) {
	testplanet.Run(t, testplanet.Config{
		SatelliteCount: 1, StorageNodeCount: 6, UplinkCount: 1,
	}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
		apiKey := planet.Uplinks[0].APIKey[planet.Satellites[0].ID()]

		metainfo, err := planet.Uplinks[0].DialMetainfo(ctx, planet.Satellites[0], apiKey)
		require.NoError(t, err)
		defer ctx.Check(metainfo.Close)

		fullIDMap := make(map[storj.NodeID]*identity.FullIdentity)
		for _, node := range planet.StorageNodes {
			fullIDMap[node.ID()] = node.Identity
		}

		{
			// error if pointer is nil
			_, err = metainfo.CommitSegmentOld(ctx, "bucket", "path", -1, nil, []*pb.OrderLimit{})
			require.Error(t, err)
		}
		{
			// error if number of remote pieces is lower than repair threshold
			redundancy := &pb.RedundancyScheme{
				MinReq:           1,
				RepairThreshold:  2,
				SuccessThreshold: 3,
				Total:            4,
				ErasureShareSize: 256,
			}
			expirationDate := time.Now().Add(time.Hour)
			addressedLimits, rootPieceID, _, err := metainfo.CreateSegmentOld(ctx, "bucket", "path", -1, redundancy, 1000, expirationDate)
			require.NoError(t, err)

			// create number of pieces below repair threshold
			usedForPieces := addressedLimits[:redundancy.RepairThreshold-1]
			pieces := make([]*pb.RemotePiece, len(usedForPieces))
			for i, limit := range usedForPieces {
				newPiece := &pb.RemotePiece{
					PieceNum: int32(i),
					NodeId:   limit.Limit.StorageNodeId,
					Hash: &pb.PieceHash{
						PieceId:   limit.Limit.PieceId,
						PieceSize: 256,
						Timestamp: time.Now(),
					},
				}

				fullID := fullIDMap[limit.Limit.StorageNodeId]
				require.NotNil(t, fullID)
				signer := signing.SignerFromFullIdentity(fullID)
				newHash, err := signing.SignPieceHash(ctx, signer, newPiece.Hash)
				require.NoError(t, err)

				newPiece.Hash = newHash

				pieces[i] = newPiece
			}

			pointer := &pb.Pointer{
				CreationDate: time.Now(),
				Type:         pb.Pointer_REMOTE,
				SegmentSize:  10,
				Remote: &pb.RemoteSegment{
					RootPieceId:  rootPieceID,
					Redundancy:   redundancy,
					RemotePieces: pieces,
				},
				ExpirationDate: expirationDate,
			}

			limits := make([]*pb.OrderLimit, len(addressedLimits))
			for i, addressedLimit := range addressedLimits {
				limits[i] = addressedLimit.Limit
			}
			_, err = metainfo.CommitSegmentOld(ctx, "bucket", "path", -1, pointer, limits)
			require.Error(t, err)
			require.True(t, errs2.IsRPC(err, rpcstatus.InvalidArgument))
			require.Contains(t, err.Error(), "is less than or equal to the repair threshold")
		}

		{
			// error if number of remote pieces is lower than success threshold
			redundancy := &pb.RedundancyScheme{
				MinReq:           1,
				RepairThreshold:  2,
				SuccessThreshold: 5,
				Total:            6,
				ErasureShareSize: 256,
			}
			expirationDate := time.Now().Add(time.Hour)
			addressedLimits, rootPieceID, _, err := metainfo.CreateSegmentOld(ctx, "bucket", "path", -1, redundancy, 1000, expirationDate)
			require.NoError(t, err)

			// create number of pieces below success threshold
			usedForPieces := addressedLimits[:redundancy.SuccessThreshold-1]
			pieces := make([]*pb.RemotePiece, len(usedForPieces))
			for i, limit := range usedForPieces {
				newPiece := &pb.RemotePiece{
					PieceNum: int32(i),
					NodeId:   limit.Limit.StorageNodeId,
					Hash: &pb.PieceHash{
						PieceId:   limit.Limit.PieceId,
						PieceSize: 256,
						Timestamp: time.Now(),
					},
				}

				fullID := fullIDMap[limit.Limit.StorageNodeId]
				require.NotNil(t, fullID)
				signer := signing.SignerFromFullIdentity(fullID)
				newHash, err := signing.SignPieceHash(ctx, signer, newPiece.Hash)
				require.NoError(t, err)

				newPiece.Hash = newHash

				pieces[i] = newPiece
			}

			pointer := &pb.Pointer{
				CreationDate: time.Now(),
				Type:         pb.Pointer_REMOTE,
				SegmentSize:  10,
				Remote: &pb.RemoteSegment{
					RootPieceId:  rootPieceID,
					Redundancy:   redundancy,
					RemotePieces: pieces,
				},
				ExpirationDate: expirationDate,
			}

			limits := make([]*pb.OrderLimit, len(addressedLimits))
			for i, addressedLimit := range addressedLimits {
				limits[i] = addressedLimit.Limit
			}
			_, err = metainfo.CommitSegmentOld(ctx, "bucket", "path", -1, pointer, limits)
			require.Error(t, err)
			require.Contains(t, err.Error(), "is less than the success threshold")
		}
	})
}

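// TestCreateSegment checks satellite-side validation of redundancy
// scheme parameters in CreateSegmentOld.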
func TestCreateSegment(t *testing.T) {
	testplanet.Run(t, testplanet.Config{
		SatelliteCount: 1, StorageNodeCount: 6, UplinkCount: 1,
		Reconfigure: testplanet.Reconfigure{
			Satellite: func(log *zap.Logger, index int, config *satellite.Config) {
				config.Metainfo.RS.Validate = true
			},
		},
	}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
		apiKey := planet.Uplinks[0].APIKey[planet.Satellites[0].ID()]

		metainfo, err := planet.Uplinks[0].DialMetainfo(ctx, planet.Satellites[0], apiKey)
		require.NoError(t, err)
		defer ctx.Check(metainfo.Close)

		for _, r := range []struct {
			rs   *pb.RedundancyScheme
			fail bool
		}{
			{ // error - ErasureShareSize <= 0
				rs: &pb.RedundancyScheme{
					MinReq:           1,
					RepairThreshold:  2,
					SuccessThreshold: 3,
					Total:            4,
					ErasureShareSize: -1,
				},
				fail: true,
			},
			{ // error - any of the values are negative
				rs: &pb.RedundancyScheme{
					MinReq:           1,
					RepairThreshold:  -2,
					SuccessThreshold: 3,
					Total:            -4,
					ErasureShareSize: 10,
				},
				fail: true,
			},
			{ // error - MinReq >= RepairThreshold
				rs: &pb.RedundancyScheme{
					MinReq:           10,
					RepairThreshold:  2,
					SuccessThreshold: 3,
					Total:            4,
					ErasureShareSize: 10,
				},
				fail: true,
			},
			{ // error - MinReq >= RepairThreshold
				rs: &pb.RedundancyScheme{
					MinReq:           2,
					RepairThreshold:  2,
					SuccessThreshold: 3,
					Total:            4,
					ErasureShareSize: 10,
				},
				fail: true,
			},
			{ // error - RepairThreshold >= SuccessThreshold
				rs: &pb.RedundancyScheme{
					MinReq:           1,
					RepairThreshold:  3,
					SuccessThreshold: 3,
					Total:            4,
					ErasureShareSize: 10,
				},
				fail: true,
			},
			{ // error - SuccessThreshold >= Total
				rs: &pb.RedundancyScheme{
					MinReq:           1,
					RepairThreshold:  2,
					SuccessThreshold: 4,
					Total:            4,
					ErasureShareSize: 10,
				},
				fail: true,
			},
			{ // ok - valid RS parameters
				rs: &pb.RedundancyScheme{
					MinReq:           1,
					RepairThreshold:  2,
					SuccessThreshold: 3,
					Total:            4,
					ErasureShareSize: 256,
				},
				fail: false,
			},
		} {
			_, _, _, err := metainfo.CreateSegmentOld(ctx, "bucket", "path", -1, r.rs, 1000, time.Now().Add(time.Hour))
			if r.fail {
				require.Error(t, err)
			} else {
				require.NoError(t, err)
			}
		}
	})
}

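// TestExpirationTimeSegment checks that a zero or future expiration date
// is accepted while a current or past one is rejected.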
func TestExpirationTimeSegment(t *testing.T) {
	testplanet.Run(t, testplanet.Config{
		SatelliteCount: 1, StorageNodeCount: 1, UplinkCount: 1,
	}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
		apiKey := planet.Uplinks[0].APIKey[planet.Satellites[0].ID()]

		metainfo, err := planet.Uplinks[0].DialMetainfo(ctx, planet.Satellites[0], apiKey)
		require.NoError(t, err)
		defer ctx.Check(metainfo.Close)
		rs := &pb.RedundancyScheme{
			MinReq:           1,
			RepairThreshold:  1,
			SuccessThreshold: 1,
			Total:            1,
			ErasureShareSize: 1024,
			Type:             pb.RedundancyScheme_RS,
		}

		for _, r := range []struct {
			expirationDate time.Time
			errFlag        bool
		}{
			{ // expiration time not set
				time.Time{},
				false,
			},
			{ // 10 days into future
				time.Now().AddDate(0, 0, 10),
				false,
			},
			{ // current time
				time.Now(),
				true,
			},
			{ // 10 days into past
				time.Now().AddDate(0, 0, -10),
				true,
			},
		} {
			_, _, _, err := metainfo.CreateSegmentOld(ctx, "my-bucket-name", "file/path", -1, rs, memory.MiB.Int64(), r.expirationDate)
			if err != nil {
				assert.True(t, r.errFlag)
			} else {
				assert.False(t, r.errFlag)
			}
		}
	})
}

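// TestMaxCommitInterval forces MaxCommitInterval to a negative value so
// that any commit arrives "too late" and must be rejected.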
func TestMaxCommitInterval(t *testing.T) {
	testplanet.Run(t, testplanet.Config{
		SatelliteCount: 1, StorageNodeCount: 4, UplinkCount: 1,
		Reconfigure: testplanet.Reconfigure{
			Satellite: func(log *zap.Logger, index int, config *satellite.Config) {
				config.Metainfo.MaxCommitInterval = -1 * time.Hour
			},
		},
	}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
		apiKey := planet.Uplinks[0].APIKey[planet.Satellites[0].ID()]

		metainfo, err := planet.Uplinks[0].DialMetainfo(ctx, planet.Satellites[0], apiKey)
		require.NoError(t, err)
		defer ctx.Check(metainfo.Close)

		fullIDMap := make(map[storj.NodeID]*identity.FullIdentity)
		for _, node := range planet.StorageNodes {
			fullIDMap[node.ID()] = node.Identity
		}

		pointer, limits := runCreateSegment(ctx, t, metainfo, fullIDMap)

		_, err = metainfo.CommitSegmentOld(ctx, "my-bucket-name", "file/path", -1, pointer, limits)
		require.Error(t, err)
		require.Contains(t, err.Error(), "not committed before max commit interval")
	})
}

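// TestDoubleCommitSegment checks that a segment can be committed only
// once; the second commit must fail.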
func TestDoubleCommitSegment(t *testing.T) {
	testplanet.Run(t, testplanet.Config{
		SatelliteCount: 1, StorageNodeCount: 4, UplinkCount: 1,
	}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
		apiKey := planet.Uplinks[0].APIKey[planet.Satellites[0].ID()]

		metainfo, err := planet.Uplinks[0].DialMetainfo(ctx, planet.Satellites[0], apiKey)
		require.NoError(t, err)
		defer ctx.Check(metainfo.Close)

		fullIDMap := make(map[storj.NodeID]*identity.FullIdentity)
		for _, node := range planet.StorageNodes {
			fullIDMap[node.ID()] = node.Identity
		}

		pointer, limits := runCreateSegment(ctx, t, metainfo, fullIDMap)

		savedPointer, err := metainfo.CommitSegmentOld(ctx, "my-bucket-name", "file/path", -1, pointer, limits)
		require.NoError(t, err)
		require.True(t, savedPointer.PieceHashesVerified)

		_, err = metainfo.CommitSegmentOld(ctx, "my-bucket-name", "file/path", -1, pointer, limits)
		require.Error(t, err)
		require.Contains(t, err.Error(), "missing create request or request expired")
	})
}

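// TestCommitSegmentPointer tampers with the pointer between
// CreateSegmentOld and CommitSegmentOld; every case must fail with the
// listed error message.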
func TestCommitSegmentPointer(t *testing.T) {
	// all tests need to generate an error
	tests := []struct {
		// defines how to modify the pointer before CommitSegment
		Modify       func(pointer *pb.Pointer, fullIDMap map[storj.NodeID]*identity.FullIdentity, limits []*pb.OrderLimit)
		ErrorMessage string
	}{
		{
			Modify: func(pointer *pb.Pointer, _ map[storj.NodeID]*identity.FullIdentity, limits []*pb.OrderLimit) {
				pointer.ExpirationDate = pointer.ExpirationDate.Add(time.Second * 100)
			},
			ErrorMessage: "pointer expiration date does not match requested one",
		},
		{
			Modify: func(pointer *pb.Pointer, _ map[storj.NodeID]*identity.FullIdentity, limits []*pb.OrderLimit) {
				pointer.Remote.Redundancy.MinReq += 100
			},
			ErrorMessage: "pointer redundancy scheme date does not match requested one",
		},
		{
			Modify: func(pointer *pb.Pointer, _ map[storj.NodeID]*identity.FullIdentity, limits []*pb.OrderLimit) {
				pointer.Remote.Redundancy.RepairThreshold += 100
			},
			ErrorMessage: "pointer redundancy scheme date does not match requested one",
		},
		{
			Modify: func(pointer *pb.Pointer, _ map[storj.NodeID]*identity.FullIdentity, limits []*pb.OrderLimit) {
				pointer.Remote.Redundancy.SuccessThreshold += 100
			},
			ErrorMessage: "pointer redundancy scheme date does not match requested one",
		},
		{
			Modify: func(pointer *pb.Pointer, _ map[storj.NodeID]*identity.FullIdentity, limits []*pb.OrderLimit) {
				pointer.Remote.Redundancy.Total += 100
			},
			// this error is triggered earlier than the Create/Commit RS comparison
			ErrorMessage: "invalid no order limit for piece",
		},
		{
			Modify: func(pointer *pb.Pointer, _ map[storj.NodeID]*identity.FullIdentity, limits []*pb.OrderLimit) {
				pointer.Remote.Redundancy.ErasureShareSize += 100
			},
			ErrorMessage: "pointer redundancy scheme date does not match requested one",
		},
		{
			Modify: func(pointer *pb.Pointer, _ map[storj.NodeID]*identity.FullIdentity, limits []*pb.OrderLimit) {
				pointer.Remote.Redundancy.Type = 100
			},
			ErrorMessage: "pointer redundancy scheme date does not match requested one",
		},
		{
			Modify: func(pointer *pb.Pointer, _ map[storj.NodeID]*identity.FullIdentity, limits []*pb.OrderLimit) {
				pointer.Type = pb.Pointer_INLINE
			},
			ErrorMessage: "pointer type is INLINE but remote segment is set",
		},
		{
			// no piece hash removes piece from pointer, not enough pieces for successful upload
			Modify: func(pointer *pb.Pointer, _ map[storj.NodeID]*identity.FullIdentity, limits []*pb.OrderLimit) {
				pointer.Remote.RemotePieces[0].Hash = nil
			},
			ErrorMessage: "Number of valid pieces (2) is less than the success threshold (3)",
		},
		{
			// set piece number to be out of range of limit slice
			Modify: func(pointer *pb.Pointer, _ map[storj.NodeID]*identity.FullIdentity, limits []*pb.OrderLimit) {
				pointer.Remote.RemotePieces[0].PieceNum = int32(len(limits))
			},
			ErrorMessage: "invalid piece number",
		},
		{
			// invalid timestamp removes piece from pointer, not enough pieces for successful upload
			Modify: func(pointer *pb.Pointer, _ map[storj.NodeID]*identity.FullIdentity, limits []*pb.OrderLimit) {
				pointer.Remote.RemotePieces[0].Hash.Timestamp = time.Now().Add(-24 * time.Hour)
			},
			ErrorMessage: "Number of valid pieces (2) is less than the success threshold (3)",
		},
		{
			// invalid hash PieceID removes piece from pointer, not enough pieces for successful upload
			Modify: func(pointer *pb.Pointer, _ map[storj.NodeID]*identity.FullIdentity, limits []*pb.OrderLimit) {
				pointer.Remote.RemotePieces[0].Hash.PieceId = storj.PieceID{1}
			},
			ErrorMessage: "Number of valid pieces (2) is less than the success threshold (3)",
		},
		{
			Modify: func(pointer *pb.Pointer, fullIDMap map[storj.NodeID]*identity.FullIdentity, limits []*pb.OrderLimit) {
				pointer.Remote.RemotePieces[0].Hash.PieceSize = 1

				ctx := testcontext.New(t)
				snFullID := fullIDMap[pointer.Remote.RemotePieces[0].NodeId]
				require.NotNil(t, snFullID)
				signer := signing.SignerFromFullIdentity(snFullID)
				storageNodeHash, err := signing.SignPieceHash(ctx, signer, pointer.Remote.RemotePieces[0].Hash)
				require.NoError(t, err)
				pointer.Remote.RemotePieces[0].Hash = storageNodeHash
			},
			ErrorMessage: "all pieces needs to have the same size",
		},
		{
			Modify: func(pointer *pb.Pointer, _ map[storj.NodeID]*identity.FullIdentity, limits []*pb.OrderLimit) {
				pointer.SegmentSize = 100
			},
			ErrorMessage: "expected piece size is different from provided",
		},
		{
			Modify: func(pointer *pb.Pointer, _ map[storj.NodeID]*identity.FullIdentity, limits []*pb.OrderLimit) {
				// nil piece hash signature removes piece from pointer, not enough pieces for successful upload
				pointer.Remote.RemotePieces[0].Hash.Signature = nil
			},
			ErrorMessage: "Number of valid pieces (2) is less than the success threshold (3)",
		},
		{
			Modify: func(pointer *pb.Pointer, _ map[storj.NodeID]*identity.FullIdentity, limits []*pb.OrderLimit) {
				// invalid piece hash signature removes piece from pointer, not enough pieces for successful upload
				pointer.Remote.RemotePieces[0].Hash.Signature = nil

				ctx := testcontext.New(t)
				ca, err := testidentity.NewTestCA(ctx)
				require.NoError(t, err)
				badFullID, err := ca.NewIdentity()
				require.NoError(t, err)
				signer := signing.SignerFromFullIdentity(badFullID)

				newHash, err := signing.SignPieceHash(ctx, signer, pointer.Remote.RemotePieces[0].Hash)
				require.NoError(t, err)
				pointer.Remote.RemotePieces[0].Hash = newHash
			},
			ErrorMessage: "Number of valid pieces (2) is less than the success threshold (3)",
		},
		{
			Modify: func(pointer *pb.Pointer, _ map[storj.NodeID]*identity.FullIdentity, limits []*pb.OrderLimit) {
				firstPiece := pointer.Remote.RemotePieces[0]
				pointer.Remote.RemotePieces[1] = firstPiece
				pointer.Remote.RemotePieces[2] = firstPiece
			},
			ErrorMessage: "piece num 0 is duplicated",
		},
		{
			Modify: func(pointer *pb.Pointer, _ map[storj.NodeID]*identity.FullIdentity, limits []*pb.OrderLimit) {
				firstNodeID := pointer.Remote.RemotePieces[0].NodeId
				pointer.Remote.RemotePieces[1].NodeId = firstNodeID
			},
			ErrorMessage: "invalid order limit piece id",
		},
	}

	testplanet.Run(t, testplanet.Config{
		SatelliteCount: 1, StorageNodeCount: 4, UplinkCount: 1,
	}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
		apiKey := planet.Uplinks[0].APIKey[planet.Satellites[0].ID()]

		metainfo, err := planet.Uplinks[0].DialMetainfo(ctx, planet.Satellites[0], apiKey)
		require.NoError(t, err)
		defer ctx.Check(metainfo.Close)

		fullIDMap := make(map[storj.NodeID]*identity.FullIdentity)
		for _, node := range planet.StorageNodes {
			fullIDMap[node.ID()] = node.Identity
		}

		for i, test := range tests {
			pointer, limits := runCreateSegment(ctx, t, metainfo, fullIDMap)
			test.Modify(pointer, fullIDMap, limits)

			_, err = metainfo.CommitSegmentOld(ctx, "my-bucket-name", "file/path", -1, pointer, limits)
			require.Error(t, err, "Case #%v", i)
			require.Contains(t, err.Error(), test.ErrorMessage, "Case #%v", i)
		}
	})
}

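// TestSetBucketAttribution checks that partner attribution can be set on
// an empty or not-yet-created bucket, but not on a bucket that already
// holds objects.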
func TestSetBucketAttribution(t *testing.T) {
	testplanet.Run(t, testplanet.Config{
		SatelliteCount: 1, StorageNodeCount: 0, UplinkCount: 1,
	}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
		apiKey := planet.Uplinks[0].APIKey[planet.Satellites[0].ID()]
		uplink := planet.Uplinks[0]

		err := uplink.CreateBucket(ctx, planet.Satellites[0], "alpha")
		require.NoError(t, err)

		metainfoClient, err := planet.Uplinks[0].DialMetainfo(ctx, planet.Satellites[0], apiKey)
		require.NoError(t, err)
		defer ctx.Check(metainfoClient.Close)

		partnerID := testrand.UUID()
		{
			// bucket with no items
			err = metainfoClient.SetBucketAttribution(ctx, metainfo.SetBucketAttributionParams{
				Bucket:    "alpha",
				PartnerID: partnerID,
			})
			require.NoError(t, err)

			// no bucket exists
			err = metainfoClient.SetBucketAttribution(ctx, metainfo.SetBucketAttributionParams{
				Bucket:    "beta",
				PartnerID: partnerID,
			})
			require.NoError(t, err)
		}
		{
			// already attributed bucket, adding files
			err = planet.Uplinks[0].Upload(ctx, planet.Satellites[0], "alpha", "path", []byte{1, 2, 3})
			assert.NoError(t, err)

			// bucket with items
			err = metainfoClient.SetBucketAttribution(ctx, metainfo.SetBucketAttributionParams{
				Bucket:    "beta",
				PartnerID: partnerID,
			})
			require.NoError(t, err)
		}
		{
			// non-attributed bucket, adding files
			err = planet.Uplinks[0].Upload(ctx, planet.Satellites[0], "alpha-new", "path", []byte{1, 2, 3})
			assert.NoError(t, err)

			// bucket with items
			err = metainfoClient.SetBucketAttribution(ctx, metainfo.SetBucketAttributionParams{
				Bucket:    "alpha-new",
				PartnerID: partnerID,
			})
			require.Error(t, err)
		}
	})
}

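// TestGetProjectInfo checks that each project receives a distinct,
// non-nil project salt.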
func TestGetProjectInfo(t *testing.T) {
	testplanet.Run(t, testplanet.Config{
		SatelliteCount: 1, StorageNodeCount: 0, UplinkCount: 2,
	}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
		apiKey0 := planet.Uplinks[0].APIKey[planet.Satellites[0].ID()]
		apiKey1 := planet.Uplinks[1].APIKey[planet.Satellites[0].ID()]

		metainfo0, err := planet.Uplinks[0].DialMetainfo(ctx, planet.Satellites[0], apiKey0)
		require.NoError(t, err)

		metainfo1, err := planet.Uplinks[0].DialMetainfo(ctx, planet.Satellites[0], apiKey1)
		require.NoError(t, err)

		info0, err := metainfo0.GetProjectInfo(ctx)
		require.NoError(t, err)
		require.NotNil(t, info0.ProjectSalt)

		info1, err := metainfo1.GetProjectInfo(ctx)
		require.NoError(t, err)
		require.NotNil(t, info1.ProjectSalt)

		// Different projects should have different salts
		require.NotEqual(t, info0.ProjectSalt, info1.ProjectSalt)
	})
}

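// runCreateSegment creates a test segment via CreateSegmentOld, fills the
// pointer with the returned node and piece IDs, and signs each piece hash
// with the matching storage node identity.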
func runCreateSegment(ctx context.Context, t *testing.T, metainfo *metainfo.Client, fullIDMap map[storj.NodeID]*identity.FullIdentity) (*pb.Pointer, []*pb.OrderLimit) {
	pointer := createTestPointer(t)

	addressedLimits, rootPieceID, _, err := metainfo.CreateSegmentOld(ctx, "my-bucket-name", "file/path", -1, pointer.Remote.Redundancy, memory.MiB.Int64(), pointer.ExpirationDate)
	require.NoError(t, err)

	pointer.Remote.RootPieceId = rootPieceID

	limits := make([]*pb.OrderLimit, len(addressedLimits))
	for i, addressedLimit := range addressedLimits {
		limits[i] = addressedLimit.Limit

		if len(pointer.Remote.RemotePieces) > i {
			nodeID := addressedLimits[i].Limit.StorageNodeId
			pointer.Remote.RemotePieces[i].NodeId = nodeID
			pointer.Remote.RemotePieces[i].Hash.PieceId = addressedLimits[i].Limit.PieceId

			snFullID := fullIDMap[nodeID]
			require.NotNil(t, snFullID)
			signer := signing.SignerFromFullIdentity(snFullID)
			storageNodeHash, err := signing.SignPieceHash(ctx, signer, pointer.Remote.RemotePieces[i].Hash)
			require.NoError(t, err)
			pointer.Remote.RemotePieces[i].Hash = storageNodeHash
		}
	}

	return pointer, limits
}

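// createTestPointer builds a remote pointer with three pieces sized for a
// 4 KiB segment under a 1/1/3/4 RS scheme.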
func createTestPointer(t *testing.T) *pb.Pointer {
	rs := &pb.RedundancyScheme{
		MinReq:           1,
		RepairThreshold:  1,
		SuccessThreshold: 3,
		Total:            4,
		ErasureShareSize: 1024,
		Type:             pb.RedundancyScheme_RS,
	}

	redundancy, err := eestream.NewRedundancyStrategyFromProto(rs)
	require.NoError(t, err)
	segmentSize := 4 * memory.KiB.Int64()
	pieceSize := eestream.CalcPieceSize(segmentSize, redundancy)
	timestamp := time.Now().Add(time.Hour)
	pointer := &pb.Pointer{
		CreationDate: time.Now(),
		Type:         pb.Pointer_REMOTE,
		SegmentSize:  segmentSize,
		Remote: &pb.RemoteSegment{
			Redundancy: rs,
			RemotePieces: []*pb.RemotePiece{
				{
					PieceNum: 0,
					Hash: &pb.PieceHash{
						PieceSize: pieceSize,
						Timestamp: timestamp,
					},
				},
				{
					PieceNum: 1,
					Hash: &pb.PieceHash{
						PieceSize: pieceSize,
						Timestamp: timestamp,
					},
				},
				{
					PieceNum: 2,
					Hash: &pb.PieceHash{
						PieceSize: pieceSize,
						Timestamp: timestamp,
					},
				},
			},
		},
		ExpirationDate: timestamp,
	}
	return pointer
}

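// TestBucketNameValidation runs CreateSegmentOld against lists of bucket
// names that must be accepted and names that must be rejected.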
func TestBucketNameValidation(t *testing.T) {
	testplanet.Run(t, testplanet.Config{
		SatelliteCount: 1, StorageNodeCount: 4, UplinkCount: 1,
	}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
		apiKey := planet.Uplinks[0].APIKey[planet.Satellites[0].ID()]

		metainfo, err := planet.Uplinks[0].DialMetainfo(ctx, planet.Satellites[0], apiKey)
		require.NoError(t, err)
		defer ctx.Check(metainfo.Close)

		rs := &pb.RedundancyScheme{
			MinReq:           1,
			RepairThreshold:  1,
			SuccessThreshold: 3,
			Total:            4,
			ErasureShareSize: 1024,
			Type:             pb.RedundancyScheme_RS,
		}

		validNames := []string{
			"tes", "testbucket",
			"test-bucket", "testbucket9",
			"9testbucket", "a.b",
			"test.bucket", "test-one.bucket-one",
			"test.bucket.one",
			"testbucket-63-0123456789012345678901234567890123456789012345abc",
		}
		for _, name := range validNames {
			_, _, _, err = metainfo.CreateSegmentOld(ctx, name, "", -1, rs, 1, time.Now().Add(time.Hour))
			require.NoError(t, err, "bucket name: %v", name)
		}

		invalidNames := []string{
			"", "t", "te", "-testbucket",
			"testbucket-", "-testbucket-",
			"a.b.", "test.bucket-.one",
			"test.-bucket.one", "1.2.3.4",
			"192.168.1.234", "testBUCKET",
			"test/bucket",
			"testbucket-64-0123456789012345678901234567890123456789012345abcd",
		}
		for _, name := range invalidNames {
			_, _, _, err = metainfo.CreateSegmentOld(ctx, name, "", -1, rs, 1, time.Now().Add(time.Hour))
			require.Error(t, err, "bucket name: %v", name)
		}
	})
}

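// TestListGetObjects uploads ten objects, then verifies ListObjects
// results, GetObject round-trips, and the list limit.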
func TestListGetObjects(t *testing.T) {
	testplanet.Run(t, testplanet.Config{
		SatelliteCount: 1, StorageNodeCount: 0, UplinkCount: 1,
	}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
		apiKey := planet.Uplinks[0].APIKey[planet.Satellites[0].ID()]
		uplink := planet.Uplinks[0]

		files := make([]string, 10)
		data := testrand.Bytes(1 * memory.KiB)
		for i := 0; i < len(files); i++ {
			files[i] = "path" + strconv.Itoa(i)
			err := uplink.Upload(ctx, planet.Satellites[0], "testbucket", files[i], data)
			require.NoError(t, err)
		}

		metainfoClient, err := planet.Uplinks[0].DialMetainfo(ctx, planet.Satellites[0], apiKey)
		require.NoError(t, err)
		defer ctx.Check(metainfoClient.Close)

		expectedBucketName := "testbucket"
		items, _, err := metainfoClient.ListObjects(ctx, metainfo.ListObjectsParams{
			Bucket: []byte(expectedBucketName),
		})
		require.NoError(t, err)
		require.Equal(t, len(files), len(items))
		for _, item := range items {
			require.NotEmpty(t, item.EncryptedPath)
			require.True(t, item.CreatedAt.Before(time.Now()))

			object, err := metainfoClient.GetObject(ctx, metainfo.GetObjectParams{
				Bucket:        []byte(expectedBucketName),
				EncryptedPath: item.EncryptedPath,
			})
			require.NoError(t, err)
			require.Equal(t, item.EncryptedPath, []byte(object.Path))

			require.NotEmpty(t, object.StreamID)
		}

		items, _, err = metainfoClient.ListObjects(ctx, metainfo.ListObjectsParams{
			Bucket: []byte(expectedBucketName),
			Limit:  3,
		})
		require.NoError(t, err)
		require.Equal(t, 3, len(items))
	})
}

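// TestBeginCommitListSegment walks the object write flow: BeginObject,
// BeginSegment, CommitSegment with signed piece hashes, CommitObject,
// then lists the object and its single segment.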
func TestBeginCommitListSegment(t *testing.T) {
	testplanet.Run(t, testplanet.Config{
		SatelliteCount: 1, StorageNodeCount: 4, UplinkCount: 1,
	}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
		apiKey := planet.Uplinks[0].APIKey[planet.Satellites[0].ID()]
		uplink := planet.Uplinks[0]

		config := uplink.GetConfig(planet.Satellites[0])
		metainfoService := planet.Satellites[0].Metainfo.Service

		projects, err := planet.Satellites[0].DB.Console().Projects().GetAll(ctx)
		require.NoError(t, err)
		projectID := projects[0].ID

		bucket := storj.Bucket{
			Name:       "initial-bucket",
			ProjectID:  projectID,
			PathCipher: config.GetEncryptionParameters().CipherSuite,
		}
		_, err = metainfoService.CreateBucket(ctx, bucket)
		require.NoError(t, err)

		metainfoClient, err := planet.Uplinks[0].DialMetainfo(ctx, planet.Satellites[0], apiKey)
		require.NoError(t, err)
		defer ctx.Check(metainfoClient.Close)

		params := metainfo.BeginObjectParams{
			Bucket:        []byte(bucket.Name),
			EncryptedPath: []byte("encrypted-path"),
			Redundancy: storj.RedundancyScheme{
				Algorithm:      storj.ReedSolomon,
				ShareSize:      256,
				RequiredShares: 1,
				RepairShares:   1,
				OptimalShares:  3,
				TotalShares:    4,
			},
			EncryptionParameters: storj.EncryptionParameters{},
			ExpiresAt:            time.Now().UTC().Add(24 * time.Hour),
		}
		streamID, err := metainfoClient.BeginObject(ctx, params)
		require.NoError(t, err)

		segmentID, limits, _, err := metainfoClient.BeginSegment(ctx, metainfo.BeginSegmentParams{
			StreamID: streamID,
			Position: storj.SegmentPosition{
				Index: 0,
			},
			MaxOrderLimit: memory.MiB.Int64(),
		})
		require.NoError(t, err)

		fullIDMap := make(map[storj.NodeID]*identity.FullIdentity)
		for _, node := range planet.StorageNodes {
			fullIDMap[node.ID()] = node.Identity
		}

		makeResult := func(num int32) *pb.SegmentPieceUploadResult {
			nodeID := limits[num].Limit.StorageNodeId
			hash := &pb.PieceHash{
				PieceId:   limits[num].Limit.PieceId,
				PieceSize: 1048832,
				Timestamp: time.Now(),
			}

			fullID := fullIDMap[nodeID]
			require.NotNil(t, fullID)
			signer := signing.SignerFromFullIdentity(fullID)
			signedHash, err := signing.SignPieceHash(ctx, signer, hash)
			require.NoError(t, err)

			return &pb.SegmentPieceUploadResult{
				PieceNum: num,
				NodeId:   nodeID,
				Hash:     signedHash,
			}
		}
		err = metainfoClient.CommitSegment(ctx, metainfo.CommitSegmentParams{
			SegmentID:         segmentID,
			SizeEncryptedData: memory.MiB.Int64(),
			UploadResult: []*pb.SegmentPieceUploadResult{
				makeResult(0),
				makeResult(1),
				makeResult(2),
			},
		})
		require.NoError(t, err)

		err = metainfoClient.CommitObject(ctx, metainfo.CommitObjectParams{
			StreamID: streamID,
		})
		require.NoError(t, err)

		objects, _, err := metainfoClient.ListObjects(ctx, metainfo.ListObjectsParams{
			Bucket: []byte(bucket.Name),
		})
		require.NoError(t, err)
		require.Len(t, objects, 1)

		require.Equal(t, params.EncryptedPath, objects[0].EncryptedPath)
		require.Equal(t, params.ExpiresAt, objects[0].ExpiresAt)

		object, err := metainfoClient.GetObject(ctx, metainfo.GetObjectParams{
			Bucket:        []byte(bucket.Name),
			EncryptedPath: objects[0].EncryptedPath,
		})
		require.NoError(t, err)

		segments, _, err := metainfoClient.ListSegments(ctx, metainfo.ListSegmentsParams{
			StreamID: object.StreamID,
		})
		require.NoError(t, err)
		require.Len(t, segments, 1)
	})
}

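// TestListSegment uploads a 15 KiB object in 1 KiB segments and checks
// ListSegments pagination: limits, cursors, the more flag, and the -1
// index that marks the last segment.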
func TestListSegment(t *testing.T) {
	testplanet.Run(t, testplanet.Config{
		SatelliteCount: 1, StorageNodeCount: 4, UplinkCount: 1,
	}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
		apiKey := planet.Uplinks[0].APIKey[planet.Satellites[0].ID()]
		uplink := planet.Uplinks[0]

		data := testrand.Bytes(15 * memory.KiB)
		config := uplink.GetConfig(planet.Satellites[0])
		config.Client.SegmentSize = memory.KiB
		err := uplink.UploadWithClientConfig(ctx, planet.Satellites[0], config, "testbucket", "test-path", data)
		require.NoError(t, err)

		// 15KiB + encryption should be uploaded into 16 segments with SegmentSize == 1KiB
		numberOfSegments := 16

		metainfoClient, err := planet.Uplinks[0].DialMetainfo(ctx, planet.Satellites[0], apiKey)
		require.NoError(t, err)
		defer ctx.Check(metainfoClient.Close)

		items, _, err := metainfoClient.ListObjects(ctx, metainfo.ListObjectsParams{
			Bucket: []byte("testbucket"),
			Limit:  1,
		})
		require.NoError(t, err)

		object, err := metainfoClient.GetObject(ctx, metainfo.GetObjectParams{
			Bucket:        []byte("testbucket"),
			EncryptedPath: items[0].EncryptedPath,
		})
		require.NoError(t, err)

		for _, test := range []struct {
			Index  int32
			Limit  int32
			Result int
			More   bool
		}{
			{Index: 0, Result: numberOfSegments},
			{Index: 0, Result: numberOfSegments, Limit: int32(numberOfSegments), More: false},
			{Index: 0, Result: 5, Limit: 5, More: true},
			{Index: 16, Result: 0, More: false},
			{Index: 11, Result: 5, Limit: 5, More: false},
			{Index: 15, Result: 1, More: false},
		} {
			segments, more, err := metainfoClient.ListSegments(ctx, metainfo.ListSegmentsParams{
				StreamID: object.StreamID,
				Limit:    test.Limit,
				CursorPosition: storj.SegmentPosition{
					Index: test.Index,
				},
			})
			require.NoError(t, err)
			require.Len(t, segments, test.Result)
			require.Equal(t, test.More, more)
			if !more && test.Result > 0 {
				require.Equal(t, int32(-1), segments[test.Result-1].Position.Index)
			}
		}
	})
}

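// TestInlineSegment covers the inline segment flow: begin object, store
// several inline segments, commit, then list, download, and delete them.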
func TestInlineSegment(t *testing.T) {
|
|
|
|
testplanet.Run(t, testplanet.Config{
|
|
|
|
SatelliteCount: 1, StorageNodeCount: 0, UplinkCount: 1,
|
|
|
|
}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
|
|
|
|
apiKey := planet.Uplinks[0].APIKey[planet.Satellites[0].ID()]
|
|
|
|
uplink := planet.Uplinks[0]
|
|
|
|
|
|
|
|
config := uplink.GetConfig(planet.Satellites[0])
|
|
|
|
metainfoService := planet.Satellites[0].Metainfo.Service
|
|
|
|
|
|
|
|
projects, err := planet.Satellites[0].DB.Console().Projects().GetAll(ctx)
|
|
|
|
require.NoError(t, err)
|
|
|
|
projectID := projects[0].ID
|
|
|
|
|
|
|
|
// TODO maybe split into separate cases
|
|
|
|
// Test:
|
|
|
|
// * create bucket
|
|
|
|
// * begin object
|
|
|
|
// * send several inline segments
|
|
|
|
// * commit object
|
|
|
|
// * list created object
|
|
|
|
// * list object segments
|
|
|
|
// * download segments
|
|
|
|
// * delete segments and object
|
|
|
|
|
|
|
|
bucket := storj.Bucket{
|
|
|
|
Name: "inline-segments-bucket",
|
|
|
|
ProjectID: projectID,
|
|
|
|
PathCipher: config.GetEncryptionParameters().CipherSuite,
|
|
|
|
}
|
|
|
|
_, err = metainfoService.CreateBucket(ctx, bucket)
|
|
|
|
require.NoError(t, err)
|
|
|
|
|
|
|
|
metainfoClient, err := planet.Uplinks[0].DialMetainfo(ctx, planet.Satellites[0], apiKey)
|
|
|
|
require.NoError(t, err)
|
|
|
|
defer ctx.Check(metainfoClient.Close)
|
|
|
|
|
|
|
|
params := metainfo.BeginObjectParams{
|
|
|
|
Bucket: []byte(bucket.Name),
|
|
|
|
EncryptedPath: []byte("encrypted-path"),
|
|
|
|
Redundancy: storj.RedundancyScheme{
|
|
|
|
Algorithm: storj.ReedSolomon,
|
|
|
|
ShareSize: 256,
|
|
|
|
RequiredShares: 1,
|
|
|
|
RepairShares: 1,
|
|
|
|
OptimalShares: 3,
|
|
|
|
TotalShares: 4,
|
|
|
|
},
|
2019-08-01 10:04:31 +01:00
|
|
|
EncryptionParameters: storj.EncryptionParameters{},
|
|
|
|
ExpiresAt: time.Now().UTC().Add(24 * time.Hour),
|
2019-07-24 12:33:23 +01:00
|
|
|
}
|
|
|
|
streamID, err := metainfoClient.BeginObject(ctx, params)
|
|
|
|
require.NoError(t, err)
|
|
|
|
|
2019-08-01 10:04:31 +01:00
|
|
|
		segments := []int32{0, 1, 2, 3, 4, 5, 6}
		segmentsData := make([][]byte, len(segments))
		for i, segment := range segments {
			segmentsData[i] = testrand.Bytes(memory.KiB)
			err = metainfoClient.MakeInlineSegment(ctx, metainfo.MakeInlineSegmentParams{
				StreamID: streamID,
				Position: storj.SegmentPosition{
					Index: segment,
				},
				EncryptedInlineData: segmentsData[i],
			})
			require.NoError(t, err)
		}

		err = metainfoClient.CommitObject(ctx, metainfo.CommitObjectParams{
			StreamID: streamID,
		})
		require.NoError(t, err)

		objects, _, err := metainfoClient.ListObjects(ctx, metainfo.ListObjectsParams{
			Bucket: []byte(bucket.Name),
		})
		require.NoError(t, err)
		require.Len(t, objects, 1)

		require.Equal(t, params.EncryptedPath, objects[0].EncryptedPath)
		require.Equal(t, params.ExpiresAt, objects[0].ExpiresAt)

		object, err := metainfoClient.GetObject(ctx, metainfo.GetObjectParams{
			Bucket:        params.Bucket,
			EncryptedPath: params.EncryptedPath,
		})
		require.NoError(t, err)

		{ // test listing inline segments
			for _, test := range []struct {
				Index  int32
				Limit  int
				Result int
				More   bool
			}{
				{Index: 0, Result: len(segments), More: false},
				{Index: 2, Result: len(segments) - 2, More: false},
				{Index: 0, Result: 3, More: true, Limit: 3},
				{Index: 0, Result: len(segments), More: false, Limit: len(segments)},
				{Index: 0, Result: len(segments) - 1, More: true, Limit: len(segments) - 1},
			} {
				items, more, err := metainfoClient.ListSegments(ctx, metainfo.ListSegmentsParams{
					StreamID: object.StreamID,
					CursorPosition: storj.SegmentPosition{
						Index: test.Index,
					},
					Limit: int32(test.Limit),
				})
				require.NoError(t, err)
				require.Equal(t, test.Result, len(items))
				require.Equal(t, test.More, more)
			}
		}

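		// Inline segments are served straight from the satellite's metainfo,
		// so DownloadSegment returns the encrypted data itself and no order
		// limits (there are no storage nodes to address).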
		{ // test download inline segments
			items, _, err := metainfoClient.ListSegments(ctx, metainfo.ListSegmentsParams{
				StreamID: object.StreamID,
			})
			require.NoError(t, err)
			require.Equal(t, len(segments), len(items))

			for i, item := range items {
				info, limits, err := metainfoClient.DownloadSegment(ctx, metainfo.DownloadSegmentParams{
					StreamID: object.StreamID,
					Position: storj.SegmentPosition{
						Index: item.Position.Index,
					},
				})
				require.NoError(t, err)
				require.Nil(t, limits)
				require.Equal(t, segmentsData[i], info.EncryptedInlineData)
			}
		}

		{ // test deleting segments
			streamID, err = metainfoClient.BeginDeleteObject(ctx, metainfo.BeginDeleteObjectParams{
				Bucket:        params.Bucket,
				EncryptedPath: params.EncryptedPath,
			})
			require.NoError(t, err)

			items, _, err := metainfoClient.ListSegments(ctx, metainfo.ListSegmentsParams{
				StreamID: streamID,
			})
			require.NoError(t, err)
			for _, item := range items {
				segmentID, limits, _, err := metainfoClient.BeginDeleteSegment(ctx, metainfo.BeginDeleteSegmentParams{
					StreamID: streamID,
					Position: storj.SegmentPosition{
						Index: item.Position.Index,
					},
				})
				require.NoError(t, err)
				require.Nil(t, limits)

				err = metainfoClient.FinishDeleteSegment(ctx, metainfo.FinishDeleteSegmentParams{
					SegmentID: segmentID,
				})
				require.NoError(t, err)
			}

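			// With every segment deleted, the object's pointer is gone and
			// listing the stream reports the object as not found.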
			_, _, err = metainfoClient.ListSegments(ctx, metainfo.ListSegmentsParams{
				StreamID: streamID,
			})
			require.Error(t, err)
			require.True(t, storj.ErrObjectNotFound.Has(err))

			err = metainfoClient.FinishDeleteObject(ctx, metainfo.FinishDeleteObjectParams{
				StreamID: streamID,
			})
			require.NoError(t, err)
		}
	})
}

func TestRemoteSegment(t *testing.T) {
	testplanet.Run(t, testplanet.Config{
		SatelliteCount: 1, StorageNodeCount: 4, UplinkCount: 1,
	}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
		apiKey := planet.Uplinks[0].APIKey[planet.Satellites[0].ID()]
		uplink := planet.Uplinks[0]

		expectedBucketName := "remote-segments-bucket"
		err := uplink.Upload(ctx, planet.Satellites[0], expectedBucketName, "file-object", testrand.Bytes(10*memory.KiB))
		require.NoError(t, err)

		metainfoClient, err := planet.Uplinks[0].DialMetainfo(ctx, planet.Satellites[0], apiKey)
		require.NoError(t, err)
		defer ctx.Check(metainfoClient.Close)

		items, _, err := metainfoClient.ListObjects(ctx, metainfo.ListObjectsParams{
			Bucket: []byte(expectedBucketName),
		})
		require.NoError(t, err)
		require.Len(t, items, 1)

		{
			// Get object
			// List segments
			// Download segment

			object, err := metainfoClient.GetObject(ctx, metainfo.GetObjectParams{
				Bucket:        []byte(expectedBucketName),
				EncryptedPath: items[0].EncryptedPath,
			})
			require.NoError(t, err)

			segments, _, err := metainfoClient.ListSegments(ctx, metainfo.ListSegmentsParams{
				StreamID: object.StreamID,
			})
			require.NoError(t, err)
			require.Len(t, segments, 1)

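			// Remote segments live on storage nodes, so DownloadSegment
			// returns signed order limits addressed to those nodes; the
			// uplink then fetches the pieces from the nodes directly.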
			_, limits, err := metainfoClient.DownloadSegment(ctx, metainfo.DownloadSegmentParams{
				StreamID: object.StreamID,
				Position: storj.SegmentPosition{
					Index: segments[0].Position.Index,
				},
			})
			require.NoError(t, err)
			require.NotEmpty(t, limits)
		}

		{
			// Begin deleting object
			// List segments
			// Begin/Finish deleting segment
			// List objects

			streamID, err := metainfoClient.BeginDeleteObject(ctx, metainfo.BeginDeleteObjectParams{
				Bucket:        []byte(expectedBucketName),
				EncryptedPath: items[0].EncryptedPath,
			})
			require.NoError(t, err)

			segments, _, err := metainfoClient.ListSegments(ctx, metainfo.ListSegmentsParams{
				StreamID: streamID,
			})
			require.NoError(t, err)

			for _, segment := range segments {
				segmentID, limits, _, err := metainfoClient.BeginDeleteSegment(ctx, metainfo.BeginDeleteSegmentParams{
					StreamID: streamID,
					Position: storj.SegmentPosition{
						Index: segment.Position.Index,
					},
				})
				require.NoError(t, err)
				require.NotEmpty(t, limits)

				err = metainfoClient.FinishDeleteSegment(ctx, metainfo.FinishDeleteSegmentParams{
					SegmentID: segmentID,
				})
				require.NoError(t, err)
			}

			err = metainfoClient.FinishDeleteObject(ctx, metainfo.FinishDeleteObjectParams{
				StreamID: streamID,
			})
			require.NoError(t, err)

			items, _, err = metainfoClient.ListObjects(ctx, metainfo.ListObjectsParams{
				Bucket: []byte(expectedBucketName),
			})
			require.NoError(t, err)
			require.Len(t, items, 0)
		}
	})
}

func TestIDs(t *testing.T) {
	testplanet.Run(t, testplanet.Config{
		SatelliteCount: 1, StorageNodeCount: 0, UplinkCount: 1,
	}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
		apiKey := planet.Uplinks[0].APIKey[planet.Satellites[0].ID()]

		metainfoClient, err := planet.Uplinks[0].DialMetainfo(ctx, planet.Satellites[0], apiKey)
		require.NoError(t, err)
		defer ctx.Check(metainfoClient.Close)

		{ // unsigned random IDs must be rejected
			streamID := testrand.StreamID(256)
			err = metainfoClient.CommitObject(ctx, metainfo.CommitObjectParams{
				StreamID: streamID,
			})
			require.Error(t, err) // invalid streamID

			segmentID := testrand.SegmentID(512)
			err = metainfoClient.CommitSegment(ctx, metainfo.CommitSegmentParams{
				SegmentID: segmentID,
			})
			require.Error(t, err) // invalid segmentID
		}

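		// The remaining cases use IDs that are correctly signed by the
		// satellite but carry a day-old creation date, so only expiry can
		// be the reason for rejection.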
		satellitePeer := signing.SignerFromFullIdentity(planet.Satellites[0].Identity)

		{ // streamID expired
			signedStreamID, err := signing.SignStreamID(ctx, satellitePeer, &pb.SatStreamID{
				CreationDate: time.Now().Add(-24 * time.Hour),
			})
			require.NoError(t, err)

			encodedStreamID, err := proto.Marshal(signedStreamID)
			require.NoError(t, err)

			streamID, err := storj.StreamIDFromBytes(encodedStreamID)
			require.NoError(t, err)

			err = metainfoClient.CommitObject(ctx, metainfo.CommitObjectParams{
				StreamID: streamID,
			})
			require.Error(t, err)
		}

		{ // segmentID expired
			signedSegmentID, err := signing.SignSegmentID(ctx, satellitePeer, &pb.SatSegmentID{
				CreationDate: time.Now().Add(-24 * time.Hour),
			})
			require.NoError(t, err)

			encodedSegmentID, err := proto.Marshal(signedSegmentID)
			require.NoError(t, err)

			segmentID, err := storj.SegmentIDFromBytes(encodedSegmentID)
			require.NoError(t, err)

			err = metainfoClient.CommitSegment(ctx, metainfo.CommitSegmentParams{
				SegmentID: segmentID,
			})
			require.Error(t, err)
		}
	})
}

func TestBatch(t *testing.T) {
	testplanet.Run(t, testplanet.Config{
		SatelliteCount: 1, StorageNodeCount: 0, UplinkCount: 1,
	}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
		apiKey := planet.Uplinks[0].APIKey[planet.Satellites[0].ID()]

		metainfoClient, err := planet.Uplinks[0].DialMetainfo(ctx, planet.Satellites[0], apiKey)
		require.NoError(t, err)
		defer ctx.Check(metainfoClient.Close)

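		// Batch sends all requests in a single round trip; the satellite
		// answers with one response per request, in request order.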
		{ // create a few buckets and list them in one batch
			requests := make([]metainfo.BatchItem, 0)
			numOfBuckets := 5
			for i := 0; i < numOfBuckets; i++ {
				requests = append(requests, &metainfo.CreateBucketParams{
					Name:                []byte("test-bucket-" + strconv.Itoa(i)),
					PathCipher:          storj.EncAESGCM,
					DefaultSegmentsSize: memory.MiB.Int64(),
				})
			}
			requests = append(requests, &metainfo.ListBucketsParams{
				ListOpts: storj.BucketListOptions{
					Cursor:    "",
					Direction: storj.After,
				},
			})
			responses, err := metainfoClient.Batch(ctx, requests...)
			require.NoError(t, err)
			require.Equal(t, numOfBuckets+1, len(responses))

			for i := 0; i < numOfBuckets; i++ {
				response, err := responses[i].CreateBucket()
				require.NoError(t, err)
				require.Equal(t, "test-bucket-"+strconv.Itoa(i), response.Bucket.Name)

				// accessing the response as the wrong type must fail
				_, err = responses[i].GetBucket()
				require.Error(t, err)
			}

			bucketsListResp, err := responses[numOfBuckets].ListBuckets()
			require.NoError(t, err)
			require.Equal(t, numOfBuckets, len(bucketsListResp.BucketList.Items))
		}

		{ // create a bucket and an object, upload inline segments in one batch, then download them in another
			err := planet.Uplinks[0].CreateBucket(ctx, planet.Satellites[0], "second-test-bucket")
			require.NoError(t, err)

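			// Only the BeginObjectParams names the bucket and path; the
			// segment and commit requests below omit StreamID, which the
			// batch machinery fills in from the BeginObject response
			// (contrast with the explicit-StreamID case further down).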
			requests := make([]metainfo.BatchItem, 0)
			requests = append(requests, &metainfo.BeginObjectParams{
				Bucket:        []byte("second-test-bucket"),
				EncryptedPath: []byte("encrypted-path"),
			})
			numOfSegments := 10
			expectedData := make([][]byte, numOfSegments)
			for i := 0; i < numOfSegments; i++ {
				expectedData[i] = testrand.Bytes(memory.KiB)

				requests = append(requests, &metainfo.MakeInlineSegmentParams{
					Position: storj.SegmentPosition{
						Index: int32(i),
					},
					EncryptedInlineData: expectedData[i],
				})
			}

			requests = append(requests, &metainfo.CommitObjectParams{})
			requests = append(requests, &metainfo.ListSegmentsParams{})

			responses, err := metainfoClient.Batch(ctx, requests...)
			require.NoError(t, err)
			require.Equal(t, numOfSegments+3, len(responses))

			listResponse, err := responses[numOfSegments+2].ListSegment()
			require.NoError(t, err)
			require.Equal(t, numOfSegments, len(listResponse.Items))

			requests = make([]metainfo.BatchItem, 0)
			requests = append(requests, &metainfo.GetObjectParams{
				Bucket:        []byte("second-test-bucket"),
				EncryptedPath: []byte("encrypted-path"),
			})
			for _, segment := range listResponse.Items {
				requests = append(requests, &metainfo.DownloadSegmentParams{
					Position: segment.Position,
				})
			}

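			// responses[0] belongs to the GetObjectParams request; the
			// segment downloads follow it in order, so the loop below skips
			// the first response.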
			responses, err = metainfoClient.Batch(ctx, requests...)
			require.NoError(t, err)
			require.Equal(t, len(listResponse.Items)+1, len(responses))

			for i, response := range responses[1:] {
				downloadResponse, err := response.DownloadSegment()
				require.NoError(t, err)

				require.Equal(t, expectedData[i], downloadResponse.Info.EncryptedInlineData)
			}
		}

		{ // test case when StreamID is not set automatically: BeginObject
			// happens outside the batch, so every request carries it explicitly
			err := planet.Uplinks[0].CreateBucket(ctx, planet.Satellites[0], "third-test-bucket")
			require.NoError(t, err)

			streamID, err := metainfoClient.BeginObject(ctx, metainfo.BeginObjectParams{
				Bucket:        []byte("third-test-bucket"),
				EncryptedPath: []byte("encrypted-path"),
			})
			require.NoError(t, err)

			requests := make([]metainfo.BatchItem, 0)
			numOfSegments := 10
			expectedData := make([][]byte, numOfSegments)
			for i := 0; i < numOfSegments; i++ {
				expectedData[i] = testrand.Bytes(memory.KiB)

				requests = append(requests, &metainfo.MakeInlineSegmentParams{
					StreamID: streamID,
					Position: storj.SegmentPosition{
						Index: int32(i),
					},
					EncryptedInlineData: expectedData[i],
				})
			}

			requests = append(requests, &metainfo.CommitObjectParams{
				StreamID: streamID,
			})

			responses, err := metainfoClient.Batch(ctx, requests...)
			require.NoError(t, err)
			require.Equal(t, numOfSegments+1, len(responses))
		}
	})
}

func TestValidateRS(t *testing.T) {
	testplanet.Run(t, testplanet.Config{
		SatelliteCount: 1, StorageNodeCount: 6, UplinkCount: 1,
		Reconfigure: testplanet.Reconfigure{
			Satellite: func(log *zap.Logger, index int, config *satellite.Config) {
				config.Metainfo.RS.MinTotalThreshold = 4
				config.Metainfo.RS.MaxTotalThreshold = 5
				config.Metainfo.RS.Validate = true
			},
		},
	}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
		ul := planet.Uplinks[0]
		satellite := planet.Satellites[0]

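		// With validation on, the satellite only accepts uploads whose RS
		// total (MaxThreshold here) lies within the configured
		// [MinTotalThreshold, MaxTotalThreshold] window of [4, 5].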
		testData := testrand.Bytes(8 * memory.KiB)
		rs := &uplink.RSConfig{
			MinThreshold:     1,
			RepairThreshold:  2,
			SuccessThreshold: 3,
			MaxThreshold:     3,
		}
		// test below the permitted total value
		err := ul.UploadWithConfig(ctx, satellite, rs, "testbucket", "test/path/below", testData)
		require.Error(t, err)

		// test above the permitted total value
		rs.MaxThreshold = 6
		err = ul.UploadWithConfig(ctx, satellite, rs, "testbucket", "test/path/above", testData)
		require.Error(t, err)

		// test the minimum permitted total value
		rs.MaxThreshold = 4
		err = ul.UploadWithConfig(ctx, satellite, rs, "testbucket", "test/path/min", testData)
		require.NoError(t, err)

		// test the maximum permitted total value
		rs.MaxThreshold = 5
		err = ul.UploadWithConfig(ctx, satellite, rs, "testbucket", "test/path/max", testData)
		require.NoError(t, err)
	})
}