// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.

package metainfo_test

import (
	"errors"
	"fmt"
	"net"
	"sort"
	"strconv"
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	"go.uber.org/zap"

	"storj.io/common/errs2"
	"storj.io/common/macaroon"
	"storj.io/common/memory"
	"storj.io/common/pb"
	"storj.io/common/rpc/rpcstatus"
	"storj.io/common/signing"
	"storj.io/common/storj"
	"storj.io/common/testcontext"
	"storj.io/common/testrand"
	"storj.io/common/uuid"
	"storj.io/storj/private/testplanet"
	"storj.io/storj/satellite"
	"storj.io/storj/satellite/buckets"
	"storj.io/storj/satellite/internalpb"
	"storj.io/storj/satellite/metabase"
	"storj.io/storj/satellite/metainfo"
	"storj.io/uplink"
	"storj.io/uplink/private/metaclient"
	"storj.io/uplink/private/object"
	"storj.io/uplink/private/testuplink"
)
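
// TestMaxOutBuckets verifies that bucket creation succeeds up to the
// project's configured MaxBuckets limit and fails once it is exceeded.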
func TestMaxOutBuckets(t *testing.T) {
	testplanet.Run(t, testplanet.Config{
		SatelliteCount: 1, StorageNodeCount: 0, UplinkCount: 1,
	}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
		limit := planet.Satellites[0].Config.Metainfo.ProjectLimits.MaxBuckets
		for i := 1; i <= limit; i++ {
			name := "test" + strconv.Itoa(i)
			err := planet.Uplinks[0].CreateBucket(ctx, planet.Satellites[0], name)
			require.NoError(t, err)
		}
		err := planet.Uplinks[0].CreateBucket(ctx, planet.Satellites[0], fmt.Sprintf("test%d", limit+1))
		require.Error(t, err)
		require.Contains(t, err.Error(), fmt.Sprintf("number of allocated buckets (%d) exceeded", limit))
	})
}
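
// TestRevokeAccess verifies that an access grant can only be revoked by an
// ancestor in its derivation chain, and that revocation also disables grants
// derived from the revoked one.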
func TestRevokeAccess(t *testing.T) {
	testplanet.Run(t, testplanet.Config{
		SatelliteCount: 1, StorageNodeCount: 0, UplinkCount: 1,
	}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
		accessIssuer := planet.Uplinks[0].Access[planet.Satellites[0].ID()]
		accessUser1, err := accessIssuer.Share(uplink.Permission{
			AllowDownload: true,
			AllowUpload:   true,
			AllowList:     true,
			AllowDelete:   true,
		})
		require.NoError(t, err)
		accessUser2, err := accessUser1.Share(uplink.Permission{
			AllowDownload: true,
			AllowUpload:   true,
			AllowList:     true,
			AllowDelete:   true,
		})
		require.NoError(t, err)

		projectUser2, err := uplink.OpenProject(ctx, accessUser2)
		require.NoError(t, err)
		defer ctx.Check(projectUser2.Close)

		// confirm that we can create a bucket
		_, err = projectUser2.CreateBucket(ctx, "bob")
		require.NoError(t, err)

		// we shouldn't be allowed to revoke ourselves or our parent
		err = projectUser2.RevokeAccess(ctx, accessUser2)
		assert.True(t, errs2.IsRPC(err, rpcstatus.PermissionDenied))
		err = projectUser2.RevokeAccess(ctx, accessUser1)
		assert.True(t, errs2.IsRPC(err, rpcstatus.PermissionDenied))

		projectIssuer, err := uplink.OpenProject(ctx, accessIssuer)
		require.NoError(t, err)
		defer ctx.Check(projectIssuer.Close)

		projectUser1, err := uplink.OpenProject(ctx, accessUser1)
		require.NoError(t, err)
		defer ctx.Check(projectUser1.Close)

		// the issuer should be able to revoke accessUser1
		err = projectIssuer.RevokeAccess(ctx, accessUser1)
		require.NoError(t, err)

		// bucket creation should now fail with accessUser1 as well as with
		// accessUser2, which was derived from the revoked accessUser1
		_, err = projectUser2.CreateBucket(ctx, "bob1")
		assert.True(t, errs2.IsRPC(err, rpcstatus.PermissionDenied))
		_, err = projectUser1.CreateBucket(ctx, "bob1")
		assert.True(t, errs2.IsRPC(err, rpcstatus.PermissionDenied))
	})
}
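
// TestRevokeMacaroon verifies that, after an API key's macaroon tail has been
// revoked, every metainfo endpoint rejects the key with PermissionDenied.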
func TestRevokeMacaroon(t *testing.T) {
	testplanet.Run(t, testplanet.Config{
		SatelliteCount: 1, StorageNodeCount: 0, UplinkCount: 1,
	}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
		// get the API key for the single satellite in this test
		apiKey := planet.Uplinks[0].APIKey[planet.Satellites[0].ID()]

		client, err := planet.Uplinks[0].DialMetainfo(ctx, planet.Satellites[0], apiKey)
		require.NoError(t, err)
		defer ctx.Check(client.Close)

		// Sanity check: it should work before revoke
		_, err = client.ListBuckets(ctx, metaclient.ListBucketsParams{
			ListOpts: storj.BucketListOptions{
				Cursor:    "",
				Direction: storj.Forward,
				Limit:     10,
			},
		})
		require.NoError(t, err)

		err = planet.Satellites[0].API.DB.Revocation().Revoke(ctx, apiKey.Tail(), []byte("apikey"))
		require.NoError(t, err)

		_, err = client.ListBuckets(ctx, metaclient.ListBucketsParams{})
		assert.True(t, errs2.IsRPC(err, rpcstatus.PermissionDenied))

		_, err = client.BeginObject(ctx, metaclient.BeginObjectParams{})
		assert.True(t, errs2.IsRPC(err, rpcstatus.PermissionDenied))

		_, err = client.BeginDeleteObject(ctx, metaclient.BeginDeleteObjectParams{})
		assert.True(t, errs2.IsRPC(err, rpcstatus.PermissionDenied))

		_, err = client.ListBuckets(ctx, metaclient.ListBucketsParams{})
		assert.True(t, errs2.IsRPC(err, rpcstatus.PermissionDenied))

		_, _, err = client.ListObjects(ctx, metaclient.ListObjectsParams{})
		assert.True(t, errs2.IsRPC(err, rpcstatus.PermissionDenied))

		_, err = client.CreateBucket(ctx, metaclient.CreateBucketParams{})
		assert.True(t, errs2.IsRPC(err, rpcstatus.PermissionDenied))

		_, err = client.DeleteBucket(ctx, metaclient.DeleteBucketParams{})
		assert.True(t, errs2.IsRPC(err, rpcstatus.PermissionDenied))

		_, err = client.BeginDeleteObject(ctx, metaclient.BeginDeleteObjectParams{})
		assert.True(t, errs2.IsRPC(err, rpcstatus.PermissionDenied))

		_, err = client.GetBucket(ctx, metaclient.GetBucketParams{})
		assert.True(t, errs2.IsRPC(err, rpcstatus.PermissionDenied))

		_, err = client.GetObject(ctx, metaclient.GetObjectParams{})
		assert.True(t, errs2.IsRPC(err, rpcstatus.PermissionDenied))

		_, err = client.GetProjectInfo(ctx)
		assert.True(t, errs2.IsRPC(err, rpcstatus.PermissionDenied))
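
		// the following methods authenticate via a StreamID, so build one
		// signed by the satellite before calling them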
		signer := signing.SignerFromFullIdentity(planet.Satellites[0].Identity)
		satStreamID := &internalpb.StreamID{
			CreationDate: time.Now(),
		}
		signedStreamID, err := metainfo.SignStreamID(ctx, signer, satStreamID)
		require.NoError(t, err)

		encodedStreamID, err := pb.Marshal(signedStreamID)
		require.NoError(t, err)

		err = client.CommitObject(ctx, metaclient.CommitObjectParams{StreamID: encodedStreamID})
		assert.True(t, errs2.IsRPC(err, rpcstatus.PermissionDenied))

		_, err = client.BeginSegment(ctx, metaclient.BeginSegmentParams{StreamID: encodedStreamID})
		assert.True(t, errs2.IsRPC(err, rpcstatus.PermissionDenied))

		err = client.MakeInlineSegment(ctx, metaclient.MakeInlineSegmentParams{StreamID: encodedStreamID})
		assert.True(t, errs2.IsRPC(err, rpcstatus.PermissionDenied))

		_, err = client.DownloadSegmentWithRS(ctx, metaclient.DownloadSegmentParams{StreamID: encodedStreamID})
		assert.True(t, errs2.IsRPC(err, rpcstatus.PermissionDenied))

		_, err = client.ListSegments(ctx, metaclient.ListSegmentsParams{StreamID: encodedStreamID})
		assert.True(t, errs2.IsRPC(err, rpcstatus.PermissionDenied))

		// these methods need a SegmentID

		signedSegmentID, err := metainfo.SignSegmentID(ctx, signer, &internalpb.SegmentID{
			StreamId:     satStreamID,
			CreationDate: time.Now(),
		})
		require.NoError(t, err)

		encodedSegmentID, err := pb.Marshal(signedSegmentID)
		require.NoError(t, err)

		segmentID, err := storj.SegmentIDFromBytes(encodedSegmentID)
		require.NoError(t, err)

		err = client.CommitSegment(ctx, metaclient.CommitSegmentParams{SegmentID: segmentID})
		assert.True(t, errs2.IsRPC(err, rpcstatus.PermissionDenied))
	})
}
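
// TestInvalidAPIKey verifies that malformed raw API keys are rejected with
// InvalidArgument by every metainfo endpoint.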
func TestInvalidAPIKey(t *testing.T) {
	testplanet.Run(t, testplanet.Config{
		SatelliteCount: 1, StorageNodeCount: 0, UplinkCount: 1,
	}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
		throwawayKey, err := macaroon.NewAPIKey([]byte("secret"))
		require.NoError(t, err)

		for _, invalidAPIKey := range []string{"", "invalid", "testKey"} {
			func() {
				client, err := planet.Uplinks[0].DialMetainfo(ctx, planet.Satellites[0], throwawayKey)
				require.NoError(t, err)
				defer ctx.Check(client.Close)

				client.SetRawAPIKey([]byte(invalidAPIKey))

				_, err = client.BeginObject(ctx, metaclient.BeginObjectParams{})
				assertInvalidArgument(t, err, false)

				_, err = client.BeginDeleteObject(ctx, metaclient.BeginDeleteObjectParams{})
				assertInvalidArgument(t, err, false)

				_, err = client.ListBuckets(ctx, metaclient.ListBucketsParams{})
				assertInvalidArgument(t, err, false)

				_, _, err = client.ListObjects(ctx, metaclient.ListObjectsParams{})
				assertInvalidArgument(t, err, false)

				_, err = client.CreateBucket(ctx, metaclient.CreateBucketParams{})
				assertInvalidArgument(t, err, false)

				_, err = client.DeleteBucket(ctx, metaclient.DeleteBucketParams{})
				assertInvalidArgument(t, err, false)

				_, err = client.BeginDeleteObject(ctx, metaclient.BeginDeleteObjectParams{})
				assertInvalidArgument(t, err, false)

				_, err = client.GetBucket(ctx, metaclient.GetBucketParams{})
				assertInvalidArgument(t, err, false)

				_, err = client.GetObject(ctx, metaclient.GetObjectParams{})
				assertInvalidArgument(t, err, false)

				_, err = client.GetProjectInfo(ctx)
				assertInvalidArgument(t, err, false)

				// these methods need a StreamID to do authentication

				signer := signing.SignerFromFullIdentity(planet.Satellites[0].Identity)
				satStreamID := &internalpb.StreamID{
					CreationDate: time.Now(),
				}
				signedStreamID, err := metainfo.SignStreamID(ctx, signer, satStreamID)
				require.NoError(t, err)

				encodedStreamID, err := pb.Marshal(signedStreamID)
				require.NoError(t, err)

				streamID, err := storj.StreamIDFromBytes(encodedStreamID)
				require.NoError(t, err)

				err = client.CommitObject(ctx, metaclient.CommitObjectParams{StreamID: streamID})
				assertInvalidArgument(t, err, false)

				_, err = client.BeginSegment(ctx, metaclient.BeginSegmentParams{StreamID: streamID})
				assertInvalidArgument(t, err, false)

				err = client.MakeInlineSegment(ctx, metaclient.MakeInlineSegmentParams{StreamID: streamID})
				assertInvalidArgument(t, err, false)

				_, err = client.DownloadSegmentWithRS(ctx, metaclient.DownloadSegmentParams{StreamID: streamID})
				assertInvalidArgument(t, err, false)

				_, err = client.ListSegments(ctx, metaclient.ListSegmentsParams{StreamID: streamID})
				assertInvalidArgument(t, err, false)

				// these methods need a SegmentID

				signedSegmentID, err := metainfo.SignSegmentID(ctx, signer, &internalpb.SegmentID{
					StreamId:     satStreamID,
					CreationDate: time.Now(),
				})
				require.NoError(t, err)

				encodedSegmentID, err := pb.Marshal(signedSegmentID)
				require.NoError(t, err)

				segmentID, err := storj.SegmentIDFromBytes(encodedSegmentID)
				require.NoError(t, err)

				err = client.CommitSegment(ctx, metaclient.CommitSegmentParams{SegmentID: segmentID})
				assertInvalidArgument(t, err, false)
			}()
		}
	})
}
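
// assertInvalidArgument asserts that err is an InvalidArgument RPC error.
// When allowed is true the check is skipped, since some calls only fail
// after the authentication step.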
func assertInvalidArgument(t *testing.T, err error, allowed bool) {
	t.Helper()

	// If it's allowed, we accept any non-unauthenticated error because
	// some calls error after the authentication checks.
	if !allowed {
		assert.True(t, errs2.IsRPC(err, rpcstatus.InvalidArgument))
	}
}
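
// TestServiceList uploads a set of objects and verifies recursive and
// non-recursive listings, including prefix entries.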
func TestServiceList(t *testing.T) {
	testplanet.Run(t, testplanet.Config{
		SatelliteCount: 1, StorageNodeCount: 0, UplinkCount: 1,
	}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
		items := []struct {
			Key   string
			Value []byte
		}{
			{Key: "sample.😶", Value: []byte{1}},
			{Key: "müsic", Value: []byte{2}},
			{Key: "müsic/söng1.mp3", Value: []byte{3}},
			{Key: "müsic/söng2.mp3", Value: []byte{4}},
			{Key: "müsic/album/söng3.mp3", Value: []byte{5}},
			{Key: "müsic/söng4.mp3", Value: []byte{6}},
			{Key: "ビデオ/movie.mkv", Value: []byte{7}},
		}

		for _, item := range items {
			err := planet.Uplinks[0].Upload(ctx, planet.Satellites[0], "testbucket", item.Key, item.Value)
			assert.NoError(t, err)
		}

		project, err := planet.Uplinks[0].GetProject(ctx, planet.Satellites[0])
		require.NoError(t, err)
		defer ctx.Check(project.Close)

		objects := project.ListObjects(ctx, "testbucket", &uplink.ListObjectsOptions{
			Recursive: true,
		})

		listItems := make([]*uplink.Object, 0)
		for objects.Next() {
			listItems = append(listItems, objects.Item())
		}
		require.NoError(t, objects.Err())

		expected := []storj.Object{
			{Path: "müsic"},
			{Path: "müsic/album/söng3.mp3"},
			{Path: "müsic/söng1.mp3"},
			{Path: "müsic/söng2.mp3"},
			{Path: "müsic/söng4.mp3"},
			{Path: "sample.😶"},
			{Path: "ビデオ/movie.mkv"},
		}

		require.Equal(t, len(expected), len(listItems))
		sort.Slice(listItems, func(i, k int) bool {
			return listItems[i].Key < listItems[k].Key
		})
		for i, item := range expected {
			require.Equal(t, item.Path, listItems[i].Key)
			require.Equal(t, item.IsPrefix, listItems[i].IsPrefix)
		}

		objects = project.ListObjects(ctx, "testbucket", &uplink.ListObjectsOptions{
			Recursive: false,
		})

		listItems = make([]*uplink.Object, 0)
		for objects.Next() {
			listItems = append(listItems, objects.Item())
		}
		require.NoError(t, objects.Err())

		expected = []storj.Object{
			{Path: "müsic"},
			{Path: "müsic/", IsPrefix: true},
			{Path: "sample.😶"},
			{Path: "ビデオ/", IsPrefix: true},
		}

		require.Equal(t, len(expected), len(listItems))
		sort.Slice(listItems, func(i, k int) bool {
			return listItems[i].Key < listItems[k].Key
		})
		for i, item := range expected {
			t.Log(item.Path, listItems[i].Key)
			require.Equal(t, item.Path, listItems[i].Key)
			require.Equal(t, item.IsPrefix, listItems[i].IsPrefix)
		}
	})
}

// func TestExpirationTimeSegment(t *testing.T) {
// 	testplanet.Run(t, testplanet.Config{
// 		SatelliteCount: 1, StorageNodeCount: 0, UplinkCount: 1,
// 	}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
// 		apiKey := planet.Uplinks[0].APIKey[planet.Satellites[0].ID()]
//
// 		err := planet.Uplinks[0].CreateBucket(ctx, planet.Satellites[0], "my-bucket-name")
// 		require.NoError(t, err)
//
// 		metainfoClient, err := planet.Uplinks[0].DialMetainfo(ctx, planet.Satellites[0], apiKey)
// 		require.NoError(t, err)
// 		defer ctx.Check(metainfoClient.Close)
//
// 		for i, r := range []struct {
// 			expirationDate time.Time
// 			errFlag        bool
// 		}{
// 			{ // expiration time not set
// 				time.Time{},
// 				false,
// 			},
// 			{ // 10 days into future
// 				time.Now().AddDate(0, 0, 10),
// 				false,
// 			},
// 			{ // current time
// 				time.Now(),
// 				true,
// 			},
// 			{ // 10 days into past
// 				time.Now().AddDate(0, 0, -10),
// 				true,
// 			},
// 		} {
// 			_, err := metainfoClient.BeginObject(ctx, metaclient.BeginObjectParams{
// 				Bucket:        []byte("my-bucket-name"),
// 				EncryptedPath: []byte("path" + strconv.Itoa(i)),
// 				ExpiresAt:     r.expirationDate,
// 				EncryptionParameters: storj.EncryptionParameters{
// 					CipherSuite: storj.EncAESGCM,
// 					BlockSize:   256,
// 				},
// 			})
// 			if r.errFlag {
// 				assert.Error(t, err)
// 			} else {
// 				assert.NoError(t, err)
// 			}
// 		}
// 	})
// }
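
// TestGetProjectInfo verifies that GetProjectInfo returns a non-empty project
// salt and that different projects get different salts.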
func TestGetProjectInfo(t *testing.T) {
	testplanet.Run(t, testplanet.Config{
		SatelliteCount: 1, StorageNodeCount: 0, UplinkCount: 2,
	}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
		apiKey0 := planet.Uplinks[0].APIKey[planet.Satellites[0].ID()]
		apiKey1 := planet.Uplinks[1].APIKey[planet.Satellites[0].ID()]

		metainfo0, err := planet.Uplinks[0].DialMetainfo(ctx, planet.Satellites[0], apiKey0)
		require.NoError(t, err)

		metainfo1, err := planet.Uplinks[0].DialMetainfo(ctx, planet.Satellites[0], apiKey1)
		require.NoError(t, err)

		info0, err := metainfo0.GetProjectInfo(ctx)
		require.NoError(t, err)
		require.NotNil(t, info0.ProjectSalt)

		info1, err := metainfo1.GetProjectInfo(ctx)
		require.NoError(t, err)
		require.NotNil(t, info1.ProjectSalt)

		// Different projects should have different salts
		require.NotEqual(t, info0.ProjectSalt, info1.ProjectSalt)
	})
}

// func TestBucketNameValidation(t *testing.T) {
// 	testplanet.Run(t, testplanet.Config{
// 		SatelliteCount: 1, StorageNodeCount: 4, UplinkCount: 1,
// 	}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
// 		apiKey := planet.Uplinks[0].APIKey[planet.Satellites[0].ID()]
//
// 		metainfoClient, err := planet.Uplinks[0].DialMetainfo(ctx, planet.Satellites[0], apiKey)
// 		require.NoError(t, err)
// 		defer ctx.Check(metainfoClient.Close)
//
// 		validNames := []string{
// 			"tes", "testbucket",
// 			"test-bucket", "testbucket9",
// 			"9testbucket", "a.b",
// 			"test.bucket", "test-one.bucket-one",
// 			"test.bucket.one",
// 			"testbucket-63-0123456789012345678901234567890123456789012345abc",
// 		}
// 		for _, name := range validNames {
// 			_, err = metainfoClient.CreateBucket(ctx, metaclient.CreateBucketParams{
// 				Name: []byte(name),
// 			})
// 			require.NoError(t, err, "bucket name: %v", name)
//
// 			_, err = metainfoClient.BeginObject(ctx, metaclient.BeginObjectParams{
// 				Bucket:        []byte(name),
// 				EncryptedPath: []byte("123"),
// 				Version:       0,
// 				ExpiresAt:     time.Now().Add(16 * 24 * time.Hour),
// 				EncryptionParameters: storj.EncryptionParameters{
// 					CipherSuite: storj.EncAESGCM,
// 					BlockSize:   256,
// 				},
// 			})
// 			require.NoError(t, err, "bucket name: %v", name)
// 		}
//
// 		invalidNames := []string{
// 			"", "t", "te", "-testbucket",
// 			"testbucket-", "-testbucket-",
// 			"a.b.", "test.bucket-.one",
// 			"test.-bucket.one", "1.2.3.4",
// 			"192.168.1.234", "testBUCKET",
// 			"test/bucket",
// 			"testbucket-64-0123456789012345678901234567890123456789012345abcd",
// 		}
// 		for _, name := range invalidNames {
// 			_, err = metainfoClient.BeginObject(ctx, metaclient.BeginObjectParams{
// 				Bucket:        []byte(name),
// 				EncryptedPath: []byte("123"),
// 			})
// 			require.Error(t, err, "bucket name: %v", name)
//
// 			_, err = metainfoClient.CreateBucket(ctx, metaclient.CreateBucketParams{
// 				Name: []byte(name),
// 			})
// 			require.Error(t, err, "bucket name: %v", name)
// 		}
// 	})
// }

// func TestListGetObjects(t *testing.T) {
// 	testplanet.Run(t, testplanet.Config{
// 		SatelliteCount: 1, StorageNodeCount: 0, UplinkCount: 1,
// 	}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
// 		apiKey := planet.Uplinks[0].APIKey[planet.Satellites[0].ID()]
// 		uplink := planet.Uplinks[0]
//
// 		files := make([]string, 10)
// 		data := testrand.Bytes(1 * memory.KiB)
// 		for i := 0; i < len(files); i++ {
// 			files[i] = "path" + strconv.Itoa(i)
// 			err := uplink.Upload(ctx, planet.Satellites[0], "testbucket", files[i], data)
// 			require.NoError(t, err)
// 		}
//
// 		metainfoClient, err := planet.Uplinks[0].DialMetainfo(ctx, planet.Satellites[0], apiKey)
// 		require.NoError(t, err)
// 		defer ctx.Check(metainfoClient.Close)
//
// 		expectedBucketName := "testbucket"
// 		items, _, err := metainfoClient.ListObjects(ctx, metaclient.ListObjectsParams{
// 			Bucket:                []byte(expectedBucketName),
// 			IncludeSystemMetadata: true,
// 		})
// 		require.NoError(t, err)
// 		require.Equal(t, len(files), len(items))
// 		for _, item := range items {
// 			require.NotEmpty(t, item.EncryptedPath)
// 			require.True(t, item.CreatedAt.Before(time.Now()))
//
// 			object, err := metainfoClient.GetObject(ctx, metaclient.GetObjectParams{
// 				Bucket:        []byte(expectedBucketName),
// 				EncryptedPath: item.EncryptedPath,
// 			})
// 			require.NoError(t, err)
// 			require.Equal(t, item.EncryptedPath, object.EncryptedPath)
//
// 			require.NotEmpty(t, object.StreamID)
// 		}
//
// 		items, _, err = metainfoClient.ListObjects(ctx, metaclient.ListObjectsParams{
// 			Bucket: []byte(expectedBucketName),
// 			Limit:  3,
// 		})
// 		require.NoError(t, err)
// 		require.Equal(t, 3, len(items))
// 	})
// }

// func TestBucketExistenceCheck(t *testing.T) {
// 	testplanet.Run(t, testplanet.Config{
// 		SatelliteCount: 1, StorageNodeCount: 0, UplinkCount: 1,
// 	}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
// 		apiKey := planet.Uplinks[0].APIKey[planet.Satellites[0].ID()]
//
// 		metainfoClient, err := planet.Uplinks[0].DialMetainfo(ctx, planet.Satellites[0], apiKey)
// 		require.NoError(t, err)
// 		defer ctx.Check(metainfoClient.Close)
//
// 		// test object methods for bucket existence check
// 		_, err = metainfoClient.BeginObject(ctx, metaclient.BeginObjectParams{
// 			Bucket:        []byte("non-existing-bucket"),
// 			EncryptedPath: []byte("encrypted-path"),
// 		})
// 		require.Error(t, err)
// 		require.True(t, errs2.IsRPC(err, rpcstatus.NotFound))
// 		require.Equal(t, storj.ErrBucketNotFound.New("%s", "non-existing-bucket").Error(), errs.Unwrap(err).Error())
//
// 		_, _, err = metainfoClient.ListObjects(ctx, metaclient.ListObjectsParams{
// 			Bucket: []byte("non-existing-bucket"),
// 		})
// 		require.Error(t, err)
// 		require.True(t, errs2.IsRPC(err, rpcstatus.NotFound))
// 		require.Equal(t, storj.ErrBucketNotFound.New("%s", "non-existing-bucket").Error(), errs.Unwrap(err).Error())
// 	})
// }

// func TestBeginCommit(t *testing.T) {
// 	testplanet.Run(t, testplanet.Config{
// 		SatelliteCount: 1, StorageNodeCount: 4, UplinkCount: 1,
// 		Reconfigure: testplanet.Reconfigure{
// 			Satellite: func(logger *zap.Logger, index int, config *satellite.Config) {
// 				config.Overlay.GeoIP.MockCountries = []string{"DE"}
// 			},
// 		},
// 	}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
// 		apiKey := planet.Uplinks[0].APIKey[planet.Satellites[0].ID()]
// 		buckets := planet.Satellites[0].API.Buckets.Service
//
// 		bucket := storj.Bucket{
// 			Name:      "initial-bucket",
// 			ProjectID: planet.Uplinks[0].Projects[0].ID,
// 			Placement: storj.EU,
// 		}
//
// 		_, err := buckets.CreateBucket(ctx, bucket)
// 		require.NoError(t, err)
//
// 		metainfoClient, err := planet.Uplinks[0].DialMetainfo(ctx, planet.Satellites[0], apiKey)
// 		require.NoError(t, err)
// 		defer ctx.Check(metainfoClient.Close)
//
// 		params := metaclient.BeginObjectParams{
// 			Bucket:        []byte(bucket.Name),
// 			EncryptedPath: []byte("encrypted-path"),
// 			Redundancy: storj.RedundancyScheme{
// 				Algorithm:      storj.ReedSolomon,
// 				ShareSize:      256,
// 				RequiredShares: 1,
// 				RepairShares:   1,
// 				OptimalShares:  3,
// 				TotalShares:    4,
// 			},
// 			EncryptionParameters: storj.EncryptionParameters{
// 				CipherSuite: storj.EncAESGCM,
// 				BlockSize:   256,
// 			},
// 			ExpiresAt: time.Now().Add(24 * time.Hour),
// 		}
// 		beginObjectResponse, err := metainfoClient.BeginObject(ctx, params)
// 		require.NoError(t, err)
//
// 		streamID := internalpb.StreamID{}
// 		err = pb.Unmarshal(beginObjectResponse.StreamID.Bytes(), &streamID)
// 		require.NoError(t, err)
// 		require.Equal(t, int32(storj.EU), streamID.Placement)
//
// 		response, err := metainfoClient.BeginSegment(ctx, metaclient.BeginSegmentParams{
// 			StreamID: beginObjectResponse.StreamID,
// 			Position: storj.SegmentPosition{
// 				Index: 0,
// 			},
// 			MaxOrderLimit: memory.MiB.Int64(),
// 		})
// 		require.NoError(t, err)
//
// 		fullIDMap := make(map[storj.NodeID]*identity.FullIdentity)
// 		for _, node := range planet.StorageNodes {
// 			fullIDMap[node.ID()] = node.Identity
// 		}
//
// 		makeResult := func(num int32) *pb.SegmentPieceUploadResult {
// 			nodeID := response.Limits[num].Limit.StorageNodeId
// 			hash := &pb.PieceHash{
// 				PieceId:   response.Limits[num].Limit.PieceId,
// 				PieceSize: 1048832,
// 				Timestamp: time.Now(),
// 			}
//
// 			fullID := fullIDMap[nodeID]
// 			require.NotNil(t, fullID)
// 			signer := signing.SignerFromFullIdentity(fullID)
// 			signedHash, err := signing.SignPieceHash(ctx, signer, hash)
// 			require.NoError(t, err)
//
// 			return &pb.SegmentPieceUploadResult{
// 				PieceNum: num,
// 				NodeId:   nodeID,
// 				Hash:     signedHash,
// 			}
// 		}
// 		err = metainfoClient.CommitSegment(ctx, metaclient.CommitSegmentParams{
// 			SegmentID: response.SegmentID,
// 			Encryption: storj.SegmentEncryption{
// 				EncryptedKey: testrand.Bytes(256),
// 			},
// 			PlainSize:         5000,
// 			SizeEncryptedData: memory.MiB.Int64(),
// 			UploadResult: []*pb.SegmentPieceUploadResult{
// 				makeResult(0),
// 				makeResult(1),
// 				makeResult(2),
// 			},
// 		})
// 		require.NoError(t, err)
//
// 		metadata, err := pb.Marshal(&pb.StreamMeta{
// 			NumberOfSegments: 1,
// 		})
// 		require.NoError(t, err)
// 		err = metainfoClient.CommitObject(ctx, metaclient.CommitObjectParams{
// 			StreamID:                      beginObjectResponse.StreamID,
// 			EncryptedMetadata:             metadata,
// 			EncryptedMetadataNonce:        testrand.Nonce(),
// 			EncryptedMetadataEncryptedKey: testrand.Bytes(32),
// 		})
// 		require.NoError(t, err)
//
// 		objects, _, err := metainfoClient.ListObjects(ctx, metaclient.ListObjectsParams{
// 			Bucket:                []byte(bucket.Name),
// 			IncludeSystemMetadata: true,
// 		})
// 		require.NoError(t, err)
// 		require.Len(t, objects, 1)
// 		require.Equal(t, params.EncryptedPath, objects[0].EncryptedPath)
// 		// TODO find a better way to compare (one ExpiresAt contains time zone information)
// 		require.Equal(t, params.ExpiresAt.Unix(), objects[0].ExpiresAt.Unix())
//
// 		object, err := metainfoClient.GetObject(ctx, metaclient.GetObjectParams{
// 			Bucket:        []byte(bucket.Name),
// 			EncryptedPath: objects[0].EncryptedPath,
// 		})
// 		require.NoError(t, err)
//
// 		project := planet.Uplinks[0].Projects[0]
// 		allObjects, err := planet.Satellites[0].Metabase.DB.TestingAllCommittedObjects(ctx, project.ID, object.Bucket)
// 		require.NoError(t, err)
// 		require.Len(t, allObjects, 1)
// 	})
// }

// func TestInlineSegment(t *testing.T) {
// 	testplanet.Run(t, testplanet.Config{
// 		SatelliteCount: 1, StorageNodeCount: 0, UplinkCount: 1,
// 	}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
// 		apiKey := planet.Uplinks[0].APIKey[planet.Satellites[0].ID()]
// 		buckets := planet.Satellites[0].API.Buckets.Service
//
// 		// TODO maybe split into separate cases
// 		// Test:
// 		// * create bucket
// 		// * begin object
// 		// * send several inline segments
// 		// * commit object
// 		// * list created object
// 		// * list object segments
// 		// * download segments
// 		// * delete segments and object
//
// 		bucket := storj.Bucket{
// 			Name:      "inline-segments-bucket",
// 			ProjectID: planet.Uplinks[0].Projects[0].ID,
// 		}
// 		_, err := buckets.CreateBucket(ctx, bucket)
// 		require.NoError(t, err)
//
// 		metainfoClient, err := planet.Uplinks[0].DialMetainfo(ctx, planet.Satellites[0], apiKey)
// 		require.NoError(t, err)
// 		defer ctx.Check(metainfoClient.Close)
//
// 		params := metaclient.BeginObjectParams{
// 			Bucket:        []byte(bucket.Name),
// 			EncryptedPath: []byte("encrypted-path"),
// 			Redundancy: storj.RedundancyScheme{
// 				Algorithm:      storj.ReedSolomon,
// 				ShareSize:      256,
// 				RequiredShares: 1,
// 				RepairShares:   1,
// 				OptimalShares:  3,
// 				TotalShares:    4,
// 			},
// 			EncryptionParameters: storj.EncryptionParameters{
// 				CipherSuite: storj.EncAESGCM,
// 				BlockSize:   256,
// 			},
//
// 			ExpiresAt: time.Now().Add(24 * time.Hour),
// 		}
// 		beginObjectResp, err := metainfoClient.BeginObject(ctx, params)
// 		require.NoError(t, err)
//
// 		segments := []int32{0, 1, 2, 3, 4, 5, 6}
// 		segmentsData := make([][]byte, len(segments))
// 		for i, segment := range segments {
// 			segmentsData[i] = testrand.Bytes(memory.KiB)
// 			err = metainfoClient.MakeInlineSegment(ctx, metaclient.MakeInlineSegmentParams{
// 				StreamID: beginObjectResp.StreamID,
// 				Position: storj.SegmentPosition{
// 					Index: segment,
// 				},
// 				PlainSize:           1024,
// 				EncryptedInlineData: segmentsData[i],
// 				Encryption: storj.SegmentEncryption{
// 					EncryptedKey: testrand.Bytes(256),
// 				},
// 			})
// 			require.NoError(t, err)
// 		}
//
// 		metadata, err := pb.Marshal(&pb.StreamMeta{
// 			NumberOfSegments: int64(len(segments)),
// 		})
// 		require.NoError(t, err)
// 		err = metainfoClient.CommitObject(ctx, metaclient.CommitObjectParams{
// 			StreamID:                      beginObjectResp.StreamID,
// 			EncryptedMetadata:             metadata,
// 			EncryptedMetadataNonce:        testrand.Nonce(),
// 			EncryptedMetadataEncryptedKey: testrand.Bytes(32),
// 		})
// 		require.NoError(t, err)
//
// 		objects, _, err := metainfoClient.ListObjects(ctx, metaclient.ListObjectsParams{
// 			Bucket:                []byte(bucket.Name),
// 			IncludeSystemMetadata: true,
// 		})
// 		require.NoError(t, err)
// 		require.Len(t, objects, 1)
//
// 		require.Equal(t, params.EncryptedPath, objects[0].EncryptedPath)
// 		// TODO find a better way to compare (one ExpiresAt contains time zone information)
// 		require.Equal(t, params.ExpiresAt.Unix(), objects[0].ExpiresAt.Unix())
//
// 		object, err := metainfoClient.GetObject(ctx, metaclient.GetObjectParams{
// 			Bucket:        params.Bucket,
// 			EncryptedPath: params.EncryptedPath,
// 		})
// 		require.NoError(t, err)
//
// 		{ // Confirm data larger than our configured max inline segment size of 4 KiB cannot be inlined
// 			beginObjectResp, err := metainfoClient.BeginObject(ctx, metaclient.BeginObjectParams{
// 				Bucket:        []byte(bucket.Name),
// 				EncryptedPath: []byte("too-large-inline-segment"),
// 				EncryptionParameters: storj.EncryptionParameters{
// 					CipherSuite: storj.EncAESGCM,
// 					BlockSize:   256,
// 				},
// 			})
// 			require.NoError(t, err)
//
// 			data := testrand.Bytes(10 * memory.KiB)
// 			err = metainfoClient.MakeInlineSegment(ctx, metaclient.MakeInlineSegmentParams{
// 				StreamID: beginObjectResp.StreamID,
// 				Position: storj.SegmentPosition{
// 					Index: 0,
// 				},
// 				EncryptedInlineData: data,
// 				Encryption: storj.SegmentEncryption{
// 					EncryptedKey: testrand.Bytes(256),
// 				},
// 			})
// 			require.Error(t, err)
// 		}
//
// 		{ // test download inline segments
// 			existingSegments := []int32{0, 1, 2, 3, 4, 5, -1}
//
// 			for i, index := range existingSegments {
// 				response, err := metainfoClient.DownloadSegmentWithRS(ctx, metaclient.DownloadSegmentParams{
// 					StreamID: object.StreamID,
// 					Position: storj.SegmentPosition{
// 						Index: index,
// 					},
// 				})
// 				require.NoError(t, err)
// 				require.Nil(t, response.Limits)
// 				require.Equal(t, segmentsData[i], response.Info.EncryptedInlineData)
// 			}
// 		}
//
// 		{ // test deleting segments
// 			_, err := metainfoClient.BeginDeleteObject(ctx, metaclient.BeginDeleteObjectParams{
// 				Bucket:        params.Bucket,
// 				EncryptedPath: params.EncryptedPath,
// 			})
// 			require.NoError(t, err)
//
// 			_, err = metainfoClient.GetObject(ctx, metaclient.GetObjectParams{
// 				Bucket:        params.Bucket,
// 				EncryptedPath: params.EncryptedPath,
// 			})
// 			require.Error(t, err)
// 		}
// 	})
// }
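
// TestUploadWithPlacement verifies that a bucket's placement constraint is
// carried through to the remote segments written for objects in that bucket.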
func TestUploadWithPlacement(t *testing.T) {
	testplanet.Run(t, testplanet.Config{
		SatelliteCount: 1, StorageNodeCount: 4, UplinkCount: 1,
		Reconfigure: testplanet.Reconfigure{
			Satellite: func(logger *zap.Logger, index int, config *satellite.Config) {
				config.Overlay.GeoIP.MockCountries = []string{"DE"}
			},
		},
	}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
		bucketName := "initial-bucket"
		objectName := "file1"

		apiKey := planet.Uplinks[0].APIKey[planet.Satellites[0].ID()]
		fmt.Println(apiKey)
		buckets := planet.Satellites[0].API.Buckets.Service

		bucket := storj.Bucket{
			Name:      bucketName,
			ProjectID: planet.Uplinks[0].Projects[0].ID,
			Placement: storj.EU,
		}
		_, err := buckets.CreateBucket(ctx, bucket)
		require.NoError(t, err)

		// this should be bigger than the max inline segment size
		content := make([]byte, 5000)
		err = planet.Uplinks[0].Upload(ctx, planet.Satellites[0], bucket.Name, objectName, content)
		require.NoError(t, err)

		segments, err := planet.Satellites[0].Metabase.DB.TestingAllSegments(ctx)
		require.NoError(t, err)
		require.Equal(t, 1, len(segments))
		require.Equal(t, storj.EU, segments[0].Placement)
	})
}

// func TestRemoteSegment(t *testing.T) {
// 	testplanet.Run(t, testplanet.Config{
// 		SatelliteCount: 1, StorageNodeCount: 4, UplinkCount: 1,
// 	}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
// 		apiKey := planet.Uplinks[0].APIKey[planet.Satellites[0].ID()]
// 		uplink := planet.Uplinks[0]
//
// 		expectedBucketName := "remote-segments-bucket"
// 		err := uplink.Upload(ctx, planet.Satellites[0], expectedBucketName, "file-object", testrand.Bytes(50*memory.KiB))
// 		require.NoError(t, err)
//
// 		metainfoClient, err := planet.Uplinks[0].DialMetainfo(ctx, planet.Satellites[0], apiKey)
// 		require.NoError(t, err)
// 		defer ctx.Check(metainfoClient.Close)
//
// 		items, _, err := metainfoClient.ListObjects(ctx, metaclient.ListObjectsParams{
// 			Bucket: []byte(expectedBucketName),
// 		})
// 		require.NoError(t, err)
// 		require.Len(t, items, 1)
//
// 		{
// 			// Get object
// 			// Download segment
//
// 			object, err := metainfoClient.GetObject(ctx, metaclient.GetObjectParams{
// 				Bucket:        []byte(expectedBucketName),
// 				EncryptedPath: items[0].EncryptedPath,
// 			})
// 			require.NoError(t, err)
//
// 			response, err := metainfoClient.DownloadSegmentWithRS(ctx, metaclient.DownloadSegmentParams{
// 				StreamID: object.StreamID,
// 				Position: storj.SegmentPosition{
// 					Index: -1,
// 				},
// 			})
// 			require.NoError(t, err)
// 			require.NotEmpty(t, response.Limits)
// 		}
//
// 		{
// 			// Download Object
// 			download, err := metainfoClient.DownloadObject(ctx, metaclient.DownloadObjectParams{
// 				Bucket:             []byte(expectedBucketName),
// 				EncryptedObjectKey: items[0].EncryptedPath,
// 				Range: metaclient.StreamRange{
// 					Mode:  metaclient.StreamRangeStartLimit,
// 					Start: 1,
// 					Limit: 2,
// 				},
// 			})
// 			require.NoError(t, err)
// 			require.Len(t, download.DownloadedSegments, 1)
// 			require.NotEmpty(t, download.DownloadedSegments[0].Limits)
// 			for _, limit := range download.DownloadedSegments[0].Limits {
// 				if limit == nil {
// 					continue
// 				}
// 				// requested download range is [1:2}
// 				// calculating encryption input block size (7408) indices gives us 0 and 1;
// 				// converting these into the output block size (7424) gives us [0:7424};
// 				// aligning this to the stripe size (256) keeps it at [0:7424}
// 				require.Equal(t, int64(7424), limit.Limit.Limit)
// 			}
// 		}
//
// 		{
// 			// Begin deleting object
// 			// List objects
//
// 			_, err := metainfoClient.BeginDeleteObject(ctx, metaclient.BeginDeleteObjectParams{
// 				Bucket:        []byte(expectedBucketName),
// 				EncryptedPath: items[0].EncryptedPath,
// 			})
// 			require.NoError(t, err)
//
// 			items, _, err = metainfoClient.ListObjects(ctx, metaclient.ListObjectsParams{
// 				Bucket: []byte(expectedBucketName),
// 			})
// 			require.NoError(t, err)
// 			require.Len(t, items, 0)
// 		}
// 	})
// }
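
// TestIDs verifies that the satellite rejects stream and segment IDs that are
// malformed, expired, or missing required fields.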
func TestIDs(t *testing.T) {
	testplanet.Run(t, testplanet.Config{
		SatelliteCount: 1, StorageNodeCount: 0, UplinkCount: 1,
	}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
		apiKey := planet.Uplinks[0].APIKey[planet.Satellites[0].ID()]

		metainfoClient, err := planet.Uplinks[0].DialMetainfo(ctx, planet.Satellites[0], apiKey)
		require.NoError(t, err)
		defer ctx.Check(metainfoClient.Close)

		{
			streamID := testrand.StreamID(256)
			err = metainfoClient.CommitObject(ctx, metaclient.CommitObjectParams{
				StreamID: streamID,
			})
			require.Error(t, err) // invalid streamID

			segmentID := testrand.SegmentID(512)
			err = metainfoClient.CommitSegment(ctx, metaclient.CommitSegmentParams{
				SegmentID: segmentID,
			})
			require.Error(t, err) // invalid segmentID
		}

		satellitePeer := signing.SignerFromFullIdentity(planet.Satellites[0].Identity)

		{ // streamID expired
			signedStreamID, err := metainfo.SignStreamID(ctx, satellitePeer, &internalpb.StreamID{
				CreationDate: time.Now().Add(-36 * time.Hour),
			})
			require.NoError(t, err)

			encodedStreamID, err := pb.Marshal(signedStreamID)
			require.NoError(t, err)

			streamID, err := storj.StreamIDFromBytes(encodedStreamID)
			require.NoError(t, err)

			err = metainfoClient.CommitObject(ctx, metaclient.CommitObjectParams{
				StreamID: streamID,
			})
			require.Error(t, err)
		}

		{ // segmentID missing streamID
			signedSegmentID, err := metainfo.SignSegmentID(ctx, satellitePeer, &internalpb.SegmentID{
				CreationDate: time.Now().Add(-1 * time.Hour),
			})
			require.NoError(t, err)

			encodedSegmentID, err := pb.Marshal(signedSegmentID)
			require.NoError(t, err)

			segmentID, err := storj.SegmentIDFromBytes(encodedSegmentID)
			require.NoError(t, err)

			err = metainfoClient.CommitSegment(ctx, metaclient.CommitSegmentParams{
				SegmentID: segmentID,
			})
			require.Error(t, err)
		}

		{ // segmentID expired
			signedSegmentID, err := metainfo.SignSegmentID(ctx, satellitePeer, &internalpb.SegmentID{
				CreationDate: time.Now().Add(-36 * time.Hour),
				StreamId: &internalpb.StreamID{
					CreationDate: time.Now(),
				},
			})
			require.NoError(t, err)

			encodedSegmentID, err := pb.Marshal(signedSegmentID)
			require.NoError(t, err)

			segmentID, err := storj.SegmentIDFromBytes(encodedSegmentID)
			require.NoError(t, err)

			err = metainfoClient.CommitSegment(ctx, metaclient.CommitSegmentParams{
				SegmentID: segmentID,
			})
			require.Error(t, err)
		}
	})
}

// func TestBatch(t *testing.T) {
// 	testplanet.Run(t, testplanet.Config{
// 		SatelliteCount: 1, StorageNodeCount: 0, UplinkCount: 1,
// 	}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
// 		apiKey := planet.Uplinks[0].APIKey[planet.Satellites[0].ID()]
//
// 		metainfoClient, err := planet.Uplinks[0].DialMetainfo(ctx, planet.Satellites[0], apiKey)
// 		require.NoError(t, err)
// 		defer ctx.Check(metainfoClient.Close)
//
// 		{ // create a few buckets and list them in one batch
// 			requests := make([]metaclient.BatchItem, 0)
// 			numOfBuckets := 5
// 			for i := 0; i < numOfBuckets; i++ {
// 				requests = append(requests, &metaclient.CreateBucketParams{
// 					Name: []byte("test-bucket-" + strconv.Itoa(i)),
// 				})
// 			}
// 			requests = append(requests, &metaclient.ListBucketsParams{
// 				ListOpts: storj.BucketListOptions{
// 					Cursor:    "",
// 					Direction: storj.After,
// 				},
// 			})
// 			responses, err := metainfoClient.Batch(ctx, requests...)
// 			require.NoError(t, err)
// 			require.Equal(t, numOfBuckets+1, len(responses))
//
// 			for i := 0; i < numOfBuckets; i++ {
// 				response, err := responses[i].CreateBucket()
// 				require.NoError(t, err)
// 				require.Equal(t, "test-bucket-"+strconv.Itoa(i), response.Bucket.Name)
//
// 				_, err = responses[i].GetBucket()
// 				require.Error(t, err)
// 			}
//
// 			bucketsListResp, err := responses[numOfBuckets].ListBuckets()
// 			require.NoError(t, err)
// 			require.Equal(t, numOfBuckets, len(bucketsListResp.BucketList.Items))
// 		}
//
// 		{ // create bucket, object, upload inline segments in batch, download inline segments in batch
// 			err := planet.Uplinks[0].CreateBucket(ctx, planet.Satellites[0], "second-test-bucket")
// 			require.NoError(t, err)
//
// 			requests := make([]metaclient.BatchItem, 0)
// 			requests = append(requests, &metaclient.BeginObjectParams{
// 				Bucket:        []byte("second-test-bucket"),
// 				EncryptedPath: []byte("encrypted-path"),
// 				EncryptionParameters: storj.EncryptionParameters{
// 					CipherSuite: storj.EncAESGCM,
// 					BlockSize:   256,
// 				},
// 			})
// 			numOfSegments := 10
// 			expectedData := make([][]byte, numOfSegments)
// 			for i := 0; i < numOfSegments; i++ {
// 				expectedData[i] = testrand.Bytes(memory.KiB)
//
// 				requests = append(requests, &metaclient.MakeInlineSegmentParams{
// 					Position: storj.SegmentPosition{
// 						Index: int32(i),
// 					},
// 					PlainSize:           int64(len(expectedData[i])),
// 					EncryptedInlineData: expectedData[i],
// 					Encryption: storj.SegmentEncryption{
// 						EncryptedKey: testrand.Bytes(256),
// 					},
// 				})
// 			}
//
// 			metadata, err := pb.Marshal(&pb.StreamMeta{
// 				NumberOfSegments: int64(numOfSegments),
// 			})
// 			require.NoError(t, err)
// 			requests = append(requests, &metaclient.CommitObjectParams{
// 				EncryptedMetadata:             metadata,
// 				EncryptedMetadataNonce:        testrand.Nonce(),
// 				EncryptedMetadataEncryptedKey: testrand.Bytes(32),
// 			})
//
// 			responses, err := metainfoClient.Batch(ctx, requests...)
// 			require.NoError(t, err)
// 			require.Equal(t, numOfSegments+2, len(responses))
//
// 			requests = make([]metaclient.BatchItem, 0)
// 			requests = append(requests, &metaclient.GetObjectParams{
// 				Bucket:        []byte("second-test-bucket"),
// 				EncryptedPath: []byte("encrypted-path"),
// 			})
//
// 			for i := 0; i < numOfSegments-1; i++ {
// 				requests = append(requests, &metaclient.DownloadSegmentParams{
// 					Position: storj.SegmentPosition{
// 						Index: int32(i),
// 					},
// 				})
// 			}
// 			requests = append(requests, &metaclient.DownloadSegmentParams{
// 				Position: storj.SegmentPosition{
// 					Index: -1,
// 				},
// 			})
// 			responses, err = metainfoClient.Batch(ctx, requests...)
// 			require.NoError(t, err)
// 			require.Equal(t, numOfSegments+1, len(responses))
//
// 			for i, response := range responses[1:] {
// 				downloadResponse, err := response.DownloadSegment()
// 				require.NoError(t, err)
//
// 				require.Equal(t, expectedData[i], downloadResponse.Info.EncryptedInlineData)
// 			}
// 		}
//
// 		{ // test case when StreamID is not set automatically
// 			err := planet.Uplinks[0].CreateBucket(ctx, planet.Satellites[0], "third-test-bucket")
// 			require.NoError(t, err)
//
// 			beginObjectResp, err := metainfoClient.BeginObject(ctx, metaclient.BeginObjectParams{
// 				Bucket:        []byte("third-test-bucket"),
// 				EncryptedPath: []byte("encrypted-path"),
// 				EncryptionParameters: storj.EncryptionParameters{
// 					CipherSuite: storj.EncAESGCM,
// 					BlockSize:   256,
// 				},
// 			})
// 			require.NoError(t, err)
//
// 			requests := make([]metaclient.BatchItem, 0)
// 			numOfSegments := 10
// 			expectedData := make([][]byte, numOfSegments)
// 			for i := 0; i < numOfSegments; i++ {
// 				expectedData[i] = testrand.Bytes(memory.KiB)
//
// 				requests = append(requests, &metaclient.MakeInlineSegmentParams{
// 					StreamID: beginObjectResp.StreamID,
// 					Position: storj.SegmentPosition{
// 						Index: int32(i),
// 					},
// 					PlainSize:           int64(len(expectedData[i])),
// 					EncryptedInlineData: expectedData[i],
// 					Encryption: storj.SegmentEncryption{
// 						EncryptedKey: testrand.Bytes(256),
// 					},
// 				})
// 			}
//
// 			metadata, err := pb.Marshal(&pb.StreamMeta{
// 				NumberOfSegments: int64(numOfSegments),
// 			})
// 			require.NoError(t, err)
// 			requests = append(requests, &metaclient.CommitObjectParams{
// 				StreamID:                      beginObjectResp.StreamID,
// 				EncryptedMetadata:             metadata,
// 				EncryptedMetadataNonce:        testrand.Nonce(),
// 				EncryptedMetadataEncryptedKey: testrand.Bytes(32),
// 			})
//
// 			responses, err := metainfoClient.Batch(ctx, requests...)
// 			require.NoError(t, err)
// 			require.Equal(t, numOfSegments+1, len(responses))
// 		}
// 	})
// }
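
// TestRateLimit verifies that the metainfo rate limiter rejects the request
// that exceeds the configured per-second rate.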
func TestRateLimit(t *testing.T) {
	rateLimit := 2
	testplanet.Run(t, testplanet.Config{
		SatelliteCount: 1, StorageNodeCount: 0, UplinkCount: 1,
		Reconfigure: testplanet.Reconfigure{
			Satellite: func(log *zap.Logger, index int, config *satellite.Config) {
				config.Metainfo.RateLimiter.Rate = float64(rateLimit)
				config.Metainfo.RateLimiter.CacheExpiration = 500 * time.Millisecond
			},
		},
	}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
		ul := planet.Uplinks[0]
		satellite := planet.Satellites[0]

		// TODO find a way to reset the limiter before the test is executed; currently
		// testplanet makes one additional request to get access
		time.Sleep(1 * time.Second)
|
|
|
|
|
2020-01-29 15:22:22 +00:00
|
|
|
var group errs2.Group
|
|
|
|
for i := 0; i <= rateLimit; i++ {
|
|
|
|
group.Go(func() error {
|
|
|
|
return ul.CreateBucket(ctx, satellite, testrand.BucketName())
|
|
|
|
})
|
|
|
|
}
|
|
|
|
groupErrs := group.Wait()
|
|
|
|
require.Len(t, groupErrs, 1)
|
2020-01-17 15:01:36 +00:00
|
|
|
})
|
|
|
|
}
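
// TestRateLimit_Disabled ensures that no request is rejected when the rate limiter is disabled, even above the configured rate.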
func TestRateLimit_Disabled(t *testing.T) {
    rateLimit := 2
    testplanet.Run(t, testplanet.Config{
        SatelliteCount: 1, StorageNodeCount: 0, UplinkCount: 1,
        Reconfigure: testplanet.Reconfigure{
            Satellite: func(log *zap.Logger, index int, config *satellite.Config) {
                config.Metainfo.RateLimiter.Enabled = false
                config.Metainfo.RateLimiter.Rate = float64(rateLimit)
            },
        },
    }, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
        ul := planet.Uplinks[0]
        satellite := planet.Satellites[0]

        var group errs2.Group
        for i := 0; i <= rateLimit; i++ {
            group.Go(func() error {
                return ul.CreateBucket(ctx, satellite, testrand.BucketName())
            })
        }
        groupErrs := group.Wait()
        require.Len(t, groupErrs, 0)
    })
}
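
// TestRateLimit_ProjectRateLimitOverride ensures that a per-project rate limit stored in the satellite database overrides the global default.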
func TestRateLimit_ProjectRateLimitOverride(t *testing.T) {
    testplanet.Run(t, testplanet.Config{
        SatelliteCount: 1, StorageNodeCount: 1, UplinkCount: 1,
        Reconfigure: testplanet.Reconfigure{
            Satellite: func(log *zap.Logger, index int, config *satellite.Config) {
                config.Metainfo.RateLimiter.Rate = 2
                config.Metainfo.RateLimiter.CacheExpiration = 500 * time.Millisecond
            },
        },
    }, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
        ul := planet.Uplinks[0]
        satellite := planet.Satellites[0]

        // TODO: find a way to reset the limiter before the test executes;
        // currently testplanet performs one additional request to get access.
        time.Sleep(1 * time.Second)

        projects, err := satellite.DB.Console().Projects().GetAll(ctx)
        require.NoError(t, err)
        require.Len(t, projects, 1)

        rateLimit := 3
        projects[0].RateLimit = &rateLimit

        err = satellite.DB.Console().Projects().Update(ctx, &projects[0])
        require.NoError(t, err)

        var group errs2.Group
        for i := 0; i <= rateLimit; i++ {
            group.Go(func() error {
                return ul.CreateBucket(ctx, satellite, testrand.BucketName())
            })
        }
        groupErrs := group.Wait()
        require.Len(t, groupErrs, 1)
    })
}
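
// TestRateLimit_ProjectRateLimitOverrideCachedExpired ensures that an updated per-project rate limit takes effect once the limiter cache entry expires.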
func TestRateLimit_ProjectRateLimitOverrideCachedExpired(t *testing.T) {
    testplanet.Run(t, testplanet.Config{
        SatelliteCount: 1, StorageNodeCount: 0, UplinkCount: 1,
        Reconfigure: testplanet.Reconfigure{
            Satellite: func(log *zap.Logger, index int, config *satellite.Config) {
                config.Metainfo.RateLimiter.Rate = 2
                config.Metainfo.RateLimiter.CacheExpiration = time.Second
            },
        },
    }, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
        ul := planet.Uplinks[0]
        satellite := planet.Satellites[0]

        // TODO: find a way to reset the limiter before the test executes;
        // currently testplanet performs one additional request to get access.
        time.Sleep(2 * time.Second)

        projects, err := satellite.DB.Console().Projects().GetAll(ctx)
        require.NoError(t, err)
        require.Len(t, projects, 1)

        rateLimit := 3
        projects[0].RateLimit = &rateLimit

        err = satellite.DB.Console().Projects().Update(ctx, &projects[0])
        require.NoError(t, err)

        var group1 errs2.Group

        for i := 0; i <= rateLimit; i++ {
            group1.Go(func() error {
                return ul.CreateBucket(ctx, satellite, testrand.BucketName())
            })
        }
        group1Errs := group1.Wait()
        require.Len(t, group1Errs, 1)

        rateLimit = 1
        projects[0].RateLimit = &rateLimit

        err = satellite.DB.Console().Projects().Update(ctx, &projects[0])
        require.NoError(t, err)

        time.Sleep(2 * time.Second)

        var group2 errs2.Group

        for i := 0; i <= rateLimit; i++ {
            group2.Go(func() error {
                return ul.CreateBucket(ctx, satellite, testrand.BucketName())
            })
        }
        group2Errs := group2.Wait()
        require.Len(t, group2Errs, 1)
    })
}
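
// TestRateLimit_ExceededBurstLimit ensures that requests beyond the configured burst size fail, and that setting a project's burst limit to zero rejects every request.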
func TestRateLimit_ExceededBurstLimit(t *testing.T) {
    burstLimit := 2
    testplanet.Run(t, testplanet.Config{
        SatelliteCount: 1, StorageNodeCount: 1, UplinkCount: 1,
        Reconfigure: testplanet.Reconfigure{
            Satellite: func(log *zap.Logger, index int, config *satellite.Config) {
                config.Metainfo.RateLimiter.Rate = float64(burstLimit)
                config.Metainfo.RateLimiter.CacheExpiration = 500 * time.Millisecond
            },
        },
    }, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
        ul := planet.Uplinks[0]
        satellite := planet.Satellites[0]

        // TODO: find a way to reset the limiter before the test executes;
        // currently testplanet performs one additional request to get access.
        time.Sleep(1 * time.Second)

        var group errs2.Group
        for i := 0; i <= burstLimit; i++ {
            group.Go(func() error {
                return ul.CreateBucket(ctx, satellite, testrand.BucketName())
            })
        }
        groupErrs := group.Wait()
        require.Len(t, groupErrs, 1)

        projects, err := satellite.DB.Console().Projects().GetAll(ctx)
        require.NoError(t, err)
        require.Len(t, projects, 1)

        zeroRateLimit := 0
        err = satellite.DB.Console().Projects().UpdateBurstLimit(ctx, projects[0].ID, zeroRateLimit)
        require.NoError(t, err)

        time.Sleep(1 * time.Second)

        var group2 errs2.Group
        for i := 0; i <= burstLimit; i++ {
            group2.Go(func() error {
                return ul.CreateBucket(ctx, satellite, testrand.BucketName())
            })
        }
        group2Errs := group2.Wait()
        require.Len(t, group2Errs, burstLimit+1)
    })
}
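
// TestBucketEmptinessBeforeDelete ensures that a bucket can be deleted only after all of its objects have been deleted.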
func TestBucketEmptinessBeforeDelete(t *testing.T) {
    testplanet.Run(t, testplanet.Config{
        SatelliteCount: 1, StorageNodeCount: 0, UplinkCount: 1,
    }, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
        for i := 0; i < 10; i++ {
            err := planet.Uplinks[0].Upload(ctx, planet.Satellites[0], "test-bucket", "object-key"+strconv.Itoa(i), testrand.Bytes(memory.KiB))
            require.NoError(t, err)
        }

        for i := 0; i < 10; i++ {
            err := planet.Uplinks[0].DeleteBucket(ctx, planet.Satellites[0], "test-bucket")
            require.Error(t, err)
            require.True(t, errors.Is(err, uplink.ErrBucketNotEmpty))

            err = planet.Uplinks[0].DeleteObject(ctx, planet.Satellites[0], "test-bucket", "object-key"+strconv.Itoa(i))
            require.NoError(t, err)
        }

        err := planet.Uplinks[0].DeleteBucket(ctx, planet.Satellites[0], "test-bucket")
        require.NoError(t, err)
    })
}
// func TestDeleteBatchWithoutPermission(t *testing.T) {
//    testplanet.Run(t, testplanet.Config{
//        SatelliteCount: 1, StorageNodeCount: 0, UplinkCount: 1,
//    }, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
//        apiKey := planet.Uplinks[0].APIKey[planet.Satellites[0].ID()]
//
//        err := planet.Uplinks[0].CreateBucket(ctx, planet.Satellites[0], "test-bucket")
//        require.NoError(t, err)
//
//        apiKey, err = apiKey.Restrict(macaroon.WithNonce(macaroon.Caveat{
//            DisallowLists: true,
//            DisallowReads: true,
//        }))
//        require.NoError(t, err)
//
//        metainfoClient, err := planet.Uplinks[0].DialMetainfo(ctx, planet.Satellites[0], apiKey)
//        require.NoError(t, err)
//        defer ctx.Check(metainfoClient.Close)
//
//        responses, err := metainfoClient.Batch(ctx,
//            // this request was causing a panic, because when deleting an object
//            // it is possible to return no error and an empty response for a
//            // specific set of permissions, see `apiKey.Restrict` above
//            &metaclient.BeginDeleteObjectParams{
//                Bucket:        []byte("test-bucket"),
//                EncryptedPath: []byte("not-existing-object"),
//            },
//
//            // TODO: this code should be enabled once the issue with read permissions in
//            // the DeleteBucket method is fixed; currently users always have permission to read a bucket
//            // https://storjlabs.atlassian.net/browse/USR-603
//            // when it is fixed, the commented code below should replace the existing DeleteBucketParams
//            // (the same situation as above)
//            // &metaclient.DeleteBucketParams{
//            //     Name: []byte("not-existing-bucket"),
//            // },
//
//            &metaclient.DeleteBucketParams{
//                Name: []byte("test-bucket"),
//            },
//        )
//        require.NoError(t, err)
//        require.Equal(t, 2, len(responses))
//    })
//}
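
// TestInlineSegmentThreshold ensures that an upload at the inline segment size limit is stored inline, while one byte more produces a remote segment.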
func TestInlineSegmentThreshold(t *testing.T) {
    testplanet.Run(t, testplanet.Config{
        SatelliteCount: 1, StorageNodeCount: 4, UplinkCount: 1,
    }, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
        { // limit is max inline segment size + encryption overhead
            err := planet.Uplinks[0].Upload(ctx, planet.Satellites[0], "test-bucket-inline", "inline-object", testrand.Bytes(4*memory.KiB))
            require.NoError(t, err)

            // we don't know the encrypted path
            segments, err := planet.Satellites[0].Metabase.DB.TestingAllSegments(ctx)
            require.NoError(t, err)
            require.Len(t, segments, 1)
            require.Zero(t, segments[0].Redundancy)
            require.NotEmpty(t, segments[0].InlineData)

            // clean up - delete the uploaded object
            objects, err := planet.Satellites[0].Metabase.DB.TestingAllObjects(ctx)
            require.NoError(t, err)
            require.Len(t, objects, 1)
            _, err = planet.Satellites[0].Metabase.DB.DeleteObjectLatestVersion(ctx, metabase.DeleteObjectLatestVersion{
                ObjectLocation: objects[0].Location(),
            })
            require.NoError(t, err)
        }

        { // one more byte over the limit should be enough to create a remote segment
            err := planet.Uplinks[0].Upload(ctx, planet.Satellites[0], "test-bucket-remote", "remote-object", testrand.Bytes(4*memory.KiB+1))
            require.NoError(t, err)

            // we don't know the encrypted path
            segments, err := planet.Satellites[0].Metabase.DB.TestingAllSegments(ctx)
            require.NoError(t, err)
            require.Len(t, segments, 1)
            require.NotZero(t, segments[0].Redundancy)
            require.Empty(t, segments[0].InlineData)

            // clean up - delete the uploaded object
            objects, err := planet.Satellites[0].Metabase.DB.TestingAllObjects(ctx)
            require.NoError(t, err)
            require.Len(t, objects, 1)
            _, err = planet.Satellites[0].Metabase.DB.DeleteObjectLatestVersion(ctx, metabase.DeleteObjectLatestVersion{
                ObjectLocation: objects[0].Location(),
            })
            require.NoError(t, err)
        }
    })
}
// TestCommitObjectMetadataSize ensures that CommitObject returns an error when the metadata provided by the user is too large.
// func TestCommitObjectMetadataSize(t *testing.T) {
//    testplanet.Run(t, testplanet.Config{
//        SatelliteCount: 1, StorageNodeCount: 4, UplinkCount: 1,
//        Reconfigure: testplanet.Reconfigure{
//            Satellite: testplanet.MaxMetadataSize(2 * memory.KiB),
//        },
//    }, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
//        apiKey := planet.Uplinks[0].APIKey[planet.Satellites[0].ID()]
//        buckets := planet.Satellites[0].API.Buckets.Service
//
//        bucket := storj.Bucket{
//            Name:      "initial-bucket",
//            ProjectID: planet.Uplinks[0].Projects[0].ID,
//        }
//        _, err := buckets.CreateBucket(ctx, bucket)
//        require.NoError(t, err)
//
//        metainfoClient, err := planet.Uplinks[0].DialMetainfo(ctx, planet.Satellites[0], apiKey)
//        require.NoError(t, err)
//        defer ctx.Check(metainfoClient.Close)
//
//        params := metaclient.BeginObjectParams{
//            Bucket:        []byte(bucket.Name),
//            EncryptedPath: []byte("encrypted-path"),
//            Redundancy: storj.RedundancyScheme{
//                Algorithm:      storj.ReedSolomon,
//                ShareSize:      256,
//                RequiredShares: 1,
//                RepairShares:   1,
//                OptimalShares:  3,
//                TotalShares:    4,
//            },
//            EncryptionParameters: storj.EncryptionParameters{
//                BlockSize:   256,
//                CipherSuite: storj.EncNull,
//            },
//            ExpiresAt: time.Now().Add(24 * time.Hour),
//        }
//        beginObjectResponse, err := metainfoClient.BeginObject(ctx, params)
//        require.NoError(t, err)
//
//        response, err := metainfoClient.BeginSegment(ctx, metaclient.BeginSegmentParams{
//            StreamID: beginObjectResponse.StreamID,
//            Position: storj.SegmentPosition{
//                Index: 0,
//            },
//            MaxOrderLimit: memory.MiB.Int64(),
//        })
//        require.NoError(t, err)
//
//        fullIDMap := make(map[storj.NodeID]*identity.FullIdentity)
//        for _, node := range planet.StorageNodes {
//            fullIDMap[node.ID()] = node.Identity
//        }
//
//        makeResult := func(num int32) *pb.SegmentPieceUploadResult {
//            nodeID := response.Limits[num].Limit.StorageNodeId
//            hash := &pb.PieceHash{
//                PieceId:   response.Limits[num].Limit.PieceId,
//                PieceSize: 1048832,
//                Timestamp: time.Now(),
//            }
//
//            fullID := fullIDMap[nodeID]
//            require.NotNil(t, fullID)
//            signer := signing.SignerFromFullIdentity(fullID)
//            signedHash, err := signing.SignPieceHash(ctx, signer, hash)
//            require.NoError(t, err)
//
//            return &pb.SegmentPieceUploadResult{
//                PieceNum: num,
//                NodeId:   nodeID,
//                Hash:     signedHash,
//            }
//        }
//        err = metainfoClient.CommitSegment(ctx, metaclient.CommitSegmentParams{
//            SegmentID: response.SegmentID,
//            Encryption: storj.SegmentEncryption{
//                EncryptedKey: []byte{1},
//            },
//            PlainSize:         memory.MiB.Int64(),
//            SizeEncryptedData: memory.MiB.Int64(),
//            UploadResult: []*pb.SegmentPieceUploadResult{
//                makeResult(0),
//                makeResult(1),
//                makeResult(2),
//            },
//        })
//        require.NoError(t, err)
//
//        // 5KiB metadata should fail because it is too large.
//        metadata, err := pb.Marshal(&pb.StreamMeta{
//            EncryptedStreamInfo: testrand.Bytes(5 * memory.KiB),
//            NumberOfSegments:    1,
//        })
//        require.NoError(t, err)
//        err = metainfoClient.CommitObject(ctx, metaclient.CommitObjectParams{
//            StreamID:                      beginObjectResponse.StreamID,
//            EncryptedMetadata:             metadata,
//            EncryptedMetadataNonce:        testrand.Nonce(),
//            EncryptedMetadataEncryptedKey: testrand.Bytes(32),
//        })
//        require.Error(t, err)
//        assertInvalidArgument(t, err, true)
//
//        // 1KiB metadata should not fail.
//        metadata, err = pb.Marshal(&pb.StreamMeta{
//            EncryptedStreamInfo: testrand.Bytes(1 * memory.KiB),
//            NumberOfSegments:    1,
//        })
//        require.NoError(t, err)
//        err = metainfoClient.CommitObject(ctx, metaclient.CommitObjectParams{
//            StreamID:                      beginObjectResponse.StreamID,
//            EncryptedMetadata:             metadata,
//            EncryptedMetadataNonce:        testrand.Nonce(),
//            EncryptedMetadataEncryptedKey: testrand.Bytes(32),
//        })
//        require.NoError(t, err)
//    })
//
//}
// TestBeginObjectEncryptedObjectKeyLength ensures that BeginObject returns an error when the encrypted key provided by the user is too large.
// func TestBeginObjectEncryptedObjectKeyLength(t *testing.T) {
//    testplanet.Run(t, testplanet.Config{
//        SatelliteCount: 1, StorageNodeCount: 0, UplinkCount: 1,
//        Reconfigure: testplanet.Reconfigure{
//            Satellite: testplanet.MaxObjectKeyLength(1024),
//        },
//    }, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
//        apiKey := planet.Uplinks[0].APIKey[planet.Satellites[0].ID()]
//
//        err := planet.Uplinks[0].CreateBucket(ctx, planet.Satellites[0], "initial-bucket")
//        require.NoError(t, err)
//
//        metainfoClient, err := planet.Uplinks[0].DialMetainfo(ctx, planet.Satellites[0], apiKey)
//        require.NoError(t, err)
//        defer ctx.Check(metainfoClient.Close)
//
//        params := metaclient.BeginObjectParams{
//            Bucket: []byte("initial-bucket"),
//            EncryptionParameters: storj.EncryptionParameters{
//                BlockSize:   256,
//                CipherSuite: storj.EncNull,
//            },
//        }
//
//        params.EncryptedPath = testrand.Bytes(500)
//        _, err = metainfoClient.BeginObject(ctx, params)
//        require.NoError(t, err)
//
//        params.EncryptedPath = testrand.Bytes(1024)
//        _, err = metainfoClient.BeginObject(ctx, params)
//        require.NoError(t, err)
//
//        params.EncryptedPath = testrand.Bytes(2048)
//        _, err = metainfoClient.BeginObject(ctx, params)
//        require.Error(t, err)
//        require.True(t, rpcstatus.Code(err) == rpcstatus.InvalidArgument)
//    })
//}
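
// TestDeleteRightsOnUpload ensures that overwriting an existing object requires the AllowDelete permission on the access grant.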
func TestDeleteRightsOnUpload(t *testing.T) {
    testplanet.Run(t, testplanet.Config{
        SatelliteCount: 1, StorageNodeCount: 0, UplinkCount: 1,
    }, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
        up := planet.Uplinks[0]

        err := up.CreateBucket(ctx, planet.Satellites[0], "test-bucket")
        require.NoError(t, err)

        data := testrand.Bytes(1 * memory.KiB)
        err = up.Upload(ctx, planet.Satellites[0], "test-bucket", "test-key", data)
        require.NoError(t, err)

        access := up.Access[planet.Satellites[0].ID()]

        overwrite := func(allowDelete bool) error {
            permission := uplink.FullPermission()
            permission.AllowDelete = allowDelete

            sharedAccess, err := access.Share(permission)
            require.NoError(t, err)

            project, err := uplink.OpenProject(ctx, sharedAccess)
            require.NoError(t, err)
            defer ctx.Check(project.Close)

            upload, err := project.UploadObject(ctx, "test-bucket", "test-key", nil)
            require.NoError(t, err)

            _, err = upload.Write([]byte("new data"))
            require.NoError(t, err)

            return upload.Commit()
        }

        require.Error(t, overwrite(false))
        require.NoError(t, overwrite(true))
    })
}
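
// TestImmutableUpload ensures that an access grant without AllowDelete can upload a new object but cannot overwrite an existing one.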
func TestImmutableUpload(t *testing.T) {
    testplanet.Run(t, testplanet.Config{
        SatelliteCount: 1, StorageNodeCount: 0, UplinkCount: 1,
    }, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
        access := planet.Uplinks[0].Access[planet.Satellites[0].ID()]

        permission := uplink.Permission{AllowUpload: true} // AllowDelete: false
        sharedAccess, err := access.Share(permission)
        require.NoError(t, err)

        project, err := uplink.OpenProject(ctx, sharedAccess)
        require.NoError(t, err)
        defer ctx.Check(project.Close)

        _, err = project.CreateBucket(ctx, "test-bucket")
        require.NoError(t, err)

        // Uploading the object for the first time should be successful.
        upload, err := project.UploadObject(ctx, "test-bucket", "test-key", nil)
        require.NoError(t, err)

        _, err = upload.Write(testrand.Bytes(1 * memory.KiB))
        require.NoError(t, err)

        err = upload.Commit()
        require.NoError(t, err)

        // Overwriting the object should fail on Commit.
        upload, err = project.UploadObject(ctx, "test-bucket", "test-key", nil)
        require.NoError(t, err)

        _, err = upload.Write(testrand.Bytes(1 * memory.KiB))
        require.NoError(t, err)

        err = upload.Commit()
        require.Error(t, err)
    })
}
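
// TestGetObjectIPs ensures that object.GetObjectIPs returns valid host:port addresses for the nodes storing an object.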
func TestGetObjectIPs(t *testing.T) {
    testplanet.Run(t, testplanet.Config{
        SatelliteCount: 1, StorageNodeCount: 5, UplinkCount: 1,
    }, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
        access := planet.Uplinks[0].Access[planet.Satellites[0].ID()]
        uplnk := planet.Uplinks[0]
        uplinkCtx := testuplink.WithMaxSegmentSize(ctx, 5*memory.KB)
        sat := planet.Satellites[0]

        require.NoError(t, uplnk.CreateBucket(uplinkCtx, sat, "bob"))
        require.NoError(t, uplnk.Upload(uplinkCtx, sat, "bob", "jones", testrand.Bytes(20*memory.KB)))
        ips, err := object.GetObjectIPs(ctx, uplink.Config{}, access, "bob", "jones")
        require.NoError(t, err)
        require.True(t, len(ips) > 0)

        // verify each entry is a real IP with a valid host and port
        for _, ip := range ips {
            host, port, err := net.SplitHostPort(string(ip))
            require.NoError(t, err)
            netIP := net.ParseIP(host)
            require.NotNil(t, netIP)
            _, err = strconv.Atoi(port)
            require.NoError(t, err)
        }
    })
}
// func TestMultipartObjectDownloadRejection(t *testing.T) {
//    testplanet.Run(t, testplanet.Config{
//        SatelliteCount: 1, StorageNodeCount: 4, UplinkCount: 1,
//    }, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
//
//        data := testrand.Bytes(20 * memory.KB)
//        err := planet.Uplinks[0].Upload(ctx, planet.Satellites[0], "pip-first", "non-multipart-object", data)
//        require.NoError(t, err)
//
//        project, err := planet.Uplinks[0].OpenProject(ctx, planet.Satellites[0])
//        require.NoError(t, err)
//
//        _, err = project.EnsureBucket(ctx, "pip-second")
//        require.NoError(t, err)
//        info, err := project.BeginUpload(ctx, "pip-second", "multipart-object", nil)
//        require.NoError(t, err)
//        upload, err := project.UploadPart(ctx, "pip-second", "multipart-object", info.UploadID, 1)
//        require.NoError(t, err)
//        _, err = upload.Write(data)
//        require.NoError(t, err)
//        require.NoError(t, upload.Commit())
//        _, err = project.CommitUpload(ctx, "pip-second", "multipart-object", info.UploadID, nil)
//        require.NoError(t, err)
//
//        _, err = project.EnsureBucket(ctx, "pip-third")
//        require.NoError(t, err)
//        info, err = project.BeginUpload(ctx, "pip-third", "multipart-object-third", nil)
//        require.NoError(t, err)
//        for i := 0; i < 4; i++ {
//            upload, err := project.UploadPart(ctx, "pip-third", "multipart-object-third", info.UploadID, uint32(i+1))
//            require.NoError(t, err)
//            _, err = upload.Write(data)
//            require.NoError(t, err)
//            require.NoError(t, upload.Commit())
//        }
//        _, err = project.CommitUpload(ctx, "pip-third", "multipart-object-third", info.UploadID, nil)
//        require.NoError(t, err)
//
//        apiKey := planet.Uplinks[0].APIKey[planet.Satellites[0].ID()]
//        metainfoClient, err := planet.Uplinks[0].DialMetainfo(ctx, planet.Satellites[0], apiKey)
//        require.NoError(t, err)
//        defer ctx.Check(metainfoClient.Close)
//
//        objects, err := planet.Satellites[0].Metabase.DB.TestingAllCommittedObjects(ctx, planet.Uplinks[0].Projects[0].ID, "pip-first")
//        require.NoError(t, err)
//        require.Len(t, objects, 1)
//
//        // verify that standard objects can be downloaded in an old way (index = -1 as last segment)
//        object, err := metainfoClient.GetObject(ctx, metaclient.GetObjectParams{
//            Bucket:        []byte("pip-first"),
//            EncryptedPath: []byte(objects[0].ObjectKey),
//        })
//        require.NoError(t, err)
//        _, err = metainfoClient.DownloadSegmentWithRS(ctx, metaclient.DownloadSegmentParams{
//            StreamID: object.StreamID,
//            Position: storj.SegmentPosition{
//                Index: -1,
//            },
//        })
//        require.NoError(t, err)
//
//        objects, err = planet.Satellites[0].Metabase.DB.TestingAllCommittedObjects(ctx, planet.Uplinks[0].Projects[0].ID, "pip-second")
//        require.NoError(t, err)
//        require.Len(t, objects, 1)
//
//        // verify that multipart objects (single segment) CANNOT be downloaded in an old way (index = -1 as last segment)
//        object, err = metainfoClient.GetObject(ctx, metaclient.GetObjectParams{
//            Bucket:        []byte("pip-second"),
//            EncryptedPath: []byte(objects[0].ObjectKey),
//        })
//        require.NoError(t, err)
//        _, err = metainfoClient.DownloadSegmentWithRS(ctx, metaclient.DownloadSegmentParams{
//            StreamID: object.StreamID,
//            Position: storj.SegmentPosition{
//                Index: -1,
//            },
//        })
//        require.Error(t, err)
//        require.Contains(t, err.Error(), "Used uplink version cannot download multipart objects.")
//
//        objects, err = planet.Satellites[0].Metabase.DB.TestingAllCommittedObjects(ctx, planet.Uplinks[0].Projects[0].ID, "pip-third")
//        require.NoError(t, err)
//        require.Len(t, objects, 1)
//
//        // verify that multipart objects (multiple segments) CANNOT be downloaded in an old way (index = -1 as last segment)
//        object, err = metainfoClient.GetObject(ctx, metaclient.GetObjectParams{
//            Bucket:        []byte("pip-third"),
//            EncryptedPath: []byte(objects[0].ObjectKey),
//        })
//        require.NoError(t, err)
//        _, err = metainfoClient.DownloadSegmentWithRS(ctx, metaclient.DownloadSegmentParams{
//            StreamID: object.StreamID,
//            Position: storj.SegmentPosition{
//                Index: -1,
//            },
//        })
//        require.Error(t, err)
//        require.Contains(t, err.Error(), "Used uplink version cannot download multipart objects.")
//    })
//}
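
// TestObjectOverrideOnUpload ensures that re-uploading to the same key overrides both committed and pending objects.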
func TestObjectOverrideOnUpload(t *testing.T) {
    testplanet.Run(t, testplanet.Config{
        SatelliteCount: 1, StorageNodeCount: 4, UplinkCount: 1,
    }, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {

        initialData := testrand.Bytes(20 * memory.KB)
        overrideData := testrand.Bytes(25 * memory.KB)

        { // committed object

            // upload committed object
            err := planet.Uplinks[0].Upload(ctx, planet.Satellites[0], "pip-first", "committed-object", initialData)
            require.NoError(t, err)

            // upload once again to override
            err = planet.Uplinks[0].Upload(ctx, planet.Satellites[0], "pip-first", "committed-object", overrideData)
            require.NoError(t, err)

            data, err := planet.Uplinks[0].Download(ctx, planet.Satellites[0], "pip-first", "committed-object")
            require.NoError(t, err)
            require.Equal(t, overrideData, data)
        }

        { // pending object
            project, err := planet.Uplinks[0].OpenProject(ctx, planet.Satellites[0])
            require.NoError(t, err)
            defer ctx.Check(project.Close)

            // upload pending object
            info, err := project.BeginUpload(ctx, "pip-first", "pending-object", nil)
            require.NoError(t, err)
            upload, err := project.UploadPart(ctx, "pip-first", "pending-object", info.UploadID, 1)
            require.NoError(t, err)
            _, err = upload.Write(initialData)
            require.NoError(t, err)
            require.NoError(t, upload.Commit())

            // upload once again to override
            err = planet.Uplinks[0].Upload(ctx, planet.Satellites[0], "pip-first", "pending-object", overrideData)
            require.NoError(t, err)

            data, err := planet.Uplinks[0].Download(ctx, planet.Satellites[0], "pip-first", "pending-object")
            require.NoError(t, err)
            require.Equal(t, overrideData, data)
        }
    })
}
// func TestStableUploadID(t *testing.T) {
//    testplanet.Run(t, testplanet.Config{
//        SatelliteCount: 1, StorageNodeCount: 0, UplinkCount: 1,
//    }, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
//        apiKey := planet.Uplinks[0].APIKey[planet.Satellites[0].ID()]
//        client, err := planet.Uplinks[0].DialMetainfo(ctx, planet.Satellites[0], apiKey)
//        require.NoError(t, err)
//        defer ctx.Check(client.Close)
//
//        err = planet.Uplinks[0].CreateBucket(ctx, planet.Satellites[0], "testbucket")
//        require.NoError(t, err)
//
//        beginResp, err := client.BeginObject(ctx, metaclient.BeginObjectParams{
//            Bucket:        []byte("testbucket"),
//            EncryptedPath: []byte("a/b/testobject"),
//            EncryptionParameters: storj.EncryptionParameters{
//                CipherSuite: storj.EncAESGCM,
//                BlockSize:   256,
//            },
//        })
//        require.NoError(t, err)
//
//        // List the root of the bucket recursively
//        listResp, _, err := client.ListObjects(ctx, metaclient.ListObjectsParams{
//            Bucket:                []byte("testbucket"),
//            Status:                int32(metabase.Pending),
//            Recursive:             true,
//            IncludeSystemMetadata: true,
//        })
//        require.NoError(t, err)
//        require.Len(t, listResp, 1)
//        // check that BeginObject and ListObjects return the same StreamID.
//        assert.Equal(t, beginResp.StreamID, listResp[0].StreamID)
//
//        // List with prefix non-recursively
//        listResp2, _, err := client.ListObjects(ctx, metaclient.ListObjectsParams{
//            Bucket:                []byte("testbucket"),
//            Status:                int32(metabase.Pending),
//            EncryptedPrefix:       []byte("a/b/"),
//            IncludeSystemMetadata: true,
//        })
//        require.NoError(t, err)
//        require.Len(t, listResp2, 1)
//        // check that the StreamID is still the same.
//        assert.Equal(t, listResp[0].StreamID, listResp2[0].StreamID)
//
//        // List with prefix recursively
//        listResp3, _, err := client.ListObjects(ctx, metaclient.ListObjectsParams{
//            Bucket:                []byte("testbucket"),
//            Status:                int32(metabase.Pending),
//            EncryptedPrefix:       []byte("a/b/"),
//            Recursive:             true,
//            IncludeSystemMetadata: true,
//        })
//        require.NoError(t, err)
//        require.Len(t, listResp3, 1)
//        // check that the StreamID is still the same.
//        assert.Equal(t, listResp[0].StreamID, listResp3[0].StreamID)
//
//        // List the pending object directly
//        listResp4, err := client.ListPendingObjectStreams(ctx, metaclient.ListPendingObjectStreamsParams{
//            Bucket:        []byte("testbucket"),
//            EncryptedPath: []byte("a/b/testobject"),
//        })
//        require.NoError(t, err)
//        require.Len(t, listResp4.Items, 1)
//        // check that the StreamID is still the same.
//        assert.Equal(t, listResp[0].StreamID, listResp4.Items[0].StreamID)
//    })
//}
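
// TestObjectSegmentExpiresAt ensures that an object's expiration time is propagated to its inline and remote segments.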
func TestObjectSegmentExpiresAt(t *testing.T) {
    testplanet.Run(t, testplanet.Config{
        SatelliteCount: 1, StorageNodeCount: 4, UplinkCount: 1,
    }, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
        inlineData := testrand.Bytes(1 * memory.KiB)
        inlineExpiration := time.Now().Add(2 * time.Hour)
        err := planet.Uplinks[0].UploadWithExpiration(ctx, planet.Satellites[0], "hohoho", "inline_object", inlineData, inlineExpiration)
        require.NoError(t, err)

        remoteData := testrand.Bytes(10 * memory.KiB)
        remoteExpiration := time.Now().Add(4 * time.Hour)
        err = planet.Uplinks[0].UploadWithExpiration(ctx, planet.Satellites[0], "hohoho", "remote_object", remoteData, remoteExpiration)
        require.NoError(t, err)

        segments, err := planet.Satellites[0].Metabase.DB.TestingAllSegments(ctx)
        require.NoError(t, err)
        require.Len(t, segments, 2)

        for _, segment := range segments {
            if int(segment.PlainSize) == len(inlineData) {
                require.Equal(t, inlineExpiration.Unix(), segment.ExpiresAt.Unix())
            } else if int(segment.PlainSize) == len(remoteData) {
                require.Equal(t, remoteExpiration.Unix(), segment.ExpiresAt.Unix())
            } else {
                t.Fail()
            }
        }
    })
}
// func TestDeleteNonExistentObject(t *testing.T) {
//    testplanet.Run(t, testplanet.Config{
//        SatelliteCount: 1, StorageNodeCount: 0, UplinkCount: 1,
//    }, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
//        apiKey := planet.Uplinks[0].APIKey[planet.Satellites[0].ID()]
//
//        expectedBucketName := "delete-objects-bucket"
//        metainfoClient, err := planet.Uplinks[0].DialMetainfo(ctx, planet.Satellites[0], apiKey)
//        require.NoError(t, err)
//        defer ctx.Check(metainfoClient.Close)
//
//        // non-pending non-existent objects return no error
//        _, err = metainfoClient.BeginDeleteObject(ctx, metaclient.BeginDeleteObjectParams{
//            Bucket:        []byte(expectedBucketName),
//            EncryptedPath: []byte("bad path"),
//        })
//        require.NoError(t, err)
//
//        // pending non-existent objects return an RPC error
//        signer := signing.SignerFromFullIdentity(planet.Satellites[0].Identity)
//        streamUUID := testrand.UUID()
//        satStreamID := &internalpb.StreamID{CreationDate: time.Now(), StreamId: streamUUID[:]}
//        signedStreamID, err := metainfo.SignStreamID(ctx, signer, satStreamID)
//        require.NoError(t, err)
//        encodedStreamID, err := pb.Marshal(signedStreamID)
//        require.NoError(t, err)
//        streamID, err := storj.StreamIDFromBytes(encodedStreamID)
//        require.NoError(t, err)
//        _, err = metainfoClient.BeginDeleteObject(ctx, metaclient.BeginDeleteObjectParams{
//            Bucket:        []byte(expectedBucketName),
//            EncryptedPath: []byte("bad path"),
//            Status:        int32(metabase.Pending),
//            StreamID:      streamID,
//        })
//        require.True(t, errs2.IsRPC(err, rpcstatus.NotFound))
//    })
//}
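
// TestMoveObject_Geofencing ensures that an object can be moved only between buckets that share the same placement constraint.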
func TestMoveObject_Geofencing(t *testing.T) {
    testplanet.Run(t,
        testplanet.Config{
            SatelliteCount: 1, StorageNodeCount: 0, UplinkCount: 1,
        },
        func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
            satellite := planet.Satellites[0]
            buckets := satellite.API.Buckets.Service
            uplink := planet.Uplinks[0]
            projectID := uplink.Projects[0].ID

            // create buckets with different placement
            createGeofencedBucket(t, ctx, buckets, projectID, "global1", storj.EveryCountry)
            createGeofencedBucket(t, ctx, buckets, projectID, "global2", storj.EveryCountry)
            createGeofencedBucket(t, ctx, buckets, projectID, "us1", storj.US)
            createGeofencedBucket(t, ctx, buckets, projectID, "us2", storj.US)
            createGeofencedBucket(t, ctx, buckets, projectID, "eu1", storj.EU)

            // upload an object to one of the global buckets
            err := uplink.Upload(ctx, satellite, "global1", "testobject", []byte{})
            require.NoError(t, err)

            project, err := uplink.GetProject(ctx, satellite)
            require.NoError(t, err)
            defer ctx.Check(project.Close)

            // move the object to a new key within the same bucket
            err = project.MoveObject(ctx, "global1", "testobject", "global1", "movedobject", nil)
            require.NoError(t, err)

            // move the object to the other global bucket
            err = project.MoveObject(ctx, "global1", "movedobject", "global2", "movedobject", nil)
            require.NoError(t, err)

            // move the object to a geofenced bucket - should fail
            err = project.MoveObject(ctx, "global2", "movedobject", "us1", "movedobject", nil)
            require.Error(t, err)

            // upload an object to one of the US-geofenced buckets
            err = uplink.Upload(ctx, satellite, "us1", "testobject", []byte{})
            require.NoError(t, err)

            // move the object to a new key within the same bucket
            err = project.MoveObject(ctx, "us1", "testobject", "us1", "movedobject", nil)
            require.NoError(t, err)

            // move the object to the other US-geofenced bucket
            err = project.MoveObject(ctx, "us1", "movedobject", "us2", "movedobject", nil)
            require.NoError(t, err)

            // move the object to the EU-geofenced bucket - should fail
            err = project.MoveObject(ctx, "us2", "movedobject", "eu1", "movedobject", nil)
            require.Error(t, err)

            // move the object to a non-geofenced bucket - should fail
            err = project.MoveObject(ctx, "us2", "movedobject", "global1", "movedobject", nil)
            require.Error(t, err)
        },
    )
}
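
// createGeofencedBucket creates a bucket with the given placement constraint and verifies that the placement was stored.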
func createGeofencedBucket(t *testing.T, ctx *testcontext.Context, buckets *buckets.Service, projectID uuid.UUID, bucketName string, placement storj.PlacementConstraint) {
    // generate the bucket id
    bucketID, err := uuid.New()
    require.NoError(t, err)

    // create the bucket
    _, err = buckets.CreateBucket(ctx, storj.Bucket{
        ID:        bucketID,
        Name:      bucketName,
        ProjectID: projectID,
        Placement: placement,
    })
    require.NoError(t, err)

    // check that the bucket placement is correct
    bucket, err := buckets.GetBucket(ctx, []byte(bucketName), projectID)
    require.NoError(t, err)
    require.Equal(t, placement, bucket.Placement)
}