// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.

package metainfo_test

import (
	"bytes"
	"context"
	"fmt"
	"net"
	"sort"
	"strconv"
	"sync"
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	"github.com/zeebo/errs"
	"go.uber.org/zap"
	"golang.org/x/exp/maps"

	"storj.io/common/errs2"
	"storj.io/common/identity"
	"storj.io/common/identity/testidentity"
	"storj.io/common/memory"
	"storj.io/common/nodetag"
	"storj.io/common/pb"
	"storj.io/common/rpc/rpcstatus"
	"storj.io/common/signing"
	"storj.io/common/storj"
	"storj.io/common/testcontext"
	"storj.io/common/testrand"
	"storj.io/common/time2"
	"storj.io/common/uuid"
	"storj.io/storj/private/testplanet"
	"storj.io/storj/satellite"
	"storj.io/storj/satellite/buckets"
	"storj.io/storj/satellite/internalpb"
	"storj.io/storj/satellite/metabase"
	"storj.io/storj/satellite/metainfo"
	"storj.io/storj/satellite/overlay"
	"storj.io/storj/storagenode"
	"storj.io/storj/storagenode/contact"
	"storj.io/uplink"
	"storj.io/uplink/private/metaclient"
	"storj.io/uplink/private/object"
	"storj.io/uplink/private/testuplink"
)
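
// assertRPCStatusCode asserts that actualError is an rpcstatus error carrying
// the expected status code; rpcstatus.Unknown means it is not an rpcstatus
// error at all.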
func assertRPCStatusCode(t *testing.T, actualError error, expectedStatusCode rpcstatus.StatusCode) {
	statusCode := rpcstatus.Code(actualError)
	require.NotEqual(t, rpcstatus.Unknown, statusCode, "expected rpcstatus error, got \"%v\"", actualError)
	require.Equal(t, expectedStatusCode, statusCode, "wrong %T, got %v", statusCode, actualError)
}
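
// TestEndpoint_Object_No_StorageNodes exercises object-level metainfo
// endpoints on a planet without storage nodes, so every test object is stored
// inline on the satellite.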
func TestEndpoint_Object_No_StorageNodes(t *testing.T) {
	testplanet.Run(t, testplanet.Config{
		SatelliteCount: 1, UplinkCount: 1,
		Reconfigure: testplanet.Reconfigure{
			Satellite: testplanet.MaxObjectKeyLength(1024),
		},
	}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
		apiKey := planet.Uplinks[0].APIKey[planet.Satellites[0].ID()]
		satellite := planet.Satellites[0]

		metainfoClient, err := planet.Uplinks[0].DialMetainfo(ctx, planet.Satellites[0], apiKey)
		require.NoError(t, err)
		defer ctx.Check(metainfoClient.Close)

		bucketName := "testbucket"
		deleteBucket := func() error {
			_, err := metainfoClient.DeleteBucket(ctx, metaclient.DeleteBucketParams{
				Name:      []byte(bucketName),
				DeleteAll: true,
			})
			return err
		}
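
		// "get objects": upload ten small objects, list them (with and
		// without a Limit), and fetch each one back by its encrypted key.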
t.Run("get objects", func(t *testing.T) {
|
|
|
|
defer ctx.Check(deleteBucket)
|
|
|
|
|
|
|
|
files := make([]string, 10)
|
|
|
|
data := testrand.Bytes(1 * memory.KiB)
|
|
|
|
for i := 0; i < len(files); i++ {
|
|
|
|
files[i] = "path" + strconv.Itoa(i)
|
|
|
|
err := planet.Uplinks[0].Upload(ctx, planet.Satellites[0], bucketName, files[i], data)
|
|
|
|
require.NoError(t, err)
|
|
|
|
}
|
|
|
|
|
|
|
|
expectedBucketName := bucketName
|
|
|
|
items, _, err := metainfoClient.ListObjects(ctx, metaclient.ListObjectsParams{
|
|
|
|
Bucket: []byte(expectedBucketName),
|
|
|
|
IncludeSystemMetadata: true,
|
2022-01-26 11:10:28 +00:00
|
|
|
})
|
|
|
|
require.NoError(t, err)
|
2022-02-04 10:27:38 +00:00
|
|
|
require.Equal(t, len(files), len(items))
|
|
|
|
for _, item := range items {
|
|
|
|
require.NotEmpty(t, item.EncryptedObjectKey)
|
|
|
|
require.True(t, item.CreatedAt.Before(time.Now()))
|
|
|
|
|
|
|
|
object, err := metainfoClient.GetObject(ctx, metaclient.GetObjectParams{
|
|
|
|
Bucket: []byte(expectedBucketName),
|
|
|
|
EncryptedObjectKey: item.EncryptedObjectKey,
|
|
|
|
})
|
|
|
|
require.NoError(t, err)
|
|
|
|
require.Equal(t, item.EncryptedObjectKey, object.EncryptedObjectKey)
|
2022-01-26 11:10:28 +00:00
|
|
|
|
2022-02-04 10:27:38 +00:00
|
|
|
require.NotEmpty(t, object.StreamID)
|
|
|
|
}
|
|
|
|
|
|
|
|
items, _, err = metainfoClient.ListObjects(ctx, metaclient.ListObjectsParams{
|
|
|
|
Bucket: []byte(expectedBucketName),
|
|
|
|
Limit: 3,
|
|
|
|
})
|
|
|
|
require.NoError(t, err)
|
|
|
|
require.Equal(t, 3, len(items))
|
2022-01-26 11:10:28 +00:00
|
|
|
|
|
|
|
})
|
|
|
|
|
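
		// "list service" mirrors a user-facing listing: non-ASCII keys
		// uploaded through libuplink should come back sorted, with prefixes
		// reported only for the non-recursive listing.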
		t.Run("list service", func(t *testing.T) {
			defer ctx.Check(deleteBucket)

			items := []struct {
				Key   string
				Value []byte
			}{
				{Key: "sample.😶", Value: []byte{1}},
				{Key: "müsic", Value: []byte{2}},
				{Key: "müsic/söng1.mp3", Value: []byte{3}},
				{Key: "müsic/söng2.mp3", Value: []byte{4}},
				{Key: "müsic/album/söng3.mp3", Value: []byte{5}},
				{Key: "müsic/söng4.mp3", Value: []byte{6}},
				{Key: "ビデオ/movie.mkv", Value: []byte{7}},
			}

			for _, item := range items {
				err := planet.Uplinks[0].Upload(ctx, planet.Satellites[0], bucketName, item.Key, item.Value)
				assert.NoError(t, err)
			}

			project, err := planet.Uplinks[0].GetProject(ctx, planet.Satellites[0])
			require.NoError(t, err)
			defer ctx.Check(project.Close)

			objects := project.ListObjects(ctx, "testbucket", &uplink.ListObjectsOptions{
				Recursive: true,
			})

			listItems := make([]*uplink.Object, 0)
			for objects.Next() {
				listItems = append(listItems, objects.Item())
			}
			require.NoError(t, objects.Err())

			expected := []metaclient.Object{
				{Path: "müsic"},
				{Path: "müsic/album/söng3.mp3"},
				{Path: "müsic/söng1.mp3"},
				{Path: "müsic/söng2.mp3"},
				{Path: "müsic/söng4.mp3"},
				{Path: "sample.😶"},
				{Path: "ビデオ/movie.mkv"},
			}

			require.Equal(t, len(expected), len(listItems))
			sort.Slice(listItems, func(i, k int) bool {
				return listItems[i].Key < listItems[k].Key
			})
			for i, item := range expected {
				require.Equal(t, item.Path, listItems[i].Key)
				require.Equal(t, item.IsPrefix, listItems[i].IsPrefix)
			}

			objects = project.ListObjects(ctx, bucketName, &uplink.ListObjectsOptions{
				Recursive: false,
			})

			listItems = make([]*uplink.Object, 0)
			for objects.Next() {
				listItems = append(listItems, objects.Item())
			}
			require.NoError(t, objects.Err())

			expected = []metaclient.Object{
				{Path: "müsic"},
				{Path: "müsic/", IsPrefix: true},
				{Path: "sample.😶"},
				{Path: "ビデオ/", IsPrefix: true},
			}

			require.Equal(t, len(expected), len(listItems))
			sort.Slice(listItems, func(i, k int) bool {
				return listItems[i].Key < listItems[k].Key
			})
			for i, item := range expected {
				t.Log(item.Path, listItems[i].Key)
				require.Equal(t, item.Path, listItems[i].Key)
				require.Equal(t, item.IsPrefix, listItems[i].IsPrefix)
			}
		})

		// ensures that CommitObject returns an error when the metadata provided by the user is too large.
		t.Run("validate metadata size", func(t *testing.T) {
			defer ctx.Check(deleteBucket)

			err = planet.Uplinks[0].CreateBucket(ctx, planet.Satellites[0], bucketName)
			require.NoError(t, err)

			params := metaclient.BeginObjectParams{
				Bucket:             []byte(bucketName),
				EncryptedObjectKey: []byte("encrypted-path"),
				Redundancy: storj.RedundancyScheme{
					Algorithm:      storj.ReedSolomon,
					ShareSize:      256,
					RequiredShares: 1,
					RepairShares:   1,
					OptimalShares:  3,
					TotalShares:    4,
				},
				EncryptionParameters: storj.EncryptionParameters{
					BlockSize:   256,
					CipherSuite: storj.EncNull,
				},
				ExpiresAt: time.Now().Add(24 * time.Hour),
			}
			beginObjectResponse, err := metainfoClient.BeginObject(ctx, params)
			require.NoError(t, err)

			// 5KiB metadata should fail because it is too large.
			metadata, err := pb.Marshal(&pb.StreamMeta{
				EncryptedStreamInfo: testrand.Bytes(5 * memory.KiB),
				NumberOfSegments:    1,
			})
			require.NoError(t, err)
			err = metainfoClient.CommitObject(ctx, metaclient.CommitObjectParams{
				StreamID:                      beginObjectResponse.StreamID,
				EncryptedMetadata:             metadata,
				EncryptedMetadataNonce:        testrand.Nonce(),
				EncryptedMetadataEncryptedKey: randomEncryptedKey,
			})
			require.Error(t, err)
			assertInvalidArgument(t, err, true)

			// 1KiB metadata should not fail.
			metadata, err = pb.Marshal(&pb.StreamMeta{
				EncryptedStreamInfo: testrand.Bytes(1 * memory.KiB),
				NumberOfSegments:    1,
			})
			require.NoError(t, err)
			err = metainfoClient.CommitObject(ctx, metaclient.CommitObjectParams{
				StreamID:                      beginObjectResponse.StreamID,
				EncryptedMetadata:             metadata,
				EncryptedMetadataNonce:        testrand.Nonce(),
				EncryptedMetadataEncryptedKey: randomEncryptedKey,
			})
			require.NoError(t, err)
		})
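
		// "update metadata" replaces the encrypted metadata of a committed
		// object via UpdateObjectMetadata and verifies the change directly
		// in the metabase.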
		t.Run("update metadata", func(t *testing.T) {
			defer ctx.Check(deleteBucket)

			satelliteSys := planet.Satellites[0]

			// upload a small inline object
			err := planet.Uplinks[0].Upload(ctx, planet.Satellites[0], bucketName, "testobject", testrand.Bytes(1*memory.KiB))
			require.NoError(t, err)

			objects, err := satelliteSys.API.Metainfo.Metabase.TestingAllObjects(ctx)
			require.NoError(t, err)
			require.Len(t, objects, 1)

			getResp, err := satelliteSys.API.Metainfo.Endpoint.GetObject(ctx, &pb.ObjectGetRequest{
				Header: &pb.RequestHeader{
					ApiKey: apiKey.SerializeRaw(),
				},
				Bucket:             []byte("testbucket"),
				EncryptedObjectKey: []byte(objects[0].ObjectKey),
			})
			require.NoError(t, err)

			testEncryptedMetadata := testrand.BytesInt(64)
			testEncryptedMetadataEncryptedKey := randomEncryptedKey
			testEncryptedMetadataNonce := testrand.Nonce()

			// update the object metadata
			_, err = satelliteSys.API.Metainfo.Endpoint.UpdateObjectMetadata(ctx, &pb.ObjectUpdateMetadataRequest{
				Header: &pb.RequestHeader{
					ApiKey: apiKey.SerializeRaw(),
				},
				Bucket:                        getResp.Object.Bucket,
				EncryptedObjectKey:            getResp.Object.EncryptedObjectKey,
				Version:                       getResp.Object.Version,
				StreamId:                      getResp.Object.StreamId,
				EncryptedMetadataNonce:        testEncryptedMetadataNonce,
				EncryptedMetadata:             testEncryptedMetadata,
				EncryptedMetadataEncryptedKey: testEncryptedMetadataEncryptedKey,
			})
			require.NoError(t, err)

			// assert the metadata has been updated
			objects, err = satelliteSys.API.Metainfo.Metabase.TestingAllObjects(ctx)
			require.NoError(t, err)
			require.Len(t, objects, 1)
			assert.Equal(t, testEncryptedMetadata, objects[0].EncryptedMetadata)
			assert.Equal(t, testEncryptedMetadataEncryptedKey, objects[0].EncryptedMetadataEncryptedKey)
			assert.Equal(t, testEncryptedMetadataNonce[:], objects[0].EncryptedMetadataNonce)
		})
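
		// Overwriting an object replaces its previous version, so an access
		// grant without AllowDelete should not be able to overwrite.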
		t.Run("check delete rights on upload", func(t *testing.T) {
			defer ctx.Check(deleteBucket)

			up := planet.Uplinks[0]

			err := up.CreateBucket(ctx, planet.Satellites[0], bucketName)
			require.NoError(t, err)

			data := testrand.Bytes(1 * memory.KiB)
			err = up.Upload(ctx, planet.Satellites[0], bucketName, "test-key", data)
			require.NoError(t, err)

			access := up.Access[planet.Satellites[0].ID()]

			overwrite := func(allowDelete bool) error {
				permission := uplink.FullPermission()
				permission.AllowDelete = allowDelete

				sharedAccess, err := access.Share(permission)
				require.NoError(t, err)

				project, err := uplink.OpenProject(ctx, sharedAccess)
				require.NoError(t, err)
				defer ctx.Check(project.Close)

				upload, err := project.UploadObject(ctx, bucketName, "test-key", nil)
				require.NoError(t, err)

				_, err = upload.Write([]byte("new data"))
				require.NoError(t, err)

				return upload.Commit()
			}

			require.Error(t, overwrite(false))
			require.NoError(t, overwrite(true))
		})
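
		// "immutable upload": with AllowUpload only, the first commit
		// succeeds and any overwrite of the same key fails on Commit.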
		t.Run("immutable upload", func(t *testing.T) {
			defer ctx.Check(deleteBucket)

			access := planet.Uplinks[0].Access[planet.Satellites[0].ID()]

			permission := uplink.Permission{AllowUpload: true} // AllowDelete: false
			sharedAccess, err := access.Share(permission)
			require.NoError(t, err)

			project, err := uplink.OpenProject(ctx, sharedAccess)
			require.NoError(t, err)
			defer ctx.Check(project.Close)

			_, err = project.CreateBucket(ctx, bucketName)
			require.NoError(t, err)

			// Uploading the object for the first time should be successful.
			upload, err := project.UploadObject(ctx, bucketName, "test-key", nil)
			require.NoError(t, err)

			_, err = upload.Write(testrand.Bytes(1 * memory.KiB))
			require.NoError(t, err)

			err = upload.Commit()
			require.NoError(t, err)

			// Overwriting the object should fail on Commit.
			upload, err = project.UploadObject(ctx, bucketName, "test-key", nil)
			require.NoError(t, err)

			_, err = upload.Write(testrand.Bytes(1 * memory.KiB))
			require.NoError(t, err)

			err = upload.Commit()
			require.Error(t, err)
		})
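
		// "stable upload id": a pending object must report the same StreamID
		// through every listing path (recursive, prefixed, and pending-stream).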
		t.Run("stable upload id", func(t *testing.T) {
			defer ctx.Check(deleteBucket)

			err = planet.Uplinks[0].CreateBucket(ctx, planet.Satellites[0], bucketName)
			require.NoError(t, err)

			beginResp, err := metainfoClient.BeginObject(ctx, metaclient.BeginObjectParams{
				Bucket:             []byte(bucketName),
				EncryptedObjectKey: []byte("a/b/testobject"),
				EncryptionParameters: storj.EncryptionParameters{
					CipherSuite: storj.EncAESGCM,
					BlockSize:   256,
				},
			})
			require.NoError(t, err)

			// List the root of the bucket recursively
			listResp, _, err := metainfoClient.ListObjects(ctx, metaclient.ListObjectsParams{
				Bucket:                []byte(bucketName),
				Status:                int32(metabase.Pending),
				Recursive:             true,
				IncludeSystemMetadata: true,
			})
			require.NoError(t, err)
			require.Len(t, listResp, 1)
			// check that BeginObject and ListObjects return the same StreamID.
			assert.Equal(t, beginResp.StreamID, listResp[0].StreamID)

			// List with prefix non-recursively
			listResp2, _, err := metainfoClient.ListObjects(ctx, metaclient.ListObjectsParams{
				Bucket:                []byte(bucketName),
				Status:                int32(metabase.Pending),
				EncryptedPrefix:       []byte("a/b/"),
				IncludeSystemMetadata: true,
			})
			require.NoError(t, err)
			require.Len(t, listResp2, 1)
			// check that the StreamID is still the same.
			assert.Equal(t, listResp[0].StreamID, listResp2[0].StreamID)

			// List with prefix recursively
			listResp3, _, err := metainfoClient.ListObjects(ctx, metaclient.ListObjectsParams{
				Bucket:                []byte(bucketName),
				Status:                int32(metabase.Pending),
				EncryptedPrefix:       []byte("a/b/"),
				Recursive:             true,
				IncludeSystemMetadata: true,
			})
			require.NoError(t, err)
			require.Len(t, listResp3, 1)
			// check that the StreamID is still the same.
			assert.Equal(t, listResp[0].StreamID, listResp3[0].StreamID)

			// List the pending object directly
			listResp4, err := metainfoClient.ListPendingObjectStreams(ctx, metaclient.ListPendingObjectStreamsParams{
				Bucket:             []byte(bucketName),
				EncryptedObjectKey: []byte("a/b/testobject"),
			})
			require.NoError(t, err)
			require.Len(t, listResp4.Items, 1)
			// check that the StreamID is still the same.
			assert.Equal(t, listResp[0].StreamID, listResp4.Items[0].StreamID)
		})

		// ensures that BeginObject returns an error when the encrypted key provided by the user is too large.
		t.Run("validate encrypted object key length", func(t *testing.T) {
			defer ctx.Check(deleteBucket)

			err := planet.Uplinks[0].CreateBucket(ctx, planet.Satellites[0], bucketName)
			require.NoError(t, err)

			params := metaclient.BeginObjectParams{
				Bucket: []byte(bucketName),
				EncryptionParameters: storj.EncryptionParameters{
					BlockSize:   256,
					CipherSuite: storj.EncNull,
				},
			}

			params.EncryptedObjectKey = testrand.Bytes(500)
			_, err = metainfoClient.BeginObject(ctx, params)
			require.NoError(t, err)

			params.EncryptedObjectKey = testrand.Bytes(1024)
			_, err = metainfoClient.BeginObject(ctx, params)
			require.NoError(t, err)

			params.EncryptedObjectKey = testrand.Bytes(2048)
			_, err = metainfoClient.BeginObject(ctx, params)
			require.Error(t, err)
			require.True(t, rpcstatus.Code(err) == rpcstatus.InvalidArgument)
		})
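
		// Deleting a missing committed object is a no-op, while deleting a
		// missing pending object (addressed by a signed StreamID) returns
		// NotFound.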
		t.Run("delete not existing object", func(t *testing.T) {
			expectedBucketName := bucketName

			// non-pending non-existent objects return no error
			_, err = metainfoClient.BeginDeleteObject(ctx, metaclient.BeginDeleteObjectParams{
				Bucket:             []byte(expectedBucketName),
				EncryptedObjectKey: []byte("bad path"),
			})
			require.NoError(t, err)

			// pending non-existent objects return an RPC error
			signer := signing.SignerFromFullIdentity(planet.Satellites[0].Identity)
			streamUUID := testrand.UUID()
			satStreamID := &internalpb.StreamID{
				Bucket:             []byte(expectedBucketName),
				EncryptedObjectKey: []byte("bad path"),
				StreamId:           streamUUID[:],
				CreationDate:       time.Now(),
			}
			signedStreamID, err := metainfo.SignStreamID(ctx, signer, satStreamID)
			require.NoError(t, err)
			encodedStreamID, err := pb.Marshal(signedStreamID)
			require.NoError(t, err)
			streamID, err := storj.StreamIDFromBytes(encodedStreamID)
			require.NoError(t, err)
			_, err = metainfoClient.BeginDeleteObject(ctx, metaclient.BeginDeleteObjectParams{
				Bucket:             []byte(expectedBucketName),
				EncryptedObjectKey: []byte("bad path"),
				Status:             int32(metabase.Pending),
				StreamID:           streamID,
			})
			require.True(t, errs2.IsRPC(err, rpcstatus.NotFound))
		})
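
		// "get object": a newer pending version must not shadow the committed
		// object when fetching it by key and version.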
		t.Run("get object", func(t *testing.T) {
			defer ctx.Check(deleteBucket)

			err := planet.Uplinks[0].Upload(ctx, satellite, "testbucket", "object", testrand.Bytes(256))
			require.NoError(t, err)

			objects, err := satellite.API.Metainfo.Metabase.TestingAllObjects(ctx)
			require.NoError(t, err)
			require.Len(t, objects, 1)

			committedObject := objects[0]

			pendingObject, err := satellite.API.Metainfo.Metabase.BeginObjectNextVersion(ctx, metabase.BeginObjectNextVersion{
				ObjectStream: metabase.ObjectStream{
					ProjectID:  committedObject.ProjectID,
					BucketName: committedObject.BucketName,
					ObjectKey:  committedObject.ObjectKey,
					StreamID:   committedObject.StreamID,
				},
			})
			require.NoError(t, err)
			require.Equal(t, committedObject.Version+1, pendingObject.Version)

			getObjectResponse, err := satellite.API.Metainfo.Endpoint.GetObject(ctx, &pb.ObjectGetRequest{
				Header:             &pb.RequestHeader{ApiKey: apiKey.SerializeRaw()},
				Bucket:             []byte("testbucket"),
				EncryptedObjectKey: []byte(committedObject.ObjectKey),
				Version:            int32(committedObject.Version),
			})
			require.NoError(t, err)
			require.EqualValues(t, committedObject.BucketName, getObjectResponse.Object.Bucket)
			require.EqualValues(t, committedObject.ObjectKey, getObjectResponse.Object.EncryptedObjectKey)
			require.EqualValues(t, committedObject.Version, getObjectResponse.Object.Version)
		})

		t.Run("download object", func(t *testing.T) {
			defer ctx.Check(deleteBucket)

			err := planet.Uplinks[0].Upload(ctx, satellite, "testbucket", "object", testrand.Bytes(256))
			require.NoError(t, err)

			objects, err := satellite.API.Metainfo.Metabase.TestingAllObjects(ctx)
			require.NoError(t, err)
			require.Len(t, objects, 1)

			committedObject := objects[0]

			pendingObject, err := satellite.API.Metainfo.Metabase.BeginObjectNextVersion(ctx, metabase.BeginObjectNextVersion{
				ObjectStream: metabase.ObjectStream{
					ProjectID:  committedObject.ProjectID,
					BucketName: committedObject.BucketName,
					ObjectKey:  committedObject.ObjectKey,
					StreamID:   committedObject.StreamID,
				},
			})
			require.NoError(t, err)
			require.Equal(t, committedObject.Version+1, pendingObject.Version)

			downloadObjectResponse, err := satellite.API.Metainfo.Endpoint.DownloadObject(ctx, &pb.ObjectDownloadRequest{
				Header:             &pb.RequestHeader{ApiKey: apiKey.SerializeRaw()},
				Bucket:             []byte("testbucket"),
				EncryptedObjectKey: []byte(committedObject.ObjectKey),
			})
			require.NoError(t, err)
			require.EqualValues(t, committedObject.BucketName, downloadObjectResponse.Object.Bucket)
			require.EqualValues(t, committedObject.ObjectKey, downloadObjectResponse.Object.EncryptedObjectKey)
			require.EqualValues(t, committedObject.Version, downloadObjectResponse.Object.Version)
		})
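
		// BeginObject must reject an expiration time that is already in the
		// past.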
		t.Run("begin expired object", func(t *testing.T) {
			defer ctx.Check(deleteBucket)

			err := planet.Uplinks[0].CreateBucket(ctx, planet.Satellites[0], bucketName)
			require.NoError(t, err)

			params := metaclient.BeginObjectParams{
				Bucket:             []byte(bucketName),
				EncryptedObjectKey: []byte("encrypted-path"),
				Redundancy: storj.RedundancyScheme{
					Algorithm:      storj.ReedSolomon,
					ShareSize:      256,
					RequiredShares: 1,
					RepairShares:   1,
					OptimalShares:  3,
					TotalShares:    4,
				},
				EncryptionParameters: storj.EncryptionParameters{
					BlockSize:   256,
					CipherSuite: storj.EncNull,
				},
				ExpiresAt: time.Now().Add(-24 * time.Hour),
			}

			_, err = metainfoClient.BeginObject(ctx, params)
			require.Error(t, err)
			require.Contains(t, err.Error(), "Invalid expiration time")
			require.True(t, errs2.IsRPC(err, rpcstatus.InvalidArgument))
		})
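
		// "UploadID check": for every combination of expiration and listing
		// options, ListUploads must return the same UploadID that BeginUpload
		// issued, and that ID must be usable for AbortUpload.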
		t.Run("UploadID check", func(t *testing.T) {
			defer ctx.Check(deleteBucket)

			project, err := planet.Uplinks[0].OpenProject(ctx, planet.Satellites[0])
			require.NoError(t, err)
			defer ctx.Check(project.Close)

			_, err = project.CreateBucket(ctx, bucketName)
			require.NoError(t, err)

			for _, tt := range []struct {
				expires time.Time
				options uplink.ListUploadsOptions
			}{
				{
					options: uplink.ListUploadsOptions{System: false, Custom: false},
				},
				{
					options: uplink.ListUploadsOptions{System: true, Custom: false},
				},
				{
					options: uplink.ListUploadsOptions{System: true, Custom: true},
				},
				{
					options: uplink.ListUploadsOptions{System: false, Custom: true},
				},
				{
					expires: time.Now().Add(24 * time.Hour),
					options: uplink.ListUploadsOptions{System: false, Custom: false},
				},
				{
					expires: time.Now().Add(24 * time.Hour),
					options: uplink.ListUploadsOptions{System: true, Custom: false},
				},
				{
					expires: time.Now().Add(24 * time.Hour),
					options: uplink.ListUploadsOptions{System: true, Custom: true},
				},
				{
					expires: time.Now().Add(24 * time.Hour),
					options: uplink.ListUploadsOptions{System: false, Custom: true},
				},
			} {
				t.Run(fmt.Sprintf("expires:%v;system:%v;custom:%v", !tt.expires.IsZero(), tt.options.System, tt.options.Custom), func(t *testing.T) {
					uploadInfo, err := project.BeginUpload(ctx, bucketName, "multipart-object", &uplink.UploadOptions{
						Expires: tt.expires,
					})
					require.NoError(t, err)

					iterator := project.ListUploads(ctx, bucketName, &tt.options)
					require.True(t, iterator.Next())
					require.Equal(t, uploadInfo.UploadID, iterator.Item().UploadID)

					err = project.AbortUpload(ctx, bucketName, "multipart-object", iterator.Item().UploadID)
					require.NoError(t, err)
				})
			}
		})
	})
}
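
// TestEndpoint_Object_UploadLimit verifies the per-object upload rate limiter:
// with SingleObjectLimit set to 200ms, a second BeginObject for the same key
// within that window is rejected with ResourceExhausted.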
func TestEndpoint_Object_UploadLimit(t *testing.T) {
	testplanet.Run(t, testplanet.Config{
		SatelliteCount: 1, UplinkCount: 1,
		Reconfigure: testplanet.Reconfigure{
			Satellite: func(log *zap.Logger, index int, config *satellite.Config) {
				config.Metainfo.UploadLimiter.SingleObjectLimit = 200 * time.Millisecond
			},
		},
	}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
		apiKey := planet.Uplinks[0].APIKey[planet.Satellites[0].ID()]
		endpoint := planet.Satellites[0].Metainfo.Endpoint

		bucketName := "testbucket"

		err := planet.Uplinks[0].CreateBucket(ctx, planet.Satellites[0], bucketName)
		require.NoError(t, err)
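
		// time2.WithNewMachine pins a fake clock on the context, so the
		// limiter window can be crossed deterministically instead of sleeping
		// in the test.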
		t.Run("limit single object upload", func(t *testing.T) {
			now := time.Now()
			request := &pb.BeginObjectRequest{
				Header: &pb.RequestHeader{
					ApiKey: apiKey.SerializeRaw(),
				},
				Bucket:             []byte(bucketName),
				EncryptedObjectKey: []byte("single-object"),
				EncryptionParameters: &pb.EncryptionParameters{
					CipherSuite: pb.CipherSuite_ENC_AESGCM,
				},
			}
			// uploading to the same location one by one should fail
			_, err := endpoint.BeginObject(ctx, request)
			require.NoError(t, err)

			_, err = endpoint.BeginObject(ctx, request)
			require.Error(t, err)
			require.True(t, errs2.IsRPC(err, rpcstatus.ResourceExhausted))

			ctx, _ := time2.WithNewMachine(ctx, time2.WithTimeAt(now.Add(250*time.Millisecond)))

			_, err = endpoint.BeginObject(ctx, request)
			require.NoError(t, err)

			// uploading to different locations one by one should NOT fail
			request.EncryptedObjectKey = []byte("single-objectA")
			_, err = endpoint.BeginObject(ctx, request)
			require.NoError(t, err)

			request.EncryptedObjectKey = []byte("single-objectB")
			_, err = endpoint.BeginObject(ctx, request)
			require.NoError(t, err)
		})
	})
}
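
// TestEndpoint_Object_No_StorageNodes_TestListingQuery repeats the list
// service checks with the TestListingQuery feature flag enabled on the
// satellite.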
// TODO remove when listing query tests feature flag is removed.
func TestEndpoint_Object_No_StorageNodes_TestListingQuery(t *testing.T) {
	testplanet.Run(t, testplanet.Config{
		SatelliteCount: 1, UplinkCount: 1,
		Reconfigure: testplanet.Reconfigure{
			Satellite: testplanet.Combine(testplanet.MaxObjectKeyLength(1024), func(log *zap.Logger, index int, config *satellite.Config) {
				config.Metainfo.TestListingQuery = true
			}),
		},
	}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
		apiKey := planet.Uplinks[0].APIKey[planet.Satellites[0].ID()]

		metainfoClient, err := planet.Uplinks[0].DialMetainfo(ctx, planet.Satellites[0], apiKey)
		require.NoError(t, err)
		defer ctx.Check(metainfoClient.Close)

		bucketName := "testbucket"
		deleteBucket := func() error {
			_, err := metainfoClient.DeleteBucket(ctx, metaclient.DeleteBucketParams{
				Name:      []byte(bucketName),
				DeleteAll: true,
			})
			return err
		}

		t.Run("list service with listing query test", func(t *testing.T) {
			defer ctx.Check(deleteBucket)

			items := []struct {
				Key   string
				Value []byte
			}{
				{Key: "sample.😶", Value: []byte{1}},
				{Key: "müsic", Value: []byte{2}},
				{Key: "müsic/söng1.mp3", Value: []byte{3}},
				{Key: "müsic/söng2.mp3", Value: []byte{4}},
				{Key: "müsic/album/söng3.mp3", Value: []byte{5}},
				{Key: "müsic/söng4.mp3", Value: []byte{6}},
				{Key: "ビデオ/movie.mkv", Value: []byte{7}},
			}

			for _, item := range items {
				err := planet.Uplinks[0].Upload(ctx, planet.Satellites[0], bucketName, item.Key, item.Value)
				assert.NoError(t, err)
			}

			project, err := planet.Uplinks[0].GetProject(ctx, planet.Satellites[0])
			require.NoError(t, err)
			defer ctx.Check(project.Close)

			objects := project.ListObjects(ctx, "testbucket", &uplink.ListObjectsOptions{
				Recursive: true,
			})

			listItems := make([]*uplink.Object, 0)
			for objects.Next() {
				listItems = append(listItems, objects.Item())
			}
			require.NoError(t, objects.Err())

			expected := []metaclient.Object{
				{Path: "müsic"},
				{Path: "müsic/album/söng3.mp3"},
				{Path: "müsic/söng1.mp3"},
				{Path: "müsic/söng2.mp3"},
				{Path: "müsic/söng4.mp3"},
				{Path: "sample.😶"},
				{Path: "ビデオ/movie.mkv"},
			}

			require.Equal(t, len(expected), len(listItems))
			sort.Slice(listItems, func(i, k int) bool {
				return listItems[i].Key < listItems[k].Key
			})
			for i, item := range expected {
				require.Equal(t, item.Path, listItems[i].Key)
				require.Equal(t, item.IsPrefix, listItems[i].IsPrefix)
			}

			objects = project.ListObjects(ctx, bucketName, &uplink.ListObjectsOptions{
				Recursive: false,
			})

			listItems = make([]*uplink.Object, 0)
			for objects.Next() {
				listItems = append(listItems, objects.Item())
			}
			require.NoError(t, objects.Err())

			expected = []metaclient.Object{
				{Path: "müsic"},
				{Path: "müsic/", IsPrefix: true},
				{Path: "sample.😶"},
				{Path: "ビデオ/", IsPrefix: true},
			}

			require.Equal(t, len(expected), len(listItems))
			sort.Slice(listItems, func(i, k int) bool {
				return listItems[i].Key < listItems[k].Key
			})
			for i, item := range expected {
				t.Log(item.Path, listItems[i].Key)
				require.Equal(t, item.Path, listItems[i].Key)
				require.Equal(t, item.IsPrefix, listItems[i].IsPrefix)
			}
		})
	})
}
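
// TestEndpoint_Object_With_StorageNodes covers flows that need remote
// segments: the full BeginObject/BeginSegment/CommitSegment/CommitObject
// cycle, object IP lookups with geofencing, multipart download rejection,
// and placement checks.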
func TestEndpoint_Object_With_StorageNodes(t *testing.T) {
	testplanet.Run(t, testplanet.Config{
		SatelliteCount: 1, StorageNodeCount: 4, UplinkCount: 1,
		Reconfigure: testplanet.Reconfigure{
			Satellite: func(logger *zap.Logger, index int, config *satellite.Config) {
				config.Overlay.GeoIP.MockCountries = []string{"DE"}
			},
		},
	}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
		apiKey := planet.Uplinks[0].APIKey[planet.Satellites[0].ID()]
		metainfoClient, err := planet.Uplinks[0].DialMetainfo(ctx, planet.Satellites[0], apiKey)
		require.NoError(t, err)
		defer ctx.Check(metainfoClient.Close)

		project, err := planet.Uplinks[0].OpenProject(ctx, planet.Satellites[0])
		require.NoError(t, err)
		defer ctx.Check(project.Close)

		bucketName := "testbucket"
		deleteBucket := func(bucketName string) func() error {
			return func() error {
				_, err := metainfoClient.DeleteBucket(ctx, metaclient.DeleteBucketParams{
					Name:      []byte(bucketName),
					DeleteAll: true,
				})
				return err
			}
		}
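
		// "begin commit" drives a manual upload through the metainfo client:
		// begin the object, get order limits for a segment, sign piece hashes
		// with the storage node identities, then commit segment and object.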
		t.Run("begin commit", func(t *testing.T) {
			defer ctx.Check(deleteBucket(bucketName))

			bucketsService := planet.Satellites[0].API.Buckets.Service

			bucket := buckets.Bucket{
				Name:      bucketName,
				ProjectID: planet.Uplinks[0].Projects[0].ID,
				Placement: storj.EU,
			}

			_, err := bucketsService.CreateBucket(ctx, bucket)
			require.NoError(t, err)

			params := metaclient.BeginObjectParams{
				Bucket:             []byte(bucket.Name),
				EncryptedObjectKey: []byte("encrypted-path"),
				Redundancy: storj.RedundancyScheme{
					Algorithm:      storj.ReedSolomon,
					ShareSize:      256,
					RequiredShares: 1,
					RepairShares:   1,
					OptimalShares:  3,
					TotalShares:    4,
				},
				EncryptionParameters: storj.EncryptionParameters{
					CipherSuite: storj.EncAESGCM,
					BlockSize:   256,
				},
				ExpiresAt: time.Now().Add(24 * time.Hour),
			}
			beginObjectResponse, err := metainfoClient.BeginObject(ctx, params)
			require.NoError(t, err)

			streamID := internalpb.StreamID{}
			err = pb.Unmarshal(beginObjectResponse.StreamID.Bytes(), &streamID)
			require.NoError(t, err)
			require.Equal(t, int32(storj.EU), streamID.Placement)

			response, err := metainfoClient.BeginSegment(ctx, metaclient.BeginSegmentParams{
				StreamID: beginObjectResponse.StreamID,
				Position: metaclient.SegmentPosition{
					Index: 0,
				},
				MaxOrderLimit: memory.MiB.Int64(),
			})
			require.NoError(t, err)

			fullIDMap := make(map[storj.NodeID]*identity.FullIdentity)
			for _, node := range planet.StorageNodes {
				fullIDMap[node.ID()] = node.Identity
			}

			makeResult := func(num int32) *pb.SegmentPieceUploadResult {
				nodeID := response.Limits[num].Limit.StorageNodeId
				hash := &pb.PieceHash{
					PieceId:   response.Limits[num].Limit.PieceId,
					PieceSize: 1048832,
					Timestamp: time.Now(),
				}

				fullID := fullIDMap[nodeID]
				require.NotNil(t, fullID)
				signer := signing.SignerFromFullIdentity(fullID)
				signedHash, err := signing.SignPieceHash(ctx, signer, hash)
				require.NoError(t, err)

				return &pb.SegmentPieceUploadResult{
					PieceNum: num,
					NodeId:   nodeID,
					Hash:     signedHash,
				}
			}
			err = metainfoClient.CommitSegment(ctx, metaclient.CommitSegmentParams{
				SegmentID: response.SegmentID,
				Encryption: metaclient.SegmentEncryption{
					EncryptedKey: testrand.Bytes(256),
				},
				PlainSize:         5000,
				SizeEncryptedData: memory.MiB.Int64(),
				UploadResult: []*pb.SegmentPieceUploadResult{
					makeResult(0),
					makeResult(1),
					makeResult(2),
				},
			})
			require.NoError(t, err)

			metadata, err := pb.Marshal(&pb.StreamMeta{
				NumberOfSegments: 1,
			})
			require.NoError(t, err)
			err = metainfoClient.CommitObject(ctx, metaclient.CommitObjectParams{
				StreamID:                      beginObjectResponse.StreamID,
				EncryptedMetadata:             metadata,
				EncryptedMetadataNonce:        testrand.Nonce(),
				EncryptedMetadataEncryptedKey: randomEncryptedKey,
			})
			require.NoError(t, err)

			objects, _, err := metainfoClient.ListObjects(ctx, metaclient.ListObjectsParams{
				Bucket:                []byte(bucket.Name),
				IncludeSystemMetadata: true,
			})
			require.NoError(t, err)
			require.Len(t, objects, 1)
			require.Equal(t, params.EncryptedObjectKey, objects[0].EncryptedObjectKey)
			// TODO find a better way to compare (one ExpiresAt contains time zone information)
			require.Equal(t, params.ExpiresAt.Unix(), objects[0].ExpiresAt.Unix())

			object, err := metainfoClient.GetObject(ctx, metaclient.GetObjectParams{
				Bucket:             []byte(bucket.Name),
				EncryptedObjectKey: objects[0].EncryptedObjectKey,
			})
			require.NoError(t, err)

			project := planet.Uplinks[0].Projects[0]
			allObjects, err := planet.Satellites[0].Metabase.DB.TestingAllCommittedObjects(ctx, project.ID, object.Bucket)
			require.NoError(t, err)
			require.Len(t, allObjects, 1)
		})
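
		// "get object IP" checks that GetObjectIPs returns the last known
		// IP:port of every node holding a piece, that a server-side copy
		// reports the same set, and that geofencing filters excluded nodes.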
		t.Run("get object IP", func(t *testing.T) {
			defer ctx.Check(deleteBucket(bucketName))

			access := planet.Uplinks[0].Access[planet.Satellites[0].ID()]
			uplnk := planet.Uplinks[0]
			uplinkCtx := testuplink.WithMaxSegmentSize(ctx, 5*memory.KB)
			sat := planet.Satellites[0]

			require.NoError(t, uplnk.CreateBucket(uplinkCtx, sat, bucketName))
			require.NoError(t, uplnk.Upload(uplinkCtx, sat, bucketName, "jones", testrand.Bytes(20*memory.KB)))

			jonesSegments, err := planet.Satellites[0].Metabase.DB.TestingAllSegments(ctx)
			require.NoError(t, err)

			project, err := uplnk.OpenProject(ctx, planet.Satellites[0])
			require.NoError(t, err)
			defer ctx.Check(project.Close)

			// make a copy
			_, err = project.CopyObject(ctx, bucketName, "jones", bucketName, "jones_copy", nil)
			require.NoError(t, err)

			ips, err := object.GetObjectIPs(ctx, uplink.Config{}, access, bucketName, "jones")
			require.NoError(t, err)
			require.True(t, len(ips) > 0)

			copyIPs, err := object.GetObjectIPs(ctx, uplink.Config{}, access, bucketName, "jones_copy")
			require.NoError(t, err)

			// verify that the original and the copy return the same results
			require.ElementsMatch(t, ips, copyIPs)

			expectedIPsMap := map[string]struct{}{}
			for _, segment := range jonesSegments {
				for _, piece := range segment.Pieces {
					node, err := planet.Satellites[0].Overlay.Service.Get(ctx, piece.StorageNode)
					require.NoError(t, err)
					expectedIPsMap[node.LastIPPort] = struct{}{}
				}
			}

			expectedIPs := [][]byte{}
			for _, ip := range maps.Keys(expectedIPsMap) {
				expectedIPs = append(expectedIPs, []byte(ip))
			}
			require.ElementsMatch(t, expectedIPs, ips)

			// set bucket geofencing
			_, err = planet.Satellites[0].DB.Buckets().UpdateBucket(ctx, buckets.Bucket{
				ProjectID: planet.Uplinks[0].Projects[0].ID,
				Name:      bucketName,
				Placement: storj.EU,
			})
			require.NoError(t, err)

			// set one node to US to filter it out from IP results
			usNode := planet.FindNode(jonesSegments[0].Pieces[0].StorageNode)
			require.NoError(t, planet.Satellites[0].Overlay.Service.TestNodeCountryCode(ctx, usNode.ID(), "US"))
			require.NoError(t, planet.Satellites[0].API.Overlay.Service.DownloadSelectionCache.Refresh(ctx))

			geoFencedIPs, err := object.GetObjectIPs(ctx, uplink.Config{}, access, bucketName, "jones")
			require.NoError(t, err)

			require.Len(t, geoFencedIPs, len(expectedIPs)-1)
			for _, ip := range geoFencedIPs {
				if string(ip) == usNode.Addr() {
					t.Fatal("this IP should be removed from results because of geofencing")
				}
			}
		})

		t.Run("get object IP with same location committed and pending status", func(t *testing.T) {
			defer ctx.Check(deleteBucket(bucketName))

			access := planet.Uplinks[0].Access[planet.Satellites[0].ID()]
			uplnk := planet.Uplinks[0]
			sat := planet.Satellites[0]

			require.NoError(t, uplnk.Upload(ctx, sat, bucketName, "jones", testrand.Bytes(20*memory.KB)))

			ips, err := object.GetObjectIPs(ctx, uplink.Config{}, access, bucketName, "jones")
			require.NoError(t, err)
			require.True(t, len(ips) > 0)

			// verify it's a real IP with valid host and port
			for _, ip := range ips {
				host, port, err := net.SplitHostPort(string(ip))
				require.NoError(t, err)
				netIP := net.ParseIP(host)
				require.NotNil(t, netIP)
				_, err = strconv.Atoi(port)
				require.NoError(t, err)
			}

			objects, err := sat.API.Metainfo.Metabase.TestingAllObjects(ctx)
			require.NoError(t, err)
			require.Len(t, objects, 1)

			committedObject := objects[0]

			pendingObject, err := sat.Metabase.DB.BeginObjectNextVersion(ctx, metabase.BeginObjectNextVersion{
				ObjectStream: metabase.ObjectStream{
					ProjectID:  committedObject.ProjectID,
					BucketName: committedObject.BucketName,
					ObjectKey:  committedObject.ObjectKey,
					StreamID:   committedObject.StreamID,
				},
			})
			require.NoError(t, err)
			require.Equal(t, committedObject.Version+1, pendingObject.Version)

			newIps, err := object.GetObjectIPs(ctx, uplink.Config{}, access, bucketName, "jones")
			require.NoError(t, err)

			sort.Slice(ips, func(i, j int) bool {
				return bytes.Compare(ips[i], ips[j]) < 0
			})
			sort.Slice(newIps, func(i, j int) bool {
				return bytes.Compare(newIps[i], newIps[j]) < 0
			})
			require.Equal(t, ips, newIps)
		})

		t.Run("get object IP with version != 1", func(t *testing.T) {
			defer ctx.Check(deleteBucket(bucketName))

			access := planet.Uplinks[0].Access[planet.Satellites[0].ID()]
			uplnk := planet.Uplinks[0]
			sat := planet.Satellites[0]

			require.NoError(t, uplnk.Upload(ctx, sat, bucketName, "jones", testrand.Bytes(20*memory.KB)))

			objects, err := sat.API.Metainfo.Metabase.TestingAllObjects(ctx)
			require.NoError(t, err)

			committedObject := objects[0]
			randomVersion := metabase.Version(2 + testrand.Intn(9))

			// at the moment there's no better way to change an object's version
			res, err := planet.Satellites[0].Metabase.DB.UnderlyingTagSQL().Exec(ctx,
				"UPDATE objects SET version = $1 WHERE project_id = $2 AND bucket_name = $3 AND object_key = $4 AND stream_id = $5",
				randomVersion, committedObject.ProjectID, committedObject.BucketName, committedObject.ObjectKey, committedObject.StreamID,
			)
			require.NoError(t, err)

			affected, err := res.RowsAffected()
			require.NoError(t, err)
			require.EqualValues(t, 1, affected)

			ips, err := object.GetObjectIPs(ctx, uplink.Config{}, access, bucketName, "jones")
			require.NoError(t, err)
			require.True(t, len(ips) > 0)

			// verify it's a real IP with valid host and port
			for _, ip := range ips {
				host, port, err := net.SplitHostPort(string(ip))
				require.NoError(t, err)
				netIP := net.ParseIP(host)
				require.NotNil(t, netIP)
				_, err = strconv.Atoi(port)
				require.NoError(t, err)
			}
		})
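
		// Older uplinks download the last segment by asking for index -1;
		// that only works for objects uploaded the standard way, so the
		// endpoint must reject it for multipart objects.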
		t.Run("multipart object download rejection", func(t *testing.T) {
			defer ctx.Check(deleteBucket("pip-first"))
			defer ctx.Check(deleteBucket("pip-second"))
			defer ctx.Check(deleteBucket("pip-third"))

			data := testrand.Bytes(20 * memory.KB)
			err := planet.Uplinks[0].Upload(ctx, planet.Satellites[0], "pip-first", "non-multipart-object", data)
			require.NoError(t, err)

			project, err := planet.Uplinks[0].OpenProject(ctx, planet.Satellites[0])
			require.NoError(t, err)
			defer ctx.Check(project.Close)

			_, err = project.EnsureBucket(ctx, "pip-second")
			require.NoError(t, err)
			info, err := project.BeginUpload(ctx, "pip-second", "multipart-object", nil)
			require.NoError(t, err)
			upload, err := project.UploadPart(ctx, "pip-second", "multipart-object", info.UploadID, 1)
			require.NoError(t, err)
			_, err = upload.Write(data)
			require.NoError(t, err)
			require.NoError(t, upload.Commit())
			_, err = project.CommitUpload(ctx, "pip-second", "multipart-object", info.UploadID, nil)
			require.NoError(t, err)

			_, err = project.EnsureBucket(ctx, "pip-third")
			require.NoError(t, err)
			info, err = project.BeginUpload(ctx, "pip-third", "multipart-object-third", nil)
			require.NoError(t, err)
			for i := 0; i < 4; i++ {
				upload, err := project.UploadPart(ctx, "pip-third", "multipart-object-third", info.UploadID, uint32(i+1))
				require.NoError(t, err)
				_, err = upload.Write(data)
				require.NoError(t, err)
				require.NoError(t, upload.Commit())
			}
			_, err = project.CommitUpload(ctx, "pip-third", "multipart-object-third", info.UploadID, nil)
			require.NoError(t, err)

			objects, err := planet.Satellites[0].Metabase.DB.TestingAllCommittedObjects(ctx, planet.Uplinks[0].Projects[0].ID, "pip-first")
			require.NoError(t, err)
			require.Len(t, objects, 1)

			// verify that standard objects can be downloaded in an old way (index = -1 as last segment)
			object, err := metainfoClient.GetObject(ctx, metaclient.GetObjectParams{
				Bucket:             []byte("pip-first"),
				EncryptedObjectKey: []byte(objects[0].ObjectKey),
			})
			require.NoError(t, err)
			_, err = metainfoClient.DownloadSegmentWithRS(ctx, metaclient.DownloadSegmentParams{
				StreamID: object.StreamID,
				Position: metaclient.SegmentPosition{
					Index: -1,
				},
			})
			require.NoError(t, err)

			objects, err = planet.Satellites[0].Metabase.DB.TestingAllCommittedObjects(ctx, planet.Uplinks[0].Projects[0].ID, "pip-second")
			require.NoError(t, err)
			require.Len(t, objects, 1)

			// verify that multipart objects (single segment) CANNOT be downloaded in an old way (index = -1 as last segment)
			object, err = metainfoClient.GetObject(ctx, metaclient.GetObjectParams{
				Bucket:             []byte("pip-second"),
				EncryptedObjectKey: []byte(objects[0].ObjectKey),
			})
			require.NoError(t, err)
			_, err = metainfoClient.DownloadSegmentWithRS(ctx, metaclient.DownloadSegmentParams{
				StreamID: object.StreamID,
				Position: metaclient.SegmentPosition{
					Index: -1,
				},
			})
			require.Error(t, err)
			require.Contains(t, err.Error(), "Used uplink version cannot download multipart objects.")

			objects, err = planet.Satellites[0].Metabase.DB.TestingAllCommittedObjects(ctx, planet.Uplinks[0].Projects[0].ID, "pip-third")
			require.NoError(t, err)
			require.Len(t, objects, 1)

			// verify that multipart objects (multiple segments) CANNOT be downloaded in an old way (index = -1 as last segment)
			object, err = metainfoClient.GetObject(ctx, metaclient.GetObjectParams{
				Bucket:             []byte("pip-third"),
				EncryptedObjectKey: []byte(objects[0].ObjectKey),
			})
			require.NoError(t, err)
			_, err = metainfoClient.DownloadSegmentWithRS(ctx, metaclient.DownloadSegmentParams{
				StreamID: object.StreamID,
				Position: metaclient.SegmentPosition{
					Index: -1,
				},
			})
			require.Error(t, err)
			require.Contains(t, err.Error(), "Used uplink version cannot download multipart objects.")
		})

		t.Run("object override on upload", func(t *testing.T) {
			defer ctx.Check(deleteBucket("pip-first"))

			initialData := testrand.Bytes(20 * memory.KB)
			overrideData := testrand.Bytes(25 * memory.KB)

			{ // committed object
				// upload committed object
				err := planet.Uplinks[0].Upload(ctx, planet.Satellites[0], "pip-first", "committed-object", initialData)
				require.NoError(t, err)

				// upload once again to override
				err = planet.Uplinks[0].Upload(ctx, planet.Satellites[0], "pip-first", "committed-object", overrideData)
				require.NoError(t, err)

				data, err := planet.Uplinks[0].Download(ctx, planet.Satellites[0], "pip-first", "committed-object")
				require.NoError(t, err)
				require.Equal(t, overrideData, data)
			}

			{ // pending object
				project, err := planet.Uplinks[0].OpenProject(ctx, planet.Satellites[0])
				require.NoError(t, err)
				defer ctx.Check(project.Close)

				// upload pending object
				info, err := project.BeginUpload(ctx, "pip-first", "pending-object", nil)
				require.NoError(t, err)
				upload, err := project.UploadPart(ctx, "pip-first", "pending-object", info.UploadID, 1)
				require.NoError(t, err)
				_, err = upload.Write(initialData)
				require.NoError(t, err)
				require.NoError(t, upload.Commit())

				// upload once again to override
				err = planet.Uplinks[0].Upload(ctx, planet.Satellites[0], "pip-first", "pending-object", overrideData)
				require.NoError(t, err)

				data, err := planet.Uplinks[0].Download(ctx, planet.Satellites[0], "pip-first", "pending-object")
				require.NoError(t, err)
				require.Equal(t, overrideData, data)
			}
		})
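
		// Overwrites are last-writer-wins at commit time: as exercised above, a
		// pending multipart upload for the same key does not block a regular
		// upload from committing and becoming the downloadable version.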

		t.Run("upload with placement", func(t *testing.T) {
			defer ctx.Check(deleteBucket("initial-bucket"))

			bucketName := "initial-bucket"
			objectName := "file1"

			bucketsService := planet.Satellites[0].API.Buckets.Service

			bucket := buckets.Bucket{
				Name:      bucketName,
				ProjectID: planet.Uplinks[0].Projects[0].ID,
				Placement: storj.EU,
			}
			_, err := bucketsService.CreateBucket(ctx, bucket)
			require.NoError(t, err)

			// this should be bigger than the max inline segment
			content := make([]byte, 5000)
			err = planet.Uplinks[0].Upload(ctx, planet.Satellites[0], bucket.Name, objectName, content)
			require.NoError(t, err)

			segments, err := planet.Satellites[0].Metabase.DB.TestingAllSegments(ctx)
			require.NoError(t, err)
			require.Equal(t, 1, len(segments))
			require.Equal(t, storj.EU, segments[0].Placement)
		})
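
		// Placement is recorded on the bucket and applied to each segment when
		// it is committed, which is why the subtest above asserts on segments
		// rather than only on the bucket record. A hedged sketch for checking
		// the stored bucket constraint directly (names from the subtest above):
		//
		//	b, err := bucketsService.GetBucket(ctx, []byte(bucketName), planet.Uplinks[0].Projects[0].ID)
		//	// expect b.Placement == storj.EU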

		t.Run("multiple versions", func(t *testing.T) {
			defer ctx.Check(deleteBucket("multipleversions"))

			err = planet.Uplinks[0].Upload(ctx, planet.Satellites[0], "multipleversions", "object", testrand.Bytes(10*memory.MiB))
			require.NoError(t, err)

			// override object to have it with version 2
			expectedData := testrand.Bytes(11 * memory.KiB)
			err = planet.Uplinks[0].Upload(ctx, planet.Satellites[0], "multipleversions", "object", expectedData)
			require.NoError(t, err)

			objects, err := planet.Satellites[0].Metabase.DB.TestingAllObjects(ctx)
			require.NoError(t, err)
			require.Len(t, objects, 1)
			require.EqualValues(t, 2, objects[0].Version)

			// add some pending uploads, each will have a version higher than 2
			uploadIDs := []string{}
			for i := 0; i < 10; i++ {
				info, err := project.BeginUpload(ctx, "multipleversions", "object", nil)
				require.NoError(t, err)
				uploadIDs = append(uploadIDs, info.UploadID)
			}

			checkDownload := func(objectKey string, expectedData []byte) {
				data, err := planet.Uplinks[0].Download(ctx, planet.Satellites[0], "multipleversions", objectKey)
				require.NoError(t, err)
				require.Equal(t, expectedData, data)
			}

			checkDownload("object", expectedData)

			err = project.MoveObject(ctx, "multipleversions", "object", "multipleversions", "object_moved", nil)
			require.NoError(t, err)

			checkDownload("object_moved", expectedData)

			err = project.MoveObject(ctx, "multipleversions", "object_moved", "multipleversions", "object", nil)
			require.NoError(t, err)

			checkDownload("object", expectedData)

			iterator := project.ListObjects(ctx, "multipleversions", nil)
			require.True(t, iterator.Next())
			require.Equal(t, "object", iterator.Item().Key)
			require.NoError(t, iterator.Err())

			// upload multipleversions/object once again as we just moved it
			err = planet.Uplinks[0].Upload(ctx, planet.Satellites[0], "multipleversions", "object", expectedData)
			require.NoError(t, err)

			checkDownload("object", expectedData)

			{ // server side copy
				_, err = project.CopyObject(ctx, "multipleversions", "object", "multipleversions", "object_copy", nil)
				require.NoError(t, err)

				checkDownload("object_copy", expectedData)

				_, err = project.DeleteObject(ctx, "multipleversions", "object")
				require.NoError(t, err)

				_, err = project.CopyObject(ctx, "multipleversions", "object_copy", "multipleversions", "object", nil)
				require.NoError(t, err)

				checkDownload("object", expectedData)

				_, err = project.DeleteObject(ctx, "multipleversions", "object_copy")
				require.NoError(t, err)

				checkDownload("object", expectedData)
			}

			err = project.AbortUpload(ctx, "multipleversions", "object", uploadIDs[0])
			require.NoError(t, err)
			checkDownload("object", expectedData)

			expectedData = testrand.Bytes(12 * memory.KiB)
			upload, err := project.UploadPart(ctx, "multipleversions", "object", uploadIDs[1], 1)
			require.NoError(t, err)
			_, err = upload.Write(expectedData)
			require.NoError(t, err)
			require.NoError(t, upload.Commit())
			_, err = project.CommitUpload(ctx, "multipleversions", "object", uploadIDs[1], nil)
			require.NoError(t, err)

			checkDownload("object", expectedData)

			_, err = project.DeleteObject(ctx, "multipleversions", "object")
			require.NoError(t, err)

			_, err = project.DeleteObject(ctx, "multipleversions", "object_moved")
			require.NoError(t, err)

			iterator = project.ListObjects(ctx, "multipleversions", nil)
			require.False(t, iterator.Next())
			require.NoError(t, iterator.Err())

			// use next available pending upload
			upload, err = project.UploadPart(ctx, "multipleversions", "object", uploadIDs[2], 1)
			require.NoError(t, err)
			_, err = upload.Write(expectedData)
			require.NoError(t, err)
			require.NoError(t, upload.Commit())
			_, err = project.CommitUpload(ctx, "multipleversions", "object", uploadIDs[2], nil)
			require.NoError(t, err)

			checkDownload("object", expectedData)

			uploads := project.ListUploads(ctx, "multipleversions", nil)
			count := 0
			for uploads.Next() {
				require.Equal(t, "object", uploads.Item().Key)
				count++
			}
			// we started with 10 pending uploads and aborted/committed 3 of them during the test
			pendingUploadsLeft := 7
			require.Equal(t, pendingUploadsLeft, count)
		})
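
		// The versions asserted above are internal metabase versions, not
		// user-facing object versions: every successful commit of the same key
		// bumps the version, and pending multipart uploads reserve versions
		// above the highest committed one until they are aborted or committed.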

		t.Run("override object", func(t *testing.T) {
			defer ctx.Check(deleteBucket("bucket"))

			bucketName := "bucket"
			objectName := "file1"

			err := planet.Uplinks[0].Upload(ctx, planet.Satellites[0], bucketName, objectName, testrand.Bytes(5*memory.KiB))
			require.NoError(t, err)

			expectedData := testrand.Bytes(5 * memory.KiB)
			err = planet.Uplinks[0].Upload(ctx, planet.Satellites[0], bucketName, objectName, expectedData)
			require.NoError(t, err)

			data, err := planet.Uplinks[0].Download(ctx, planet.Satellites[0], bucketName, objectName)
			require.NoError(t, err)
			require.Equal(t, expectedData, data)
		})
	})
}

func TestMoveObject_Geofencing(t *testing.T) {
	testplanet.Run(t,
		testplanet.Config{
			SatelliteCount: 1, StorageNodeCount: 0, UplinkCount: 1,
		},
		func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
			satellite := planet.Satellites[0]
			buckets := satellite.API.Buckets.Service
			uplink := planet.Uplinks[0]
			projectID := uplink.Projects[0].ID

			// create buckets with different placement
			createGeofencedBucket(t, ctx, buckets, projectID, "global1", storj.EveryCountry)
			createGeofencedBucket(t, ctx, buckets, projectID, "global2", storj.EveryCountry)
			createGeofencedBucket(t, ctx, buckets, projectID, "us1", storj.US)
			createGeofencedBucket(t, ctx, buckets, projectID, "us2", storj.US)
			createGeofencedBucket(t, ctx, buckets, projectID, "eu1", storj.EU)

			// upload an object to one of the global buckets
			err := uplink.Upload(ctx, satellite, "global1", "testobject", []byte{})
			require.NoError(t, err)

			project, err := uplink.GetProject(ctx, satellite)
			require.NoError(t, err)

			// move the object to a new key within the same bucket
			err = project.MoveObject(ctx, "global1", "testobject", "global1", "movedobject", nil)
			require.NoError(t, err)

			// move the object to the other global bucket
			err = project.MoveObject(ctx, "global1", "movedobject", "global2", "movedobject", nil)
			require.NoError(t, err)

			// move the object to a geofenced bucket - should fail
			err = project.MoveObject(ctx, "global2", "movedobject", "us1", "movedobject", nil)
			require.Error(t, err)

			// upload an object to one of the US-geofenced buckets
			err = uplink.Upload(ctx, satellite, "us1", "testobject", []byte{})
			require.NoError(t, err)

			// move the object to a new key within the same bucket
			err = project.MoveObject(ctx, "us1", "testobject", "us1", "movedobject", nil)
			require.NoError(t, err)

			// move the object to the other US-geofenced bucket
			err = project.MoveObject(ctx, "us1", "movedobject", "us2", "movedobject", nil)
			require.NoError(t, err)

			// move the object to the EU-geofenced bucket - should fail
			err = project.MoveObject(ctx, "us2", "movedobject", "eu1", "movedobject", nil)
			require.Error(t, err)

			// move the object to a non-geofenced bucket - should fail
			err = project.MoveObject(ctx, "us2", "movedobject", "global1", "movedobject", nil)
			require.Error(t, err)
		},
	)
}

func createGeofencedBucket(t *testing.T, ctx *testcontext.Context, service *buckets.Service, projectID uuid.UUID, bucketName string, placement storj.PlacementConstraint) {
	// generate the bucket id
	bucketID, err := uuid.New()
	require.NoError(t, err)

	// create the bucket
	_, err = service.CreateBucket(ctx, buckets.Bucket{
		ID:        bucketID,
		Name:      bucketName,
		ProjectID: projectID,
		Placement: placement,
	})
	require.NoError(t, err)

	// check that the bucket placement is correct
	bucket, err := service.GetBucket(ctx, []byte(bucketName), projectID)
	require.NoError(t, err)
	require.Equal(t, placement, bucket.Placement)
}

func TestEndpoint_DeleteCommittedObject(t *testing.T) {
	createObject := func(ctx context.Context, t *testing.T, planet *testplanet.Planet, bucket, key string, data []byte) {
		err := planet.Uplinks[0].Upload(ctx, planet.Satellites[0], bucket, key, data)
		require.NoError(t, err)
	}
	deleteObject := func(ctx context.Context, t *testing.T, planet *testplanet.Planet, bucket, encryptedKey string, streamID uuid.UUID) {
		projectID := planet.Uplinks[0].Projects[0].ID

		_, err := planet.Satellites[0].Metainfo.Endpoint.DeleteCommittedObject(ctx, projectID, bucket, metabase.ObjectKey(encryptedKey))
		require.NoError(t, err)
	}
	testDeleteObject(t, createObject, deleteObject)
}

func TestEndpoint_DeletePendingObject(t *testing.T) {
	createPendingObject := func(ctx context.Context, t *testing.T, planet *testplanet.Planet, bucket, key string, data []byte) {
		// TODO This should be replaced by a call to testplanet.Uplink.MultipartUpload when available.
		project, err := planet.Uplinks[0].OpenProject(ctx, planet.Satellites[0])
		require.NoError(t, err, "failed to retrieve project")
		defer func() { require.NoError(t, project.Close()) }()

		_, err = project.EnsureBucket(ctx, bucket)
		require.NoError(t, err, "failed to create bucket")

		info, err := project.BeginUpload(ctx, bucket, key, &uplink.UploadOptions{})
		require.NoError(t, err, "failed to start multipart upload")

		upload, err := project.UploadPart(ctx, bucket, key, info.UploadID, 1)
		require.NoError(t, err, "failed to put object part")
		_, err = upload.Write(data)
		require.NoError(t, err, "failed to write object part")
		require.NoError(t, upload.Commit(), "failed to commit object part")
	}
	deletePendingObject := func(ctx context.Context, t *testing.T, planet *testplanet.Planet, bucket, encryptedKey string, streamID uuid.UUID) {
		projectID := planet.Uplinks[0].Projects[0].ID

		deletedObjects, err := planet.Satellites[0].Metainfo.Endpoint.DeletePendingObject(ctx,
			metabase.ObjectStream{
				ProjectID:  projectID,
				BucketName: bucket,
				ObjectKey:  metabase.ObjectKey(encryptedKey),
				Version:    metabase.DefaultVersion,
				StreamID:   streamID,
			}, false)
		require.NoError(t, err)
		require.Len(t, deletedObjects, 1)
	}
	testDeleteObject(t, createPendingObject, deletePendingObject)
}

func TestEndpoint_AbortMultipartUpload_UsePendingObjectsTable(t *testing.T) {
	testplanet.Run(t, testplanet.Config{
		SatelliteCount: 1, StorageNodeCount: 4, UplinkCount: 1,
		Reconfigure: testplanet.Reconfigure{
			Satellite: func(log *zap.Logger, index int, config *satellite.Config) {
				config.Metainfo.UsePendingObjectsTable = true
			},
		},
	}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
		project, err := planet.Uplinks[0].OpenProject(ctx, planet.Satellites[0])
		require.NoError(t, err)
		defer ctx.Check(project.Close)

		_, err = project.CreateBucket(ctx, "testbucket")
		require.NoError(t, err)

		uploadInfo, err := project.BeginUpload(ctx, "testbucket", "key", nil)
		require.NoError(t, err)

		objects, err := planet.Satellites[0].Metabase.DB.TestingAllPendingObjects(ctx)
		require.NoError(t, err)
		require.Len(t, objects, 1)

		err = project.AbortUpload(ctx, "testbucket", "key", uploadInfo.UploadID)
		require.NoError(t, err)

		objects, err = planet.Satellites[0].Metabase.DB.TestingAllPendingObjects(ctx)
		require.NoError(t, err)
		require.Len(t, objects, 0)
	})
}
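
// testDeleteObject is shared by TestEndpoint_DeleteCommittedObject and
// TestEndpoint_DeletePendingObject above: the callers inject how an object is
// created and how it is deleted, and the helper only verifies the
// storage-node accounting that both deletion paths must have in common.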
func testDeleteObject(t *testing.T,
	createObject func(ctx context.Context, t *testing.T, planet *testplanet.Planet, bucket, key string, data []byte),
	deleteObject func(ctx context.Context, t *testing.T, planet *testplanet.Planet, bucket, encryptedKey string, streamID uuid.UUID),
) {
	bucketName := "deleteobjects"
	t.Run("all nodes up", func(t *testing.T) {
		t.Parallel()

		var testCases = []struct {
			caseDescription string
			objData         []byte
			hasRemote       bool
		}{
			{caseDescription: "one remote segment", objData: testrand.Bytes(10 * memory.KiB)},
			{caseDescription: "one inline segment", objData: testrand.Bytes(3 * memory.KiB)},
			{caseDescription: "several segments (all remote)", objData: testrand.Bytes(50 * memory.KiB)},
			{caseDescription: "several segments (remote + inline)", objData: testrand.Bytes(33 * memory.KiB)},
		}

		testplanet.Run(t, testplanet.Config{
			SatelliteCount: 1, StorageNodeCount: 4, UplinkCount: 1,
			Reconfigure: testplanet.Reconfigure{
				// Reconfigure RS to ensure that we don't have long-tail cancellations
				// and the upload doesn't leave garbage in the SNs
				Satellite: testplanet.Combine(
					testplanet.ReconfigureRS(2, 2, 4, 4),
					testplanet.MaxSegmentSize(13*memory.KiB),
				),
			},
		}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
			for _, tc := range testCases {
				tc := tc
				t.Run(tc.caseDescription, func(t *testing.T) {
					createObject(ctx, t, planet, bucketName, tc.caseDescription, tc.objData)

					// calculate the SNs total used space after data upload
					var totalUsedSpace int64
					for _, sn := range planet.StorageNodes {
						piecesTotal, _, err := sn.Storage2.Store.SpaceUsedForPieces(ctx)
						require.NoError(t, err)
						totalUsedSpace += piecesTotal
					}

					objects, err := planet.Satellites[0].Metabase.DB.TestingAllObjects(ctx)
					require.NoError(t, err)
					for _, object := range objects {
						deleteObject(ctx, t, planet, bucketName, string(object.ObjectKey), object.StreamID)
					}

					planet.WaitForStorageNodeDeleters(ctx)

					// calculate the SNs used space after deleting the pieces
					var totalUsedSpaceAfterDelete int64
					for _, sn := range planet.StorageNodes {
						piecesTotal, _, err := sn.Storage2.Store.SpaceUsedForPieces(ctx)
						require.NoError(t, err)
						totalUsedSpaceAfterDelete += piecesTotal
					}

					// we are not deleting data from SNs right away, so used space should be the same
					require.Equal(t, totalUsedSpace, totalUsedSpaceAfterDelete)
				})
			}
		})
	})
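
	// Piece deletion is asynchronous on the satellite side: deleted segments
	// are left for garbage collection instead of being removed from storage
	// nodes synchronously, which is why these subtests compare used space for
	// equality rather than expecting it to drop immediately.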

	t.Run("some nodes down", func(t *testing.T) {
		t.Parallel()

		var testCases = []struct {
			caseDescription string
			objData         []byte
		}{
			{caseDescription: "one remote segment", objData: testrand.Bytes(10 * memory.KiB)},
			{caseDescription: "several segments (all remote)", objData: testrand.Bytes(50 * memory.KiB)},
			{caseDescription: "several segments (remote + inline)", objData: testrand.Bytes(33 * memory.KiB)},
		}

		testplanet.Run(t, testplanet.Config{
			SatelliteCount: 1, StorageNodeCount: 4, UplinkCount: 1,
			Reconfigure: testplanet.Reconfigure{
				// Reconfigure RS to ensure that we don't have long-tail cancellations
				// and the upload doesn't leave garbage in the SNs
				Satellite: testplanet.Combine(
					testplanet.ReconfigureRS(2, 2, 4, 4),
					testplanet.MaxSegmentSize(13*memory.KiB),
				),
			},
		}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
			numToShutdown := 2

			for _, tc := range testCases {
				createObject(ctx, t, planet, bucketName, tc.caseDescription, tc.objData)
			}

			require.NoError(t, planet.WaitForStorageNodeEndpoints(ctx))

			// Shutdown the first numToShutdown storage nodes before we delete the pieces
			// and collect used space values for those nodes
			snUsedSpace := make([]int64, len(planet.StorageNodes))
			for i, node := range planet.StorageNodes {
				var err error
				snUsedSpace[i], _, err = node.Storage2.Store.SpaceUsedForPieces(ctx)
				require.NoError(t, err)

				if i < numToShutdown {
					require.NoError(t, planet.StopPeer(node))
				}
			}

			objects, err := planet.Satellites[0].Metabase.DB.TestingAllObjects(ctx)
			require.NoError(t, err)
			for _, object := range objects {
				deleteObject(ctx, t, planet, bucketName, string(object.ObjectKey), object.StreamID)
			}

			planet.WaitForStorageNodeDeleters(ctx)

			// we are not deleting data from SNs right away, so used space should be the same
			// for both online and offline nodes
			for i, sn := range planet.StorageNodes {
				usedSpace, _, err := sn.Storage2.Store.SpaceUsedForPieces(ctx)
				require.NoError(t, err)

				require.Equal(t, snUsedSpace[i], usedSpace, "StorageNode #%d", i)
			}
		})
	})

	t.Run("all nodes down", func(t *testing.T) {
		t.Parallel()

		var testCases = []struct {
			caseDescription string
			objData         []byte
		}{
			{caseDescription: "one remote segment", objData: testrand.Bytes(10 * memory.KiB)},
			{caseDescription: "several segments (all remote)", objData: testrand.Bytes(50 * memory.KiB)},
			{caseDescription: "several segments (remote + inline)", objData: testrand.Bytes(33 * memory.KiB)},
		}

		testplanet.Run(t, testplanet.Config{
			SatelliteCount: 1, StorageNodeCount: 4, UplinkCount: 1,
			Reconfigure: testplanet.Reconfigure{
				// Reconfigure RS to ensure that we don't have long-tail cancellations
				// and the upload doesn't leave garbage in the SNs
				Satellite: testplanet.Combine(
					testplanet.ReconfigureRS(2, 2, 4, 4),
					testplanet.MaxSegmentSize(13*memory.KiB),
				),
			},
		}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
			for _, tc := range testCases {
				createObject(ctx, t, planet, bucketName, tc.caseDescription, tc.objData)
			}

			// calculate the SNs total used space after data upload
			var usedSpaceBeforeDelete int64
			for _, sn := range planet.StorageNodes {
				piecesTotal, _, err := sn.Storage2.Store.SpaceUsedForPieces(ctx)
				require.NoError(t, err)
				usedSpaceBeforeDelete += piecesTotal
			}

			// Shutdown all the storage nodes before we delete the pieces
			for _, sn := range planet.StorageNodes {
				require.NoError(t, planet.StopPeer(sn))
			}

			objects, err := planet.Satellites[0].Metabase.DB.TestingAllObjects(ctx)
			require.NoError(t, err)
			for _, object := range objects {
				deleteObject(ctx, t, planet, bucketName, string(object.ObjectKey), object.StreamID)
			}

			// Check that the storage nodes which were offline when the pieces
			// were deleted are still holding data
			var totalUsedSpace int64
			for _, sn := range planet.StorageNodes {
				piecesTotal, _, err := sn.Storage2.Store.SpaceUsedForPieces(ctx)
				require.NoError(t, err)
				totalUsedSpace += piecesTotal
			}

			require.Equal(t, usedSpaceBeforeDelete, totalUsedSpace, "totalUsedSpace")
		})
	})
}
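
// Server-side copy is metadata-only: BeginCopyObject returns the existing
// per-segment key material, the client re-encrypts those keys for the
// destination object, and FinishCopyObject records the new object without
// moving any segment data. TestEndpoint_CopyObject below drives the endpoint
// directly to cover that flow plus metadata validation and quota enforcement.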
func TestEndpoint_CopyObject(t *testing.T) {
	testplanet.Run(t, testplanet.Config{
		SatelliteCount: 1, StorageNodeCount: 0, UplinkCount: 4,
	}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
		apiKey := planet.Uplinks[0].APIKey[planet.Satellites[0].ID()]
		satelliteSys := planet.Satellites[0]
		uplnk := planet.Uplinks[0]

		// upload a small inline object
		err := uplnk.Upload(ctx, planet.Satellites[0], "testbucket", "testobject", testrand.Bytes(1*memory.KiB))
		require.NoError(t, err)
		objects, err := satelliteSys.API.Metainfo.Metabase.TestingAllObjects(ctx)
		require.NoError(t, err)
		require.Len(t, objects, 1)

		getResp, err := satelliteSys.API.Metainfo.Endpoint.GetObject(ctx, &pb.ObjectGetRequest{
			Header: &pb.RequestHeader{
				ApiKey: apiKey.SerializeRaw(),
			},
			Bucket:             []byte("testbucket"),
			EncryptedObjectKey: []byte(objects[0].ObjectKey),
		})
		require.NoError(t, err)

		testEncryptedMetadataNonce := testrand.Nonce()
		// begin the copy; this also hands back the metadata and segment keys to re-encrypt
		beginResp, err := satelliteSys.API.Metainfo.Endpoint.BeginCopyObject(ctx, &pb.ObjectBeginCopyRequest{
			Header: &pb.RequestHeader{
				ApiKey: apiKey.SerializeRaw(),
			},
			Bucket:                getResp.Object.Bucket,
			EncryptedObjectKey:    getResp.Object.EncryptedObjectKey,
			NewBucket:             []byte("testbucket"),
			NewEncryptedObjectKey: []byte("newencryptedkey"),
		})
		require.NoError(t, err)
		assert.Len(t, beginResp.SegmentKeys, 1)
		assert.Equal(t, beginResp.EncryptedMetadataKey, objects[0].EncryptedMetadataEncryptedKey)
		assert.Equal(t, beginResp.EncryptedMetadataKeyNonce.Bytes(), objects[0].EncryptedMetadataNonce)

		segmentKeys := pb.EncryptedKeyAndNonce{
			Position:          beginResp.SegmentKeys[0].Position,
			EncryptedKeyNonce: testrand.Nonce(),
			EncryptedKey:      []byte("newencryptedkey"),
		}

		{
			// metadata too large
			_, err = satelliteSys.API.Metainfo.Endpoint.FinishCopyObject(ctx, &pb.ObjectFinishCopyRequest{
				Header: &pb.RequestHeader{
					ApiKey: apiKey.SerializeRaw(),
				},
				StreamId:                     getResp.Object.StreamId,
				NewBucket:                    []byte("testbucket"),
				NewEncryptedObjectKey:        []byte("newobjectkey"),
				NewEncryptedMetadata:         testrand.Bytes(satelliteSys.Config.Metainfo.MaxMetadataSize + 1),
				NewEncryptedMetadataKeyNonce: testEncryptedMetadataNonce,
				NewEncryptedMetadataKey:      []byte("encryptedmetadatakey"),
				NewSegmentKeys:               []*pb.EncryptedKeyAndNonce{&segmentKeys},
			})
			require.True(t, errs2.IsRPC(err, rpcstatus.InvalidArgument))

			// invalid encrypted metadata key
			_, err = satelliteSys.API.Metainfo.Endpoint.FinishCopyObject(ctx, &pb.ObjectFinishCopyRequest{
				Header: &pb.RequestHeader{
					ApiKey: apiKey.SerializeRaw(),
				},
				StreamId:                     getResp.Object.StreamId,
				NewBucket:                    []byte("testbucket"),
				NewEncryptedObjectKey:        []byte("newobjectkey"),
				NewEncryptedMetadata:         testrand.Bytes(satelliteSys.Config.Metainfo.MaxMetadataSize),
				NewEncryptedMetadataKeyNonce: testEncryptedMetadataNonce,
				NewEncryptedMetadataKey:      []byte("encryptedmetadatakey"),
				NewSegmentKeys:               []*pb.EncryptedKeyAndNonce{&segmentKeys},
			})
			require.True(t, errs2.IsRPC(err, rpcstatus.InvalidArgument))
		}

		_, err = satelliteSys.API.Metainfo.Endpoint.FinishCopyObject(ctx, &pb.ObjectFinishCopyRequest{
			Header: &pb.RequestHeader{
				ApiKey: apiKey.SerializeRaw(),
			},
			StreamId:                     getResp.Object.StreamId,
			NewBucket:                    []byte("testbucket"),
			NewEncryptedObjectKey:        []byte("newobjectkey"),
			NewEncryptedMetadataKeyNonce: testEncryptedMetadataNonce,
			NewEncryptedMetadataKey:      []byte("encryptedmetadatakey"),
			NewSegmentKeys:               []*pb.EncryptedKeyAndNonce{&segmentKeys},
		})
		require.NoError(t, err)

		objectsAfterCopy, err := satelliteSys.API.Metainfo.Metabase.TestingAllObjects(ctx)
		require.NoError(t, err)
		require.Len(t, objectsAfterCopy, 2)

		getCopyResp, err := satelliteSys.API.Metainfo.Endpoint.GetObject(ctx, &pb.ObjectGetRequest{
			Header: &pb.RequestHeader{
				ApiKey: apiKey.SerializeRaw(),
			},
			Bucket:             []byte("testbucket"),
			EncryptedObjectKey: []byte("newobjectkey"),
		})
		require.NoError(t, err, objectsAfterCopy[1])
		require.NotEqual(t, getResp.Object.StreamId, getCopyResp.Object.StreamId)
		require.NotZero(t, getCopyResp.Object.StreamId)
		require.Equal(t, getResp.Object.InlineSize, getCopyResp.Object.InlineSize)

		// compare segments
		originalSegment, err := satelliteSys.API.Metainfo.Endpoint.DownloadSegment(ctx, &pb.SegmentDownloadRequest{
			Header: &pb.RequestHeader{
				ApiKey: apiKey.SerializeRaw(),
			},
			StreamId:       getResp.Object.StreamId,
			CursorPosition: segmentKeys.Position,
		})
		require.NoError(t, err)
		copiedSegment, err := satelliteSys.API.Metainfo.Endpoint.DownloadSegment(ctx, &pb.SegmentDownloadRequest{
			Header: &pb.RequestHeader{
				ApiKey: apiKey.SerializeRaw(),
			},
			StreamId:       getCopyResp.Object.StreamId,
			CursorPosition: segmentKeys.Position,
		})
		require.NoError(t, err)
		require.Equal(t, originalSegment.EncryptedInlineData, copiedSegment.EncryptedInlineData)

		{ // test copy respects project storage size limit
			// set storage limit
			err = planet.Satellites[0].DB.ProjectAccounting().UpdateProjectUsageLimit(ctx, planet.Uplinks[1].Projects[0].ID, 1000)
			require.NoError(t, err)

			// test object below the limit when copied
			err = planet.Uplinks[1].Upload(ctx, planet.Satellites[0], "testbucket", "testobject", testrand.Bytes(100))
			require.NoError(t, err)
			objects, err = satelliteSys.API.Metainfo.Metabase.TestingAllObjects(ctx)
			require.NoError(t, err)

			_, err = satelliteSys.API.Metainfo.Endpoint.BeginCopyObject(ctx, &pb.ObjectBeginCopyRequest{
				Header: &pb.RequestHeader{
					ApiKey: planet.Uplinks[1].APIKey[planet.Satellites[0].ID()].SerializeRaw(),
				},
				Bucket:                []byte("testbucket"),
				EncryptedObjectKey:    []byte(objects[0].ObjectKey),
				NewBucket:             []byte("testbucket"),
				NewEncryptedObjectKey: []byte("newencryptedobjectkey"),
			})
			require.NoError(t, err)
			err = satelliteSys.API.Metainfo.Metabase.TestingDeleteAll(ctx)
			require.NoError(t, err)

			// set storage limit
			err = planet.Satellites[0].DB.ProjectAccounting().UpdateProjectUsageLimit(ctx, planet.Uplinks[2].Projects[0].ID, 1000)
			require.NoError(t, err)

			// test object exceeding the limit when copied
			err = planet.Uplinks[2].Upload(ctx, planet.Satellites[0], "testbucket", "testobject", testrand.Bytes(400))
			require.NoError(t, err)
			objects, err = satelliteSys.API.Metainfo.Metabase.TestingAllObjects(ctx)
			require.NoError(t, err)

			err = planet.Uplinks[2].CopyObject(ctx, planet.Satellites[0], "testbucket", "testobject", "testbucket", "testobject1")
			require.NoError(t, err)

			_, err = satelliteSys.API.Metainfo.Endpoint.BeginCopyObject(ctx, &pb.ObjectBeginCopyRequest{
				Header: &pb.RequestHeader{
					ApiKey: planet.Uplinks[2].APIKey[planet.Satellites[0].ID()].SerializeRaw(),
				},
				Bucket:                []byte("testbucket"),
				EncryptedObjectKey:    []byte(objects[0].ObjectKey),
				NewBucket:             []byte("testbucket"),
				NewEncryptedObjectKey: []byte("newencryptedobjectkey"),
			})
			assertRPCStatusCode(t, err, rpcstatus.ResourceExhausted)
			assert.EqualError(t, err, "Exceeded Storage Limit")

			// metabaseObjects, err := satelliteSys.API.Metainfo.Metabase.TestingAllObjects(ctx)
			// require.NoError(t, err)
			// metabaseObj := metabaseObjects[0]

			// randomEncKey := testrand.Key()

			// somehow triggers error "proto: can't skip unknown wire type 7" in endpoint.unmarshalSatStreamID
			// _, err = satelliteSys.API.Metainfo.Endpoint.FinishCopyObject(ctx, &pb.ObjectFinishCopyRequest{
			// 	Header: &pb.RequestHeader{
			// 		ApiKey: planet.Uplinks[2].APIKey[planet.Satellites[0].ID()].SerializeRaw(),
			// 	},
			// 	StreamId:                     metabaseObj.ObjectStream.StreamID.Bytes(),
			// 	NewBucket:                    []byte("testbucket"),
			// 	NewEncryptedObjectKey:        []byte("newencryptedobjectkey"),
			// 	NewEncryptedMetadata:         testrand.Bytes(10),
			// 	NewEncryptedMetadataKey:      randomEncKey.Raw()[:],
			// 	NewEncryptedMetadataKeyNonce: testrand.Nonce(),
			// 	NewSegmentKeys: []*pb.EncryptedKeyAndNonce{
			// 		{
			// 			Position: &pb.SegmentPosition{
			// 				PartNumber: 0,
			// 				Index:      0,
			// 			},
			// 			EncryptedKeyNonce: testrand.Nonce(),
			// 			EncryptedKey:      randomEncKey.Raw()[:],
			// 		},
			// 	},
			// })
			// assertRPCStatusCode(t, err, rpcstatus.ResourceExhausted)
			// assert.EqualError(t, err, "Exceeded Storage Limit")

			// test that a smaller object can still be uploaded and copied
			err = planet.Uplinks[2].Upload(ctx, planet.Satellites[0], "testbucket", "testobject2", testrand.Bytes(10))
			require.NoError(t, err)

			err = planet.Uplinks[2].CopyObject(ctx, planet.Satellites[0], "testbucket", "testobject2", "testbucket", "testobject2copy")
			require.NoError(t, err)

			err = satelliteSys.API.Metainfo.Metabase.TestingDeleteAll(ctx)
			require.NoError(t, err)
		}

		{ // test copy respects project segment limit
			// set segment limit
			err = planet.Satellites[0].DB.ProjectAccounting().UpdateProjectSegmentLimit(ctx, planet.Uplinks[3].Projects[0].ID, 2)
			require.NoError(t, err)

			err = planet.Uplinks[3].Upload(ctx, planet.Satellites[0], "testbucket", "testobject", testrand.Bytes(100))
			require.NoError(t, err)
			objects, err = satelliteSys.API.Metainfo.Metabase.TestingAllObjects(ctx)
			require.NoError(t, err)

			err = planet.Uplinks[3].CopyObject(ctx, planet.Satellites[0], "testbucket", "testobject", "testbucket", "testobject1")
			require.NoError(t, err)

			_, err = satelliteSys.API.Metainfo.Endpoint.BeginCopyObject(ctx, &pb.ObjectBeginCopyRequest{
				Header: &pb.RequestHeader{
					ApiKey: planet.Uplinks[3].APIKey[planet.Satellites[0].ID()].SerializeRaw(),
				},
				Bucket:                []byte("testbucket"),
				EncryptedObjectKey:    []byte(objects[0].ObjectKey),
				NewBucket:             []byte("testbucket"),
				NewEncryptedObjectKey: []byte("newencryptedobjectkey1"),
			})
			assertRPCStatusCode(t, err, rpcstatus.ResourceExhausted)
			assert.EqualError(t, err, "Exceeded Segments Limit")
		}
	})
}

func TestEndpoint_ParallelDeletes(t *testing.T) {
	t.Skip("to be fixed - creating deadlocks")
	testplanet.Run(t, testplanet.Config{
		SatelliteCount:   1,
		StorageNodeCount: 4,
		UplinkCount:      1,
	}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
		project, err := planet.Uplinks[0].OpenProject(ctx, planet.Satellites[0])
		require.NoError(t, err)
		defer ctx.Check(project.Close)
		testData := testrand.Bytes(5 * memory.KiB)
		for i := 0; i < 50; i++ {
			err := planet.Uplinks[0].Upload(ctx, planet.Satellites[0], "bucket", "object"+strconv.Itoa(i), testData)
			require.NoError(t, err)
			_, err = project.CopyObject(ctx, "bucket", "object"+strconv.Itoa(i), "bucket", "object"+strconv.Itoa(i)+"copy", nil)
			require.NoError(t, err)
		}
		list := project.ListObjects(ctx, "bucket", nil)
		keys := []string{}
		for list.Next() {
			item := list.Item()
			keys = append(keys, item.Key)
		}
		require.NoError(t, list.Err())
		var wg sync.WaitGroup
		wg.Add(len(keys))
		var errlist errs.Group

		for i, name := range keys {
			name := name
			go func(toDelete string, index int) {
				_, err := project.DeleteObject(ctx, "bucket", toDelete)
				errlist.Add(err)
				wg.Done()
			}(name, i)
		}
		wg.Wait()

		require.NoError(t, errlist.Err())

		// check all objects have been deleted
		listAfterDelete := project.ListObjects(ctx, "bucket", nil)
		require.False(t, listAfterDelete.Next())
		require.NoError(t, listAfterDelete.Err())

		_, err = project.DeleteBucket(ctx, "bucket")
		require.NoError(t, err)
	})
}
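
// The variant below stresses the same code path when every deleted object is
// a server-side copy of a single ancestor object, which is the shape
// suspected of producing the deadlocks mentioned in the skip reason.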
func TestEndpoint_ParallelDeletesSameAncestor(t *testing.T) {
	t.Skip("to be fixed - creating deadlocks")
	testplanet.Run(t, testplanet.Config{
		SatelliteCount:   1,
		StorageNodeCount: 4,
		UplinkCount:      1,
	}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
		project, err := planet.Uplinks[0].OpenProject(ctx, planet.Satellites[0])
		require.NoError(t, err)
		defer ctx.Check(project.Close)
		testData := testrand.Bytes(5 * memory.KiB)
		err = planet.Uplinks[0].Upload(ctx, planet.Satellites[0], "bucket", "original-object", testData)
		require.NoError(t, err)
		for i := 0; i < 50; i++ {
			_, err = project.CopyObject(ctx, "bucket", "original-object", "bucket", "copy"+strconv.Itoa(i), nil)
			require.NoError(t, err)
		}
		list := project.ListObjects(ctx, "bucket", nil)
		keys := []string{}
		for list.Next() {
			item := list.Item()
			keys = append(keys, item.Key)
		}
		require.NoError(t, list.Err())
		var wg sync.WaitGroup
		wg.Add(len(keys))
		var errlist errs.Group

		for i, name := range keys {
			name := name
			go func(toDelete string, index int) {
				_, err := project.DeleteObject(ctx, "bucket", toDelete)
				errlist.Add(err)
				wg.Done()
			}(name, i)
		}
		wg.Wait()

		require.NoError(t, errlist.Err())

		// check all objects have been deleted
		listAfterDelete := project.ListObjects(ctx, "bucket", nil)
		require.False(t, listAfterDelete.Next())
		require.NoError(t, listAfterDelete.Err())

		_, err = project.DeleteBucket(ctx, "bucket")
		require.NoError(t, err)
	})
}

func TestEndpoint_UpdateObjectMetadata(t *testing.T) {
	testplanet.Run(t, testplanet.Config{
		SatelliteCount: 1, StorageNodeCount: 0, UplinkCount: 1,
	}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
		satellite := planet.Satellites[0]
		apiKey := planet.Uplinks[0].APIKey[planet.Satellites[0].ID()].SerializeRaw()
		err := planet.Uplinks[0].Upload(ctx, satellite, "testbucket", "object", testrand.Bytes(256))
		require.NoError(t, err)

		objects, err := satellite.API.Metainfo.Metabase.TestingAllObjects(ctx)
		require.NoError(t, err)

		validMetadata := testrand.Bytes(satellite.Config.Metainfo.MaxMetadataSize)
		validKey := randomEncryptedKey

		getObjectResponse, err := satellite.API.Metainfo.Endpoint.GetObject(ctx, &pb.ObjectGetRequest{
			Header:             &pb.RequestHeader{ApiKey: apiKey},
			Bucket:             []byte("testbucket"),
			EncryptedObjectKey: []byte(objects[0].ObjectKey),
			Version:            int32(objects[0].Version),
		})
		require.NoError(t, err)

		_, err = satellite.API.Metainfo.Endpoint.UpdateObjectMetadata(ctx, &pb.ObjectUpdateMetadataRequest{
			Header:                        &pb.RequestHeader{ApiKey: apiKey},
			Bucket:                        []byte("testbucket"),
			EncryptedObjectKey:            []byte(objects[0].ObjectKey),
			Version:                       int32(objects[0].Version),
			StreamId:                      getObjectResponse.Object.StreamId,
			EncryptedMetadata:             validMetadata,
			EncryptedMetadataEncryptedKey: validKey,
		})
		require.NoError(t, err)

		// too large metadata
		_, err = satellite.API.Metainfo.Endpoint.UpdateObjectMetadata(ctx, &pb.ObjectUpdateMetadataRequest{
			Header:             &pb.RequestHeader{ApiKey: apiKey},
			Bucket:             []byte("testbucket"),
			EncryptedObjectKey: []byte(objects[0].ObjectKey),
			Version:            int32(objects[0].Version),

			EncryptedMetadata:             testrand.Bytes(satellite.Config.Metainfo.MaxMetadataSize + 1),
			EncryptedMetadataEncryptedKey: validKey,
		})
		require.True(t, errs2.IsRPC(err, rpcstatus.InvalidArgument))

		// invalid encrypted metadata key
		_, err = satellite.API.Metainfo.Endpoint.UpdateObjectMetadata(ctx, &pb.ObjectUpdateMetadataRequest{
			Header:             &pb.RequestHeader{ApiKey: apiKey},
			Bucket:             []byte("testbucket"),
			EncryptedObjectKey: []byte(objects[0].ObjectKey),
			Version:            int32(objects[0].Version),

			EncryptedMetadata:             validMetadata,
			EncryptedMetadataEncryptedKey: testrand.Bytes(16),
		})
		require.True(t, errs2.IsRPC(err, rpcstatus.InvalidArgument))
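
		// Both rejections above are argument validation: the oversized metadata
		// exceeds Config.Metainfo.MaxMetadataSize, and the 16-byte key presumably
		// fails the endpoint's encrypted-key length check, given that
		// randomEncryptedKey (defined elsewhere in this file) is accepted.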

		// verify that metadata didn't change with rejected requests
		objects, err = satellite.API.Metainfo.Metabase.TestingAllObjects(ctx)
		require.NoError(t, err)
		require.Equal(t, validMetadata, objects[0].EncryptedMetadata)
		require.Equal(t, validKey, objects[0].EncryptedMetadataEncryptedKey)
	})
}

func TestEndpoint_Object_CopyObject(t *testing.T) {
	testplanet.Run(t, testplanet.Config{
		SatelliteCount: 1, StorageNodeCount: 4, UplinkCount: 1,
	}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
		checkDownload := func(objectKey string, expectedData []byte) {
			data, err := planet.Uplinks[0].Download(ctx, planet.Satellites[0], "multipleversions", objectKey)
			require.NoError(t, err)
			require.Equal(t, expectedData, data)
		}

		expectedDataA := testrand.Bytes(7 * memory.KiB)
		err := planet.Uplinks[0].Upload(ctx, planet.Satellites[0], "multipleversions", "objectA", expectedDataA)
		require.NoError(t, err)

		err = planet.Uplinks[0].Upload(ctx, planet.Satellites[0], "multipleversions", "objectInline", testrand.Bytes(1*memory.KiB))
		require.NoError(t, err)

		err = planet.Uplinks[0].Upload(ctx, planet.Satellites[0], "multipleversions", "objectRemote", testrand.Bytes(10*memory.KiB))
		require.NoError(t, err)

		project, err := planet.Uplinks[0].OpenProject(ctx, planet.Satellites[0])
		require.NoError(t, err)
		defer ctx.Check(project.Close)

		_, err = project.CopyObject(ctx, "multipleversions", "objectA", "multipleversions", "objectInline", nil)
		require.NoError(t, err)

		_, err = project.CopyObject(ctx, "multipleversions", "objectA", "multipleversions", "objectRemote", nil)
		require.NoError(t, err)

		checkDownload("objectInline", expectedDataA)
		checkDownload("objectRemote", expectedDataA)

		expectedDataB := testrand.Bytes(8 * memory.KiB)
		err = planet.Uplinks[0].Upload(ctx, planet.Satellites[0], "multipleversions", "objectInline", expectedDataB)
		require.NoError(t, err)

		err = planet.Uplinks[0].Upload(ctx, planet.Satellites[0], "multipleversions", "objectRemote", expectedDataB)
		require.NoError(t, err)

		checkDownload("objectInline", expectedDataB)
		checkDownload("objectRemote", expectedDataB)
		checkDownload("objectA", expectedDataA)

		expectedDataD := testrand.Bytes(6 * memory.KiB)
		err = planet.Uplinks[0].Upload(ctx, planet.Satellites[0], "multipleversions", "objectA", expectedDataD)
		require.NoError(t, err)

		checkDownload("objectInline", expectedDataB)
		checkDownload("objectRemote", expectedDataB)
		checkDownload("objectA", expectedDataD)

		objects, err := planet.Satellites[0].Metabase.DB.TestingAllObjects(ctx)
		require.NoError(t, err)
		require.Len(t, objects, 3)

		for _, object := range objects {
			require.Greater(t, int64(object.Version), int64(1))
		}

		_, err = project.CopyObject(ctx, "multipleversions", "objectInline", "multipleversions", "objectInlineCopy", nil)
		require.NoError(t, err)

		checkDownload("objectInlineCopy", expectedDataB)

		iterator := project.ListObjects(ctx, "multipleversions", nil)

		items := []string{}
		for iterator.Next() {
			items = append(items, iterator.Item().Key)
		}
		require.NoError(t, iterator.Err())

		sort.Strings(items)
		require.Equal(t, []string{
			"objectA", "objectInline", "objectInlineCopy", "objectRemote",
		}, items)
	})
}

func TestEndpoint_Object_MoveObject(t *testing.T) {
	testplanet.Run(t, testplanet.Config{
		SatelliteCount: 1, StorageNodeCount: 4, UplinkCount: 1,
	}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
		expectedDataA := testrand.Bytes(7 * memory.KiB)

		// upload objectA twice so it ends up with a version different than 1
		err := planet.Uplinks[0].Upload(ctx, planet.Satellites[0], "multipleversions", "objectA", expectedDataA)
		require.NoError(t, err)

		err = planet.Uplinks[0].Upload(ctx, planet.Satellites[0], "multipleversions", "objectA", expectedDataA)
		require.NoError(t, err)

		err = planet.Uplinks[0].Upload(ctx, planet.Satellites[0], "multipleversions", "objectB", testrand.Bytes(1*memory.KiB))
		require.NoError(t, err)

		project, err := planet.Uplinks[0].OpenProject(ctx, planet.Satellites[0])
		require.NoError(t, err)
		defer ctx.Check(project.Close)

		// move is not possible because there is a committed object at the target location
		err = project.MoveObject(ctx, "multipleversions", "objectA", "multipleversions", "objectB", nil)
		require.Error(t, err)
	})
}

func TestListObjectDuplicates(t *testing.T) {
	testplanet.Run(t, testplanet.Config{
		SatelliteCount: 1, StorageNodeCount: 0, UplinkCount: 1,
	}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
		u := planet.Uplinks[0]
		s := planet.Satellites[0]

		const amount = 23

		require.NoError(t, u.CreateBucket(ctx, s, "test"))

		prefixes := []string{"", "aprefix/"}

		// reupload some objects many times to force different
		// object versions internally
		for _, prefix := range prefixes {
			for i := 0; i < amount; i++ {
				version := 1
				if i%2 == 0 {
					version = 2
				} else if i%3 == 0 {
					version = 3
				}

				for v := 0; v < version; v++ {
					require.NoError(t, u.Upload(ctx, s, "test", prefix+fmt.Sprintf("file-%d", i), nil))
				}
			}
		}

		project, err := u.GetProject(ctx, s)
		require.NoError(t, err)
		defer ctx.Check(project.Close)

		for _, prefix := range prefixes {
			prefixLabel := prefix
			if prefixLabel == "" {
				prefixLabel = "empty"
			}

			for _, listLimit := range []int{0, 1, 2, 3, 7, amount - 1, amount} {
				t.Run(fmt.Sprintf("prefix %s limit %d", prefixLabel, listLimit), func(t *testing.T) {
					limitCtx := testuplink.WithListLimit(ctx, listLimit)

					keys := make(map[string]struct{})
					iter := project.ListObjects(limitCtx, "test", &uplink.ListObjectsOptions{
						Prefix: prefix,
					})
					for iter.Next() {
						if iter.Item().IsPrefix {
							continue
						}

						if _, ok := keys[iter.Item().Key]; ok {
							t.Fatal("duplicate", iter.Item().Key, len(keys))
						}
						keys[iter.Item().Key] = struct{}{}
					}
					require.NoError(t, iter.Err())
					require.Equal(t, amount, len(keys))
				})
			}
		}
	})
}

func TestListUploads(t *testing.T) {
	t.Skip() // see TODO at the bottom. this test is now failing.
	testplanet.Run(t, testplanet.Config{
		SatelliteCount:   1,
		StorageNodeCount: 0,
		UplinkCount:      1,
	}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
		// basic ListUploads tests, more tests are on the storj/uplink side
		u := planet.Uplinks[0]
		s := planet.Satellites[0]

		project, err := u.OpenProject(ctx, s)
		require.NoError(t, err)
		defer ctx.Check(project.Close)

		require.NoError(t, u.CreateBucket(ctx, s, "testbucket"))

		// TODO: the number of objects created can be reduced once uplink
		// has an option to control the listing limit value for ListUploads
		for i := 0; i < 1001; i++ {
			_, err := project.BeginUpload(ctx, "testbucket", "object"+strconv.Itoa(i), nil)
			require.NoError(t, err)
		}

		list := project.ListUploads(ctx, "testbucket", nil)
		items := 0
		for list.Next() {
			items++
		}
		require.NoError(t, list.Err())
		// TODO: the result should be 1001, but we have a bug in libuplink
		// where it's not possible to get the second page of results for
		// pending objects. This test will fail once we fix uplink, and we
		// will need to adjust it then.
		require.Equal(t, 1000, items)
	})
}
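
// TestNodeTagPlacement below exercises tag-based placement rules. The rule
// string `0:exclude(tag(...));16:tag(...)` maps placement 0 to "every node
// except those carrying the signed tag" and placement 16 to "only nodes
// carrying it". Tags are signed with the satellite identity via nodetag.Sign,
// so storage nodes cannot self-certify membership.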
func TestNodeTagPlacement(t *testing.T) {
	ctx := testcontext.New(t)

	satelliteIdentity := signing.SignerFromFullIdentity(testidentity.MustPregeneratedSignedIdentity(0, storj.LatestIDVersion()))

	placementRules := overlay.ConfigurablePlacementRule{}
	tag := fmt.Sprintf(`tag("%s", "certified","true")`, satelliteIdentity.ID())
	err := placementRules.Set(fmt.Sprintf(`0:exclude(%s);16:%s`, tag, tag))
	require.NoError(t, err)

	testplanet.Run(t,
		testplanet.Config{
			SatelliteCount:   1,
			StorageNodeCount: 12,
			UplinkCount:      1,
			Reconfigure: testplanet.Reconfigure{
				Satellite: func(log *zap.Logger, index int, config *satellite.Config) {
					config.Metainfo.RS.Min = 3
					config.Metainfo.RS.Repair = 4
					config.Metainfo.RS.Success = 5
					config.Metainfo.RS.Total = 6
					config.Metainfo.MaxInlineSegmentSize = 1
					config.Placement = placementRules
				},
				StorageNode: func(index int, config *storagenode.Config) {
					if index%2 == 0 {
						tags := &pb.NodeTagSet{
							NodeId:   testidentity.MustPregeneratedSignedIdentity(index+1, storj.LatestIDVersion()).ID.Bytes(),
							SignedAt: time.Now().Unix(),
							Tags: []*pb.Tag{
								{
									Name:  "certified",
									Value: []byte("true"),
								},
							},
						}
						signed, err := nodetag.Sign(ctx, tags, satelliteIdentity)
						require.NoError(t, err)

						config.Contact.Tags = contact.SignedTags(pb.SignedNodeTagSets{
							Tags: []*pb.SignedNodeTagSet{
								signed,
							},
						})
					}
				},
			},
		},
		func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
			satellite := planet.Satellites[0]
			buckets := satellite.API.Buckets.Service
			uplink := planet.Uplinks[0]
			projectID := uplink.Projects[0].ID

			apiKey := planet.Uplinks[0].APIKey[planet.Satellites[0].ID()]
			metainfoClient, err := uplink.DialMetainfo(ctx, satellite, apiKey)
			require.NoError(t, err)
			defer func() {
				_ = metainfoClient.Close()
			}()

			nodeIndex := map[storj.NodeID]int{}
			for ix, node := range planet.StorageNodes {
				nodeIndex[node.Identity.ID] = ix
			}
			testPlacement := func(bucketName string, placement int, allowedNodes func(int) bool) {
				createGeofencedBucket(t, ctx, buckets, projectID, bucketName, storj.PlacementConstraint(placement))

				objectNo := 10
				for i := 0; i < objectNo; i++ {
					err := uplink.Upload(ctx, satellite, bucketName, "testobject"+strconv.Itoa(i), make([]byte, 10240))
					require.NoError(t, err)
				}

				objects, _, err := metainfoClient.ListObjects(ctx, metaclient.ListObjectsParams{
					Bucket: []byte(bucketName),
				})
				require.NoError(t, err)
				require.Len(t, objects, objectNo)

				for _, listedObject := range objects {
					for i := 0; i < 5; i++ {
						o, err := metainfoClient.DownloadObject(ctx, metaclient.DownloadObjectParams{
							Bucket:             []byte(bucketName),
							EncryptedObjectKey: listedObject.EncryptedObjectKey,
						})
						require.NoError(t, err)

						for _, limit := range o.DownloadedSegments[0].Limits {
							if limit != nil {
								ix := nodeIndex[limit.Limit.StorageNodeId]
								require.True(t, allowedNodes(ix))
							}
						}
					}
				}
			}
			t.Run("upload to constrained", func(t *testing.T) {
				testPlacement("constrained", 16, func(i int) bool {
					return i%2 == 0
				})
			})
			t.Run("upload to generic excluding constrained", func(t *testing.T) {
				testPlacement("generic", 0, func(i int) bool {
					return i%2 == 1
				})
			})
		},
	)
}