satellite/metainfo: use metabase.SegmentKey with metainfo.Service

Instead of using string or []byte, we will use the dedicated type
SegmentKey.

Change-Id: I6ca8039f0741f6f9837c69a6d070228ed10f2220
Michal Niewrzal 2020-09-03 15:54:56 +02:00
parent 3f0c21323d
commit aa47e70f03
30 changed files with 302 additions and 286 deletions
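
For orientation, a minimal sketch of the call pattern this change introduces. It is illustrative only: the package, function, and variable names below are hypothetical, and the metabase types are assumed from the diff that follows (SegmentKey is the new key type accepted by metainfo.Service; SegmentLocation carries ProjectID, BucketName, Index, and ObjectKey, with Encode() producing a SegmentKey; LastSegmentIndex marks the "l" segment).

package example // hypothetical package, not part of this commit

import (
	"context"

	"storj.io/common/pb"
	"storj.io/common/uuid"
	"storj.io/storj/satellite/metainfo"
	"storj.io/storj/satellite/metainfo/metabase"
)

// getLastSegment sketches the new call pattern: build a SegmentLocation,
// Encode() it into a metabase.SegmentKey, and pass that to the service
// instead of a raw string path.
func getLastSegment(ctx context.Context, service *metainfo.Service, projectID uuid.UUID, bucket string, encryptedPath []byte) (*pb.Pointer, error) {
	location := metabase.SegmentLocation{
		ProjectID:  projectID,
		BucketName: bucket,
		Index:      metabase.LastSegmentIndex, // the "l" (last) segment
		ObjectKey:  metabase.ObjectKey(encryptedPath),
	}
	// Encode() produces the SegmentKey that Get/Put/Delete/List now accept.
	return service.Get(ctx, location.Encode())
}

Raw keys read back from the pointer database can still be converted directly, e.g. metabase.SegmentKey(keys[0]), as several tests below do.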

View File

@ -26,6 +26,7 @@ import (
"storj.io/storj/pkg/revocation"
"storj.io/storj/pkg/server"
"storj.io/storj/private/testplanet"
"storj.io/storj/satellite/metainfo/metabase"
"storj.io/uplink"
"storj.io/uplink/private/metainfo"
)
@ -89,14 +90,14 @@ func TestDownloadWithSomeNodesOffline(t *testing.T) {
// get a remote segment from pointerdb
pdb := satellite.Metainfo.Service
listResponse, _, err := pdb.List(ctx, "", "", true, 0, 0)
listResponse, _, err := pdb.List(ctx, metabase.SegmentKey{}, "", true, 0, 0)
require.NoError(t, err)
var path string
var pointer *pb.Pointer
for _, v := range listResponse {
path = v.GetPath()
pointer, err = pdb.Get(ctx, path)
pointer, err = pdb.Get(ctx, metabase.SegmentKey(path))
require.NoError(t, err)
if pointer.GetType() == pb.Pointer_REMOTE {
break
@ -174,14 +175,14 @@ func TestDownloadFromUnresponsiveNode(t *testing.T) {
// get a remote segment from pointerdb
pdb := planet.Satellites[0].Metainfo.Service
listResponse, _, err := pdb.List(ctx, "", "", true, 0, 0)
listResponse, _, err := pdb.List(ctx, metabase.SegmentKey{}, "", true, 0, 0)
require.NoError(t, err)
var path string
var pointer *pb.Pointer
for _, v := range listResponse {
path = v.GetPath()
pointer, err = pdb.Get(ctx, path)
pointer, err = pdb.Get(ctx, metabase.SegmentKey(path))
require.NoError(t, err)
if pointer.GetType() == pb.Pointer_REMOTE {
break

View File

@ -17,6 +17,7 @@ import (
"storj.io/common/uuid"
"storj.io/storj/private/testplanet"
"storj.io/storj/satellite/accounting"
"storj.io/storj/satellite/metainfo/metabase"
"storj.io/storj/storage"
)
@ -242,7 +243,7 @@ func TestBilling_AuditRepairTraffic(t *testing.T) {
key, err := planet.Satellites[0].Metainfo.Database.List(ctx, nil, 10)
require.NoError(t, err)
require.Len(t, key, 1)
ptr, err := satelliteSys.Metainfo.Service.Get(ctx, key[0].String())
ptr, err := satelliteSys.Metainfo.Service.Get(ctx, metabase.SegmentKey(key[0]))
require.NoError(t, err)
// Cause repair traffic
@ -266,7 +267,7 @@ func TestBilling_AuditRepairTraffic(t *testing.T) {
require.NoError(t, err)
require.Len(t, key, 1)
ptr2, err := satelliteSys.Metainfo.Service.Get(ctx, key[0].String())
ptr2, err := satelliteSys.Metainfo.Service.Get(ctx, metabase.SegmentKey(key[0]))
require.NoError(t, err)
remotePieces := ptr2.GetRemote().GetRemotePieces()
@ -425,7 +426,7 @@ func TestBilling_ZombieSegments(t *testing.T) {
}
require.NotNil(t, lastSegmentKey)
err = satelliteSys.Metainfo.Service.UnsynchronizedDelete(ctx, lastSegmentKey.String())
err = satelliteSys.Metainfo.Service.UnsynchronizedDelete(ctx, metabase.SegmentKey(lastSegmentKey))
require.NoError(t, err)
err = uplnk.DeleteObject(ctx, satelliteSys, bucketName, objectKey)

View File

@ -577,7 +577,7 @@ func TestProjectUsage_FreeUsedStorageSpace(t *testing.T) {
// check if usage is equal to first uploaded file
prefix, err := metainfo.CreatePath(ctx, project.ID, -1, []byte("testbucket"), []byte{})
require.NoError(t, err)
items, _, err := satMetainfo.Service.List(ctx, prefix, "", true, 1, meta.All)
items, _, err := satMetainfo.Service.List(ctx, prefix.Encode(), "", true, 1, meta.All)
require.NoError(t, err)
usage, err := accounting.ProjectUsage.GetProjectStorageTotals(ctx, project.ID)

View File

@ -159,16 +159,16 @@ func TestCalculateBucketAtRestData(t *testing.T) {
var testCases = []struct {
name string
project string
segmentIndex string
segmentIndex int64
bucketName string
objectName string
inline bool
last bool
}{
{"inline, same project, same bucket", "9656af6e-2d9c-42fa-91f2-bfd516a722d7", "l", "mockBucketName", "mockObjectName", true, true},
{"remote, same project, same bucket", "9656af6e-2d9c-42fa-91f2-bfd516a722d7", "s0", "mockBucketName", "mockObjectName1", false, false},
{"last segment, same project, different bucket", "9656af6e-2d9c-42fa-91f2-bfd516a722d7", "l", "mockBucketName1", "mockObjectName2", false, true},
{"different project", "9656af6e-2d9c-42fa-91f2-bfd516a722d1", "s0", "mockBucketName", "mockObjectName", false, false},
{"inline, same project, same bucket", "9656af6e-2d9c-42fa-91f2-bfd516a722d7", metabase.LastSegmentIndex, "mockBucketName", "mockObjectName", true, true},
{"remote, same project, same bucket", "9656af6e-2d9c-42fa-91f2-bfd516a722d7", 0, "mockBucketName", "mockObjectName1", false, false},
{"last segment, same project, different bucket", "9656af6e-2d9c-42fa-91f2-bfd516a722d7", metabase.LastSegmentIndex, "mockBucketName1", "mockObjectName2", false, true},
{"different project", "9656af6e-2d9c-42fa-91f2-bfd516a722d1", 0, "mockBucketName", "mockObjectName", false, false},
}
testplanet.Run(t, testplanet.Config{
@ -187,8 +187,13 @@ func TestCalculateBucketAtRestData(t *testing.T) {
// setup: create a pointer and save it to pointerDB
pointer := makePointer(planet.StorageNodes, redundancyScheme, int64(2), tt.inline)
metainfo := satellitePeer.Metainfo.Service
objectPath := fmt.Sprintf("%s/%s/%s/%s", tt.project, tt.segmentIndex, tt.bucketName, tt.objectName)
err = metainfo.Put(ctx, objectPath, pointer)
location := metabase.SegmentLocation{
ProjectID: projectID,
BucketName: tt.bucketName,
Index: tt.segmentIndex,
ObjectKey: metabase.ObjectKey(tt.objectName),
}
err = metainfo.Put(ctx, location.Encode(), pointer)
require.NoError(t, err)
bucketLocation := metabase.BucketLocation{
@ -216,7 +221,8 @@ func TestTallyIgnoresExpiredPointers(t *testing.T) {
satellitePeer := planet.Satellites[0]
redundancyScheme := satelliteRS(satellitePeer)
project := "9656af6e-2d9c-42fa-91f2-bfd516a722d7"
projectID, err := uuid.FromString("9656af6e-2d9c-42fa-91f2-bfd516a722d7")
require.NoError(t, err)
bucket := "bucket"
// setup: create an expired pointer and save it to pointerDB
@ -224,8 +230,13 @@ func TestTallyIgnoresExpiredPointers(t *testing.T) {
pointer.ExpirationDate = time.Now().Add(-24 * time.Hour)
metainfo := satellitePeer.Metainfo.Service
objectPath := fmt.Sprintf("%s/%s/%s/%s", project, "l", bucket, "object/name")
err := metainfo.Put(ctx, objectPath, pointer)
location := metabase.SegmentLocation{
ProjectID: projectID,
BucketName: bucket,
Index: metabase.LastSegmentIndex,
ObjectKey: metabase.ObjectKey("object/name"),
}
err = metainfo.Put(ctx, location.Encode(), pointer)
require.NoError(t, err)
obs := tally.NewObserver(satellitePeer.Log.Named("observer"), time.Now())
@ -254,7 +265,7 @@ func TestTallyLiveAccounting(t *testing.T) {
require.NoError(t, err)
require.Len(t, key, 1)
ptr, err := planet.Satellites[0].Metainfo.Service.Get(ctx, key[0].String())
ptr, err := planet.Satellites[0].Metainfo.Service.Get(ctx, metabase.SegmentKey(key[0]))
require.NoError(t, err)
require.NotNil(t, ptr)

View File

@ -124,11 +124,11 @@ func TestDisqualifiedNodesGetNoDownload(t *testing.T) {
bucket := metabase.BucketLocation{ProjectID: uplinkPeer.Projects[0].ID, BucketName: "testbucket"}
items, _, err := satellitePeer.Metainfo.Service.List(ctx, "", "", true, 10, meta.All)
items, _, err := satellitePeer.Metainfo.Service.List(ctx, metabase.SegmentKey{}, "", true, 10, meta.All)
require.NoError(t, err)
require.Equal(t, 1, len(items))
pointer, err := satellitePeer.Metainfo.Service.Get(ctx, items[0].Path)
pointer, err := satellitePeer.Metainfo.Service.Get(ctx, metabase.SegmentKey(items[0].Path))
require.NoError(t, err)
disqualifiedNode := pointer.GetRemote().GetRemotePieces()[0].NodeId

View File

@ -58,7 +58,7 @@ func TestReverifySuccess(t *testing.T) {
path, err := queue.Next()
require.NoError(t, err)
pointer, err := satellite.Metainfo.Service.Get(ctx, path)
pointer, err := satellite.Metainfo.Service.Get(ctx, metabase.SegmentKey(path))
require.NoError(t, err)
randomIndex, err := audit.GetRandomStripe(ctx, pointer)
@ -138,7 +138,7 @@ func TestReverifyFailMissingShare(t *testing.T) {
path, err := queue.Next()
require.NoError(t, err)
pointer, err := satellite.Metainfo.Service.Get(ctx, path)
pointer, err := satellite.Metainfo.Service.Get(ctx, metabase.SegmentKey(path))
require.NoError(t, err)
randomIndex, err := audit.GetRandomStripe(ctx, pointer)
@ -220,7 +220,7 @@ func TestReverifyFailMissingShareNotVerified(t *testing.T) {
path, err := queue.Next()
require.NoError(t, err)
pointer, err := satellite.Metainfo.Service.Get(ctx, path)
pointer, err := satellite.Metainfo.Service.Get(ctx, metabase.SegmentKey(path))
require.NoError(t, err)
randomIndex, err := audit.GetRandomStripe(ctx, pointer)
@ -254,10 +254,10 @@ func TestReverifyFailMissingShareNotVerified(t *testing.T) {
require.NoError(t, err)
// update pointer to have PieceHashesVerified false
err = satellite.Metainfo.Service.UnsynchronizedDelete(ctx, path)
err = satellite.Metainfo.Service.UnsynchronizedDelete(ctx, metabase.SegmentKey(path))
require.NoError(t, err)
pointer.PieceHashesVerified = false
err = satellite.Metainfo.Service.Put(ctx, path, pointer)
err = satellite.Metainfo.Service.Put(ctx, metabase.SegmentKey(path), pointer)
require.NoError(t, err)
// delete the piece from the first node
@ -310,7 +310,7 @@ func TestReverifyFailBadData(t *testing.T) {
path, err := queue.Next()
require.NoError(t, err)
pointer, err := satellite.Metainfo.Service.Get(ctx, path)
pointer, err := satellite.Metainfo.Service.Get(ctx, metabase.SegmentKey(path))
require.NoError(t, err)
randomIndex, err := audit.GetRandomStripe(ctx, pointer)
@ -382,7 +382,7 @@ func TestReverifyOffline(t *testing.T) {
path, err := queue.Next()
require.NoError(t, err)
pointer, err := satellite.Metainfo.Service.Get(ctx, path)
pointer, err := satellite.Metainfo.Service.Get(ctx, metabase.SegmentKey(path))
require.NoError(t, err)
randomIndex, err := audit.GetRandomStripe(ctx, pointer)
@ -452,7 +452,7 @@ func TestReverifyOfflineDialTimeout(t *testing.T) {
path, err := queue.Next()
require.NoError(t, err)
pointer, err := satellite.Metainfo.Service.Get(ctx, path)
pointer, err := satellite.Metainfo.Service.Get(ctx, metabase.SegmentKey(path))
require.NoError(t, err)
randomIndex, err := audit.GetRandomStripe(ctx, pointer)
@ -548,7 +548,7 @@ func TestReverifyDeletedSegment(t *testing.T) {
path, err := queue.Next()
require.NoError(t, err)
pointer, err := satellite.Metainfo.Service.Get(ctx, path)
pointer, err := satellite.Metainfo.Service.Get(ctx, metabase.SegmentKey(path))
require.NoError(t, err)
randomIndex, err := audit.GetRandomStripe(ctx, pointer)
@ -635,7 +635,7 @@ func TestReverifyModifiedSegment(t *testing.T) {
pendingPath, err := queue.Next()
require.NoError(t, err)
pointer, err := satellite.Metainfo.Service.Get(ctx, pendingPath)
pointer, err := satellite.Metainfo.Service.Get(ctx, metabase.SegmentKey(pendingPath))
require.NoError(t, err)
randomIndex, err := audit.GetRandomStripe(ctx, pointer)
@ -660,7 +660,7 @@ func TestReverifyModifiedSegment(t *testing.T) {
// remove a piece from the file (a piece that the contained node isn't holding)
audits.Verifier.OnTestingCheckSegmentAlteredHook = func() {
pieceToRemove := pointer.Remote.RemotePieces[1]
_, err = metainfo.UpdatePieces(ctx, pendingPath, pointer, nil, []*pb.RemotePiece{pieceToRemove})
_, err = metainfo.UpdatePieces(ctx, metabase.SegmentKey(pendingPath), pointer, nil, []*pb.RemotePiece{pieceToRemove})
require.NoError(t, err)
}
@ -723,7 +723,7 @@ func TestReverifyReplacedSegment(t *testing.T) {
pendingPath, err := queue.Next()
require.NoError(t, err)
pointer, err := satellite.Metainfo.Service.Get(ctx, pendingPath)
pointer, err := satellite.Metainfo.Service.Get(ctx, metabase.SegmentKey(pendingPath))
require.NoError(t, err)
randomIndex, err := audit.GetRandomStripe(ctx, pointer)
@ -820,9 +820,9 @@ func TestReverifyDifferentShare(t *testing.T) {
require.NoError(t, err)
require.NotEqual(t, path1, path2)
pointer1, err := satellite.Metainfo.Service.Get(ctx, path1)
pointer1, err := satellite.Metainfo.Service.Get(ctx, metabase.SegmentKey(path1))
require.NoError(t, err)
pointer2, err := satellite.Metainfo.Service.Get(ctx, path2)
pointer2, err := satellite.Metainfo.Service.Get(ctx, metabase.SegmentKey(path2))
require.NoError(t, err)
// find a node that contains a piece for both files
@ -922,7 +922,7 @@ func TestReverifyExpired1(t *testing.T) {
require.NoError(t, err)
// set pointer's expiration date to be already expired
pointer, err := satellite.Metainfo.Service.Get(ctx, path)
pointer, err := satellite.Metainfo.Service.Get(ctx, metabase.SegmentKey(path))
require.NoError(t, err)
oldPointerBytes, err := pb.Marshal(pointer)
require.NoError(t, err)
@ -940,7 +940,7 @@ func TestReverifyExpired1(t *testing.T) {
require.NoError(t, err)
// Reverify should delete the expired segment
pointer, err = satellite.Metainfo.Service.Get(ctx, path)
pointer, err = satellite.Metainfo.Service.Get(ctx, metabase.SegmentKey(path))
require.Error(t, err)
require.Nil(t, pointer)
@ -985,9 +985,9 @@ func TestReverifyExpired2(t *testing.T) {
require.NoError(t, err)
require.NotEqual(t, path1, path2)
pointer1, err := satellite.Metainfo.Service.Get(ctx, path1)
pointer1, err := satellite.Metainfo.Service.Get(ctx, metabase.SegmentKey(path1))
require.NoError(t, err)
pointer2, err := satellite.Metainfo.Service.Get(ctx, path2)
pointer2, err := satellite.Metainfo.Service.Get(ctx, metabase.SegmentKey(path2))
require.NoError(t, err)
// find a node that contains a piece for both files
@ -1062,7 +1062,7 @@ func TestReverifyExpired2(t *testing.T) {
require.Len(t, report.Fails, 0)
// Reverify should delete the expired segment
pointer, err := satellite.Metainfo.Service.Get(ctx, path1)
pointer, err := satellite.Metainfo.Service.Get(ctx, metabase.SegmentKey(path1))
require.Error(t, err)
require.Nil(t, pointer)
@ -1109,7 +1109,7 @@ func TestReverifySlowDownload(t *testing.T) {
path, err := queue.Next()
require.NoError(t, err)
pointer, err := satellite.Metainfo.Service.Get(ctx, path)
pointer, err := satellite.Metainfo.Service.Get(ctx, metabase.SegmentKey(path))
require.NoError(t, err)
slowPiece := pointer.Remote.RemotePieces[0]
@ -1193,7 +1193,7 @@ func TestReverifyUnknownError(t *testing.T) {
path, err := queue.Next()
require.NoError(t, err)
pointer, err := satellite.Metainfo.Service.Get(ctx, path)
pointer, err := satellite.Metainfo.Service.Get(ctx, metabase.SegmentKey(path))
require.NoError(t, err)
badPiece := pointer.Remote.RemotePieces[0]

View File

@ -87,7 +87,7 @@ func NewVerifier(log *zap.Logger, metainfo *metainfo.Service, dialer rpc.Dialer,
func (verifier *Verifier) Verify(ctx context.Context, path storj.Path, skip map[storj.NodeID]bool) (report Report, err error) {
defer mon.Task()(&ctx)(&err)
pointerBytes, pointer, err := verifier.metainfo.GetWithBytes(ctx, path)
pointerBytes, pointer, err := verifier.metainfo.GetWithBytes(ctx, metabase.SegmentKey(path))
if err != nil {
if storj.ErrObjectNotFound.Has(err) {
verifier.log.Debug("segment deleted before Verify")
@ -96,7 +96,7 @@ func (verifier *Verifier) Verify(ctx context.Context, path storj.Path, skip map[
return Report{}, err
}
if pointer.ExpirationDate != (time.Time{}) && pointer.ExpirationDate.Before(time.Now()) {
errDelete := verifier.metainfo.Delete(ctx, path, pointerBytes)
errDelete := verifier.metainfo.Delete(ctx, metabase.SegmentKey(path), pointerBytes)
if errDelete != nil {
return Report{}, Error.Wrap(errDelete)
}
@ -379,7 +379,7 @@ func (verifier *Verifier) Reverify(ctx context.Context, path storj.Path) (report
err error
}
pointerBytes, pointer, err := verifier.metainfo.GetWithBytes(ctx, path)
pointerBytes, pointer, err := verifier.metainfo.GetWithBytes(ctx, metabase.SegmentKey(path))
if err != nil {
if storj.ErrObjectNotFound.Has(err) {
verifier.log.Debug("segment deleted before Reverify")
@ -388,7 +388,7 @@ func (verifier *Verifier) Reverify(ctx context.Context, path storj.Path) (report
return Report{}, err
}
if pointer.ExpirationDate != (time.Time{}) && pointer.ExpirationDate.Before(time.Now()) {
errDelete := verifier.metainfo.Delete(ctx, path, pointerBytes)
errDelete := verifier.metainfo.Delete(ctx, metabase.SegmentKey(path), pointerBytes)
if errDelete != nil {
return Report{}, Error.Wrap(errDelete)
}
@ -451,7 +451,7 @@ func (verifier *Verifier) Reverify(ctx context.Context, path storj.Path) (report
containedInSegment++
go func(pending *PendingAudit) {
pendingPointerBytes, pendingPointer, err := verifier.metainfo.GetWithBytes(ctx, pending.Path)
pendingPointerBytes, pendingPointer, err := verifier.metainfo.GetWithBytes(ctx, metabase.SegmentKey(pending.Path))
if err != nil {
if storj.ErrObjectNotFound.Has(err) {
ch <- result{nodeID: pending.NodeID, status: skipped}
@ -463,7 +463,7 @@ func (verifier *Verifier) Reverify(ctx context.Context, path storj.Path) (report
return
}
if pendingPointer.ExpirationDate != (time.Time{}) && pendingPointer.ExpirationDate.Before(time.Now().UTC()) {
errDelete := verifier.metainfo.Delete(ctx, pending.Path, pendingPointerBytes)
errDelete := verifier.metainfo.Delete(ctx, metabase.SegmentKey(pending.Path), pendingPointerBytes)
if errDelete != nil {
verifier.log.Debug("Reverify: error deleting expired segment", zap.Stringer("Node ID", pending.NodeID), zap.Error(errDelete))
}
@ -705,27 +705,27 @@ func (verifier *Verifier) GetShare(ctx context.Context, limit *pb.AddressedOrder
}
// checkIfSegmentAltered checks if path's pointer has been altered since path was selected.
func (verifier *Verifier) checkIfSegmentAltered(ctx context.Context, segmentPath string, oldPointer *pb.Pointer, oldPointerBytes []byte) (err error) {
func (verifier *Verifier) checkIfSegmentAltered(ctx context.Context, segmentKey string, oldPointer *pb.Pointer, oldPointerBytes []byte) (err error) {
defer mon.Task()(&ctx)(&err)
if verifier.OnTestingCheckSegmentAlteredHook != nil {
verifier.OnTestingCheckSegmentAlteredHook()
}
newPointerBytes, newPointer, err := verifier.metainfo.GetWithBytes(ctx, segmentPath)
newPointerBytes, newPointer, err := verifier.metainfo.GetWithBytes(ctx, metabase.SegmentKey(segmentKey))
if err != nil {
if storj.ErrObjectNotFound.Has(err) {
return ErrSegmentDeleted.New("%q", segmentPath)
return ErrSegmentDeleted.New("%q", segmentKey)
}
return err
}
if oldPointer != nil && oldPointer.CreationDate != newPointer.CreationDate {
return ErrSegmentDeleted.New("%q", segmentPath)
return ErrSegmentDeleted.New("%q", segmentKey)
}
if !bytes.Equal(oldPointerBytes, newPointerBytes) {
return ErrSegmentModified.New("%q", segmentPath)
return ErrSegmentModified.New("%q", segmentKey)
}
return nil
}

View File

@ -57,7 +57,7 @@ func TestDownloadSharesHappyPath(t *testing.T) {
path, err := queue.Next()
require.NoError(t, err)
pointer, err := satellite.Metainfo.Service.Get(ctx, path)
pointer, err := satellite.Metainfo.Service.Get(ctx, metabase.SegmentKey(path))
require.NoError(t, err)
randomIndex, err := audit.GetRandomStripe(ctx, pointer)
@ -108,7 +108,7 @@ func TestDownloadSharesOfflineNode(t *testing.T) {
path, err := queue.Next()
require.NoError(t, err)
pointer, err := satellite.Metainfo.Service.Get(ctx, path)
pointer, err := satellite.Metainfo.Service.Get(ctx, metabase.SegmentKey(path))
require.NoError(t, err)
randomIndex, err := audit.GetRandomStripe(ctx, pointer)
@ -165,7 +165,7 @@ func TestDownloadSharesMissingPiece(t *testing.T) {
path, err := queue.Next()
require.NoError(t, err)
pointer, err := satellite.Metainfo.Service.Get(ctx, path)
pointer, err := satellite.Metainfo.Service.Get(ctx, metabase.SegmentKey(path))
require.NoError(t, err)
randomIndex, err := audit.GetRandomStripe(ctx, pointer)
@ -220,7 +220,7 @@ func TestDownloadSharesDialTimeout(t *testing.T) {
path, err := queue.Next()
require.NoError(t, err)
pointer, err := satellite.Metainfo.Service.Get(ctx, path)
pointer, err := satellite.Metainfo.Service.Get(ctx, metabase.SegmentKey(path))
require.NoError(t, err)
randomIndex, err := audit.GetRandomStripe(ctx, pointer)
@ -303,7 +303,7 @@ func TestDownloadSharesDownloadTimeout(t *testing.T) {
path, err := queue.Next()
require.NoError(t, err)
pointer, err := satellite.Metainfo.Service.Get(ctx, path)
pointer, err := satellite.Metainfo.Service.Get(ctx, metabase.SegmentKey(path))
require.NoError(t, err)
randomIndex, err := audit.GetRandomStripe(ctx, pointer)
@ -363,7 +363,7 @@ func TestVerifierHappyPath(t *testing.T) {
path, err := queue.Next()
require.NoError(t, err)
pointer, err := satellite.Metainfo.Service.Get(ctx, path)
pointer, err := satellite.Metainfo.Service.Get(ctx, metabase.SegmentKey(path))
require.NoError(t, err)
report, err := audits.Verifier.Verify(ctx, path, nil)
@ -398,7 +398,7 @@ func TestVerifierExpired(t *testing.T) {
require.NoError(t, err)
// set pointer's expiration date to be already expired
pointer, err := satellite.Metainfo.Service.Get(ctx, path)
pointer, err := satellite.Metainfo.Service.Get(ctx, metabase.SegmentKey(path))
require.NoError(t, err)
oldPointerBytes, err := pb.Marshal(pointer)
require.NoError(t, err)
@ -416,7 +416,7 @@ func TestVerifierExpired(t *testing.T) {
require.NoError(t, err)
// Verify should delete the expired segment
pointer, err = satellite.Metainfo.Service.Get(ctx, path)
pointer, err = satellite.Metainfo.Service.Get(ctx, metabase.SegmentKey(path))
require.Error(t, err)
require.Nil(t, pointer)
@ -449,7 +449,7 @@ func TestVerifierOfflineNode(t *testing.T) {
path, err := queue.Next()
require.NoError(t, err)
pointer, err := satellite.Metainfo.Service.Get(ctx, path)
pointer, err := satellite.Metainfo.Service.Get(ctx, metabase.SegmentKey(path))
require.NoError(t, err)
// stop the first node in the pointer
@ -488,7 +488,7 @@ func TestVerifierMissingPiece(t *testing.T) {
path, err := queue.Next()
require.NoError(t, err)
pointer, err := satellite.Metainfo.Service.Get(ctx, path)
pointer, err := satellite.Metainfo.Service.Get(ctx, metabase.SegmentKey(path))
require.NoError(t, err)
// delete the piece from the first node
@ -533,14 +533,14 @@ func TestVerifierMissingPieceHashesNotVerified(t *testing.T) {
path, err := queue.Next()
require.NoError(t, err)
pointer, err := satellite.Metainfo.Service.Get(ctx, path)
pointer, err := satellite.Metainfo.Service.Get(ctx, metabase.SegmentKey(path))
require.NoError(t, err)
// update pointer to have PieceHashesVerified false
err = satellite.Metainfo.Service.UnsynchronizedDelete(ctx, path)
err = satellite.Metainfo.Service.UnsynchronizedDelete(ctx, metabase.SegmentKey(path))
require.NoError(t, err)
pointer.PieceHashesVerified = false
err = satellite.Metainfo.Service.Put(ctx, path, pointer)
err = satellite.Metainfo.Service.Put(ctx, metabase.SegmentKey(path), pointer)
require.NoError(t, err)
// delete the piece from the first node
@ -583,7 +583,7 @@ func TestVerifierDialTimeout(t *testing.T) {
path, err := queue.Next()
require.NoError(t, err)
pointer, err := satellite.Metainfo.Service.Get(ctx, path)
pointer, err := satellite.Metainfo.Service.Get(ctx, metabase.SegmentKey(path))
require.NoError(t, err)
tlsOptions, err := tlsopts.NewOptions(satellite.Identity, tlsopts.Config{}, nil)
@ -675,10 +675,10 @@ func TestVerifierModifiedSegment(t *testing.T) {
audits.Verifier.OnTestingCheckSegmentAlteredHook = func() {
// remove one piece from the segment so that checkIfSegmentAltered fails
pointer, err := metainfo.Get(ctx, path)
pointer, err := metainfo.Get(ctx, metabase.SegmentKey(path))
require.NoError(t, err)
pieceToRemove := pointer.Remote.RemotePieces[0]
_, err = metainfo.UpdatePieces(ctx, path, pointer, nil, []*pb.RemotePiece{pieceToRemove})
_, err = metainfo.UpdatePieces(ctx, metabase.SegmentKey(path), pointer, nil, []*pb.RemotePiece{pieceToRemove})
require.NoError(t, err)
}
@ -744,7 +744,7 @@ func TestVerifierModifiedSegmentFailsOnce(t *testing.T) {
path, err := queue.Next()
require.NoError(t, err)
pointer, err := satellite.Metainfo.Service.Get(ctx, path)
pointer, err := satellite.Metainfo.Service.Get(ctx, metabase.SegmentKey(path))
require.NoError(t, err)
// delete the piece from the first node
@ -804,7 +804,7 @@ func TestVerifierSlowDownload(t *testing.T) {
path, err := queue.Next()
require.NoError(t, err)
pointer, err := satellite.Metainfo.Service.Get(ctx, path)
pointer, err := satellite.Metainfo.Service.Get(ctx, metabase.SegmentKey(path))
require.NoError(t, err)
slowNode := planet.FindNode(pointer.Remote.RemotePieces[0].NodeId)
@ -854,7 +854,7 @@ func TestVerifierUnknownError(t *testing.T) {
path, err := queue.Next()
require.NoError(t, err)
pointer, err := satellite.Metainfo.Service.Get(ctx, path)
pointer, err := satellite.Metainfo.Service.Get(ctx, metabase.SegmentKey(path))
require.NoError(t, err)
badNode := planet.FindNode(pointer.Remote.RemotePieces[0].NodeId)

View File

@ -124,7 +124,7 @@ func TestGarbageCollection(t *testing.T) {
})
}
func getPointer(ctx *testcontext.Context, t *testing.T, satellite *testplanet.Satellite, upl *testplanet.Uplink, bucket, path string) (lastSegPath string, pointer *pb.Pointer) {
func getPointer(ctx *testcontext.Context, t *testing.T, satellite *testplanet.Satellite, upl *testplanet.Uplink, bucket, path string) (_ metabase.SegmentKey, pointer *pb.Pointer) {
access := upl.Access[satellite.ID()]
serializedAccess, err := access.Serialize()
@ -143,11 +143,11 @@ func getPointer(ctx *testcontext.Context, t *testing.T, satellite *testplanet.Sa
ObjectKey: metabase.ObjectKey(encryptedPath.Raw()),
}
lastSegPath = string(segmentLocation.Encode())
pointer, err = satellite.Metainfo.Service.Get(ctx, lastSegPath)
key := segmentLocation.Encode()
pointer, err = satellite.Metainfo.Service.Get(ctx, key)
require.NoError(t, err)
return lastSegPath, pointer
return key, pointer
}
func encryptionAccess(access string) (*encryption.Store, error) {

View File

@ -19,6 +19,7 @@ import (
"storj.io/storj/private/testplanet"
"storj.io/storj/satellite"
"storj.io/storj/satellite/gracefulexit"
"storj.io/storj/satellite/metainfo/metabase"
"storj.io/storj/satellite/overlay"
"storj.io/storj/satellite/satellitedb/satellitedbtest"
"storj.io/storj/storage"
@ -180,7 +181,7 @@ func TestDurabilityRatio(t *testing.T) {
var oldPointer *pb.Pointer
var path []byte
for _, key := range keys {
p, err := satellite.Metainfo.Service.Get(ctx, string(key))
p, err := satellite.Metainfo.Service.Get(ctx, metabase.SegmentKey(key))
require.NoError(t, err)
if p.GetRemote() != nil {

View File

@ -330,7 +330,7 @@ func (endpoint *Endpoint) processIncomplete(ctx context.Context, stream pb.DRPCS
return nil
}
pointer, err := endpoint.getValidPointer(ctx, string(incomplete.Path), incomplete.PieceNum, incomplete.RootPieceID)
pointer, err := endpoint.getValidPointer(ctx, metabase.SegmentKey(incomplete.Path), incomplete.PieceNum, incomplete.RootPieceID)
if err != nil {
endpoint.log.Warn("invalid pointer", zap.Error(err))
err = endpoint.db.DeleteTransferQueueItem(ctx, nodeID, incomplete.Path, incomplete.PieceNum)
@ -352,7 +352,7 @@ func (endpoint *Endpoint) processIncomplete(ctx context.Context, stream pb.DRPCS
pieceSize, err := endpoint.calculatePieceSize(ctx, pointer, incomplete, nodePiece)
if ErrAboveOptimalThreshold.Has(err) {
_, err = endpoint.metainfo.UpdatePieces(ctx, string(incomplete.Path), pointer, nil, []*pb.RemotePiece{nodePiece})
_, err = endpoint.metainfo.UpdatePieces(ctx, metabase.SegmentKey(incomplete.Path), pointer, nil, []*pb.RemotePiece{nodePiece})
if err != nil {
return Error.Wrap(err)
}
@ -469,7 +469,7 @@ func (endpoint *Endpoint) handleSucceeded(ctx context.Context, stream pb.DRPCSat
return Error.Wrap(err)
}
err = endpoint.updatePointer(ctx, transfer.OriginalPointer, exitingNodeID, receivingNodeID, string(transfer.Path), transfer.PieceNum, transferQueueItem.RootPieceID)
err = endpoint.updatePointer(ctx, transfer.OriginalPointer, exitingNodeID, receivingNodeID, metabase.SegmentKey(transfer.Path), transfer.PieceNum, transferQueueItem.RootPieceID)
if err != nil {
// remove the piece from the pending queue so it gets retried
deleteErr := pending.Delete(originalPieceID)
@ -550,7 +550,7 @@ func (endpoint *Endpoint) handleFailed(ctx context.Context, pending *PendingMap,
// If the pointer is not piece hash verified, do not count this as a failure.
if pb.TransferFailed_Error(errorCode) == pb.TransferFailed_NOT_FOUND {
endpoint.log.Debug("piece not found on node", zap.Stringer("node ID", nodeID), zap.ByteString("path", transfer.Path), zap.Int32("piece num", transfer.PieceNum))
pointer, err := endpoint.metainfo.Get(ctx, string(transfer.Path))
pointer, err := endpoint.metainfo.Get(ctx, metabase.SegmentKey(transfer.Path))
if err != nil {
return Error.Wrap(err)
}
@ -578,7 +578,7 @@ func (endpoint *Endpoint) handleFailed(ctx context.Context, pending *PendingMap,
return pending.Delete(pieceID)
}
_, err = endpoint.metainfo.UpdatePieces(ctx, string(transfer.Path), pointer, nil, []*pb.RemotePiece{nodePiece})
_, err = endpoint.metainfo.UpdatePieces(ctx, metabase.SegmentKey(transfer.Path), pointer, nil, []*pb.RemotePiece{nodePiece})
if err != nil {
return Error.Wrap(err)
}
@ -714,11 +714,11 @@ func (endpoint *Endpoint) getFinishedMessage(ctx context.Context, nodeID storj.N
return message, nil
}
func (endpoint *Endpoint) updatePointer(ctx context.Context, originalPointer *pb.Pointer, exitingNodeID storj.NodeID, receivingNodeID storj.NodeID, path string, pieceNum int32, originalRootPieceID storj.PieceID) (err error) {
func (endpoint *Endpoint) updatePointer(ctx context.Context, originalPointer *pb.Pointer, exitingNodeID storj.NodeID, receivingNodeID storj.NodeID, key metabase.SegmentKey, pieceNum int32, originalRootPieceID storj.PieceID) (err error) {
defer mon.Task()(&ctx)(&err)
// remove the node from the pointer
pointer, err := endpoint.getValidPointer(ctx, path, pieceNum, originalRootPieceID)
pointer, err := endpoint.getValidPointer(ctx, key, pieceNum, originalRootPieceID)
if err != nil {
return Error.Wrap(err)
}
@ -751,7 +751,7 @@ func (endpoint *Endpoint) updatePointer(ctx context.Context, originalPointer *pb
NodeId: receivingNodeID,
}}
}
_, err = endpoint.metainfo.UpdatePiecesCheckDuplicates(ctx, path, originalPointer, toAdd, toRemove, true)
_, err = endpoint.metainfo.UpdatePiecesCheckDuplicates(ctx, key, originalPointer, toAdd, toRemove, true)
if err != nil {
return Error.Wrap(err)
}
@ -870,21 +870,21 @@ func (endpoint *Endpoint) calculatePieceSize(ctx context.Context, pointer *pb.Po
return eestream.CalcPieceSize(pointer.GetSegmentSize(), redundancy), nil
}
func (endpoint *Endpoint) getValidPointer(ctx context.Context, path string, pieceNum int32, originalRootPieceID storj.PieceID) (*pb.Pointer, error) {
pointer, err := endpoint.metainfo.Get(ctx, path)
func (endpoint *Endpoint) getValidPointer(ctx context.Context, key metabase.SegmentKey, pieceNum int32, originalRootPieceID storj.PieceID) (*pb.Pointer, error) {
pointer, err := endpoint.metainfo.Get(ctx, key)
// TODO we don't know the type of error
if err != nil {
return nil, Error.New("pointer path %v no longer exists.", path)
return nil, Error.New("pointer path %s no longer exists.", key)
}
remote := pointer.GetRemote()
// no longer a remote segment
if remote == nil {
return nil, Error.New("pointer path %v is no longer remote.", path)
return nil, Error.New("pointer path %s is no longer remote.", key)
}
if !originalRootPieceID.IsZero() && originalRootPieceID != remote.RootPieceId {
return nil, Error.New("pointer path %v has changed.", path)
return nil, Error.New("pointer path %s has changed.", key)
}
return pointer, nil
}

View File

@ -29,6 +29,7 @@ import (
"storj.io/storj/private/testblobs"
"storj.io/storj/private/testplanet"
"storj.io/storj/satellite"
"storj.io/storj/satellite/metainfo/metabase"
"storj.io/storj/satellite/overlay"
"storj.io/storj/storage"
"storj.io/storj/storagenode"
@ -790,7 +791,7 @@ func TestSuccessPointerUpdate(t *testing.T) {
keys, err := satellite.Metainfo.Database.List(ctx, nil, 1)
require.NoError(t, err)
pointer, err := satellite.Metainfo.Service.Get(ctx, string(keys[0]))
pointer, err := satellite.Metainfo.Service.Get(ctx, metabase.SegmentKey(keys[0]))
require.NoError(t, err)
found := 0
@ -861,8 +862,8 @@ func TestUpdatePointerFailure_DuplicatedNodeID(t *testing.T) {
// update pointer to include the new receiving node before responding to satellite
keys, err := satellite.Metainfo.Database.List(ctx, nil, 1)
require.NoError(t, err)
path := string(keys[0])
pointer, err := satellite.Metainfo.Service.Get(ctx, path)
pointer, err := satellite.Metainfo.Service.Get(ctx, metabase.SegmentKey(keys[0]))
require.NoError(t, err)
require.NotNil(t, pointer.GetRemote())
require.True(t, len(pointer.GetRemote().GetRemotePieces()) > 0)
@ -883,7 +884,7 @@ func TestUpdatePointerFailure_DuplicatedNodeID(t *testing.T) {
NodeId: firstRecNodeID,
}
_, err = satellite.Metainfo.Service.UpdatePieces(ctx, path, pointer, pieceToAdd, pieceToRemove)
_, err = satellite.Metainfo.Service.UpdatePieces(ctx, metabase.SegmentKey(keys[0]), pointer, pieceToAdd, pieceToRemove)
require.NoError(t, err)
err = processClient.Send(success)
@ -907,8 +908,8 @@ func TestUpdatePointerFailure_DuplicatedNodeID(t *testing.T) {
// check exiting node is still in the pointer
keys, err := satellite.Metainfo.Database.List(ctx, nil, 1)
require.NoError(t, err)
path := string(keys[0])
pointer, err := satellite.Metainfo.Service.Get(ctx, path)
pointer, err := satellite.Metainfo.Service.Get(ctx, metabase.SegmentKey(keys[0]))
require.NoError(t, err)
require.NotNil(t, pointer.GetRemote())
require.True(t, len(pointer.GetRemote().GetRemotePieces()) > 0)
@ -1102,7 +1103,7 @@ func TestFailureNotFoundPieceHashVerified(t *testing.T) {
var pointer *pb.Pointer
for _, key := range keys {
p, err := satellite.Metainfo.Service.Get(ctx, string(key))
p, err := satellite.Metainfo.Service.Get(ctx, metabase.SegmentKey(key))
require.NoError(t, err)
if p.GetRemote() != nil {
@ -1134,7 +1135,7 @@ func TestFailureNotFoundPieceHashUnverified(t *testing.T) {
var oldPointer *pb.Pointer
var path []byte
for _, key := range keys {
p, err := satellite.Metainfo.Service.Get(ctx, string(key))
p, err := satellite.Metainfo.Service.Get(ctx, metabase.SegmentKey(key))
require.NoError(t, err)
if p.GetRemote() != nil {
@ -1195,7 +1196,7 @@ func TestFailureNotFoundPieceHashUnverified(t *testing.T) {
var pointer *pb.Pointer
for _, key := range keys {
p, err := satellite.Metainfo.Service.Get(ctx, string(key))
p, err := satellite.Metainfo.Service.Get(ctx, metabase.SegmentKey(key))
require.NoError(t, err)
if p.GetRemote() != nil {
@ -1502,7 +1503,7 @@ func findNodeToExit(ctx context.Context, planet *testplanet.Planet, objects int)
}
for _, key := range keys {
pointer, err := satellite.Metainfo.Service.Get(ctx, string(key))
pointer, err := satellite.Metainfo.Service.Get(ctx, metabase.SegmentKey(key))
if err != nil {
return nil, err
}

View File

@ -120,12 +120,12 @@ func (endpoint *Endpoint) SegmentHealth(ctx context.Context, in *pb.SegmentHealt
return nil, Error.Wrap(err)
}
path, err := metainfo.CreatePath(ctx, projectID, in.GetSegmentIndex(), in.GetBucket(), in.GetEncryptedPath())
location, err := metainfo.CreatePath(ctx, projectID, in.GetSegmentIndex(), in.GetBucket(), in.GetEncryptedPath())
if err != nil {
return nil, Error.Wrap(err)
}
pointer, err := endpoint.metainfo.Get(ctx, path)
pointer, err := endpoint.metainfo.Get(ctx, location.Encode())
if err != nil {
return nil, Error.Wrap(err)
}

View File

@ -18,6 +18,7 @@ import (
"storj.io/common/uuid"
"storj.io/storj/private/testplanet"
"storj.io/storj/satellite/metainfo"
"storj.io/storj/satellite/metainfo/metabase"
"storj.io/storj/storage"
"storj.io/uplink/private/testuplink"
)
@ -311,7 +312,7 @@ func TestEndpoint_DeleteObjectPieces_ObjectWithoutLastSegment(t *testing.T) {
require.NoError(t, err)
// confirm that the object was deleted
listResponse, more, err := satelliteSys.Metainfo.Service.List(ctx, "", "", true, 0, 0)
listResponse, more, err := satelliteSys.Metainfo.Service.List(ctx, metabase.SegmentKey{}, "", true, 0, 0)
require.NoError(t, err)
require.False(t, more)
require.Len(t, listResponse, 0)
@ -423,7 +424,7 @@ func TestEndpoint_DeleteObjectPieces_ObjectWithoutLastSegment(t *testing.T) {
require.NoError(t, err)
// check segment state after deletion
listResponse, more, err := satelliteSys.Metainfo.Service.List(ctx, "", "", true, 0, 0)
listResponse, more, err := satelliteSys.Metainfo.Service.List(ctx, metabase.SegmentKey{}, "", true, 0, 0)
require.NoError(t, err)
require.False(t, more)
// since the segments are sparse, we are only able to delete
@ -570,9 +571,9 @@ func uploadFirstObjectWithoutSomeSegmentsPointers(
projectID, encryptedPath = getProjectIDAndEncPathFirstObject(ctx, t, satelliteSys)
for _, segIndx := range noSegmentsIndexes {
path, err := metainfo.CreatePath(ctx, projectID, segIndx, []byte(bucketName), encryptedPath)
location, err := metainfo.CreatePath(ctx, projectID, segIndx, []byte(bucketName), encryptedPath)
require.NoError(t, err)
err = satelliteSys.Metainfo.Service.UnsynchronizedDelete(ctx, path)
err = satelliteSys.Metainfo.Service.UnsynchronizedDelete(ctx, location.Encode())
require.NoError(t, err)
}

View File

@ -51,7 +51,7 @@ func (ed *expiredDeleter) deleteSegmentIfExpired(ctx context.Context, path metai
if err != nil {
return err
}
err = ed.metainfo.Delete(ctx, path.Raw, pointerBytes)
err = ed.metainfo.Delete(ctx, path.Encode(), pointerBytes)
if storj.ErrObjectNotFound.Has(err) {
// segment already deleted
return nil

View File

@ -8,7 +8,6 @@ import (
"crypto/sha256"
"errors"
"fmt"
"strconv"
"time"
"github.com/spacemonkeygo/monkit/v3"
@ -212,29 +211,6 @@ func (endpoint *Endpoint) filterValidPieces(ctx context.Context, pointer *pb.Poi
return nil
}
// CreatePath creates a Segment path.
func CreatePath(ctx context.Context, projectID uuid.UUID, segmentIndex int64, bucket, path []byte) (_ storj.Path, err error) {
defer mon.Task()(&ctx)(&err)
if segmentIndex < lastSegment { // lastSegment = -1
return "", errors.New("invalid segment index")
}
segment := "l"
if segmentIndex > lastSegment { // lastSegment = -1
segment = "s" + strconv.FormatInt(segmentIndex, 10)
}
entries := make([]string, 0)
entries = append(entries, projectID.String())
entries = append(entries, segment)
if len(bucket) != 0 {
entries = append(entries, string(bucket))
}
if len(path) != 0 {
entries = append(entries, string(path))
}
return storj.JoinPaths(entries...), nil
}
// ProjectInfo returns allowed ProjectInfo for the provided API key.
func (endpoint *Endpoint) ProjectInfo(ctx context.Context, req *pb.ProjectInfoRequest) (_ *pb.ProjectInfoResponse, err error) {
defer mon.Task()(&ctx)(&err)
@ -476,11 +452,12 @@ func (endpoint *Endpoint) deleteBucketNotEmpty(ctx context.Context, projectID uu
func (endpoint *Endpoint) deleteByPrefix(ctx context.Context, projectID uuid.UUID, bucketName []byte, segmentIdx int64) (deletedCount int, err error) {
defer mon.Task()(&ctx)(&err)
prefix, err := CreatePath(ctx, projectID, segmentIdx, bucketName, []byte{})
location, err := CreatePath(ctx, projectID, segmentIdx, bucketName, []byte{})
if err != nil {
return deletedCount, rpcstatus.Error(rpcstatus.InvalidArgument, err.Error())
}
prefix := location.Encode()
for {
segments, more, err := endpoint.metainfo.List(ctx, prefix, "", true, 0, meta.None)
if err != nil {
@ -718,14 +695,14 @@ func (endpoint *Endpoint) BeginObject(ctx context.Context, req *pb.ObjectBeginRe
return nil, err
}
} else {
path, err := CreatePath(ctx, keyInfo.ProjectID, lastSegment, req.Bucket, req.EncryptedPath)
location, err := CreatePath(ctx, keyInfo.ProjectID, lastSegment, req.Bucket, req.EncryptedPath)
if err != nil {
endpoint.log.Error("unable to create path", zap.Error(err))
return nil, rpcstatus.Error(rpcstatus.Internal, err.Error())
}
// TODO maybe we can have different Get without pointer unmarshaling
_, _, err = endpoint.metainfo.GetWithBytes(ctx, path)
_, _, err = endpoint.metainfo.GetWithBytes(ctx, location.Encode())
if err == nil {
return nil, rpcstatus.Error(rpcstatus.PermissionDenied, "Unauthorized API credentials")
}
@ -791,24 +768,25 @@ func (endpoint *Endpoint) commitObject(ctx context.Context, req *pb.ObjectCommit
lastSegmentPointer := pointer
if pointer == nil {
lastSegmentIndex := streamMeta.NumberOfSegments - 1
lastSegmentPath, err := CreatePath(ctx, keyInfo.ProjectID, lastSegmentIndex, streamID.Bucket, streamID.EncryptedPath)
lastSegmentLocation, err := CreatePath(ctx, keyInfo.ProjectID, lastSegmentIndex, streamID.Bucket, streamID.EncryptedPath)
if err != nil {
return nil, rpcstatus.Errorf(rpcstatus.InvalidArgument, "unable to create segment path: %s", err.Error())
}
var lastSegmentPointerBytes []byte
lastSegmentPointerBytes, lastSegmentPointer, err = endpoint.metainfo.GetWithBytes(ctx, lastSegmentPath)
lastSegmentKey := lastSegmentLocation.Encode()
lastSegmentPointerBytes, lastSegmentPointer, err = endpoint.metainfo.GetWithBytes(ctx, lastSegmentKey)
if err != nil {
endpoint.log.Error("unable to get pointer", zap.String("segmentPath", lastSegmentPath), zap.Error(err))
endpoint.log.Error("unable to get pointer", zap.ByteString("segmentPath", lastSegmentKey), zap.Error(err))
return nil, rpcstatus.Error(rpcstatus.Internal, "unable to commit object")
}
if lastSegmentPointer == nil {
return nil, rpcstatus.Errorf(rpcstatus.NotFound, "unable to find object: %q/%q", streamID.Bucket, streamID.EncryptedPath)
}
err = endpoint.metainfo.Delete(ctx, lastSegmentPath, lastSegmentPointerBytes)
err = endpoint.metainfo.Delete(ctx, lastSegmentKey, lastSegmentPointerBytes)
if err != nil {
endpoint.log.Error("unable to delete pointer", zap.String("segmentPath", lastSegmentPath), zap.Error(err))
endpoint.log.Error("unable to delete pointer", zap.ByteString("segmentPath", lastSegmentKey), zap.Error(err))
return nil, rpcstatus.Error(rpcstatus.Internal, "unable to commit object")
}
}
@ -820,13 +798,13 @@ func (endpoint *Endpoint) commitObject(ctx context.Context, req *pb.ObjectCommit
lastSegmentPointer.Remote.Redundancy = streamID.Redundancy
lastSegmentPointer.Metadata = req.EncryptedMetadata
lastSegmentPath, err := CreatePath(ctx, keyInfo.ProjectID, int64(lastSegment), streamID.Bucket, streamID.EncryptedPath)
lastSegmentLocation, err := CreatePath(ctx, keyInfo.ProjectID, int64(lastSegment), streamID.Bucket, streamID.EncryptedPath)
if err != nil {
endpoint.log.Error("unable to create path", zap.Error(err))
return nil, rpcstatus.Error(rpcstatus.Internal, "unable to commit object")
}
err = endpoint.metainfo.UnsynchronizedPut(ctx, lastSegmentPath, lastSegmentPointer)
err = endpoint.metainfo.UnsynchronizedPut(ctx, lastSegmentLocation.Encode(), lastSegmentPointer)
if err != nil {
endpoint.log.Error("unable to put pointer", zap.Error(err))
return nil, rpcstatus.Error(rpcstatus.Internal, "unable to commit object")
@ -918,13 +896,13 @@ func (endpoint *Endpoint) getObject(ctx context.Context, projectID uuid.UUID, bu
index := int64(0)
for {
path, err := CreatePath(ctx, projectID, index, bucket, encryptedPath)
location, err := CreatePath(ctx, projectID, index, bucket, encryptedPath)
if err != nil {
endpoint.log.Error("unable to get pointer path", zap.Error(err))
return nil, rpcstatus.Error(rpcstatus.Internal, "unable to get object")
}
pointer, err = endpoint.metainfo.Get(ctx, path)
pointer, err = endpoint.metainfo.Get(ctx, location.Encode())
if err != nil {
if storj.ErrObjectNotFound.Has(err) {
break
@ -981,7 +959,7 @@ func (endpoint *Endpoint) ListObjects(ctx context.Context, req *pb.ObjectListReq
metaflags := meta.All
// TODO use flags
segments, more, err := endpoint.metainfo.List(ctx, prefix, string(req.EncryptedCursor), req.Recursive, req.Limit, metaflags)
segments, more, err := endpoint.metainfo.List(ctx, prefix.Encode(), string(req.EncryptedCursor), req.Recursive, req.Limit, metaflags)
if err != nil {
return nil, rpcstatus.Error(rpcstatus.Internal, err.Error())
}
@ -1165,12 +1143,12 @@ func (endpoint *Endpoint) GetObjectIPs(ctx context.Context, req *pb.ObjectGetIPs
// the numberOfSegmentsToFetch is > 0 and until we have fetched that many
// segments.
for i := firstSegment; !numSegmentsKnown || (numSegmentsKnown && numberOfSegmentsToFetch > 0 && i < numberOfSegmentsToFetch); i++ {
path, err := CreatePath(ctx, keyInfo.ProjectID, int64(i), req.Bucket, req.EncryptedPath)
location, err := CreatePath(ctx, keyInfo.ProjectID, int64(i), req.Bucket, req.EncryptedPath)
if err != nil {
return nil, rpcstatus.Error(rpcstatus.InvalidArgument, err.Error())
}
pointer, err := endpoint.metainfo.Get(ctx, path)
pointer, err := endpoint.metainfo.Get(ctx, location.Encode())
if err != nil {
if storj.ErrObjectNotFound.Has(err) {
break
@ -1406,12 +1384,12 @@ func (endpoint *Endpoint) commitSegment(ctx context.Context, req *pb.SegmentComm
}
if savePointer {
path, err := CreatePath(ctx, keyInfo.ProjectID, int64(segmentID.Index), streamID.Bucket, streamID.EncryptedPath)
location, err := CreatePath(ctx, keyInfo.ProjectID, int64(segmentID.Index), streamID.Bucket, streamID.EncryptedPath)
if err != nil {
return nil, nil, rpcstatus.Error(rpcstatus.InvalidArgument, err.Error())
}
err = endpoint.metainfo.UnsynchronizedPut(ctx, path, pointer)
err = endpoint.metainfo.UnsynchronizedPut(ctx, location.Encode(), pointer)
if err != nil {
return nil, nil, rpcstatus.Error(rpcstatus.Internal, err.Error())
}
@ -1491,12 +1469,12 @@ func (endpoint *Endpoint) makeInlineSegment(ctx context.Context, req *pb.Segment
}
if savePointer {
path, err := CreatePath(ctx, keyInfo.ProjectID, int64(req.Position.Index), streamID.Bucket, streamID.EncryptedPath)
location, err := CreatePath(ctx, keyInfo.ProjectID, int64(req.Position.Index), streamID.Bucket, streamID.EncryptedPath)
if err != nil {
return nil, nil, rpcstatus.Error(rpcstatus.InvalidArgument, err.Error())
}
err = endpoint.metainfo.UnsynchronizedPut(ctx, path, pointer)
err = endpoint.metainfo.UnsynchronizedPut(ctx, location.Encode(), pointer)
if err != nil {
return nil, nil, rpcstatus.Error(rpcstatus.Internal, err.Error())
}
@ -1533,7 +1511,7 @@ func (endpoint *Endpoint) BeginDeleteSegment(ctx context.Context, req *pb.Segmen
return nil, err
}
pointer, path, err := endpoint.getPointer(ctx, keyInfo.ProjectID, int64(req.Position.Index), streamID.Bucket, streamID.EncryptedPath)
pointer, location, err := endpoint.getPointer(ctx, keyInfo.ProjectID, int64(req.Position.Index), streamID.Bucket, streamID.EncryptedPath)
if err != nil {
return nil, err
}
@ -1550,7 +1528,7 @@ func (endpoint *Endpoint) BeginDeleteSegment(ctx context.Context, req *pb.Segmen
// moved from FinishDeleteSegment to avoid inconsistency if someone will not
// call FinishDeleteSegment on uplink side
err = endpoint.metainfo.UnsynchronizedDelete(ctx, path)
err = endpoint.metainfo.UnsynchronizedDelete(ctx, location.Encode())
if err != nil {
return nil, rpcstatus.Error(rpcstatus.Internal, err.Error())
}
@ -1902,23 +1880,23 @@ func (endpoint *Endpoint) DownloadSegment(ctx context.Context, req *pb.SegmentDo
// encryptedPath. It returns an error with a specific RPC status.
func (endpoint *Endpoint) getPointer(
ctx context.Context, projectID uuid.UUID, segmentIndex int64, bucket, encryptedPath []byte,
) (_ *pb.Pointer, _ string, err error) {
) (pointer *pb.Pointer, location metabase.SegmentLocation, err error) {
defer mon.Task()(&ctx, projectID.String(), segmentIndex, bucket, encryptedPath)(&err)
path, err := CreatePath(ctx, projectID, segmentIndex, bucket, encryptedPath)
location, err = CreatePath(ctx, projectID, segmentIndex, bucket, encryptedPath)
if err != nil {
return nil, "", rpcstatus.Error(rpcstatus.InvalidArgument, err.Error())
return nil, location, rpcstatus.Error(rpcstatus.InvalidArgument, err.Error())
}
pointer, err := endpoint.metainfo.Get(ctx, path)
pointer, err = endpoint.metainfo.Get(ctx, location.Encode())
if err != nil {
if storj.ErrObjectNotFound.Has(err) {
return nil, "", rpcstatus.Error(rpcstatus.NotFound, err.Error())
return nil, location, rpcstatus.Error(rpcstatus.NotFound, err.Error())
}
endpoint.log.Error("error getting the pointer from metainfo service", zap.Error(err))
return nil, "", rpcstatus.Error(rpcstatus.Internal, err.Error())
return nil, location, rpcstatus.Error(rpcstatus.Internal, err.Error())
}
return pointer, path, nil
return pointer, location, nil
}
// sortLimits sorts order limits and fills missing ones with nil values.
@ -2131,3 +2109,18 @@ func (endpoint *Endpoint) RevokeAPIKey(ctx context.Context, req *pb.RevokeAPIKey
return &pb.RevokeAPIKeyResponse{}, nil
}
// CreatePath creates a segment key.
func CreatePath(ctx context.Context, projectID uuid.UUID, segmentIndex int64, bucket, path []byte) (_ metabase.SegmentLocation, err error) {
// TODO rename to CreateLocation
defer mon.Task()(&ctx)(&err)
if segmentIndex < lastSegment { // lastSegment = -1
return metabase.SegmentLocation{}, errors.New("invalid segment index")
}
return metabase.SegmentLocation{
ProjectID: projectID,
BucketName: string(bucket),
Index: segmentIndex,
ObjectKey: metabase.ObjectKey(path),
}, nil
}
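
A minimal usage sketch (a hypothetical helper, not part of this commit) of how a caller in this package combines the relocated CreatePath with Encode() before talking to the metainfo service; the item type is assumed from the List call used earlier in this file.

// listLastSegments is illustrative only: it builds the last-segment prefix
// for a bucket and lists the entries stored under it.
func listLastSegments(ctx context.Context, endpoint *Endpoint, projectID uuid.UUID, bucket []byte) ([]*pb.ListResponse_Item, bool, error) {
	location, err := CreatePath(ctx, projectID, lastSegment, bucket, []byte{})
	if err != nil {
		return nil, false, err
	}
	// location.Encode() yields the metabase.SegmentKey used as the list prefix.
	return endpoint.metainfo.List(ctx, location.Encode(), "", true, 0, meta.None)
}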

View File

@ -29,7 +29,7 @@ import (
"storj.io/common/testrand"
"storj.io/storj/private/testplanet"
"storj.io/storj/satellite"
satMetainfo "storj.io/storj/satellite/metainfo"
"storj.io/storj/satellite/metainfo/metabase"
"storj.io/uplink"
"storj.io/uplink/private/metainfo"
"storj.io/uplink/private/object"
@ -1549,14 +1549,17 @@ func TestInlineSegmentThreshold(t *testing.T) {
require.NoError(t, err)
// we don't know encrypted path
prefix, err := satMetainfo.CreatePath(ctx, projectID, -1, []byte("test-bucket-inline"), []byte{})
require.NoError(t, err)
items, _, err := planet.Satellites[0].Metainfo.Service.List(ctx, prefix, "", false, 0, meta.All)
location := metabase.SegmentLocation{
ProjectID: projectID,
BucketName: "test-bucket-inline",
Index: metabase.LastSegmentIndex,
}
items, _, err := planet.Satellites[0].Metainfo.Service.List(ctx, location.Encode(), "", false, 0, meta.All)
require.NoError(t, err)
require.Equal(t, 1, len(items))
pointer, err := planet.Satellites[0].Metainfo.Service.Get(ctx, prefix+"/"+items[0].Path)
location.ObjectKey = metabase.ObjectKey(items[0].Path)
pointer, err := planet.Satellites[0].Metainfo.Service.Get(ctx, location.Encode())
require.NoError(t, err)
require.Equal(t, pb.Pointer_INLINE, pointer.Type)
}
@ -1566,14 +1569,19 @@ func TestInlineSegmentThreshold(t *testing.T) {
require.NoError(t, err)
// we don't know encrypted path
prefix, err := satMetainfo.CreatePath(ctx, projectID, -1, []byte("test-bucket-remote"), []byte{})
require.NoError(t, err)
location := metabase.SegmentLocation{
ProjectID: projectID,
BucketName: "test-bucket-remote",
Index: metabase.LastSegmentIndex,
}
items, _, err := planet.Satellites[0].Metainfo.Service.List(ctx, prefix, "", false, 0, meta.All)
items, _, err := planet.Satellites[0].Metainfo.Service.List(ctx, location.Encode(), "", false, 0, meta.All)
require.NoError(t, err)
require.Equal(t, 1, len(items))
pointer, err := planet.Satellites[0].Metainfo.Service.Get(ctx, prefix+"/"+items[0].Path)
location.ObjectKey = metabase.ObjectKey(items[0].Path)
pointer, err := planet.Satellites[0].Metainfo.Service.Get(ctx, location.Encode())
require.NoError(t, err)
require.Equal(t, pb.Pointer_REMOTE, pointer.Type)
}

View File

@ -10,6 +10,7 @@ import (
"go.uber.org/zap"
"storj.io/common/pb"
"storj.io/storj/satellite/metainfo/metabase"
)
// Report represents the deletion status report.
@ -89,7 +90,7 @@ func (r Report) DeletedObjects() ([]*pb.Object, error) {
}
// GenerateReport returns the result of a delete, success, or failure.
func GenerateReport(ctx context.Context, log *zap.Logger, requests []*ObjectIdentifier, deletedPaths [][]byte, pointers []*pb.Pointer) Report {
func GenerateReport(ctx context.Context, log *zap.Logger, requests []*ObjectIdentifier, deletedPaths []metabase.SegmentKey, pointers []*pb.Pointer) Report {
defer mon.Task()(&ctx)(nil)
report := Report{}

View File

@ -14,6 +14,7 @@ import (
"storj.io/common/pb"
"storj.io/common/testcontext"
"storj.io/common/testrand"
"storj.io/storj/satellite/metainfo/metabase"
"storj.io/storj/satellite/metainfo/objectdeletion"
)
@ -46,11 +47,11 @@ func TestReport(t *testing.T) {
}
func createDeletedItems(requests []*objectdeletion.ObjectIdentifier, numDeleted int) ([][]byte, []*pb.Pointer, error) {
func createDeletedItems(requests []*objectdeletion.ObjectIdentifier, numDeleted int) ([]metabase.SegmentKey, []*pb.Pointer, error) {
if numDeleted > len(requests) {
return nil, nil, errs.New("invalid argument")
}
paths := make([][]byte, 0, numDeleted)
paths := make([]metabase.SegmentKey, 0, numDeleted)
pointers := make([]*pb.Pointer, 0, numDeleted)
for i := 0; i < numDeleted; i++ {
path, err := requests[i].SegmentPath(int64(testrand.Intn(10)))

View File

@ -13,6 +13,7 @@ import (
"storj.io/common/pb"
"storj.io/common/storj"
"storj.io/storj/satellite/metainfo/metabase"
)
const (
@ -54,8 +55,8 @@ func (config *Config) Verify() errs.Group {
// PointerDB stores pointers.
type PointerDB interface {
GetItems(ctx context.Context, paths [][]byte) ([]*pb.Pointer, error)
UnsynchronizedGetDel(ctx context.Context, paths [][]byte) (deletedPaths [][]byte, _ []*pb.Pointer, _ error)
GetItems(ctx context.Context, keys []metabase.SegmentKey) ([]*pb.Pointer, error)
UnsynchronizedGetDel(ctx context.Context, keys []metabase.SegmentKey) (deletedKeys []metabase.SegmentKey, _ []*pb.Pointer, _ error)
}
// Service implements the object deletion service
@ -119,11 +120,11 @@ func (service *Service) Delete(ctx context.Context, requests ...*ObjectIdentifie
// DeletePointers returns a list of pointers and their paths that are deleted.
// If an object is not found, we will consider it a successful delete.
func (service *Service) DeletePointers(ctx context.Context, requests []*ObjectIdentifier) (_ []*pb.Pointer, _ [][]byte, err error) {
func (service *Service) DeletePointers(ctx context.Context, requests []*ObjectIdentifier) (_ []*pb.Pointer, _ []metabase.SegmentKey, err error) {
defer mon.Task()(&ctx, len(requests))(&err)
// get first and last segment to determine the object state
lastAndFirstSegmentsPath := [][]byte{}
lastAndFirstSegmentsPath := []metabase.SegmentKey{}
for _, req := range requests {
lastSegmentPath, err := req.SegmentPath(lastSegmentIndex)
if err != nil {
@ -208,10 +209,10 @@ func GroupPiecesByNodeID(pointers []*pb.Pointer) map[storj.NodeID][]storj.PieceI
}
// generateSegmentPathsForCompleteObjects collects segment paths for objects that have a last segment found in pointerDB.
func (service *Service) generateSegmentPathsForCompleteObjects(ctx context.Context, states map[string]*ObjectState) (_ [][]byte, err error) {
func (service *Service) generateSegmentPathsForCompleteObjects(ctx context.Context, states map[string]*ObjectState) (_ []metabase.SegmentKey, err error) {
defer mon.Task()(&ctx)(&err)
segmentPaths := [][]byte{}
segmentPaths := []metabase.SegmentKey{}
for _, state := range states {
switch state.Status() {
@ -259,13 +260,13 @@ func (service *Service) generateSegmentPathsForCompleteObjects(ctx context.Conte
}
// collectSegmentPathsForZombieObjects collects segment paths for objects that have no last segment found in pointerDB.
func (service *Service) collectSegmentPathsForZombieObjects(ctx context.Context, states map[string]*ObjectState) (_ [][]byte, err error) {
func (service *Service) collectSegmentPathsForZombieObjects(ctx context.Context, states map[string]*ObjectState) (_ []metabase.SegmentKey, err error) {
defer mon.Task()(&ctx)(&err)
zombies := map[string]ObjectIdentifier{}
largestLoaded := map[string]int64{}
segmentsToDel := [][]byte{}
segmentsToDel := []metabase.SegmentKey{}
for _, state := range states {
if state.Status() == ObjectActiveOrZombie {
@ -298,7 +299,7 @@ func (service *Service) collectSegmentPathsForZombieObjects(ctx context.Context,
startFrom := largestKnownSegment + 1
largestKnownSegment += int64(service.config.ZombieSegmentsPerRequest)
var zombieSegmentPaths [][]byte
var zombieSegmentPaths []metabase.SegmentKey
for _, id := range zombies {
for i := startFrom; i <= largestKnownSegment; i++ {
path, err := id.SegmentPath(i)

View File

@ -14,10 +14,9 @@ import (
"go.uber.org/zap/zaptest"
"storj.io/common/pb"
"storj.io/common/storj"
"storj.io/common/testcontext"
"storj.io/common/testrand"
"storj.io/common/uuid"
"storj.io/storj/satellite/metainfo/metabase"
"storj.io/storj/satellite/metainfo/objectdeletion"
)
@ -327,7 +326,7 @@ func newPointerDB(objects []*objectdeletion.ObjectIdentifier, segmentType string
return pointerDB, nil
}
func (db *pointerDBMock) GetItems(ctx context.Context, paths [][]byte) ([]*pb.Pointer, error) {
func (db *pointerDBMock) GetItems(ctx context.Context, paths []metabase.SegmentKey) ([]*pb.Pointer, error) {
if db.hasError {
return nil, errs.New("pointerDB failure")
}
@ -338,7 +337,7 @@ func (db *pointerDBMock) GetItems(ctx context.Context, paths [][]byte) ([]*pb.Po
return pointers, nil
}
func (db *pointerDBMock) UnsynchronizedGetDel(ctx context.Context, paths [][]byte) ([][]byte, []*pb.Pointer, error) {
func (db *pointerDBMock) UnsynchronizedGetDel(ctx context.Context, paths []metabase.SegmentKey) ([]metabase.SegmentKey, []*pb.Pointer, error) {
pointers := make([]*pb.Pointer, len(paths))
for i, p := range paths {
pointers[i] = db.pointers[string(p)]
@ -429,21 +428,14 @@ func createPaths(object *objectdeletion.ObjectIdentifier, largestSegmentIdx int)
if segmentIdx == largestSegmentIdx {
segmentIdx = lastSegmentIdx
}
paths = append(paths, createPath(object.ProjectID, object.Bucket, segmentIdx, object.EncryptedPath))
location := metabase.SegmentLocation{
ProjectID: object.ProjectID,
BucketName: string(object.Bucket),
Index: int64(segmentIdx),
ObjectKey: metabase.ObjectKey(string(object.EncryptedPath)),
}
paths = append(paths, location.Encode())
}
return paths
}
func createPath(projectID uuid.UUID, bucket []byte, segmentIdx int, encryptedPath []byte) []byte {
segment := "l"
if segmentIdx > lastSegmentIdx {
segment = "s" + strconv.Itoa(segmentIdx)
}
entries := make([]string, 0)
entries = append(entries, projectID.String())
entries = append(entries, segment)
entries = append(entries, string(bucket))
entries = append(entries, string(encryptedPath))
return []byte(storj.JoinPaths(entries...))
}
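A hedged sketch of the mapping implied by this change: the removed string-joining helper and metabase.SegmentLocation carry the same four components, and the "l" / "s<N>" segment prefix layout is inferred from the removed code rather than stated elsewhere in the diff.

package objectdeletionsketch

import (
	"storj.io/common/uuid"
	"storj.io/storj/satellite/metainfo/metabase"
)

// segmentKeyFor is an illustrative replacement for the removed createPath:
// project ID, segment index, bucket and encrypted path go into a
// SegmentLocation, and Encode is assumed to render the same
// "<project>/l|s<N>/<bucket>/<path>" key the old helper joined by hand.
func segmentKeyFor(projectID uuid.UUID, bucket []byte, segmentIdx int64, encryptedPath []byte) metabase.SegmentKey {
	location := metabase.SegmentLocation{
		ProjectID:  projectID,
		BucketName: string(bucket),
		Index:      segmentIdx,
		ObjectKey:  metabase.ObjectKey(string(encryptedPath)),
	}
	return location.Encode()
}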

View File

@ -7,6 +7,7 @@ import (
"context"
"storj.io/common/pb"
"storj.io/storj/satellite/metainfo/metabase"
)
// ObjectState determines how an object should be handled during
@ -53,7 +54,7 @@ const (
)
// CreateObjectStates creates the current object states.
func CreateObjectStates(ctx context.Context, requests []*ObjectIdentifier, pointers []*pb.Pointer, paths [][]byte) (map[string]*ObjectState, error) {
func CreateObjectStates(ctx context.Context, requests []*ObjectIdentifier, pointers []*pb.Pointer, paths []metabase.SegmentKey) (map[string]*ObjectState, error) {
// Fetch headers to figure out the status of objects.
objects := make(map[string]*ObjectState)

View File

@ -14,6 +14,7 @@ import (
"storj.io/common/pb"
"storj.io/common/storj"
"storj.io/common/uuid"
"storj.io/storj/satellite/metainfo/metabase"
"storj.io/storj/storage"
"storj.io/uplink/private/storage/meta"
)
@ -38,10 +39,10 @@ func NewService(logger *zap.Logger, db PointerDB, bucketsDB BucketsDB) *Service
}
// Put puts pointer to db under specific path.
func (s *Service) Put(ctx context.Context, path string, pointer *pb.Pointer) (err error) {
func (s *Service) Put(ctx context.Context, key metabase.SegmentKey, pointer *pb.Pointer) (err error) {
defer mon.Task()(&ctx)(&err)
if err := sanityCheckPointer(path, pointer); err != nil {
if err := sanityCheckPointer(key, pointer); err != nil {
return Error.Wrap(err)
}
@ -54,15 +55,15 @@ func (s *Service) Put(ctx context.Context, path string, pointer *pb.Pointer) (er
}
// CompareAndSwap is used instead of Put to avoid overwriting existing pointers
err = s.db.CompareAndSwap(ctx, []byte(path), nil, pointerBytes)
err = s.db.CompareAndSwap(ctx, storage.Key(key), nil, pointerBytes)
return Error.Wrap(err)
}
// UnsynchronizedPut puts pointer to db under a specific path without verifying whether a pointer already exists under the same path.
func (s *Service) UnsynchronizedPut(ctx context.Context, path string, pointer *pb.Pointer) (err error) {
func (s *Service) UnsynchronizedPut(ctx context.Context, key metabase.SegmentKey, pointer *pb.Pointer) (err error) {
defer mon.Task()(&ctx)(&err)
if err := sanityCheckPointer(path, pointer); err != nil {
if err := sanityCheckPointer(key, pointer); err != nil {
return Error.Wrap(err)
}
@ -74,13 +75,13 @@ func (s *Service) UnsynchronizedPut(ctx context.Context, path string, pointer *p
return Error.Wrap(err)
}
err = s.db.Put(ctx, []byte(path), pointerBytes)
err = s.db.Put(ctx, storage.Key(key), pointerBytes)
return Error.Wrap(err)
}
// UpdatePieces calls UpdatePiecesCheckDuplicates with checkDuplicates equal to false.
func (s *Service) UpdatePieces(ctx context.Context, path string, ref *pb.Pointer, toAdd, toRemove []*pb.RemotePiece) (pointer *pb.Pointer, err error) {
return s.UpdatePiecesCheckDuplicates(ctx, path, ref, toAdd, toRemove, false)
func (s *Service) UpdatePieces(ctx context.Context, key metabase.SegmentKey, ref *pb.Pointer, toAdd, toRemove []*pb.RemotePiece) (pointer *pb.Pointer, err error) {
return s.UpdatePiecesCheckDuplicates(ctx, key, ref, toAdd, toRemove, false)
}
// UpdatePiecesCheckDuplicates atomically adds toAdd pieces and removes toRemove pieces from
@ -93,22 +94,22 @@ func (s *Service) UpdatePieces(ctx context.Context, path string, ref *pb.Pointer
// Then it will remove the toRemove pieces and then it will add the toAdd pieces.
// Replacing the node ID and the hash of a piece can be done by adding the
// piece to both toAdd and toRemove.
func (s *Service) UpdatePiecesCheckDuplicates(ctx context.Context, path string, ref *pb.Pointer, toAdd, toRemove []*pb.RemotePiece, checkDuplicates bool) (pointer *pb.Pointer, err error) {
func (s *Service) UpdatePiecesCheckDuplicates(ctx context.Context, key metabase.SegmentKey, ref *pb.Pointer, toAdd, toRemove []*pb.RemotePiece, checkDuplicates bool) (pointer *pb.Pointer, err error) {
defer mon.Task()(&ctx)(&err)
if err := sanityCheckPointer(path, ref); err != nil {
if err := sanityCheckPointer(key, ref); err != nil {
return nil, Error.Wrap(err)
}
defer func() {
if err == nil {
err = sanityCheckPointer(path, pointer)
err = sanityCheckPointer(key, pointer)
}
}()
for {
// read the pointer
oldPointerBytes, err := s.db.Get(ctx, []byte(path))
oldPointerBytes, err := s.db.Get(ctx, storage.Key(key))
if err != nil {
if storage.ErrKeyNotFound.Has(err) {
err = storj.ErrObjectNotFound.Wrap(err)
@ -143,7 +144,7 @@ func (s *Service) UpdatePiecesCheckDuplicates(ctx context.Context, path string,
for _, piece := range toAdd {
_, ok := nodePieceMap[piece.NodeId]
if ok {
return nil, ErrNodeAlreadyExists.New("node id already exists in pointer. Path: %s, NodeID: %s", path, piece.NodeId.String())
return nil, ErrNodeAlreadyExists.New("node id already exists in pointer. Key: %s, NodeID: %s", key, piece.NodeId.String())
}
nodePieceMap[piece.NodeId] = struct{}{}
}
@ -191,7 +192,7 @@ func (s *Service) UpdatePiecesCheckDuplicates(ctx context.Context, path string,
}
// write the pointer using compare-and-swap
err = s.db.CompareAndSwap(ctx, []byte(path), oldPointerBytes, newPointerBytes)
err = s.db.CompareAndSwap(ctx, storage.Key(key), oldPointerBytes, newPointerBytes)
if storage.ErrValueChanged.Has(err) {
continue
}
@ -206,9 +207,9 @@ func (s *Service) UpdatePiecesCheckDuplicates(ctx context.Context, path string,
}
// Get gets decoded pointer from DB.
func (s *Service) Get(ctx context.Context, path string) (_ *pb.Pointer, err error) {
func (s *Service) Get(ctx context.Context, key metabase.SegmentKey) (_ *pb.Pointer, err error) {
defer mon.Task()(&ctx)(&err)
_, pointer, err := s.GetWithBytes(ctx, path)
_, pointer, err := s.GetWithBytes(ctx, key)
if err != nil {
return nil, err
}
@ -218,13 +219,13 @@ func (s *Service) Get(ctx context.Context, path string) (_ *pb.Pointer, err erro
// GetItems gets decoded pointers from DB.
// The return value is in the same order as the argument paths.
func (s *Service) GetItems(ctx context.Context, paths [][]byte) (_ []*pb.Pointer, err error) {
func (s *Service) GetItems(ctx context.Context, keys []metabase.SegmentKey) (_ []*pb.Pointer, err error) {
defer mon.Task()(&ctx)(&err)
keys := make(storage.Keys, len(paths))
for i := range paths {
keys[i] = paths[i]
storageKeys := make(storage.Keys, len(keys))
for i := range keys {
storageKeys[i] = storage.Key(keys[i])
}
pointerBytes, err := s.db.GetAll(ctx, keys)
pointerBytes, err := s.db.GetAll(ctx, storageKeys)
if err != nil {
return nil, Error.Wrap(err)
}
@ -245,10 +246,10 @@ func (s *Service) GetItems(ctx context.Context, paths [][]byte) (_ []*pb.Pointer
}
// GetWithBytes gets the protocol buffers encoded and decoded pointer from the DB.
func (s *Service) GetWithBytes(ctx context.Context, path string) (pointerBytes []byte, pointer *pb.Pointer, err error) {
func (s *Service) GetWithBytes(ctx context.Context, key metabase.SegmentKey) (pointerBytes []byte, pointer *pb.Pointer, err error) {
defer mon.Task()(&ctx)(&err)
pointerBytes, err = s.db.Get(ctx, []byte(path))
pointerBytes, err = s.db.Get(ctx, storage.Key(key))
if err != nil {
if storage.ErrKeyNotFound.Has(err) {
err = storj.ErrObjectNotFound.Wrap(err)
@ -266,12 +267,12 @@ func (s *Service) GetWithBytes(ctx context.Context, path string) (pointerBytes [
}
// List returns all Path keys in the pointers bucket.
func (s *Service) List(ctx context.Context, prefix string, startAfter string, recursive bool, limit int32,
func (s *Service) List(ctx context.Context, prefix metabase.SegmentKey, startAfter string, recursive bool, limit int32,
metaFlags uint32) (items []*pb.ListResponse_Item, more bool, err error) {
defer mon.Task()(&ctx)(&err)
var prefixKey storage.Key
if prefix != "" {
if len(prefix) != 0 {
prefixKey = storage.Key(prefix)
if prefix[len(prefix)-1] != storage.Delimiter {
prefixKey = append(prefixKey, storage.Delimiter)
@ -347,10 +348,10 @@ func (s *Service) setMetadata(item *pb.ListResponse_Item, data []byte, metaFlags
}
// Delete deletes a pointer bytes when it matches oldPointerBytes, otherwise it'll fail.
func (s *Service) Delete(ctx context.Context, path string, oldPointerBytes []byte) (err error) {
func (s *Service) Delete(ctx context.Context, key metabase.SegmentKey, oldPointerBytes []byte) (err error) {
defer mon.Task()(&ctx)(&err)
err = s.db.CompareAndSwap(ctx, []byte(path), oldPointerBytes, nil)
err = s.db.CompareAndSwap(ctx, storage.Key(key), oldPointerBytes, nil)
if storage.ErrKeyNotFound.Has(err) {
err = storj.ErrObjectNotFound.Wrap(err)
}
@ -359,18 +360,18 @@ func (s *Service) Delete(ctx context.Context, path string, oldPointerBytes []byt
// UnsynchronizedGetDel deletes items from db without verifying whether the pointers have changed in the database,
// and it returns deleted items.
func (s *Service) UnsynchronizedGetDel(ctx context.Context, paths [][]byte) ([][]byte, []*pb.Pointer, error) {
keys := make(storage.Keys, len(paths))
for i := range paths {
keys[i] = paths[i]
func (s *Service) UnsynchronizedGetDel(ctx context.Context, keys []metabase.SegmentKey) ([]metabase.SegmentKey, []*pb.Pointer, error) {
storageKeys := make(storage.Keys, len(keys))
for i := range keys {
storageKeys[i] = storage.Key(keys[i])
}
items, err := s.db.DeleteMultiple(ctx, keys)
items, err := s.db.DeleteMultiple(ctx, storageKeys)
if err != nil {
return nil, nil, Error.Wrap(err)
}
pointerPaths := make([][]byte, 0, len(items))
pointerPaths := make([]metabase.SegmentKey, 0, len(items))
pointers := make([]*pb.Pointer, 0, len(items))
for _, item := range items {
@ -380,7 +381,7 @@ func (s *Service) UnsynchronizedGetDel(ctx context.Context, paths [][]byte) ([][
return nil, nil, Error.Wrap(err)
}
pointerPaths = append(pointerPaths, item.Key)
pointerPaths = append(pointerPaths, metabase.SegmentKey(item.Key))
pointers = append(pointers, data)
}
@ -388,10 +389,10 @@ func (s *Service) UnsynchronizedGetDel(ctx context.Context, paths [][]byte) ([][
}
// UnsynchronizedDelete deletes item from db without verifying whether the pointer has changed in the database.
func (s *Service) UnsynchronizedDelete(ctx context.Context, path string) (err error) {
func (s *Service) UnsynchronizedDelete(ctx context.Context, key metabase.SegmentKey) (err error) {
defer mon.Task()(&ctx)(&err)
err = s.db.Delete(ctx, []byte(path))
err = s.db.Delete(ctx, storage.Key(key))
if storage.ErrKeyNotFound.Has(err) {
err = storj.ErrObjectNotFound.Wrap(err)
}
@ -438,7 +439,7 @@ func (s *Service) IsBucketEmpty(ctx context.Context, projectID uuid.UUID, bucket
return false, Error.Wrap(err)
}
items, _, err := s.List(ctx, prefix, "", true, 1, 0)
items, _, err := s.List(ctx, prefix.Encode(), "", true, 1, 0)
if err != nil {
return false, Error.Wrap(err)
}
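A usage sketch of the SegmentKey-typed Service API, assuming a wired-up *Service (for example from testplanet); it only exercises the signatures shown in this diff.

package metainfosketch

import (
	"context"

	"storj.io/common/pb"
	"storj.io/storj/satellite/metainfo"
	"storj.io/storj/satellite/metainfo/metabase"
)

func exampleServiceUsage(ctx context.Context, service *metainfo.Service, key metabase.SegmentKey, pointer *pb.Pointer) error {
	// Put refuses to overwrite an existing key (CompareAndSwap against nil).
	if err := service.Put(ctx, key, pointer); err != nil {
		return err
	}

	// Read the pointer back under the same key.
	if _, err := service.Get(ctx, key); err != nil {
		return err
	}

	// List everything recursively; an empty SegmentKey means no prefix.
	if _, _, err := service.List(ctx, metabase.SegmentKey{}, "", true, 0, 0); err != nil {
		return err
	}

	// Delete without comparing against the previously read pointer bytes.
	return service.UnsynchronizedDelete(ctx, key)
}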

View File

@ -19,6 +19,7 @@ import (
"storj.io/common/testrand"
"storj.io/storj/private/testplanet"
"storj.io/storj/satellite/metainfo"
"storj.io/storj/satellite/metainfo/metabase"
"storj.io/storj/storage"
)
@ -95,14 +96,14 @@ func TestGetItems_ReturnValueOrder(t *testing.T) {
keys, err := satellite.Metainfo.Database.List(ctx, nil, numItems)
require.NoError(t, err)
var paths = make([][]byte, 0, numItems+1)
var segmentKeys = make([]metabase.SegmentKey, 0, numItems+1)
var lastSegmentPathIndices []int
// Random nil pointer
nilPointerIndex := testrand.Intn(numItems + 1)
for i, key := range keys {
paths = append(paths, []byte(key.String()))
segmentKeys = append(segmentKeys, metabase.SegmentKey(key))
segmentIdx, err := parseSegmentPath([]byte(key.String()))
require.NoError(t, err)
@ -112,11 +113,11 @@ func TestGetItems_ReturnValueOrder(t *testing.T) {
// set a random path to be nil.
if nilPointerIndex == i {
paths[nilPointerIndex] = nil
segmentKeys[nilPointerIndex] = nil
}
}
pointers, err := satellite.Metainfo.Service.GetItems(ctx, paths)
pointers, err := satellite.Metainfo.Service.GetItems(ctx, segmentKeys)
require.NoError(t, err)
for i, p := range pointers {
@ -154,17 +155,12 @@ func TestUpdatePiecesCheckDuplicates(t *testing.T) {
keys, err := satellite.Metainfo.Database.List(ctx, nil, 1)
require.NoError(t, err)
require.Equal(t, 1, len(keys))
var pointer *pb.Pointer
var encPath string
for _, key := range keys {
encPath = string(key)
pointer, err = satellite.Metainfo.Service.Get(ctx, encPath)
require.NoError(t, err)
break
}
require.NotNil(t, pointer)
require.NotNil(t, encPath)
encPath, err := metabase.ParseSegmentKey(metabase.SegmentKey(keys[0]))
require.NoError(t, err)
pointer, err := satellite.Metainfo.Service.Get(ctx, encPath.Encode())
require.NoError(t, err)
pieces := pointer.GetRemote().GetRemotePieces()
require.False(t, hasDuplicates(pointer.GetRemote().GetRemotePieces()))
@ -182,12 +178,12 @@ func TestUpdatePiecesCheckDuplicates(t *testing.T) {
}
// test no duplicates
updPointer, err := satellite.Metainfo.Service.UpdatePiecesCheckDuplicates(ctx, encPath, pointer, []*pb.RemotePiece{addPiece}, []*pb.RemotePiece{removePiece}, true)
updPointer, err := satellite.Metainfo.Service.UpdatePiecesCheckDuplicates(ctx, encPath.Encode(), pointer, []*pb.RemotePiece{addPiece}, []*pb.RemotePiece{removePiece}, true)
require.True(t, metainfo.ErrNodeAlreadyExists.Has(err))
require.False(t, hasDuplicates(updPointer.GetRemote().GetRemotePieces()))
// test allow duplicates
updPointer, err = satellite.Metainfo.Service.UpdatePieces(ctx, encPath, pointer, []*pb.RemotePiece{addPiece}, []*pb.RemotePiece{removePiece})
updPointer, err = satellite.Metainfo.Service.UpdatePieces(ctx, encPath.Encode(), pointer, []*pb.RemotePiece{addPiece}, []*pb.RemotePiece{removePiece})
require.NoError(t, err)
require.True(t, hasDuplicates(updPointer.GetRemote().GetRemotePieces()))
})
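A small sketch of the parse/encode round-trip the updated test relies on: a raw key from the database listing is parsed into a SegmentLocation and re-encoded into the canonical SegmentKey; only ParseSegmentKey and Encode as used above are assumed.

package metainfosketch

import (
	"fmt"

	"storj.io/storj/satellite/metainfo/metabase"
)

func roundTripKey(raw []byte) (metabase.SegmentKey, error) {
	// ParseSegmentKey splits the raw key into its location components.
	location, err := metabase.ParseSegmentKey(metabase.SegmentKey(raw))
	if err != nil {
		return nil, err
	}
	fmt.Printf("project=%s bucket=%s index=%d\n", location.ProjectID, location.BucketName, location.Index)
	// Encode produces the canonical key for that location.
	return location.Encode(), nil
}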

View File

@ -7,10 +7,11 @@ package metainfo
import (
"storj.io/common/pb"
"storj.io/storj/satellite/metainfo/metabase"
)
// sanityCheckPointer implements sanity checks for test data;
// we don't need this in production code.
func sanityCheckPointer(path string, pointer *pb.Pointer) (err error) {
func sanityCheckPointer(key metabase.SegmentKey, pointer *pb.Pointer) (err error) {
return nil
}

View File

@ -6,16 +6,18 @@
package metainfo
import (
"bytes"
"storj.io/common/pb"
"storj.io/common/storj"
"storj.io/storj/satellite/metainfo/metabase"
)
// sanityCheckPointer implements sanity checks for test data;
// we don't need this in production code.
func sanityCheckPointer(path string, pointer *pb.Pointer) (err error) {
tokens := storj.SplitPath(path)
func sanityCheckPointer(key metabase.SegmentKey, pointer *pb.Pointer) (err error) {
tokens := bytes.Split(key, []byte("/"))
if len(tokens) <= 3 {
return Error.New("invalid path %q", path)
return Error.New("invalid path %s", key)
}
if pointer.Type == pb.Pointer_REMOTE {

View File

@ -16,6 +16,7 @@ import (
"storj.io/common/testcontext"
"storj.io/common/testrand"
"storj.io/storj/private/testplanet"
"storj.io/storj/satellite/metainfo/metabase"
"storj.io/storj/storage"
)
@ -128,11 +129,11 @@ func TestIdentifyIrreparableSegments(t *testing.T) {
// put test pointer to db
metainfo := planet.Satellites[0].Metainfo.Service
err := metainfo.Put(ctx, pointerPath, pointer)
err := metainfo.Put(ctx, metabase.SegmentKey(pointerPath), pointer)
require.NoError(t, err)
// modify pointer to make it expired and put to db
pointer.ExpirationDate = time.Now().Add(-time.Hour)
err = metainfo.Put(ctx, pointerPath+"-expired", pointer)
err = metainfo.Put(ctx, metabase.SegmentKey(pointerPath+"-expired"), pointer)
require.NoError(t, err)
err = checker.IdentifyInjuredSegments(ctx)
@ -185,9 +186,9 @@ func TestIdentifyIrreparableSegments(t *testing.T) {
},
}
// update test pointer in db
err = metainfo.UnsynchronizedDelete(ctx, pointerPath)
err = metainfo.UnsynchronizedDelete(ctx, metabase.SegmentKey(pointerPath))
require.NoError(t, err)
err = metainfo.Put(ctx, pointerPath, pointer)
err = metainfo.Put(ctx, metabase.SegmentKey(pointerPath), pointer)
require.NoError(t, err)
err = checker.IdentifyInjuredSegments(ctx)
@ -237,6 +238,6 @@ func insertPointer(ctx context.Context, t *testing.T, planet *testplanet.Planet,
// put test pointer to db
pointerdb := planet.Satellites[0].Metainfo.Service
err := pointerdb.Put(ctx, pointerPath, pointer)
err := pointerdb.Put(ctx, metabase.SegmentKey(pointerPath), pointer)
require.NoError(t, err)
}

View File

@ -20,6 +20,7 @@ import (
"storj.io/common/testrand"
"storj.io/storj/private/testplanet"
"storj.io/storj/satellite"
"storj.io/storj/satellite/metainfo/metabase"
"storj.io/storj/satellite/overlay"
"storj.io/storj/storage"
)
@ -468,9 +469,9 @@ func TestRemoveExpiredSegmentFromQueue(t *testing.T) {
require.NoError(t, err)
// replace pointer with one that is already expired
pointer.ExpirationDate = time.Now().Add(-time.Hour)
err = satellite.Metainfo.Service.UnsynchronizedDelete(ctx, encryptedPath)
err = satellite.Metainfo.Service.UnsynchronizedDelete(ctx, metabase.SegmentKey(encryptedPath))
require.NoError(t, err)
err = satellite.Metainfo.Service.UnsynchronizedPut(ctx, encryptedPath, pointer)
err = satellite.Metainfo.Service.UnsynchronizedPut(ctx, metabase.SegmentKey(encryptedPath), pointer)
require.NoError(t, err)
// Verify that the segment is on the repair queue
@ -653,7 +654,7 @@ func TestIrreparableSegmentAccordingToOverlay(t *testing.T) {
// Verify that the segment _is_ in the irreparable db
irreparableSegment, err = satellite.DB.Irreparable().Get(ctx, []byte(encryptedPath))
require.NoError(t, err)
require.Equal(t, encryptedPath, string(irreparableSegment.Path))
require.Equal(t, encryptedPath, metabase.SegmentKey(irreparableSegment.Path))
lastAttemptTime := time.Unix(irreparableSegment.LastRepairAttempt, 0)
require.Falsef(t, lastAttemptTime.Before(beforeRepair), "%s is before %s", lastAttemptTime, beforeRepair)
require.Falsef(t, lastAttemptTime.After(afterRepair), "%s is after %s", lastAttemptTime, afterRepair)
@ -770,7 +771,7 @@ func TestIrreparableSegmentNodesOffline(t *testing.T) {
// Verify that the segment _is_ in the irreparable db
irreparableSegment, err = satellite.DB.Irreparable().Get(ctx, []byte(encryptedPath))
require.NoError(t, err)
require.Equal(t, encryptedPath, string(irreparableSegment.Path))
require.Equal(t, encryptedPath, metabase.SegmentKey(irreparableSegment.Path))
lastAttemptTime := time.Unix(irreparableSegment.LastRepairAttempt, 0)
require.Falsef(t, lastAttemptTime.Before(beforeRepair), "%s is before %s", lastAttemptTime, beforeRepair)
require.Falsef(t, lastAttemptTime.After(afterRepair), "%s is after %s", lastAttemptTime, afterRepair)
@ -822,14 +823,14 @@ func testRepairMultipleDisqualifiedAndSuspended(t *testing.T, inMemoryRepair boo
// get a remote segment from metainfo
metainfo := satellite.Metainfo.Service
listResponse, _, err := metainfo.List(ctx, "", "", true, 0, 0)
listResponse, _, err := metainfo.List(ctx, metabase.SegmentKey{}, "", true, 0, 0)
require.NoError(t, err)
var path string
var key metabase.SegmentKey
var pointer *pb.Pointer
for _, v := range listResponse {
path = v.GetPath()
pointer, err = metainfo.Get(ctx, path)
key = metabase.SegmentKey(v.GetPath())
pointer, err = metainfo.Get(ctx, key)
require.NoError(t, err)
if pointer.GetType() == pb.Pointer_REMOTE {
break
@ -888,7 +889,7 @@ func testRepairMultipleDisqualifiedAndSuspended(t *testing.T, inMemoryRepair boo
require.Equal(t, newData, testData)
// updated pointer should not contain any of the disqualified or suspended nodes
pointer, err = metainfo.Get(ctx, path)
pointer, err = metainfo.Get(ctx, key)
require.NoError(t, err)
remotePieces = pointer.GetRemote().GetRemotePieces()
@ -1289,15 +1290,15 @@ func testRepairGracefullyExited(t *testing.T, inMemoryRepair bool) {
// get a remote segment from metainfo
metainfo := satellite.Metainfo.Service
listResponse, _, err := metainfo.List(ctx, "", "", true, 0, 0)
listResponse, _, err := metainfo.List(ctx, metabase.SegmentKey{}, "", true, 0, 0)
require.NoError(t, err)
require.NotNil(t, listResponse)
var path string
var key metabase.SegmentKey
var pointer *pb.Pointer
for _, v := range listResponse {
path = v.GetPath()
pointer, err = metainfo.Get(ctx, path)
key = metabase.SegmentKey(v.GetPath())
pointer, err = metainfo.Get(ctx, key)
require.NoError(t, err)
if pointer.GetType() == pb.Pointer_REMOTE {
break
@ -1353,7 +1354,7 @@ func testRepairGracefullyExited(t *testing.T, inMemoryRepair bool) {
require.Equal(t, newData, testData)
// updated pointer should not contain any of the gracefully exited nodes
pointer, err = metainfo.Get(ctx, path)
pointer, err = metainfo.Get(ctx, key)
require.NoError(t, err)
remotePieces = pointer.GetRemote().GetRemotePieces()
@ -1367,25 +1368,25 @@ func testRepairGracefullyExited(t *testing.T, inMemoryRepair bool) {
// nolint:golint
func getRemoteSegment(
t *testing.T, ctx context.Context, satellite *testplanet.Satellite,
) (_ *pb.Pointer, path string) {
) (_ *pb.Pointer, key metabase.SegmentKey) {
t.Helper()
// get a remote segment from metainfo
metainfo := satellite.Metainfo.Service
listResponse, _, err := metainfo.List(ctx, "", "", true, 0, 0)
listResponse, _, err := metainfo.List(ctx, metabase.SegmentKey{}, "", true, 0, 0)
require.NoError(t, err)
for _, v := range listResponse {
path := v.GetPath()
pointer, err := metainfo.Get(ctx, path)
key := metabase.SegmentKey(v.GetPath())
pointer, err := metainfo.Get(ctx, key)
require.NoError(t, err)
if pointer.GetType() == pb.Pointer_REMOTE {
return pointer, path
return pointer, key
}
}
t.Fatal("satellite doesn't have any remote segment")
return nil, ""
return nil, key
}
// corruptPieceData manipulates piece data on a storage node.

View File

@ -101,7 +101,7 @@ func (repairer *SegmentRepairer) Repair(ctx context.Context, path storj.Path) (s
defer mon.Task()(&ctx, path)(&err)
// Read the segment pointer from the metainfo
pointer, err := repairer.metainfo.Get(ctx, path)
pointer, err := repairer.metainfo.Get(ctx, metabase.SegmentKey(path))
if err != nil {
if storj.ErrObjectNotFound.Has(err) {
mon.Meter("repair_unnecessary").Mark(1) //locked
@ -330,7 +330,7 @@ func (repairer *SegmentRepairer) Repair(ctx context.Context, path storj.Path) (s
pointer.RepairCount++
// Update the segment pointer in the metainfo
_, err = repairer.metainfo.UpdatePieces(ctx, path, pointer, repairedPieces, toRemove)
_, err = repairer.metainfo.UpdatePieces(ctx, metabase.SegmentKey(path), pointer, repairedPieces, toRemove)
if err != nil {
return false, metainfoPutError.Wrap(err)
}

View File

@ -16,6 +16,7 @@ import (
"storj.io/common/testcontext"
"storj.io/common/testrand"
"storj.io/storj/private/testplanet"
"storj.io/storj/satellite/metainfo/metabase"
"storj.io/storj/satellite/overlay"
"storj.io/storj/storage"
)
@ -171,7 +172,7 @@ func findNodeToExit(ctx context.Context, planet *testplanet.Planet, objects int)
}
for _, key := range keys {
pointer, err := satellite.Metainfo.Service.Get(ctx, string(key))
pointer, err := satellite.Metainfo.Service.Get(ctx, metabase.SegmentKey(key))
if err != nil {
return nil, err
}