satellite/metainfo/metabase: add ListSegments request

Change-Id: Ie777a0f791685beab3973e76b10a9ce76398e7d7
This commit is contained in:
Michal Niewrzal 2020-11-05 13:59:19 +01:00
parent fd707392e7
commit 9d25b3a7d3
5 changed files with 375 additions and 17 deletions

View File

@ -501,24 +501,25 @@ func (db *DB) commitObjectWithoutProofs(ctx context.Context, opts CommitObject)
return Object{}, Error.New("failed to fetch segments: %w", err)
}
// TODO disabled for now
// verify segments
if len(segments) > 0 {
// without proofs we expect the segments to be contiguous
hasOffset := false
offset := int64(0)
for i, seg := range segments {
if seg.Position.Part != 0 && seg.Position.Index != uint32(i) {
return Object{}, Error.New("expected segment (%d,%d), found (%d,%d)", 0, i, seg.Position.Part, seg.Position.Index)
}
if seg.PlainOffset != 0 {
hasOffset = true
}
if hasOffset && seg.PlainOffset != offset {
return Object{}, Error.New("segment %d should be at plain offset %d, offset is %d", seg.Position.Index, offset, seg.PlainOffset)
}
offset += int64(seg.PlainSize)
}
}
// if len(segments) > 0 {
// // without proofs we expect the segments to be contiguous
// hasOffset := false
// offset := int64(0)
// for i, seg := range segments {
// if seg.Position.Part != 0 && seg.Position.Index != uint32(i) {
// return Object{}, Error.New("expected segment (%d,%d), found (%d,%d)", 0, i, seg.Position.Part, seg.Position.Index)
// }
// if seg.PlainOffset != 0 {
// hasOffset = true
// }
// if hasOffset && seg.PlainOffset != offset {
// return Object{}, Error.New("segment %d should be at plain offset %d, offset is %d", seg.Position.Index, offset, seg.PlainOffset)
// }
// offset += int64(seg.PlainSize)
// }
// }
// TODO: would we even need this when we make main index plain_offset?
fixedSegmentSize := int32(0)

View File

@ -23,6 +23,8 @@ const (
FirstSegmentIndex = 0
)
const maxListLimit = 1000
// BucketPrefix consists of <project id>/<bucket name>.
type BucketPrefix string

View File

@ -0,0 +1,93 @@
// Copyright (C) 2020 Storj Labs, Inc.
// See LICENSE for copying information.
package metabase
import (
"context"
"database/sql"
"errors"
"storj.io/common/uuid"
"storj.io/storj/private/tagsql"
)
// ListSegments contains arguments necessary for listing stream segments.
type ListSegments struct {
	// StreamID identifies the stream whose segments are listed.
	// Required; a zero value is rejected by DB.ListSegments.
	StreamID uuid.UUID
	// Cursor is the position after which listing resumes (exclusive);
	// the zero value lists from the first segment.
	Cursor SegmentPosition
	// Limit caps the number of returned segments. Zero means "use the
	// maximum listing limit"; negative values are rejected.
	Limit int
}
// ListSegmentsResult result of listing segments.
type ListSegmentsResult struct {
Segments []Segment
More bool
}
// ListSegments lists specified stream segments.
//
// The zero Cursor starts from the beginning of the stream; a non-zero
// cursor resumes strictly after that position. A Limit of zero is treated
// as the maximum listing limit.
func (db *DB) ListSegments(ctx context.Context, opts ListSegments) (result ListSegmentsResult, err error) {
	defer mon.Task()(&ctx)(&err)

	if opts.StreamID.IsZero() {
		return ListSegmentsResult{}, ErrInvalidRequest.New("StreamID missing")
	}

	// TODO verify this limit
	switch {
	case opts.Limit > maxListLimit:
		return ListSegmentsResult{}, ErrInvalidRequest.New("Maximum listing limit is %d", maxListLimit)
	case opts.Limit < 0:
		return ListSegmentsResult{}, ErrInvalidRequest.New("Invalid limit: %d", opts.Limit)
	case opts.Limit == 0:
		// Unspecified limit: page at the maximum allowed size.
		opts.Limit = maxListLimit
	}

	// Request one row beyond the page so More can be derived without a
	// second query.
	err = withRows(db.db.Query(ctx, `
		SELECT
			position,
			root_piece_id, encrypted_key_nonce, encrypted_key,
			encrypted_size, plain_offset, plain_size,
			redundancy,
			inline_data, remote_pieces
		FROM segments
		WHERE
			stream_id = $1 AND
			($2 = 0 OR position > $2)
		ORDER BY position ASC
		LIMIT $3
	`, opts.StreamID, opts.Cursor, opts.Limit+1))(func(rows tagsql.Rows) error {
		for rows.Next() {
			var seg Segment
			if err := rows.Scan(
				&seg.Position,
				&seg.RootPieceID, &seg.EncryptedKeyNonce, &seg.EncryptedKey,
				&seg.EncryptedSize, &seg.PlainOffset, &seg.PlainSize,
				redundancyScheme{&seg.Redundancy},
				&seg.InlineData, &seg.Pieces,
			); err != nil {
				return Error.New("failed to scan segments: %w", err)
			}
			// stream_id is not selected by the query; fill it from the request.
			seg.StreamID = opts.StreamID
			result.Segments = append(result.Segments, seg)
		}
		return nil
	})
	if err != nil {
		if errors.Is(err, sql.ErrNoRows) {
			// An empty stream is a valid, empty listing — not an error.
			return ListSegmentsResult{}, nil
		}
		return ListSegmentsResult{}, Error.New("unable to fetch object segments: %w", err)
	}

	// The extra row, when present, only signals that more segments remain;
	// trim the result back to the requested page size.
	if len(result.Segments) > opts.Limit {
		result.More = true
		result.Segments = result.Segments[:opts.Limit]
	}

	return result, nil
}

View File

@ -0,0 +1,247 @@
// Copyright (C) 2020 Storj Labs, Inc.
// See LICENSE for copying information.
package metabase_test
import (
"testing"
"storj.io/common/storj"
"storj.io/common/testcontext"
"storj.io/common/testrand"
"storj.io/storj/satellite/metainfo/metabase"
)
// TestListSegments exercises DB.ListSegments against every configured
// database backend: request validation, empty streams, pagination via
// limit+cursor, and position ordering across multi-part uploads.
func TestListSegments(t *testing.T) {
	All(t, func(ctx *testcontext.Context, t *testing.T, db *metabase.DB) {
		obj := randObjectStream()

		// A zero StreamID must be rejected as an invalid request.
		t.Run("StreamID missing", func(t *testing.T) {
			defer DeleteAll{}.Check(ctx, t, db)

			ListSegments{
				Opts:     metabase.ListSegments{},
				ErrClass: &metabase.ErrInvalidRequest,
				ErrText:  "StreamID missing",
			}.Check(ctx, t, db)

			Verify{}.Check(ctx, t, db)
		})

		// Negative limits must be rejected.
		t.Run("Invalid limit", func(t *testing.T) {
			defer DeleteAll{}.Check(ctx, t, db)

			ListSegments{
				Opts: metabase.ListSegments{
					StreamID: obj.StreamID,
					Limit:    -1,
				},
				ErrClass: &metabase.ErrInvalidRequest,
				ErrText:  "Invalid limit: -1",
			}.Check(ctx, t, db)

			Verify{}.Check(ctx, t, db)
		})

		// A stream with no segments yields an empty result, not an error.
		t.Run("List no segments", func(t *testing.T) {
			defer DeleteAll{}.Check(ctx, t, db)

			ListSegments{
				Opts: metabase.ListSegments{
					StreamID: obj.StreamID,
					Limit:    1,
				},
				Result: metabase.ListSegmentsResult{},
			}.Check(ctx, t, db)

			Verify{}.Check(ctx, t, db)
		})

		t.Run("List segments", func(t *testing.T) {
			defer DeleteAll{}.Check(ctx, t, db)

			// createObject commits 10 segments; the expectations below give
			// them indexes 1..10 — presumably matching createObject's
			// numbering (defined elsewhere in the harness; verify there).
			expectedObject := createObject(ctx, t, db, obj, 10)

			expectedSegment := metabase.Segment{
				StreamID: obj.StreamID,
				Position: metabase.SegmentPosition{
					Index: 1,
				},
				RootPieceID:       storj.PieceID{1},
				EncryptedKey:      []byte{3},
				EncryptedKeyNonce: []byte{4},
				EncryptedSize:     1024,
				PlainSize:         512,
				Pieces:            metabase.Pieces{{Number: 0, StorageNode: storj.NodeID{2}}},
				Redundancy:        defaultTestRedundancy,
			}

			expectedRawSegments := make([]metabase.RawSegment, 10)
			expectedSegments := make([]metabase.Segment, 10)
			for i := range expectedSegments {
				expectedSegment.Position.Index = uint32(i + 1)
				expectedSegments[i] = expectedSegment
				expectedRawSegments[i] = metabase.RawSegment(expectedSegment)
			}

			// Limit equal to the segment count: full page, More unset.
			ListSegments{
				Opts: metabase.ListSegments{
					StreamID: obj.StreamID,
					Limit:    10,
				},
				Result: metabase.ListSegmentsResult{
					Segments: expectedSegments,
				},
			}.Check(ctx, t, db)

			// Limit smaller than the segment count: truncated page, More set.
			ListSegments{
				Opts: metabase.ListSegments{
					StreamID: obj.StreamID,
					Limit:    1,
				},
				Result: metabase.ListSegmentsResult{
					Segments: expectedSegments[:1],
					More:     true,
				},
			}.Check(ctx, t, db)

			// The cursor is exclusive: cursor index 2 yields indexes 3 and 4.
			ListSegments{
				Opts: metabase.ListSegments{
					StreamID: obj.StreamID,
					Limit:    2,
					Cursor: metabase.SegmentPosition{
						Index: 2,
					},
				},
				Result: metabase.ListSegmentsResult{
					Segments: expectedSegments[2:4],
					More:     true,
				},
			}.Check(ctx, t, db)

			// Cursor at the last segment: nothing left to list.
			ListSegments{
				Opts: metabase.ListSegments{
					StreamID: obj.StreamID,
					Limit:    2,
					Cursor: metabase.SegmentPosition{
						Index: 10,
					},
				},
				Result: metabase.ListSegmentsResult{
					More: false,
				},
			}.Check(ctx, t, db)

			Verify{
				Objects: []metabase.RawObject{
					metabase.RawObject(expectedObject),
				},
				Segments: expectedRawSegments,
			}.Check(ctx, t, db)
		})

		// Segments committed with out-of-order part numbers must still be
		// listed in ascending position order.
		t.Run("List segments from unordered parts", func(t *testing.T) {
			defer DeleteAll{}.Check(ctx, t, db)

			var testCases = []struct {
				segments []metabase.SegmentPosition
			}{
				{[]metabase.SegmentPosition{
					{Part: 3, Index: 0},
					{Part: 0, Index: 0},
					{Part: 1, Index: 0},
					{Part: 2, Index: 0},
				}},

				{[]metabase.SegmentPosition{
					{Part: 3, Index: 0},
					{Part: 2, Index: 0},
					{Part: 1, Index: 0},
					{Part: 0, Index: 0},
				}},

				{[]metabase.SegmentPosition{
					{Part: 0, Index: 0},
					{Part: 2, Index: 0},
					{Part: 1, Index: 0},
					{Part: 3, Index: 0},
				}},
			}

			expectedSegment := metabase.Segment{
				StreamID:          obj.StreamID,
				RootPieceID:       storj.PieceID{1},
				EncryptedKey:      []byte{3},
				EncryptedKeyNonce: []byte{4},
				EncryptedSize:     1024,
				PlainSize:         512,
				Pieces:            metabase.Pieces{{Number: 0, StorageNode: storj.NodeID{2}}},
				Redundancy:        defaultTestRedundancy,
			}

			for _, tc := range testCases {
				// Fresh stream per case; deliberately shadows the outer obj.
				obj := randObjectStream()

				BeginObjectExactVersion{
					Opts: metabase.BeginObjectExactVersion{
						ObjectStream: obj,
						Encryption:   defaultTestEncryption,
					},
					Version: obj.Version,
				}.Check(ctx, t, db)

				for i, segmentPosition := range tc.segments {
					BeginSegment{
						Opts: metabase.BeginSegment{
							ObjectStream: obj,
							Position:     segmentPosition,
							RootPieceID:  storj.PieceID{byte(i + 1)},
							Pieces: []metabase.Piece{{
								Number:      1,
								StorageNode: testrand.NodeID(),
							}},
						},
					}.Check(ctx, t, db)

					// NOTE(review): CommitSegment uses different piece data
					// than BeginSegment; expectedSegment matches the
					// committed values, so commit appears authoritative.
					CommitSegment{
						Opts: metabase.CommitSegment{
							ObjectStream: obj,
							Position:     segmentPosition,
							RootPieceID:  storj.PieceID{1},
							Pieces:       metabase.Pieces{{Number: 0, StorageNode: storj.NodeID{2}}},

							EncryptedKey:      []byte{3},
							EncryptedKeyNonce: []byte{4},

							EncryptedSize: 1024,
							PlainSize:     512,
							PlainOffset:   0,
							Redundancy:    defaultTestRedundancy,
						},
					}.Check(ctx, t, db)
				}

				CommitObject{
					Opts: metabase.CommitObject{
						ObjectStream: obj,
					},
				}.Check(ctx, t, db)

				// Regardless of commit order, listing returns parts 0..3
				// in ascending order.
				expectedSegments := make([]metabase.Segment, 4)
				for i := range expectedSegments {
					expectedSegments[i] = expectedSegment
					expectedSegments[i].StreamID = obj.StreamID
					expectedSegments[i].Position.Part = uint32(i)
				}

				// Limit 0 falls back to the maximum listing limit.
				ListSegments{
					Opts: metabase.ListSegments{
						StreamID: obj.StreamID,
						Limit:    0,
					},
					Result: metabase.ListSegmentsResult{
						Segments: expectedSegments,
					},
				}.Check(ctx, t, db)
			}
		})
	})
}

View File

@ -179,6 +179,21 @@ func (step GetSegmentByOffset) Check(ctx *testcontext.Context, t *testing.T, db
require.Zero(t, diff)
}
// ListSegments is a test step for metabase.DB.ListSegments.
type ListSegments struct {
	// Opts are the arguments passed to DB.ListSegments.
	Opts metabase.ListSegments
	// Result is the expected listing result, compared via cmp.Diff.
	Result metabase.ListSegmentsResult
	// ErrClass and ErrText, when set, describe the expected error.
	ErrClass *errs.Class
	ErrText  string
}
// Check runs DB.ListSegments with step.Opts and verifies the outcome
// against the expected error and result recorded in the step.
func (step ListSegments) Check(ctx *testcontext.Context, t *testing.T, db *metabase.DB) {
	actual, err := db.ListSegments(ctx, step.Opts)
	checkError(t, err, step.ErrClass, step.ErrText)

	// Timestamps may drift slightly between client and database clocks.
	require.Zero(t, cmp.Diff(step.Result, actual, cmpopts.EquateApproxTime(5*time.Second)))
}
type DeleteObjectExactVersion struct {
Opts metabase.DeleteObjectExactVersion
Result metabase.DeleteObjectResult