satellite/metainfo/metabase: add NewRedundancy parameter for UpdateSegmentPieces method
At some point we might try to change the original segment RS values and set Pieces according to the new values. This change adds a NewRedundancy parameter to the UpdateSegmentPieces method to give the ability to do that. As part of the change, NewPieces are validated against NewRedundancy. Change-Id: I8ea531c9060b5cd283d3bf4f6e4c320099dd5576
This commit is contained in:
parent
e613c641d7
commit
27ae0d1f15
@ -597,6 +597,7 @@ func TestReverifyModifiedSegment(t *testing.T) {
|
||||
Position: queueSegment.Position,
|
||||
OldPieces: segment.Pieces,
|
||||
NewPieces: append([]metabase.Piece{segment.Pieces[0]}, segment.Pieces[2:]...),
|
||||
NewRedundancy: segment.Redundancy,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
@ -644,6 +644,7 @@ func TestVerifierModifiedSegment(t *testing.T) {
|
||||
Position: queueSegment.Position,
|
||||
OldPieces: segment.Pieces,
|
||||
NewPieces: append([]metabase.Piece{segment.Pieces[0]}, segment.Pieces[2:]...),
|
||||
NewRedundancy: segment.Redundancy,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
@ -210,6 +210,7 @@ func TestDurabilityRatio(t *testing.T) {
|
||||
|
||||
OldPieces: segment.Pieces,
|
||||
NewPieces: newPieces,
|
||||
NewRedundancy: segment.Redundancy,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
@ -989,6 +989,7 @@ func (endpoint *Endpoint) UpdatePiecesCheckDuplicates(ctx context.Context, segme
|
||||
Position: segment.Position,
|
||||
|
||||
OldPieces: segment.Pieces,
|
||||
NewRedundancy: segment.Redundancy,
|
||||
NewPieces: pieces,
|
||||
})
|
||||
if err != nil {
|
||||
|
@ -10,6 +10,7 @@ import (
|
||||
|
||||
"github.com/zeebo/errs"
|
||||
|
||||
"storj.io/common/storj"
|
||||
"storj.io/common/uuid"
|
||||
"storj.io/storj/storage"
|
||||
)
|
||||
@ -20,6 +21,8 @@ type UpdateSegmentPieces struct {
|
||||
Position SegmentPosition
|
||||
|
||||
OldPieces Pieces
|
||||
|
||||
NewRedundancy storj.RedundancyScheme
|
||||
NewPieces Pieces
|
||||
}
|
||||
|
||||
@ -39,6 +42,16 @@ func (db *DB) UpdateSegmentPieces(ctx context.Context, opts UpdateSegmentPieces)
|
||||
return err
|
||||
}
|
||||
|
||||
if opts.NewRedundancy.IsZero() {
|
||||
return ErrInvalidRequest.New("NewRedundancy zero")
|
||||
}
|
||||
|
||||
// its possible that in this method we will have less pieces
|
||||
// than optimal shares (e.g. after repair)
|
||||
if len(opts.NewPieces) < int(opts.NewRedundancy.RepairShares) {
|
||||
return ErrInvalidRequest.New("number of new pieces is less than new redundancy repair shares value")
|
||||
}
|
||||
|
||||
if err := opts.NewPieces.Verify(); err != nil {
|
||||
if ErrInvalidRequest.Has(err) {
|
||||
return ErrInvalidRequest.New("NewPieces: %v", errs.Unwrap(err))
|
||||
@ -62,12 +75,17 @@ func (db *DB) UpdateSegmentPieces(ctx context.Context, opts UpdateSegmentPieces)
|
||||
remote_alias_pieces = CASE
|
||||
WHEN remote_alias_pieces = $3 THEN $4
|
||||
ELSE remote_alias_pieces
|
||||
END,
|
||||
redundancy = CASE
|
||||
WHEN remote_alias_pieces = $3 THEN $5
|
||||
ELSE redundancy
|
||||
END
|
||||
WHERE
|
||||
stream_id = $1 AND
|
||||
position = $2
|
||||
RETURNING remote_alias_pieces
|
||||
`, opts.StreamID, opts.Position, oldPieces, newPieces).Scan(&resultPieces)
|
||||
`, opts.StreamID, opts.Position, oldPieces, newPieces, redundancyScheme{&opts.NewRedundancy}).
|
||||
Scan(&resultPieces)
|
||||
if err != nil {
|
||||
if errors.Is(err, sql.ErrNoRows) {
|
||||
return ErrSegmentNotFound.New("segment missing")
|
||||
|
@ -112,7 +112,7 @@ func TestUpdateSegmentPieces(t *testing.T) {
|
||||
Verify{}.Check(ctx, t, db)
|
||||
})
|
||||
|
||||
t.Run("NewPieces missing", func(t *testing.T) {
|
||||
t.Run("NewRedundancy zero", func(t *testing.T) {
|
||||
defer DeleteAll{}.Check(ctx, t, db)
|
||||
|
||||
UpdateSegmentPieces{
|
||||
@ -124,7 +124,24 @@ func TestUpdateSegmentPieces(t *testing.T) {
|
||||
}},
|
||||
},
|
||||
ErrClass: &metabase.ErrInvalidRequest,
|
||||
ErrText: "NewPieces: pieces missing",
|
||||
ErrText: "NewRedundancy zero",
|
||||
}.Check(ctx, t, db)
|
||||
})
|
||||
|
||||
t.Run("NewPieces vs NewRedundancy", func(t *testing.T) {
|
||||
defer DeleteAll{}.Check(ctx, t, db)
|
||||
|
||||
UpdateSegmentPieces{
|
||||
Opts: metabase.UpdateSegmentPieces{
|
||||
StreamID: obj.StreamID,
|
||||
OldPieces: []metabase.Piece{{
|
||||
Number: 1,
|
||||
StorageNode: testrand.NodeID(),
|
||||
}},
|
||||
NewRedundancy: defaultTestRedundancy,
|
||||
},
|
||||
ErrClass: &metabase.ErrInvalidRequest,
|
||||
ErrText: "number of new pieces is less than new redundancy repair shares value",
|
||||
}.Check(ctx, t, db)
|
||||
})
|
||||
|
||||
@ -135,6 +152,7 @@ func TestUpdateSegmentPieces(t *testing.T) {
|
||||
Opts: metabase.UpdateSegmentPieces{
|
||||
StreamID: obj.StreamID,
|
||||
OldPieces: validPieces,
|
||||
NewRedundancy: defaultTestRedundancy,
|
||||
NewPieces: []metabase.Piece{{
|
||||
Number: 1,
|
||||
StorageNode: storj.NodeID{},
|
||||
@ -153,6 +171,7 @@ func TestUpdateSegmentPieces(t *testing.T) {
|
||||
Opts: metabase.UpdateSegmentPieces{
|
||||
StreamID: obj.StreamID,
|
||||
OldPieces: validPieces,
|
||||
NewRedundancy: defaultTestRedundancy,
|
||||
NewPieces: []metabase.Piece{
|
||||
{
|
||||
Number: 1,
|
||||
@ -177,6 +196,7 @@ func TestUpdateSegmentPieces(t *testing.T) {
|
||||
Opts: metabase.UpdateSegmentPieces{
|
||||
StreamID: obj.StreamID,
|
||||
OldPieces: validPieces,
|
||||
NewRedundancy: defaultTestRedundancy,
|
||||
NewPieces: []metabase.Piece{
|
||||
{
|
||||
Number: 2,
|
||||
@ -202,6 +222,7 @@ func TestUpdateSegmentPieces(t *testing.T) {
|
||||
StreamID: obj.StreamID,
|
||||
Position: metabase.SegmentPosition{Index: 1},
|
||||
OldPieces: validPieces,
|
||||
NewRedundancy: defaultTestRedundancy,
|
||||
NewPieces: validPieces,
|
||||
},
|
||||
ErrClass: &metabase.ErrSegmentNotFound,
|
||||
@ -212,13 +233,21 @@ func TestUpdateSegmentPieces(t *testing.T) {
|
||||
t.Run("segment pieces column was changed", func(t *testing.T) {
|
||||
defer DeleteAll{}.Check(ctx, t, db)
|
||||
|
||||
createObject(ctx, t, db, obj, 1)
|
||||
obj := createObject(ctx, t, db, obj, 1)
|
||||
|
||||
newRedundancy := storj.RedundancyScheme{
|
||||
RequiredShares: 1,
|
||||
RepairShares: 1,
|
||||
OptimalShares: 1,
|
||||
TotalShares: 4,
|
||||
}
|
||||
|
||||
UpdateSegmentPieces{
|
||||
Opts: metabase.UpdateSegmentPieces{
|
||||
StreamID: obj.StreamID,
|
||||
Position: metabase.SegmentPosition{Index: 0},
|
||||
OldPieces: validPieces,
|
||||
NewRedundancy: newRedundancy,
|
||||
NewPieces: metabase.Pieces{
|
||||
metabase.Piece{
|
||||
Number: 1,
|
||||
@ -229,6 +258,28 @@ func TestUpdateSegmentPieces(t *testing.T) {
|
||||
ErrClass: &storage.ErrValueChanged,
|
||||
ErrText: "segment remote_alias_pieces field was changed",
|
||||
}.Check(ctx, t, db)
|
||||
|
||||
// verify that original pieces and redundancy did not change
|
||||
Verify{
|
||||
Objects: []metabase.RawObject{
|
||||
metabase.RawObject(obj),
|
||||
},
|
||||
Segments: []metabase.RawSegment{
|
||||
{
|
||||
StreamID: obj.StreamID,
|
||||
RootPieceID: storj.PieceID{1},
|
||||
CreatedAt: &now,
|
||||
EncryptedKey: []byte{3},
|
||||
EncryptedKeyNonce: []byte{4},
|
||||
EncryptedSize: 1024,
|
||||
PlainOffset: 0,
|
||||
PlainSize: 512,
|
||||
|
||||
Redundancy: defaultTestRedundancy,
|
||||
Pieces: metabase.Pieces{{Number: 0, StorageNode: storj.NodeID{2}}},
|
||||
},
|
||||
},
|
||||
}.Check(ctx, t, db)
|
||||
})
|
||||
|
||||
t.Run("update pieces", func(t *testing.T) {
|
||||
@ -258,6 +309,7 @@ func TestUpdateSegmentPieces(t *testing.T) {
|
||||
StreamID: obj.StreamID,
|
||||
Position: metabase.SegmentPosition{Index: 0},
|
||||
OldPieces: segment.Pieces,
|
||||
NewRedundancy: defaultTestRedundancy,
|
||||
NewPieces: expectedPieces,
|
||||
},
|
||||
}.Check(ctx, t, db)
|
||||
|
@ -395,6 +395,7 @@ func (repairer *SegmentRepairer) Repair(ctx context.Context, path storj.Path) (s
|
||||
Position: segmentLocation.Position,
|
||||
|
||||
OldPieces: segment.Pieces,
|
||||
NewRedundancy: segment.Redundancy,
|
||||
NewPieces: newPieces,
|
||||
})
|
||||
if err != nil {
|
||||
|
Loading…
Reference in New Issue
Block a user