satellite/metainfo/metabase: reduce number of fields for LoopSegmentEntry

For the metainfo loop we need only some of the Segment fields. By removing the unused ones we reduce memory consumption during the loop.

Change-Id: I4af8baab58f7de8ddf5e142380180bb70b1b442d
This commit is contained in:
Michał Niewrzał 2021-03-02 13:58:23 +01:00
parent c223c2d845
commit c51ea68ad3
5 changed files with 45 additions and 50 deletions

View File

@ -294,7 +294,7 @@ func (observer *Observer) InlineSegment(ctx context.Context, segment *metainfo.S
bucket := observer.ensureBucket(ctx, segment.Location.Object()) bucket := observer.ensureBucket(ctx, segment.Location.Object())
bucket.InlineSegments++ bucket.InlineSegments++
bucket.InlineBytes += int64(segment.DataSize) bucket.InlineBytes += int64(segment.EncryptedSize)
return nil return nil
} }
@ -307,7 +307,7 @@ func (observer *Observer) RemoteSegment(ctx context.Context, segment *metainfo.S
bucket := observer.ensureBucket(ctx, segment.Location.Object()) bucket := observer.ensureBucket(ctx, segment.Location.Object())
bucket.RemoteSegments++ bucket.RemoteSegments++
bucket.RemoteBytes += int64(segment.DataSize) bucket.RemoteBytes += int64(segment.EncryptedSize)
// add node info // add node info
minimumRequired := segment.Redundancy.RequiredShares minimumRequired := segment.Redundancy.RequiredShares
@ -317,7 +317,7 @@ func (observer *Observer) RemoteSegment(ctx context.Context, segment *metainfo.S
return nil return nil
} }
pieceSize := float64(segment.DataSize / int(minimumRequired)) // TODO: Add this as a method to RedundancyScheme pieceSize := float64(segment.EncryptedSize / int32(minimumRequired)) // TODO: Add this as a method to RedundancyScheme
for _, piece := range segment.Pieces { for _, piece := range segment.Pieces {
observer.Node[piece.StorageNode] += pieceSize observer.Node[piece.StorageNode] += pieceSize

View File

@ -73,7 +73,7 @@ func (collector *PathCollector) RemoteSegment(ctx context.Context, segment *meta
if err != nil { if err != nil {
return err return err
} }
pieceSize := eestream.CalcPieceSize(int64(segment.DataSize), redundancy) pieceSize := eestream.CalcPieceSize(int64(segment.EncryptedSize), redundancy)
collector.nodeIDStorage[piece.StorageNode] += pieceSize collector.nodeIDStorage[piece.StorageNode] += pieceSize
item := TransferQueueItem{ item := TransferQueueItem{

View File

@ -13,7 +13,6 @@ import (
"github.com/zeebo/errs" "github.com/zeebo/errs"
"golang.org/x/time/rate" "golang.org/x/time/rate"
"storj.io/common/storj"
"storj.io/common/uuid" "storj.io/common/uuid"
"storj.io/storj/satellite/metainfo/metabase" "storj.io/storj/satellite/metainfo/metabase"
) )
@ -38,15 +37,11 @@ func (object *Object) Expired(now time.Time) bool {
// Segment is the segment info passed to Observer by metainfo loop. // Segment is the segment info passed to Observer by metainfo loop.
type Segment struct { type Segment struct {
Location metabase.SegmentLocation // tally, repair, graceful exit, audit Location metabase.SegmentLocation // tally, repair, graceful exit, audit
StreamID uuid.UUID // audit
DataSize int // tally, graceful exit
Inline bool // metrics
Redundancy storj.RedundancyScheme // tally, graceful exit, repair
RootPieceID storj.PieceID // gc, graceful exit
Pieces metabase.Pieces // tally, audit, gc, graceful exit, repair
CreationDate time.Time // repair CreationDate time.Time // repair
ExpirationDate time.Time // tally, repair ExpirationDate time.Time // tally, repair
LastRepaired time.Time // repair LastRepaired time.Time // repair
metabase.LoopSegmentEntry
} }
// Expired checks if segment is expired relative to now. // Expired checks if segment is expired relative to now.
@ -459,26 +454,28 @@ func handleObject(ctx context.Context, observer *observerContext, object *Object
return true return true
} }
func handleSegment(ctx context.Context, observer *observerContext, location metabase.SegmentLocation, segment metabase.LoopSegmentEntry, expiresAt *time.Time) bool { func handleSegment(ctx context.Context, observer *observerContext, location metabase.SegmentLocation, segment metabase.LoopSegmentEntry, expirationDate *time.Time) bool {
loopSegment := &Segment{ loopSegment := &Segment{
Location: location, Location: location,
// TODO we are not setting this since multipart-upload branch, we need to
// check if thats affecting anything and if we need to set it correctly
// or just replace it with something else
CreationDate: time.Time{},
// TODO we are not setting this and we need to decide what to do with this
LastRepaired: time.Time{},
LoopSegmentEntry: segment,
} }
if expiresAt != nil { if expirationDate != nil {
loopSegment.ExpirationDate = *expiresAt loopSegment.ExpirationDate = *expirationDate
} }
loopSegment.StreamID = segment.StreamID if loopSegment.Inline() {
loopSegment.DataSize = int(segment.EncryptedSize)
if segment.Inline() {
loopSegment.Inline = true
if observer.HandleError(observer.InlineSegment(ctx, loopSegment)) { if observer.HandleError(observer.InlineSegment(ctx, loopSegment)) {
return false return false
} }
} else { } else {
loopSegment.RootPieceID = segment.RootPieceID
loopSegment.Redundancy = segment.Redundancy
loopSegment.Pieces = segment.Pieces
if observer.HandleError(observer.RemoteSegment(ctx, loopSegment)) { if observer.HandleError(observer.RemoteSegment(ctx, loopSegment)) {
return false return false
} }

View File

@ -178,22 +178,12 @@ func (it *loopIterator) scanItem(item *LoopObjectEntry) error {
// LoopSegmentEntry contains information about segment metadata needed by metainfo loop. // LoopSegmentEntry contains information about segment metadata needed by metainfo loop.
type LoopSegmentEntry struct { type LoopSegmentEntry struct {
StreamID uuid.UUID StreamID uuid.UUID
Position SegmentPosition Position SegmentPosition
RootPieceID storj.PieceID
RootPieceID storj.PieceID
EncryptedKeyNonce []byte
EncryptedKey []byte
EncryptedSize int32 // size of the whole segment (not a piece) EncryptedSize int32 // size of the whole segment (not a piece)
PlainSize int32 Redundancy storj.RedundancyScheme
PlainOffset int64 Pieces Pieces
// TODO: add fields for proofs/chains
Redundancy storj.RedundancyScheme
InlineData []byte
Pieces Pieces
} }
// Inline returns true if segment is inline. // Inline returns true if segment is inline.
@ -237,10 +227,10 @@ func (db *DB) ListLoopSegmentEntries(ctx context.Context, opts ListLoopSegmentEn
err = withRows(db.db.Query(ctx, ` err = withRows(db.db.Query(ctx, `
SELECT SELECT
stream_id, position, stream_id, position,
root_piece_id, encrypted_key_nonce, encrypted_key, root_piece_id,
encrypted_size, plain_offset, plain_size, encrypted_size,
redundancy, redundancy,
inline_data, remote_alias_pieces remote_alias_pieces
FROM segments FROM segments
WHERE WHERE
-- this turns out to be a little bit faster than stream_id IN (SELECT unnest($1::BYTEA[])) -- this turns out to be a little bit faster than stream_id IN (SELECT unnest($1::BYTEA[]))
@ -252,10 +242,10 @@ func (db *DB) ListLoopSegmentEntries(ctx context.Context, opts ListLoopSegmentEn
var aliasPieces AliasPieces var aliasPieces AliasPieces
err = rows.Scan( err = rows.Scan(
&segment.StreamID, &segment.Position, &segment.StreamID, &segment.Position,
&segment.RootPieceID, &segment.EncryptedKeyNonce, &segment.EncryptedKey, &segment.RootPieceID,
&segment.EncryptedSize, &segment.PlainOffset, &segment.PlainSize, &segment.EncryptedSize,
redundancyScheme{&segment.Redundancy}, redundancyScheme{&segment.Redundancy},
&segment.InlineData, &aliasPieces, &aliasPieces,
) )
if err != nil { if err != nil {
return Error.New("failed to scan segments: %w", err) return Error.New("failed to scan segments: %w", err)

View File

@ -253,6 +253,7 @@ func createFullObjectsWithKeys(ctx *testcontext.Context, t *testing.T, db *metab
return objects return objects
} }
func loopObjectEntryFromRaw(m metabase.RawObject) metabase.LoopObjectEntry { func loopObjectEntryFromRaw(m metabase.RawObject) metabase.LoopObjectEntry {
return metabase.LoopObjectEntry{ return metabase.LoopObjectEntry{
ObjectStream: m.ObjectStream, ObjectStream: m.ObjectStream,
@ -312,16 +313,23 @@ func TestListLoopSegmentEntries(t *testing.T) {
Position: metabase.SegmentPosition{ Position: metabase.SegmentPosition{
Index: uint32(i), Index: uint32(i),
}, },
RootPieceID: storj.PieceID{1}, RootPieceID: storj.PieceID{1},
EncryptedKey: []byte{3}, EncryptedSize: 1024,
EncryptedKeyNonce: []byte{4}, Pieces: metabase.Pieces{{Number: 0, StorageNode: storj.NodeID{2}}},
EncryptedSize: 1024, Redundancy: defaultTestRedundancy,
PlainSize: 512,
Pieces: metabase.Pieces{{Number: 0, StorageNode: storj.NodeID{2}}},
Redundancy: defaultTestRedundancy,
} }
expectedSegments = append(expectedSegments, segment) expectedSegments = append(expectedSegments, segment)
expectedRawSegments = append(expectedRawSegments, metabase.RawSegment(segment)) expectedRawSegments = append(expectedRawSegments, metabase.RawSegment{
StreamID: segment.StreamID,
Position: segment.Position,
EncryptedSize: segment.EncryptedSize,
Pieces: segment.Pieces,
Redundancy: segment.Redundancy,
RootPieceID: segment.RootPieceID,
PlainSize: 512,
EncryptedKey: []byte{3},
EncryptedKeyNonce: []byte{4},
})
} }
} }