satellite/metainfo/metabase: reduce number of fields for LoopSegmentEntry

For the metainfo loop we need only some of the Segment fields. Removing the unused ones will reduce memory consumption during the loop.

Change-Id: I4af8baab58f7de8ddf5e142380180bb70b1b442d
Michał Niewrzał 2021-03-02 13:58:23 +01:00
parent c223c2d845
commit c51ea68ad3
5 changed files with 45 additions and 50 deletions
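
To make the memory claim concrete, here is a rough, self-contained sketch comparing the two layouts. The stand-in types mimic the shapes of uuid.UUID, storj.PieceID and storj.RedundancyScheme, and the field sets are copied from the struct diff below; the exact numbers are an illustration, not measurements from the satellite.

package main

import (
	"fmt"
	"unsafe"
)

// Stand-ins with the same shapes as the real types.
type uuidT [16]byte   // uuid.UUID
type pieceID [32]byte // storj.PieceID
type position uint64  // SegmentPosition (two uint32s)

type redundancy struct { // storj.RedundancyScheme
	Algorithm                                                byte
	ShareSize                                                int32
	RequiredShares, RepairShares, OptimalShares, TotalShares int16
}

type piece struct { // metabase.Piece
	Number      uint16
	StorageNode [32]byte
}

// Before this change: every loop entry carried key material, plain-size
// bookkeeping, and inline data that the loop observers never used.
type loopSegmentEntryOld struct {
	StreamID          uuidT
	Position          position
	RootPieceID       pieceID
	EncryptedKeyNonce []byte
	EncryptedKey      []byte
	EncryptedSize     int32
	PlainSize         int32
	PlainOffset       int64
	Redundancy        redundancy
	InlineData        []byte
	Pieces            []piece
}

// After: only what the loop observers actually read.
type loopSegmentEntryNew struct {
	StreamID      uuidT
	Position      position
	RootPieceID   pieceID
	EncryptedSize int32
	Redundancy    redundancy
	Pieces        []piece
}

func main() {
	// Prints 184 and 104 on a 64-bit platform with these stand-ins.
	fmt.Println(unsafe.Sizeof(loopSegmentEntryOld{}), unsafe.Sizeof(loopSegmentEntryNew{}))
}

Note that Sizeof counts only the 24-byte slice headers, so the real saving is larger still: the dropped EncryptedKey, EncryptedKeyNonce and InlineData fields no longer keep their backing arrays alive while the loop holds entries.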


@@ -294,7 +294,7 @@ func (observer *Observer) InlineSegment(ctx context.Context, segment *metainfo.S
	bucket := observer.ensureBucket(ctx, segment.Location.Object())
	bucket.InlineSegments++
-	bucket.InlineBytes += int64(segment.DataSize)
+	bucket.InlineBytes += int64(segment.EncryptedSize)
	return nil
}
@@ -307,7 +307,7 @@ func (observer *Observer) RemoteSegment(ctx context.Context, segment *metainfo.S
	bucket := observer.ensureBucket(ctx, segment.Location.Object())
	bucket.RemoteSegments++
-	bucket.RemoteBytes += int64(segment.DataSize)
+	bucket.RemoteBytes += int64(segment.EncryptedSize)
	// add node info
	minimumRequired := segment.Redundancy.RequiredShares
@@ -317,7 +317,7 @@ func (observer *Observer) RemoteSegment(ctx context.Context, segment *metainfo.S
		return nil
	}
-	pieceSize := float64(segment.DataSize / int(minimumRequired)) // TODO: Add this as a method to RedundancyScheme
+	pieceSize := float64(segment.EncryptedSize / int32(minimumRequired)) // TODO: Add this as a method to RedundancyScheme
	for _, piece := range segment.Pieces {
		observer.Node[piece.StorageNode] += pieceSize

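The TODO carried through this hunk suggests moving the per-piece size calculation onto RedundancyScheme. A hypothetical sketch of such a helper (pieceSize is not an existing API, and graceful exit in the next file keeps using eestream.CalcPieceSize, which also accounts for erasure-share padding):

package sketch

import "storj.io/common/storj"

// pieceSize mirrors the inline expression above: integer division first,
// then the float conversion, so remainders truncate exactly as before.
// Hypothetical helper only; no such method exists on RedundancyScheme today.
func pieceSize(scheme storj.RedundancyScheme, encryptedSize int32) float64 {
	return float64(encryptedSize / int32(scheme.RequiredShares))
}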

@@ -73,7 +73,7 @@ func (collector *PathCollector) RemoteSegment(ctx context.Context, segment *meta
	if err != nil {
		return err
	}
-	pieceSize := eestream.CalcPieceSize(int64(segment.DataSize), redundancy)
+	pieceSize := eestream.CalcPieceSize(int64(segment.EncryptedSize), redundancy)
	collector.nodeIDStorage[piece.StorageNode] += pieceSize
	item := TransferQueueItem{


@@ -13,7 +13,6 @@ import (
	"github.com/zeebo/errs"
	"golang.org/x/time/rate"
	"storj.io/common/storj"
-	"storj.io/common/uuid"
	"storj.io/storj/satellite/metainfo/metabase"
)
@@ -38,15 +37,11 @@ func (object *Object) Expired(now time.Time) bool {
// Segment is the segment info passed to Observer by metainfo loop.
type Segment struct {
	Location       metabase.SegmentLocation // tally, repair, graceful exit, audit
-	StreamID       uuid.UUID                // audit
-	DataSize       int                      // tally, graceful exit
-	Inline         bool                     // metrics
-	Redundancy     storj.RedundancyScheme   // tally, graceful exit, repair
-	RootPieceID    storj.PieceID            // gc, graceful exit
-	Pieces         metabase.Pieces          // tally, audit, gc, graceful exit, repair
	CreationDate   time.Time                // repair
	ExpirationDate time.Time                // tally, repair
	LastRepaired   time.Time                // repair
+
+	metabase.LoopSegmentEntry
}

// Expired checks if segment is expired relative to now.
@@ -459,26 +454,28 @@ func handleObject(ctx context.Context, observer *observerContext, object *Object
	return true
}

-func handleSegment(ctx context.Context, observer *observerContext, location metabase.SegmentLocation, segment metabase.LoopSegmentEntry, expiresAt *time.Time) bool {
+func handleSegment(ctx context.Context, observer *observerContext, location metabase.SegmentLocation, segment metabase.LoopSegmentEntry, expirationDate *time.Time) bool {
	loopSegment := &Segment{
		Location: location,
		// TODO we are not setting this since multipart-upload branch, we need to
		// check if thats affecting anything and if we need to set it correctly
		// or just replace it with something else
		CreationDate: time.Time{},
		// TODO we are not setting this and we need to decide what to do with this
		LastRepaired: time.Time{},
+
+		LoopSegmentEntry: segment,
	}

-	if expiresAt != nil {
-		loopSegment.ExpirationDate = *expiresAt
+	if expirationDate != nil {
+		loopSegment.ExpirationDate = *expirationDate
	}

-	loopSegment.StreamID = segment.StreamID
-	loopSegment.DataSize = int(segment.EncryptedSize)
-	if segment.Inline() {
-		loopSegment.Inline = true
+	if loopSegment.Inline() {
		if observer.HandleError(observer.InlineSegment(ctx, loopSegment)) {
			return false
		}
	} else {
-		loopSegment.RootPieceID = segment.RootPieceID
-		loopSegment.Redundancy = segment.Redundancy
-		loopSegment.Pieces = segment.Pieces
		if observer.HandleError(observer.RemoteSegment(ctx, loopSegment)) {
			return false
		}

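The net effect of this hunk: instead of copying StreamID, DataSize, Inline, RootPieceID, Redundancy and Pieces one by one, Segment now embeds metabase.LoopSegmentEntry and relies on Go's field and method promotion. A condensed sketch of the pattern, with simplified stand-in types:

package sketch

// Simplified stand-ins for the metabase types.
type Piece struct{ Number uint16 }

type LoopSegmentEntry struct {
	EncryptedSize int32
	Pieces        []Piece
}

// Inline stands in for the promoted method (assumed semantics: a segment
// with no remote pieces was stored inline).
func (e LoopSegmentEntry) Inline() bool { return len(e.Pieces) == 0 }

type SegmentLocation struct{ Bucket, ObjectKey string }

// Segment embeds the entry instead of duplicating its fields, so
// EncryptedSize and Inline() are usable directly on a Segment value.
type Segment struct {
	Location SegmentLocation
	LoopSegmentEntry
}

func handle(loc SegmentLocation, entry LoopSegmentEntry) int64 {
	seg := &Segment{Location: loc, LoopSegmentEntry: entry}
	if seg.Inline() { // promoted method replaces the old Inline flag
		return int64(seg.EncryptedSize) // promoted field replaces DataSize
	}
	return 0
}

One embedded copy replaces six per-field assignments, and observers that used segment.DataSize switch to the promoted segment.EncryptedSize, which is exactly what the first two files of this commit do.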

@@ -178,22 +178,12 @@ func (it *loopIterator) scanItem(item *LoopObjectEntry) error {
// LoopSegmentEntry contains information about segment metadata needed by metainfo loop.
type LoopSegmentEntry struct {
-	StreamID          uuid.UUID
-	Position          SegmentPosition
-	RootPieceID       storj.PieceID
-	EncryptedKeyNonce []byte
-	EncryptedKey      []byte
-	EncryptedSize     int32 // size of the whole segment (not a piece)
-	PlainSize         int32
-	PlainOffset       int64
-	// TODO: add fields for proofs/chains
-	Redundancy        storj.RedundancyScheme
-	InlineData        []byte
-	Pieces            Pieces
+	StreamID      uuid.UUID
+	Position      SegmentPosition
+	RootPieceID   storj.PieceID
+	EncryptedSize int32 // size of the whole segment (not a piece)
+	Redundancy    storj.RedundancyScheme
+	Pieces        Pieces
}

// Inline returns true if segment is inline.
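The diff shows only Inline()'s doc comment, not its body, but removing InlineData need not break it: inline segments are recognizable from the fields that remain. A plausible body, assuming the real method checks the surviving fields:

// Inline returns true if segment is inline.
// (Assumed implementation: an inline segment has no redundancy scheme and
// no remote pieces, so InlineData itself is not needed for the check.)
func (s LoopSegmentEntry) Inline() bool {
	return s.Redundancy.IsZero() && len(s.Pieces) == 0
}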
@@ -237,10 +227,10 @@ func (db *DB) ListLoopSegmentEntries(ctx context.Context, opts ListLoopSegmentEn
	err = withRows(db.db.Query(ctx, `
		SELECT
			stream_id, position,
-			root_piece_id, encrypted_key_nonce, encrypted_key,
-			encrypted_size, plain_offset, plain_size,
+			root_piece_id,
+			encrypted_size,
			redundancy,
-			inline_data, remote_alias_pieces
+			remote_alias_pieces
		FROM segments
		WHERE
			-- this turns out to be a little bit faster than stream_id IN (SELECT unnest($1::BYTEA[]))
@@ -252,10 +242,10 @@ func (db *DB) ListLoopSegmentEntries(ctx context.Context, opts ListLoopSegmentEn
		var aliasPieces AliasPieces
		err = rows.Scan(
			&segment.StreamID, &segment.Position,
-			&segment.RootPieceID, &segment.EncryptedKeyNonce, &segment.EncryptedKey,
-			&segment.EncryptedSize, &segment.PlainOffset, &segment.PlainSize,
+			&segment.RootPieceID,
+			&segment.EncryptedSize,
			redundancyScheme{&segment.Redundancy},
-			&segment.InlineData, &aliasPieces,
+			&aliasPieces,
		)
		if err != nil {
			return Error.New("failed to scan segments: %w", err)

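The redundancyScheme{&segment.Redundancy} wrapper in the scan above is a sql.Scanner adapter around the caller's value. A minimal sketch of the pattern, assuming the scheme is stored in a single integer column (the exact bit layout is not shown in this diff and is illustrative only):

package sketch

import (
	"errors"

	"storj.io/common/storj"
)

// redundancyScheme lets rows.Scan fill a storj.RedundancyScheme in place
// through the embedded pointer.
type redundancyScheme struct {
	*storj.RedundancyScheme
}

func (r redundancyScheme) Scan(value interface{}) error {
	v, ok := value.(int64)
	if !ok {
		return errors.New("unexpected type for redundancy column")
	}
	// Unpack an assumed one-integer encoding into the scheme fields.
	r.Algorithm = storj.RedundancyAlgorithm(v >> 32)
	r.RequiredShares = int16(uint16(v >> 16))
	r.TotalShares = int16(uint16(v))
	return nil
}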

@@ -253,6 +253,7 @@ func createFullObjectsWithKeys(ctx *testcontext.Context, t *testing.T, db *metab
	return objects
}

func loopObjectEntryFromRaw(m metabase.RawObject) metabase.LoopObjectEntry {
	return metabase.LoopObjectEntry{
		ObjectStream: m.ObjectStream,
@@ -312,16 +313,23 @@ func TestListLoopSegmentEntries(t *testing.T) {
				Position: metabase.SegmentPosition{
					Index: uint32(i),
				},
-				RootPieceID:       storj.PieceID{1},
-				EncryptedKey:      []byte{3},
-				EncryptedKeyNonce: []byte{4},
-				EncryptedSize:     1024,
-				PlainSize:         512,
-				Pieces:            metabase.Pieces{{Number: 0, StorageNode: storj.NodeID{2}}},
-				Redundancy:        defaultTestRedundancy,
+				RootPieceID:   storj.PieceID{1},
+				EncryptedSize: 1024,
+				Pieces:        metabase.Pieces{{Number: 0, StorageNode: storj.NodeID{2}}},
+				Redundancy:    defaultTestRedundancy,
			}
			expectedSegments = append(expectedSegments, segment)
-			expectedRawSegments = append(expectedRawSegments, metabase.RawSegment(segment))
+			expectedRawSegments = append(expectedRawSegments, metabase.RawSegment{
+				StreamID:          segment.StreamID,
+				Position:          segment.Position,
+				EncryptedSize:     segment.EncryptedSize,
+				Pieces:            segment.Pieces,
+				Redundancy:        segment.Redundancy,
+				RootPieceID:       segment.RootPieceID,
+				PlainSize:         512,
+				EncryptedKey:      []byte{3},
+				EncryptedKeyNonce: []byte{4},
+			})
		}
	}