// Copyright (C) 2021 Storj Labs, Inc.
// See LICENSE for copying information.

package metabase

import (
	"bytes"
	"context"
	"fmt"
	"sort"
	"time"

	"github.com/zeebo/errs"

	"storj.io/common/storj"
	"storj.io/common/uuid"
	"storj.io/storj/private/dbutil"
	"storj.io/storj/private/dbutil/pgutil"
	"storj.io/storj/private/tagsql"
)

const loopIteratorBatchSizeLimit = 2500

// IterateLoopObjects contains the arguments necessary for listing objects in the metabase.
type IterateLoopObjects struct {
	// BatchSize is the number of objects fetched per query; values outside
	// (0, loopIteratorBatchSizeLimit] fall back to the limit.
	BatchSize int

	// AsOfSystemTime, when non-zero, makes CockroachDB queries run
	// AS OF SYSTEM TIME at the given timestamp.
	AsOfSystemTime time.Time
}

// Verify verifies the iterate loop objects request fields.
func (opts *IterateLoopObjects) Verify() error {
	if opts.BatchSize < 0 {
		return ErrInvalidRequest.New("BatchSize is negative")
	}
	return nil
}

// LoopObjectsIterator iterates over a sequence of LoopObjectEntry items.
type LoopObjectsIterator interface {
	// Next copies the next entry into item and returns true,
	// or returns false when the iteration is finished or fails.
	Next(ctx context.Context, item *LoopObjectEntry) bool
}

// LoopObjectEntry contains the object information needed by the metainfo loop.
type LoopObjectEntry struct {
	ObjectStream // metrics, repair, tally

	CreatedAt             time.Time  // temp used by metabase-createdat-migration
	ExpiresAt             *time.Time // tally
	SegmentCount          int32      // metrics
	EncryptedMetadataSize int        // tally
}

// IterateLoopObjects iterates through all objects in the metabase.
func (db *DB) IterateLoopObjects(ctx context.Context, opts IterateLoopObjects, fn func(context.Context, LoopObjectsIterator) error) (err error) {
	defer mon.Task()(&ctx)(&err)

	if err := opts.Verify(); err != nil {
		return err
	}

	it := &loopIterator{
		db: db,

		batchSize: opts.BatchSize,

		curIndex:       0,
		cursor:         loopIterateCursor{},
		asOfSystemTime: opts.AsOfSystemTime,
	}

	// Ensure the batch size is reasonable.
	if it.batchSize <= 0 || it.batchSize > loopIteratorBatchSizeLimit {
		it.batchSize = loopIteratorBatchSizeLimit
	}

	it.curRows, err = it.doNextQuery(ctx)
	if err != nil {
		return err
	}
	defer func() {
		if rowsErr := it.curRows.Err(); rowsErr != nil {
			err = errs.Combine(err, rowsErr)
		}
		err = errs.Combine(err, it.curRows.Close())
	}()

	return fn(ctx, it)
}
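
// Using IterateLoopObjects: a minimal sketch, assuming a *DB and a ctx are in
// scope. The batch size and the AS OF SYSTEM TIME offset are illustrative,
// and segmentsPerProject is a hypothetical tally:
//
//	segmentsPerProject := map[uuid.UUID]int64{}
//	err := db.IterateLoopObjects(ctx,
//		IterateLoopObjects{
//			BatchSize:      2500,
//			AsOfSystemTime: time.Now().Add(-10 * time.Second),
//		},
//		func(ctx context.Context, it LoopObjectsIterator) error {
//			var entry LoopObjectEntry
//			for it.Next(ctx, &entry) {
//				segmentsPerProject[entry.ProjectID] += int64(entry.SegmentCount)
//			}
//			return nil
//		})
//
// Note that Next returns false on query errors as well as on exhaustion; such
// errors surface from IterateLoopObjects itself via its deferred rows check,
// not from Next.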

// loopIterator enables iteration over all objects in the metabase.
type loopIterator struct {
	db *DB

	batchSize      int
	asOfSystemTime time.Time

	curIndex int
	curRows  tagsql.Rows
	cursor   loopIterateCursor
}

// loopIterateCursor is the position of the last returned entry;
// the next batch continues strictly after it.
type loopIterateCursor struct {
	ProjectID  uuid.UUID
	BucketName string
	ObjectKey  ObjectKey
	Version    Version
}

// Next returns true if there is another item and copies it into item.
func (it *loopIterator) Next(ctx context.Context, item *LoopObjectEntry) bool {
	next := it.curRows.Next()
	if !next {
		// The previous batch was not full, so there are no more rows to fetch.
		if it.curIndex < it.batchSize {
			return false
		}

		if it.curRows.Err() != nil {
			return false
		}

		rows, err := it.doNextQuery(ctx)
		if err != nil {
			return false
		}

		if it.curRows.Close() != nil {
			_ = rows.Close()
			return false
		}

		it.curRows = rows
		it.curIndex = 0
		if !it.curRows.Next() {
			return false
		}
	}

	err := it.scanItem(item)
	if err != nil {
		return false
	}

	it.curIndex++
	it.cursor.ProjectID = item.ProjectID
	it.cursor.BucketName = item.BucketName
	it.cursor.ObjectKey = item.ObjectKey
	it.cursor.Version = item.Version

	return true
}

// doNextQuery fetches the next batch of objects, keyset-paginated
// strictly after the current cursor.
func (it *loopIterator) doNextQuery(ctx context.Context) (_ tagsql.Rows, err error) {
	defer mon.Task()(&ctx)(&err)

	var asOfSystemTime string
	if !it.asOfSystemTime.IsZero() && it.db.implementation == dbutil.Cockroach {
		asOfSystemTime = fmt.Sprintf(` AS OF SYSTEM TIME '%d' `, it.asOfSystemTime.UnixNano())
	}

	return it.db.db.Query(ctx, `
		SELECT
			project_id, bucket_name,
			object_key, stream_id, version,
			created_at, expires_at,
			segment_count,
			LENGTH(COALESCE(encrypted_metadata,''))
		FROM objects
		`+asOfSystemTime+`
		WHERE (project_id, bucket_name, object_key, version) > ($1, $2, $3, $4)
		ORDER BY project_id ASC, bucket_name ASC, object_key ASC, version ASC
		LIMIT $5
	`, it.cursor.ProjectID, []byte(it.cursor.BucketName),
		[]byte(it.cursor.ObjectKey), int(it.cursor.Version),
		it.batchSize,
	)
}

// scanItem scans doNextQuery results into LoopObjectEntry.
func (it *loopIterator) scanItem(item *LoopObjectEntry) error {
	return it.curRows.Scan(
		&item.ProjectID, &item.BucketName,
		&item.ObjectKey, &item.StreamID, &item.Version,
		&item.CreatedAt, &item.ExpiresAt,
		&item.SegmentCount,
		&item.EncryptedMetadataSize,
	)
}

// IterateLoopStreams contains the arguments necessary for listing the segments of multiple streams.
type IterateLoopStreams struct {
	StreamIDs []uuid.UUID

	AsOfSystemTime time.Time
}

// SegmentIterator returns the next segment.
type SegmentIterator func(segment *LoopSegmentEntry) bool

// LoopSegmentEntry contains the segment metadata needed by the metainfo loop.
type LoopSegmentEntry struct {
	StreamID      uuid.UUID
	Position      SegmentPosition
	CreatedAt     *time.Time // repair
	RepairedAt    *time.Time // repair
	RootPieceID   storj.PieceID
	EncryptedSize int32 // size of the whole segment (not a piece)
	Redundancy    storj.RedundancyScheme
	Pieces        Pieces
}

// Inline returns true if the segment is inline.
func (s LoopSegmentEntry) Inline() bool {
	return s.Redundancy.IsZero() && len(s.Pieces) == 0
}
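
// A caller scanning segments can branch on storage type. A sketch, with
// hypothetical byte counters:
//
//	if segment.Inline() {
//		inlineBytes += int64(segment.EncryptedSize) // data kept in the metabase itself
//	} else {
//		remoteBytes += int64(segment.EncryptedSize) // data held as pieces on storage nodes
//	}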

// IterateLoopStreams lists the segments of multiple streams.
func (db *DB) IterateLoopStreams(ctx context.Context, opts IterateLoopStreams, handleStream func(ctx context.Context, streamID uuid.UUID, next SegmentIterator) error) (err error) {
	defer mon.Task()(&ctx)(&err)

	if len(opts.StreamIDs) == 0 {
		return ErrInvalidRequest.New("StreamIDs list is empty")
	}

	sort.Slice(opts.StreamIDs, func(i, k int) bool {
		return bytes.Compare(opts.StreamIDs[i][:], opts.StreamIDs[k][:]) < 0
	})

	// TODO: do something like pgutil.UUIDArray()
	bytesIDs := make([][]byte, len(opts.StreamIDs))
	for i, streamID := range opts.StreamIDs {
		if streamID.IsZero() {
			return ErrInvalidRequest.New("StreamID missing: index %d", i)
		}
		id := streamID
		bytesIDs[i] = id[:]
	}

	var asOfSystemTime string
	if !opts.AsOfSystemTime.IsZero() && db.implementation == dbutil.Cockroach {
		asOfSystemTime = fmt.Sprintf(` AS OF SYSTEM TIME '%d' `, opts.AsOfSystemTime.UnixNano())
	}

	rows, err := db.db.Query(ctx, `
		SELECT
			stream_id, position,
			created_at, repaired_at,
			root_piece_id,
			encrypted_size,
			redundancy,
			remote_alias_pieces
		FROM segments
		`+asOfSystemTime+`
		WHERE
			-- this turns out to be a little bit faster than stream_id IN (SELECT unnest($1::BYTEA[]))
			stream_id = ANY ($1::BYTEA[])
		ORDER BY stream_id ASC, position ASC
	`, pgutil.ByteaArray(bytesIDs))
	if err != nil {
		return Error.Wrap(err)
	}
	defer func() { err = errs.Combine(err, rows.Err(), rows.Close()) }()

	var noMoreData bool
	var nextSegment *LoopSegmentEntry
	for _, streamID := range opts.StreamIDs {
		streamID := streamID
		var internalError error
		err := handleStream(ctx, streamID, func(output *LoopSegmentEntry) bool {
			if nextSegment != nil {
				if nextSegment.StreamID != streamID {
					return false
				}
				*output = *nextSegment
				nextSegment = nil
				return true
			}

			if noMoreData {
				return false
			}
			if !rows.Next() {
				noMoreData = true
				return false
			}

			var segment LoopSegmentEntry
			var aliasPieces AliasPieces
			err = rows.Scan(
				&segment.StreamID, &segment.Position,
				&segment.CreatedAt, &segment.RepairedAt,
				&segment.RootPieceID,
				&segment.EncryptedSize,
				redundancyScheme{&segment.Redundancy},
				&aliasPieces,
			)
			if err != nil {
				internalError = Error.New("failed to scan segments: %w", err)
				return false
			}

			segment.Pieces, err = db.aliasCache.ConvertAliasesToPieces(ctx, aliasPieces)
			if err != nil {
				internalError = Error.New("failed to convert aliases to pieces: %w", err)
				return false
			}

			// The scanned segment belongs to the next stream;
			// stash it so the following handleStream call picks it up.
			if segment.StreamID != streamID {
				nextSegment = &segment
				return false
			}

			*output = segment
			return true
		})
		if internalError != nil || err != nil {
			return Error.Wrap(errs.Combine(internalError, err))
		}
	}

	if !noMoreData {
		return Error.New("expected rows to be completely read")
	}

	return nil
}
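
// Using IterateLoopStreams: a minimal sketch, assuming streamIDs was gathered
// elsewhere (for example from IterateLoopObjects) and totalEncryptedBytes is a
// hypothetical counter. The handler is called once per requested stream ID, in
// sorted order, and should drain next before returning, since the function
// expects every fetched row to be consumed:
//
//	err := db.IterateLoopStreams(ctx,
//		IterateLoopStreams{StreamIDs: streamIDs},
//		func(ctx context.Context, streamID uuid.UUID, next SegmentIterator) error {
//			var segment LoopSegmentEntry
//			for next(&segment) {
//				totalEncryptedBytes += int64(segment.EncryptedSize)
//			}
//			return nil
//		})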