// Copyright (C) 2020 Storj Labs, Inc.
// See LICENSE for copying information.
package metabase
import (
"context"
"database/sql"
"errors"
"time"
"storj.io/common/uuid"
"storj.io/private/dbutil/pgutil"
"storj.io/private/tagsql"
)
// ListSegments contains arguments necessary for listing stream segments.
type ListSegments struct {
StreamID uuid.UUID
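	// Cursor specifies the position after which listing resumes; the zero
	// position lists from the first segment.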
Cursor SegmentPosition
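	// Limit is the maximum number of segments to return; zero falls back to
	// the default listing limit.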
Limit int
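
	// Range, when set, restricts the listing to segments overlapping the
	// given plain offset range.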
Range *StreamRange

	// UpdateFirstWithAncestor causes ListSegments to update the first segment
	// in the response with the ancestor info, if it exists and server-side
	// copy is enabled.
	UpdateFirstWithAncestor bool
}

// ListSegmentsResult is the result of listing segments.
type ListSegmentsResult struct {
Segments []Segment
More bool
}
// ListSegments lists specified stream segments.
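//
// A minimal usage sketch (ctx and streamID are assumed to be in scope):
//
//	result, err := db.ListSegments(ctx, metabase.ListSegments{
//		StreamID: streamID,
//		Limit:    100,
//	})
//	if err != nil {
//		return err
//	}
//	for _, segment := range result.Segments {
//		_ = segment // process the segment
//	}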
func (db *DB) ListSegments(ctx context.Context, opts ListSegments) (result ListSegmentsResult, err error) {
defer mon.Task()(&ctx)(&err)
if opts.StreamID.IsZero() {
return ListSegmentsResult{}, ErrInvalidRequest.New("StreamID missing")
}
if opts.Limit < 0 {
return ListSegmentsResult{}, ErrInvalidRequest.New("Invalid limit: %d", opts.Limit)
}
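
	// Ensure applies the default listing limit when the caller passes zero
	// or an excessively large value.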
ListLimit.Ensure(&opts.Limit)
if opts.Range != nil {
if opts.Range.PlainStart > opts.Range.PlainLimit {
return ListSegmentsResult{}, ErrInvalidRequest.New("invalid range: %d:%d", opts.Range.PlainStart, opts.Range.PlainLimit)
}
}
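
	// Query one row more than requested so the More flag can be set without
	// issuing a second query.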
var rows tagsql.Rows
var rowsErr error
if opts.Range == nil {
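		// A zero cursor ($2 = 0) lists from the first segment; otherwise the
		// listing resumes after the cursor position.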
rows, rowsErr = db.db.QueryContext(ctx, `
SELECT
position, created_at, expires_at, root_piece_id,
encrypted_key_nonce, encrypted_key, encrypted_size,
plain_offset, plain_size, encrypted_etag, redundancy,
inline_data, remote_alias_pieces, placement
FROM segments
WHERE
stream_id = $1 AND
($2 = 0::INT8 OR position > $2)
ORDER BY stream_id, position ASC
LIMIT $3
`, opts.StreamID, opts.Cursor, opts.Limit+1)
} else {
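		// The extra predicate keeps only segments whose plain byte range
		// overlaps the half-open interval [PlainStart, PlainLimit).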
rows, rowsErr = db.db.QueryContext(ctx, `
SELECT
position, created_at, expires_at, root_piece_id,
encrypted_key_nonce, encrypted_key, encrypted_size,
plain_offset, plain_size, encrypted_etag, redundancy,
inline_data, remote_alias_pieces, placement
FROM segments
WHERE
stream_id = $1 AND
($2 = 0::INT8 OR position > $2) AND
$4 < plain_offset + plain_size AND plain_offset < $5
ORDER BY stream_id, position ASC
LIMIT $3
`, opts.StreamID, opts.Cursor, opts.Limit+1, opts.Range.PlainStart, opts.Range.PlainLimit)
}
err = withRows(rows, rowsErr)(func(rows tagsql.Rows) error {
for rows.Next() {
var segment Segment
var aliasPieces AliasPieces
err = rows.Scan(
&segment.Position,
&segment.CreatedAt, &segment.ExpiresAt,
&segment.RootPieceID, &segment.EncryptedKeyNonce, &segment.EncryptedKey,
&segment.EncryptedSize, &segment.PlainOffset, &segment.PlainSize,
&segment.EncryptedETag,
redundancyScheme{&segment.Redundancy},
&segment.InlineData, &aliasPieces,
&segment.Placement,
)
if err != nil {
return Error.New("failed to scan segments: %w", err)
}
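
			// remote_alias_pieces stores compact node aliases; resolve them
			// into full pieces through the alias cache.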
segment.Pieces, err = db.aliasCache.ConvertAliasesToPieces(ctx, aliasPieces)
if err != nil {
return Error.New("failed to convert aliases to pieces: %w", err)
}
segment.StreamID = opts.StreamID
result.Segments = append(result.Segments, segment)
}
return nil
})
if err != nil {
if errors.Is(err, sql.ErrNoRows) {
return ListSegmentsResult{}, nil
}
return ListSegmentsResult{}, Error.New("unable to fetch object segments: %w", err)
}
if db.config.ServerSideCopy {
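		// Segments of a copied object carry no piece data of their own;
		// collect them so the root piece ID and pieces can be filled in
		// from the ancestor segments.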
		copies := make([]*Segment, 0, len(result.Segments))
		copiesPositions := make([]int64, 0, len(result.Segments))
		for i := range result.Segments {
			if result.Segments[i].PiecesInAncestorSegment() {
				copies = append(copies, &result.Segments[i])
				copiesPositions = append(copiesPositions, int64(result.Segments[i].Position.Encode()))
			}
		}

if len(copies) > 0 {
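			// Ancestor rows arrive ordered by position, the same order in
			// which copies were collected, so they are merged back by index.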
index := 0
err = withRows(db.db.QueryContext(ctx, `
SELECT
root_piece_id,
remote_alias_pieces
FROM segments as segments
LEFT JOIN segment_copies as copies
ON copies.ancestor_stream_id = segments.stream_id
WHERE
copies.stream_id = $1 AND segments.position IN (SELECT position FROM UNNEST($2::INT8[]) as position)
ORDER BY segments.stream_id, segments.position ASC
`, opts.StreamID, pgutil.Int8Array(copiesPositions)))(func(rows tagsql.Rows) error {
for rows.Next() {
var aliasPieces AliasPieces
err = rows.Scan(
&copies[index].RootPieceID,
&aliasPieces,
)
if err != nil {
return Error.New("failed to scan segments: %w", err)
}
copies[index].Pieces, err = db.aliasCache.ConvertAliasesToPieces(ctx, aliasPieces)
if err != nil {
return Error.New("failed to convert aliases to pieces: %w", err)
}
index++
}
return nil
})
if err != nil {
return ListSegmentsResult{}, Error.New("unable to fetch object segments: %w", err)
}
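
			// Every collected copy must have been matched with an ancestor row.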
if index != len(copies) {
return ListSegmentsResult{}, Error.New("number of ancestor segments is different than copies: want %d got %d",
len(copies), index)
}
}

		// Update only the first segment with its ancestor info: DownloadObject
		// uses this call and needs the extra information for the first
		// segment only.
if len(result.Segments) > 0 && opts.UpdateFirstWithAncestor {
err = db.updateWithAncestorSegment(ctx, &result.Segments[0])
if err != nil {
return ListSegmentsResult{}, err
}
}
}
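
	// The extra row queried above only signals that more segments remain;
	// trim it from the returned page.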
if len(result.Segments) > opts.Limit {
result.More = true
result.Segments = result.Segments[:len(result.Segments)-1]
}
return result, nil
}

// ListStreamPositions contains arguments necessary for listing stream segment positions.
type ListStreamPositions struct {
StreamID uuid.UUID
Cursor SegmentPosition
Limit int
Range *StreamRange
}
// StreamRange limits stream positions based on the plain offsets.
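//
// For example, a range with PlainStart 0 and PlainLimit 100 matches the
// segments containing the first 100 plain bytes of the stream.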
type StreamRange struct {
PlainStart int64
PlainLimit int64 // limit is exclusive
}

// ListStreamPositionsResult is the result of listing stream positions.
type ListStreamPositionsResult struct {
Segments []SegmentPositionInfo
More bool
}

// SegmentPositionInfo contains information about a segment position.
type SegmentPositionInfo struct {
Position SegmentPosition
// PlainSize is 0 for a migrated object.
PlainSize int32
// PlainOffset is 0 for a migrated object.
PlainOffset int64
CreatedAt *time.Time // TODO: make it non-nilable after we migrate all existing segments to have creation time
EncryptedETag []byte
EncryptedKeyNonce []byte
EncryptedKey []byte
}
// ListStreamPositions lists specified stream segment positions.
func (db *DB) ListStreamPositions(ctx context.Context, opts ListStreamPositions) (result ListStreamPositionsResult, err error) {
defer mon.Task()(&ctx)(&err)
if opts.StreamID.IsZero() {
return ListStreamPositionsResult{}, ErrInvalidRequest.New("StreamID missing")
}
if opts.Limit < 0 {
return ListStreamPositionsResult{}, ErrInvalidRequest.New("Invalid limit: %d", opts.Limit)
}
ListLimit.Ensure(&opts.Limit)
if opts.Range != nil {
if opts.Range.PlainStart > opts.Range.PlainLimit {
return ListStreamPositionsResult{}, ErrInvalidRequest.New("invalid range: %d:%d", opts.Range.PlainStart, opts.Range.PlainLimit)
}
}
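
	// As in ListSegments, query one extra row to detect whether more
	// positions remain.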
var rows tagsql.Rows
var rowsErr error
if opts.Range == nil {
rows, rowsErr = db.db.QueryContext(ctx, `
SELECT
position, plain_size, plain_offset, created_at,
encrypted_etag, encrypted_key_nonce, encrypted_key
FROM segments
WHERE
stream_id = $1 AND
($2 = 0::INT8 OR position > $2)
ORDER BY position ASC
LIMIT $3
`, opts.StreamID, opts.Cursor, opts.Limit+1)
} else {
rows, rowsErr = db.db.QueryContext(ctx, `
SELECT
position, plain_size, plain_offset, created_at,
encrypted_etag, encrypted_key_nonce, encrypted_key
FROM segments
WHERE
stream_id = $1 AND
($2 = 0::INT8 OR position > $2) AND
$4 < plain_offset + plain_size AND plain_offset < $5
ORDER BY position ASC
LIMIT $3
`, opts.StreamID, opts.Cursor, opts.Limit+1, opts.Range.PlainStart, opts.Range.PlainLimit)
}
err = withRows(rows, rowsErr)(func(rows tagsql.Rows) error {
for rows.Next() {
var segment SegmentPositionInfo
err = rows.Scan(
&segment.Position, &segment.PlainSize, &segment.PlainOffset, &segment.CreatedAt,
&segment.EncryptedETag, &segment.EncryptedKeyNonce, &segment.EncryptedKey,
)
if err != nil {
return Error.New("failed to scan segments: %w", err)
}
result.Segments = append(result.Segments, segment)
}
return nil
})
if err != nil {
if errors.Is(err, sql.ErrNoRows) {
return ListStreamPositionsResult{}, nil
}
return ListStreamPositionsResult{}, Error.New("unable to fetch object segments: %w", err)
}
if len(result.Segments) > opts.Limit {
result.More = true
result.Segments = result.Segments[:len(result.Segments)-1]
}
return result, nil
}