// Copyright (C) 2020 Storj Labs, Inc.
// See LICENSE for copying information.

package metabase

import (
	"bytes"
	"context"
	"strings"

	"github.com/zeebo/errs"

	"storj.io/common/uuid"
	"storj.io/private/tagsql"
)

// objectsIterator enables iteration on objects in a bucket.
type objectsIterator struct {
	db *DB

	projectID             uuid.UUID
	bucketName            []byte
	status                ObjectStatus
	prefix                ObjectKey
	prefixLimit           ObjectKey
	batchSize             int
	recursive             bool
	includeCustomMetadata bool
	includeSystemMetadata bool

	curIndex int
	curRows  tagsql.Rows
	cursor   iterateCursor // not relative to prefix

	skipPrefix  ObjectKey // relative to prefix
	doNextQuery func(context.Context, *objectsIterator) (_ tagsql.Rows, err error)

	// failErr is set when either scan or next query fails during iteration.
	failErr error
}

// iterateCursor describes the position that the iterator resumes from.
type iterateCursor struct {
	Key       ObjectKey
	Version   Version
	StreamID  uuid.UUID
	Inclusive bool
}

// iterateAllVersionsWithStatus iterates through all versions of objects with the given status in a bucket.
func iterateAllVersionsWithStatus(ctx context.Context, db *DB, opts IterateObjectsWithStatus, fn func(context.Context, ObjectsIterator) error) (err error) {
	defer mon.Task()(&ctx)(&err)

	it := &objectsIterator{
		db: db,

		projectID:             opts.ProjectID,
		bucketName:            []byte(opts.BucketName),
		status:                opts.Status,
		prefix:                opts.Prefix,
		prefixLimit:           prefixLimit(opts.Prefix),
		batchSize:             opts.BatchSize,
		recursive:             opts.Recursive,
		includeCustomMetadata: opts.IncludeCustomMetadata,
		includeSystemMetadata: opts.IncludeSystemMetadata,

		curIndex: 0,
		cursor:   firstIterateCursor(opts.Recursive, opts.Cursor, opts.Prefix),

		doNextQuery: doNextQueryAllVersionsWithStatus,
	}

	// start from either the cursor or the prefix, depending on which is larger
	if lessKey(it.cursor.Key, opts.Prefix) {
		it.cursor.Key = opts.Prefix
		it.cursor.Version = -1
		it.cursor.Inclusive = true
	}

	return iterate(ctx, it, fn)
}

// iteratePendingObjectsByKey iterates through all streams of pending objects with the specified object key.
func iteratePendingObjectsByKey(ctx context.Context, db *DB, opts IteratePendingObjectsByKey, fn func(context.Context, ObjectsIterator) error) (err error) {
	defer mon.Task()(&ctx)(&err)

	cursor := opts.Cursor

	if cursor.StreamID.IsZero() {
		cursor.StreamID = uuid.UUID{}
	}

	it := &objectsIterator{
		db: db,

		projectID:             opts.ProjectID,
		bucketName:            []byte(opts.BucketName),
		prefix:                "",
		prefixLimit:           "",
		batchSize:             opts.BatchSize,
		recursive:             true,
		includeCustomMetadata: true,
		includeSystemMetadata: true,

		curIndex: 0,
		cursor: iterateCursor{
			Key:      opts.ObjectKey,
			Version:  0,
			StreamID: opts.Cursor.StreamID,
		},
		doNextQuery: doNextQueryStreamsByKey,
	}

	return iterate(ctx, it, fn)
}

// iterate fetches the first batch and runs fn with the iterator, closing the
// rows and combining any scan or query failures when done.
func iterate(ctx context.Context, it *objectsIterator, fn func(context.Context, ObjectsIterator) error) (err error) {
	batchsizeLimit.Ensure(&it.batchSize)

	it.curRows, err = it.doNextQuery(ctx, it)
	if err != nil {
		return err
	}
	it.cursor.Inclusive = false

	defer func() {
		if rowsErr := it.curRows.Err(); rowsErr != nil {
			err = errs.Combine(err, rowsErr)
		}
		err = errs.Combine(err, it.failErr, it.curRows.Close())
	}()

	return fn(ctx, it)
}
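
// A minimal usage sketch (hypothetical caller; assumes an IterateObjectsWithStatus
// value named opts is already populated):
//
//	err := iterateAllVersionsWithStatus(ctx, db, opts,
//		func(ctx context.Context, it ObjectsIterator) error {
//			var entry ObjectEntry
//			for it.Next(ctx, &entry) {
//				// entry.ObjectKey is relative to opts.Prefix
//			}
//			return nil
//		})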

// Next returns true if there is another item and copies it into item.
func (it *objectsIterator) Next(ctx context.Context, item *ObjectEntry) bool {
	if it.recursive {
		return it.next(ctx, item)
	}

	// TODO: implement this on the database side

	// Skip the previously returned prefix to avoid listing every object
	// stored under it.
	if it.skipPrefix != "" {
		// drop the current results page
		if rowsErr := it.curRows.Err(); rowsErr != nil {
			it.failErr = rowsErr
			return false
		}

		if closeErr := it.curRows.Close(); closeErr != nil {
			it.failErr = closeErr
			return false
		}

		// move the cursor past the prefix we want to skip
		it.cursor.Key = it.prefix + prefixLimit(it.skipPrefix)
		it.cursor.StreamID = uuid.UUID{}
		it.cursor.Version = 0

		// bump curIndex so that next issues a new query instead of treating
		// the dropped page as the end of iteration
		it.curIndex = it.batchSize

		it.skipPrefix = ""
	}

	ok := it.next(ctx, item)
	if !ok {
		return false
	}

	// should this be treated as a prefix?
	p := strings.IndexByte(string(item.ObjectKey), Delimiter)
	if p >= 0 {
		it.skipPrefix = item.ObjectKey[:p+1]
		*item = ObjectEntry{
			IsPrefix:  true,
			ObjectKey: item.ObjectKey[:p+1],
			Status:    it.status,
		}
	}

	return true
}
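
// For example, a non-recursive listing with an empty prefix over the keys
// "a/x", "a/y", "b" (hypothetical keys) yields two entries: the prefix entry
// "a/" (everything under it is skipped via skipPrefix) and then the object "b".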

// next returns true if there is another item and copies it into item.
func (it *objectsIterator) next(ctx context.Context, item *ObjectEntry) bool {
	next := it.curRows.Next()
	if !next {
		if it.curIndex < it.batchSize {
			return false
		}

		if it.curRows.Err() != nil {
			return false
		}

		rows, err := it.doNextQuery(ctx, it)
		if err != nil {
			it.failErr = errs.Combine(it.failErr, err)
			return false
		}

		if closeErr := it.curRows.Close(); closeErr != nil {
			it.failErr = errs.Combine(it.failErr, closeErr, rows.Close())
			return false
		}

		it.curRows = rows
		it.curIndex = 0
		if !it.curRows.Next() {
			return false
		}
	}

	err := it.scanItem(item)
	if err != nil {
		it.failErr = errs.Combine(it.failErr, err)
		return false
	}

	it.curIndex++
	it.cursor.Key = item.ObjectKey
	it.cursor.Version = item.Version
	it.cursor.StreamID = item.StreamID

	if it.prefix != "" {
		if !strings.HasPrefix(string(item.ObjectKey), string(it.prefix)) {
			return false
		}
	}

	// TODO: this should be done in the SQL query
	item.ObjectKey = item.ObjectKey[len(it.prefix):]

	return true
}

// doNextQueryAllVersionsWithStatus fetches the next batch of objects for iterateAllVersionsWithStatus.
func doNextQueryAllVersionsWithStatus(ctx context.Context, it *objectsIterator) (_ tagsql.Rows, err error) {
	defer mon.Task()(&ctx)(&err)

	// minimum set of fields needed for the cursor
	querySelectFields := `
		object_key
		,stream_id
		,version
		,encryption`

	if it.includeSystemMetadata {
		querySelectFields += `
		,status
		,created_at
		,expires_at
		,segment_count
		,total_plain_size
		,total_encrypted_size
		,fixed_segment_size`
	}

	if it.includeCustomMetadata {
		querySelectFields += `
		,encrypted_metadata_nonce
		,encrypted_metadata
		,encrypted_metadata_encrypted_key`
	}

	cursorCompare := ">"
	if it.cursor.Inclusive {
		cursorCompare = ">="
	}

	if it.prefixLimit == "" {
		return it.db.db.QueryContext(ctx, `
			SELECT
			`+querySelectFields+`
			FROM objects
			WHERE
				(project_id, bucket_name, object_key, version) `+cursorCompare+` ($1, $2, $4, $5)
				AND (project_id, bucket_name) < ($1, $7)
				AND status = $3
				AND (expires_at IS NULL OR expires_at > now())
				ORDER BY (project_id, bucket_name, object_key, version) ASC
			LIMIT $6
		`, it.projectID, it.bucketName,
			it.status,
			[]byte(it.cursor.Key), int(it.cursor.Version),
			it.batchSize,
			nextBucket(it.bucketName),
		)
	}

	// TODO: this query should use SUBSTRING(object_key from $8), but there is a
	// problem with how that works on CRDB.
	return it.db.db.QueryContext(ctx, `
		SELECT
		`+querySelectFields+`
		FROM objects
		WHERE
			(project_id, bucket_name, object_key, version) `+cursorCompare+` ($1, $2, $4, $5)
			AND (project_id, bucket_name, object_key) < ($1, $2, $6)
			AND status = $3
			AND (expires_at IS NULL OR expires_at > now())
			ORDER BY (project_id, bucket_name, object_key, version) ASC
		LIMIT $7
	`, it.projectID, it.bucketName,
		it.status,
		[]byte(it.cursor.Key), int(it.cursor.Version),
		[]byte(it.prefixLimit),
		it.batchSize,
		// len(it.prefix)+1, // TODO: uncomment when the CRDB issue is fixed
	)
}
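
// The tuple comparisons above implement keyset pagination: each batch resumes
// strictly after (or at, when the cursor is inclusive) the last returned
// (object_key, version) pair, so batches never need an OFFSET.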

// nextBucket returns the lexicographically next bucket.
func nextBucket(b []byte) []byte {
	xs := make([]byte, len(b)+1)
	copy(xs, b)
	return xs
}
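
// For example, nextBucket([]byte("photos")) returns "photos\x00", the smallest
// bucket name sorting strictly after "photos"; it serves as the exclusive upper
// bound on (project_id, bucket_name) in doNextQueryAllVersionsWithStatus.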

// doNextQueryStreamsByKey fetches the next batch of pending streams for the cursor's object key.
func doNextQueryStreamsByKey(ctx context.Context, it *objectsIterator) (_ tagsql.Rows, err error) {
	defer mon.Task()(&ctx)(&err)

	return it.db.db.QueryContext(ctx, `
		SELECT
			object_key, stream_id, version, encryption, status,
			created_at, expires_at,
			segment_count,
			total_plain_size, total_encrypted_size, fixed_segment_size,
			encrypted_metadata_nonce, encrypted_metadata, encrypted_metadata_encrypted_key
		FROM objects
		WHERE
			project_id = $1 AND bucket_name = $2
			AND object_key = $3
			AND stream_id > $4::BYTEA
			AND status = `+pendingStatus+`
		ORDER BY stream_id ASC
		LIMIT $5
	`, it.projectID, it.bucketName,
		[]byte(it.cursor.Key),
		it.cursor.StreamID,
		it.batchSize,
	)
}
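
// Pagination here is by stream_id alone, since every row in a batch shares the
// same project, bucket, and object key.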

// scanItem scans doNextQuery results into ObjectEntry.
func (it *objectsIterator) scanItem(item *ObjectEntry) (err error) {
	item.IsPrefix = false

	// The field order must match the column order produced by
	// doNextQueryAllVersionsWithStatus and doNextQueryStreamsByKey.
	fields := []interface{}{
		&item.ObjectKey,
		&item.StreamID,
		&item.Version,
		encryptionParameters{&item.Encryption},
	}

	if it.includeSystemMetadata {
		fields = append(fields,
			&item.Status,
			&item.CreatedAt,
			&item.ExpiresAt,
			&item.SegmentCount,
			&item.TotalPlainSize,
			&item.TotalEncryptedSize,
			&item.FixedSegmentSize,
		)
	}

	if it.includeCustomMetadata {
		fields = append(fields,
			&item.EncryptedMetadataNonce,
			&item.EncryptedMetadata,
			&item.EncryptedMetadataEncryptedKey,
		)
	}

	return it.curRows.Scan(fields...)
}

// prefixLimit returns the key used as the exclusive upper bound when listing
// keys under the given prefix: the last byte of the prefix is incremented
// (or a zero byte is appended when it is already 0xFF).
func prefixLimit(a ObjectKey) ObjectKey {
	if a == "" {
		return ""
	}
	if a[len(a)-1] == 0xFF {
		return a + "\x00"
	}

	key := []byte(a)
	key[len(key)-1]++
	return ObjectKey(key)
}
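
// For example, prefixLimit("photos/") returns "photos0", since '0' is the byte
// immediately after '/'; every key starting with "photos/" sorts before it.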

// lessKey returns whether a < b.
func lessKey(a, b ObjectKey) bool {
	return bytes.Compare([]byte(a), []byte(b)) < 0
}

// firstIterateCursor adjusts the cursor for a non-recursive iteration.
// The cursor is non-inclusive, so it needs adjusting to handle a prefix used as a cursor properly.
// We return the next possible key from the prefix.
func firstIterateCursor(recursive bool, cursor IterateCursor, prefix ObjectKey) iterateCursor {
	if recursive {
		return iterateCursor{
			Key:     cursor.Key,
			Version: cursor.Version,
		}
	}

	// when the cursor does not match the prefix, return the original cursor
	if !strings.HasPrefix(string(cursor.Key), string(prefix)) {
		return iterateCursor{
			Key:     cursor.Key,
			Version: cursor.Version,
		}
	}

	// handle the case where:
	//   prefix: x/y/
	//   cursor: x/y/z/w
	// In this case we want to skip everything under x/y/z/, so the new cursor
	// key becomes `x/y/z` + ('/' + 1).

	cursorWithoutPrefix := cursor.Key[len(prefix):]
	p := strings.IndexByte(string(cursorWithoutPrefix), Delimiter)
	if p < 0 {
		// The cursor is not a prefix, but instead a path inside the prefix,
		// so we can use it directly.
		return iterateCursor{
			Key:     cursor.Key,
			Version: cursor.Version,
		}
	}

	// return the next prefix given a scoped path
	return iterateCursor{
		Key:       cursor.Key[:len(prefix)+p] + ObjectKey(Delimiter+1),
		Version:   -1,
		Inclusive: true,
	}
}
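
// For example (hypothetical values), firstIterateCursor(false, IterateCursor{Key: "x/y/z/w"}, "x/y/")
// returns iterateCursor{Key: "x/y/z0", Version: -1, Inclusive: true}, because
// '0' is the byte immediately after the '/' delimiter.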