// Copyright (C) 2021 Storj Labs, Inc.
// See LICENSE for copying information.

package metabasetest

import (
	"testing"
	"time"

	"github.com/stretchr/testify/require"

	"storj.io/common/storj"
	"storj.io/common/testcontext"
	"storj.io/common/testrand"
	"storj.io/common/uuid"
	"storj.io/storj/satellite/metabase"
)

// RandObjectStream returns a random object stream.
func RandObjectStream() metabase.ObjectStream {
	return metabase.ObjectStream{
		ProjectID:  testrand.UUID(),
		BucketName: testrand.BucketName(),
		ObjectKey:  RandObjectKey(),
		Version:    1,
		StreamID:   testrand.UUID(),
	}
}

// RandObjectKey returns a random 16-byte object key.
func RandObjectKey() metabase.ObjectKey {
	return metabase.ObjectKey(testrand.Bytes(16))
}

// RandEncryptedKeyAndNonce generates a random encrypted key and nonce for the
// segment at the given position.
func RandEncryptedKeyAndNonce(position int) metabase.EncryptedKeyAndNonce {
	return metabase.EncryptedKeyAndNonce{
		Position:          metabase.SegmentPosition{Index: uint32(position)},
		EncryptedKeyNonce: testrand.Nonce().Bytes(),
		EncryptedKey:      testrand.Bytes(32),
	}
}

// CreatePendingObject creates a new pending object with the specified number of segments.
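//
// A minimal usage sketch (assuming ctx, t, and db are provided by the
// surrounding metabase test, e.g. a *testcontext.Context, *testing.T, and
// *metabase.DB):
//
//	obj := RandObjectStream()
//	CreatePendingObject(ctx, t, db, obj, 2)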
func CreatePendingObject(ctx *testcontext.Context, t *testing.T, db *metabase.DB, obj metabase.ObjectStream, numberOfSegments byte) {
	BeginObjectExactVersion{
		Opts: metabase.BeginObjectExactVersion{
			ObjectStream: obj,
			Encryption:   DefaultEncryption,
		},
		Version: obj.Version,
	}.Check(ctx, t, db)

	for i := byte(0); i < numberOfSegments; i++ {
		BeginSegment{
			Opts: metabase.BeginSegment{
				ObjectStream: obj,
				Position:     metabase.SegmentPosition{Part: 0, Index: uint32(i)},
				RootPieceID:  storj.PieceID{i + 1},
				Pieces: []metabase.Piece{{
					Number:      1,
					StorageNode: testrand.NodeID(),
				}},
			},
		}.Check(ctx, t, db)

		CommitSegment{
			Opts: metabase.CommitSegment{
				ObjectStream: obj,
				Position:     metabase.SegmentPosition{Part: 0, Index: uint32(i)},
				RootPieceID:  storj.PieceID{1},
				Pieces:       metabase.Pieces{{Number: 0, StorageNode: storj.NodeID{2}}},

				EncryptedKey:      []byte{3},
				EncryptedKeyNonce: []byte{4},
				EncryptedETag:     []byte{5},

				EncryptedSize: 1024,
				PlainSize:     512,
				PlainOffset:   0,
				Redundancy:    DefaultRedundancy,
			},
		}.Check(ctx, t, db)
	}
}

// CreateObject creates a new committed object with the specified number of segments.
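//
// A minimal usage sketch (assuming ctx, t, and db come from the surrounding
// test):
//
//	obj := RandObjectStream()
//	object := CreateObject(ctx, t, db, obj, 3)
//	require.Equal(t, obj.StreamID, object.StreamID)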
func CreateObject(ctx *testcontext.Context, t require.TestingT, db *metabase.DB, obj metabase.ObjectStream, numberOfSegments byte) metabase.Object {
	BeginObjectExactVersion{
		Opts: metabase.BeginObjectExactVersion{
			ObjectStream: obj,
			Encryption:   DefaultEncryption,
		},
		Version: obj.Version,
	}.Check(ctx, t, db)

	for i := byte(0); i < numberOfSegments; i++ {
		BeginSegment{
			Opts: metabase.BeginSegment{
				ObjectStream: obj,
				Position:     metabase.SegmentPosition{Part: 0, Index: uint32(i)},
				RootPieceID:  storj.PieceID{i + 1},
				Pieces: []metabase.Piece{{
					Number:      1,
					StorageNode: testrand.NodeID(),
				}},
			},
		}.Check(ctx, t, db)

		CommitSegment{
			Opts: metabase.CommitSegment{
				ObjectStream: obj,
				Position:     metabase.SegmentPosition{Part: 0, Index: uint32(i)},
				RootPieceID:  storj.PieceID{1},
				Pieces:       metabase.Pieces{{Number: 0, StorageNode: storj.NodeID{2}}},

				EncryptedKey:      []byte{3},
				EncryptedKeyNonce: []byte{4},
				EncryptedETag:     []byte{5},

				EncryptedSize: 1024,
				PlainSize:     512,
				PlainOffset:   0,
				Redundancy:    DefaultRedundancy,
			},
		}.Check(ctx, t, db)
	}

	return CommitObject{
		Opts: metabase.CommitObject{
			ObjectStream: obj,
		},
	}.Check(ctx, t, db)
}

// CreateExpiredObject creates a new committed expired object with the specified number of segments.
func CreateExpiredObject(ctx *testcontext.Context, t *testing.T, db *metabase.DB, obj metabase.ObjectStream, numberOfSegments byte, expiresAt time.Time) metabase.Object {
	BeginObjectExactVersion{
		Opts: metabase.BeginObjectExactVersion{
			ObjectStream: obj,
			Encryption:   DefaultEncryption,
			ExpiresAt:    &expiresAt,
		},
		Version: obj.Version,
	}.Check(ctx, t, db)

	for i := byte(0); i < numberOfSegments; i++ {
		BeginSegment{
			Opts: metabase.BeginSegment{
				ObjectStream: obj,
				Position:     metabase.SegmentPosition{Part: 0, Index: uint32(i)},
				RootPieceID:  storj.PieceID{i + 1},
				Pieces: []metabase.Piece{{
					Number:      1,
					StorageNode: testrand.NodeID(),
				}},
			},
		}.Check(ctx, t, db)

		CommitSegment{
			Opts: metabase.CommitSegment{
				ObjectStream: obj,
				Position:     metabase.SegmentPosition{Part: 0, Index: uint32(i)},
				ExpiresAt:    &expiresAt,
				RootPieceID:  storj.PieceID{1},
				Pieces:       metabase.Pieces{{Number: 0, StorageNode: storj.NodeID{2}}},

				EncryptedKey:      []byte{3},
				EncryptedKeyNonce: []byte{4},
				EncryptedETag:     []byte{5},

				EncryptedSize: 1024,
				PlainSize:     512,
				PlainOffset:   0,
				Redundancy:    DefaultRedundancy,
			},
		}.Check(ctx, t, db)
	}

	return CommitObject{
		Opts: metabase.CommitObject{
			ObjectStream: obj,
		},
	}.Check(ctx, t, db)
}

// CreateFullObjectsWithKeys creates multiple objects with the specified keys.
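//
// A minimal usage sketch (projectID and bucketName are assumed to come from
// the caller's test setup):
//
//	keys := []metabase.ObjectKey{"a", "b", "c"}
//	entries := CreateFullObjectsWithKeys(ctx, t, db, projectID, bucketName, keys)
//	require.Len(t, entries, len(keys))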
func CreateFullObjectsWithKeys(ctx *testcontext.Context, t *testing.T, db *metabase.DB, projectID uuid.UUID, bucketName string, keys []metabase.ObjectKey) map[metabase.ObjectKey]metabase.LoopObjectEntry {
	objects := make(map[metabase.ObjectKey]metabase.LoopObjectEntry, len(keys))
	for _, key := range keys {
		obj := RandObjectStream()
		obj.ProjectID = projectID
		obj.BucketName = bucketName
		obj.ObjectKey = key

		CreateObject(ctx, t, db, obj, 0)

		objects[key] = metabase.LoopObjectEntry{
			ObjectStream: obj,
			Status:       metabase.Committed,
			CreatedAt:    time.Now(),
		}
	}

	return objects
}

// CreateTestObject is a helper for creating a committed object together with
// its segments; unset fields fall back to default options.
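//
// An illustrative sketch of overriding the commit options (objStream, ctx, t,
// and db are assumed to come from the surrounding test):
//
//	obj, segments := CreateTestObject{
//		CommitObject: &metabase.CommitObject{
//			ObjectStream: objStream,
//		},
//	}.Run(ctx, t, db, objStream, 2)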
type CreateTestObject struct {
	// BeginObjectExactVersion overrides the default begin-object options when set.
	BeginObjectExactVersion *metabase.BeginObjectExactVersion
	// CommitObject overrides the default commit-object options when set.
	CommitObject *metabase.CommitObject
	// CreateSegment, when set, is used to create each segment in place of the
	// default begin/commit segment flow.
	CreateSegment func(object metabase.Object, index int) metabase.Segment
}

// Run creates the object and its segments and returns the committed object
// together with the created segments.
func (co CreateTestObject) Run(ctx *testcontext.Context, t testing.TB, db *metabase.DB, obj metabase.ObjectStream, numberOfSegments byte) (metabase.Object, []metabase.Segment) {
	boeOpts := metabase.BeginObjectExactVersion{
		ObjectStream: obj,
		Encryption:   DefaultEncryption,
	}
	if co.BeginObjectExactVersion != nil {
		boeOpts = *co.BeginObjectExactVersion
	}

	object, err := db.BeginObjectExactVersion(ctx, boeOpts)
	require.NoError(t, err)

	createdSegments := []metabase.Segment{}
	for i := byte(0); i < numberOfSegments; i++ {
		if co.CreateSegment != nil {
			segment := co.CreateSegment(object, int(i))
			createdSegments = append(createdSegments, segment)
		} else {
			BeginSegment{
				Opts: metabase.BeginSegment{
					ObjectStream: obj,
					Position:     metabase.SegmentPosition{Part: 0, Index: uint32(i)},
					RootPieceID:  storj.PieceID{i + 1},
					Pieces: []metabase.Piece{{
						Number:      1,
						StorageNode: testrand.NodeID(),
					}},
				},
			}.Check(ctx, t, db)

			commitSegmentOpts := metabase.CommitSegment{
				ObjectStream: obj,
				ExpiresAt:    boeOpts.ExpiresAt,
				Position:     metabase.SegmentPosition{Part: 0, Index: uint32(i)},
				RootPieceID:  storj.PieceID{1},
				Pieces:       metabase.Pieces{{Number: 0, StorageNode: storj.NodeID{2}}},

				EncryptedKey:      []byte{3},
				EncryptedKeyNonce: []byte{4},
				EncryptedETag:     []byte{5},

				EncryptedSize: 1060,
				PlainSize:     512,
				PlainOffset:   int64(i) * 512,
				Redundancy:    DefaultRedundancy,
			}

			CommitSegment{
				Opts: commitSegmentOpts,
			}.Check(ctx, t, db)

			segment, err := db.GetSegmentByPosition(ctx, metabase.GetSegmentByPosition{
				StreamID: commitSegmentOpts.StreamID,
				Position: commitSegmentOpts.Position,
			})
			require.NoError(t, err)

			createdSegments = append(createdSegments, metabase.Segment{
				StreamID: obj.StreamID,
				Position: commitSegmentOpts.Position,

				CreatedAt:  segment.CreatedAt,
				RepairedAt: nil,
				ExpiresAt:  nil,

				RootPieceID:       commitSegmentOpts.RootPieceID,
				EncryptedKeyNonce: commitSegmentOpts.EncryptedKeyNonce,
				EncryptedKey:      commitSegmentOpts.EncryptedKey,

				EncryptedSize: commitSegmentOpts.EncryptedSize,
				PlainSize:     commitSegmentOpts.PlainSize,
				PlainOffset:   commitSegmentOpts.PlainOffset,
				EncryptedETag: commitSegmentOpts.EncryptedETag,

				Redundancy: commitSegmentOpts.Redundancy,

				InlineData: nil,
				Pieces:     commitSegmentOpts.Pieces,

				Placement: segment.Placement,
			})
		}
	}

	coOpts := metabase.CommitObject{
		ObjectStream: obj,
	}
	if co.CommitObject != nil {
		coOpts = *co.CommitObject
	}

	createdObject := CommitObject{
		Opts: coOpts,
	}.Check(ctx, t, db)

	return createdObject, createdSegments
}

// CreateObjectCopy is for testing object copy.
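//
// An illustrative sketch (originalObject is assumed to be a committed object,
// for example one returned by CreateTestObject.Run):
//
//	copyObj, origSegments, copySegments := CreateObjectCopy{
//		OriginalObject: originalObject,
//	}.Run(ctx, t, db)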
type CreateObjectCopy struct {
	OriginalObject metabase.Object
	// OriginalSegments is optional; if empty, fake segments are created as necessary.
	OriginalSegments []metabase.Segment
	FinishObject     *metabase.FinishCopyObject
	CopyObjectStream *metabase.ObjectStream
}

// Run creates the copy.
func (cc CreateObjectCopy) Run(ctx *testcontext.Context, t testing.TB, db *metabase.DB) (copyObj metabase.Object, expectedOriginalSegments []metabase.RawSegment, expectedCopySegments []metabase.RawSegment) {
	var copyStream metabase.ObjectStream
	if cc.CopyObjectStream != nil {
		copyStream = *cc.CopyObjectStream
	} else {
		copyStream = RandObjectStream()
	}

	newEncryptedKeysNonces := make([]metabase.EncryptedKeyAndNonce, cc.OriginalObject.SegmentCount)
	expectedOriginalSegments = make([]metabase.RawSegment, cc.OriginalObject.SegmentCount)
	expectedCopySegments = make([]metabase.RawSegment, cc.OriginalObject.SegmentCount)
	expectedEncryptedSize := 1060

	for i := 0; i < int(cc.OriginalObject.SegmentCount); i++ {
		newEncryptedKeysNonces[i] = RandEncryptedKeyAndNonce(i)

		expectedOriginalSegments[i] = DefaultRawSegment(cc.OriginalObject.ObjectStream, metabase.SegmentPosition{Index: uint32(i)})

		// TODO: place this calculation in metabasetest.
		expectedOriginalSegments[i].PlainOffset = int64(int32(i) * expectedOriginalSegments[i].PlainSize)
		// TODO: we should use the same value for encrypted size in both test methods.
		expectedOriginalSegments[i].EncryptedSize = int32(expectedEncryptedSize)

		expectedCopySegments[i] = metabase.RawSegment{}
		expectedCopySegments[i].StreamID = copyStream.StreamID
		expectedCopySegments[i].EncryptedKeyNonce = newEncryptedKeysNonces[i].EncryptedKeyNonce
		expectedCopySegments[i].EncryptedKey = newEncryptedKeysNonces[i].EncryptedKey
		expectedCopySegments[i].EncryptedSize = expectedOriginalSegments[i].EncryptedSize
		expectedCopySegments[i].Position = expectedOriginalSegments[i].Position
		expectedCopySegments[i].RootPieceID = expectedOriginalSegments[i].RootPieceID
		expectedCopySegments[i].Redundancy = expectedOriginalSegments[i].Redundancy
		expectedCopySegments[i].PlainSize = expectedOriginalSegments[i].PlainSize
		expectedCopySegments[i].PlainOffset = expectedOriginalSegments[i].PlainOffset
		expectedCopySegments[i].CreatedAt = time.Now().UTC()
		if len(expectedOriginalSegments[i].InlineData) > 0 {
			expectedCopySegments[i].InlineData = expectedOriginalSegments[i].InlineData
		} else {
			expectedCopySegments[i].InlineData = []byte{}
		}
	}

	opts := cc.FinishObject
	if opts == nil {
		opts = &metabase.FinishCopyObject{
			ObjectStream:                 cc.OriginalObject.ObjectStream,
			NewStreamID:                  copyStream.StreamID,
			NewBucket:                    copyStream.BucketName,
			NewSegmentKeys:               newEncryptedKeysNonces,
			NewEncryptedObjectKey:        copyStream.ObjectKey,
			NewEncryptedMetadataKeyNonce: testrand.Nonce(),
			NewEncryptedMetadataKey:      testrand.Bytes(32),
		}
	}

	copyObj, err := db.FinishCopyObject(ctx, *opts)
	require.NoError(t, err)

	return copyObj, expectedOriginalSegments, expectedCopySegments
}

// SegmentsToRaw converts a slice of Segment to a slice of RawSegment.
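//
// A usage sketch, e.g. for comparing created segments against raw database
// state (obj, ctx, t, and db assumed from the surrounding test):
//
//	_, segments := CreateTestObject{}.Run(ctx, t, db, obj, 2)
//	rawSegments := SegmentsToRaw(segments)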
func SegmentsToRaw(segments []metabase.Segment) []metabase.RawSegment {
	rawSegments := []metabase.RawSegment{}

	for _, segment := range segments {
		rawSegments = append(rawSegments, metabase.RawSegment(segment))
	}

	return rawSegments
}