// Copyright (C) 2022 Storj Labs, Inc.
// See LICENSE for copying information.

package rangedloop_test

import (
	"context"
	"errors"
	"fmt"
	"sort"
	"strconv"
	"sync"
	"sync/atomic"
	"testing"
	"time"

	"github.com/stretchr/testify/require"
	"go.uber.org/zap/zaptest"

	"storj.io/common/memory"
	"storj.io/common/testcontext"
	"storj.io/common/testrand"
	"storj.io/common/uuid"
	"storj.io/storj/private/testplanet"
	"storj.io/storj/satellite/accounting/nodetally"
	"storj.io/storj/satellite/audit"
	"storj.io/storj/satellite/gc/bloomfilter"
	"storj.io/storj/satellite/gracefulexit"
	"storj.io/storj/satellite/metabase"
	"storj.io/storj/satellite/metabase/metabasetest"
	"storj.io/storj/satellite/metabase/rangedloop"
	"storj.io/storj/satellite/metabase/rangedloop/rangedlooptest"
	"storj.io/storj/satellite/metrics"
	"storj.io/storj/satellite/overlay"
	"storj.io/storj/satellite/repair/checker"
)
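
// TestLoopCount runs the ranged loop with varying parallelism, segment counts,
// and observer counts, and checks that every observer sees every segment.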
func TestLoopCount(t *testing.T) {
	for _, parallelism := range []int{1, 2, 3} {
		for _, nSegments := range []int{0, 1, 2, 11} {
			for _, nObservers := range []int{0, 1, 2} {
				t.Run(
					fmt.Sprintf("par%d_seg%d_obs%d", parallelism, nSegments, nObservers),
					func(t *testing.T) {
						runCountTest(t, parallelism, nSegments, nObservers)
					},
				)
			}
		}
	}
}
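
// runCountTest runs a single loop iteration and verifies that each
// CountObserver counted exactly nSegments segments.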
func runCountTest(t *testing.T, parallelism int, nSegments int, nObservers int) {
	batchSize := 2
	ctx := testcontext.New(t)

	observers := []rangedloop.Observer{}
	for i := 0; i < nObservers; i++ {
		observers = append(observers, &rangedlooptest.CountObserver{})
	}

	loopService := rangedloop.NewService(
		zaptest.NewLogger(t),
		rangedloop.Config{
			BatchSize:   batchSize,
			Parallelism: parallelism,
		},
		&rangedlooptest.RangeSplitter{
			Segments: make([]rangedloop.Segment, nSegments),
		},
		observers,
	)

	observerDurations, err := loopService.RunOnce(ctx)
	require.NoError(t, err)
	require.Len(t, observerDurations, nObservers)

	for _, observer := range observers {
		countObserver := observer.(*rangedlooptest.CountObserver)
		require.Equal(t, nSegments, countObserver.NumSegments)
	}
}
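
// TestLoopDuration checks that ranges are processed concurrently by comparing
// the wall-clock time of a loop of sleeping observers against the expected
// parallel runtime.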
func TestLoopDuration(t *testing.T) {
	t.Skip("Flaky test because it validates concurrency by measuring time")

	nSegments := 8
	nObservers := 2
	parallelism := 4
	batchSize := 2
	sleepIncrement := time.Millisecond * 10

	ctx := testcontext.New(t)

	observers := []rangedloop.Observer{}
	for i := 0; i < nObservers; i++ {
		observers = append(observers, &rangedlooptest.SleepObserver{
			Duration: sleepIncrement,
		})
	}

	segments := []rangedloop.Segment{}
	for i := 0; i < nSegments; i++ {
		streamId, err := uuid.FromBytes([]byte{byte(i), 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0})
		require.NoError(t, err)
		segments = append(segments, rangedloop.Segment{
			StreamID: streamId,
		})
	}

	loopService := rangedloop.NewService(
		zaptest.NewLogger(t),
		rangedloop.Config{
			BatchSize:   batchSize,
			Parallelism: parallelism,
		},
		&rangedlooptest.RangeSplitter{
			Segments: segments,
		},
		observers,
	)

	start := time.Now()
	observerDurations, err := loopService.RunOnce(ctx)
	require.NoError(t, err)

	duration := time.Since(start)
	expectedDuration := time.Duration(int64(nSegments) * int64(sleepIncrement) * int64(nObservers) / int64(parallelism))
	require.Equal(t, expectedDuration, duration.Truncate(sleepIncrement))

	require.Len(t, observerDurations, nObservers)
	for _, observerDuration := range observerDurations {
		expectedSleep := time.Duration(int64(nSegments) * int64(sleepIncrement))
		require.Equal(t, expectedSleep, observerDuration.Duration.Round(sleepIncrement))
	}
}
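
// TestLoopCancellation verifies that cancelling the context from inside an
// observer stops a loop that is fed by an infinite segment provider.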
func TestLoopCancellation(t *testing.T) {
	parallelism := 2
	batchSize := 1
	ctx, cancel := context.WithCancel(testcontext.NewWithTimeout(t, time.Second*10))

	observers := []rangedloop.Observer{
		&rangedlooptest.CountObserver{},
		&rangedlooptest.CallbackObserver{
			OnProcess: func(ctx context.Context, segments []rangedloop.Segment) error {
				// cancel from inside the loop, when it is certain that the loop has started
				cancel()
				return nil
			},
		},
	}

	loopService := rangedloop.NewService(
		zaptest.NewLogger(t),
		rangedloop.Config{
			BatchSize:   batchSize,
			Parallelism: parallelism,
		},
		&rangedlooptest.InfiniteSegmentProvider{},
		observers,
	)

	_, err := loopService.RunOnce(ctx)

	require.ErrorIs(t, err, context.Canceled)
}
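
// TestLoopContinuesAfterObserverError verifies that an observer failing in
// OnStart, OnFork, OnProcess, OnJoin or OnFinish is dropped from the loop,
// while the remaining observers keep running to completion.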
func TestLoopContinuesAfterObserverError(t *testing.T) {
	parallelism := 2
	batchSize := 1
	segments := make([]rangedloop.Segment, 2)

	numOnStartCalls := 0
	numOnForkCalls := 0
	numOnProcessCalls := int32(0)
	numOnJoinCalls := 0
	numOnFinishCalls := 0

	incNumOnProcessCalls := func() {
		atomic.AddInt32(&numOnProcessCalls, 1)
	}

	// The first and last observers emit no error;
	// the other observers emit an error at a different stage each.
	observers := []rangedloop.Observer{
		&rangedlooptest.CallbackObserver{
			OnStart: func(ctx context.Context, t time.Time) error {
				numOnStartCalls++
				return nil
			},
			OnFork: func(ctx context.Context) (rangedloop.Partial, error) {
				numOnForkCalls++
				return nil, nil
			},
			OnProcess: func(ctx context.Context, segments []rangedloop.Segment) error {
				incNumOnProcessCalls()
				return nil
			},
			OnJoin: func(ctx context.Context, partial rangedloop.Partial) error {
				numOnJoinCalls++
				return nil
			},
			OnFinish: func(ctx context.Context) error {
				numOnFinishCalls++
				return nil
			},
		},
		&rangedlooptest.CallbackObserver{
			OnStart: func(ctx context.Context, t time.Time) error {
				numOnStartCalls++
				return errors.New("Test OnStart error")
			},
			OnFork: func(ctx context.Context) (rangedloop.Partial, error) {
				require.Fail(t, "OnFork should not be called")
				return nil, nil
			},
			OnProcess: func(ctx context.Context, segments []rangedloop.Segment) error {
				require.Fail(t, "OnProcess should not be called")
				return nil
			},
			OnJoin: func(ctx context.Context, partial rangedloop.Partial) error {
				require.Fail(t, "OnJoin should not be called")
				return nil
			},
			OnFinish: func(ctx context.Context) error {
				require.Fail(t, "OnFinish should not be called")
				return nil
			},
		},
		&rangedlooptest.CallbackObserver{
			OnStart: func(ctx context.Context, t time.Time) error {
				numOnStartCalls++
				return nil
			},
			OnFork: func(ctx context.Context) (rangedloop.Partial, error) {
				numOnForkCalls++
				return nil, errors.New("Test OnFork error")
			},
			OnProcess: func(ctx context.Context, segments []rangedloop.Segment) error {
				require.Fail(t, "OnProcess should not be called")
				return nil
			},
			OnJoin: func(ctx context.Context, partial rangedloop.Partial) error {
				require.Fail(t, "OnJoin should not be called")
				return nil
			},
			OnFinish: func(ctx context.Context) error {
				require.Fail(t, "OnFinish should not be called")
				return nil
			},
		},
		&rangedlooptest.CallbackObserver{
			OnStart: func(ctx context.Context, t time.Time) error {
				numOnStartCalls++
				return nil
			},
			OnFork: func(ctx context.Context) (rangedloop.Partial, error) {
				numOnForkCalls++
				return nil, nil
			},
			OnProcess: func(ctx context.Context, segments []rangedloop.Segment) error {
				incNumOnProcessCalls()
				return errors.New("Test OnProcess error")
			},
			OnJoin: func(ctx context.Context, partial rangedloop.Partial) error {
				require.Fail(t, "OnJoin should not be called")
				return nil
			},
			OnFinish: func(ctx context.Context) error {
				require.Fail(t, "OnFinish should not be called")
				return nil
			},
		},
		&rangedlooptest.CallbackObserver{
			OnStart: func(ctx context.Context, t time.Time) error {
				numOnStartCalls++
				return nil
			},
			OnFork: func(ctx context.Context) (rangedloop.Partial, error) {
				numOnForkCalls++
				return nil, nil
			},
			OnProcess: func(ctx context.Context, segments []rangedloop.Segment) error {
				incNumOnProcessCalls()
				return nil
			},
			OnJoin: func(ctx context.Context, partial rangedloop.Partial) error {
				numOnJoinCalls++
				return errors.New("Test OnJoin error")
			},
			OnFinish: func(ctx context.Context) error {
				require.Fail(t, "OnFinish should not be called")
				return nil
			},
		},
		&rangedlooptest.CallbackObserver{
			OnStart: func(ctx context.Context, t time.Time) error {
				numOnStartCalls++
				return nil
			},
			OnFork: func(ctx context.Context) (rangedloop.Partial, error) {
				numOnForkCalls++
				return nil, nil
			},
			OnProcess: func(ctx context.Context, segments []rangedloop.Segment) error {
				incNumOnProcessCalls()
				return nil
			},
			OnJoin: func(ctx context.Context, partial rangedloop.Partial) error {
				numOnJoinCalls++
				return nil
			},
			OnFinish: func(ctx context.Context) error {
				numOnFinishCalls++
				return errors.New("Test OnFinish error")
			},
		},
		&rangedlooptest.CallbackObserver{
			OnStart: func(ctx context.Context, t time.Time) error {
				numOnStartCalls++
				return nil
			},
			OnFork: func(ctx context.Context) (rangedloop.Partial, error) {
				numOnForkCalls++
				return nil, nil
			},
			OnProcess: func(ctx context.Context, segments []rangedloop.Segment) error {
				incNumOnProcessCalls()
				return nil
			},
			OnJoin: func(ctx context.Context, partial rangedloop.Partial) error {
				numOnJoinCalls++
				return nil
			},
			OnFinish: func(ctx context.Context) error {
				numOnFinishCalls++
				return nil
			},
		},
	}

	loopService := rangedloop.NewService(
		zaptest.NewLogger(t),
		rangedloop.Config{
			BatchSize:   batchSize,
			Parallelism: parallelism,
		},
		&rangedlooptest.RangeSplitter{
			Segments: segments,
		},
		observers,
	)

	observerDurations, err := loopService.RunOnce(testcontext.New(t))
	require.NoError(t, err)
	require.Len(t, observerDurations, len(observers))

	require.EqualValues(t, 7, numOnStartCalls)
	require.EqualValues(t, 6*parallelism, numOnForkCalls)
	require.EqualValues(t, 5*parallelism-1, numOnProcessCalls)
	require.EqualValues(t, 4*parallelism-1, numOnJoinCalls)
	require.EqualValues(t, 3, numOnFinishCalls)

	// successful observers should have their durations reported
	require.Greater(t, observerDurations[0].Duration, time.Duration(0))
	require.Greater(t, observerDurations[6].Duration, time.Duration(0))

	// failed observers should have the sentinel duration reported
	require.Equal(t, observerDurations[1].Duration, -1*time.Second)
	require.Equal(t, observerDurations[2].Duration, -1*time.Second)
	require.Equal(t, observerDurations[3].Duration, -1*time.Second)
	require.Equal(t, observerDurations[4].Duration, -1*time.Second)
	require.Equal(t, observerDurations[5].Duration, -1*time.Second)
}
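
// TestAllInOne wires the real satellite observers (live count, metrics, node
// tally, audit, graceful exit, garbage-collection bloom filter, repair
// checker) into a single ranged loop on a testplanet satellite and runs it
// several times.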
func TestAllInOne(t *testing.T) {
	testplanet.Run(t, testplanet.Config{
		SatelliteCount: 1, StorageNodeCount: 4, UplinkCount: 1,
	}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
		log := zaptest.NewLogger(t)
		satellite := planet.Satellites[0]

		for i := 0; i < 100; i++ {
			err := planet.Uplinks[0].Upload(ctx, satellite, "testbucket", "object"+strconv.Itoa(i), testrand.Bytes(5*memory.KiB))
			require.NoError(t, err)
		}

		require.NoError(t, planet.Uplinks[0].CreateBucket(ctx, satellite, "bf-bucket"))

		metabaseProvider := rangedloop.NewMetabaseRangeSplitter(satellite.Metabase.DB, 0, 10)

		config := rangedloop.Config{
			Parallelism: 8,
			BatchSize:   3,
		}

		bfConfig := satellite.Config.GarbageCollectionBF
		bfConfig.Bucket = "bf-bucket"
		accessGrant, err := planet.Uplinks[0].Access[satellite.ID()].Serialize()
		require.NoError(t, err)
		bfConfig.AccessGrant = accessGrant

		service := rangedloop.NewService(log, config, metabaseProvider, []rangedloop.Observer{
			rangedloop.NewLiveCountObserver(satellite.Metabase.DB, config.SuspiciousProcessedRatio, config.AsOfSystemInterval),
			metrics.NewObserver(),
			nodetally.NewObserver(log.Named("accounting:nodetally"),
				satellite.DB.StoragenodeAccounting(),
				satellite.Metabase.DB,
			),
			audit.NewObserver(log.Named("audit"),
				satellite.DB.VerifyQueue(),
				satellite.Config.Audit,
			),
			gracefulexit.NewObserver(log.Named("gracefulexit:observer"),
				satellite.DB.GracefulExit(),
				satellite.DB.OverlayCache(),
				satellite.Metabase.DB,
				satellite.Config.GracefulExit,
			),
			bloomfilter.NewObserver(log.Named("gc-bf"),
				bfConfig,
				satellite.DB.OverlayCache(),
			),
			checker.NewObserver(
				log.Named("repair:checker"),
				satellite.DB.RepairQueue(),
				satellite.Overlay.Service,
				overlay.NewPlacementDefinitions().CreateFilters,
				satellite.Config.Checker,
			),
		})

		for i := 0; i < 5; i++ {
			_, err = service.RunOnce(ctx)
			require.NoError(t, err, "iteration %d", i+1)
		}
	})
}
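
// TestLoopBoundaries creates segments exactly on and just above the UUID
// range boundaries and checks that each segment is visited exactly once,
// for several batch sizes.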
func TestLoopBoundaries(t *testing.T) {
	metabasetest.Run(t, func(ctx *testcontext.Context, t *testing.T, db *metabase.DB) {
		type Segment struct {
			StreamID uuid.UUID
			Position metabase.SegmentPosition
		}

		var expectedSegments []Segment

		parallelism := 4

		ranges, err := rangedloop.CreateUUIDRanges(uint32(parallelism))
		require.NoError(t, err)

		for _, r := range ranges {
			if r.Start != nil {
				obj := metabasetest.RandObjectStream()
				obj.StreamID = *r.Start

				metabasetest.CreateObject(ctx, t, db, obj, 1)
				expectedSegments = append(expectedSegments, Segment{
					StreamID: obj.StreamID,
				})

				// additional object/segment just above the range boundary
				obj = metabasetest.RandObjectStream()
				obj.StreamID = *r.Start
				obj.StreamID[len(obj.StreamID)-1]++

				metabasetest.CreateObject(ctx, t, db, obj, 1)
				expectedSegments = append(expectedSegments, Segment{
					StreamID: obj.StreamID,
				})
			}
		}

		for _, batchSize := range []int{0, 1, 2, 3, 10} {
			var visitedSegments []Segment
			var mu sync.Mutex

			provider := rangedloop.NewMetabaseRangeSplitter(db, 0, batchSize)
			config := rangedloop.Config{
				Parallelism: parallelism,
				BatchSize:   batchSize,
			}

			callbackObserver := rangedlooptest.CallbackObserver{
				OnProcess: func(ctx context.Context, segments []rangedloop.Segment) error {
					// OnProcess is called concurrently from multiple goroutines,
					// so access to visitedSegments must be synchronized.
					mu.Lock()
					defer mu.Unlock()

					for _, segment := range segments {
						visitedSegments = append(visitedSegments, Segment{
							StreamID: segment.StreamID,
							Position: segment.Position,
						})
					}
					return nil
				},
			}

			service := rangedloop.NewService(zaptest.NewLogger(t), config, provider, []rangedloop.Observer{&callbackObserver})
			_, err = service.RunOnce(ctx)
			require.NoError(t, err)

			sort.Slice(visitedSegments, func(i, j int) bool {
				return visitedSegments[i].StreamID.Less(visitedSegments[j].StreamID)
			})
			require.Equal(t, expectedSegments, visitedSegments, "batch size %d", batchSize)
		}
	})
}