satellite/metainfo: Update metainfo RS config to more easily support multiple RS schemes.
Make metainfo.RSConfig a valid pflag config value. This allows us to
configure the RSConfig as a string like k/m/o/n-shareSize, which makes
having multiple supported RS schemes easier in the future.

RS-related config values that are no longer needed have been removed
(MinTotalThreshold, MaxTotalThreshold, MaxBufferMem, Validate).

Change-Id: I0178ae467dcf4375c504e7202f31443d627c15e1
parent dc5a5df7f5
commit db6bc6503d
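To illustrate the new flag format described above, here is a minimal, hypothetical usage sketch (not part of the commit). It assumes only the exported metainfo.RSConfig type introduced in this change and its pflag-style Set/String methods:

package main

import (
    "fmt"
    "log"

    "storj.io/storj/satellite/metainfo"
)

func main() {
    // Parse a scheme string the same way the flag parser would:
    // k/m/o/n-shareSize maps to Min/Repair/Success/Total plus ErasureShareSize.
    var rs metainfo.RSConfig
    if err := rs.Set("4/6/8/10-256B"); err != nil {
        log.Fatal(err)
    }
    fmt.Println(rs.Min, rs.Repair, rs.Success, rs.Total) // 4 6 8 10
    fmt.Println(rs.String())                             // expected to print something like 4/6/8/10-256 B
}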
@@ -66,10 +66,10 @@ var Combine = func(elements ...func(log *zap.Logger, index int, config *satellit
 
 // ReconfigureRS returns function to change satellite redundancy scheme values.
 var ReconfigureRS = func(minThreshold, repairThreshold, successThreshold, totalThreshold int) func(log *zap.Logger, index int, config *satellite.Config) {
     return func(log *zap.Logger, index int, config *satellite.Config) {
-        config.Metainfo.RS.MinThreshold = minThreshold
-        config.Metainfo.RS.RepairThreshold = repairThreshold
-        config.Metainfo.RS.SuccessThreshold = successThreshold
-        config.Metainfo.RS.TotalThreshold = totalThreshold
+        config.Metainfo.RS.Min = minThreshold
+        config.Metainfo.RS.Repair = repairThreshold
+        config.Metainfo.RS.Success = successThreshold
+        config.Metainfo.RS.Total = totalThreshold
     }
 }
 
@@ -454,16 +454,11 @@ func (planet *Planet) newSatellite(ctx context.Context, prefix string, index int
             MaxCommitInterval: 1 * time.Hour,
             Overlay:           true,
             RS: metainfo.RSConfig{
-                MaxBufferMem:     memory.Size(256),
                 ErasureShareSize: memory.Size(256),
-                MinThreshold:     atLeastOne(planet.config.StorageNodeCount * 1 / 5),
-                RepairThreshold:  atLeastOne(planet.config.StorageNodeCount * 2 / 5),
-                SuccessThreshold: atLeastOne(planet.config.StorageNodeCount * 3 / 5),
-                TotalThreshold:   atLeastOne(planet.config.StorageNodeCount * 4 / 5),
-
-                MinTotalThreshold: (planet.config.StorageNodeCount * 4 / 5),
-                MaxTotalThreshold: (planet.config.StorageNodeCount * 4 / 5),
-                Validate:          false,
+                Min:     atLeastOne(planet.config.StorageNodeCount * 1 / 5),
+                Repair:  atLeastOne(planet.config.StorageNodeCount * 2 / 5),
+                Success: atLeastOne(planet.config.StorageNodeCount * 3 / 5),
+                Total:   atLeastOne(planet.config.StorageNodeCount * 4 / 5),
             },
             Loop: metainfo.LoopConfig{
                 CoalesceDuration: 1 * time.Second,
@@ -143,7 +143,7 @@ func TestCalculateNodeAtRestData(t *testing.T) {
         require.NoError(t, err)
 
         // Confirm the correct number of shares were stored
-        rs := satelliteRS(planet.Satellites[0])
+        rs := satelliteRS(t, planet.Satellites[0])
         if !correctRedundencyScheme(len(obs.Node), rs) {
             t.Fatalf("expected between: %d and %d, actual: %d", rs.RepairShares, rs.TotalShares, len(obs.Node))
         }
@@ -175,7 +175,7 @@ func TestCalculateBucketAtRestData(t *testing.T) {
         SatelliteCount: 1, StorageNodeCount: 6, UplinkCount: 1,
     }, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
         satellitePeer := planet.Satellites[0]
-        redundancyScheme := satelliteRS(satellitePeer)
+        redundancyScheme := satelliteRS(t, satellitePeer)
         expectedBucketTallies := make(map[metabase.BucketLocation]*accounting.BucketTally)
         for _, tt := range testCases {
             tt := tt // avoid scopelint error
@@ -221,7 +221,7 @@ func TestTallyIgnoresExpiredPointers(t *testing.T) {
         SatelliteCount: 1, StorageNodeCount: 6, UplinkCount: 1,
     }, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
         satellitePeer := planet.Satellites[0]
-        redundancyScheme := satelliteRS(satellitePeer)
+        redundancyScheme := satelliteRS(t, satellitePeer)
 
         projectID, err := uuid.FromString("9656af6e-2d9c-42fa-91f2-bfd516a722d7")
         require.NoError(t, err)
@@ -423,12 +423,14 @@ func correctRedundencyScheme(shareCount int, uplinkRS storj.RedundancyScheme) bo
     return int(uplinkRS.RepairShares) <= shareCount && shareCount <= int(uplinkRS.TotalShares)
 }
 
-func satelliteRS(satellite *testplanet.Satellite) storj.RedundancyScheme {
+func satelliteRS(t *testing.T, satellite *testplanet.Satellite) storj.RedundancyScheme {
+    rs := satellite.Config.Metainfo.RS
+
     return storj.RedundancyScheme{
-        RequiredShares: int16(satellite.Config.Metainfo.RS.MinThreshold),
-        RepairShares:   int16(satellite.Config.Metainfo.RS.RepairThreshold),
-        OptimalShares:  int16(satellite.Config.Metainfo.RS.SuccessThreshold),
-        TotalShares:    int16(satellite.Config.Metainfo.RS.TotalThreshold),
-        ShareSize:      satellite.Config.Metainfo.RS.ErasureShareSize.Int32(),
+        RequiredShares: int16(rs.Min),
+        RepairShares:   int16(rs.Repair),
+        OptimalShares:  int16(rs.Success),
+        TotalShares:    int16(rs.Total),
+        ShareSize:      rs.ErasureShareSize.Int32(),
     }
 }
@@ -1074,16 +1074,14 @@ func TestReverifySlowDownload(t *testing.T) {
             StorageNodeDB: func(index int, db storagenode.DB, log *zap.Logger) (storagenode.DB, error) {
                 return testblobs.NewSlowDB(log.Named("slowdb"), db), nil
             },
-            Satellite: func(log *zap.Logger, index int, config *satellite.Config) {
+            Satellite: testplanet.Combine(
+                func(log *zap.Logger, index int, config *satellite.Config) {
                     // These config values are chosen to force the slow node to time out without timing out on the three normal nodes
                     config.Audit.MinBytesPerSecond = 100 * memory.KiB
                     config.Audit.MinDownloadTimeout = 1 * time.Second
-
-                config.Metainfo.RS.MinThreshold = 2
-                config.Metainfo.RS.RepairThreshold = 2
-                config.Metainfo.RS.SuccessThreshold = 4
-                config.Metainfo.RS.TotalThreshold = 4
                 },
+                testplanet.ReconfigureRS(2, 2, 4, 4),
+            ),
         },
     }, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
         satellite := planet.Satellites[0]
@@ -776,16 +776,14 @@ func TestVerifierSlowDownload(t *testing.T) {
             StorageNodeDB: func(index int, db storagenode.DB, log *zap.Logger) (storagenode.DB, error) {
                 return testblobs.NewSlowDB(log.Named("slowdb"), db), nil
             },
-            Satellite: func(log *zap.Logger, index int, config *satellite.Config) {
+            Satellite: testplanet.Combine(
+                func(log *zap.Logger, index int, config *satellite.Config) {
                     // These config values are chosen to force the slow node to time out without timing out on the three normal nodes
                     config.Audit.MinBytesPerSecond = 100 * memory.KiB
                     config.Audit.MinDownloadTimeout = 950 * time.Millisecond
-
-                config.Metainfo.RS.MinThreshold = 2
-                config.Metainfo.RS.RepairThreshold = 2
-                config.Metainfo.RS.SuccessThreshold = 4
-                config.Metainfo.RS.TotalThreshold = 4
                 },
+                testplanet.ReconfigureRS(2, 2, 4, 4),
+            ),
         },
     }, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
         satellite := planet.Satellites[0]
@@ -32,14 +32,12 @@ func TestChore(t *testing.T) {
         StorageNodeCount: 8,
         UplinkCount:      1,
         Reconfigure: testplanet.Reconfigure{
-            Satellite: func(log *zap.Logger, index int, config *satellite.Config) {
+            Satellite: testplanet.Combine(
+                func(log *zap.Logger, index int, config *satellite.Config) {
                     config.GracefulExit.MaxInactiveTimeFrame = maximumInactiveTimeFrame
-
-                config.Metainfo.RS.MinThreshold = 4
-                config.Metainfo.RS.RepairThreshold = 6
-                config.Metainfo.RS.SuccessThreshold = 8
-                config.Metainfo.RS.TotalThreshold = 8
                 },
+                testplanet.ReconfigureRS(4, 6, 8, 8),
+            ),
         },
     }, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
         uplinkPeer := planet.Uplinks[0]
@@ -136,14 +134,12 @@ func TestDurabilityRatio(t *testing.T) {
         StorageNodeCount: 4,
         UplinkCount:      1,
         Reconfigure: testplanet.Reconfigure{
-            Satellite: func(log *zap.Logger, index int, config *satellite.Config) {
+            Satellite: testplanet.Combine(
+                func(log *zap.Logger, index int, config *satellite.Config) {
                     config.GracefulExit.MaxInactiveTimeFrame = maximumInactiveTimeFrame
-
-                config.Metainfo.RS.MinThreshold = 2
-                config.Metainfo.RS.RepairThreshold = 3
-                config.Metainfo.RS.SuccessThreshold = successThreshold
-                config.Metainfo.RS.TotalThreshold = 4
                 },
+                testplanet.ReconfigureRS(2, 3, successThreshold, 4),
+            ),
         },
     }, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
         uplinkPeer := planet.Uplinks[0]
@@ -255,16 +255,14 @@ func TestRecvTimeout(t *testing.T) {
             StorageNodeDB: func(index int, db storagenode.DB, log *zap.Logger) (storagenode.DB, error) {
                 return testblobs.NewSlowDB(log.Named("slowdb"), db), nil
             },
-            Satellite: func(logger *zap.Logger, index int, config *satellite.Config) {
+            Satellite: testplanet.Combine(
+                func(log *zap.Logger, index int, config *satellite.Config) {
                     // This config value will create a very short timeframe allowed for receiving
                     // data from storage nodes. This will cause context to cancel with timeout.
                     config.GracefulExit.RecvTimeout = 10 * time.Millisecond
-
-                config.Metainfo.RS.MinThreshold = 2
-                config.Metainfo.RS.RepairThreshold = 3
-                config.Metainfo.RS.SuccessThreshold = successThreshold
-                config.Metainfo.RS.TotalThreshold = successThreshold
                 },
+                testplanet.ReconfigureRS(2, 3, successThreshold, successThreshold),
+            ),
             StorageNode: func(index int, config *storagenode.Config) {
                 config.GracefulExit = gracefulexit.Config{
                     ChoreInterval: 2 * time.Minute,
@@ -1240,17 +1238,15 @@ func TestFailureStorageNodeIgnoresTransferMessages(t *testing.T) {
         StorageNodeCount: 5,
         UplinkCount:      1,
         Reconfigure: testplanet.Reconfigure{
-            Satellite: func(logger *zap.Logger, index int, config *satellite.Config) {
+            Satellite: testplanet.Combine(
+                func(log *zap.Logger, index int, config *satellite.Config) {
                     // We don't care whether a node gracefully exits or not in this test,
                     // so we set the max failures percentage extra high.
                     config.GracefulExit.OverallMaxFailuresPercentage = 101
                     config.GracefulExit.MaxOrderLimitSendCount = maxOrderLimitSendCount
-
-                config.Metainfo.RS.MinThreshold = 2
-                config.Metainfo.RS.RepairThreshold = 3
-                config.Metainfo.RS.SuccessThreshold = 4
-                config.Metainfo.RS.TotalThreshold = 4
                 },
+                testplanet.ReconfigureRS(2, 3, 4, 4),
+            ),
         },
     }, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
         uplinkPeer := planet.Uplinks[0]
@@ -1370,15 +1366,13 @@ func TestIneligibleNodeAge(t *testing.T) {
         StorageNodeCount: 5,
         UplinkCount:      1,
         Reconfigure: testplanet.Reconfigure{
-            Satellite: func(logger *zap.Logger, index int, config *satellite.Config) {
+            Satellite: testplanet.Combine(
+                func(log *zap.Logger, index int, config *satellite.Config) {
                     // Set the required node age to 1 month.
                     config.GracefulExit.NodeMinAgeInMonths = 1
-
-                config.Metainfo.RS.MinThreshold = 2
-                config.Metainfo.RS.RepairThreshold = 3
-                config.Metainfo.RS.SuccessThreshold = 4
-                config.Metainfo.RS.TotalThreshold = 4
                 },
+                testplanet.ReconfigureRS(2, 3, 4, 4),
+            ),
         },
     }, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
         uplinkPeer := planet.Uplinks[0]
@@ -5,6 +5,9 @@ package metainfo
 
 import (
     "context"
+    "fmt"
+    "strconv"
+    "strings"
     "time"
 
     "go.uber.org/zap"
@@ -25,18 +28,73 @@ const (
 
 // RSConfig is a configuration struct that keeps details about default
 // redundancy strategy information.
+//
+// Can be used as a flag.
 type RSConfig struct {
-    MaxBufferMem     memory.Size `help:"maximum buffer memory to be allocated for read buffers" default:"4.00MiB"`
-    ErasureShareSize memory.Size `help:"the size of each new erasure share in bytes" default:"256B"`
-    MinThreshold     int         `help:"the minimum pieces required to recover a segment. k." releaseDefault:"29" devDefault:"4"`
-    RepairThreshold  int         `help:"the minimum safe pieces before a repair is triggered. m." releaseDefault:"35" devDefault:"6"`
-    SuccessThreshold int         `help:"the desired total pieces for a segment. o." releaseDefault:"80" devDefault:"8"`
-    TotalThreshold   int         `help:"the largest amount of pieces to encode to. n." releaseDefault:"110" devDefault:"10"`
+    ErasureShareSize memory.Size
+    Min              int
+    Repair           int
+    Success          int
+    Total            int
+}
 
-    // TODO left for validation until we will remove CreateSegmentOld
-    MinTotalThreshold int  `help:"the largest amount of pieces to encode to. n (lower bound for validation)." releaseDefault:"95" devDefault:"10"`
-    MaxTotalThreshold int  `help:"the largest amount of pieces to encode to. n (upper bound for validation)." releaseDefault:"130" devDefault:"10"`
-    Validate          bool `help:"validate redundancy scheme configuration" default:"true"`
+// Type implements pflag.Value.
+func (RSConfig) Type() string { return "metainfo.RSConfig" }
+
+// String is required for pflag.Value.
+func (rs *RSConfig) String() string {
+    return fmt.Sprintf("%d/%d/%d/%d-%s",
+        rs.Min,
+        rs.Repair,
+        rs.Success,
+        rs.Total,
+        rs.ErasureShareSize.String())
+}
+
+// Set sets the value from a string in the format k/m/o/n-size (min/repair/optimal/total-erasuresharesize).
+func (rs *RSConfig) Set(s string) error {
+    // Split on dash. Expect two items. First item is RS numbers. Second item is memory.Size.
+    info := strings.Split(s, "-")
+    if len(info) != 2 {
+        return Error.New("Invalid default RS config (expect format k/m/o/n-ShareSize, got %s)", s)
+    }
+    rsNumbersString := info[0]
+    shareSizeString := info[1]
+
+    // Attempt to parse "-size" part of config.
+    shareSizeInt, err := memory.ParseString(shareSizeString)
+    if err != nil {
+        return Error.New("Invalid share size in RS config: '%s', %w", shareSizeString, err)
+    }
+    shareSize := memory.Size(shareSizeInt)
+
+    // Split on forward slash. Expect exactly four positive non-decreasing integers.
+    rsNumbers := strings.Split(rsNumbersString, "/")
+    if len(rsNumbers) != 4 {
+        return Error.New("Invalid default RS numbers (wrong size, expect 4): %s", rsNumbersString)
+    }
+
+    minValue := 1
+    values := []int{}
+    for _, nextValueString := range rsNumbers {
+        nextValue, err := strconv.Atoi(nextValueString)
+        if err != nil {
+            return Error.New("Invalid default RS numbers (should all be valid integers): %s, %w", rsNumbersString, err)
+        }
+        if nextValue < minValue {
+            return Error.New("Invalid default RS numbers (should be non-decreasing): %s", rsNumbersString)
+        }
+        values = append(values, nextValue)
+        minValue = nextValue
+    }
+
+    rs.ErasureShareSize = shareSize
+    rs.Min = values[0]
+    rs.Repair = values[1]
+    rs.Success = values[2]
+    rs.Total = values[3]
+
+    return nil
 }
 
 // RateLimiterConfig is a configuration struct for endpoint rate limiting.
@@ -63,7 +121,7 @@ type Config struct {
     MaxMetadataSize   memory.Size   `default:"2KiB" help:"maximum segment metadata size"`
     MaxCommitInterval time.Duration `default:"48h" help:"maximum time allowed to pass between creating and committing a segment"`
     Overlay           bool          `default:"true" help:"toggle flag if overlay is enabled"`
-    RS                RSConfig      `help:"redundancy scheme configuration"`
+    RS                RSConfig      `releaseDefault:"29/35/80/110-256B" devDefault:"4/6/8/10-256B" help:"redundancy scheme configuration in the format k/m/o/n-sharesize"`
     Loop              LoopConfig    `help:"loop configuration"`
     RateLimiter       RateLimiterConfig  `help:"rate limiter configuration"`
    ProjectLimits      ProjectLimitConfig `help:"project limit configuration"`
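As an aside (not part of the diff), because RSConfig now satisfies the pflag.Value interface through the Type/String/Set methods above, it can be bound directly to a flag set. A rough sketch under that assumption, using the flag name shown in the satellite-config.yaml.lock change further below:

package main

import (
    "fmt"

    "github.com/spf13/pflag"

    "storj.io/storj/satellite/metainfo"
)

func main() {
    // Seed with the release default from the Config struct tag above.
    rs := metainfo.RSConfig{}
    _ = rs.Set("29/35/80/110-256B")

    fs := pflag.NewFlagSet("satellite", pflag.ContinueOnError)
    fs.Var(&rs, "metainfo.rs", "redundancy scheme configuration in the format k/m/o/n-sharesize")

    // Hypothetical override from the command line.
    if err := fs.Parse([]string{"--metainfo.rs=4/6/8/10-256B"}); err != nil {
        panic(err)
    }
    fmt.Println(rs.Min, rs.Repair, rs.Success, rs.Total, rs.ErasureShareSize)
}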
satellite/metainfo/config_test.go (new file, 98 lines)
@@ -0,0 +1,98 @@
+// Copyright (C) 2020 Storj Labs, Inc.
+// See LICENSE for copying information.
+
+package metainfo_test
+
+import (
+    "testing"
+
+    "github.com/stretchr/testify/require"
+
+    "storj.io/common/memory"
+    "storj.io/storj/satellite/metainfo"
+)
+
+func TestRSConfigValidation(t *testing.T) {
+    tests := []struct {
+        description    string
+        configString   string
+        expectedConfig metainfo.RSConfig
+        expectError    bool
+    }{
+        {
+            description:  "valid rs config",
+            configString: "4/8/10/20-256B",
+            expectedConfig: metainfo.RSConfig{
+                ErasureShareSize: 256 * memory.B, Min: 4, Repair: 8, Success: 10, Total: 20,
+            },
+            expectError: false,
+        },
+        {
+            description:  "invalid rs config - numbers decrease",
+            configString: "4/8/5/20-256B",
+            expectError:  true,
+        },
+        {
+            description:  "invalid rs config - starts at 0",
+            configString: "0/2/4/6-256B",
+            expectError:  true,
+        },
+        {
+            description:  "invalid rs config - strings",
+            configString: "4/a/b/20-256B",
+            expectError:  true,
+        },
+        {
+            description:  "invalid rs config - floating-point numbers",
+            configString: "4/5.2/7/20-256B",
+            expectError:  true,
+        },
+        {
+            description:  "invalid rs config - not enough items",
+            configString: "4/5/20-256B",
+            expectError:  true,
+        },
+        {
+            description:  "invalid rs config - too many items",
+            configString: "4/5/20/30/50-256B",
+            expectError:  true,
+        },
+        {
+            description:  "invalid rs config - empty numbers",
+            configString: "-256B",
+            expectError:  true,
+        },
+        {
+            description:  "invalid rs config - empty size",
+            configString: "1/2/3/4-",
+            expectError:  true,
+        },
+        {
+            description:  "invalid rs config - empty",
+            configString: "",
+            expectError:  true,
+        },
+        {
+            description:  "invalid rs config - invalid share size",
+            configString: "4/8/10/20-256A",
+            expectError:  true,
+        },
+    }
+
+    for _, tt := range tests {
+        t.Log(tt.description)
+
+        rsConfig := metainfo.RSConfig{}
+        err := rsConfig.Set(tt.configString)
+        if tt.expectError {
+            require.Error(t, err)
+        } else {
+            require.NoError(t, err)
+            require.EqualValues(t, tt.expectedConfig.ErasureShareSize, rsConfig.ErasureShareSize)
+            require.EqualValues(t, tt.expectedConfig.Min, rsConfig.Min)
+            require.EqualValues(t, tt.expectedConfig.Repair, rsConfig.Repair)
+            require.EqualValues(t, tt.expectedConfig.Success, rsConfig.Success)
+            require.EqualValues(t, tt.expectedConfig.Total, rsConfig.Total)
+        }
+    }
+}
@@ -81,6 +81,7 @@ type Endpoint struct {
     limiterCache         *lrucache.ExpiringLRU
     encInlineSegmentSize int64 // max inline segment size + encryption overhead
     revocations          revocation.DB
+    defaultRS            *pb.RedundancyScheme
     config               Config
 }
 
@@ -103,6 +104,16 @@ func NewEndpoint(log *zap.Logger, metainfo *Service, deletePieces *piecedeletion
     if err != nil {
         return nil, err
     }
+
+    defaultRSScheme := &pb.RedundancyScheme{
+        Type:             pb.RedundancyScheme_RS,
+        MinReq:           int32(config.RS.Min),
+        RepairThreshold:  int32(config.RS.Repair),
+        SuccessThreshold: int32(config.RS.Success),
+        Total:            int32(config.RS.Total),
+        ErasureShareSize: config.RS.ErasureShareSize.Int32(),
+    }
+
     return &Endpoint{
         log:      log,
         metainfo: metainfo,
@@ -123,6 +134,7 @@ func NewEndpoint(log *zap.Logger, metainfo *Service, deletePieces *piecedeletion
         }),
         encInlineSegmentSize: encInlineSegmentSize,
         revocations:          revocations,
+        defaultRS:            defaultRSScheme,
         config:               config,
     }, nil
 }
@@ -248,7 +260,7 @@ func (endpoint *Endpoint) GetBucket(ctx context.Context, req *pb.BucketGetReques
     }
 
     // override RS to fit satellite settings
-    convBucket, err := convertBucketToProto(bucket, endpoint.redundancyScheme())
+    convBucket, err := convertBucketToProto(bucket, endpoint.defaultRS)
     if err != nil {
         return resp, err
     }
@@ -323,7 +335,7 @@ func (endpoint *Endpoint) CreateBucket(ctx context.Context, req *pb.BucketCreate
     }
 
     // override RS to fit satellite settings
-    convBucket, err := convertBucketToProto(bucket, endpoint.redundancyScheme())
+    convBucket, err := convertBucketToProto(bucket, endpoint.defaultRS)
     if err != nil {
         endpoint.log.Error("error while converting bucket to proto", zap.String("bucketName", bucket.Name), zap.Error(err))
         return nil, rpcstatus.Error(rpcstatus.Internal, "unable to create bucket")
@@ -382,7 +394,7 @@ func (endpoint *Endpoint) DeleteBucket(ctx context.Context, req *pb.BucketDelete
         return nil, err
     }
 
-    convBucket, err = convertBucketToProto(bucket, endpoint.redundancyScheme())
+    convBucket, err = convertBucketToProto(bucket, endpoint.defaultRS)
     if err != nil {
         return nil, err
     }
@@ -664,7 +676,7 @@ func (endpoint *Endpoint) BeginObject(ctx context.Context, req *pb.ObjectBeginRe
     }
 
     // use only satellite values for Redundancy Scheme
-    pbRS := endpoint.redundancyScheme()
+    pbRS := endpoint.defaultRS
 
     streamID, err := endpoint.packStreamID(ctx, &internalpb.StreamID{
         Bucket: req.Bucket,
@@ -1804,17 +1816,6 @@ func (endpoint *Endpoint) deleteObjectsPieces(ctx context.Context, reqs ...*meta
     return report, nil
 }
 
-func (endpoint *Endpoint) redundancyScheme() *pb.RedundancyScheme {
-    return &pb.RedundancyScheme{
-        Type:             pb.RedundancyScheme_RS,
-        MinReq:           int32(endpoint.config.RS.MinThreshold),
-        RepairThreshold:  int32(endpoint.config.RS.RepairThreshold),
-        SuccessThreshold: int32(endpoint.config.RS.SuccessThreshold),
-        Total:            int32(endpoint.config.RS.TotalThreshold),
-        ErasureShareSize: endpoint.config.RS.ErasureShareSize.Int32(),
-    }
-}
-
 // RevokeAPIKey handles requests to revoke an api key.
 func (endpoint *Endpoint) RevokeAPIKey(ctx context.Context, req *pb.RevokeAPIKeyRequest) (resp *pb.RevokeAPIKeyResponse, err error) {
     defer mon.Task()(&ctx)(&err)
@@ -346,14 +346,13 @@ func TestService_DeletePieces_Timeout(t *testing.T) {
             StorageNodeDB: func(index int, db storagenode.DB, log *zap.Logger) (storagenode.DB, error) {
                 return testblobs.NewSlowDB(log.Named("slowdb"), db), nil
             },
-            Satellite: func(log *zap.Logger, index int, config *satellite.Config) {
+            Satellite: testplanet.Combine(
+                func(log *zap.Logger, index int, config *satellite.Config) {
                     config.Metainfo.PieceDeletion.RequestTimeout = 200 * time.Millisecond
-                config.Metainfo.RS.MinThreshold = 2
-                config.Metainfo.RS.RepairThreshold = 2
-                config.Metainfo.RS.SuccessThreshold = 4
-                config.Metainfo.RS.TotalThreshold = 4
                     config.Metainfo.MaxSegmentSize = 15 * memory.KiB
                 },
+                testplanet.ReconfigureRS(2, 2, 4, 4),
+            ),
         },
     }, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
         uplnk := planet.Uplinks[0]
@@ -53,15 +53,13 @@ func testDataRepair(t *testing.T, inMemoryRepair bool) {
         StorageNodeCount: 14,
         UplinkCount:      1,
         Reconfigure: testplanet.Reconfigure{
-            Satellite: func(log *zap.Logger, index int, config *satellite.Config) {
+            Satellite: testplanet.Combine(
+                func(log *zap.Logger, index int, config *satellite.Config) {
                     config.Repairer.MaxExcessRateOptimalThreshold = RepairMaxExcessRateOptimalThreshold
                     config.Repairer.InMemoryRepair = inMemoryRepair
-
-                config.Metainfo.RS.MinThreshold = minThreshold
-                config.Metainfo.RS.RepairThreshold = 5
-                config.Metainfo.RS.SuccessThreshold = successThreshold
-                config.Metainfo.RS.TotalThreshold = 9
                 },
+                testplanet.ReconfigureRS(minThreshold, 5, successThreshold, 9),
+            ),
         },
     }, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
 
@@ -188,15 +186,13 @@ func testCorruptDataRepairFailed(t *testing.T, inMemoryRepair bool) {
         StorageNodeCount: 14,
         UplinkCount:      1,
         Reconfigure: testplanet.Reconfigure{
-            Satellite: func(log *zap.Logger, index int, config *satellite.Config) {
+            Satellite: testplanet.Combine(
+                func(log *zap.Logger, index int, config *satellite.Config) {
                     config.Repairer.MaxExcessRateOptimalThreshold = RepairMaxExcessRateOptimalThreshold
                     config.Repairer.InMemoryRepair = inMemoryRepair
-
-                config.Metainfo.RS.MinThreshold = 3
-                config.Metainfo.RS.RepairThreshold = 5
-                config.Metainfo.RS.SuccessThreshold = 7
-                config.Metainfo.RS.TotalThreshold = 9
                 },
+                testplanet.ReconfigureRS(3, 5, 7, 9),
+            ),
         },
     }, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
         uplinkPeer := planet.Uplinks[0]
@@ -305,15 +301,13 @@ func testCorruptDataRepairSucceed(t *testing.T, inMemoryRepair bool) {
         StorageNodeCount: 14,
         UplinkCount:      1,
         Reconfigure: testplanet.Reconfigure{
-            Satellite: func(log *zap.Logger, index int, config *satellite.Config) {
+            Satellite: testplanet.Combine(
+                func(log *zap.Logger, index int, config *satellite.Config) {
                     config.Repairer.MaxExcessRateOptimalThreshold = RepairMaxExcessRateOptimalThreshold
                     config.Repairer.InMemoryRepair = inMemoryRepair
-
-                config.Metainfo.RS.MinThreshold = 3
-                config.Metainfo.RS.RepairThreshold = 5
-                config.Metainfo.RS.SuccessThreshold = 7
-                config.Metainfo.RS.TotalThreshold = 9
                 },
+                testplanet.ReconfigureRS(3, 5, 7, 9),
+            ),
         },
     }, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
         uplinkPeer := planet.Uplinks[0]
@@ -799,14 +793,12 @@ func testRepairMultipleDisqualifiedAndSuspended(t *testing.T, inMemoryRepair boo
         StorageNodeCount: 12,
         UplinkCount:      1,
         Reconfigure: testplanet.Reconfigure{
-            Satellite: func(log *zap.Logger, index int, config *satellite.Config) {
+            Satellite: testplanet.Combine(
+                func(log *zap.Logger, index int, config *satellite.Config) {
                     config.Repairer.InMemoryRepair = inMemoryRepair
-
-                config.Metainfo.RS.MinThreshold = 3
-                config.Metainfo.RS.RepairThreshold = 5
-                config.Metainfo.RS.SuccessThreshold = 7
-                config.Metainfo.RS.TotalThreshold = 7
                 },
+                testplanet.ReconfigureRS(3, 5, 7, 7),
+            ),
         },
     }, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
         // first, upload some remote data
@@ -920,15 +912,13 @@ func testDataRepairOverrideHigherLimit(t *testing.T, inMemoryRepair bool) {
         StorageNodeCount: 14,
         UplinkCount:      1,
         Reconfigure: testplanet.Reconfigure{
-            Satellite: func(log *zap.Logger, index int, config *satellite.Config) {
-                config.Checker.RepairOverride = repairOverride
+            Satellite: testplanet.Combine(
+                func(log *zap.Logger, index int, config *satellite.Config) {
                     config.Repairer.InMemoryRepair = inMemoryRepair
-
-                config.Metainfo.RS.MinThreshold = 3
-                config.Metainfo.RS.RepairThreshold = 4
-                config.Metainfo.RS.SuccessThreshold = 9
-                config.Metainfo.RS.TotalThreshold = 9
+                    config.Checker.RepairOverride = repairOverride
                 },
+                testplanet.ReconfigureRS(3, 4, 9, 9),
+            ),
         },
     }, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
         uplinkPeer := planet.Uplinks[0]
@@ -1014,15 +1004,13 @@ func testDataRepairOverrideLowerLimit(t *testing.T, inMemoryRepair bool) {
         StorageNodeCount: 14,
         UplinkCount:      1,
         Reconfigure: testplanet.Reconfigure{
-            Satellite: func(log *zap.Logger, index int, config *satellite.Config) {
-                config.Checker.RepairOverride = repairOverride
+            Satellite: testplanet.Combine(
+                func(log *zap.Logger, index int, config *satellite.Config) {
                     config.Repairer.InMemoryRepair = inMemoryRepair
-
-                config.Metainfo.RS.MinThreshold = 3
-                config.Metainfo.RS.RepairThreshold = 6
-                config.Metainfo.RS.SuccessThreshold = 9
-                config.Metainfo.RS.TotalThreshold = 9
+                    config.Checker.RepairOverride = repairOverride
                 },
+                testplanet.ReconfigureRS(3, 6, 9, 9),
+            ),
         },
     }, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
         uplinkPeer := planet.Uplinks[0]
@@ -1141,15 +1129,13 @@ func testDataRepairUploadLimit(t *testing.T, inMemoryRepair bool) {
         StorageNodeCount: 13,
         UplinkCount:      1,
         Reconfigure: testplanet.Reconfigure{
-            Satellite: func(log *zap.Logger, index int, config *satellite.Config) {
+            Satellite: testplanet.Combine(
+                func(log *zap.Logger, index int, config *satellite.Config) {
                     config.Repairer.MaxExcessRateOptimalThreshold = RepairMaxExcessRateOptimalThreshold
                     config.Repairer.InMemoryRepair = inMemoryRepair
-
-                config.Metainfo.RS.MinThreshold = 3
-                config.Metainfo.RS.RepairThreshold = repairThreshold
-                config.Metainfo.RS.SuccessThreshold = successThreshold
-                config.Metainfo.RS.TotalThreshold = maxThreshold
                 },
+                testplanet.ReconfigureRS(3, repairThreshold, successThreshold, maxThreshold),
+            ),
         },
     }, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
         satellite := planet.Satellites[0]
@@ -1266,14 +1252,12 @@ func testRepairGracefullyExited(t *testing.T, inMemoryRepair bool) {
         StorageNodeCount: 12,
         UplinkCount:      1,
         Reconfigure: testplanet.Reconfigure{
-            Satellite: func(log *zap.Logger, index int, config *satellite.Config) {
+            Satellite: testplanet.Combine(
+                func(log *zap.Logger, index int, config *satellite.Config) {
                     config.Repairer.InMemoryRepair = inMemoryRepair
-
-                config.Metainfo.RS.MinThreshold = 3
-                config.Metainfo.RS.RepairThreshold = 5
-                config.Metainfo.RS.SuccessThreshold = 7
-                config.Metainfo.RS.TotalThreshold = 7
                 },
+                testplanet.ReconfigureRS(3, 5, 7, 7),
+            ),
         },
     }, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
         // first, upload some remote data
scripts/testdata/satellite-config.yaml.lock (vendored, 28 lines changed)
@@ -400,32 +400,8 @@ identity.key-path: /root/.local/share/storj/identity/satellite/identity.key
 # request rate per project per second.
 # metainfo.rate-limiter.rate: 1000
 
-# the size of each new erasure share in bytes
-# metainfo.rs.erasure-share-size: 256 B
-
-# maximum buffer memory to be allocated for read buffers
-# metainfo.rs.max-buffer-mem: 4.0 MiB
-
-# the largest amount of pieces to encode to. n (upper bound for validation).
-# metainfo.rs.max-total-threshold: 130
-
-# the minimum pieces required to recover a segment. k.
-# metainfo.rs.min-threshold: 29
-
-# the largest amount of pieces to encode to. n (lower bound for validation).
-# metainfo.rs.min-total-threshold: 95
-
-# the minimum safe pieces before a repair is triggered. m.
-# metainfo.rs.repair-threshold: 35
-
-# the desired total pieces for a segment. o.
-# metainfo.rs.success-threshold: 80
-
-# the largest amount of pieces to encode to. n.
-# metainfo.rs.total-threshold: 110
-
-# validate redundancy scheme configuration
-# metainfo.rs.validate: true
+# redundancy scheme configuration in the format k/m/o/n-sharesize
+# metainfo.rs: 29/35/80/110-256 B
 
 # address(es) to send telemetry to (comma-separated)
 # metrics.addr: collectora.storj.io:9000
@@ -177,15 +177,14 @@ func TestWorkerFailure_IneligibleNodeAge(t *testing.T) {
         StorageNodeCount: 5,
         UplinkCount:      1,
         Reconfigure: testplanet.Reconfigure{
-            Satellite: func(logger *zap.Logger, index int, config *satellite.Config) {
+            Satellite: testplanet.Combine(
+                func(log *zap.Logger, index int, config *satellite.Config) {
                     // Set the required node age to 1 month.
                     config.GracefulExit.NodeMinAgeInMonths = 1
-
-                config.Metainfo.RS.MinThreshold = 2
-                config.Metainfo.RS.RepairThreshold = 3
-                config.Metainfo.RS.SuccessThreshold = successThreshold
-                config.Metainfo.RS.TotalThreshold = successThreshold
                 },
+                testplanet.ReconfigureRS(2, 3, successThreshold, successThreshold),
+            ),
 
             StorageNode: func(index int, config *storagenode.Config) {
                 config.GracefulExit.NumWorkers = 2
                 config.GracefulExit.NumConcurrentTransfers = 2