satellite/metainfo: add max segment size and max inline size to BeginObject response

We want to control the inline segment size and the segment size on the satellite side, so we return this information to the uplink in the BeginObject response, just as we already do with the redundancy scheme.

Change-Id: If04b0a45a2757a01c0cc046432c115f475e9323c
parent 4a79b609e9
commit c178a08cb8
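The change is easiest to read from the protocol side: BeginObject already returned the satellite's redundancy scheme, and this commit adds the satellite-side segment limits to the same response so the uplink can pick them up instead of hard-coding them. Below is a minimal, illustrative Go sketch of how a client could act on the two new int64 fields (MaxInlineSegmentSize, MaxSegmentSize); the segmentLimits struct and chooseUpload helper are stand-ins invented for this example, not part of the commit or the uplink API.

package main

import "fmt"

// segmentLimits mirrors the two fields this commit adds to pb.ObjectBeginResponse.
// A real uplink would read them from the BeginObject response instead of
// constructing them locally.
type segmentLimits struct {
	MaxInlineSegmentSize int64 // largest payload the satellite accepts as an inline segment
	MaxSegmentSize       int64 // largest segment size the satellite accepts
}

// chooseUpload is a hypothetical helper: given the advertised limits and a
// payload size, it reports whether the payload fits into a single inline
// segment and, if not, how many segments of at most MaxSegmentSize are needed.
func chooseUpload(limits segmentLimits, payloadSize int64) (inline bool, segments int64) {
	if payloadSize <= limits.MaxInlineSegmentSize {
		return true, 1
	}
	// ceiling division: number of remote segments needed
	segments = (payloadSize + limits.MaxSegmentSize - 1) / limits.MaxSegmentSize
	return false, segments
}

func main() {
	// Values matching the new satellite defaults: 4 KiB inline, 64 MiB segments.
	limits := segmentLimits{
		MaxInlineSegmentSize: 4 << 10,
		MaxSegmentSize:       64 << 20,
	}
	for _, size := range []int64{1 << 10, 5 << 10, 150 << 20} {
		inline, segments := chooseUpload(limits, size)
		fmt.Printf("payload %d B -> inline=%v, segments=%d\n", size, inline, segments)
	}
}

Payloads above the inline limit have to go through remote segments; the satellite now also enforces this server-side, as the new check in makeInlineSegment and the 5 KiB test case further down show.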
go.mod (2 lines changed)

@@ -43,7 +43,7 @@ require (
 	golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5
 	golang.org/x/time v0.0.0-20191024005414-555d28b269f0
 	google.golang.org/grpc v1.28.0
-	storj.io/common v0.0.0-20200401062549-3fa2945ebe52
+	storj.io/common v0.0.0-20200401122855-4ad28fd2218f
 	storj.io/drpc v0.0.11
 	storj.io/private v0.0.0-20200327035409-e9d82e7e0c6b
 	storj.io/uplink v1.0.3
go.sum (6 lines changed)

@@ -623,8 +623,10 @@ storj.io/common v0.0.0-20200323134045-2bd4d6e2dd7d/go.mod h1:I0QTs7z1rI+ZEN95GGY
 storj.io/common v0.0.0-20200331095257-30ebbdbbba88/go.mod h1:RBaNRmk/lqyZ7h1MAH4N9zld0z+tO4M9sLOFT30K+cE=
 storj.io/common v0.0.0-20200331124657-a4f8265946f2 h1:Fctei5lPPAfbvtpijLQZTjQeeuh+MCkacLYau7nyxKA=
 storj.io/common v0.0.0-20200331124657-a4f8265946f2/go.mod h1:RBaNRmk/lqyZ7h1MAH4N9zld0z+tO4M9sLOFT30K+cE=
-storj.io/common v0.0.0-20200401062549-3fa2945ebe52 h1:Oulb5CY00odkUrddYONRYkr7FefoJx1mbiIh8kOTh+0=
-storj.io/common v0.0.0-20200401062549-3fa2945ebe52/go.mod h1:RBaNRmk/lqyZ7h1MAH4N9zld0z+tO4M9sLOFT30K+cE=
+storj.io/common v0.0.0-20200401095230-4fe9b2ad3ec0 h1:gFsVck24e/eU3j9qmfXwsyBUwvtcjuTD5JXvvv9NdFE=
+storj.io/common v0.0.0-20200401095230-4fe9b2ad3ec0/go.mod h1:RBaNRmk/lqyZ7h1MAH4N9zld0z+tO4M9sLOFT30K+cE=
+storj.io/common v0.0.0-20200401122855-4ad28fd2218f h1:17QhsiCE8PFKoVaxRk+8tfGazlm2r5mMsXEtFTT2F9w=
+storj.io/common v0.0.0-20200401122855-4ad28fd2218f/go.mod h1:RBaNRmk/lqyZ7h1MAH4N9zld0z+tO4M9sLOFT30K+cE=
 storj.io/drpc v0.0.7-0.20191115031725-2171c57838d2/go.mod h1:/ascUDbzNAv0A3Jj7wUIKFBH2JdJ2uJIBO/b9+2yHgQ=
 storj.io/drpc v0.0.11 h1:6vLxfpSbwCLtqzAoXzXx/SxBqBtbzbmquXPqfcWKqfw=
 storj.io/drpc v0.0.11/go.mod h1:TiFc2obNjL9/3isMW1Rpxjy8V9uE0B2HMeMFGiiI7Iw=
@@ -332,11 +332,11 @@ func (planet *Planet) newSatellites(count int, satelliteDatabases satellitedbtes
 			Metainfo: metainfo.Config{
 				DatabaseURL: "", // not used
 				MinRemoteSegmentSize: 0, // TODO: fix tests to work with 1024
-				MaxInlineSegmentSize: 8000,
+				MaxInlineSegmentSize: 4 * memory.KiB,
+				MaxSegmentSize: 64 * memory.MiB,
 				MaxCommitInterval: 1 * time.Hour,
 				Overlay: true,
 				RS: metainfo.RSConfig{
-					MaxSegmentSize: 64 * memory.MiB,
 					MaxBufferMem: memory.Size(256),
 					ErasureShareSize: memory.Size(256),
 					MinThreshold: atLeastOne(planet.config.StorageNodeCount * 1 / 5),
@@ -425,10 +425,8 @@ func NewAPI(log *zap.Logger, full *identity.FullIdentity, db DB,
 			peer.DB.Console().APIKeys(),
 			peer.Accounting.ProjectUsage,
 			peer.DB.Console().Projects(),
-			config.Metainfo.RS,
 			signing.SignerFromFullIdentity(peer.Identity),
-			config.Metainfo.MaxCommitInterval,
-			config.Metainfo.RateLimiter,
+			config.Metainfo,
 		)
 		pbgrpc.RegisterMetainfoServer(peer.Server.GRPC(), peer.Metainfo.Endpoint2)
 		if err := pb.DRPCRegisterMetainfo(peer.Server.DRPC(), peer.Metainfo.Endpoint2); err != nil {
@@ -24,7 +24,6 @@ const (
 // RSConfig is a configuration struct that keeps details about default
 // redundancy strategy information
 type RSConfig struct {
-	MaxSegmentSize memory.Size `help:"maximum segment size" default:"64MiB"`
 	MaxBufferMem memory.Size `help:"maximum buffer memory to be allocated for read buffers" default:"4MiB"`
 	ErasureShareSize memory.Size `help:"the size of each new erasure share in bytes" default:"256B"`
 	MinThreshold int `help:"the minimum pieces required to recover a segment. k." releaseDefault:"29" devDefault:"4"`
@@ -50,7 +49,8 @@ type RateLimiterConfig struct {
 type Config struct {
 	DatabaseURL string `help:"the database connection string to use" default:"postgres://"`
 	MinRemoteSegmentSize memory.Size `default:"1240" help:"minimum remote segment size"`
-	MaxInlineSegmentSize memory.Size `default:"8000" help:"maximum inline segment size"`
+	MaxInlineSegmentSize memory.Size `default:"4KiB" help:"maximum inline segment size"`
+	MaxSegmentSize memory.Size `default:"64MiB" help:"maximum segment size"`
 	MaxCommitInterval time.Duration `default:"48h" help:"maximum time allowed to pass between creating and committing a segment"`
 	Overlay bool `default:"true" help:"toggle flag if overlay is enabled"`
 	RS RSConfig `help:"redundancy scheme configuration"`
@@ -83,11 +83,9 @@ type Endpoint struct {
 	projects console.Projects
 	apiKeys APIKeys
 	createRequests *createRequests
-	requiredRSConfig RSConfig
 	satellite signing.Signer
-	maxCommitInterval time.Duration
 	limiterCache *lrucache.ExpiringLRU
-	limiterConfig RateLimiterConfig
+	config Config
 }

 // NewEndpoint creates new metainfo endpoint instance.
@@ -95,8 +93,7 @@ func NewEndpoint(log *zap.Logger, metainfo *Service, deletePieces *piecedeletion
 	orders *orders.Service, cache *overlay.Service, attributions attribution.DB,
 	partners *rewards.PartnersService, peerIdentities overlay.PeerIdentities,
 	apiKeys APIKeys, projectUsage *accounting.Service, projects console.Projects,
-	rsConfig RSConfig, satellite signing.Signer, maxCommitInterval time.Duration,
-	limiterConfig RateLimiterConfig) *Endpoint {
+	satellite signing.Signer, config Config) *Endpoint {
 	// TODO do something with too many params
 	return &Endpoint{
 		log: log,
@@ -111,14 +108,12 @@ func NewEndpoint(log *zap.Logger, metainfo *Service, deletePieces *piecedeletion
 		projectUsage: projectUsage,
 		projects: projects,
 		createRequests: newCreateRequests(),
-		requiredRSConfig: rsConfig,
 		satellite: satellite,
-		maxCommitInterval: maxCommitInterval,
 		limiterCache: lrucache.New(lrucache.Options{
-			Capacity: limiterConfig.CacheCapacity,
-			Expiration: limiterConfig.CacheExpiration,
+			Capacity: config.RateLimiter.CacheCapacity,
+			Expiration: config.RateLimiter.CacheExpiration,
 		}),
-		limiterConfig: limiterConfig,
+		config: config,
 	}
 }

@@ -1053,11 +1048,13 @@ func (endpoint *Endpoint) BeginObject(ctx context.Context, req *pb.ObjectBeginRe
 	mon.Meter("req_put_object").Mark(1)

 	return &pb.ObjectBeginResponse{
 		Bucket: req.Bucket,
 		EncryptedPath: req.EncryptedPath,
 		Version: req.Version,
 		StreamId: streamID,
 		RedundancyScheme: pbRS,
+		MaxInlineSegmentSize: endpoint.config.MaxInlineSegmentSize.Int64(),
+		MaxSegmentSize: endpoint.config.MaxSegmentSize.Int64(),
 	}, nil
 }

@@ -1697,6 +1694,11 @@ func (endpoint *Endpoint) makeInlineSegment(ctx context.Context, req *pb.Segment
 		return nil, nil, rpcstatus.Error(rpcstatus.InvalidArgument, "segment index must be greater then 0")
 	}

+	inlineUsed := int64(len(req.EncryptedInlineData))
+	if inlineUsed > endpoint.config.MaxInlineSegmentSize.Int64() {
+		return nil, nil, rpcstatus.Error(rpcstatus.InvalidArgument, fmt.Sprintf("inline segment size cannot be larger than %s", endpoint.config.MaxInlineSegmentSize))
+	}
+
 	exceeded, limit, err := endpoint.projectUsage.ExceedsStorageUsage(ctx, keyInfo.ProjectID)
 	if err != nil {
 		return nil, nil, rpcstatus.Error(rpcstatus.Internal, err.Error())
@@ -1708,8 +1710,6 @@ func (endpoint *Endpoint) makeInlineSegment(ctx context.Context, req *pb.Segment
 		return nil, nil, rpcstatus.Error(rpcstatus.ResourceExhausted, "Exceeded Usage Limit")
 	}

-	inlineUsed := int64(len(req.EncryptedInlineData))
-
 	if err := endpoint.projectUsage.AddProjectStorageUsage(ctx, keyInfo.ProjectID, inlineUsed); err != nil {
 		endpoint.log.Sugar().Errorf("Could not track new storage usage by project %v: %v", keyInfo.ProjectID, err)
 		// but continue. it's most likely our own fault that we couldn't track it, and the only thing
@@ -2502,10 +2502,10 @@ func (endpoint *Endpoint) findIndexPreviousLastSegmentWhenNotKnowingNumSegments(
 func (endpoint *Endpoint) redundancyScheme() *pb.RedundancyScheme {
 	return &pb.RedundancyScheme{
 		Type: pb.RedundancyScheme_RS,
-		MinReq: int32(endpoint.requiredRSConfig.MinThreshold),
-		RepairThreshold: int32(endpoint.requiredRSConfig.RepairThreshold),
-		SuccessThreshold: int32(endpoint.requiredRSConfig.SuccessThreshold),
-		Total: int32(endpoint.requiredRSConfig.TotalThreshold),
-		ErasureShareSize: endpoint.requiredRSConfig.ErasureShareSize.Int32(),
+		MinReq: int32(endpoint.config.RS.MinThreshold),
+		RepairThreshold: int32(endpoint.config.RS.RepairThreshold),
+		SuccessThreshold: int32(endpoint.config.RS.SuccessThreshold),
+		Total: int32(endpoint.config.RS.TotalThreshold),
+		ErasureShareSize: endpoint.config.RS.ErasureShareSize.Int32(),
 	}
 }
@@ -760,6 +760,23 @@ func TestInlineSegment(t *testing.T) {
 		})
 		require.NoError(t, err)

+		{ // test max inline segment size 4KiB
+			beginObjectResp, err := metainfoClient.BeginObject(ctx, metainfo.BeginObjectParams{
+				Bucket: []byte(bucket.Name),
+				EncryptedPath: []byte("too-large-inline-segment"),
+			})
+			require.NoError(t, err)
+
+			data := testrand.Bytes(5 * memory.KiB)
+			err = metainfoClient.MakeInlineSegment(ctx, metainfo.MakeInlineSegmentParams{
+				StreamID: beginObjectResp.StreamID,
+				Position: storj.SegmentPosition{
+					Index: 0,
+				},
+				EncryptedInlineData: data,
+			})
+			require.Error(t, err)
+		}
 		{ // test listing inline segments
 			for _, test := range []struct {
 				Index int32
@@ -168,11 +168,11 @@ func (endpoint *Endpoint) validateAuth(ctx context.Context, header *pb.RequestHe

 func (endpoint *Endpoint) checkRate(ctx context.Context, projectID uuid.UUID) (err error) {
 	defer mon.Task()(&ctx)(&err)
-	if !endpoint.limiterConfig.Enabled {
+	if !endpoint.config.RateLimiter.Enabled {
 		return nil
 	}
 	limiter, err := endpoint.limiterCache.Get(projectID.String(), func() (interface{}, error) {
-		limit := rate.Limit(endpoint.limiterConfig.Rate)
+		limit := rate.Limit(endpoint.config.RateLimiter.Rate)

 		project, err := endpoint.projects.Get(ctx, projectID)
 		if err != nil {
@@ -320,7 +320,7 @@ func (endpoint *Endpoint) validatePointer(ctx context.Context, pointer *pb.Point
 			return Error.New("invalid no order limit for piece")
 		}

-		maxAllowed, err := encryption.CalcEncryptedSize(endpoint.requiredRSConfig.MaxSegmentSize.Int64(), storj.EncryptionParameters{
+		maxAllowed, err := encryption.CalcEncryptedSize(endpoint.config.MaxSegmentSize.Int64(), storj.EncryptionParameters{
 			CipherSuite: storj.EncAESGCM,
 			BlockSize: 128, // intentionally low block size to allow maximum possible encryption overhead
 		})
@@ -351,8 +351,8 @@
 		}

 		// expect that too much time has not passed between order limit creation and now
-		if time.Since(limit.OrderCreation) > endpoint.maxCommitInterval {
-			return Error.New("Segment not committed before max commit interval of %f minutes.", endpoint.maxCommitInterval.Minutes())
+		if time.Since(limit.OrderCreation) > endpoint.config.MaxCommitInterval {
+			return Error.New("Segment not committed before max commit interval of %f minutes.", endpoint.config.MaxCommitInterval.Minutes())
 		}

 		derivedPieceID := remote.RootPieceId.Derive(piece.NodeId, piece.PieceNum)
@@ -382,20 +382,20 @@ func (endpoint *Endpoint) validatePointer(ctx context.Context, pointer *pb.Point
 func (endpoint *Endpoint) validateRedundancy(ctx context.Context, redundancy *pb.RedundancyScheme) (err error) {
 	defer mon.Task()(&ctx)(&err)

-	if endpoint.requiredRSConfig.Validate {
-		if endpoint.requiredRSConfig.ErasureShareSize.Int32() != redundancy.ErasureShareSize ||
-			endpoint.requiredRSConfig.MinTotalThreshold > int(redundancy.Total) ||
-			endpoint.requiredRSConfig.MaxTotalThreshold < int(redundancy.Total) ||
-			endpoint.requiredRSConfig.MinThreshold != int(redundancy.MinReq) ||
-			endpoint.requiredRSConfig.RepairThreshold != int(redundancy.RepairThreshold) ||
-			endpoint.requiredRSConfig.SuccessThreshold != int(redundancy.SuccessThreshold) {
+	if endpoint.config.RS.Validate {
+		if endpoint.config.RS.ErasureShareSize.Int32() != redundancy.ErasureShareSize ||
+			endpoint.config.RS.MinTotalThreshold > int(redundancy.Total) ||
+			endpoint.config.RS.MaxTotalThreshold < int(redundancy.Total) ||
+			endpoint.config.RS.MinThreshold != int(redundancy.MinReq) ||
+			endpoint.config.RS.RepairThreshold != int(redundancy.RepairThreshold) ||
+			endpoint.config.RS.SuccessThreshold != int(redundancy.SuccessThreshold) {
 			return Error.New("provided redundancy scheme parameters not allowed: want [%d, %d, %d, %d-%d, %d] got [%d, %d, %d, %d, %d]",
-				endpoint.requiredRSConfig.MinThreshold,
-				endpoint.requiredRSConfig.RepairThreshold,
-				endpoint.requiredRSConfig.SuccessThreshold,
-				endpoint.requiredRSConfig.MinTotalThreshold,
-				endpoint.requiredRSConfig.MaxTotalThreshold,
-				endpoint.requiredRSConfig.ErasureShareSize.Int32(),
+				endpoint.config.RS.MinThreshold,
+				endpoint.config.RS.RepairThreshold,
+				endpoint.config.RS.SuccessThreshold,
+				endpoint.config.RS.MinTotalThreshold,
+				endpoint.config.RS.MaxTotalThreshold,
+				endpoint.config.RS.ErasureShareSize.Int32(),

 				redundancy.MinReq,
 				redundancy.RepairThreshold,
scripts/testdata/satellite-config.yaml.lock (vendored, 8 lines changed)

@@ -281,7 +281,10 @@ identity.key-path: /root/.local/share/storj/identity/satellite/identity.key
 # metainfo.max-commit-interval: 48h0m0s

 # maximum inline segment size
-# metainfo.max-inline-segment-size: 8.0 KB
+# metainfo.max-inline-segment-size: 4.0 KiB

+# maximum segment size
+# metainfo.max-segment-size: 64.0 MiB
+
 # minimum remote segment size
 # metainfo.min-remote-segment-size: 1.2 KiB
@@ -325,9 +328,6 @@ identity.key-path: /root/.local/share/storj/identity/satellite/identity.key
 # maximum buffer memory to be allocated for read buffers
 # metainfo.rs.max-buffer-mem: 4.0 MiB

-# maximum segment size
-# metainfo.rs.max-segment-size: 64.0 MiB
-
 # the largest amount of pieces to encode to. n (upper bound for validation).
 # metainfo.rs.max-total-threshold: 130
