satellite rs config check with validation check set to false default (#2229)

* satellite rs config check with validation check
aligeti 2019-06-21 14:15:58 -04:00 committed by GitHub
parent 8f47fca5d3
commit 043d603cbe
7 changed files with 165 additions and 7 deletions

View File

@ -134,6 +134,15 @@ func (planet *Planet) newSatellites(count int) ([]*satellite.Peer, error) {
MaxInlineSegmentSize: 8000,
Overlay: true,
BwExpiration: 45,
RS: metainfo.RSConfig{
MaxBufferMem: memory.Size(256),
ErasureShareSize: memory.Size(256),
MinThreshold: (planet.config.StorageNodeCount * 1 / 5),
RepairThreshold: (planet.config.StorageNodeCount * 2 / 5),
SuccessThreshold: (planet.config.StorageNodeCount * 3 / 5),
MaxThreshold: (planet.config.StorageNodeCount * 4 / 5),
Validate: false,
},
},
Orders: orders.Config{
Expiration: 45 * 24 * time.Hour,
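Editorial note, not part of the diff: the reconfigured tests later in this commit run testplanet with `StorageNodeCount: 6`, so Go's integer division resolves the four thresholds above to 1, 2, 3 and 4, the exact scheme those tests commit. A quick check:

```go
package main

import "fmt"

func main() {
	// StorageNodeCount used by the reconfigured tests in this commit
	nodes := 6
	// MinThreshold (k), RepairThreshold (m), SuccessThreshold (o), MaxThreshold (n)
	fmt.Println(nodes*1/5, nodes*2/5, nodes*3/5, nodes*4/5) // prints: 1 2 3 4
}
```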

View File

@ -18,6 +18,18 @@ const (
BoltPointerBucket = "pointers"
)
// RSConfig is a configuration struct that keeps details about the
// satellite's default redundancy strategy
type RSConfig struct {
MaxBufferMem memory.Size `help:"maximum buffer memory (in bytes) to be allocated for read buffers" default:"4MiB"`
ErasureShareSize memory.Size `help:"the size of each new erasure share in bytes" default:"256B"`
MinThreshold int `help:"the minimum pieces required to recover a segment. k." releaseDefault:"29" devDefault:"4"`
RepairThreshold int `help:"the minimum safe pieces before a repair is triggered. m." releaseDefault:"35" devDefault:"6"`
SuccessThreshold int `help:"the desired total pieces for a segment. o." releaseDefault:"80" devDefault:"8"`
MaxThreshold int `help:"the largest amount of pieces to encode to. n." releaseDefault:"130" devDefault:"10"`
Validate bool `help:"validate redundancy scheme configuration" releaseDefault:"true" devDefault:"false"`
}
// Config is a configuration struct that is everything you need to start a metainfo
type Config struct {
DatabaseURL string `help:"the database connection string to use" releaseDefault:"postgres://" devDefault:"bolt://$CONFDIR/pointerdb.db"`
@ -25,6 +37,7 @@ type Config struct {
MaxInlineSegmentSize memory.Size `default:"8000" help:"maximum inline segment size"`
Overlay bool `default:"true" help:"toggle flag if overlay is enabled"`
BwExpiration int `default:"45" help:"lifespan of bandwidth agreements in days"`
RS RSConfig `help:"redundancy scheme configuration"`
}
// NewStore returns database for storing pointer data
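For comparison, a sketch (not code from this commit) of what the `devDefault` tags above resolve to. Per the commit title, Validate defaults to false in dev builds, so local test networks are not pinned to a single scheme unless they opt in. This assumes the 2019 package layout, with RSConfig in storj.io/storj/satellite/metainfo and the memory package under internal:

```go
package main

import (
	"fmt"

	"storj.io/storj/internal/memory"
	"storj.io/storj/satellite/metainfo"
)

func main() {
	// devDefault values from the struct tags; Validate is false in dev builds
	rs := metainfo.RSConfig{
		MaxBufferMem:     4 * memory.MiB,
		ErasureShareSize: 256 * memory.B,
		MinThreshold:     4,
		RepairThreshold:  6,
		SuccessThreshold: 8,
		MaxThreshold:     10,
		Validate:         false,
	}
	fmt.Printf("%+v\n", rs)
}
```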

View File

@ -59,11 +59,12 @@ type Endpoint struct {
containment Containment
apiKeys APIKeys
createRequests *createRequests
rsConfig RSConfig
}
// NewEndpoint creates new metainfo endpoint instance
func NewEndpoint(log *zap.Logger, metainfo *Service, orders *orders.Service, cache *overlay.Cache, containment Containment,
- apiKeys APIKeys, projectUsage *accounting.ProjectUsage) *Endpoint {
+ apiKeys APIKeys, projectUsage *accounting.ProjectUsage, rsConfig RSConfig) *Endpoint {
// TODO do something with too many params
return &Endpoint{
log: log,
@ -74,6 +75,7 @@ func NewEndpoint(log *zap.Logger, metainfo *Service, orders *orders.Service, cac
apiKeys: apiKeys,
projectUsage: projectUsage,
createRequests: newCreateRequests(),
rsConfig: rsConfig,
}
}

View File

@ -14,6 +14,7 @@ import (
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/zeebo/errs"
"go.uber.org/zap"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
@ -23,6 +24,7 @@ import (
"storj.io/storj/pkg/macaroon"
"storj.io/storj/pkg/pb"
"storj.io/storj/pkg/storj"
"storj.io/storj/satellite"
"storj.io/storj/satellite/console"
"storj.io/storj/uplink/metainfo"
)
@ -276,6 +278,11 @@ func TestServiceList(t *testing.T) {
func TestCommitSegment(t *testing.T) {
testplanet.Run(t, testplanet.Config{
SatelliteCount: 1, StorageNodeCount: 6, UplinkCount: 1,
Reconfigure: testplanet.Reconfigure{
Satellite: func(log *zap.Logger, index int, config *satellite.Config) {
config.Metainfo.RS.Validate = true
},
},
}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
apiKey := planet.Uplinks[0].APIKey[planet.Satellites[0].ID()]
@ -297,9 +304,9 @@ func TestCommitSegment(t *testing.T) {
redundancy := &pb.RedundancyScheme{
MinReq: 1,
RepairThreshold: 2,
- SuccessThreshold: 4,
- Total: 6,
- ErasureShareSize: 10,
+ SuccessThreshold: 3,
+ Total: 4,
+ ErasureShareSize: 256,
}
expirationDate := time.Now()
addresedLimits, rootPieceID, err := metainfo.CreateSegment(ctx, "bucket", "path", -1, redundancy, 1000, expirationDate)
@ -339,6 +346,105 @@ func TestCommitSegment(t *testing.T) {
})
}
func TestCreateSegment(t *testing.T) {
testplanet.Run(t, testplanet.Config{
SatelliteCount: 1, StorageNodeCount: 6, UplinkCount: 1,
Reconfigure: testplanet.Reconfigure{
Satellite: func(log *zap.Logger, index int, config *satellite.Config) {
config.Metainfo.RS.Validate = true
},
},
}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
apiKey := planet.Uplinks[0].APIKey[planet.Satellites[0].ID()]
metainfo, err := planet.Uplinks[0].DialMetainfo(ctx, planet.Satellites[0], apiKey)
require.NoError(t, err)
for _, r := range []struct {
rs *pb.RedundancyScheme
fail bool
}{
{ // error - ErasureShareSize <= 0
rs: &pb.RedundancyScheme{
MinReq: 1,
RepairThreshold: 2,
SuccessThreshold: 3,
Total: 4,
ErasureShareSize: -1,
},
fail: true,
},
{ // error - any of the values are negative
rs: &pb.RedundancyScheme{
MinReq: 1,
RepairThreshold: -2,
SuccessThreshold: 3,
Total: -4,
ErasureShareSize: 10,
},
fail: true,
},
{ // error - MinReq >= RepairThreshold
rs: &pb.RedundancyScheme{
MinReq: 10,
RepairThreshold: 2,
SuccessThreshold: 3,
Total: 4,
ErasureShareSize: 10,
},
fail: true,
},
{ // error - MinReq >= RepairThreshold
rs: &pb.RedundancyScheme{
MinReq: 2,
RepairThreshold: 2,
SuccessThreshold: 3,
Total: 4,
ErasureShareSize: 10,
},
fail: true,
},
{ // error - RepairThreshold >= SuccessThreshold
rs: &pb.RedundancyScheme{
MinReq: 1,
RepairThreshold: 3,
SuccessThreshold: 3,
Total: 4,
ErasureShareSize: 10,
},
fail: true,
},
{ // error - SuccessThreshold >= Total
rs: &pb.RedundancyScheme{
MinReq: 1,
RepairThreshold: 2,
SuccessThreshold: 4,
Total: 4,
ErasureShareSize: 10,
},
fail: true,
},
{ // ok - valid RS parameters
rs: &pb.RedundancyScheme{
MinReq: 1,
RepairThreshold: 2,
SuccessThreshold: 3,
Total: 4,
ErasureShareSize: 256,
},
fail: false,
},
} {
_, _, err := metainfo.CreateSegment(ctx, "bucket", "path", -1, r.rs, 1000, time.Now())
if r.fail {
require.Error(t, err)
} else {
require.NoError(t, err)
}
}
})
}
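Two editorial observations on the tests above, not part of the diff. First, TestCommitSegment's scheme changes to 1/2/3/4 with a 256-byte share size because that is exactly what the testplanet RSConfig resolves to with six storage nodes, and validation is now switched on. Second, although the comments in TestCreateSegment describe ordering violations, every failing row is rejected by the same mechanism: its values differ from the configured scheme. A minimal sketch of that rule, with a hypothetical helper name:

```go
package example

import (
	"storj.io/storj/pkg/pb"
	"storj.io/storj/satellite/metainfo"
)

// matchesConfig is a hypothetical restatement of the endpoint's new rule:
// a proposed scheme is accepted only when it equals the configured one.
func matchesConfig(cfg metainfo.RSConfig, r *pb.RedundancyScheme) bool {
	return cfg.ErasureShareSize.Int32() == r.ErasureShareSize &&
		cfg.MinThreshold == int(r.MinReq) &&
		cfg.RepairThreshold == int(r.RepairThreshold) &&
		cfg.SuccessThreshold == int(r.SuccessThreshold) &&
		cfg.MaxThreshold == int(r.Total)
}
```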
func TestDoubleCommitSegment(t *testing.T) {
testplanet.Run(t, testplanet.Config{
SatelliteCount: 1, StorageNodeCount: 6, UplinkCount: 1,

View File

@ -260,9 +260,15 @@ func (endpoint *Endpoint) validatePointer(ctx context.Context, pointer *pb.Point
func (endpoint *Endpoint) validateRedundancy(ctx context.Context, redundancy *pb.RedundancyScheme) (err error) {
defer mon.Task()(&ctx)(&err)
// TODO more validation, use validation from eestream.NewRedundancyStrategy
- if redundancy.ErasureShareSize <= 0 {
- return Error.New("erasure share size cannot be less than 0")
+ if endpoint.rsConfig.Validate == true {
+ if endpoint.rsConfig.ErasureShareSize.Int32() != redundancy.ErasureShareSize ||
+ endpoint.rsConfig.MaxThreshold != int(redundancy.Total) ||
+ endpoint.rsConfig.MinThreshold != int(redundancy.MinReq) ||
+ endpoint.rsConfig.RepairThreshold != int(redundancy.RepairThreshold) ||
+ endpoint.rsConfig.SuccessThreshold != int(redundancy.SuccessThreshold) {
+ return Error.New("provided redundancy scheme parameters not allowed")
+ }
}
return nil
}
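To make the effect concrete (an illustration, not code from the commit): under the release defaults shown earlier, once metainfo.rs.validate is on, the only pb.RedundancyScheme an uplink can commit is the one below. The import path for pb matches the one used in the test file above.

```go
package main

import (
	"fmt"

	"storj.io/storj/pkg/pb"
)

func main() {
	// release defaults: metainfo.rs.{min,repair,success,max}-threshold
	// and metainfo.rs.erasure-share-size
	accepted := &pb.RedundancyScheme{
		MinReq:           29,
		RepairThreshold:  35,
		SuccessThreshold: 80,
		Total:            130,
		ErasureShareSize: 256,
	}
	fmt.Println(accepted)
}
```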

View File

@ -416,6 +416,7 @@ func New(log *zap.Logger, full *identity.FullIdentity, db DB, config *Config, ve
peer.DB.Containment(),
peer.DB.Console().APIKeys(),
peer.Accounting.ProjectUsage,
config.Metainfo.RS,
)
pb.RegisterMetainfoServer(peer.Server.GRPC(), peer.Metainfo.Endpoint2)

View File

@ -178,6 +178,27 @@ kademlia.operator.wallet: ""
# toggle flag if overlay is enabled
# metainfo.overlay: true
# the size of each new erasure share in bytes
# metainfo.rs.erasure-share-size: 256 B
# maximum buffer memory (in bytes) to be allocated for read buffers
# metainfo.rs.max-buffer-mem: 4.0 MiB
# the largest amount of pieces to encode to. n.
# metainfo.rs.max-threshold: 130
# the minimum pieces required to recover a segment. k.
# metainfo.rs.min-threshold: 29
# the minimum safe pieces before a repair is triggered. m.
# metainfo.rs.repair-threshold: 35
# the desired total pieces for a segment. o.
# metainfo.rs.success-threshold: 80
# validate redundancy scheme configuration
# metainfo.rs.validate: true
# address to send telemetry to
# metrics.addr: "collectora.storj.io:9000"