satellite/metainfo: add commit interval to prevent long delays between order limit creation and segment commit (#3149)

This commit is contained in:
Maximillian von Briesen 2019-10-01 12:55:02 -04:00 committed by GitHub
parent 89c59d06f9
commit 08ed50bcaa
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
8 changed files with 73 additions and 30 deletions

View File

@ -118,6 +118,7 @@ func (planet *Planet) newSatellites(count int) ([]*SatelliteSystem, error) {
DatabaseURL: "bolt://" + filepath.Join(storageDir, "pointers.db"),
MinRemoteSegmentSize: 0, // TODO: fix tests to work with 1024
MaxInlineSegmentSize: 8000,
MaxCommitInterval: 1 * time.Hour,
Overlay: true,
RS: metainfo.RSConfig{
MaxSegmentSize: 64 * memory.MiB,

View File

@ -4,6 +4,8 @@
package metainfo
import (
"time"
"go.uber.org/zap"
"storj.io/storj/internal/dbutil"
@ -33,12 +35,13 @@ type RSConfig struct {
// Config is a configuration struct holding everything needed to start a
// metainfo service.
// NOTE(review): this is a rendered diff hunk — both the pre-change field list
// (without MaxCommitInterval) and the post-change field list (with it) appear
// below; only the second set exists after the commit.
type Config struct {
DatabaseURL string `help:"the database connection string to use" releaseDefault:"postgres://" devDefault:"bolt://$CONFDIR/pointerdb.db"`
MinRemoteSegmentSize memory.Size `default:"1240" help:"minimum remote segment size"`
MaxInlineSegmentSize memory.Size `default:"8000" help:"maximum inline segment size"`
Overlay bool `default:"true" help:"toggle flag if overlay is enabled"`
RS RSConfig `help:"redundancy scheme configuration"`
Loop LoopConfig `help:"metainfo loop configuration"`
// Post-change fields: MaxCommitInterval bounds the time allowed between
// creating order limits for a segment and committing that segment.
DatabaseURL string `help:"the database connection string to use" releaseDefault:"postgres://" devDefault:"bolt://$CONFDIR/pointerdb.db"`
MinRemoteSegmentSize memory.Size `default:"1240" help:"minimum remote segment size"`
MaxInlineSegmentSize memory.Size `default:"8000" help:"maximum inline segment size"`
MaxCommitInterval time.Duration `default:"1h" help:"maximum time allowed to pass between creating and committing a segment"`
Overlay bool `default:"true" help:"toggle flag if overlay is enabled"`
RS RSConfig `help:"redundancy scheme configuration"`
Loop LoopConfig `help:"metainfo loop configuration"`
}
// PointerDB stores pointers.

View File

@ -63,35 +63,37 @@ type Revocations interface {
//
// architecture: Endpoint
// Endpoint implements the metainfo RPC service.
// NOTE(review): rendered diff hunk — the field list appears twice (before and
// after the change); the second copy adds maxCommitInterval, the maximum time
// allowed between order-limit creation and segment commit.
type Endpoint struct {
log *zap.Logger
metainfo *Service
orders *orders.Service
overlay *overlay.Service
partnerinfo attribution.DB
peerIdentities overlay.PeerIdentities
projectUsage *accounting.ProjectUsage
apiKeys APIKeys
createRequests *createRequests
requiredRSConfig RSConfig
satellite signing.Signer
log *zap.Logger
metainfo *Service
orders *orders.Service
overlay *overlay.Service
partnerinfo attribution.DB
peerIdentities overlay.PeerIdentities
projectUsage *accounting.ProjectUsage
apiKeys APIKeys
createRequests *createRequests
requiredRSConfig RSConfig
satellite signing.Signer
maxCommitInterval time.Duration
}
// NewEndpoint creates a new metainfo endpoint instance.
// NOTE(review): rendered diff hunk — the signature continuation and the
// struct-literal fields each appear twice (before/after the change); the
// post-change version threads maxCommitInterval through to the Endpoint.
func NewEndpoint(log *zap.Logger, metainfo *Service, orders *orders.Service, cache *overlay.Service, partnerinfo attribution.DB, peerIdentities overlay.PeerIdentities,
apiKeys APIKeys, projectUsage *accounting.ProjectUsage, rsConfig RSConfig, satellite signing.Signer) *Endpoint {
apiKeys APIKeys, projectUsage *accounting.ProjectUsage, rsConfig RSConfig, satellite signing.Signer, maxCommitInterval time.Duration) *Endpoint {
// TODO do something with too many params
return &Endpoint{
log: log,
metainfo: metainfo,
orders: orders,
overlay: cache,
partnerinfo: partnerinfo,
peerIdentities: peerIdentities,
apiKeys: apiKeys,
projectUsage: projectUsage,
createRequests: newCreateRequests(),
requiredRSConfig: rsConfig,
satellite: satellite,
log: log,
metainfo: metainfo,
orders: orders,
overlay: cache,
partnerinfo: partnerinfo,
peerIdentities: peerIdentities,
apiKeys: apiKeys,
projectUsage: projectUsage,
createRequests: newCreateRequests(),
requiredRSConfig: rsConfig,
satellite: satellite,
maxCommitInterval: maxCommitInterval,
}
}

View File

@ -564,6 +564,34 @@ func TestExpirationTimeSegment(t *testing.T) {
})
}
// TestMaxCommitInterval verifies that committing a segment fails once more
// time than the configured MaxCommitInterval has passed since the order
// limits were created. A negative interval forces every commit to be "late".
func TestMaxCommitInterval(t *testing.T) {
	testplanet.Run(t, testplanet.Config{
		SatelliteCount: 1, StorageNodeCount: 4, UplinkCount: 1,
		Reconfigure: testplanet.Reconfigure{
			Satellite: func(log *zap.Logger, index int, config *satellite.Config) {
				// Negative on purpose: any elapsed time exceeds it.
				config.Metainfo.MaxCommitInterval = -1 * time.Hour
			},
		},
	}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
		apiKey := planet.Uplinks[0].APIKey[planet.Satellites[0].ID()]

		client, err := planet.Uplinks[0].DialMetainfo(ctx, planet.Satellites[0], apiKey)
		require.NoError(t, err)
		defer ctx.Check(client.Close)

		// Collect the full identity of every storage node so segment
		// creation can sign piece hashes on their behalf.
		identities := make(map[storj.NodeID]*identity.FullIdentity, len(planet.StorageNodes))
		for _, node := range planet.StorageNodes {
			identities[node.ID()] = node.Identity
		}

		pointer, limits := runCreateSegment(ctx, t, client, identities)

		// The commit must be rejected because the (negative) interval has
		// already elapsed.
		_, err = client.CommitSegment(ctx, "my-bucket-name", "file/path", -1, pointer, limits)
		require.Error(t, err)
		require.Contains(t, err.Error(), "not committed before max commit interval")
	})
}
func TestDoubleCommitSegment(t *testing.T) {
testplanet.Run(t, testplanet.Config{
SatelliteCount: 1, StorageNodeCount: 4, UplinkCount: 1,

View File

@ -316,6 +316,11 @@ func (endpoint *Endpoint) validatePointer(ctx context.Context, pointer *pb.Point
return err
}
// expect that too much time has not passed between order limit creation and now
if time.Since(limit.OrderCreation) > endpoint.maxCommitInterval {
return Error.New("Segment not committed before max commit interval of %f minutes.", endpoint.maxCommitInterval.Minutes())
}
derivedPieceID := remote.RootPieceId.Derive(piece.NodeId, piece.PieceNum)
if limit.PieceId.IsZero() || limit.PieceId != derivedPieceID {
return Error.New("invalid order limit piece id")

View File

@ -399,6 +399,7 @@ func New(log *zap.Logger, full *identity.FullIdentity, db DB, revocationDB exten
peer.Accounting.ProjectUsage,
config.Metainfo.RS,
signing.SignerFromFullIdentity(peer.Identity),
config.Metainfo.MaxCommitInterval,
)
pb.RegisterMetainfoServer(peer.Server.GRPC(), peer.Metainfo.Endpoint2)

View File

@ -187,6 +187,9 @@ identity.key-path: /root/.local/share/storj/identity/satellite/identity.key
# how long to wait for new observers before starting iteration
# metainfo.loop.coalesce-duration: 5s
# maximum time allowed to pass between creating and committing a segment
# metainfo.max-commit-interval: 1h0m0s
# maximum inline segment size
# metainfo.max-inline-segment-size: 8.0 KB

View File

@ -28,7 +28,7 @@ var (
// Config defines parameters for the retain service.
// NOTE(review): rendered diff hunk — MaxTimeSkew appears twice (before/after
// the change); this commit raises its default from 1h0m0s to 24h0m0s so
// retain requests tolerate up to a day of clock skew.
type Config struct {
MaxTimeSkew time.Duration `help:"allows for small differences in the satellite and storagenode clocks" default:"1h0m0s"`
MaxTimeSkew time.Duration `help:"allows for small differences in the satellite and storagenode clocks" default:"24h0m0s"`
Status Status `help:"allows configuration to enable, disable, or test retain requests from the satellite. Options: (disabled/enabled/debug)" default:"disabled"`
Concurrency int `help:"how many concurrent retain requests can be processed at the same time." default:"5"`
}