diff --git a/internal/testplanet/satellite.go b/internal/testplanet/satellite.go
index 9b7927b1f..b4b87cf24 100644
--- a/internal/testplanet/satellite.go
+++ b/internal/testplanet/satellite.go
@@ -118,6 +118,7 @@ func (planet *Planet) newSatellites(count int) ([]*SatelliteSystem, error) {
 				DatabaseURL:          "bolt://" + filepath.Join(storageDir, "pointers.db"),
 				MinRemoteSegmentSize: 0, // TODO: fix tests to work with 1024
 				MaxInlineSegmentSize: 8000,
+				MaxCommitInterval:    1 * time.Hour,
 				Overlay:              true,
 				RS: metainfo.RSConfig{
 					MaxSegmentSize:   64 * memory.MiB,
diff --git a/satellite/metainfo/config.go b/satellite/metainfo/config.go
index 19ccf6867..897098264 100644
--- a/satellite/metainfo/config.go
+++ b/satellite/metainfo/config.go
@@ -4,6 +4,8 @@
 package metainfo
 
 import (
+	"time"
+
 	"go.uber.org/zap"
 
 	"storj.io/storj/internal/dbutil"
@@ -33,12 +35,13 @@ type RSConfig struct {
 
 // Config is a configuration struct that is everything you need to start a metainfo
 type Config struct {
-	DatabaseURL          string      `help:"the database connection string to use" releaseDefault:"postgres://" devDefault:"bolt://$CONFDIR/pointerdb.db"`
-	MinRemoteSegmentSize memory.Size `default:"1240" help:"minimum remote segment size"`
-	MaxInlineSegmentSize memory.Size `default:"8000" help:"maximum inline segment size"`
-	Overlay              bool        `default:"true" help:"toggle flag if overlay is enabled"`
-	RS                   RSConfig    `help:"redundancy scheme configuration"`
-	Loop                 LoopConfig  `help:"metainfo loop configuration"`
+	DatabaseURL          string        `help:"the database connection string to use" releaseDefault:"postgres://" devDefault:"bolt://$CONFDIR/pointerdb.db"`
+	MinRemoteSegmentSize memory.Size   `default:"1240" help:"minimum remote segment size"`
+	MaxInlineSegmentSize memory.Size   `default:"8000" help:"maximum inline segment size"`
+	MaxCommitInterval    time.Duration `default:"1h" help:"maximum time allowed to pass between creating and committing a segment"`
+	Overlay              bool          `default:"true" help:"toggle flag if overlay is enabled"`
+	RS                   RSConfig      `help:"redundancy scheme configuration"`
+	Loop                 LoopConfig    `help:"metainfo loop configuration"`
 }
 
 // PointerDB stores pointers.
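For reference, the new `MaxCommitInterval` field is an ordinary `time.Duration` parsed from its struct-tag default of `"1h"`; the `1h0m0s` spelling that later shows up in `satellite-config.yaml.lock` is just `Duration.String()` normalization. A standalone sketch (not part of this change) illustrating that:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// "1h" is the struct-tag default for MaxCommitInterval; time.ParseDuration accepts it.
	d, err := time.ParseDuration("1h")
	if err != nil {
		panic(err)
	}

	// Duration.String() normalizes to "1h0m0s", which is the spelling that
	// appears in the regenerated satellite-config.yaml.lock.
	fmt.Println(d.String()) // 1h0m0s
}
```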
diff --git a/satellite/metainfo/metainfo.go b/satellite/metainfo/metainfo.go
index 5f5f33954..81dc9be4b 100644
--- a/satellite/metainfo/metainfo.go
+++ b/satellite/metainfo/metainfo.go
@@ -63,35 +63,37 @@ type Revocations interface {
 //
 // architecture: Endpoint
 type Endpoint struct {
-	log              *zap.Logger
-	metainfo         *Service
-	orders           *orders.Service
-	overlay          *overlay.Service
-	partnerinfo      attribution.DB
-	peerIdentities   overlay.PeerIdentities
-	projectUsage     *accounting.ProjectUsage
-	apiKeys          APIKeys
-	createRequests   *createRequests
-	requiredRSConfig RSConfig
-	satellite        signing.Signer
+	log               *zap.Logger
+	metainfo          *Service
+	orders            *orders.Service
+	overlay           *overlay.Service
+	partnerinfo       attribution.DB
+	peerIdentities    overlay.PeerIdentities
+	projectUsage      *accounting.ProjectUsage
+	apiKeys           APIKeys
+	createRequests    *createRequests
+	requiredRSConfig  RSConfig
+	satellite         signing.Signer
+	maxCommitInterval time.Duration
 }
 
 // NewEndpoint creates new metainfo endpoint instance
 func NewEndpoint(log *zap.Logger, metainfo *Service, orders *orders.Service, cache *overlay.Service,
 	partnerinfo attribution.DB, peerIdentities overlay.PeerIdentities,
-	apiKeys APIKeys, projectUsage *accounting.ProjectUsage, rsConfig RSConfig, satellite signing.Signer) *Endpoint {
+	apiKeys APIKeys, projectUsage *accounting.ProjectUsage, rsConfig RSConfig, satellite signing.Signer, maxCommitInterval time.Duration) *Endpoint {
 	// TODO do something with too many params
 	return &Endpoint{
-		log:              log,
-		metainfo:         metainfo,
-		orders:           orders,
-		overlay:          cache,
-		partnerinfo:      partnerinfo,
-		peerIdentities:   peerIdentities,
-		apiKeys:          apiKeys,
-		projectUsage:     projectUsage,
-		createRequests:   newCreateRequests(),
-		requiredRSConfig: rsConfig,
-		satellite:        satellite,
+		log:               log,
+		metainfo:          metainfo,
+		orders:            orders,
+		overlay:           cache,
+		partnerinfo:       partnerinfo,
+		peerIdentities:    peerIdentities,
+		apiKeys:           apiKeys,
+		projectUsage:      projectUsage,
+		createRequests:    newCreateRequests(),
+		requiredRSConfig:  rsConfig,
+		satellite:         satellite,
+		maxCommitInterval: maxCommitInterval,
 	}
 }
diff --git a/satellite/metainfo/metainfo_test.go b/satellite/metainfo/metainfo_test.go
index d7c419616..7bd6deb78 100644
--- a/satellite/metainfo/metainfo_test.go
+++ b/satellite/metainfo/metainfo_test.go
@@ -564,6 +564,34 @@ func TestExpirationTimeSegment(t *testing.T) {
 	})
 }
 
+func TestMaxCommitInterval(t *testing.T) {
+	testplanet.Run(t, testplanet.Config{
+		SatelliteCount: 1, StorageNodeCount: 4, UplinkCount: 1,
+		Reconfigure: testplanet.Reconfigure{
+			Satellite: func(log *zap.Logger, index int, config *satellite.Config) {
+				config.Metainfo.MaxCommitInterval = -1 * time.Hour
+			},
+		},
+	}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
+		apiKey := planet.Uplinks[0].APIKey[planet.Satellites[0].ID()]
+
+		metainfo, err := planet.Uplinks[0].DialMetainfo(ctx, planet.Satellites[0], apiKey)
+		require.NoError(t, err)
+		defer ctx.Check(metainfo.Close)
+
+		fullIDMap := make(map[storj.NodeID]*identity.FullIdentity)
+		for _, node := range planet.StorageNodes {
+			fullIDMap[node.ID()] = node.Identity
+		}
+
+		pointer, limits := runCreateSegment(ctx, t, metainfo, fullIDMap)
+
+		_, err = metainfo.CommitSegment(ctx, "my-bucket-name", "file/path", -1, pointer, limits)
+		require.Error(t, err)
+		require.Contains(t, err.Error(), "not committed before max commit interval")
+	})
+}
+
 func TestDoubleCommitSegment(t *testing.T) {
 	testplanet.Run(t, testplanet.Config{
 		SatelliteCount: 1, StorageNodeCount: 4, UplinkCount: 1,
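The test drives the new check by reconfiguring the satellite with a negative interval, so any commit arrives "too late" no matter how quickly the uplink follows up. A minimal standalone sketch of that idea, using a hypothetical `exceedsCommitInterval` helper rather than the real endpoint:

```go
package main

import (
	"fmt"
	"time"
)

// exceedsCommitInterval is a hypothetical stand-in for the per-order-limit check
// the endpoint performs: has too much time passed between order limit creation
// and the commit request?
func exceedsCommitInterval(orderCreation time.Time, maxCommitInterval time.Duration) bool {
	return time.Since(orderCreation) > maxCommitInterval
}

func main() {
	orderCreation := time.Now() // an order limit created just now

	// With the normal 1h default, an immediate commit is accepted.
	fmt.Println(exceedsCommitInterval(orderCreation, time.Hour)) // false

	// The test reconfigures the satellite with -1h, so even an immediate commit
	// exceeds the interval and CommitSegment must return an error.
	fmt.Println(exceedsCommitInterval(orderCreation, -1*time.Hour)) // true
}
```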
diff --git a/satellite/metainfo/validation.go b/satellite/metainfo/validation.go
index 4cf25cf53..1b642a921 100644
--- a/satellite/metainfo/validation.go
+++ b/satellite/metainfo/validation.go
@@ -316,6 +316,11 @@ func (endpoint *Endpoint) validatePointer(ctx context.Context, pointer *pb.Point
 				return err
 			}
 
+			// expect that too much time has not passed between order limit creation and now
+			if time.Since(limit.OrderCreation) > endpoint.maxCommitInterval {
+				return Error.New("Segment not committed before max commit interval of %f minutes.", endpoint.maxCommitInterval.Minutes())
+			}
+
 			derivedPieceID := remote.RootPieceId.Derive(piece.NodeId, piece.PieceNum)
 			if limit.PieceId.IsZero() || limit.PieceId != derivedPieceID {
 				return Error.New("invalid order limit piece id")
diff --git a/satellite/peer.go b/satellite/peer.go
index e93c9be52..10832be42 100644
--- a/satellite/peer.go
+++ b/satellite/peer.go
@@ -399,6 +399,7 @@ func New(log *zap.Logger, full *identity.FullIdentity, db DB, revocationDB exten
 			peer.Accounting.ProjectUsage,
 			config.Metainfo.RS,
 			signing.SignerFromFullIdentity(peer.Identity),
+			config.Metainfo.MaxCommitInterval,
 		)
 
 		pb.RegisterMetainfoServer(peer.Server.GRPC(), peer.Metainfo.Endpoint2)
diff --git a/scripts/testdata/satellite-config.yaml.lock b/scripts/testdata/satellite-config.yaml.lock
index edfac4c73..148b4f743 100644
--- a/scripts/testdata/satellite-config.yaml.lock
+++ b/scripts/testdata/satellite-config.yaml.lock
@@ -187,6 +187,9 @@ identity.key-path: /root/.local/share/storj/identity/satellite/identity.key
 # how long to wait for new observers before starting iteration
 # metainfo.loop.coalesce-duration: 5s
 
+# maximum time allowed to pass between creating and committing a segment
+# metainfo.max-commit-interval: 1h0m0s
+
 # maximum inline segment size
 # metainfo.max-inline-segment-size: 8.0 KB
 
diff --git a/storagenode/retain/retain.go b/storagenode/retain/retain.go
index dd3e246fc..6b5d2e42d 100644
--- a/storagenode/retain/retain.go
+++ b/storagenode/retain/retain.go
@@ -28,7 +28,7 @@ var (
 
 // Config defines parameters for the retain service.
 type Config struct {
-	MaxTimeSkew time.Duration `help:"allows for small differences in the satellite and storagenode clocks" default:"1h0m0s"`
+	MaxTimeSkew time.Duration `help:"allows for small differences in the satellite and storagenode clocks" default:"24h0m0s"`
 	Status      Status        `help:"allows configuration to enable, disable, or test retain requests from the satellite. Options: (disabled/enabled/debug)" default:"disabled"`
 	Concurrency int           `help:"how many concurrent retain requests can be processed at the same time." default:"5"`
 }
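As an aside on the assertion in `TestMaxCommitInterval`: the error built in `validatePointer` formats the interval with `%f` minutes (e.g. `60.000000`), so the test matches only the stable substring of the message. A small sketch (assumed formatting, outside the real codebase) showing why that substring check is robust regardless of the configured value:

```go
package main

import (
	"fmt"
	"strings"
	"time"
)

func main() {
	maxCommitInterval := time.Hour

	// Roughly mirrors the message built in validatePointer; %f renders the
	// minutes with six decimal places, e.g. "60.000000".
	err := fmt.Errorf("Segment not committed before max commit interval of %f minutes.", maxCommitInterval.Minutes())

	// TestMaxCommitInterval asserts only on this stable substring, so the exact
	// numeric formatting (and the negative value used in the test) doesn't matter.
	fmt.Println(strings.Contains(err.Error(), "not committed before max commit interval")) // true
}
```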