storj/private/testplanet/satellite.go

// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information

package testplanet

import (
	"context"
	"net"
	"os"
	"path/filepath"
	"runtime/pprof"
	"strconv"
	"time"

	"github.com/zeebo/errs"
	"go.uber.org/zap"
	"golang.org/x/sync/errgroup"

	"storj.io/common/errs2"
	"storj.io/common/identity"
	"storj.io/common/memory"
	"storj.io/common/peertls/extensions"
	"storj.io/common/peertls/tlsopts"
	"storj.io/common/rpc"
	"storj.io/common/storj"
	"storj.io/common/uuid"
	"storj.io/private/debug"
	"storj.io/private/version"
	"storj.io/storj/private/revocation"
	"storj.io/storj/private/server"
	"storj.io/storj/private/testredis"
	versionchecker "storj.io/storj/private/version/checker"
	"storj.io/storj/private/web"
	"storj.io/storj/satellite"
	"storj.io/storj/satellite/accounting"
	"storj.io/storj/satellite/accounting/live"
	"storj.io/storj/satellite/accounting/projectbwcleanup"
	"storj.io/storj/satellite/accounting/rollup"
	"storj.io/storj/satellite/accounting/rolluparchive"
	"storj.io/storj/satellite/accounting/tally"
	"storj.io/storj/satellite/admin"
	"storj.io/storj/satellite/audit"
	"storj.io/storj/satellite/console"
	"storj.io/storj/satellite/console/consoleauth"
	"storj.io/storj/satellite/console/consoleweb"
	"storj.io/storj/satellite/contact"
	"storj.io/storj/satellite/gc"
	"storj.io/storj/satellite/gracefulexit"
	"storj.io/storj/satellite/inspector"
	"storj.io/storj/satellite/mailservice"
	"storj.io/storj/satellite/metabase"
	"storj.io/storj/satellite/metabase/metaloop"
	"storj.io/storj/satellite/metainfo"
	"storj.io/storj/satellite/metainfo/expireddeletion"
	"storj.io/storj/satellite/metainfo/piecedeletion"
	"storj.io/storj/satellite/metrics"
	"storj.io/storj/satellite/nodestats"
	"storj.io/storj/satellite/orders"
	"storj.io/storj/satellite/overlay"
	"storj.io/storj/satellite/overlay/straynodes"
	"storj.io/storj/satellite/payments/paymentsconfig"
	"storj.io/storj/satellite/payments/stripecoinpayments"
	"storj.io/storj/satellite/repair/checker"
	"storj.io/storj/satellite/repair/irreparable"
	"storj.io/storj/satellite/repair/repairer"
	"storj.io/storj/satellite/satellitedb/satellitedbtest"
)

// Satellite contains all the processes needed to run a full Satellite setup.
type Satellite struct {
	Name   string
	Config satellite.Config

	Core     *satellite.Core
	API      *satellite.API
	Repairer *satellite.Repairer
	Admin    *satellite.Admin
	GC       *satellite.GarbageCollection

	Log      *zap.Logger
	Identity *identity.FullIdentity
	DB       satellite.DB

	Dialer rpc.Dialer

	Server *server.Server

	Version *versionchecker.Service

	Contact struct {
		Service  *contact.Service
		Endpoint *contact.Endpoint
	}

	Overlay struct {
		DB           overlay.DB
		Service      *overlay.Service
		DQStrayNodes *straynodes.Chore
	}

	Metainfo struct {
		Metabase  *metabase.DB
		Service   *metainfo.Service
		Endpoint2 *metainfo.Endpoint
		Loop      *metaloop.Service
	}

	Inspector struct {
		Endpoint *inspector.Endpoint
	}

	Orders struct {
		DB       orders.DB
		Endpoint *orders.Endpoint
		Service  *orders.Service
		Chore    *orders.Chore
	}

	Repair struct {
		Checker   *checker.Checker
		Repairer  *repairer.Service
		Inspector *irreparable.Inspector
	}

	Audit struct {
		Queues   *audit.Queues
		Worker   *audit.Worker
		Chore    *audit.Chore
		Verifier *audit.Verifier
		Reporter *audit.Reporter
	}

	GarbageCollection struct {
		Service *gc.Service
	}

	ExpiredDeletion struct {
		Chore *expireddeletion.Chore
	}

	Accounting struct {
		Tally            *tally.Service
		Rollup           *rollup.Service
		ProjectUsage     *accounting.Service
		ProjectBWCleanup *projectbwcleanup.Chore
		RollupArchive    *rolluparchive.Chore
	}

	LiveAccounting struct {
		Cache accounting.Cache
	}

	ProjectLimits struct {
		Cache *accounting.ProjectLimitCache
	}

	Mail struct {
		Service *mailservice.Service
	}

	Console struct {
		Listener net.Listener
		Service  *console.Service
		Endpoint *consoleweb.Server
	}

	NodeStats struct {
		Endpoint *nodestats.Endpoint
	}

	GracefulExit struct {
		Chore    *gracefulexit.Chore
		Endpoint *gracefulexit.Endpoint
	}

	Metrics struct {
		Chore *metrics.Chore
	}
}

// Label returns name for debugger.
func (system *Satellite) Label() string { return system.Name }

// ID returns the ID of the Satellite system.
func (system *Satellite) ID() storj.NodeID { return system.API.Identity.ID }

// Addr returns the public address from the Satellite system API.
func (system *Satellite) Addr() string { return system.API.Server.Addr().String() }

// URL returns the node url from the Satellite system API.
func (system *Satellite) URL() string { return system.NodeURL().String() }

// NodeURL returns the storj.NodeURL from the Satellite system API.
func (system *Satellite) NodeURL() storj.NodeURL {
	return storj.NodeURL{ID: system.API.ID(), Address: system.API.Addr()}
}

// AddUser adds a user to a satellite. The password from newUser is always
// overridden with FullName so that tests have a known password to use
// automatically.
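//
// A minimal usage sketch (the field values here are hypothetical):
//
//	user, err := planet.Satellites[0].AddUser(ctx, console.CreateUser{
//		FullName: "Test User",
//		Email:    "user@mail.test",
//	}, 1)
//	// the account password is then "Test User" (the FullName)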
func (system *Satellite) AddUser(ctx context.Context, newUser console.CreateUser, maxNumberOfProjects int) (*console.User, error) {
	regToken, err := system.API.Console.Service.CreateRegToken(ctx, maxNumberOfProjects)
	if err != nil {
		return nil, err
	}

	newUser.Password = newUser.FullName
	user, err := system.API.Console.Service.CreateUser(ctx, newUser, regToken.Secret)
	if err != nil {
		return nil, err
	}

	activationToken, err := system.API.Console.Service.GenerateActivationToken(ctx, user.ID, user.Email)
	if err != nil {
		return nil, err
	}

	err = system.API.Console.Service.ActivateAccount(ctx, activationToken)
	if err != nil {
		return nil, err
	}

	authCtx, err := system.AuthenticatedContext(ctx, user.ID)
	if err != nil {
		return nil, err
	}

	err = system.API.Console.Service.Payments().SetupAccount(authCtx)
	if err != nil {
		return nil, err
	}

	return user, nil
}

// AddProject adds a project to a satellite and makes the specified user its owner.
func (system *Satellite) AddProject(ctx context.Context, ownerID uuid.UUID, name string) (*console.Project, error) {
	authCtx, err := system.AuthenticatedContext(ctx, ownerID)
	if err != nil {
		return nil, err
	}

	project, err := system.API.Console.Service.CreateProject(authCtx, console.ProjectInfo{
		Name: name,
	})
	if err != nil {
		return nil, err
	}

	return project, nil
}

// AuthenticatedContext creates a context with authentication data for the given user.
func (system *Satellite) AuthenticatedContext(ctx context.Context, userID uuid.UUID) (context.Context, error) {
	user, err := system.API.Console.Service.GetUser(ctx, userID)
	if err != nil {
		return nil, err
	}

	// we are using the full name as the password
	token, err := system.API.Console.Service.Token(ctx, user.Email, user.FullName)
	if err != nil {
		return nil, err
	}

	auth, err := system.API.Console.Service.Authorize(consoleauth.WithAPIKey(ctx, []byte(token)))
	if err != nil {
		return nil, err
	}

	return console.WithAuth(ctx, auth), nil
}

// Close closes all the subsystems in the Satellite system.
func (system *Satellite) Close() error {
	return errs.Combine(
		system.API.Close(),
		system.Core.Close(),
		system.Repairer.Close(),
		system.Admin.Close(),
		system.GC.Close(),
	)
}

// Run runs all the subsystems in the Satellite system.
func (system *Satellite) Run(ctx context.Context) (err error) {
	group, ctx := errgroup.WithContext(ctx)
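	// Each subsystem runs concurrently; context cancellation is treated as a
	// clean shutdown, so errs2.IgnoreCanceled filters it out of the results.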
	group.Go(func() error {
		return errs2.IgnoreCanceled(system.Core.Run(ctx))
	})
	group.Go(func() error {
		return errs2.IgnoreCanceled(system.API.Run(ctx))
	})
	group.Go(func() error {
		return errs2.IgnoreCanceled(system.Repairer.Run(ctx))
	})
	group.Go(func() error {
		return errs2.IgnoreCanceled(system.Admin.Run(ctx))
	})
	group.Go(func() error {
		return errs2.IgnoreCanceled(system.GC.Run(ctx))
	})
	return group.Wait()
}

// PrivateAddr returns the private address from the Satellite system API.
func (system *Satellite) PrivateAddr() string { return system.API.Server.PrivateAddr().String() }

// newSatellites initializes satellites.
func (planet *Planet) newSatellites(ctx context.Context, count int, databases satellitedbtest.SatelliteDatabases) ([]*Satellite, error) {
	var satellites []*Satellite

	for i := 0; i < count; i++ {
		index := i
		prefix := "satellite" + strconv.Itoa(index)
		log := planet.log.Named(prefix)

		var system *Satellite
		var err error
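		// Label the goroutines spawned for this peer so pprof profiles can
		// attribute work to a specific satellite.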
		pprof.Do(ctx, pprof.Labels("peer", prefix), func(ctx context.Context) {
			system, err = planet.newSatellite(ctx, prefix, index, log, databases)
		})
		if err != nil {
			return nil, err
		}

		log.Debug("id=" + system.ID().String() + " addr=" + system.Addr())
		satellites = append(satellites, system)
		planet.peers = append(planet.peers, newClosablePeer(system))
	}

	return satellites, nil
}
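
// newSatellite creates the databases and default test configuration for a
// single satellite, then constructs its core, API, admin, repairer, and
// garbage collection peers.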
func (planet *Planet) newSatellite(ctx context.Context, prefix string, index int, log *zap.Logger, databases satellitedbtest.SatelliteDatabases) (*Satellite, error) {
	storageDir := filepath.Join(planet.directory, prefix)
	if err := os.MkdirAll(storageDir, 0700); err != nil {
		return nil, err
	}

	identity, err := planet.NewIdentity()
	if err != nil {
		return nil, err
	}

	db, err := satellitedbtest.CreateMasterDB(ctx, log.Named("db"), planet.config.Name, "S", index, databases.MasterDB)
	if err != nil {
		return nil, err
	}

	if planet.config.Reconfigure.SatelliteDB != nil {
		var newdb satellite.DB
		newdb, err = planet.config.Reconfigure.SatelliteDB(log.Named("db"), index, db)
		if err != nil {
			return nil, errs.Combine(err, db.Close())
		}
		db = newdb
	}
	planet.databases = append(planet.databases, db)

	metabaseDB, err := satellitedbtest.CreateMetabaseDB(context.TODO(), log.Named("metabase"), planet.config.Name, "M", index, databases.MetabaseDB)
	if err != nil {
		return nil, err
	}

	if planet.config.Reconfigure.SatelliteMetabaseDB != nil {
		var newMetabaseDB *metabase.DB
		newMetabaseDB, err = planet.config.Reconfigure.SatelliteMetabaseDB(log.Named("metabase"), index, metabaseDB)
		if err != nil {
			return nil, errs.Combine(err, metabaseDB.Close())
		}
		metabaseDB = newMetabaseDB
	}
	planet.databases = append(planet.databases, metabaseDB)

	redis, err := testredis.Mini(ctx)
	if err != nil {
		return nil, err
	}

	encryptionKeys, err := orders.NewEncryptionKeys(orders.EncryptionKey{
		ID:  orders.EncryptionKeyID{1},
		Key: storj.Key{1},
	})
	if err != nil {
		return nil, err
	}

	// TODO: it is a huge surprise that this doesn't use the config
	// parsing `default` or `devDefault` struct tag values.
	// we should use storj.io/private/cfgstruct to autopopulate default
	// config values and then only override ones in special cases.
	config := satellite.Config{
		Server: server.Config{
			Address:        "127.0.0.1:0",
			PrivateAddress: "127.0.0.1:0",

			Config: tlsopts.Config{
				RevocationDBURL:     "bolt://" + filepath.Join(storageDir, "revocation.db"),
				UsePeerCAWhitelist:  true,
				PeerCAWhitelistPath: planet.whitelistPath,
				PeerIDVersions:      "latest",
				Extensions: extensions.Config{
					Revocation:          false,
					WhitelistSignedLeaf: false,
				},
			},
		},
		Debug: debug.Config{
			Address: "",
		},
		Admin: admin.Config{
			Address: "127.0.0.1:0",
		},
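		// A nanosecond rate-limit interval effectively disables contact
		// rate limiting for tests.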
		Contact: contact.Config{
			Timeout:            1 * time.Minute,
			RateLimitInterval:  time.Nanosecond,
			RateLimitBurst:     1000,
			RateLimitCacheSize: 1000,
		},
		Overlay: overlay.Config{
			Node: overlay.NodeSelectionConfig{
				AuditCount:                  0,
				NewNodeFraction:             1,
				OnlineWindow:                time.Minute,
				DistinctIP:                  false,
				MinimumDiskSpace:            100 * memory.MB,
				AuditReputationRepairWeight: 1,
				AuditReputationUplinkWeight: 1,
				AuditReputationLambda:       0.95,
				AuditReputationWeight:       1,
				AuditReputationDQ:           0.6,
				SuspensionGracePeriod:       time.Hour,
				SuspensionDQEnabled:         true,
			},
			NodeSelectionCache: overlay.UploadSelectionCacheConfig{
				Staleness: 3 * time.Minute,
			},
			UpdateStatsBatchSize: 100,
			AuditHistory: overlay.AuditHistoryConfig{
				WindowSize:               10 * time.Minute,
				TrackingPeriod:           time.Hour,
				GracePeriod:              time.Hour,
				OfflineThreshold:         0.6,
				OfflineSuspensionEnabled: true,
			},
		},
		StrayNodes: straynodes.Config{
			EnableDQ:                  true,
			Interval:                  time.Minute,
			MaxDurationWithoutContact: 30 * time.Second,
			Limit:                     1000,
		},
		Metainfo: metainfo.Config{
			DatabaseURL:          "", // not used
			MinRemoteSegmentSize: 0,  // TODO: fix tests to work with 1024
			MaxInlineSegmentSize: 4 * memory.KiB,
			MaxSegmentSize:       64 * memory.MiB,
			MaxMetadataSize:      2 * memory.KiB,
			MaxCommitInterval:    1 * time.Hour,
			Overlay:              true,
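			// The RS numbers are derived from the planet's storage node
			// count; e.g. with 10 storage nodes this yields Min=2, Repair=4,
			// Success=6, Total=8, and atLeastOne keeps each value >= 1 for
			// very small test planets.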
			RS: metainfo.RSConfig{
				ErasureShareSize: memory.Size(256),
				Min:              atLeastOne(planet.config.StorageNodeCount * 1 / 5),
				Repair:           atLeastOne(planet.config.StorageNodeCount * 2 / 5),
				Success:          atLeastOne(planet.config.StorageNodeCount * 3 / 5),
				Total:            atLeastOne(planet.config.StorageNodeCount * 4 / 5),
			},
			Loop: metaloop.Config{
				CoalesceDuration: 1 * time.Second,
				ListLimit:        10000,
			},
			RateLimiter: metainfo.RateLimiterConfig{
				Enabled:         true,
				Rate:            1000,
				CacheCapacity:   100,
				CacheExpiration: 10 * time.Second,
			},
			ProjectLimits: metainfo.ProjectLimitConfig{
				MaxBuckets: 10,
			},
			PieceDeletion: piecedeletion.Config{
				MaxConcurrency:      100,
				MaxConcurrentPieces: 1000,
				MaxPiecesPerBatch:   4000,
				MaxPiecesPerRequest: 2000,
				DialTimeout:         2 * time.Second,
				RequestTimeout:      2 * time.Second,
				FailThreshold:       2 * time.Second,
			},
		},
		Orders: orders.Config{
			Expiration:        7 * 24 * time.Hour,
			FlushBatchSize:    10,
			FlushInterval:     defaultInterval,
			NodeStatusLogging: true,
			EncryptionKeys:    *encryptionKeys,
		},
		Checker: checker.Config{
			Interval:                  defaultInterval,
			IrreparableInterval:       defaultInterval,
			ReliabilityCacheStaleness: 1 * time.Minute,
		},
		Payments: paymentsconfig.Config{
			StorageTBPrice: "10",
			EgressTBPrice:  "45",
			ObjectPrice:    "0.0000022",
			StripeCoinPayments: stripecoinpayments.Config{
				TransactionUpdateInterval:    defaultInterval,
				AccountBalanceUpdateInterval: defaultInterval,
				ConversionRatesCycleInterval: defaultInterval,
				ListingLimit:                 100,
			},
			CouponDuration: paymentsconfig.CouponDuration{
				Enabled:        true,
				BillingPeriods: 2,
			},
			CouponValue:       275,
			PaywallProportion: 1,
		},
		Repairer: repairer.Config{
			MaxRepair:                     10,
			Interval:                      defaultInterval,
			Timeout:                       1 * time.Minute, // Repairs can take up to 10 seconds. Leaving room for outliers
			DownloadTimeout:               1 * time.Minute,
			TotalTimeout:                  10 * time.Minute,
			MaxBufferMem:                  4 * memory.MiB,
			MaxExcessRateOptimalThreshold: 0.05,
			InMemoryRepair:                false,
		},
		Audit: audit.Config{
			MaxRetriesStatDB:   0,
			MinBytesPerSecond:  1 * memory.KB,
			MinDownloadTimeout: 5 * time.Second,
			MaxReverifyCount:   3,
			ChoreInterval:      defaultInterval,
			QueueInterval:      defaultInterval,
			Slots:              3,
			WorkerConcurrency:  2,
		},
		GarbageCollection: gc.Config{
			Interval:          defaultInterval,
			Enabled:           true,
			InitialPieces:     10,
			FalsePositiveRate: 0.1,
			ConcurrentSends:   1,
			RunInCore:         false,
		},
		ExpiredDeletion: expireddeletion.Config{
			Interval: defaultInterval,
			Enabled:  true,
		},
		Tally: tally.Config{
			Interval: defaultInterval,
		},
		Rollup: rollup.Config{
			Interval:      defaultInterval,
			DeleteTallies: false,
		},
		RollupArchive: rolluparchive.Config{
			Interval:   defaultInterval,
			ArchiveAge: time.Hour * 24,
			BatchSize:  1000,
			Enabled:    true,
		},
		ProjectBWCleanup: projectbwcleanup.Config{
			Interval:     defaultInterval,
			RetainMonths: 1,
		},
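		// Live accounting is backed by the in-process test redis started
		// above (testredis.Mini), so no external Redis server is needed.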
		LiveAccounting: live.Config{
			StorageBackend:    "redis://" + redis.Addr() + "?db=0",
			BandwidthCacheTTL: 5 * time.Minute,
		},
		Mail: mailservice.Config{
			SMTPServerAddress: "smtp.mail.test:587",
			From:              "Labs <storj@mail.test>",
			AuthType:          "simulate",
			TemplatePath:      filepath.Join(developmentRoot, "web/satellite/static/emails"),
		},
		Console: consoleweb.Config{
			Address:         "127.0.0.1:0",
			StaticDir:       filepath.Join(developmentRoot, "web/satellite"),
			AuthToken:       "very-secret-token",
			AuthTokenSecret: "my-suppa-secret-key",
			Config: console.Config{
				PasswordCost:        console.TestPasswordCost,
				DefaultProjectLimit: 5,
				UsageLimits: console.UsageLimitsConfig{
					DefaultStorageLimit:   25 * memory.GB,
					DefaultBandwidthLimit: 25 * memory.GB,
				},
			},
			RateLimit: web.IPRateLimiterConfig{
				Duration:  5 * time.Minute,
				Burst:     3,
				NumLimits: 10,
			},
		},
		Version: planet.NewVersionConfig(),
		GracefulExit: gracefulexit.Config{
			Enabled:                      true,
			ChoreBatchSize:               10,
			ChoreInterval:                defaultInterval,
			EndpointBatchSize:            100,
			MaxFailuresPerPiece:          5,
			MaxInactiveTimeFrame:         time.Second * 10,
			OverallMaxFailuresPercentage: 10,
			RecvTimeout:                  time.Minute * 1,
			MaxOrderLimitSendCount:       3,
			NodeMinAgeInMonths:           0,
			AsOfSystemTimeInterval:       0,
			TransferQueueBatchSize:       1000,
		},
		Metrics: metrics.Config{},
	}

	if planet.config.Reconfigure.Satellite != nil {
		planet.config.Reconfigure.Satellite(log, index, &config)
	}

	versionInfo := planet.NewVersionInfo()

	revocationDB, err := revocation.OpenDBFromCfg(ctx, config.Server.Config)
	if err != nil {
		return nil, errs.Wrap(err)
	}
	planet.databases = append(planet.databases, revocationDB)

	liveAccounting, err := live.OpenCache(ctx, log.Named("live-accounting"), config.LiveAccounting)
	if err != nil {
		return nil, errs.Wrap(err)
	}
	planet.databases = append(planet.databases, liveAccounting)

	rollupsWriteCache := orders.NewRollupsWriteCache(log.Named("orders-write-cache"), db.Orders(), config.Orders.FlushBatchSize)
	planet.databases = append(planet.databases, rollupsWriteCacheCloser{rollupsWriteCache})

	peer, err := satellite.New(log, identity, db, metabaseDB, revocationDB, liveAccounting, rollupsWriteCache, versionInfo, &config, nil)
	if err != nil {
		return nil, err
	}

	err = db.TestingMigrateToLatest(ctx)
	if err != nil {
		return nil, err
	}

	err = metabaseDB.MigrateToLatest(ctx)
	if err != nil {
		return nil, err
	}

	api, err := planet.newAPI(ctx, index, identity, db, metabaseDB, config, versionInfo)
	if err != nil {
		return nil, err
	}

	adminPeer, err := planet.newAdmin(ctx, index, identity, db, config, versionInfo)
	if err != nil {
		return nil, err
	}

	repairerPeer, err := planet.newRepairer(ctx, index, identity, db, metabaseDB, config, versionInfo)
	if err != nil {
		return nil, err
	}

	gcPeer, err := planet.newGarbageCollection(ctx, index, identity, db, metabaseDB, config, versionInfo)
	if err != nil {
		return nil, err
	}

	return createNewSystem(prefix, log, config, peer, api, repairerPeer, adminPeer, gcPeer), nil
}

// createNewSystem makes a new Satellite System and exposes the same interface from
// before we split out the API. In the short term this will help keep all the tests passing
// without much modification needed. However, long term we probably want to rework this
// so it represents how the satellite will run when it is made up of many processes.
func createNewSystem(name string, log *zap.Logger, config satellite.Config, peer *satellite.Core, api *satellite.API, repairerPeer *satellite.Repairer, adminPeer *satellite.Admin, gcPeer *satellite.GarbageCollection) *Satellite {
	system := &Satellite{
		Name:     name,
		Config:   config,
		Core:     peer,
		API:      api,
		Repairer: repairerPeer,
		Admin:    adminPeer,
		GC:       gcPeer,
	}
	system.Log = log
	system.Identity = peer.Identity
	system.DB = api.DB
	system.Dialer = api.Dialer

	system.Contact.Service = api.Contact.Service
	system.Contact.Endpoint = api.Contact.Endpoint

	system.Overlay.DB = api.Overlay.DB
	system.Overlay.Service = api.Overlay.Service
	system.Overlay.DQStrayNodes = peer.Overlay.DQStrayNodes

	system.Metainfo.Metabase = api.Metainfo.Metabase
	system.Metainfo.Service = peer.Metainfo.Service
	system.Metainfo.Endpoint2 = api.Metainfo.Endpoint2
	system.Metainfo.Loop = peer.Metainfo.Loop

	system.Inspector.Endpoint = api.Inspector.Endpoint

	system.Orders.DB = api.Orders.DB
	system.Orders.Endpoint = api.Orders.Endpoint
	system.Orders.Service = peer.Orders.Service
	system.Orders.Chore = api.Orders.Chore

	system.Repair.Checker = peer.Repair.Checker
	system.Repair.Repairer = repairerPeer.Repairer
	system.Repair.Inspector = api.Repair.Inspector

	system.Audit.Queues = peer.Audit.Queues
	system.Audit.Worker = peer.Audit.Worker
	system.Audit.Chore = peer.Audit.Chore
	system.Audit.Verifier = peer.Audit.Verifier
	system.Audit.Reporter = peer.Audit.Reporter

	system.GarbageCollection.Service = gcPeer.GarbageCollection.Service

	system.ExpiredDeletion.Chore = peer.ExpiredDeletion.Chore

	system.Accounting.Tally = peer.Accounting.Tally
	system.Accounting.Rollup = peer.Accounting.Rollup
	system.Accounting.ProjectUsage = api.Accounting.ProjectUsage
	system.Accounting.ProjectBWCleanup = peer.Accounting.ProjectBWCleanupChore
	system.Accounting.RollupArchive = peer.Accounting.RollupArchiveChore

	system.LiveAccounting = peer.LiveAccounting

	system.ProjectLimits.Cache = api.ProjectLimits.Cache

	system.GracefulExit.Chore = peer.GracefulExit.Chore
	system.GracefulExit.Endpoint = api.GracefulExit.Endpoint

	system.Metrics.Chore = peer.Metrics.Chore

	return system
}
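
// newAPI constructs the satellite API peer together with its own revocation
// database, live accounting cache, and orders write cache.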
func (planet *Planet) newAPI(ctx context.Context, index int, identity *identity.FullIdentity, db satellite.DB, metabaseDB *metabase.DB, config satellite.Config, versionInfo version.Info) (*satellite.API, error) {
	prefix := "satellite-api" + strconv.Itoa(index)
	log := planet.log.Named(prefix)
	var err error

	revocationDB, err := revocation.OpenDBFromCfg(ctx, config.Server.Config)
	if err != nil {
		return nil, errs.Wrap(err)
	}
	planet.databases = append(planet.databases, revocationDB)

	liveAccounting, err := live.OpenCache(ctx, log.Named("live-accounting"), config.LiveAccounting)
	if err != nil {
		return nil, errs.Wrap(err)
	}
	planet.databases = append(planet.databases, liveAccounting)

	rollupsWriteCache := orders.NewRollupsWriteCache(log.Named("orders-write-cache"), db.Orders(), config.Orders.FlushBatchSize)
	planet.databases = append(planet.databases, rollupsWriteCacheCloser{rollupsWriteCache})

	return satellite.NewAPI(log, identity, db, metabaseDB, revocationDB, liveAccounting, rollupsWriteCache, &config, versionInfo, nil)
}
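
// newAdmin constructs the satellite admin peer.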
func (planet *Planet) newAdmin(ctx context.Context, index int, identity *identity.FullIdentity, db satellite.DB, config satellite.Config, versionInfo version.Info) (*satellite.Admin, error) {
	prefix := "satellite-admin" + strconv.Itoa(index)
	log := planet.log.Named(prefix)

	return satellite.NewAdmin(log, identity, db, versionInfo, &config, nil)
}
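
// newRepairer constructs the satellite repairer peer together with the
// revocation database and orders write cache it depends on.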
func (planet *Planet) newRepairer(ctx context.Context, index int, identity *identity.FullIdentity, db satellite.DB, metabaseDB *metabase.DB, config satellite.Config, versionInfo version.Info) (*satellite.Repairer, error) {
	prefix := "satellite-repairer" + strconv.Itoa(index)
	log := planet.log.Named(prefix)

	revocationDB, err := revocation.OpenDBFromCfg(ctx, config.Server.Config)
	if err != nil {
		return nil, errs.Wrap(err)
	}
	planet.databases = append(planet.databases, revocationDB)

	rollupsWriteCache := orders.NewRollupsWriteCache(log.Named("orders-write-cache"), db.Orders(), config.Orders.FlushBatchSize)
	planet.databases = append(planet.databases, rollupsWriteCacheCloser{rollupsWriteCache})

	return satellite.NewRepairer(log, identity, metabaseDB, revocationDB, db.RepairQueue(), db.Buckets(), db.OverlayCache(), rollupsWriteCache, db.Irreparable(), versionInfo, &config, nil)
}
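
// rollupsWriteCacheCloser wraps RollupsWriteCache so that planet can register
// it as a database and flush any pending order rollups on shutdown.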
type rollupsWriteCacheCloser struct {
	*orders.RollupsWriteCache
}

func (cache rollupsWriteCacheCloser) Close() error {
	return cache.RollupsWriteCache.CloseAndFlush(context.TODO())
}
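
// newGarbageCollection constructs the satellite garbage collection peer.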
func (planet *Planet) newGarbageCollection(ctx context.Context, index int, identity *identity.FullIdentity, db satellite.DB, metabaseDB *metabase.DB, config satellite.Config, versionInfo version.Info) (*satellite.GarbageCollection, error) {
	prefix := "satellite-gc" + strconv.Itoa(index)
	log := planet.log.Named(prefix)

	revocationDB, err := revocation.OpenDBFromCfg(ctx, config.Server.Config)
	if err != nil {
		return nil, errs.Wrap(err)
	}
	planet.databases = append(planet.databases, revocationDB)

	return satellite.NewGarbageCollection(log, identity, db, metabaseDB, revocationDB, versionInfo, &config, nil)
}

// atLeastOne returns 1 if value < 1, or value otherwise.
func atLeastOne(value int) int {
	if value < 1 {
		return 1
	}
	return value
}