satellite/accounting: expose project segment limit

Exposes functionality to get and update the project segment
limit. It will be used to limit the number of segments per
project while uploading objects.

Change-Id: I971d48eebb4e7db8b01535c3091829e73437f48d
Michał Niewrzał 2021-12-03 16:06:20 +01:00 committed by Michal Niewrzal
parent ace90fd844
commit d94d8d1775
10 changed files with 196 additions and 29 deletions

View File

@ -84,10 +84,11 @@ type ProjectObjectsSegments struct {
ObjectCount int64 `json:"objectCount"`
}
// ProjectLimits contains the storage and bandwidth limits.
// ProjectLimits contains the storage, bandwidth and segment limits.
type ProjectLimits struct {
Usage *int64
Bandwidth *int64
Segments *int64
}
// BucketUsage consist of total bucket usage for period.
@ -200,10 +201,14 @@ type ProjectAccounting interface {
UpdateProjectUsageLimit(ctx context.Context, projectID uuid.UUID, limit memory.Size) error
// UpdateProjectBandwidthLimit updates project bandwidth limit.
UpdateProjectBandwidthLimit(ctx context.Context, projectID uuid.UUID, limit memory.Size) error
// UpdateProjectSegmentLimit updates project segment limit.
UpdateProjectSegmentLimit(ctx context.Context, projectID uuid.UUID, limit int64) error
// GetProjectStorageLimit returns project storage usage limit.
GetProjectStorageLimit(ctx context.Context, projectID uuid.UUID) (*int64, error)
// GetProjectBandwidthLimit returns project bandwidth usage limit.
GetProjectBandwidthLimit(ctx context.Context, projectID uuid.UUID) (*int64, error)
// GetProjectSegmentLimit returns the segment limit for a project ID.
GetProjectSegmentLimit(ctx context.Context, projectID uuid.UUID) (_ *int64, err error)
// GetProjectLimits returns current project limit for both storage and bandwidth.
GetProjectLimits(ctx context.Context, projectID uuid.UUID) (ProjectLimits, error)
// GetProjectTotal returns project usage summary for specified period of time.
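
For orientation, a minimal sketch of driving the two new ProjectAccounting methods; it assumes db satisfies this interface, projectID is a valid project UUID, and the 10000 value is purely illustrative:

// Raise the project's segment limit, then read the combined limits back.
if err := db.UpdateProjectSegmentLimit(ctx, projectID, 10000); err != nil {
	return err
}
limits, err := db.GetProjectLimits(ctx, projectID)
if err != nil {
	return err
}
if limits.Segments != nil {
	fmt.Println("segment limit:", *limits.Segments)
}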

View File

@ -43,16 +43,18 @@ type ProjectLimitCache struct {
projectLimitDB ProjectLimitDB
defaultMaxUsage memory.Size
defaultMaxBandwidth memory.Size
defaultMaxSegments int64
state *lrucache.ExpiringLRU
}
// NewProjectLimitCache creates a new project limit cache to store the project limits for each project ID.
func NewProjectLimitCache(db ProjectLimitDB, defaultMaxUsage, defaultMaxBandwidth memory.Size, config ProjectLimitConfig) *ProjectLimitCache {
func NewProjectLimitCache(db ProjectLimitDB, defaultMaxUsage, defaultMaxBandwidth memory.Size, defaultMaxSegments int64, config ProjectLimitConfig) *ProjectLimitCache {
return &ProjectLimitCache{
projectLimitDB: db,
defaultMaxUsage: defaultMaxUsage,
defaultMaxBandwidth: defaultMaxBandwidth,
defaultMaxSegments: defaultMaxSegments,
state: lrucache.New(lrucache.Options{
Capacity: config.CacheCapacity,
Expiration: config.CacheExpiration,
@ -122,3 +124,18 @@ func (c *ProjectLimitCache) GetProjectBandwidthLimit(ctx context.Context, projec
}
return memory.Size(*projectLimits.Bandwidth), nil
}
// GetProjectSegmentLimit returns the segment limit for a project ID.
func (c *ProjectLimitCache) GetProjectSegmentLimit(ctx context.Context, projectID uuid.UUID) (_ int64, err error) {
defer mon.Task()(&ctx)(&err)
projectLimits, err := c.Get(ctx, projectID)
if err != nil {
return 0, err
}
if projectLimits.Segments == nil {
return c.defaultMaxSegments, nil
}
return *projectLimits.Segments, nil
}
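
A short sketch of the fallback added above; the wiring names are assumed, and the 1000000 default mirrors the hard-coded value passed in NewAPI further down:

cache := accounting.NewProjectLimitCache(db.ProjectAccounting(), defaultMaxUsage, defaultMaxBandwidth, 1000000,
	accounting.ProjectLimitConfig{CacheCapacity: 100})

// When the project row carries no explicit segment limit, the configured default is returned.
segmentLimit, err := cache.GetProjectSegmentLimit(ctx, projectID)
if err != nil {
	return err
}
_ = segmentLimit // int64: either the stored value or defaultMaxSegments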

View File

@ -32,7 +32,7 @@ func (mdb *mockDB) GetProjectLimits(ctx context.Context, projectID uuid.UUID) (a
func TestProjectLimitCacheCallCount(t *testing.T) {
satellitedbtest.Run(t, func(ctx *testcontext.Context, t *testing.T, db satellite.DB) {
mdb := mockDB{}
projectLimitCache := accounting.NewProjectLimitCache(&mdb, 0, 0, accounting.ProjectLimitConfig{CacheCapacity: 100})
projectLimitCache := accounting.NewProjectLimitCache(&mdb, 0, 0, 0, accounting.ProjectLimitConfig{CacheCapacity: 100})
testProject, err := db.Console().Projects().Insert(ctx, &console.Project{Name: "test", OwnerID: testrand.UUID()})
require.NoError(t, err)
@ -62,7 +62,8 @@ func TestProjectLimitCache(t *testing.T) {
projectLimitCache := saPeer.ProjectLimits.Cache
defaultUsageLimit := saPeer.Config.Console.UsageLimits.Storage.Free.Int64()
defaultBandwidthLimit := saPeer.Config.Console.UsageLimits.Bandwidth.Free.Int64()
dbDefaultLimits := accounting.ProjectLimits{Usage: &defaultUsageLimit, Bandwidth: &defaultBandwidthLimit}
defaultSegmentLimit := int64(1000000)
dbDefaultLimits := accounting.ProjectLimits{Usage: &defaultUsageLimit, Bandwidth: &defaultBandwidthLimit, Segments: &defaultSegmentLimit}
testProject, err := saPeer.DB.Console().Projects().Insert(ctx, &console.Project{Name: "test", OwnerID: testrand.UUID()})
require.NoError(t, err)
@ -71,6 +72,7 @@ func TestProjectLimitCache(t *testing.T) {
errorLimit = 0
expectedUsageLimit = 1
expectedBandwidthLimit = 2
expectedSegmentLimit = 3
)
t.Run("project ID doesn't exist", func(t *testing.T) {
@ -83,6 +85,7 @@ func TestProjectLimitCache(t *testing.T) {
assert.Error(t, err)
assert.Equal(t, accounting.ProjectLimits{}, actualLimitsFromDB)
// storage
actualStorageLimitFromCache, err := projectLimitCache.GetProjectStorageLimit(ctx, projectID)
assert.Error(t, err)
assert.Equal(t, memory.Size(errorLimit), actualStorageLimitFromCache)
@ -90,12 +93,32 @@ func TestProjectLimitCache(t *testing.T) {
actualStorageLimitFromSvc, err := projectUsageSvc.GetProjectStorageLimit(ctx, projectID)
assert.Error(t, err)
assert.Equal(t, memory.Size(errorLimit), actualStorageLimitFromSvc)
// bandwidth
actualBandwidthLimitFromCache, err := projectLimitCache.GetProjectBandwidthLimit(ctx, projectID)
assert.Error(t, err)
assert.Equal(t, memory.Size(errorLimit), actualBandwidthLimitFromCache)
actualBandwidthLimitFromSvc, err := projectUsageSvc.GetProjectBandwidthLimit(ctx, projectID)
assert.Error(t, err)
assert.Equal(t, memory.Size(errorLimit), actualBandwidthLimitFromSvc)
// segments
actualSegmentLimitFromCache, err := projectLimitCache.GetProjectSegmentLimit(ctx, projectID)
assert.Error(t, err)
assert.EqualValues(t, errorLimit, actualSegmentLimitFromCache)
actualSegmentLimitFromSvc, err := projectUsageSvc.GetProjectSegmentLimit(ctx, projectID)
assert.Error(t, err)
assert.EqualValues(t, errorLimit, actualSegmentLimitFromSvc)
})
t.Run("default limits", func(t *testing.T) {
actualLimitsFromDB, err := accountingDB.GetProjectLimits(ctx, testProject.ID)
assert.NoError(t, err)
assert.Equal(t, accounting.ProjectLimits{}, actualLimitsFromDB)
assert.Equal(t, accounting.ProjectLimits{
Segments: &defaultSegmentLimit,
}, actualLimitsFromDB)
actualLimitsFromCache, err := projectLimitCache.GetProjectLimits(ctx, testProject.ID)
assert.NoError(t, err)
@ -131,6 +154,8 @@ func TestProjectLimitCache(t *testing.T) {
require.NoError(t, err)
err = accountingDB.UpdateProjectBandwidthLimit(ctx, testProject.ID, expectedBandwidthLimit)
require.NoError(t, err)
err = accountingDB.UpdateProjectSegmentLimit(ctx, testProject.ID, expectedSegmentLimit)
require.NoError(t, err)
actualStorageLimitFromDB, err := accountingDB.GetProjectStorageLimit(ctx, testProject.ID)
assert.NoError(t, err)
@ -140,8 +165,10 @@ func TestProjectLimitCache(t *testing.T) {
assert.NoError(t, err)
usageLimits := int64(expectedUsageLimit)
bwLimits := int64(expectedBandwidthLimit)
assert.Equal(t, accounting.ProjectLimits{Usage: &usageLimits, Bandwidth: &bwLimits}, actualLimitsFromDB)
segmentsLimits := int64(expectedSegmentLimit)
assert.Equal(t, accounting.ProjectLimits{Usage: &usageLimits, Bandwidth: &bwLimits, Segments: &segmentsLimits}, actualLimitsFromDB)
// storage
actualStorageLimitFromCache, err := projectLimitCache.GetProjectStorageLimit(ctx, testProject.ID)
assert.NoError(t, err)
require.Equal(t, memory.Size(expectedUsageLimit), actualStorageLimitFromCache)
@ -150,6 +177,7 @@ func TestProjectLimitCache(t *testing.T) {
assert.NoError(t, err)
require.Equal(t, memory.Size(expectedUsageLimit), actualStorageLimitFromSvc)
// bandwidth
actualBandwidthLimitFromDB, err := accountingDB.GetProjectBandwidthLimit(ctx, testProject.ID)
require.NoError(t, err)
require.Equal(t, int64(expectedBandwidthLimit), *actualBandwidthLimitFromDB)
@ -161,6 +189,19 @@ func TestProjectLimitCache(t *testing.T) {
actualBandwidthLimitFromSvc, err := projectUsageSvc.GetProjectBandwidthLimit(ctx, testProject.ID)
assert.NoError(t, err)
require.Equal(t, memory.Size(expectedBandwidthLimit), actualBandwidthLimitFromSvc)
// segments
actualSegmentLimitFromDB, err := accountingDB.GetProjectSegmentLimit(ctx, testProject.ID)
require.NoError(t, err)
require.EqualValues(t, expectedSegmentLimit, *actualSegmentLimitFromDB)
actualSegmentLimitFromCache, err := projectLimitCache.GetProjectSegmentLimit(ctx, testProject.ID)
assert.NoError(t, err)
require.EqualValues(t, expectedSegmentLimit, actualSegmentLimitFromCache)
actualSegmentLimitFromSvc, err := projectUsageSvc.GetProjectSegmentLimit(ctx, testProject.ID)
assert.NoError(t, err)
require.EqualValues(t, expectedSegmentLimit, actualSegmentLimitFromSvc)
})
})
}

View File

@ -183,7 +183,14 @@ func (usage *Service) GetProjectBandwidthLimit(ctx context.Context, projectID uu
return usage.projectLimitCache.GetProjectBandwidthLimit(ctx, projectID)
}
// GetProjectSegmentLimit returns current project segment limit.
func (usage *Service) GetProjectSegmentLimit(ctx context.Context, projectID uuid.UUID) (_ int64, err error) {
defer mon.Task()(&ctx, projectID)(&err)
return usage.projectLimitCache.GetProjectSegmentLimit(ctx, projectID)
}
// UpdateProjectLimits sets new value for project's bandwidth and storage limit.
// TODO remove because it's not used.
func (usage *Service) UpdateProjectLimits(ctx context.Context, projectID uuid.UUID, limit memory.Size) (err error) {
defer mon.Task()(&ctx, projectID)(&err)

View File

@ -316,6 +316,7 @@ func NewAPI(log *zap.Logger, full *identity.FullIdentity, db DB,
peer.ProjectLimits.Cache = accounting.NewProjectLimitCache(peer.DB.ProjectAccounting(),
config.Console.Config.UsageLimits.Storage.Free,
config.Console.Config.UsageLimits.Bandwidth.Free,
1000000, // TODO this will be correctly populated with upcoming change

config.ProjectLimit,
)
}

View File

@ -65,7 +65,7 @@ func TestGraphqlMutation(t *testing.T) {
cache, err := live.OpenCache(ctx, log.Named("cache"), live.Config{StorageBackend: "redis://" + redis.Addr() + "?db=0"})
require.NoError(t, err)
projectLimitCache := accounting.NewProjectLimitCache(db.ProjectAccounting(), 0, 0, accounting.ProjectLimitConfig{CacheCapacity: 100})
projectLimitCache := accounting.NewProjectLimitCache(db.ProjectAccounting(), 0, 0, 0, accounting.ProjectLimitConfig{CacheCapacity: 100})
projectUsage := accounting.NewService(db.ProjectAccounting(), cache, projectLimitCache, 5*time.Minute, -10*time.Second)

View File

@ -49,7 +49,7 @@ func TestGraphqlQuery(t *testing.T) {
cache, err := live.OpenCache(ctx, log.Named("cache"), live.Config{StorageBackend: "redis://" + redis.Addr() + "?db=0"})
require.NoError(t, err)
projectLimitCache := accounting.NewProjectLimitCache(db.ProjectAccounting(), 0, 0, accounting.ProjectLimitConfig{CacheCapacity: 100})
projectLimitCache := accounting.NewProjectLimitCache(db.ProjectAccounting(), 0, 0, 0, accounting.ProjectLimitConfig{CacheCapacity: 100})
projectUsage := accounting.NewService(db.ProjectAccounting(), cache, projectLimitCache, 5*time.Minute, -10*time.Second)

View File

@ -371,12 +371,16 @@ read one (
select project.bandwidth_limit
where project.id = ?
)
read one (
select project.segment_limit
where project.id = ?
)
read one (
select project.max_buckets
where project.id = ?
)
read one (
select project.bandwidth_limit project.usage_limit
select project.bandwidth_limit project.usage_limit project.segment_limit
where project.id = ?
)
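
These read one definitions drive dbx code generation; the generated accessor that appears later in this diff can then be called roughly like this (dbxDB stands for the generated DB handle):

row, err := dbxDB.Get_Project_SegmentLimit_By_Id(ctx, dbx.Project_Id(projectID[:]))
if err != nil {
	return nil, err
}
return row.SegmentLimit, nil // *int64; nil when segment_limit is NULL in the projects row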

View File

@ -10141,9 +10141,10 @@ type BandwidthLimit_Row struct {
BandwidthLimit *int64
}
type BandwidthLimit_UsageLimit_Row struct {
type BandwidthLimit_UsageLimit_SegmentLimit_Row struct {
BandwidthLimit *int64
UsageLimit *int64
SegmentLimit *int64
}
type CreatedAt_Row struct {
@ -10233,6 +10234,10 @@ type ProjectStorageLimit_ProjectBandwidthLimit_Row struct {
ProjectBandwidthLimit int64
}
type SegmentLimit_Row struct {
SegmentLimit *int64
}
type UsageLimit_Row struct {
UsageLimit *int64
}
@ -11696,6 +11701,28 @@ func (obj *pgxImpl) Get_Project_BandwidthLimit_By_Id(ctx context.Context,
}
func (obj *pgxImpl) Get_Project_SegmentLimit_By_Id(ctx context.Context,
project_id Project_Id_Field) (
row *SegmentLimit_Row, err error) {
defer mon.Task()(&ctx)(&err)
var __embed_stmt = __sqlbundle_Literal("SELECT projects.segment_limit FROM projects WHERE projects.id = ?")
var __values []interface{}
__values = append(__values, project_id.value())
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
obj.logStmt(__stmt, __values...)
row = &SegmentLimit_Row{}
err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&row.SegmentLimit)
if err != nil {
return (*SegmentLimit_Row)(nil), obj.makeErr(err)
}
return row, nil
}
func (obj *pgxImpl) Get_Project_MaxBuckets_By_Id(ctx context.Context,
project_id Project_Id_Field) (
row *MaxBuckets_Row, err error) {
@ -11718,12 +11745,12 @@ func (obj *pgxImpl) Get_Project_MaxBuckets_By_Id(ctx context.Context,
}
func (obj *pgxImpl) Get_Project_BandwidthLimit_Project_UsageLimit_By_Id(ctx context.Context,
func (obj *pgxImpl) Get_Project_BandwidthLimit_Project_UsageLimit_Project_SegmentLimit_By_Id(ctx context.Context,
project_id Project_Id_Field) (
row *BandwidthLimit_UsageLimit_Row, err error) {
row *BandwidthLimit_UsageLimit_SegmentLimit_Row, err error) {
defer mon.Task()(&ctx)(&err)
var __embed_stmt = __sqlbundle_Literal("SELECT projects.bandwidth_limit, projects.usage_limit FROM projects WHERE projects.id = ?")
var __embed_stmt = __sqlbundle_Literal("SELECT projects.bandwidth_limit, projects.usage_limit, projects.segment_limit FROM projects WHERE projects.id = ?")
var __values []interface{}
__values = append(__values, project_id.value())
@ -11731,10 +11758,10 @@ func (obj *pgxImpl) Get_Project_BandwidthLimit_Project_UsageLimit_By_Id(ctx cont
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
obj.logStmt(__stmt, __values...)
row = &BandwidthLimit_UsageLimit_Row{}
err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&row.BandwidthLimit, &row.UsageLimit)
row = &BandwidthLimit_UsageLimit_SegmentLimit_Row{}
err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&row.BandwidthLimit, &row.UsageLimit, &row.SegmentLimit)
if err != nil {
return (*BandwidthLimit_UsageLimit_Row)(nil), obj.makeErr(err)
return (*BandwidthLimit_UsageLimit_SegmentLimit_Row)(nil), obj.makeErr(err)
}
return row, nil
@ -17682,6 +17709,28 @@ func (obj *pgxcockroachImpl) Get_Project_BandwidthLimit_By_Id(ctx context.Contex
}
func (obj *pgxcockroachImpl) Get_Project_SegmentLimit_By_Id(ctx context.Context,
project_id Project_Id_Field) (
row *SegmentLimit_Row, err error) {
defer mon.Task()(&ctx)(&err)
var __embed_stmt = __sqlbundle_Literal("SELECT projects.segment_limit FROM projects WHERE projects.id = ?")
var __values []interface{}
__values = append(__values, project_id.value())
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
obj.logStmt(__stmt, __values...)
row = &SegmentLimit_Row{}
err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&row.SegmentLimit)
if err != nil {
return (*SegmentLimit_Row)(nil), obj.makeErr(err)
}
return row, nil
}
func (obj *pgxcockroachImpl) Get_Project_MaxBuckets_By_Id(ctx context.Context,
project_id Project_Id_Field) (
row *MaxBuckets_Row, err error) {
@ -17704,12 +17753,12 @@ func (obj *pgxcockroachImpl) Get_Project_MaxBuckets_By_Id(ctx context.Context,
}
func (obj *pgxcockroachImpl) Get_Project_BandwidthLimit_Project_UsageLimit_By_Id(ctx context.Context,
func (obj *pgxcockroachImpl) Get_Project_BandwidthLimit_Project_UsageLimit_Project_SegmentLimit_By_Id(ctx context.Context,
project_id Project_Id_Field) (
row *BandwidthLimit_UsageLimit_Row, err error) {
row *BandwidthLimit_UsageLimit_SegmentLimit_Row, err error) {
defer mon.Task()(&ctx)(&err)
var __embed_stmt = __sqlbundle_Literal("SELECT projects.bandwidth_limit, projects.usage_limit FROM projects WHERE projects.id = ?")
var __embed_stmt = __sqlbundle_Literal("SELECT projects.bandwidth_limit, projects.usage_limit, projects.segment_limit FROM projects WHERE projects.id = ?")
var __values []interface{}
__values = append(__values, project_id.value())
@ -17717,10 +17766,10 @@ func (obj *pgxcockroachImpl) Get_Project_BandwidthLimit_Project_UsageLimit_By_Id
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
obj.logStmt(__stmt, __values...)
row = &BandwidthLimit_UsageLimit_Row{}
err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&row.BandwidthLimit, &row.UsageLimit)
row = &BandwidthLimit_UsageLimit_SegmentLimit_Row{}
err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&row.BandwidthLimit, &row.UsageLimit, &row.SegmentLimit)
if err != nil {
return (*BandwidthLimit_UsageLimit_Row)(nil), obj.makeErr(err)
return (*BandwidthLimit_UsageLimit_SegmentLimit_Row)(nil), obj.makeErr(err)
}
return row, nil
@ -23082,14 +23131,14 @@ func (rx *Rx) Get_Project_BandwidthLimit_By_Id(ctx context.Context,
return tx.Get_Project_BandwidthLimit_By_Id(ctx, project_id)
}
func (rx *Rx) Get_Project_BandwidthLimit_Project_UsageLimit_By_Id(ctx context.Context,
func (rx *Rx) Get_Project_BandwidthLimit_Project_UsageLimit_Project_SegmentLimit_By_Id(ctx context.Context,
project_id Project_Id_Field) (
row *BandwidthLimit_UsageLimit_Row, err error) {
row *BandwidthLimit_UsageLimit_SegmentLimit_Row, err error) {
var tx *Tx
if tx, err = rx.getTx(ctx); err != nil {
return
}
return tx.Get_Project_BandwidthLimit_Project_UsageLimit_By_Id(ctx, project_id)
return tx.Get_Project_BandwidthLimit_Project_UsageLimit_Project_SegmentLimit_By_Id(ctx, project_id)
}
func (rx *Rx) Get_Project_By_Id(ctx context.Context,
@ -23112,6 +23161,16 @@ func (rx *Rx) Get_Project_MaxBuckets_By_Id(ctx context.Context,
return tx.Get_Project_MaxBuckets_By_Id(ctx, project_id)
}
func (rx *Rx) Get_Project_SegmentLimit_By_Id(ctx context.Context,
project_id Project_Id_Field) (
row *SegmentLimit_Row, err error) {
var tx *Tx
if tx, err = rx.getTx(ctx); err != nil {
return
}
return tx.Get_Project_SegmentLimit_By_Id(ctx, project_id)
}
func (rx *Rx) Get_Project_UsageLimit_By_Id(ctx context.Context,
project_id Project_Id_Field) (
row *UsageLimit_Row, err error) {
@ -24107,9 +24166,9 @@ type Methods interface {
project_id Project_Id_Field) (
row *BandwidthLimit_Row, err error)
Get_Project_BandwidthLimit_Project_UsageLimit_By_Id(ctx context.Context,
Get_Project_BandwidthLimit_Project_UsageLimit_Project_SegmentLimit_By_Id(ctx context.Context,
project_id Project_Id_Field) (
row *BandwidthLimit_UsageLimit_Row, err error)
row *BandwidthLimit_UsageLimit_SegmentLimit_Row, err error)
Get_Project_By_Id(ctx context.Context,
project_id Project_Id_Field) (
@ -24119,6 +24178,10 @@ type Methods interface {
project_id Project_Id_Field) (
row *MaxBuckets_Row, err error)
Get_Project_SegmentLimit_By_Id(ctx context.Context,
project_id Project_Id_Field) (
row *SegmentLimit_Row, err error)
Get_Project_UsageLimit_By_Id(ctx context.Context,
project_id Project_Id_Field) (
row *UsageLimit_Row, err error)

View File

@ -236,6 +236,20 @@ func (db *ProjectAccounting) UpdateProjectBandwidthLimit(ctx context.Context, pr
return err
}
// UpdateProjectSegmentLimit updates project segment limit.
func (db *ProjectAccounting) UpdateProjectSegmentLimit(ctx context.Context, projectID uuid.UUID, limit int64) (err error) {
defer mon.Task()(&ctx)(&err)
_, err = db.db.Update_Project_By_Id(ctx,
dbx.Project_Id(projectID[:]),
dbx.Project_Update_Fields{
SegmentLimit: dbx.Project_SegmentLimit(limit),
},
)
return err
}
// GetProjectStorageLimit returns project storage usage limit.
func (db *ProjectAccounting) GetProjectStorageLimit(ctx context.Context, projectID uuid.UUID) (_ *int64, err error) {
defer mon.Task()(&ctx)(&err)
@ -300,7 +314,7 @@ func (db *ProjectAccounting) GetProjectObjectsSegments(ctx context.Context, proj
FROM
bucket_storage_tallies
WHERE
project_id = ? AND
project_id = ? AND
interval_start = ?
`), projectID[:], latestDate)
if err = storageTalliesRows.Scan(&objectsSegments.SegmentCount, &objectsSegments.ObjectCount); err != nil {
@ -310,6 +324,20 @@ func (db *ProjectAccounting) GetProjectObjectsSegments(ctx context.Context, proj
return objectsSegments, nil
}
// GetProjectSegmentLimit returns project segment limit.
func (db *ProjectAccounting) GetProjectSegmentLimit(ctx context.Context, projectID uuid.UUID) (_ *int64, err error) {
defer mon.Task()(&ctx)(&err)
row, err := db.db.Get_Project_SegmentLimit_By_Id(ctx,
dbx.Project_Id(projectID[:]),
)
if err != nil {
return nil, err
}
return row.SegmentLimit, nil
}
// GetProjectTotal retrieves project usage for a given period.
func (db *ProjectAccounting) GetProjectTotal(ctx context.Context, projectID uuid.UUID, since, before time.Time) (usage *accounting.ProjectUsage, err error) {
defer mon.Task()(&ctx)(&err)
@ -827,7 +855,7 @@ func timeTruncateDown(t time.Time) time.Time {
func (db *ProjectAccounting) GetProjectLimits(ctx context.Context, projectID uuid.UUID) (_ accounting.ProjectLimits, err error) {
defer mon.Task()(&ctx)(&err)
row, err := db.db.Get_Project_BandwidthLimit_Project_UsageLimit_By_Id(ctx,
row, err := db.db.Get_Project_BandwidthLimit_Project_UsageLimit_Project_SegmentLimit_By_Id(ctx,
dbx.Project_Id(projectID[:]),
)
if err != nil {
@ -837,6 +865,7 @@ func (db *ProjectAccounting) GetProjectLimits(ctx context.Context, projectID uui
return accounting.ProjectLimits{
Usage: row.UsageLimit,
Bandwidth: row.BandwidthLimit,
Segments: row.SegmentLimit,
}, nil
}