// Code generated by lockedgen using 'go generate'. DO NOT EDIT.

// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.

package satellitedb

import (
	"context"
	"sync"
	"time"

	"github.com/skyrings/skyring-common/tools/uuid"

	"storj.io/storj/internal/memory"
	"storj.io/storj/pkg/identity"
	"storj.io/storj/pkg/macaroon"
	"storj.io/storj/pkg/pb"
	"storj.io/storj/pkg/storj"
	"storj.io/storj/satellite"
	"storj.io/storj/satellite/accounting"
	"storj.io/storj/satellite/attribution"
	"storj.io/storj/satellite/audit"
	"storj.io/storj/satellite/console"
	"storj.io/storj/satellite/metainfo"
	"storj.io/storj/satellite/orders"
	"storj.io/storj/satellite/overlay"
	"storj.io/storj/satellite/repair/irreparable"
	"storj.io/storj/satellite/repair/queue"
	"storj.io/storj/satellite/rewards"
)

// locked implements a locking wrapper around satellite.DB.
type locked struct {
	sync.Locker
	db satellite.DB
}

// newLocked returns database wrapped with locker.
func newLocked(db satellite.DB) satellite.DB {
	return &locked{&sync.Mutex{}, db}
}
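
// Usage sketch (illustrative only, not emitted by lockedgen): the wrapper is
// applied once around a concrete satellite.DB, after which every call made
// through the returned interface serializes on the shared sync.Mutex. The
// constructor name below is hypothetical; only newLocked is defined here.
//
//	var db satellite.DB = openSatelliteDB() // hypothetical constructor
//	db = newLocked(db)                      // all further calls are mutex-guarded
//	err := db.CreateTables()                // runs with the lock held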

// Attribution returns database for partner keys information
func (m *locked) Attribution() attribution.DB {
	m.Lock()
	defer m.Unlock()
	return &lockedAttribution{m.Locker, m.db.Attribution()}
}

// lockedAttribution implements locking wrapper for attribution.DB
type lockedAttribution struct {
	sync.Locker
	db attribution.DB
}

// Get retrieves attribution info using project id and bucket name.
func (m *lockedAttribution) Get(ctx context.Context, projectID uuid.UUID, bucketName []byte) (*attribution.Info, error) {
	m.Lock()
	defer m.Unlock()
	return m.db.Get(ctx, projectID, bucketName)
}

// Insert creates and stores new Info
func (m *lockedAttribution) Insert(ctx context.Context, info *attribution.Info) (*attribution.Info, error) {
	m.Lock()
	defer m.Unlock()
	return m.db.Insert(ctx, info)
}

// QueryAttribution queries partner bucket attribution data
func (m *lockedAttribution) QueryAttribution(ctx context.Context, partnerID uuid.UUID, start time.Time, end time.Time) ([]*attribution.CSVRow, error) {
	m.Lock()
	defer m.Unlock()
	return m.db.QueryAttribution(ctx, partnerID, start, end)
}

// Buckets returns the database to interact with buckets
func (m *locked) Buckets() metainfo.BucketsDB {
	m.Lock()
	defer m.Unlock()
	return &lockedBuckets{m.Locker, m.db.Buckets()}
}

// lockedBuckets implements locking wrapper for metainfo.BucketsDB
type lockedBuckets struct {
	sync.Locker
	db metainfo.BucketsDB
}

// CreateBucket creates a new bucket
func (m *lockedBuckets) CreateBucket(ctx context.Context, bucket storj.Bucket) (_ storj.Bucket, err error) {
	m.Lock()
	defer m.Unlock()
	return m.db.CreateBucket(ctx, bucket)
}

// DeleteBucket deletes a bucket
func (m *lockedBuckets) DeleteBucket(ctx context.Context, bucketName []byte, projectID uuid.UUID) (err error) {
	m.Lock()
	defer m.Unlock()
	return m.db.DeleteBucket(ctx, bucketName, projectID)
}

// GetBucket returns an existing bucket
func (m *lockedBuckets) GetBucket(ctx context.Context, bucketName []byte, projectID uuid.UUID) (bucket storj.Bucket, err error) {
	m.Lock()
	defer m.Unlock()
	return m.db.GetBucket(ctx, bucketName, projectID)
}

// ListBuckets returns all buckets for a project
func (m *lockedBuckets) ListBuckets(ctx context.Context, projectID uuid.UUID, listOpts storj.BucketListOptions, allowedBuckets macaroon.AllowedBuckets) (bucketList storj.BucketList, err error) {
	m.Lock()
	defer m.Unlock()
	return m.db.ListBuckets(ctx, projectID, listOpts, allowedBuckets)
}

// UpdateBucket updates an existing bucket
func (m *lockedBuckets) UpdateBucket(ctx context.Context, bucket storj.Bucket) (_ storj.Bucket, err error) {
	m.Lock()
	defer m.Unlock()
	return m.db.UpdateBucket(ctx, bucket)
}

// Close closes the database
func (m *locked) Close() error {
	m.Lock()
	defer m.Unlock()
	return m.db.Close()
}

// Console returns database for satellite console
func (m *locked) Console() console.DB {
	m.Lock()
	defer m.Unlock()
	return &lockedConsole{m.Locker, m.db.Console()}
}

// lockedConsole implements locking wrapper for console.DB
type lockedConsole struct {
	sync.Locker
	db console.DB
}
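
// Note (illustrative, not part of the generated code): every getter above and
// below hands back a wrapper that embeds the parent's sync.Locker rather than
// creating a new mutex, so a nested, hypothetical call chain such as
//
//	user, err := db.Console().Users().Get(ctx, id)
//
// contends on the same single lock as every other satellite.DB call.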

// APIKeys is a getter for APIKeys repository
func (m *lockedConsole) APIKeys() console.APIKeys {
	m.Lock()
	defer m.Unlock()
	return &lockedAPIKeys{m.Locker, m.db.APIKeys()}
}

// lockedAPIKeys implements locking wrapper for console.APIKeys
type lockedAPIKeys struct {
	sync.Locker
	db console.APIKeys
}

// Create creates and stores new APIKeyInfo
func (m *lockedAPIKeys) Create(ctx context.Context, head []byte, info console.APIKeyInfo) (*console.APIKeyInfo, error) {
	m.Lock()
	defer m.Unlock()
	return m.db.Create(ctx, head, info)
}

// Delete deletes APIKeyInfo from store
func (m *lockedAPIKeys) Delete(ctx context.Context, id uuid.UUID) error {
	m.Lock()
	defer m.Unlock()
	return m.db.Delete(ctx, id)
}

// Get retrieves APIKeyInfo with given ID
func (m *lockedAPIKeys) Get(ctx context.Context, id uuid.UUID) (*console.APIKeyInfo, error) {
	m.Lock()
	defer m.Unlock()
	return m.db.Get(ctx, id)
}

// GetByHead retrieves APIKeyInfo for given key head
func (m *lockedAPIKeys) GetByHead(ctx context.Context, head []byte) (*console.APIKeyInfo, error) {
	m.Lock()
	defer m.Unlock()
	return m.db.GetByHead(ctx, head)
}

// GetByProjectID retrieves list of APIKeys for given projectID
func (m *lockedAPIKeys) GetByProjectID(ctx context.Context, projectID uuid.UUID) ([]console.APIKeyInfo, error) {
	m.Lock()
	defer m.Unlock()
	return m.db.GetByProjectID(ctx, projectID)
}

// Update updates APIKeyInfo in store
func (m *lockedAPIKeys) Update(ctx context.Context, key console.APIKeyInfo) error {
	m.Lock()
	defer m.Unlock()
	return m.db.Update(ctx, key)
}

// BucketUsage is a getter for accounting.BucketUsage repository
func (m *lockedConsole) BucketUsage() accounting.BucketUsage {
	m.Lock()
	defer m.Unlock()
	return &lockedBucketUsage{m.Locker, m.db.BucketUsage()}
}

// lockedBucketUsage implements locking wrapper for accounting.BucketUsage
type lockedBucketUsage struct {
	sync.Locker
	db accounting.BucketUsage
}

func (m *lockedBucketUsage) Create(ctx context.Context, rollup accounting.BucketRollup) (*accounting.BucketRollup, error) {
	m.Lock()
	defer m.Unlock()
	return m.db.Create(ctx, rollup)
}

func (m *lockedBucketUsage) Delete(ctx context.Context, id uuid.UUID) error {
	m.Lock()
	defer m.Unlock()
	return m.db.Delete(ctx, id)
}

func (m *lockedBucketUsage) Get(ctx context.Context, id uuid.UUID) (*accounting.BucketRollup, error) {
	m.Lock()
	defer m.Unlock()
	return m.db.Get(ctx, id)
}

func (m *lockedBucketUsage) GetPaged(ctx context.Context, cursor *accounting.BucketRollupCursor) ([]accounting.BucketRollup, error) {
	m.Lock()
	defer m.Unlock()
	return m.db.GetPaged(ctx, cursor)
}

// ProjectInvoiceStamps is a getter for ProjectInvoiceStamps repository
func (m *lockedConsole) ProjectInvoiceStamps() console.ProjectInvoiceStamps {
	m.Lock()
	defer m.Unlock()
	return &lockedProjectInvoiceStamps{m.Locker, m.db.ProjectInvoiceStamps()}
}

// lockedProjectInvoiceStamps implements locking wrapper for console.ProjectInvoiceStamps
type lockedProjectInvoiceStamps struct {
	sync.Locker
	db console.ProjectInvoiceStamps
}

func (m *lockedProjectInvoiceStamps) Create(ctx context.Context, stamp console.ProjectInvoiceStamp) (*console.ProjectInvoiceStamp, error) {
	m.Lock()
	defer m.Unlock()
	return m.db.Create(ctx, stamp)
}

func (m *lockedProjectInvoiceStamps) GetAll(ctx context.Context, projectID uuid.UUID) ([]console.ProjectInvoiceStamp, error) {
	m.Lock()
	defer m.Unlock()
	return m.db.GetAll(ctx, projectID)
}

func (m *lockedProjectInvoiceStamps) GetByProjectIDStartDate(ctx context.Context, projectID uuid.UUID, startDate time.Time) (*console.ProjectInvoiceStamp, error) {
	m.Lock()
	defer m.Unlock()
	return m.db.GetByProjectIDStartDate(ctx, projectID, startDate)
}

// ProjectMembers is a getter for ProjectMembers repository
func (m *lockedConsole) ProjectMembers() console.ProjectMembers {
	m.Lock()
	defer m.Unlock()
	return &lockedProjectMembers{m.Locker, m.db.ProjectMembers()}
}

// lockedProjectMembers implements locking wrapper for console.ProjectMembers
type lockedProjectMembers struct {
	sync.Locker
	db console.ProjectMembers
}

// Delete is a method for deleting project member by memberID and projectID from the database.
func (m *lockedProjectMembers) Delete(ctx context.Context, memberID uuid.UUID, projectID uuid.UUID) error {
	m.Lock()
	defer m.Unlock()
	return m.db.Delete(ctx, memberID, projectID)
}

// GetByMemberID is a method for querying project members from the database by memberID.
func (m *lockedProjectMembers) GetByMemberID(ctx context.Context, memberID uuid.UUID) ([]console.ProjectMember, error) {
	m.Lock()
	defer m.Unlock()
	return m.db.GetByMemberID(ctx, memberID)
}

// GetPagedByProjectID is a method for querying project members from the database by projectID and cursor
func (m *lockedProjectMembers) GetPagedByProjectID(ctx context.Context, projectID uuid.UUID, cursor console.ProjectMembersCursor) (*console.ProjectMembersPage, error) {
	m.Lock()
	defer m.Unlock()
	return m.db.GetPagedByProjectID(ctx, projectID, cursor)
}

// Insert is a method for inserting project member into the database.
func (m *lockedProjectMembers) Insert(ctx context.Context, memberID uuid.UUID, projectID uuid.UUID) (*console.ProjectMember, error) {
	m.Lock()
	defer m.Unlock()
	return m.db.Insert(ctx, memberID, projectID)
}

// ProjectPayments is a getter for ProjectPayments repository
func (m *lockedConsole) ProjectPayments() console.ProjectPayments {
	m.Lock()
	defer m.Unlock()
	return &lockedProjectPayments{m.Locker, m.db.ProjectPayments()}
}

// lockedProjectPayments implements locking wrapper for console.ProjectPayments
type lockedProjectPayments struct {
	sync.Locker
	db console.ProjectPayments
}

func (m *lockedProjectPayments) Create(ctx context.Context, info console.ProjectPayment) (*console.ProjectPayment, error) {
	m.Lock()
	defer m.Unlock()
	return m.db.Create(ctx, info)
}

func (m *lockedProjectPayments) Delete(ctx context.Context, projectPaymentID uuid.UUID) error {
	m.Lock()
	defer m.Unlock()
	return m.db.Delete(ctx, projectPaymentID)
}

func (m *lockedProjectPayments) GetByID(ctx context.Context, projectPaymentID uuid.UUID) (*console.ProjectPayment, error) {
	m.Lock()
	defer m.Unlock()
	return m.db.GetByID(ctx, projectPaymentID)
}

func (m *lockedProjectPayments) GetByPayerID(ctx context.Context, payerID uuid.UUID) ([]*console.ProjectPayment, error) {
	m.Lock()
	defer m.Unlock()
	return m.db.GetByPayerID(ctx, payerID)
}

func (m *lockedProjectPayments) GetByProjectID(ctx context.Context, projectID uuid.UUID) ([]*console.ProjectPayment, error) {
	m.Lock()
	defer m.Unlock()
	return m.db.GetByProjectID(ctx, projectID)
}

func (m *lockedProjectPayments) GetDefaultByProjectID(ctx context.Context, projectID uuid.UUID) (*console.ProjectPayment, error) {
	m.Lock()
	defer m.Unlock()
	return m.db.GetDefaultByProjectID(ctx, projectID)
}

func (m *lockedProjectPayments) Update(ctx context.Context, info console.ProjectPayment) error {
	m.Lock()
	defer m.Unlock()
	return m.db.Update(ctx, info)
}

// Projects is a getter for Projects repository
func (m *lockedConsole) Projects() console.Projects {
	m.Lock()
	defer m.Unlock()
	return &lockedProjects{m.Locker, m.db.Projects()}
}

// lockedProjects implements locking wrapper for console.Projects
type lockedProjects struct {
	sync.Locker
	db console.Projects
}

// Delete is a method for deleting project by ID from the database.
func (m *lockedProjects) Delete(ctx context.Context, id uuid.UUID) error {
	m.Lock()
	defer m.Unlock()
	return m.db.Delete(ctx, id)
}

// Get is a method for querying project from the database by id.
func (m *lockedProjects) Get(ctx context.Context, id uuid.UUID) (*console.Project, error) {
	m.Lock()
	defer m.Unlock()
	return m.db.Get(ctx, id)
}

// GetAll is a method for querying all projects from the database.
func (m *lockedProjects) GetAll(ctx context.Context) ([]console.Project, error) {
	m.Lock()
	defer m.Unlock()
	return m.db.GetAll(ctx)
}

// GetByUserID is a method for querying all projects from the database by userID.
func (m *lockedProjects) GetByUserID(ctx context.Context, userID uuid.UUID) ([]console.Project, error) {
	m.Lock()
	defer m.Unlock()
	return m.db.GetByUserID(ctx, userID)
}

// GetCreatedBefore retrieves all projects created before the provided date
func (m *lockedProjects) GetCreatedBefore(ctx context.Context, before time.Time) ([]console.Project, error) {
	m.Lock()
	defer m.Unlock()
	return m.db.GetCreatedBefore(ctx, before)
}

// Insert is a method for inserting project into the database.
func (m *lockedProjects) Insert(ctx context.Context, project *console.Project) (*console.Project, error) {
	m.Lock()
	defer m.Unlock()
	return m.db.Insert(ctx, project)
}

// Update is a method for updating project entity.
func (m *lockedProjects) Update(ctx context.Context, project *console.Project) error {
	m.Lock()
	defer m.Unlock()
	return m.db.Update(ctx, project)
}

// RegistrationTokens is a getter for RegistrationTokens repository
func (m *lockedConsole) RegistrationTokens() console.RegistrationTokens {
	m.Lock()
	defer m.Unlock()
	return &lockedRegistrationTokens{m.Locker, m.db.RegistrationTokens()}
}

// lockedRegistrationTokens implements locking wrapper for console.RegistrationTokens
type lockedRegistrationTokens struct {
	sync.Locker
	db console.RegistrationTokens
}

// Create creates new registration token
func (m *lockedRegistrationTokens) Create(ctx context.Context, projectLimit int) (*console.RegistrationToken, error) {
	m.Lock()
	defer m.Unlock()
	return m.db.Create(ctx, projectLimit)
}

// GetByOwnerID retrieves RegTokenInfo by ownerID
func (m *lockedRegistrationTokens) GetByOwnerID(ctx context.Context, ownerID uuid.UUID) (*console.RegistrationToken, error) {
	m.Lock()
	defer m.Unlock()
	return m.db.GetByOwnerID(ctx, ownerID)
}

// GetBySecret retrieves RegTokenInfo with given Secret
func (m *lockedRegistrationTokens) GetBySecret(ctx context.Context, secret console.RegistrationSecret) (*console.RegistrationToken, error) {
	m.Lock()
	defer m.Unlock()
	return m.db.GetBySecret(ctx, secret)
}

// UpdateOwner updates registration token's owner
func (m *lockedRegistrationTokens) UpdateOwner(ctx context.Context, secret console.RegistrationSecret, ownerID uuid.UUID) error {
	m.Lock()
	defer m.Unlock()
	return m.db.UpdateOwner(ctx, secret, ownerID)
}

// ResetPasswordTokens is a getter for ResetPasswordTokens repository
func (m *lockedConsole) ResetPasswordTokens() console.ResetPasswordTokens {
	m.Lock()
	defer m.Unlock()
	return &lockedResetPasswordTokens{m.Locker, m.db.ResetPasswordTokens()}
}

// lockedResetPasswordTokens implements locking wrapper for console.ResetPasswordTokens
type lockedResetPasswordTokens struct {
	sync.Locker
	db console.ResetPasswordTokens
}

// Create creates new reset password token
func (m *lockedResetPasswordTokens) Create(ctx context.Context, ownerID uuid.UUID) (*console.ResetPasswordToken, error) {
	m.Lock()
	defer m.Unlock()
	return m.db.Create(ctx, ownerID)
}

// Delete deletes ResetPasswordToken by ResetPasswordSecret
func (m *lockedResetPasswordTokens) Delete(ctx context.Context, secret console.ResetPasswordSecret) error {
	m.Lock()
	defer m.Unlock()
	return m.db.Delete(ctx, secret)
}

// GetByOwnerID retrieves ResetPasswordToken by ownerID
func (m *lockedResetPasswordTokens) GetByOwnerID(ctx context.Context, ownerID uuid.UUID) (*console.ResetPasswordToken, error) {
	m.Lock()
	defer m.Unlock()
	return m.db.GetByOwnerID(ctx, ownerID)
}

// GetBySecret retrieves ResetPasswordToken with given secret
func (m *lockedResetPasswordTokens) GetBySecret(ctx context.Context, secret console.ResetPasswordSecret) (*console.ResetPasswordToken, error) {
	m.Lock()
	defer m.Unlock()
	return m.db.GetBySecret(ctx, secret)
}

// UsageRollups is a getter for UsageRollups repository
func (m *lockedConsole) UsageRollups() console.UsageRollups {
	m.Lock()
	defer m.Unlock()
	return &lockedUsageRollups{m.Locker, m.db.UsageRollups()}
}

// lockedUsageRollups implements locking wrapper for console.UsageRollups
type lockedUsageRollups struct {
	sync.Locker
	db console.UsageRollups
}

func (m *lockedUsageRollups) GetBucketTotals(ctx context.Context, projectID uuid.UUID, cursor console.BucketUsageCursor, since time.Time, before time.Time) (*console.BucketUsagePage, error) {
	m.Lock()
	defer m.Unlock()
	return m.db.GetBucketTotals(ctx, projectID, cursor, since, before)
}

func (m *lockedUsageRollups) GetBucketUsageRollups(ctx context.Context, projectID uuid.UUID, since time.Time, before time.Time) ([]console.BucketUsageRollup, error) {
	m.Lock()
	defer m.Unlock()
	return m.db.GetBucketUsageRollups(ctx, projectID, since, before)
}

func (m *lockedUsageRollups) GetProjectTotal(ctx context.Context, projectID uuid.UUID, since time.Time, before time.Time) (*console.ProjectUsage, error) {
	m.Lock()
	defer m.Unlock()
	return m.db.GetProjectTotal(ctx, projectID, since, before)
}

// UserCredits is a getter for UserCredits repository
func (m *lockedConsole) UserCredits() console.UserCredits {
	m.Lock()
	defer m.Unlock()
	return &lockedUserCredits{m.Locker, m.db.UserCredits()}
}

// lockedUserCredits implements locking wrapper for console.UserCredits
type lockedUserCredits struct {
	sync.Locker
	db console.UserCredits
}

func (m *lockedUserCredits) Create(ctx context.Context, userCredit console.CreateCredit) error {
	m.Lock()
	defer m.Unlock()
	return m.db.Create(ctx, userCredit)
}

func (m *lockedUserCredits) GetCreditUsage(ctx context.Context, userID uuid.UUID, expirationEndDate time.Time) (*console.UserCreditUsage, error) {
	m.Lock()
	defer m.Unlock()
	return m.db.GetCreditUsage(ctx, userID, expirationEndDate)
}

func (m *lockedUserCredits) UpdateAvailableCredits(ctx context.Context, creditsToCharge int, id uuid.UUID, billingStartDate time.Time) (remainingCharge int, err error) {
	m.Lock()
	defer m.Unlock()
	return m.db.UpdateAvailableCredits(ctx, creditsToCharge, id, billingStartDate)
}

func (m *lockedUserCredits) UpdateEarnedCredits(ctx context.Context, userID uuid.UUID) error {
	m.Lock()
	defer m.Unlock()
	return m.db.UpdateEarnedCredits(ctx, userID)
}

// UserPayments is a getter for UserPayments repository
func (m *lockedConsole) UserPayments() console.UserPayments {
	m.Lock()
	defer m.Unlock()
	return &lockedUserPayments{m.Locker, m.db.UserPayments()}
}

// lockedUserPayments implements locking wrapper for console.UserPayments
type lockedUserPayments struct {
	sync.Locker
	db console.UserPayments
}

func (m *lockedUserPayments) Create(ctx context.Context, info console.UserPayment) (*console.UserPayment, error) {
	m.Lock()
	defer m.Unlock()
	return m.db.Create(ctx, info)
}

func (m *lockedUserPayments) Get(ctx context.Context, userID uuid.UUID) (*console.UserPayment, error) {
	m.Lock()
	defer m.Unlock()
	return m.db.Get(ctx, userID)
}

// Users is a getter for Users repository
func (m *lockedConsole) Users() console.Users {
	m.Lock()
	defer m.Unlock()
	return &lockedUsers{m.Locker, m.db.Users()}
}

// lockedUsers implements locking wrapper for console.Users
type lockedUsers struct {
	sync.Locker
	db console.Users
}

// Delete is a method for deleting user by ID from the database.
func (m *lockedUsers) Delete(ctx context.Context, id uuid.UUID) error {
	m.Lock()
	defer m.Unlock()
	return m.db.Delete(ctx, id)
}

// Get is a method for querying user from the database by id.
func (m *lockedUsers) Get(ctx context.Context, id uuid.UUID) (*console.User, error) {
	m.Lock()
	defer m.Unlock()
	return m.db.Get(ctx, id)
}

// GetByEmail is a method for querying user by email from the database.
func (m *lockedUsers) GetByEmail(ctx context.Context, email string) (*console.User, error) {
	m.Lock()
	defer m.Unlock()
	return m.db.GetByEmail(ctx, email)
}

// Insert is a method for inserting user into the database.
func (m *lockedUsers) Insert(ctx context.Context, user *console.User) (*console.User, error) {
	m.Lock()
	defer m.Unlock()
	return m.db.Insert(ctx, user)
}

// Update is a method for updating user entity.
func (m *lockedUsers) Update(ctx context.Context, user *console.User) error {
	m.Lock()
	defer m.Unlock()
	return m.db.Update(ctx, user)
}

// Containment returns database for containment
func (m *locked) Containment() audit.Containment {
	m.Lock()
	defer m.Unlock()
	return &lockedContainment{m.Locker, m.db.Containment()}
}

// lockedContainment implements locking wrapper for audit.Containment
type lockedContainment struct {
	sync.Locker
	db audit.Containment
}

func (m *lockedContainment) Delete(ctx context.Context, nodeID storj.NodeID) (bool, error) {
	m.Lock()
	defer m.Unlock()
	return m.db.Delete(ctx, nodeID)
}

func (m *lockedContainment) Get(ctx context.Context, nodeID storj.NodeID) (*audit.PendingAudit, error) {
	m.Lock()
	defer m.Unlock()
	return m.db.Get(ctx, nodeID)
}

func (m *lockedContainment) IncrementPending(ctx context.Context, pendingAudit *audit.PendingAudit) error {
	m.Lock()
	defer m.Unlock()
	return m.db.IncrementPending(ctx, pendingAudit)
}

// CreateSchema sets the schema
func (m *locked) CreateSchema(schema string) error {
	m.Lock()
	defer m.Unlock()
	return m.db.CreateSchema(schema)
}

// CreateTables initializes the database
func (m *locked) CreateTables() error {
	m.Lock()
	defer m.Unlock()
	return m.db.CreateTables()
}

// DropSchema drops the schema
func (m *locked) DropSchema(schema string) error {
	m.Lock()
	defer m.Unlock()
	return m.db.DropSchema(schema)
}

// Irreparable returns database for failed repairs
func (m *locked) Irreparable() irreparable.DB {
	m.Lock()
	defer m.Unlock()
	return &lockedIrreparable{m.Locker, m.db.Irreparable()}
}

// lockedIrreparable implements locking wrapper for irreparable.DB
type lockedIrreparable struct {
	sync.Locker
	db irreparable.DB
}

// Delete removes irreparable segment info based on segmentPath.
func (m *lockedIrreparable) Delete(ctx context.Context, segmentPath []byte) error {
	m.Lock()
	defer m.Unlock()
	return m.db.Delete(ctx, segmentPath)
}

// Get returns irreparable segment info based on segmentPath.
func (m *lockedIrreparable) Get(ctx context.Context, segmentPath []byte) (*pb.IrreparableSegment, error) {
	m.Lock()
	defer m.Unlock()
	return m.db.Get(ctx, segmentPath)
}

// GetLimited returns a list of irreparable segment info starting after the last segment info we retrieved
func (m *lockedIrreparable) GetLimited(ctx context.Context, limit int, lastSeenSegmentPath []byte) ([]*pb.IrreparableSegment, error) {
	m.Lock()
	defer m.Unlock()
	return m.db.GetLimited(ctx, limit, lastSeenSegmentPath)
}

// IncrementRepairAttempts increments the repair attempts.
func (m *lockedIrreparable) IncrementRepairAttempts(ctx context.Context, segmentInfo *pb.IrreparableSegment) error {
	m.Lock()
	defer m.Unlock()
	return m.db.IncrementRepairAttempts(ctx, segmentInfo)
}

// Orders returns database for orders
func (m *locked) Orders() orders.DB {
	m.Lock()
	defer m.Unlock()
	return &lockedOrders{m.Locker, m.db.Orders()}
}

// lockedOrders implements locking wrapper for orders.DB
type lockedOrders struct {
	sync.Locker
	db orders.DB
}

// CreateSerialInfo creates serial number entry in database
func (m *lockedOrders) CreateSerialInfo(ctx context.Context, serialNumber storj.SerialNumber, bucketID []byte, limitExpiration time.Time) error {
	m.Lock()
	defer m.Unlock()
	return m.db.CreateSerialInfo(ctx, serialNumber, bucketID, limitExpiration)
}

// GetBucketBandwidth gets total bucket bandwidth for a period of time
func (m *lockedOrders) GetBucketBandwidth(ctx context.Context, projectID uuid.UUID, bucketName []byte, from time.Time, to time.Time) (int64, error) {
	m.Lock()
	defer m.Unlock()
	return m.db.GetBucketBandwidth(ctx, projectID, bucketName, from, to)
}

// GetStorageNodeBandwidth gets total storage node bandwidth for a period of time
func (m *lockedOrders) GetStorageNodeBandwidth(ctx context.Context, nodeID storj.NodeID, from time.Time, to time.Time) (int64, error) {
	m.Lock()
	defer m.Unlock()
	return m.db.GetStorageNodeBandwidth(ctx, nodeID, from, to)
}

// ProcessOrders takes a list of order requests and processes them in a batch
func (m *lockedOrders) ProcessOrders(ctx context.Context, requests []*orders.ProcessOrderRequest) (responses []*orders.ProcessOrderResponse, err error) {
	m.Lock()
	defer m.Unlock()
	return m.db.ProcessOrders(ctx, requests)
}

// UnuseSerialNumber removes the serial number -> storage node ID pair from the database
func (m *lockedOrders) UnuseSerialNumber(ctx context.Context, serialNumber storj.SerialNumber, storageNodeID storj.NodeID) error {
	m.Lock()
	defer m.Unlock()
	return m.db.UnuseSerialNumber(ctx, serialNumber, storageNodeID)
}

// UpdateBucketBandwidthAllocation updates 'allocated' bandwidth for given bucket
func (m *lockedOrders) UpdateBucketBandwidthAllocation(ctx context.Context, projectID uuid.UUID, bucketName []byte, action pb.PieceAction, amount int64, intervalStart time.Time) error {
	m.Lock()
	defer m.Unlock()
	return m.db.UpdateBucketBandwidthAllocation(ctx, projectID, bucketName, action, amount, intervalStart)
}

// UpdateBucketBandwidthInline updates 'inline' bandwidth for given bucket
func (m *lockedOrders) UpdateBucketBandwidthInline(ctx context.Context, projectID uuid.UUID, bucketName []byte, action pb.PieceAction, amount int64, intervalStart time.Time) error {
	m.Lock()
	defer m.Unlock()
	return m.db.UpdateBucketBandwidthInline(ctx, projectID, bucketName, action, amount, intervalStart)
}

// UpdateBucketBandwidthSettle updates 'settled' bandwidth for given bucket
func (m *lockedOrders) UpdateBucketBandwidthSettle(ctx context.Context, projectID uuid.UUID, bucketName []byte, action pb.PieceAction, amount int64, intervalStart time.Time) error {
	m.Lock()
	defer m.Unlock()
	return m.db.UpdateBucketBandwidthSettle(ctx, projectID, bucketName, action, amount, intervalStart)
}

// UpdateStoragenodeBandwidthAllocation updates 'allocated' bandwidth for given storage nodes
func (m *lockedOrders) UpdateStoragenodeBandwidthAllocation(ctx context.Context, storageNodes []storj.NodeID, action pb.PieceAction, amount int64, intervalStart time.Time) error {
	m.Lock()
	defer m.Unlock()
	return m.db.UpdateStoragenodeBandwidthAllocation(ctx, storageNodes, action, amount, intervalStart)
}

// UpdateStoragenodeBandwidthSettle updates 'settled' bandwidth for given storage node
func (m *lockedOrders) UpdateStoragenodeBandwidthSettle(ctx context.Context, storageNode storj.NodeID, action pb.PieceAction, amount int64, intervalStart time.Time) error {
	m.Lock()
	defer m.Unlock()
	return m.db.UpdateStoragenodeBandwidthSettle(ctx, storageNode, action, amount, intervalStart)
}

// UseSerialNumber creates serial number entry in database
func (m *lockedOrders) UseSerialNumber(ctx context.Context, serialNumber storj.SerialNumber, storageNodeID storj.NodeID) ([]byte, error) {
	m.Lock()
	defer m.Unlock()
	return m.db.UseSerialNumber(ctx, serialNumber, storageNodeID)
}

// OverlayCache returns database for caching overlay information
func (m *locked) OverlayCache() overlay.DB {
	m.Lock()
	defer m.Unlock()
	return &lockedOverlayCache{m.Locker, m.db.OverlayCache()}
}

// lockedOverlayCache implements locking wrapper for overlay.DB
type lockedOverlayCache struct {
	sync.Locker
	db overlay.DB
}

// AllPieceCounts returns a map of node IDs to piece counts from the db.
func (m *lockedOverlayCache) AllPieceCounts(ctx context.Context) (pieceCounts map[storj.NodeID]int, err error) {
	m.Lock()
	defer m.Unlock()
	return m.db.AllPieceCounts(ctx)
}

// BatchUpdateStats updates multiple storagenodes' stats in one transaction
func (m *lockedOverlayCache) BatchUpdateStats(ctx context.Context, updateRequests []*overlay.UpdateRequest, batchSize int) (failed storj.NodeIDList, err error) {
	m.Lock()
	defer m.Unlock()
	return m.db.BatchUpdateStats(ctx, updateRequests, batchSize)
}

// Get looks up the node by nodeID
func (m *lockedOverlayCache) Get(ctx context.Context, nodeID storj.NodeID) (*overlay.NodeDossier, error) {
	m.Lock()
	defer m.Unlock()
	return m.db.Get(ctx, nodeID)
}

// IsVetted returns whether or not the node reaches reputable thresholds
func (m *lockedOverlayCache) IsVetted(ctx context.Context, id storj.NodeID, criteria *overlay.NodeCriteria) (bool, error) {
	m.Lock()
	defer m.Unlock()
	return m.db.IsVetted(ctx, id, criteria)
}

// KnownOffline filters a set of nodes to offline nodes
func (m *lockedOverlayCache) KnownOffline(ctx context.Context, a1 *overlay.NodeCriteria, a2 storj.NodeIDList) (storj.NodeIDList, error) {
	m.Lock()
	defer m.Unlock()
	return m.db.KnownOffline(ctx, a1, a2)
}

// KnownUnreliableOrOffline filters a set of nodes to unhealthy or offline nodes, independent of new
func (m *lockedOverlayCache) KnownUnreliableOrOffline(ctx context.Context, a1 *overlay.NodeCriteria, a2 storj.NodeIDList) (storj.NodeIDList, error) {
	m.Lock()
	defer m.Unlock()
	return m.db.KnownUnreliableOrOffline(ctx, a1, a2)
}

// Paginate will page through the database nodes
func (m *lockedOverlayCache) Paginate(ctx context.Context, offset int64, limit int) ([]*overlay.NodeDossier, bool, error) {
	m.Lock()
	defer m.Unlock()
	return m.db.Paginate(ctx, offset, limit)
}

// PaginateQualified will page through the qualified nodes
func (m *lockedOverlayCache) PaginateQualified(ctx context.Context, offset int64, limit int) ([]*pb.Node, bool, error) {
	m.Lock()
	defer m.Unlock()
	return m.db.PaginateQualified(ctx, offset, limit)
}

// Reliable returns all nodes that are reliable
func (m *lockedOverlayCache) Reliable(ctx context.Context, a1 *overlay.NodeCriteria) (storj.NodeIDList, error) {
	m.Lock()
	defer m.Unlock()
	return m.db.Reliable(ctx, a1)
}

// SelectNewStorageNodes looks up nodes based on new node criteria
func (m *lockedOverlayCache) SelectNewStorageNodes(ctx context.Context, count int, criteria *overlay.NodeCriteria) ([]*pb.Node, error) {
	m.Lock()
	defer m.Unlock()
	return m.db.SelectNewStorageNodes(ctx, count, criteria)
}

// SelectStorageNodes looks up nodes based on criteria
func (m *lockedOverlayCache) SelectStorageNodes(ctx context.Context, count int, criteria *overlay.NodeCriteria) ([]*pb.Node, error) {
	m.Lock()
	defer m.Unlock()
	return m.db.SelectStorageNodes(ctx, count, criteria)
}

// UpdateAddress updates node address
func (m *lockedOverlayCache) UpdateAddress(ctx context.Context, value *pb.Node, defaults overlay.NodeSelectionConfig) error {
	m.Lock()
	defer m.Unlock()
	return m.db.UpdateAddress(ctx, value, defaults)
}

// UpdateNodeInfo updates node dossier with info requested from the node itself like node type, email, wallet, capacity, and version.
func (m *lockedOverlayCache) UpdateNodeInfo(ctx context.Context, node storj.NodeID, nodeInfo *pb.InfoResponse) (stats *overlay.NodeDossier, err error) {
	m.Lock()
	defer m.Unlock()
	return m.db.UpdateNodeInfo(ctx, node, nodeInfo)
}

// UpdatePieceCounts sets the piece count field for the given node IDs.
func (m *lockedOverlayCache) UpdatePieceCounts(ctx context.Context, pieceCounts map[storj.NodeID]int) (err error) {
	m.Lock()
	defer m.Unlock()
	return m.db.UpdatePieceCounts(ctx, pieceCounts)
}

// UpdateStats updates all parts of a single storagenode's stats.
func (m *lockedOverlayCache) UpdateStats(ctx context.Context, request *overlay.UpdateRequest) (stats *overlay.NodeStats, err error) {
	m.Lock()
	defer m.Unlock()
	return m.db.UpdateStats(ctx, request)
}

// UpdateUptime updates a single storagenode's uptime stats.
func (m *lockedOverlayCache) UpdateUptime(ctx context.Context, nodeID storj.NodeID, isUp bool, lambda float64, weight float64, uptimeDQ float64) (stats *overlay.NodeStats, err error) {
	m.Lock()
	defer m.Unlock()
	return m.db.UpdateUptime(ctx, nodeID, isUp, lambda, weight, uptimeDQ)
}

// PeerIdentities returns a storage for peer identities
func (m *locked) PeerIdentities() overlay.PeerIdentities {
	m.Lock()
	defer m.Unlock()
	return &lockedPeerIdentities{m.Locker, m.db.PeerIdentities()}
}

// lockedPeerIdentities implements locking wrapper for overlay.PeerIdentities
type lockedPeerIdentities struct {
	sync.Locker
	db overlay.PeerIdentities
}

// BatchGet gets all nodes' peer identities in a transaction
func (m *lockedPeerIdentities) BatchGet(ctx context.Context, a1 storj.NodeIDList) (_ []*identity.PeerIdentity, err error) {
	m.Lock()
	defer m.Unlock()
	return m.db.BatchGet(ctx, a1)
}

// Get gets peer identity
func (m *lockedPeerIdentities) Get(ctx context.Context, a1 storj.NodeID) (*identity.PeerIdentity, error) {
	m.Lock()
	defer m.Unlock()
	return m.db.Get(ctx, a1)
}

// Set adds a peer identity entry for a node
func (m *lockedPeerIdentities) Set(ctx context.Context, a1 storj.NodeID, a2 *identity.PeerIdentity) error {
	m.Lock()
	defer m.Unlock()
	return m.db.Set(ctx, a1, a2)
}

// ProjectAccounting returns database for storing information about project data use
func (m *locked) ProjectAccounting() accounting.ProjectAccounting {
	m.Lock()
	defer m.Unlock()
	return &lockedProjectAccounting{m.Locker, m.db.ProjectAccounting()}
}

// lockedProjectAccounting implements locking wrapper for accounting.ProjectAccounting
type lockedProjectAccounting struct {
	sync.Locker
	db accounting.ProjectAccounting
}

// CreateStorageTally creates a record for BucketStorageTally in the accounting DB table
func (m *lockedProjectAccounting) CreateStorageTally(ctx context.Context, tally accounting.BucketStorageTally) error {
	m.Lock()
	defer m.Unlock()
	return m.db.CreateStorageTally(ctx, tally)
}

// GetAllocatedBandwidthTotal returns the sum of GET bandwidth usage allocated for a projectID in the past time frame
func (m *lockedProjectAccounting) GetAllocatedBandwidthTotal(ctx context.Context, projectID uuid.UUID, from time.Time) (int64, error) {
	m.Lock()
	defer m.Unlock()
	return m.db.GetAllocatedBandwidthTotal(ctx, projectID, from)
}

// GetProjectUsageLimits returns project usage limit
func (m *lockedProjectAccounting) GetProjectUsageLimits(ctx context.Context, projectID uuid.UUID) (memory.Size, error) {
	m.Lock()
	defer m.Unlock()
	return m.db.GetProjectUsageLimits(ctx, projectID)
}

// GetStorageTotals returns the current inline and remote storage usage for a projectID
func (m *lockedProjectAccounting) GetStorageTotals(ctx context.Context, projectID uuid.UUID) (int64, int64, error) {
	m.Lock()
	defer m.Unlock()
	return m.db.GetStorageTotals(ctx, projectID)
}

// SaveTallies saves the latest project info
func (m *lockedProjectAccounting) SaveTallies(ctx context.Context, intervalStart time.Time, bucketTallies map[string]*accounting.BucketTally) ([]accounting.BucketTally, error) {
	m.Lock()
	defer m.Unlock()
	return m.db.SaveTallies(ctx, intervalStart, bucketTallies)
}

// RepairQueue returns queue for segments that need repairing
func (m *locked) RepairQueue() queue.RepairQueue {
	m.Lock()
	defer m.Unlock()
	return &lockedRepairQueue{m.Locker, m.db.RepairQueue()}
}

// lockedRepairQueue implements locking wrapper for queue.RepairQueue
type lockedRepairQueue struct {
	sync.Locker
	db queue.RepairQueue
}

// Count counts the number of segments in the repair queue.
func (m *lockedRepairQueue) Count(ctx context.Context) (count int, err error) {
	m.Lock()
	defer m.Unlock()
	return m.db.Count(ctx)
}

// Delete removes an injured segment.
func (m *lockedRepairQueue) Delete(ctx context.Context, s *pb.InjuredSegment) error {
	m.Lock()
	defer m.Unlock()
	return m.db.Delete(ctx, s)
}

// Insert adds an injured segment.
func (m *lockedRepairQueue) Insert(ctx context.Context, s *pb.InjuredSegment) error {
	m.Lock()
	defer m.Unlock()
	return m.db.Insert(ctx, s)
}

// Select gets an injured segment.
func (m *lockedRepairQueue) Select(ctx context.Context) (*pb.InjuredSegment, error) {
	m.Lock()
	defer m.Unlock()
	return m.db.Select(ctx)
}

// SelectN lists up to limit injured segments.
func (m *lockedRepairQueue) SelectN(ctx context.Context, limit int) ([]pb.InjuredSegment, error) {
	m.Lock()
	defer m.Unlock()
	return m.db.SelectN(ctx, limit)
}

// Rewards returns database for marketing admin GUI
func (m *locked) Rewards() rewards.DB {
	m.Lock()
	defer m.Unlock()
	return &lockedRewards{m.Locker, m.db.Rewards()}
}

// lockedRewards implements locking wrapper for rewards.DB
type lockedRewards struct {
	sync.Locker
	db rewards.DB
}

func (m *lockedRewards) Create(ctx context.Context, offer *rewards.NewOffer) (*rewards.Offer, error) {
	m.Lock()
	defer m.Unlock()
	return m.db.Create(ctx, offer)
}

func (m *lockedRewards) Finish(ctx context.Context, offerID int) error {
	m.Lock()
	defer m.Unlock()
	return m.db.Finish(ctx, offerID)
}

func (m *lockedRewards) GetActiveOffersByType(ctx context.Context, offerType rewards.OfferType) (rewards.Offers, error) {
	m.Lock()
	defer m.Unlock()
	return m.db.GetActiveOffersByType(ctx, offerType)
}

func (m *lockedRewards) ListAll(ctx context.Context) (rewards.Offers, error) {
	m.Lock()
	defer m.Unlock()
	return m.db.ListAll(ctx)
}

// StoragenodeAccounting returns database for storing information about storagenode use
func (m *locked) StoragenodeAccounting() accounting.StoragenodeAccounting {
	m.Lock()
	defer m.Unlock()
	return &lockedStoragenodeAccounting{m.Locker, m.db.StoragenodeAccounting()}
}

// lockedStoragenodeAccounting implements locking wrapper for accounting.StoragenodeAccounting
type lockedStoragenodeAccounting struct {
	sync.Locker
	db accounting.StoragenodeAccounting
}

// DeleteTalliesBefore deletes all tallies prior to some time
func (m *lockedStoragenodeAccounting) DeleteTalliesBefore(ctx context.Context, latestRollup time.Time) error {
	m.Lock()
	defer m.Unlock()
	return m.db.DeleteTalliesBefore(ctx, latestRollup)
}

// GetBandwidthSince retrieves all bandwidth rollup entries since latestRollup
func (m *lockedStoragenodeAccounting) GetBandwidthSince(ctx context.Context, latestRollup time.Time) ([]*accounting.StoragenodeBandwidthRollup, error) {
	m.Lock()
	defer m.Unlock()
	return m.db.GetBandwidthSince(ctx, latestRollup)
}

// GetTallies retrieves all tallies
func (m *lockedStoragenodeAccounting) GetTallies(ctx context.Context) ([]*accounting.StoragenodeStorageTally, error) {
	m.Lock()
	defer m.Unlock()
	return m.db.GetTallies(ctx)
}

// GetTalliesSince retrieves all tallies since latestRollup
func (m *lockedStoragenodeAccounting) GetTalliesSince(ctx context.Context, latestRollup time.Time) ([]*accounting.StoragenodeStorageTally, error) {
	m.Lock()
	defer m.Unlock()
	return m.db.GetTalliesSince(ctx, latestRollup)
}

// LastTimestamp records and returns the latest last tallied time.
func (m *lockedStoragenodeAccounting) LastTimestamp(ctx context.Context, timestampType string) (time.Time, error) {
	m.Lock()
	defer m.Unlock()
	return m.db.LastTimestamp(ctx, timestampType)
}

// QueryPaymentInfo queries Nodes and Accounting_Rollup on nodeID
func (m *lockedStoragenodeAccounting) QueryPaymentInfo(ctx context.Context, start time.Time, end time.Time) ([]*accounting.CSVRow, error) {
	m.Lock()
	defer m.Unlock()
	return m.db.QueryPaymentInfo(ctx, start, end)
}

// QueryStorageNodeUsage returns slice of StorageNodeUsage for given period
func (m *lockedStoragenodeAccounting) QueryStorageNodeUsage(ctx context.Context, nodeID storj.NodeID, start time.Time, end time.Time) ([]accounting.StorageNodeUsage, error) {
	m.Lock()
	defer m.Unlock()
	return m.db.QueryStorageNodeUsage(ctx, nodeID, start, end)
}

// SaveRollup records tally and bandwidth rollup aggregations to the database
func (m *lockedStoragenodeAccounting) SaveRollup(ctx context.Context, latestTally time.Time, stats accounting.RollupStats) error {
	m.Lock()
	defer m.Unlock()
	return m.db.SaveRollup(ctx, latestTally, stats)
}

// SaveTallies records tallies of data at rest
func (m *lockedStoragenodeAccounting) SaveTallies(ctx context.Context, latestTally time.Time, nodeData map[storj.NodeID]float64) error {
	m.Lock()
	defer m.Unlock()
	return m.db.SaveTallies(ctx, latestTally, nodeData)
}