2019-01-02 17:53:27 +00:00
|
|
|
// Code generated by lockedgen using 'go generate'. DO NOT EDIT.
|
|
|
|
|
2019-01-11 16:07:26 +00:00
|
|
|
// Copyright (C) 2019 Storj Labs, Inc.
|
2019-01-02 17:53:27 +00:00
|
|
|
// See LICENSE for copying information.
|
|
|
|
|
|
|
|
package satellitedb
|
|
|
|
|
|
|
|
import (
|
|
|
|
"context"
|
2019-02-07 19:22:49 +00:00
|
|
|
"crypto"
|
2019-01-02 17:53:27 +00:00
|
|
|
"sync"
|
|
|
|
"time"
|
|
|
|
|
2019-01-16 20:23:28 +00:00
|
|
|
"github.com/skyrings/skyring-common/tools/uuid"
|
|
|
|
|
2019-01-02 17:53:27 +00:00
|
|
|
"storj.io/storj/pkg/accounting"
|
|
|
|
"storj.io/storj/pkg/bwagreement"
|
2019-02-07 19:22:49 +00:00
|
|
|
"storj.io/storj/pkg/certdb"
|
2019-01-02 17:53:27 +00:00
|
|
|
"storj.io/storj/pkg/datarepair/irreparable"
|
|
|
|
"storj.io/storj/pkg/datarepair/queue"
|
2019-01-15 16:08:45 +00:00
|
|
|
"storj.io/storj/pkg/overlay"
|
2019-01-02 17:53:27 +00:00
|
|
|
"storj.io/storj/pkg/pb"
|
|
|
|
"storj.io/storj/pkg/storj"
|
|
|
|
"storj.io/storj/satellite"
|
2019-01-16 20:23:28 +00:00
|
|
|
"storj.io/storj/satellite/console"
|
2019-03-27 10:24:35 +00:00
|
|
|
"storj.io/storj/satellite/orders"
|
2019-01-02 17:53:27 +00:00
|
|
|
)
|
|
|
|
|
|
|
|
// locked implements a locking wrapper around satellite.DB.
// Every method acquires the shared mutex before delegating to the wrapped
// database, serializing all access through a single lock.
type locked struct {
	sync.Locker
	db satellite.DB
}

// newLocked returns database wrapped with locker.
// The same Locker instance is shared with every sub-wrapper handed out by
// the getter methods, so all wrapped databases contend on one mutex.
func newLocked(db satellite.DB) satellite.DB {
	return &locked{&sync.Mutex{}, db}
}

// Accounting returns database for storing information about data use
func (m *locked) Accounting() accounting.DB {
	m.Lock()
	defer m.Unlock()
	return &lockedAccounting{m.Locker, m.db.Accounting()}
}
|
|
|
|
|
2019-01-16 20:23:28 +00:00
|
|
|
// lockedAccounting implements locking wrapper for accounting.DB
type lockedAccounting struct {
	sync.Locker
	db accounting.DB
}

// CreateBucketStorageTally creates a record for BucketStorageTally in the accounting DB table
func (m *lockedAccounting) CreateBucketStorageTally(ctx context.Context, tally accounting.BucketStorageTally) error {
	m.Lock()
	defer m.Unlock()
	return m.db.CreateBucketStorageTally(ctx, tally)
}

// DeleteRawBefore deletes all raw tallies prior to some time
func (m *lockedAccounting) DeleteRawBefore(ctx context.Context, latestRollup time.Time) error {
	m.Lock()
	defer m.Unlock()
	return m.db.DeleteRawBefore(ctx, latestRollup)
}

// GetRaw retrieves all raw tallies
func (m *lockedAccounting) GetRaw(ctx context.Context) ([]*accounting.Raw, error) {
	m.Lock()
	defer m.Unlock()
	return m.db.GetRaw(ctx)
}

// GetRawSince retrieves all raw tallies since latestRollup
func (m *lockedAccounting) GetRawSince(ctx context.Context, latestRollup time.Time) ([]*accounting.Raw, error) {
	m.Lock()
	defer m.Unlock()
	return m.db.GetRawSince(ctx, latestRollup)
}

// GetStoragenodeBandwidthSince retrieves all storagenode_bandwidth_rollup entries since latestRollup
func (m *lockedAccounting) GetStoragenodeBandwidthSince(ctx context.Context, latestRollup time.Time) ([]*accounting.StoragenodeBandwidthRollup, error) {
	m.Lock()
	defer m.Unlock()
	return m.db.GetStoragenodeBandwidthSince(ctx, latestRollup)
}

// LastTimestamp returns the latest tallied time recorded for the given timestamp type.
func (m *lockedAccounting) LastTimestamp(ctx context.Context, timestampType string) (time.Time, error) {
	m.Lock()
	defer m.Unlock()
	return m.db.LastTimestamp(ctx, timestampType)
}

// ProjectAllocatedBandwidthTotal returns the sum of GET bandwidth usage allocated for a projectID in the past time frame
func (m *lockedAccounting) ProjectAllocatedBandwidthTotal(ctx context.Context, bucketID []byte, from time.Time) (int64, error) {
	m.Lock()
	defer m.Unlock()
	return m.db.ProjectAllocatedBandwidthTotal(ctx, bucketID, from)
}

// ProjectStorageTotals returns the current inline and remote storage usage for a projectID
func (m *lockedAccounting) ProjectStorageTotals(ctx context.Context, projectID uuid.UUID) (int64, int64, error) {
	m.Lock()
	defer m.Unlock()
	return m.db.ProjectStorageTotals(ctx, projectID)
}

// QueryPaymentInfo queries accounting payment info (as CSV rows) for the given time range.
func (m *lockedAccounting) QueryPaymentInfo(ctx context.Context, start time.Time, end time.Time) ([]*accounting.CSVRow, error) {
	m.Lock()
	defer m.Unlock()
	return m.db.QueryPaymentInfo(ctx, start, end)
}

// SaveAtRestRaw records raw tallies of at-rest-data.
func (m *lockedAccounting) SaveAtRestRaw(ctx context.Context, latestTally time.Time, created time.Time, nodeData map[storj.NodeID]float64) error {
	m.Lock()
	defer m.Unlock()
	return m.db.SaveAtRestRaw(ctx, latestTally, created, nodeData)
}

// SaveBucketTallies saves the latest bucket info
func (m *lockedAccounting) SaveBucketTallies(ctx context.Context, intervalStart time.Time, bucketTallies map[string]*accounting.BucketTally) ([]accounting.BucketTally, error) {
	m.Lock()
	defer m.Unlock()
	return m.db.SaveBucketTallies(ctx, intervalStart, bucketTallies)
}

// SaveRollup records rollup stats to the database
func (m *lockedAccounting) SaveRollup(ctx context.Context, latestTally time.Time, stats accounting.RollupStats) error {
	m.Lock()
	defer m.Unlock()
	return m.db.SaveRollup(ctx, latestTally, stats)
}
|
|
|
|
|
2019-01-02 17:53:27 +00:00
|
|
|
// BandwidthAgreement returns database for storing bandwidth agreements
func (m *locked) BandwidthAgreement() bwagreement.DB {
	m.Lock()
	defer m.Unlock()
	return &lockedBandwidthAgreement{m.Locker, m.db.BandwidthAgreement()}
}
|
|
|
|
|
2019-01-16 20:23:28 +00:00
|
|
|
// lockedBandwidthAgreement implements locking wrapper for bwagreement.DB
type lockedBandwidthAgreement struct {
	sync.Locker
	db bwagreement.DB
}

// DeleteExpired deletes orders that are expired and were created before some time
func (m *lockedBandwidthAgreement) DeleteExpired(ctx context.Context, a1 time.Time, a2 time.Time) error {
	m.Lock()
	defer m.Unlock()
	return m.db.DeleteExpired(ctx, a1, a2)
}

// GetExpired gets orders that are expired and were created before some time
func (m *lockedBandwidthAgreement) GetExpired(ctx context.Context, a1 time.Time, a2 time.Time) ([]bwagreement.SavedOrder, error) {
	m.Lock()
	defer m.Unlock()
	return m.db.GetExpired(ctx, a1, a2)
}

// GetTotals returns the sum of each bandwidth type after (excluding) a given date range
func (m *lockedBandwidthAgreement) GetTotals(ctx context.Context, a1 time.Time, a2 time.Time) (map[storj.NodeID][]int64, error) {
	m.Lock()
	defer m.Unlock()
	return m.db.GetTotals(ctx, a1, a2)
}

// GetUplinkStats returns stats about an uplink
func (m *lockedBandwidthAgreement) GetUplinkStats(ctx context.Context, a1 time.Time, a2 time.Time) ([]bwagreement.UplinkStat, error) {
	m.Lock()
	defer m.Unlock()
	return m.db.GetUplinkStats(ctx, a1, a2)
}

// SaveOrder saves an order for accounting
func (m *lockedBandwidthAgreement) SaveOrder(ctx context.Context, a1 *pb.RenterBandwidthAllocation) error {
	m.Lock()
	defer m.Unlock()
	return m.db.SaveOrder(ctx, a1)
}
|
|
|
|
|
2019-02-07 19:22:49 +00:00
|
|
|
// CertDB returns database for storing uplink's public key & ID
func (m *locked) CertDB() certdb.DB {
	m.Lock()
	defer m.Unlock()
	return &lockedCertDB{m.Locker, m.db.CertDB()}
}
|
|
|
|
|
|
|
|
// lockedCertDB implements locking wrapper for certdb.DB
type lockedCertDB struct {
	sync.Locker
	db certdb.DB
}

// GetPublicKey gets the public key of uplink corresponding to uplink id
func (m *lockedCertDB) GetPublicKey(ctx context.Context, a1 storj.NodeID) (crypto.PublicKey, error) {
	m.Lock()
	defer m.Unlock()
	return m.db.GetPublicKey(ctx, a1)
}

// SavePublicKey stores the public key of the uplink corresponding to uplink id.
func (m *lockedCertDB) SavePublicKey(ctx context.Context, a1 storj.NodeID, a2 crypto.PublicKey) error {
	m.Lock()
	defer m.Unlock()
	return m.db.SavePublicKey(ctx, a1, a2)
}
|
|
|
|
|
2019-01-02 17:53:27 +00:00
|
|
|
// Close closes the database
func (m *locked) Close() error {
	m.Lock()
	defer m.Unlock()
	return m.db.Close()
}
|
|
|
|
|
2019-01-16 20:23:28 +00:00
|
|
|
// Console returns database for satellite console
func (m *locked) Console() console.DB {
	m.Lock()
	defer m.Unlock()
	return &lockedConsole{m.Locker, m.db.Console()}
}
|
|
|
|
|
|
|
|
// lockedConsole implements locking wrapper for console.DB
type lockedConsole struct {
	sync.Locker
	db console.DB
}
|
|
|
|
|
|
|
|
// APIKeys is a getter for APIKeys repository
func (m *lockedConsole) APIKeys() console.APIKeys {
	m.Lock()
	defer m.Unlock()
	return &lockedAPIKeys{m.Locker, m.db.APIKeys()}
}
|
|
|
|
|
|
|
|
// lockedAPIKeys implements locking wrapper for console.APIKeys
type lockedAPIKeys struct {
	sync.Locker
	db console.APIKeys
}

// Create creates and stores new APIKeyInfo
func (m *lockedAPIKeys) Create(ctx context.Context, key console.APIKey, info console.APIKeyInfo) (*console.APIKeyInfo, error) {
	m.Lock()
	defer m.Unlock()
	return m.db.Create(ctx, key, info)
}

// Delete deletes APIKeyInfo from store
func (m *lockedAPIKeys) Delete(ctx context.Context, id uuid.UUID) error {
	m.Lock()
	defer m.Unlock()
	return m.db.Delete(ctx, id)
}

// Get retrieves APIKeyInfo with given ID
func (m *lockedAPIKeys) Get(ctx context.Context, id uuid.UUID) (*console.APIKeyInfo, error) {
	m.Lock()
	defer m.Unlock()
	return m.db.Get(ctx, id)
}

// GetByKey retrieves APIKeyInfo for given key
func (m *lockedAPIKeys) GetByKey(ctx context.Context, key console.APIKey) (*console.APIKeyInfo, error) {
	m.Lock()
	defer m.Unlock()
	return m.db.GetByKey(ctx, key)
}

// GetByProjectID retrieves list of APIKeys for given projectID
func (m *lockedAPIKeys) GetByProjectID(ctx context.Context, projectID uuid.UUID) ([]console.APIKeyInfo, error) {
	m.Lock()
	defer m.Unlock()
	return m.db.GetByProjectID(ctx, projectID)
}

// Update updates APIKeyInfo in store
func (m *lockedAPIKeys) Update(ctx context.Context, key console.APIKeyInfo) error {
	m.Lock()
	defer m.Unlock()
	return m.db.Update(ctx, key)
}
|
|
|
|
|
2019-03-06 15:54:48 +00:00
|
|
|
// BucketUsage is a getter for accounting.BucketUsage repository
func (m *lockedConsole) BucketUsage() accounting.BucketUsage {
	m.Lock()
	defer m.Unlock()
	return &lockedBucketUsage{m.Locker, m.db.BucketUsage()}
}
|
|
|
|
|
|
|
|
// lockedBucketUsage implements locking wrapper for accounting.BucketUsage
type lockedBucketUsage struct {
	sync.Locker
	db accounting.BucketUsage
}

// Create stores a new bucket rollup and returns the stored record.
func (m *lockedBucketUsage) Create(ctx context.Context, rollup accounting.BucketRollup) (*accounting.BucketRollup, error) {
	m.Lock()
	defer m.Unlock()
	return m.db.Create(ctx, rollup)
}

// Delete removes the bucket rollup with the given id.
func (m *lockedBucketUsage) Delete(ctx context.Context, id uuid.UUID) error {
	m.Lock()
	defer m.Unlock()
	return m.db.Delete(ctx, id)
}

// Get retrieves the bucket rollup with the given id.
func (m *lockedBucketUsage) Get(ctx context.Context, id uuid.UUID) (*accounting.BucketRollup, error) {
	m.Lock()
	defer m.Unlock()
	return m.db.Get(ctx, id)
}

// GetPaged retrieves a page of bucket rollups using the provided cursor.
func (m *lockedBucketUsage) GetPaged(ctx context.Context, cursor *accounting.BucketRollupCursor) ([]accounting.BucketRollup, error) {
	m.Lock()
	defer m.Unlock()
	return m.db.GetPaged(ctx, cursor)
}
|
|
|
|
|
2019-01-16 20:23:28 +00:00
|
|
|
// ProjectMembers is a getter for ProjectMembers repository
func (m *lockedConsole) ProjectMembers() console.ProjectMembers {
	m.Lock()
	defer m.Unlock()
	return &lockedProjectMembers{m.Locker, m.db.ProjectMembers()}
}
|
|
|
|
|
2019-01-16 20:23:28 +00:00
|
|
|
// lockedProjectMembers implements locking wrapper for console.ProjectMembers
type lockedProjectMembers struct {
	sync.Locker
	db console.ProjectMembers
}

// Delete is a method for deleting project member by memberID and projectID from the database.
func (m *lockedProjectMembers) Delete(ctx context.Context, memberID uuid.UUID, projectID uuid.UUID) error {
	m.Lock()
	defer m.Unlock()
	return m.db.Delete(ctx, memberID, projectID)
}

// GetByMemberID is a method for querying project members from the database by memberID.
func (m *lockedProjectMembers) GetByMemberID(ctx context.Context, memberID uuid.UUID) ([]console.ProjectMember, error) {
	m.Lock()
	defer m.Unlock()
	return m.db.GetByMemberID(ctx, memberID)
}

// GetByProjectID is a method for querying project members from the database by projectID, offset and limit.
func (m *lockedProjectMembers) GetByProjectID(ctx context.Context, projectID uuid.UUID, pagination console.Pagination) ([]console.ProjectMember, error) {
	m.Lock()
	defer m.Unlock()
	return m.db.GetByProjectID(ctx, projectID, pagination)
}

// Insert is a method for inserting project member into the database.
func (m *lockedProjectMembers) Insert(ctx context.Context, memberID uuid.UUID, projectID uuid.UUID) (*console.ProjectMember, error) {
	m.Lock()
	defer m.Unlock()
	return m.db.Insert(ctx, memberID, projectID)
}
|
|
|
|
|
|
|
|
// Projects is a getter for Projects repository
func (m *lockedConsole) Projects() console.Projects {
	m.Lock()
	defer m.Unlock()
	return &lockedProjects{m.Locker, m.db.Projects()}
}
|
|
|
|
|
|
|
|
// lockedProjects implements locking wrapper for console.Projects
type lockedProjects struct {
	sync.Locker
	db console.Projects
}

// Delete is a method for deleting project by Id from the database.
func (m *lockedProjects) Delete(ctx context.Context, id uuid.UUID) error {
	m.Lock()
	defer m.Unlock()
	return m.db.Delete(ctx, id)
}

// Get is a method for querying project from the database by id.
func (m *lockedProjects) Get(ctx context.Context, id uuid.UUID) (*console.Project, error) {
	m.Lock()
	defer m.Unlock()
	return m.db.Get(ctx, id)
}

// GetAll is a method for querying all projects from the database.
func (m *lockedProjects) GetAll(ctx context.Context) ([]console.Project, error) {
	m.Lock()
	defer m.Unlock()
	return m.db.GetAll(ctx)
}

// GetByUserID is a method for querying all projects from the database by userID.
func (m *lockedProjects) GetByUserID(ctx context.Context, userID uuid.UUID) ([]console.Project, error) {
	m.Lock()
	defer m.Unlock()
	return m.db.GetByUserID(ctx, userID)
}

// Insert is a method for inserting project into the database.
func (m *lockedProjects) Insert(ctx context.Context, project *console.Project) (*console.Project, error) {
	m.Lock()
	defer m.Unlock()
	return m.db.Insert(ctx, project)
}

// Update is a method for updating project entity.
func (m *lockedProjects) Update(ctx context.Context, project *console.Project) error {
	m.Lock()
	defer m.Unlock()
	return m.db.Update(ctx, project)
}
|
|
|
|
|
2019-03-19 17:55:43 +00:00
|
|
|
// RegistrationTokens is a getter for RegistrationTokens repository
func (m *lockedConsole) RegistrationTokens() console.RegistrationTokens {
	m.Lock()
	defer m.Unlock()
	return &lockedRegistrationTokens{m.Locker, m.db.RegistrationTokens()}
}
|
|
|
|
|
|
|
|
// lockedRegistrationTokens implements locking wrapper for console.RegistrationTokens
type lockedRegistrationTokens struct {
	sync.Locker
	db console.RegistrationTokens
}

// Create creates new registration token
func (m *lockedRegistrationTokens) Create(ctx context.Context, projectLimit int) (*console.RegistrationToken, error) {
	m.Lock()
	defer m.Unlock()
	return m.db.Create(ctx, projectLimit)
}

// GetByOwnerID retrieves RegTokenInfo by ownerID
func (m *lockedRegistrationTokens) GetByOwnerID(ctx context.Context, ownerID uuid.UUID) (*console.RegistrationToken, error) {
	m.Lock()
	defer m.Unlock()
	return m.db.GetByOwnerID(ctx, ownerID)
}

// GetBySecret retrieves RegTokenInfo with given Secret
func (m *lockedRegistrationTokens) GetBySecret(ctx context.Context, secret console.RegistrationSecret) (*console.RegistrationToken, error) {
	m.Lock()
	defer m.Unlock()
	return m.db.GetBySecret(ctx, secret)
}

// UpdateOwner updates registration token's owner
func (m *lockedRegistrationTokens) UpdateOwner(ctx context.Context, secret console.RegistrationSecret, ownerID uuid.UUID) error {
	m.Lock()
	defer m.Unlock()
	return m.db.UpdateOwner(ctx, secret, ownerID)
}
|
|
|
|
|
2019-04-04 15:56:20 +01:00
|
|
|
// UsageRollups is a getter for UsageRollups repository
func (m *lockedConsole) UsageRollups() console.UsageRollups {
	m.Lock()
	defer m.Unlock()
	return &lockedUsageRollups{m.Locker, m.db.UsageRollups()}
}
|
|
|
|
|
|
|
|
// lockedUsageRollups implements locking wrapper for console.UsageRollups
type lockedUsageRollups struct {
	sync.Locker
	db console.UsageRollups
}

// GetBucketUsageRollups retrieves bucket usage rollups for the project within the given time range.
func (m *lockedUsageRollups) GetBucketUsageRollups(ctx context.Context, projectID uuid.UUID, since time.Time, before time.Time) ([]console.BucketUsageRollup, error) {
	m.Lock()
	defer m.Unlock()
	return m.db.GetBucketUsageRollups(ctx, projectID, since, before)
}

// GetProjectTotal retrieves total usage for the project within the given time range.
func (m *lockedUsageRollups) GetProjectTotal(ctx context.Context, projectID uuid.UUID, since time.Time, before time.Time) (*console.ProjectUsage, error) {
	m.Lock()
	defer m.Unlock()
	return m.db.GetProjectTotal(ctx, projectID, since, before)
}
|
|
|
|
|
2019-01-16 20:23:28 +00:00
|
|
|
// Users is a getter for Users repository
func (m *lockedConsole) Users() console.Users {
	m.Lock()
	defer m.Unlock()
	return &lockedUsers{m.Locker, m.db.Users()}
}
|
|
|
|
|
|
|
|
// lockedUsers implements locking wrapper for console.Users
type lockedUsers struct {
	sync.Locker
	db console.Users
}

// Delete is a method for deleting user by Id from the database.
func (m *lockedUsers) Delete(ctx context.Context, id uuid.UUID) error {
	m.Lock()
	defer m.Unlock()
	return m.db.Delete(ctx, id)
}

// Get is a method for querying user from the database by id.
func (m *lockedUsers) Get(ctx context.Context, id uuid.UUID) (*console.User, error) {
	m.Lock()
	defer m.Unlock()
	return m.db.Get(ctx, id)
}

// GetByEmail is a method for querying user by email from the database.
func (m *lockedUsers) GetByEmail(ctx context.Context, email string) (*console.User, error) {
	m.Lock()
	defer m.Unlock()
	return m.db.GetByEmail(ctx, email)
}

// Insert is a method for inserting user into the database.
func (m *lockedUsers) Insert(ctx context.Context, user *console.User) (*console.User, error) {
	m.Lock()
	defer m.Unlock()
	return m.db.Insert(ctx, user)
}

// Update is a method for updating user entity.
func (m *lockedUsers) Update(ctx context.Context, user *console.User) error {
	m.Lock()
	defer m.Unlock()
	return m.db.Update(ctx, user)
}
|
|
|
|
|
2019-02-04 20:37:46 +00:00
|
|
|
// CreateSchema sets the schema
func (m *locked) CreateSchema(schema string) error {
	m.Lock()
	defer m.Unlock()
	return m.db.CreateSchema(schema)
}
|
|
|
|
|
2019-01-16 20:23:28 +00:00
|
|
|
// CreateTables initializes the database
func (m *locked) CreateTables() error {
	m.Lock()
	defer m.Unlock()
	return m.db.CreateTables()
}
|
|
|
|
|
2019-01-31 19:17:12 +00:00
|
|
|
// DropSchema drops the schema
func (m *locked) DropSchema(schema string) error {
	m.Lock()
	defer m.Unlock()
	return m.db.DropSchema(schema)
}
|
|
|
|
|
2019-01-16 20:23:28 +00:00
|
|
|
// Irreparable returns database for failed repairs
func (m *locked) Irreparable() irreparable.DB {
	m.Lock()
	defer m.Unlock()
	return &lockedIrreparable{m.Locker, m.db.Irreparable()}
}
|
|
|
|
|
|
|
|
// lockedIrreparable implements locking wrapper for irreparable.DB
type lockedIrreparable struct {
	sync.Locker
	db irreparable.DB
}

// Delete removes irreparable segment info based on segmentPath.
func (m *lockedIrreparable) Delete(ctx context.Context, segmentPath []byte) error {
	m.Lock()
	defer m.Unlock()
	return m.db.Delete(ctx, segmentPath)
}

// Get returns irreparable segment info based on segmentPath.
func (m *lockedIrreparable) Get(ctx context.Context, segmentPath []byte) (*pb.IrreparableSegment, error) {
	m.Lock()
	defer m.Unlock()
	return m.db.Get(ctx, segmentPath)
}

// GetLimited returns a limited number of irreparable segments starting from the offset.
func (m *lockedIrreparable) GetLimited(ctx context.Context, limit int, offset int64) ([]*pb.IrreparableSegment, error) {
	m.Lock()
	defer m.Unlock()
	return m.db.GetLimited(ctx, limit, offset)
}

// IncrementRepairAttempts increments the repair attempts.
func (m *lockedIrreparable) IncrementRepairAttempts(ctx context.Context, segmentInfo *pb.IrreparableSegment) error {
	m.Lock()
	defer m.Unlock()
	return m.db.IncrementRepairAttempts(ctx, segmentInfo)
}
|
|
|
|
|
2019-03-27 10:24:35 +00:00
|
|
|
// Orders returns database for orders
func (m *locked) Orders() orders.DB {
	m.Lock()
	defer m.Unlock()
	return &lockedOrders{m.Locker, m.db.Orders()}
}
|
|
|
|
|
|
|
|
// lockedOrders implements locking wrapper for orders.DB
type lockedOrders struct {
	sync.Locker
	db orders.DB
}

// CreateSerialInfo creates serial number entry in database
func (m *lockedOrders) CreateSerialInfo(ctx context.Context, serialNumber storj.SerialNumber, bucketID []byte, limitExpiration time.Time) error {
	m.Lock()
	defer m.Unlock()
	return m.db.CreateSerialInfo(ctx, serialNumber, bucketID, limitExpiration)
}

// GetBucketBandwidth gets total bucket bandwidth from period of time
func (m *lockedOrders) GetBucketBandwidth(ctx context.Context, bucketID []byte, from time.Time, to time.Time) (int64, error) {
	m.Lock()
	defer m.Unlock()
	return m.db.GetBucketBandwidth(ctx, bucketID, from, to)
}

// GetStorageNodeBandwidth gets total storage node bandwidth from period of time
func (m *lockedOrders) GetStorageNodeBandwidth(ctx context.Context, nodeID storj.NodeID, from time.Time, to time.Time) (int64, error) {
	m.Lock()
	defer m.Unlock()
	return m.db.GetStorageNodeBandwidth(ctx, nodeID, from, to)
}

// UnuseSerialNumber removes pair serial number -> storage node id from database
func (m *lockedOrders) UnuseSerialNumber(ctx context.Context, serialNumber storj.SerialNumber, storageNodeID storj.NodeID) error {
	m.Lock()
	defer m.Unlock()
	return m.db.UnuseSerialNumber(ctx, serialNumber, storageNodeID)
}

// UpdateBucketBandwidthAllocation updates 'allocated' bandwidth for given bucket
func (m *lockedOrders) UpdateBucketBandwidthAllocation(ctx context.Context, bucketID []byte, action pb.PieceAction, amount int64, intervalStart time.Time) error {
	m.Lock()
	defer m.Unlock()
	return m.db.UpdateBucketBandwidthAllocation(ctx, bucketID, action, amount, intervalStart)
}

// UpdateBucketBandwidthInline updates 'inline' bandwidth for given bucket
func (m *lockedOrders) UpdateBucketBandwidthInline(ctx context.Context, bucketID []byte, action pb.PieceAction, amount int64, intervalStart time.Time) error {
	m.Lock()
	defer m.Unlock()
	return m.db.UpdateBucketBandwidthInline(ctx, bucketID, action, amount, intervalStart)
}

// UpdateBucketBandwidthSettle updates 'settled' bandwidth for given bucket
func (m *lockedOrders) UpdateBucketBandwidthSettle(ctx context.Context, bucketID []byte, action pb.PieceAction, amount int64, intervalStart time.Time) error {
	m.Lock()
	defer m.Unlock()
	return m.db.UpdateBucketBandwidthSettle(ctx, bucketID, action, amount, intervalStart)
}

// UpdateStoragenodeBandwidthAllocation updates 'allocated' bandwidth for given storage node
func (m *lockedOrders) UpdateStoragenodeBandwidthAllocation(ctx context.Context, storageNode storj.NodeID, action pb.PieceAction, amount int64, intervalStart time.Time) error {
	m.Lock()
	defer m.Unlock()
	return m.db.UpdateStoragenodeBandwidthAllocation(ctx, storageNode, action, amount, intervalStart)
}

// UpdateStoragenodeBandwidthSettle updates 'settled' bandwidth for given storage node
func (m *lockedOrders) UpdateStoragenodeBandwidthSettle(ctx context.Context, storageNode storj.NodeID, action pb.PieceAction, amount int64, intervalStart time.Time) error {
	m.Lock()
	defer m.Unlock()
	return m.db.UpdateStoragenodeBandwidthSettle(ctx, storageNode, action, amount, intervalStart)
}

// UseSerialNumber records a use of the serial number by the given storage node.
func (m *lockedOrders) UseSerialNumber(ctx context.Context, serialNumber storj.SerialNumber, storageNodeID storj.NodeID) ([]byte, error) {
	m.Lock()
	defer m.Unlock()
	return m.db.UseSerialNumber(ctx, serialNumber, storageNodeID)
}
|
|
|
|
|
2019-01-16 20:23:28 +00:00
|
|
|
// OverlayCache returns database for caching overlay information
|
|
|
|
func (m *locked) OverlayCache() overlay.DB {
|
|
|
|
m.Lock()
|
|
|
|
defer m.Unlock()
|
|
|
|
return &lockedOverlayCache{m.Locker, m.db.OverlayCache()}
|
|
|
|
}
|
|
|
|
|
2019-01-15 16:08:45 +00:00
|
|
|
// lockedOverlayCache implements locking wrapper for overlay.DB
type lockedOverlayCache struct {
	// Locker is shared with the parent locked wrapper; every method
	// acquires it before delegating to db.
	sync.Locker
	// db is the underlying, non-thread-safe overlay database.
	db overlay.DB
}
|
|
|
|
|
2019-03-29 08:53:43 +00:00
|
|
|
// CreateStats initializes the stats for node.
|
|
|
|
func (m *lockedOverlayCache) CreateStats(ctx context.Context, nodeID storj.NodeID, initial *overlay.NodeStats) (stats *overlay.NodeStats, err error) {
|
2019-03-25 22:25:09 +00:00
|
|
|
m.Lock()
|
|
|
|
defer m.Unlock()
|
2019-03-29 08:53:43 +00:00
|
|
|
return m.db.CreateStats(ctx, nodeID, initial)
|
2019-03-25 22:25:09 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// FindInvalidNodes finds a subset of storagenodes that have stats below provided reputation requirements.
|
|
|
|
func (m *lockedOverlayCache) FindInvalidNodes(ctx context.Context, nodeIDs storj.NodeIDList, maxStats *overlay.NodeStats) (invalid storj.NodeIDList, err error) {
|
|
|
|
m.Lock()
|
|
|
|
defer m.Unlock()
|
|
|
|
return m.db.FindInvalidNodes(ctx, nodeIDs, maxStats)
|
|
|
|
}
|
|
|
|
|
2019-01-15 16:08:45 +00:00
|
|
|
// Get looks up the node by nodeID
|
2019-04-04 17:34:36 +01:00
|
|
|
func (m *lockedOverlayCache) Get(ctx context.Context, nodeID storj.NodeID) (*overlay.NodeDossier, error) {
|
2019-01-02 17:53:27 +00:00
|
|
|
m.Lock()
|
|
|
|
defer m.Unlock()
|
2019-01-15 16:08:45 +00:00
|
|
|
return m.db.Get(ctx, nodeID)
|
2019-01-02 17:53:27 +00:00
|
|
|
}
|
|
|
|
|
2019-01-15 16:08:45 +00:00
|
|
|
// GetAll looks up nodes based on the ids from the overlay cache
|
2019-04-04 17:34:36 +01:00
|
|
|
func (m *lockedOverlayCache) GetAll(ctx context.Context, nodeIDs storj.NodeIDList) ([]*overlay.NodeDossier, error) {
|
2019-01-02 17:53:27 +00:00
|
|
|
m.Lock()
|
|
|
|
defer m.Unlock()
|
2019-01-15 16:08:45 +00:00
|
|
|
return m.db.GetAll(ctx, nodeIDs)
|
2019-01-02 17:53:27 +00:00
|
|
|
}
|
|
|
|
|
2019-01-15 16:08:45 +00:00
|
|
|
// List lists nodes starting from cursor
|
2019-04-04 17:34:36 +01:00
|
|
|
func (m *lockedOverlayCache) List(ctx context.Context, cursor storj.NodeID, limit int) ([]*overlay.NodeDossier, error) {
|
2019-01-02 17:53:27 +00:00
|
|
|
m.Lock()
|
|
|
|
defer m.Unlock()
|
2019-01-15 16:08:45 +00:00
|
|
|
return m.db.List(ctx, cursor, limit)
|
2019-01-02 17:53:27 +00:00
|
|
|
}
|
|
|
|
|
2019-01-30 16:29:18 +00:00
|
|
|
// Paginate will page through the database nodes
|
2019-04-04 17:34:36 +01:00
|
|
|
func (m *lockedOverlayCache) Paginate(ctx context.Context, offset int64, limit int) ([]*overlay.NodeDossier, bool, error) {
|
2019-01-30 16:29:18 +00:00
|
|
|
m.Lock()
|
|
|
|
defer m.Unlock()
|
|
|
|
return m.db.Paginate(ctx, offset, limit)
|
|
|
|
}
|
|
|
|
|
2019-02-11 19:24:51 +00:00
|
|
|
// SelectNewStorageNodes looks up nodes based on new node criteria
|
|
|
|
func (m *lockedOverlayCache) SelectNewStorageNodes(ctx context.Context, count int, criteria *overlay.NewNodeCriteria) ([]*pb.Node, error) {
|
2019-01-31 18:49:00 +00:00
|
|
|
m.Lock()
|
|
|
|
defer m.Unlock()
|
2019-02-11 19:24:51 +00:00
|
|
|
return m.db.SelectNewStorageNodes(ctx, count, criteria)
|
2019-01-31 18:49:00 +00:00
|
|
|
}
|
|
|
|
|
2019-02-11 19:24:51 +00:00
|
|
|
// SelectStorageNodes looks up nodes based on criteria
|
|
|
|
func (m *lockedOverlayCache) SelectStorageNodes(ctx context.Context, count int, criteria *overlay.NodeCriteria) ([]*pb.Node, error) {
|
2019-01-31 18:49:00 +00:00
|
|
|
m.Lock()
|
|
|
|
defer m.Unlock()
|
2019-02-11 19:24:51 +00:00
|
|
|
return m.db.SelectStorageNodes(ctx, count, criteria)
|
2019-01-31 18:49:00 +00:00
|
|
|
}
|
|
|
|
|
2019-04-22 10:07:50 +01:00
|
|
|
// Update updates node address
|
|
|
|
func (m *lockedOverlayCache) UpdateAddress(ctx context.Context, value *pb.Node) error {
|
2019-01-02 17:53:27 +00:00
|
|
|
m.Lock()
|
|
|
|
defer m.Unlock()
|
2019-04-22 10:07:50 +01:00
|
|
|
return m.db.UpdateAddress(ctx, value)
|
2019-01-02 17:53:27 +00:00
|
|
|
}
|
|
|
|
|
2019-04-22 10:07:50 +01:00
|
|
|
// UpdateNodeInfo updates node dossier with info requested from the node itself like node type, email, wallet, capacity, and version.
|
2019-04-10 07:04:24 +01:00
|
|
|
func (m *lockedOverlayCache) UpdateNodeInfo(ctx context.Context, node storj.NodeID, nodeInfo *pb.InfoResponse) (stats *overlay.NodeDossier, err error) {
|
2019-01-02 17:53:27 +00:00
|
|
|
m.Lock()
|
|
|
|
defer m.Unlock()
|
2019-04-10 07:04:24 +01:00
|
|
|
return m.db.UpdateNodeInfo(ctx, node, nodeInfo)
|
2019-01-02 17:53:27 +00:00
|
|
|
}
|
|
|
|
|
2019-03-25 22:25:09 +00:00
|
|
|
// UpdateStats all parts of single storagenode's stats.
|
|
|
|
func (m *lockedOverlayCache) UpdateStats(ctx context.Context, request *overlay.UpdateRequest) (stats *overlay.NodeStats, err error) {
|
2019-01-02 17:53:27 +00:00
|
|
|
m.Lock()
|
|
|
|
defer m.Unlock()
|
2019-03-25 22:25:09 +00:00
|
|
|
return m.db.UpdateStats(ctx, request)
|
2019-01-02 17:53:27 +00:00
|
|
|
}
|
|
|
|
|
2019-03-25 22:25:09 +00:00
|
|
|
// UpdateUptime updates a single storagenode's uptime stats.
|
|
|
|
func (m *lockedOverlayCache) UpdateUptime(ctx context.Context, nodeID storj.NodeID, isUp bool) (stats *overlay.NodeStats, err error) {
|
2019-01-02 17:53:27 +00:00
|
|
|
m.Lock()
|
|
|
|
defer m.Unlock()
|
2019-03-25 22:25:09 +00:00
|
|
|
return m.db.UpdateUptime(ctx, nodeID, isUp)
|
2019-01-02 17:53:27 +00:00
|
|
|
}
|
|
|
|
|
2019-03-25 22:25:09 +00:00
|
|
|
// RepairQueue returns queue for segments that need repairing
|
|
|
|
func (m *locked) RepairQueue() queue.RepairQueue {
|
2019-01-02 17:53:27 +00:00
|
|
|
m.Lock()
|
|
|
|
defer m.Unlock()
|
2019-03-25 22:25:09 +00:00
|
|
|
return &lockedRepairQueue{m.Locker, m.db.RepairQueue()}
|
2019-01-02 17:53:27 +00:00
|
|
|
}
|
|
|
|
|
2019-03-25 22:25:09 +00:00
|
|
|
// lockedRepairQueue implements locking wrapper for queue.RepairQueue
type lockedRepairQueue struct {
	// Locker is shared with the parent locked wrapper; every method
	// acquires it before delegating to db.
	sync.Locker
	// db is the underlying, non-thread-safe repair queue.
	db queue.RepairQueue
}
|
|
|
|
|
2019-04-16 19:14:09 +01:00
|
|
|
// Delete removes an injured segment.
|
|
|
|
func (m *lockedRepairQueue) Delete(ctx context.Context, s *pb.InjuredSegment) error {
|
2019-01-02 17:53:27 +00:00
|
|
|
m.Lock()
|
|
|
|
defer m.Unlock()
|
2019-04-16 19:14:09 +01:00
|
|
|
return m.db.Delete(ctx, s)
|
2019-01-02 17:53:27 +00:00
|
|
|
}
|
|
|
|
|
2019-04-16 19:14:09 +01:00
|
|
|
// Insert adds an injured segment.
|
|
|
|
func (m *lockedRepairQueue) Insert(ctx context.Context, s *pb.InjuredSegment) error {
|
2019-03-01 17:46:34 +00:00
|
|
|
m.Lock()
|
|
|
|
defer m.Unlock()
|
2019-04-16 19:14:09 +01:00
|
|
|
return m.db.Insert(ctx, s)
|
2019-03-01 17:46:34 +00:00
|
|
|
}
|
|
|
|
|
2019-04-16 19:14:09 +01:00
|
|
|
// Select gets an injured segment.
|
|
|
|
func (m *lockedRepairQueue) Select(ctx context.Context) (*pb.InjuredSegment, error) {
|
2019-01-02 17:53:27 +00:00
|
|
|
m.Lock()
|
|
|
|
defer m.Unlock()
|
2019-04-16 19:14:09 +01:00
|
|
|
return m.db.Select(ctx)
|
|
|
|
}
|
|
|
|
|
|
|
|
// SelectN lists limit amount of injured segments.
|
|
|
|
func (m *lockedRepairQueue) SelectN(ctx context.Context, limit int) ([]pb.InjuredSegment, error) {
|
|
|
|
m.Lock()
|
|
|
|
defer m.Unlock()
|
|
|
|
return m.db.SelectN(ctx, limit)
|
2019-01-02 17:53:27 +00:00
|
|
|
}
|