ConsoleDB merge and buckets table creation (#1066)

* init

* db merge

* bucket table renamed to bucket info

* remove id and fix comments

* fix imports
Yaroslav Vorobiov 2019-01-16 15:23:28 -05:00 committed by GitHub
parent f97883f990
commit d832789481
25 changed files with 3405 additions and 3192 deletions

View File

@ -21,6 +21,8 @@ type APIKeys interface {
GetByProjectID(ctx context.Context, projectID uuid.UUID) ([]APIKeyInfo, error)
// Get retrieves APIKeyInfo with given ID
Get(ctx context.Context, id uuid.UUID) (*APIKeyInfo, error)
// GetByKey retrieves APIKeyInfo for given key
GetByKey(ctx context.Context, key APIKey) (*APIKeyInfo, error)
// Create creates and stores new APIKeyInfo
Create(ctx context.Context, key APIKey, info APIKeyInfo) (*APIKeyInfo, error)
// Update updates APIKeyInfo in store

View File

@ -0,0 +1,32 @@
// Copyright (C) 2018 Storj Labs, Inc.
// See LICENSE for copying information.
package console
import (
"context"
"time"
"github.com/skyrings/skyring-common/tools/uuid"
)
// Buckets is an interface for working with bucket-to-project relations
type Buckets interface {
// ListBuckets returns bucket list of a given project
ListBuckets(ctx context.Context, projectID uuid.UUID) ([]Bucket, error)
// GetBucket retrieves bucket info of bucket with given name
GetBucket(ctx context.Context, name string) (*Bucket, error)
// AttachBucket attaches a bucket to a project
AttachBucket(ctx context.Context, name string, projectID uuid.UUID) (*Bucket, error)
// DeattachBucket deletes bucket info for a bucket by name
DeattachBucket(ctx context.Context, name string) error
}
// Bucket represents a bucket-to-project relationship
type Bucket struct {
Name string
ProjectID uuid.UUID
CreatedAt time.Time
}
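For orientation, here is a minimal usage sketch of the new repository, assuming a caller that already holds an initialized console.DB and the usual context/uuid imports; the function name, bucket name, and error handling are illustrative and not part of this commit:
// Illustrative sketch only: attach a bucket to a project, list, then detach.
func exampleBucketFlow(ctx context.Context, db console.DB, projectID uuid.UUID) error {
	buckets := db.Buckets()
	if _, err := buckets.AttachBucket(ctx, "photos", projectID); err != nil {
		return err
	}
	list, err := buckets.ListBuckets(ctx, projectID)
	if err != nil {
		return err
	}
	_ = list // entries come back ordered by name (see the dbx model later in this diff)
	return buckets.DeattachBucket(ctx, "photos")
}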

View File

@ -7,16 +7,18 @@ import (
"context"
"github.com/graphql-go/graphql"
"github.com/zeebo/errs"
"go.uber.org/zap"
"storj.io/storj/pkg/provider"
"storj.io/storj/pkg/utils"
"storj.io/storj/satellite/console"
"storj.io/storj/satellite/console/consoleauth"
"storj.io/storj/satellite/console/consoleweb/consoleql"
"storj.io/storj/satellite/satellitedb"
)
// Error is the satellite console error type
var Error = errs.Class("satellite console error")
// Config contains info needed for satellite account related services
type Config struct {
GatewayConfig
@ -28,36 +30,28 @@ type Config struct {
func (c Config) Run(ctx context.Context, server *provider.Provider) error {
log := zap.NewExample()
// Create satellite DB
driver, source, err := utils.SplitDBURL(c.DatabaseURL)
if err != nil {
return err
}
db, ok := ctx.Value("masterdb").(interface {
Console() console.DB
})
db, err := satellitedb.NewConsoleDB(driver, source)
if err != nil {
return err
}
err = db.CreateTables()
if err != nil {
log.Error(err.Error())
if !ok {
return Error.Wrap(errs.New("unable to get master db instance"))
}
service, err := console.NewService(
log,
&consoleauth.Hmac{Secret: []byte("my-suppa-secret-key")},
db,
db.Console(),
)
if err != nil {
return err
return Error.Wrap(err)
}
creator := consoleql.TypeCreator{}
err = creator.Create(service)
if err != nil {
return err
return Error.Wrap(err)
}
schema, err := graphql.NewSchema(graphql.SchemaConfig{
@ -66,7 +60,7 @@ func (c Config) Run(ctx context.Context, server *provider.Provider) error {
})
if err != nil {
return err
return Error.Wrap(err)
}
go (&gateway{
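With this change Config.Run no longer opens its own ConsoleDB; it expects the caller to have stored the master database in the context under the "masterdb" key. A hedged sketch of that caller-side wiring (variable names are illustrative; only the key literal and the Console getter come from this diff):
// Illustrative sketch only: hand the master DB to the console config.
var masterDB satellite.DB // assumed to be created during satellite setup
ctx = context.WithValue(ctx, "masterdb", masterDB) // key literal mirrors the lookup in Run
if err := consoleConfig.Run(ctx, server); err != nil {
	return err
}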

View File

@ -15,6 +15,8 @@ type DB interface {
ProjectMembers() ProjectMembers
// APIKeys is a getter for APIKeys repository
APIKeys() APIKeys
// Buckets is a getter for Buckets repository
Buckets() Buckets
// CreateTables is a method for creating all tables for satellitedb
CreateTables() error

View File

@ -10,6 +10,7 @@ import (
"storj.io/storj/pkg/datarepair/queue"
"storj.io/storj/pkg/overlay"
"storj.io/storj/pkg/statdb"
"storj.io/storj/satellite/console"
)
// DB is the master database for the satellite
@ -31,4 +32,6 @@ type DB interface {
RepairQueue() queue.RepairQueue
// Irreparable returns database for failed repairs
Irreparable() irreparable.DB
// Console returns database for satellite console
Console() console.DB
}

View File

@ -10,7 +10,7 @@ import (
"github.com/zeebo/errs"
"storj.io/storj/satellite/console"
dbx "storj.io/storj/satellite/satellitedb/consoledbx"
dbx "storj.io/storj/satellite/satellitedb/dbx"
)
// apikeys is an implementation of satellite.APIKeys
@ -55,6 +55,16 @@ func (keys *apikeys) Get(ctx context.Context, id uuid.UUID) (*console.APIKeyInfo
return fromDBXAPIKey(dbKey)
}
// GetByKey implements satellite.APIKeys
func (keys *apikeys) GetByKey(ctx context.Context, key console.APIKey) (*console.APIKeyInfo, error) {
dbKey, err := keys.db.Get_ApiKey_By_Key(ctx, dbx.ApiKey_Key(key[:]))
if err != nil {
return nil, err
}
return fromDBXAPIKey(dbKey)
}
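The new GetByKey is what lets the console service resolve a raw key presented by a client into its stored metadata. A minimal caller-side sketch (helper name is illustrative):
// Illustrative sketch only: resolve a presented API key to its stored info.
func authenticateKey(ctx context.Context, keys console.APIKeys, key console.APIKey) (*console.APIKeyInfo, error) {
	info, err := keys.GetByKey(ctx, key)
	if err != nil {
		return nil, err // unknown or unreadable key
	}
	return info, nil // the returned info ties the key to its owning project
}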
// Create implements satellite.APIKeys
func (keys *apikeys) Create(ctx context.Context, key console.APIKey, info console.APIKeyInfo) (*console.APIKeyInfo, error) {
id, err := uuid.New()

View File

@ -0,0 +1,92 @@
// Copyright (C) 2018 Storj Labs, Inc.
// See LICENSE for copying information.
package satellitedb
import (
"context"
"github.com/skyrings/skyring-common/tools/uuid"
"github.com/zeebo/errs"
"storj.io/storj/satellite/console"
dbx "storj.io/storj/satellite/satellitedb/dbx"
)
type buckets struct {
db dbx.Methods
}
// ListBuckets returns bucket list of a given project
func (buck *buckets) ListBuckets(ctx context.Context, projectID uuid.UUID) ([]console.Bucket, error) {
buckets, err := buck.db.All_BucketInfo_By_ProjectId_OrderBy_Asc_Name(
ctx,
dbx.BucketInfo_ProjectId(projectID[:]),
)
if err != nil {
return nil, err
}
var consoleBuckets []console.Bucket
for _, bucket := range buckets {
consoleBucket, bucketErr := fromDBXBucket(bucket)
if bucketErr != nil {
err = errs.Combine(err, bucketErr)
continue
}
consoleBuckets = append(consoleBuckets, *consoleBucket)
}
if err != nil {
return nil, err
}
return consoleBuckets, nil
}
// GetBucket retrieves bucket info of bucket with given name
func (buck *buckets) GetBucket(ctx context.Context, name string) (*console.Bucket, error) {
bucket, err := buck.db.Get_BucketInfo_By_Name(ctx, dbx.BucketInfo_Name(name))
if err != nil {
return nil, err
}
return fromDBXBucket(bucket)
}
// AttachBucket attaches a bucket to a project
func (buck *buckets) AttachBucket(ctx context.Context, name string, projectID uuid.UUID) (*console.Bucket, error) {
bucket, err := buck.db.Create_BucketInfo(
ctx,
dbx.BucketInfo_ProjectId(projectID[:]),
dbx.BucketInfo_Name(name),
)
if err != nil {
return nil, err
}
return fromDBXBucket(bucket)
}
// DeattachBucket deletes bucket info for a bucket by name
func (buck *buckets) DeattachBucket(ctx context.Context, name string) error {
_, err := buck.db.Delete_BucketInfo_By_Name(ctx, dbx.BucketInfo_Name(name))
return err
}
// fromDBXBucket creates console.Bucket from dbx.BucketInfo
func fromDBXBucket(bucket *dbx.BucketInfo) (*console.Bucket, error) {
projectID, err := bytesToUUID(bucket.ProjectId)
if err != nil {
return nil, err
}
return &console.Bucket{
ProjectID: projectID,
Name: bucket.Name,
CreatedAt: bucket.CreatedAt,
}, nil
}

View File

@ -10,7 +10,7 @@ import (
"storj.io/storj/internal/migrate"
"storj.io/storj/satellite/console"
dbx "storj.io/storj/satellite/satellitedb/consoledbx"
dbx "storj.io/storj/satellite/satellitedb/dbx"
)
// ConsoleDB contains access to different satellite databases
@ -57,6 +57,11 @@ func (db *ConsoleDB) APIKeys() console.APIKeys {
return &apikeys{db.methods}
}
// Buckets is a getter for Buckets repository
func (db *ConsoleDB) Buckets() console.Buckets {
return &buckets{db.methods}
}
// CreateTables is a method for creating all tables for satellitedb
func (db *ConsoleDB) CreateTables() error {
if db.db == nil {

View File

@ -1,106 +0,0 @@
//--- satellite console ---//
model user (
key id
unique email
field id blob
field first_name text ( updatable )
field last_name text ( updatable )
field email text ( updatable )
field password_hash blob ( updatable )
field created_at timestamp ( autoinsert )
)
read one (
select user
where user.email = ?
)
read one (
select user
where user.id = ?
)
create user ( )
update user ( where user.id = ? )
delete user ( where user.id = ? )
model project (
key id
field id blob
field name text
field description text ( updatable )
// stores last accepted version of terms of use
field terms_accepted int ( updatable )
field created_at timestamp ( autoinsert )
)
read all ( select project)
read one (
select project
where project.id = ?
)
read all (
select project
join project.id = project_member.project_id
where project_member.member_id = ?
orderby asc project.name
)
create project ( )
update project ( where project.id = ? )
delete project ( where project.id = ? )
model project_member (
key member_id project_id
field member_id user.id cascade
field project_id project.id cascade
field created_at timestamp ( autoinsert )
)
read all (
select project_member
where project_member.member_id = ?
)
read limitoffset (
select project_member
where project_member.project_id = ?
)
create project_member ( )
delete project_member (
where project_member.member_id = ?
where project_member.project_id = ?
)
model api_key (
key id
unique key
unique name project_id
field id blob
field project_id project.id cascade
field key blob
field name text (updatable)
field created_at timestamp ( autoinsert )
)
create api_key ()
update api_key ( where api_key.id = ? )
delete api_key ( where api_key.id = ? )
read one (
select api_key
where api_key.id = ?
)
read all (
select api_key
where api_key.project_id = ?
orderby asc api_key.name
)

File diff suppressed because it is too large.

View File

@ -1,36 +0,0 @@
-- AUTOGENERATED BY gopkg.in/spacemonkeygo/dbx.v1
-- DO NOT EDIT
CREATE TABLE projects (
id bytea NOT NULL,
name text NOT NULL,
description text NOT NULL,
terms_accepted integer NOT NULL,
created_at timestamp with time zone NOT NULL,
PRIMARY KEY ( id )
);
CREATE TABLE users (
id bytea NOT NULL,
first_name text NOT NULL,
last_name text NOT NULL,
email text NOT NULL,
password_hash bytea NOT NULL,
created_at timestamp with time zone NOT NULL,
PRIMARY KEY ( id ),
UNIQUE ( email )
);
CREATE TABLE api_keys (
id bytea NOT NULL,
project_id bytea NOT NULL REFERENCES projects( id ) ON DELETE CASCADE,
key bytea NOT NULL,
name text NOT NULL,
created_at timestamp with time zone NOT NULL,
PRIMARY KEY ( id ),
UNIQUE ( key ),
UNIQUE ( name, project_id )
);
CREATE TABLE project_members (
member_id bytea NOT NULL REFERENCES users( id ) ON DELETE CASCADE,
project_id bytea NOT NULL REFERENCES projects( id ) ON DELETE CASCADE,
created_at timestamp with time zone NOT NULL,
PRIMARY KEY ( member_id, project_id )
);

View File

@ -1,36 +0,0 @@
-- AUTOGENERATED BY gopkg.in/spacemonkeygo/dbx.v1
-- DO NOT EDIT
CREATE TABLE projects (
id BLOB NOT NULL,
name TEXT NOT NULL,
description TEXT NOT NULL,
terms_accepted INTEGER NOT NULL,
created_at TIMESTAMP NOT NULL,
PRIMARY KEY ( id )
);
CREATE TABLE users (
id BLOB NOT NULL,
first_name TEXT NOT NULL,
last_name TEXT NOT NULL,
email TEXT NOT NULL,
password_hash BLOB NOT NULL,
created_at TIMESTAMP NOT NULL,
PRIMARY KEY ( id ),
UNIQUE ( email )
);
CREATE TABLE api_keys (
id BLOB NOT NULL,
project_id BLOB NOT NULL REFERENCES projects( id ) ON DELETE CASCADE,
key BLOB NOT NULL,
name TEXT NOT NULL,
created_at TIMESTAMP NOT NULL,
PRIMARY KEY ( id ),
UNIQUE ( key ),
UNIQUE ( name, project_id )
);
CREATE TABLE project_members (
member_id BLOB NOT NULL REFERENCES users( id ) ON DELETE CASCADE,
project_id BLOB NOT NULL REFERENCES projects( id ) ON DELETE CASCADE,
created_at TIMESTAMP NOT NULL,
PRIMARY KEY ( member_id, project_id )
);

View File

@ -1,7 +0,0 @@
// Copyright (C) 2018 Storj Labs, Inc.
// See LICENSE for copying information.
package consoledbx
//go:generate dbx.v1 schema -d postgres -d sqlite3 consoledb.dbx .
//go:generate dbx.v1 golang -d postgres -d sqlite3 consoledb.dbx .

View File

@ -15,6 +15,7 @@ import (
"storj.io/storj/pkg/statdb"
"storj.io/storj/pkg/utils"
"storj.io/storj/satellite"
"storj.io/storj/satellite/console"
dbx "storj.io/storj/satellite/satellitedb/dbx"
)
@ -90,6 +91,14 @@ func (db *DB) Irreparable() irreparable.DB {
return &irreparableDB{db: db.db}
}
// Console returns database for storing users, projects and api keys
func (db *DB) Console() console.DB {
return &ConsoleDB{
db: db.db,
methods: db.db,
}
}
// CreateTables is a method for creating all tables for database
func (db *DB) CreateTables() error {
return migrate.Create("database", db.db)

View File

@ -219,3 +219,138 @@ read limitoffset (
select injuredsegment
)
delete injuredsegment ( where injuredsegment.id = ? )
//--- satellite console ---//
model user (
key id
unique email
field id blob
field first_name text ( updatable )
field last_name text ( updatable )
field email text ( updatable )
field password_hash blob ( updatable )
field created_at timestamp ( autoinsert )
)
read one (
select user
where user.email = ?
)
read one (
select user
where user.id = ?
)
create user ( )
update user ( where user.id = ? )
delete user ( where user.id = ? )
model project (
key id
field id blob
field name text
field description text ( updatable )
// stores last accepted version of terms of use
field terms_accepted int ( updatable )
field created_at timestamp ( autoinsert )
)
read all ( select project)
read one (
select project
where project.id = ?
)
read all (
select project
join project.id = project_member.project_id
where project_member.member_id = ?
orderby asc project.name
)
create project ( )
update project ( where project.id = ? )
delete project ( where project.id = ? )
model project_member (
key member_id project_id
field member_id user.id cascade
field project_id project.id cascade
field created_at timestamp ( autoinsert )
)
read all (
select project_member
where project_member.member_id = ?
)
read limitoffset (
select project_member
where project_member.project_id = ?
)
create project_member ( )
delete project_member (
where project_member.member_id = ?
where project_member.project_id = ?
)
model api_key (
key id
unique key
unique name project_id
field id blob
field project_id project.id cascade
field key blob
field name text (updatable)
field created_at timestamp ( autoinsert )
)
create api_key ()
update api_key ( where api_key.id = ? )
delete api_key ( where api_key.id = ? )
read one (
select api_key
where api_key.id = ?
)
read one (
select api_key
where api_key.key = ?
)
read all (
select api_key
where api_key.project_id = ?
orderby asc api_key.name
)
model bucket_info (
key name
field project_id project.id cascade
field name text
field created_at timestamp ( autoinsert )
)
create bucket_info ()
delete bucket_info ( where bucket_info.name = ? )
read one (
select bucket_info
where bucket_info.name = ?
)
read all (
select bucket_info
where bucket_info.project_id = ?
orderby asc bucket_info.name
)
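For readers unfamiliar with dbx codegen: the four bucket_info statements above are what produce the BucketInfo accessors called from satellitedb/buckets.go earlier in this diff. A summary of the mapping (method names taken from that file):
// create bucket_info ()                               -> Create_BucketInfo(ctx, BucketInfo_ProjectId(...), BucketInfo_Name(...))
// read one  ( where bucket_info.name = ? )            -> Get_BucketInfo_By_Name(ctx, BucketInfo_Name(...))
// read all  ( where bucket_info.project_id = ?, asc ) -> All_BucketInfo_By_ProjectId_OrderBy_Asc_Name(ctx, BucketInfo_ProjectId(...))
// delete bucket_info ( where bucket_info.name = ? )   -> Delete_BucketInfo_By_Name(ctx, BucketInfo_Name(...))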

File diff suppressed because it is too large.

View File

@ -79,3 +79,43 @@ CREATE TABLE overlay_cache_nodes (
PRIMARY KEY ( node_id ),
UNIQUE ( node_id )
);
CREATE TABLE projects (
id bytea NOT NULL,
name text NOT NULL,
description text NOT NULL,
terms_accepted integer NOT NULL,
created_at timestamp with time zone NOT NULL,
PRIMARY KEY ( id )
);
CREATE TABLE users (
id bytea NOT NULL,
first_name text NOT NULL,
last_name text NOT NULL,
email text NOT NULL,
password_hash bytea NOT NULL,
created_at timestamp with time zone NOT NULL,
PRIMARY KEY ( id ),
UNIQUE ( email )
);
CREATE TABLE api_keys (
id bytea NOT NULL,
project_id bytea NOT NULL REFERENCES projects( id ) ON DELETE CASCADE,
key bytea NOT NULL,
name text NOT NULL,
created_at timestamp with time zone NOT NULL,
PRIMARY KEY ( id ),
UNIQUE ( key ),
UNIQUE ( name, project_id )
);
CREATE TABLE bucket_infos (
project_id bytea NOT NULL REFERENCES projects( id ) ON DELETE CASCADE,
name text NOT NULL,
created_at timestamp with time zone NOT NULL,
PRIMARY KEY ( name )
);
CREATE TABLE project_members (
member_id bytea NOT NULL REFERENCES users( id ) ON DELETE CASCADE,
project_id bytea NOT NULL REFERENCES projects( id ) ON DELETE CASCADE,
created_at timestamp with time zone NOT NULL,
PRIMARY KEY ( member_id, project_id )
);

View File

@ -79,3 +79,43 @@ CREATE TABLE overlay_cache_nodes (
PRIMARY KEY ( node_id ),
UNIQUE ( node_id )
);
CREATE TABLE projects (
id BLOB NOT NULL,
name TEXT NOT NULL,
description TEXT NOT NULL,
terms_accepted INTEGER NOT NULL,
created_at TIMESTAMP NOT NULL,
PRIMARY KEY ( id )
);
CREATE TABLE users (
id BLOB NOT NULL,
first_name TEXT NOT NULL,
last_name TEXT NOT NULL,
email TEXT NOT NULL,
password_hash BLOB NOT NULL,
created_at TIMESTAMP NOT NULL,
PRIMARY KEY ( id ),
UNIQUE ( email )
);
CREATE TABLE api_keys (
id BLOB NOT NULL,
project_id BLOB NOT NULL REFERENCES projects( id ) ON DELETE CASCADE,
key BLOB NOT NULL,
name TEXT NOT NULL,
created_at TIMESTAMP NOT NULL,
PRIMARY KEY ( id ),
UNIQUE ( key ),
UNIQUE ( name, project_id )
);
CREATE TABLE bucket_infos (
project_id BLOB NOT NULL REFERENCES projects( id ) ON DELETE CASCADE,
name TEXT NOT NULL,
created_at TIMESTAMP NOT NULL,
PRIMARY KEY ( name )
);
CREATE TABLE project_members (
member_id BLOB NOT NULL REFERENCES users( id ) ON DELETE CASCADE,
project_id BLOB NOT NULL REFERENCES projects( id ) ON DELETE CASCADE,
created_at TIMESTAMP NOT NULL,
PRIMARY KEY ( member_id, project_id )
);

View File

@ -10,6 +10,8 @@ import (
"sync"
"time"
"github.com/skyrings/skyring-common/tools/uuid"
"storj.io/storj/pkg/accounting"
"storj.io/storj/pkg/bwagreement"
"storj.io/storj/pkg/datarepair/irreparable"
@ -19,6 +21,7 @@ import (
"storj.io/storj/pkg/statdb"
"storj.io/storj/pkg/storj"
"storj.io/storj/satellite"
"storj.io/storj/satellite/console"
)
// locked implements a locking wrapper around satellite.DB.
@ -39,55 +42,6 @@ func (m *locked) Accounting() accounting.DB {
return &lockedAccounting{m.Locker, m.db.Accounting()}
}
// BandwidthAgreement returns database for storing bandwidth agreements
func (m *locked) BandwidthAgreement() bwagreement.DB {
m.Lock()
defer m.Unlock()
return &lockedBandwidthAgreement{m.Locker, m.db.BandwidthAgreement()}
}
// Close closes the database
func (m *locked) Close() error {
m.Lock()
defer m.Unlock()
return m.db.Close()
}
// CreateTables initializes the database
func (m *locked) CreateTables() error {
m.Lock()
defer m.Unlock()
return m.db.CreateTables()
}
// Irreparable returns database for failed repairs
func (m *locked) Irreparable() irreparable.DB {
m.Lock()
defer m.Unlock()
return &lockedIrreparable{m.Locker, m.db.Irreparable()}
}
// OverlayCache returns database for caching overlay information
func (m *locked) OverlayCache() overlay.DB {
m.Lock()
defer m.Unlock()
return &lockedOverlayCache{m.Locker, m.db.OverlayCache()}
}
// RepairQueue returns queue for segments that need repairing
func (m *locked) RepairQueue() queue.RepairQueue {
m.Lock()
defer m.Unlock()
return &lockedRepairQueue{m.Locker, m.db.RepairQueue()}
}
// StatDB returns database for storing node statistics
func (m *locked) StatDB() statdb.DB {
m.Lock()
defer m.Unlock()
return &lockedStatDB{m.Locker, m.db.StatDB()}
}
// lockedAccounting implements locking wrapper for accounting.DB
type lockedAccounting struct {
sync.Locker
@ -108,13 +62,6 @@ func (m *lockedAccounting) GetRawSince(ctx context.Context, latestRollup time.Ti
return m.db.GetRawSince(ctx, latestRollup)
}
// SaveRollup records raw tallies of at rest data to the database
func (m *lockedAccounting) SaveRollup(ctx context.Context, latestTally time.Time, stats accounting.RollupStats) error {
m.Lock()
defer m.Unlock()
return m.db.SaveRollup(ctx, latestTally, stats)
}
// LastRawTime records the latest last tallied time.
func (m *lockedAccounting) LastRawTime(ctx context.Context, timestampType string) (time.Time, bool, error) {
m.Lock()
@ -136,6 +83,20 @@ func (m *lockedAccounting) SaveBWRaw(ctx context.Context, latestBwa time.Time, b
return m.db.SaveBWRaw(ctx, latestBwa, bwTotals)
}
// SaveRollup records raw tallies of at rest data to the database
func (m *lockedAccounting) SaveRollup(ctx context.Context, latestTally time.Time, stats accounting.RollupStats) error {
m.Lock()
defer m.Unlock()
return m.db.SaveRollup(ctx, latestTally, stats)
}
// BandwidthAgreement returns database for storing bandwidth agreements
func (m *locked) BandwidthAgreement() bwagreement.DB {
m.Lock()
defer m.Unlock()
return &lockedBandwidthAgreement{m.Locker, m.db.BandwidthAgreement()}
}
// lockedBandwidthAgreement implements locking wrapper for bwagreement.DB
type lockedBandwidthAgreement struct {
sync.Locker
@ -163,6 +124,290 @@ func (m *lockedBandwidthAgreement) GetAgreementsSince(ctx context.Context, a1 ti
return m.db.GetAgreementsSince(ctx, a1)
}
// Close closes the database
func (m *locked) Close() error {
m.Lock()
defer m.Unlock()
return m.db.Close()
}
// Console returns database for satellite console
func (m *locked) Console() console.DB {
m.Lock()
defer m.Unlock()
return &lockedConsole{m.Locker, m.db.Console()}
}
// lockedConsole implements locking wrapper for console.DB
type lockedConsole struct {
sync.Locker
db console.DB
}
// APIKeys is a getter for APIKeys repository
func (m *lockedConsole) APIKeys() console.APIKeys {
m.Lock()
defer m.Unlock()
return &lockedAPIKeys{m.Locker, m.db.APIKeys()}
}
// lockedAPIKeys implements locking wrapper for console.APIKeys
type lockedAPIKeys struct {
sync.Locker
db console.APIKeys
}
// Create creates and stores new APIKeyInfo
func (m *lockedAPIKeys) Create(ctx context.Context, key console.APIKey, info console.APIKeyInfo) (*console.APIKeyInfo, error) {
m.Lock()
defer m.Unlock()
return m.db.Create(ctx, key, info)
}
// Delete deletes APIKeyInfo from store
func (m *lockedAPIKeys) Delete(ctx context.Context, id uuid.UUID) error {
m.Lock()
defer m.Unlock()
return m.db.Delete(ctx, id)
}
// Get retrieves APIKeyInfo with given ID
func (m *lockedAPIKeys) Get(ctx context.Context, id uuid.UUID) (*console.APIKeyInfo, error) {
m.Lock()
defer m.Unlock()
return m.db.Get(ctx, id)
}
// GetByKey retrieves APIKeyInfo for given key
func (m *lockedAPIKeys) GetByKey(ctx context.Context, key console.APIKey) (*console.APIKeyInfo, error) {
m.Lock()
defer m.Unlock()
return m.db.GetByKey(ctx, key)
}
// GetByProjectID retrieves list of APIKeys for given projectID
func (m *lockedAPIKeys) GetByProjectID(ctx context.Context, projectID uuid.UUID) ([]console.APIKeyInfo, error) {
m.Lock()
defer m.Unlock()
return m.db.GetByProjectID(ctx, projectID)
}
// Update updates APIKeyInfo in store
func (m *lockedAPIKeys) Update(ctx context.Context, key console.APIKeyInfo) error {
m.Lock()
defer m.Unlock()
return m.db.Update(ctx, key)
}
// Buckets is a getter for Buckets repository
func (m *lockedConsole) Buckets() console.Buckets {
m.Lock()
defer m.Unlock()
return &lockedBuckets{m.Locker, m.db.Buckets()}
}
// lockedBuckets implements locking wrapper for console.Buckets
type lockedBuckets struct {
sync.Locker
db console.Buckets
}
func (m *lockedBuckets) AttachBucket(ctx context.Context, name string, projectID uuid.UUID) (*console.Bucket, error) {
m.Lock()
defer m.Unlock()
return m.db.AttachBucket(ctx, name, projectID)
}
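// DeattachBucket deletes bucket info for a bucket by name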
func (m *lockedBuckets) DeattachBucket(ctx context.Context, name string) error {
m.Lock()
defer m.Unlock()
return m.db.DeattachBucket(ctx, name)
}
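// GetBucket retrieves bucket info of bucket with given name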
func (m *lockedBuckets) GetBucket(ctx context.Context, name string) (*console.Bucket, error) {
m.Lock()
defer m.Unlock()
return m.db.GetBucket(ctx, name)
}
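// ListBuckets returns bucket list of a given project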
func (m *lockedBuckets) ListBuckets(ctx context.Context, projectID uuid.UUID) ([]console.Bucket, error) {
m.Lock()
defer m.Unlock()
return m.db.ListBuckets(ctx, projectID)
}
// Close is used to close db connection
func (m *lockedConsole) Close() error {
m.Lock()
defer m.Unlock()
return m.db.Close()
}
// CreateTables is a method for creating all tables for satellitedb
func (m *lockedConsole) CreateTables() error {
m.Lock()
defer m.Unlock()
return m.db.CreateTables()
}
// ProjectMembers is a getter for ProjectMembers repository
func (m *lockedConsole) ProjectMembers() console.ProjectMembers {
m.Lock()
defer m.Unlock()
return &lockedProjectMembers{m.Locker, m.db.ProjectMembers()}
}
// lockedProjectMembers implements locking wrapper for console.ProjectMembers
type lockedProjectMembers struct {
sync.Locker
db console.ProjectMembers
}
// Delete is a method for deleting project member by memberID and projectID from the database.
func (m *lockedProjectMembers) Delete(ctx context.Context, memberID uuid.UUID, projectID uuid.UUID) error {
m.Lock()
defer m.Unlock()
return m.db.Delete(ctx, memberID, projectID)
}
// GetByMemberID is a method for querying project members from the database by memberID.
func (m *lockedProjectMembers) GetByMemberID(ctx context.Context, memberID uuid.UUID) ([]console.ProjectMember, error) {
m.Lock()
defer m.Unlock()
return m.db.GetByMemberID(ctx, memberID)
}
// GetByProjectID is a method for querying project members from the database by projectID, offset and limit.
func (m *lockedProjectMembers) GetByProjectID(ctx context.Context, projectID uuid.UUID, pagination console.Pagination) ([]console.ProjectMember, error) {
m.Lock()
defer m.Unlock()
return m.db.GetByProjectID(ctx, projectID, pagination)
}
// Insert is a method for inserting project member into the database.
func (m *lockedProjectMembers) Insert(ctx context.Context, memberID uuid.UUID, projectID uuid.UUID) (*console.ProjectMember, error) {
m.Lock()
defer m.Unlock()
return m.db.Insert(ctx, memberID, projectID)
}
// Projects is a getter for Projects repository
func (m *lockedConsole) Projects() console.Projects {
m.Lock()
defer m.Unlock()
return &lockedProjects{m.Locker, m.db.Projects()}
}
// lockedProjects implements locking wrapper for console.Projects
type lockedProjects struct {
sync.Locker
db console.Projects
}
// Delete is a method for deleting project by Id from the database.
func (m *lockedProjects) Delete(ctx context.Context, id uuid.UUID) error {
m.Lock()
defer m.Unlock()
return m.db.Delete(ctx, id)
}
// Get is a method for querying project from the database by id.
func (m *lockedProjects) Get(ctx context.Context, id uuid.UUID) (*console.Project, error) {
m.Lock()
defer m.Unlock()
return m.db.Get(ctx, id)
}
// GetAll is a method for querying all projects from the database.
func (m *lockedProjects) GetAll(ctx context.Context) ([]console.Project, error) {
m.Lock()
defer m.Unlock()
return m.db.GetAll(ctx)
}
// GetByUserID is a method for querying all projects from the database by userID.
func (m *lockedProjects) GetByUserID(ctx context.Context, userID uuid.UUID) ([]console.Project, error) {
m.Lock()
defer m.Unlock()
return m.db.GetByUserID(ctx, userID)
}
// Insert is a method for inserting project into the database.
func (m *lockedProjects) Insert(ctx context.Context, project *console.Project) (*console.Project, error) {
m.Lock()
defer m.Unlock()
return m.db.Insert(ctx, project)
}
// Update is a method for updating project entity.
func (m *lockedProjects) Update(ctx context.Context, project *console.Project) error {
m.Lock()
defer m.Unlock()
return m.db.Update(ctx, project)
}
// Users is a getter for Users repository
func (m *lockedConsole) Users() console.Users {
m.Lock()
defer m.Unlock()
return &lockedUsers{m.Locker, m.db.Users()}
}
// lockedUsers implements locking wrapper for console.Users
type lockedUsers struct {
sync.Locker
db console.Users
}
// Delete is a method for deleting user by Id from the database.
func (m *lockedUsers) Delete(ctx context.Context, id uuid.UUID) error {
m.Lock()
defer m.Unlock()
return m.db.Delete(ctx, id)
}
// Get is a method for querying user from the database by id
func (m *lockedUsers) Get(ctx context.Context, id uuid.UUID) (*console.User, error) {
m.Lock()
defer m.Unlock()
return m.db.Get(ctx, id)
}
// GetByEmail is a method for querying user by email from the database.
func (m *lockedUsers) GetByEmail(ctx context.Context, email string) (*console.User, error) {
m.Lock()
defer m.Unlock()
return m.db.GetByEmail(ctx, email)
}
// Insert is a method for inserting user into the database
func (m *lockedUsers) Insert(ctx context.Context, user *console.User) (*console.User, error) {
m.Lock()
defer m.Unlock()
return m.db.Insert(ctx, user)
}
// Update is a method for updating user entity
func (m *lockedUsers) Update(ctx context.Context, user *console.User) error {
m.Lock()
defer m.Unlock()
return m.db.Update(ctx, user)
}
// CreateTables initializes the database
func (m *locked) CreateTables() error {
m.Lock()
defer m.Unlock()
return m.db.CreateTables()
}
// Irreparable returns database for failed repairs
func (m *locked) Irreparable() irreparable.DB {
m.Lock()
defer m.Unlock()
return &lockedIrreparable{m.Locker, m.db.Irreparable()}
}
// lockedIrreparable implements locking wrapper for irreparable.DB
type lockedIrreparable struct {
sync.Locker
@ -190,6 +435,13 @@ func (m *lockedIrreparable) IncrementRepairAttempts(ctx context.Context, segment
return m.db.IncrementRepairAttempts(ctx, segmentInfo)
}
// OverlayCache returns database for caching overlay information
func (m *locked) OverlayCache() overlay.DB {
m.Lock()
defer m.Unlock()
return &lockedOverlayCache{m.Locker, m.db.OverlayCache()}
}
// lockedOverlayCache implements locking wrapper for overlay.DB
type lockedOverlayCache struct {
sync.Locker
@ -231,6 +483,13 @@ func (m *lockedOverlayCache) Update(ctx context.Context, value *pb.Node) error {
return m.db.Update(ctx, value)
}
// RepairQueue returns queue for segments that need repairing
func (m *locked) RepairQueue() queue.RepairQueue {
m.Lock()
defer m.Unlock()
return &lockedRepairQueue{m.Locker, m.db.RepairQueue()}
}
// lockedRepairQueue implements locking wrapper for queue.RepairQueue
type lockedRepairQueue struct {
sync.Locker
@ -258,6 +517,13 @@ func (m *lockedRepairQueue) Peekqueue(ctx context.Context, limit int) ([]pb.Inju
return m.db.Peekqueue(ctx, limit)
}
// StatDB returns database for storing node statistics
func (m *locked) StatDB() statdb.DB {
m.Lock()
defer m.Unlock()
return &lockedStatDB{m.Locker, m.db.StatDB()}
}
// lockedStatDB implements locking wrapper for statdb.DB
type lockedStatDB struct {
sync.Locker

View File

@ -0,0 +1,42 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.
package satellitedb
import (
"context"
"sync"
"storj.io/storj/satellite/console"
)
// BeginTx is a method for opening a transaction
func (m *lockedConsole) BeginTx(ctx context.Context) (console.DBTx, error) {
m.Lock()
db, err := m.db.BeginTx(ctx)
txlocked := &lockedConsole{&sync.Mutex{}, db}
return &lockedTx{m, txlocked, db, sync.Once{}}, err
}
// lockedTx extends Database with transaction scope
type lockedTx struct {
parent *lockedConsole
*lockedConsole
tx console.DBTx
once sync.Once
}
// Commit is a method for committing and closing transaction
func (db *lockedTx) Commit() error {
err := db.tx.Commit()
db.once.Do(db.parent.Unlock)
return err
}
// Rollback is a method for rollback and closing transaction
func (db *lockedTx) Rollback() error {
err := db.tx.Rollback()
db.once.Do(db.parent.Unlock)
return err
}
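A hedged sketch of the intended use of this wrapper: the parent lockedConsole's lock is taken in BeginTx and released exactly once by whichever of Commit or Rollback runs first. It assumes BeginTx is part of console.DB (the wrapper implements it on lockedConsole) and that console.DBTx embeds console.DB so repository getters are available on the transaction; the helper and its arguments are illustrative:
// Illustrative sketch only; assumes console.DBTx embeds console.DB.
func exampleTx(ctx context.Context, db console.DB, user *console.User) error {
	tx, err := db.BeginTx(ctx) // takes the parent console lock
	if err != nil {
		return err
	}
	if _, err = tx.Users().Insert(ctx, user); err != nil {
		_ = tx.Rollback() // releases the parent lock (once)
		return err
	}
	return tx.Commit() // releases the parent lock (once)
}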

View File

@ -11,7 +11,7 @@ import (
"storj.io/storj/pkg/utils"
"storj.io/storj/satellite/console"
dbx "storj.io/storj/satellite/satellitedb/consoledbx"
dbx "storj.io/storj/satellite/satellitedb/dbx"
)
// ProjectMembers exposes methods to manage ProjectMembers table in database.

View File

@ -11,7 +11,7 @@ import (
"storj.io/storj/pkg/utils"
"storj.io/storj/satellite/console"
dbx "storj.io/storj/satellite/satellitedb/consoledbx"
dbx "storj.io/storj/satellite/satellitedb/dbx"
)
// implementation of Projects interface repository using spacemonkeygo/dbx orm

View File

@ -10,7 +10,7 @@ import (
"storj.io/storj/internal/testcontext"
"storj.io/storj/satellite/console"
dbx "storj.io/storj/satellite/satellitedb/consoledbx"
dbx "storj.io/storj/satellite/satellitedb/dbx"
)
func TestProjectsRepository(t *testing.T) {

View File

@ -10,7 +10,7 @@ import (
"github.com/zeebo/errs"
"storj.io/storj/satellite/console"
dbx "storj.io/storj/satellite/satellitedb/consoledbx"
dbx "storj.io/storj/satellite/satellitedb/dbx"
)
// implementation of Users interface repository using spacemonkeygo/dbx orm

View File

@ -12,7 +12,7 @@ import (
"storj.io/storj/internal/testcontext"
"storj.io/storj/satellite/console"
dbx "storj.io/storj/satellite/satellitedb/consoledbx"
dbx "storj.io/storj/satellite/satellitedb/dbx"
)
func TestUserRepository(t *testing.T) {