Add bucket to project relationship on satellite (#1143)
* integrate console api keys with buckets in pointerdb
* fix test
* fix tests kvmetainfo
* linter fix
* disable account activation
* fix test
* review fixes
* fix comments
* little refactoring
* remove debug println
* fix typo
* disable activation in a proper way
* fix test
* fix imports
* fix uplink count in testplanet
* move key creation to planet.newUplink
parent b16f27c54b
commit 2ff0d9d435

cmd/storj-sim/console.go (new file, 165 lines)
@@ -0,0 +1,165 @@
+// Copyright (C) 2019 Storj Labs, Inc.
+// See LICENSE for copying information.
+
+package main
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"io/ioutil"
+	"net/http"
+
+	"github.com/zeebo/errs"
+)
+
+func graphqlDo(client *http.Client, request *http.Request, jsonResponse interface{}) error {
+	resp, err := client.Do(request)
+	if err != nil {
+		return err
+	}
+	defer func() {
+		err = resp.Body.Close()
+	}()
+
+	b, err := ioutil.ReadAll(resp.Body)
+	if err != nil {
+		return err
+	}
+
+	var response struct {
+		Data   json.RawMessage
+		Errors []interface{}
+	}
+
+	if err = json.NewDecoder(bytes.NewReader(b)).Decode(&response); err != nil {
+		return err
+	}
+
+	if response.Errors != nil {
+		return errs.New("inner graphql error")
+	}
+
+	if jsonResponse == nil {
+		return nil
+	}
+
+	return json.NewDecoder(bytes.NewReader(response.Data)).Decode(jsonResponse)
+}
+
+func addExampleProjectWithKey(key *string, address string) error {
+	client := http.Client{}
+
+	// create user
+	{
+		createUserQuery := fmt.Sprintf(
+			"mutation {createUser(input:{email:\"%s\",password:\"%s\",firstName:\"%s\",lastName:\"\"})}",
+			"example@mail.com",
+			"123a123",
+			"Alice")
+
+		request, err := http.NewRequest(
+			http.MethodPost,
+			address,
+			bytes.NewReader([]byte(createUserQuery)))
+
+		if err != nil {
+			return err
+		}
+
+		request.Header.Add("Content-Type", "application/graphql")
+
+		if err := graphqlDo(&client, request, nil); err != nil {
+			return err
+		}
+	}
+
+	// get token
+	var token struct {
+		Token struct {
+			Token string
+		}
+	}
+	{
+		tokenQuery := fmt.Sprintf(
+			"query {token(email:\"%s\",password:\"%s\"){token}}",
+			"example@mail.com",
+			"123a123")
+
+		request, err := http.NewRequest(
+			http.MethodPost,
+			address,
+			bytes.NewReader([]byte(tokenQuery)))
+
+		if err != nil {
+			return err
+		}
+
+		request.Header.Add("Content-Type", "application/graphql")
+
+		if err := graphqlDo(&client, request, &token); err != nil {
+			return err
+		}
+	}
+
+	// create project
+	var createProject struct {
+		CreateProject struct {
+			ID string
+		}
+	}
+	{
+		createProjectQuery := fmt.Sprintf(
+			"mutation {createProject(input:{name:\"%s\",description:\"\"}){id}}",
+			"TestProject")
+
+		request, err := http.NewRequest(
+			http.MethodPost,
+			address,
+			bytes.NewReader([]byte(createProjectQuery)))
+
+		if err != nil {
+			return err
+		}
+
+		request.Header.Add("Content-Type", "application/graphql")
+		request.Header.Add("Authorization", fmt.Sprintf("Bearer %s", token.Token.Token))
+
+		if err := graphqlDo(&client, request, &createProject); err != nil {
+			return err
+		}
+	}
+
+	// create api key
+	var createAPIKey struct {
+		CreateAPIKey struct {
+			Key string
+		}
+	}
+	{
+		createAPIKeyQuery := fmt.Sprintf(
+			"mutation {createAPIKey(projectID:\"%s\",name:\"%s\"){key}}",
+			createProject.CreateProject.ID,
+			"testKey")
+
+		request, err := http.NewRequest(
+			http.MethodPost,
+			address,
+			bytes.NewReader([]byte(createAPIKeyQuery)))
+
+		if err != nil {
+			return err
+		}
+
+		request.Header.Add("Content-Type", "application/graphql")
+		request.Header.Add("Authorization", fmt.Sprintf("Bearer %s", token.Token.Token))
+
+		if err := graphqlDo(&client, request, &createAPIKey); err != nil {
+			return err
+		}
+	}
+
+	// return key to the caller
+	*key = createAPIKey.CreateAPIKey.Key
+	return nil
+}
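Not part of the diff: a minimal usage sketch of the helper above. The console address, the logging, and the calling function are assumptions for illustration; inside storj-sim the real call site is the newNetwork change in the next hunk.

package main

import "log"

func exampleBootstrap() {
	// assumed address of a locally running satellite console GraphQL API
	consoleAddress := "http://127.0.0.1:10002/api/graphql/v0"

	var apiKey string
	if err := addExampleProjectWithKey(&apiKey, consoleAddress); err != nil {
		log.Fatalf("could not create example project and key: %v", err)
	}

	// apiKey now holds the key created for "TestProject"; storj-sim writes
	// this value into the gateway config as client.api-key.
	log.Println("api key:", apiKey)
}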
@@ -14,6 +14,7 @@ import (
 	"sort"
 	"strconv"
 	"strings"
+	"time"
 
 	"github.com/spf13/viper"
 	"github.com/zeebo/errs"

@@ -206,6 +207,29 @@ func newNetwork(flags *Flags) (*Processes, error) {
 
 		// TODO: maybe all the config flags should be exposed for all processes?
 
+		// check if gateway config has an api key, if it's not
+		// create example project with key and add it to the config
+		// so that gateway can have access to the satellite
+		apiKey := vip.GetString("client.api-key")
+		if apiKey == "" {
+			consoleAddress := fmt.Sprintf(
+				"http://%s/api/graphql/v0",
+				net.JoinHostPort(host, strconv.Itoa(consolePort+i)))
+
+			// wait for console server to start
+			time.Sleep(3 * time.Second)
+
+			if err := addExampleProjectWithKey(&apiKey, consoleAddress); err != nil {
+				return err
+			}
+
+			vip.Set("client.api-key", apiKey)
+
+			if err := vip.WriteConfig(); err != nil {
+				return err
+			}
+		}
+
 		accessKey := vip.GetString("minio.access-key")
 		secretKey := vip.GetString("minio.secret-key")
 
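Also not part of the diff: a hedged sketch of reading that value back the way a gateway-side process could, via the same viper config file that newNetwork writes. The config directory and file name are assumptions.

package main

import (
	"fmt"

	"github.com/spf13/viper"
)

// readClientAPIKey shows, under assumptions about the config location,
// how the client.api-key value written above can be loaded again.
func readClientAPIKey(configDir string) (string, error) {
	vip := viper.New()
	vip.SetConfigName("config") // assumed file name: config.yaml
	vip.AddConfigPath(configDir)

	if err := vip.ReadInConfig(); err != nil {
		return "", err
	}

	apiKey := vip.GetString("client.api-key")
	if apiKey == "" {
		return "", fmt.Errorf("no client.api-key found in %s", configDir)
	}
	return apiKey, nil
}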
@@ -267,7 +267,7 @@ func (planet *Planet) Shutdown() error {
 	return errlist.Err()
 }
 
-// newUplinks creates initializes uplinks
+// newUplinks creates initializes uplinks, requires peer to have at least one satellite
 func (planet *Planet) newUplinks(prefix string, count, storageNodeCount int) ([]*Uplink, error) {
 	var xs []*Uplink
 	for i := 0; i < count; i++ {

@@ -30,6 +30,7 @@ import (
 	"storj.io/storj/pkg/stream"
 	"storj.io/storj/pkg/transport"
 	"storj.io/storj/satellite"
+	"storj.io/storj/satellite/console"
 )
 
 // Uplink is a general purpose

@@ -39,6 +40,7 @@ type Uplink struct {
 	Identity         *identity.FullIdentity
 	Transport        transport.Client
 	StorageNodeCount int
+	APIKey           map[storj.NodeID]string
 }
 
 // newUplink creates a new uplink

@@ -67,6 +69,42 @@ func (planet *Planet) newUplink(name string, storageNodeCount int) (*Uplink, err
 		},
 	}
 
+	apiKeys := make(map[storj.NodeID]string)
+	for j, satellite := range planet.Satellites {
+		// TODO: find a nicer way to do this
+		// populate satellites console with example
+		// project and API key and pass that to uplinks
+		consoleDB := satellite.DB.Console()
+
+		projectName := fmt.Sprintf("%s_%d", name, j)
+		key := console.APIKeyFromBytes([]byte(projectName))
+
+		project, err := consoleDB.Projects().Insert(
+			context.Background(),
+			&console.Project{
+				Name: projectName,
+			},
+		)
+		if err != nil {
+			return nil, err
+		}
+
+		_, err = consoleDB.APIKeys().Create(
+			context.Background(),
+			*key,
+			console.APIKeyInfo{
+				Name:      "root",
+				ProjectID: project.ID,
+			},
+		)
+		if err != nil {
+			return nil, err
+		}
+
+		apiKeys[satellite.ID()] = key.String()
+	}
+
+	uplink.APIKey = apiKeys
 	planet.uplinks = append(planet.uplinks, uplink)
 
 	return uplink, nil

@@ -208,7 +246,7 @@ func (uplink *Uplink) getMetainfo(satellite *satellite.Peer) (db storj.Metainfo,
 		return nil, nil, err
 	}
 
-	pdb, err := uplink.DialPointerDB(satellite, "") // TODO pass api key?
+	pdb, err := uplink.DialPointerDB(satellite, uplink.APIKey[satellite.ID()]) // TODO pass api key?
 	if err != nil {
 		return nil, nil, err
 	}
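A short sketch (not in the diff) of how a test can consume the per-satellite key map added to Uplink above; it assumes an uplink and a satellite created by the same testplanet instance and a *testing.T in scope.

// each uplink now carries one API key per satellite, created in newUplink
apiKey := uplink.APIKey[satellite.ID()]
if apiKey == "" {
	t.Fatal("expected an API key for this satellite")
}

// the same value is what getMetainfo now hands to DialPointerDB
pdb, err := uplink.DialPointerDB(satellite, apiKey)
if err != nil {
	t.Fatal(err)
}
_ = pdb // use the client as usual; requests now carry the key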
@@ -9,6 +9,8 @@ import (
 	"fmt"
 	"testing"
 
+	"storj.io/storj/satellite/console"
+
 	"github.com/stretchr/testify/assert"
 	"github.com/vivint/infectious"
 

@@ -25,11 +27,12 @@ import (
 )
 
 const (
-	TestAPIKey = "test-api-key"
 	TestEncKey = "test-encryption-key"
 	TestBucket = "test-bucket"
 )
 
+var TestAPIKey = "test-api-key"
+
 func TestBucketsBasic(t *testing.T) {
 	runTest(t, func(ctx context.Context, db *kvmetainfo.DB, buckets buckets.Store, streams streams.Store) {
 		// Create new bucket

@@ -331,7 +334,30 @@ func runTest(t *testing.T, test func(context.Context, *kvmetainfo.DB, buckets.St
 
 func newMetainfoParts(planet *testplanet.Planet) (*kvmetainfo.DB, buckets.Store, streams.Store, error) {
 	// TODO(kaloyan): We should have a better way for configuring the Satellite's API Key
-	err := flag.Set("pointer-db.auth.api-key", TestAPIKey)
+	// add project to satisfy constraint
+	project, err := planet.Satellites[0].DB.Console().Projects().Insert(context.Background(), &console.Project{
+		Name: "testProject",
+	})
+
+	if err != nil {
+		return nil, nil, nil, err
+	}
+
+	apiKey := console.APIKey{}
+	apiKeyInfo := console.APIKeyInfo{
+		ProjectID: project.ID,
+		Name:      "testKey",
+	}
+
+	// add api key to db
+	_, err = planet.Satellites[0].DB.Console().APIKeys().Create(context.Background(), apiKey, apiKeyInfo)
+	if err != nil {
+		return nil, nil, nil, err
+	}
+
+	TestAPIKey = apiKey.String()
+
+	err = flag.Set("pointer-db.auth.api-key", TestAPIKey)
 	if err != nil {
 		return nil, nil, nil, err
 	}
@@ -30,10 +30,10 @@ import (
 	"storj.io/storj/pkg/storage/segments"
 	"storj.io/storj/pkg/storage/streams"
 	"storj.io/storj/pkg/storj"
+	"storj.io/storj/satellite/console"
 )
 
 const (
-	TestAPIKey = "test-api-key"
 	TestEncKey = "test-encryption-key"
 	TestBucket = "test-bucket"
 	TestFile   = "test-file"

@@ -41,6 +41,8 @@ const (
 	DestFile = "dest-file"
 )
 
+var TestAPIKey = "test-api-key"
+
 func TestMakeBucketWithLocation(t *testing.T) {
 	runTest(t, func(ctx context.Context, layer minio.ObjectLayer, metainfo storj.Metainfo, streams streams.Store) {
 		// Check the error when creating bucket with empty name

@@ -656,7 +658,30 @@ func runTest(t *testing.T, test func(context.Context, minio.ObjectLayer, storj.M
 
 func initEnv(planet *testplanet.Planet) (minio.ObjectLayer, storj.Metainfo, streams.Store, error) {
 	// TODO(kaloyan): We should have a better way for configuring the Satellite's API Key
-	err := flag.Set("pointer-db.auth.api-key", TestAPIKey)
+	// add project to satisfy constraint
+	project, err := planet.Satellites[0].DB.Console().Projects().Insert(context.Background(), &console.Project{
+		Name: "testProject",
+	})
+
+	if err != nil {
+		return nil, nil, nil, err
+	}
+
+	apiKey := console.APIKey{}
+	apiKeyInfo := console.APIKeyInfo{
+		ProjectID: project.ID,
+		Name:      "testKey",
+	}
+
+	// add api key to db
+	_, err = planet.Satellites[0].DB.Console().APIKeys().Create(context.Background(), apiKey, apiKeyInfo)
+	if err != nil {
+		return nil, nil, nil, err
+	}
+
+	TestAPIKey = apiKey.String()
+
+	err = flag.Set("pointer-db.auth.api-key", TestAPIKey)
 	if err != nil {
 		return nil, nil, nil, err
 	}
@@ -25,6 +25,7 @@ import (
 	"storj.io/storj/pkg/cfgstruct"
 	"storj.io/storj/pkg/identity"
 	"storj.io/storj/pkg/miniogw"
+	"storj.io/storj/satellite/console"
 )
 
 func TestUploadDownload(t *testing.T) {

@@ -38,7 +39,24 @@ func TestUploadDownload(t *testing.T) {
 
 	defer ctx.Check(planet.Shutdown)
 
-	err = flag.Set("pointer-db.auth.api-key", "apiKey")
+	// add project to satisfy constraint
+	project, err := planet.Satellites[0].DB.Console().Projects().Insert(context.Background(), &console.Project{
+		Name: "testProject",
+	})
+
+	assert.NoError(t, err)
+
+	apiKey := console.APIKey{}
+	apiKeyInfo := console.APIKeyInfo{
+		ProjectID: project.ID,
+		Name:      "testKey",
+	}
+
+	// add api key to db
+	_, err = planet.Satellites[0].DB.Console().APIKeys().Create(context.Background(), apiKey, apiKeyInfo)
+	assert.NoError(t, err)
+
+	err = flag.Set("pointer-db.auth.api-key", apiKey.String())
 	assert.NoError(t, err)
 
 	// bind default values to config
@@ -16,7 +16,9 @@ import (
 	"storj.io/storj/pkg/identity"
 	"storj.io/storj/pkg/overlay"
 	"storj.io/storj/pkg/pb"
-	pointerdbAuth "storj.io/storj/pkg/pointerdb/auth"
+	_ "storj.io/storj/pkg/pointerdb/auth" // ensures that we add api key flag to current executable
+	"storj.io/storj/pkg/storj"
+	"storj.io/storj/satellite/console"
 	"storj.io/storj/storage"
 )
 

@@ -25,6 +27,11 @@ var (
 	segmentError = errs.Class("segment error")
 )
 
+// APIKeys is api keys store methods used by pointerdb
+type APIKeys interface {
+	GetByKey(ctx context.Context, key console.APIKey) (*console.APIKeyInfo, error)
+}
+
 // Server implements the network state RPC service
 type Server struct {
 	logger *zap.Logger

@@ -33,10 +40,11 @@ type Server struct {
 	cache    *overlay.Cache
 	config   Config
 	identity *identity.FullIdentity
+	apiKeys  APIKeys
 }
 
 // NewServer creates instance of Server
-func NewServer(logger *zap.Logger, service *Service, allocation *AllocationSigner, cache *overlay.Cache, config Config, identity *identity.FullIdentity) *Server {
+func NewServer(logger *zap.Logger, service *Service, allocation *AllocationSigner, cache *overlay.Cache, config Config, identity *identity.FullIdentity, apiKeys APIKeys) *Server {
 	return &Server{
 		logger:  logger,
 		service: service,

@@ -44,27 +52,33 @@ func NewServer(logger *zap.Logger, service *Service, allocation *AllocationSigne
 		cache:    cache,
 		config:   config,
 		identity: identity,
+		apiKeys:  apiKeys,
 	}
 }
 
 // Close closes resources
 func (s *Server) Close() error { return nil }
 
-// TODO: ZZZ temporarily disabled until endpoint and service split
-const disableAuth = true
-
-func (s *Server) validateAuth(ctx context.Context) error {
-	// TODO: ZZZ temporarily disabled until endpoint and service split
-	if disableAuth {
-		return nil
-	}
-
+func (s *Server) validateAuth(ctx context.Context) (*console.APIKeyInfo, error) {
 	APIKey, ok := auth.GetAPIKey(ctx)
-	if !ok || !pointerdbAuth.ValidateAPIKey(string(APIKey)) {
+	if !ok {
 		s.logger.Error("unauthorized request: ", zap.Error(status.Errorf(codes.Unauthenticated, "Invalid API credential")))
-		return status.Errorf(codes.Unauthenticated, "Invalid API credential")
+		return nil, status.Errorf(codes.Unauthenticated, "Invalid API credential")
 	}
-	return nil
+
+	key, err := console.APIKeyFromBase64(string(APIKey))
+	if err != nil {
+		s.logger.Error("unauthorized request: ", zap.Error(status.Errorf(codes.Unauthenticated, "Invalid API credential")))
+		return nil, status.Errorf(codes.Unauthenticated, "Invalid API credential")
+	}
+
+	keyInfo, err := s.apiKeys.GetByKey(ctx, *key)
+	if err != nil {
+		s.logger.Error("unauthorized request: ", zap.Error(status.Errorf(codes.Unauthenticated, err.Error())))
+		return nil, status.Errorf(codes.Unauthenticated, "Invalid API credential")
+	}
+
+	return keyInfo, nil
 }
 
 func (s *Server) validateSegment(req *pb.PutRequest) error {
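The handler changes in the next hunks all follow one pattern: validateAuth now returns the console.APIKeyInfo of the caller, and its ProjectID is joined onto every pointer path, so each project gets its own namespace in pointerdb. A standalone sketch of that prefixing, not part of the diff (the project ID value is made up):

package main

import (
	"fmt"

	"storj.io/storj/pkg/storj"
)

func main() {
	// stands in for keyInfo.ProjectID.String()
	projectID := "11111111-2222-3333-4444-555555555555"
	requestPath := "test-bucket/test-file"

	// what Put/Get/List/Delete/Iterate now pass to the underlying service
	fmt.Println(storj.JoinPaths(projectID, requestPath))
	// -> 11111111-2222-3333-4444-555555555555/test-bucket/test-file
}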
@@ -95,11 +109,13 @@ func (s *Server) Put(ctx context.Context, req *pb.PutRequest) (resp *pb.PutRespo
 		return nil, status.Errorf(codes.InvalidArgument, err.Error())
 	}
 
-	if err = s.validateAuth(ctx); err != nil {
+	keyInfo, err := s.validateAuth(ctx)
+	if err != nil {
 		return nil, err
 	}
 
-	if err = s.service.Put(req.GetPath(), req.GetPointer()); err != nil {
+	path := storj.JoinPaths(keyInfo.ProjectID.String(), req.GetPath())
+	if err = s.service.Put(path, req.GetPointer()); err != nil {
 		s.logger.Error("err putting pointer", zap.Error(err))
 		return nil, status.Errorf(codes.Internal, err.Error())
 	}

@@ -111,11 +127,13 @@ func (s *Server) Put(ctx context.Context, req *pb.PutRequest) (resp *pb.PutRespo
 func (s *Server) Get(ctx context.Context, req *pb.GetRequest) (resp *pb.GetResponse, err error) {
 	defer mon.Task()(&ctx)(&err)
 
-	if err = s.validateAuth(ctx); err != nil {
+	keyInfo, err := s.validateAuth(ctx)
+	if err != nil {
 		return nil, err
 	}
 
-	pointer, err := s.service.Get(req.GetPath())
+	path := storj.JoinPaths(keyInfo.ProjectID.String(), req.GetPath())
+	pointer, err := s.service.Get(path)
 	if err != nil {
 		if storage.ErrKeyNotFound.Has(err) {
 			return nil, status.Errorf(codes.NotFound, err.Error())

@@ -176,11 +194,13 @@ func (s *Server) Get(ctx context.Context, req *pb.GetRequest) (resp *pb.GetRespo
 func (s *Server) List(ctx context.Context, req *pb.ListRequest) (resp *pb.ListResponse, err error) {
 	defer mon.Task()(&ctx)(&err)
 
-	if err = s.validateAuth(ctx); err != nil {
+	keyInfo, err := s.validateAuth(ctx)
+	if err != nil {
 		return nil, err
 	}
 
-	items, more, err := s.service.List(req.Prefix, req.StartAfter, req.EndBefore, req.Recursive, req.Limit, req.MetaFlags)
+	prefix := storj.JoinPaths(keyInfo.ProjectID.String(), req.Prefix)
+	items, more, err := s.service.List(prefix, req.StartAfter, req.EndBefore, req.Recursive, req.Limit, req.MetaFlags)
 	if err != nil {
 		return nil, status.Errorf(codes.Internal, "ListV2: %v", err)
 	}

@@ -192,11 +212,13 @@ func (s *Server) List(ctx context.Context, req *pb.ListRequest) (resp *pb.ListRe
 func (s *Server) Delete(ctx context.Context, req *pb.DeleteRequest) (resp *pb.DeleteResponse, err error) {
 	defer mon.Task()(&ctx)(&err)
 
-	if err = s.validateAuth(ctx); err != nil {
+	keyInfo, err := s.validateAuth(ctx)
+	if err != nil {
 		return nil, err
 	}
 
-	err = s.service.Delete(req.GetPath())
+	path := storj.JoinPaths(keyInfo.ProjectID.String(), req.GetPath())
+	err = s.service.Delete(path)
 	if err != nil {
 		s.logger.Error("err deleting path and pointer", zap.Error(err))
 		return nil, status.Errorf(codes.Internal, err.Error())

@@ -209,18 +231,21 @@ func (s *Server) Delete(ctx context.Context, req *pb.DeleteRequest) (resp *pb.De
 func (s *Server) Iterate(ctx context.Context, req *pb.IterateRequest, f func(it storage.Iterator) error) (err error) {
 	defer mon.Task()(&ctx)(&err)
 
-	if err = s.validateAuth(ctx); err != nil {
+	keyInfo, err := s.validateAuth(ctx)
+	if err != nil {
 		return err
 	}
 
-	return s.service.Iterate(req.Prefix, req.First, req.Recurse, req.Reverse, f)
+	prefix := storj.JoinPaths(keyInfo.ProjectID.String(), req.Prefix)
+	return s.service.Iterate(prefix, req.First, req.Recurse, req.Reverse, f)
 }
 
 // PayerBandwidthAllocation returns PayerBandwidthAllocation struct, signed and with given action type
 func (s *Server) PayerBandwidthAllocation(ctx context.Context, req *pb.PayerBandwidthAllocationRequest) (res *pb.PayerBandwidthAllocationResponse, err error) {
 	defer mon.Task()(&ctx)(&err)
 
-	if err = s.validateAuth(ctx); err != nil {
+	_, err = s.validateAuth(ctx)
+	if err != nil {
 		return nil, err
 	}
 

@@ -25,17 +25,33 @@ import (
 	"storj.io/storj/pkg/auth"
 	"storj.io/storj/pkg/pb"
 	"storj.io/storj/pkg/storage/meta"
+	"storj.io/storj/pkg/storj"
+	"storj.io/storj/satellite/console"
 	"storj.io/storj/storage"
 	"storj.io/storj/storage/teststore"
 )
 
+// mockAPIKeys is mock for api keys store of pointerdb
+type mockAPIKeys struct {
+	info console.APIKeyInfo
+	err  error
+}
+
+// GetByKey return api key info for given key
+func (keys *mockAPIKeys) GetByKey(ctx context.Context, key console.APIKey) (*console.APIKeyInfo, error) {
+	return &keys.info, keys.err
+}
+
 func TestServicePut(t *testing.T) {
+	validAPIKey := console.APIKey{}
+	apiKeys := &mockAPIKeys{}
+
 	for i, tt := range []struct {
 		apiKey    []byte
 		err       error
 		errString string
 	}{
-		{nil, nil, ""},
+		{[]byte(validAPIKey.String()), nil, ""},
 		{[]byte("wrong key"), nil, status.Errorf(codes.Unauthenticated, "Invalid API credential").Error()},
 		{nil, errors.New("put error"), status.Errorf(codes.Internal, "internal error").Error()},
 	} {

@@ -46,7 +62,7 @@ func TestServicePut(t *testing.T) {
 
 		db := teststore.New()
 		service := NewService(zap.NewNop(), db)
-		s := Server{service: service, logger: zap.NewNop()}
+		s := Server{service: service, logger: zap.NewNop(), apiKeys: apiKeys}
 
 		path := "a/b/c"
 		pr := pb.Pointer{}

@@ -79,12 +95,15 @@ func TestServiceGet(t *testing.T) {
 
 	info := credentials.TLSInfo{State: tls.ConnectionState{PeerCertificates: peerCertificates}}
 
+	validAPIKey := console.APIKey{}
+	apiKeys := &mockAPIKeys{}
+
 	for i, tt := range []struct {
 		apiKey    []byte
 		err       error
 		errString string
 	}{
-		{nil, nil, ""},
+		{[]byte(validAPIKey.String()), nil, ""},
 		{[]byte("wrong key"), nil, status.Errorf(codes.Unauthenticated, "Invalid API credential").Error()},
 		{nil, errors.New("get error"), status.Errorf(codes.Internal, "internal error").Error()},
 	} {

@@ -96,7 +115,8 @@ func TestServiceGet(t *testing.T) {
 		db := teststore.New()
 		service := NewService(zap.NewNop(), db)
 		allocation := NewAllocationSigner(identity, 45)
-		s := NewServer(zap.NewNop(), service, allocation, nil, Config{}, identity)
+
+		s := NewServer(zap.NewNop(), service, allocation, nil, Config{}, identity, apiKeys)
 
 		path := "a/b/c"

@@ -104,7 +124,7 @@ func TestServiceGet(t *testing.T) {
 		prBytes, err := proto.Marshal(pr)
 		assert.NoError(t, err, errTag)
 
-		_ = db.Put(storage.Key(path), storage.Value(prBytes))
+		_ = db.Put(storage.Key(storj.JoinPaths(apiKeys.info.ProjectID.String(), path)), storage.Value(prBytes))
 
 		if tt.err != nil {
 			db.ForceError++

@@ -127,12 +147,15 @@ func TestServiceGet(t *testing.T) {
 	}
 }
 
 func TestServiceDelete(t *testing.T) {
+	validAPIKey := console.APIKey{}
+	apiKeys := &mockAPIKeys{}
+
 	for i, tt := range []struct {
 		apiKey    []byte
 		err       error
 		errString string
 	}{
-		{nil, nil, ""},
+		{[]byte(validAPIKey.String()), nil, ""},
 		{[]byte("wrong key"), nil, status.Errorf(codes.Unauthenticated, "Invalid API credential").Error()},
 		{nil, errors.New("delete error"), status.Errorf(codes.Internal, "internal error").Error()},
 	} {

@@ -144,9 +167,9 @@ func TestServiceDelete(t *testing.T) {
 		path := "a/b/c"
 
 		db := teststore.New()
-		_ = db.Put(storage.Key(path), storage.Value("hello"))
+		_ = db.Put(storage.Key(storj.JoinPaths(apiKeys.info.ProjectID.String(), path)), storage.Value("hello"))
 		service := NewService(zap.NewNop(), db)
-		s := Server{service: service, logger: zap.NewNop()}
+		s := Server{service: service, logger: zap.NewNop(), apiKeys: apiKeys}
 
 		if tt.err != nil {
 			db.ForceError++

@@ -164,9 +187,12 @@ func TestServiceDelete(t *testing.T) {
 }
 
 func TestServiceList(t *testing.T) {
+	validAPIKey := console.APIKey{}
+	apiKeys := &mockAPIKeys{}
+
 	db := teststore.New()
 	service := NewService(zap.NewNop(), db)
-	server := Server{service: service, logger: zap.NewNop()}
+	server := Server{service: service, logger: zap.NewNop(), apiKeys: apiKeys}
 
 	pointer := &pb.Pointer{}
 	pointer.CreationDate = ptypes.TimestampNow()

@@ -177,7 +203,7 @@ func TestServiceList(t *testing.T) {
 	}
 	pointerValue := storage.Value(pointerBytes)
 
-	err = storage.PutAll(db, []storage.ListItem{
+	items := []storage.ListItem{
 		{Key: storage.Key("sample.😶"), Value: pointerValue},
 		{Key: storage.Key("müsic"), Value: pointerValue},
 		{Key: storage.Key("müsic/söng1.mp3"), Value: pointerValue},

@@ -185,7 +211,13 @@ func TestServiceList(t *testing.T) {
 		{Key: storage.Key("müsic/album/söng3.mp3"), Value: pointerValue},
 		{Key: storage.Key("müsic/söng4.mp3"), Value: pointerValue},
 		{Key: storage.Key("ビデオ/movie.mkv"), Value: pointerValue},
-	}...)
+	}
+
+	for i := range items {
+		items[i].Key = storage.Key(storj.JoinPaths(apiKeys.info.ProjectID.String(), items[i].Key.String()))
+	}
+
+	err = storage.PutAll(db, items...)
 	if err != nil {
 		t.Fatal(err)
 	}

@@ -210,6 +242,7 @@ func TestServiceList(t *testing.T) {
 
 	tests := []Test{
 		{
+			APIKey:  validAPIKey.String(),
 			Request: pb.ListRequest{Recursive: true},
 			Expected: &pb.ListResponse{
 				Items: []*pb.ListResponse_Item{

@@ -223,6 +256,7 @@ func TestServiceList(t *testing.T) {
 				},
 			},
 		}, {
+			APIKey:  validAPIKey.String(),
 			Request: pb.ListRequest{Recursive: true, MetaFlags: meta.All},
 			Expected: &pb.ListResponse{
 				Items: []*pb.ListResponse_Item{

@@ -242,6 +276,7 @@ func TestServiceList(t *testing.T) {
 		// 	Error: errorWithCode(codes.Unauthenticated),
 		// },
 		{
+			APIKey:  validAPIKey.String(),
 			Request: pb.ListRequest{Recursive: true, Limit: 3},
 			Expected: &pb.ListResponse{
 				Items: []*pb.ListResponse_Item{

@@ -252,6 +287,7 @@ func TestServiceList(t *testing.T) {
 				More: true,
 			},
 		}, {
+			APIKey:  validAPIKey.String(),
 			Request: pb.ListRequest{MetaFlags: meta.All},
 			Expected: &pb.ListResponse{
 				Items: []*pb.ListResponse_Item{

@@ -263,6 +299,7 @@ func TestServiceList(t *testing.T) {
 				More: false,
 			},
 		}, {
+			APIKey:  validAPIKey.String(),
 			Request: pb.ListRequest{EndBefore: "ビデオ"},
 			Expected: &pb.ListResponse{
 				Items: []*pb.ListResponse_Item{

@@ -273,6 +310,7 @@ func TestServiceList(t *testing.T) {
 				More: false,
 			},
 		}, {
+			APIKey:  validAPIKey.String(),
 			Request: pb.ListRequest{Recursive: true, Prefix: "müsic/"},
 			Expected: &pb.ListResponse{
 				Items: []*pb.ListResponse_Item{

@@ -283,6 +321,7 @@ func TestServiceList(t *testing.T) {
 				},
 			},
 		}, {
+			APIKey:  validAPIKey.String(),
 			Request: pb.ListRequest{Recursive: true, Prefix: "müsic/", StartAfter: "album/söng3.mp3"},
 			Expected: &pb.ListResponse{
 				Items: []*pb.ListResponse_Item{

@@ -292,6 +331,7 @@ func TestServiceList(t *testing.T) {
 				},
 			},
 		}, {
+			APIKey:  validAPIKey.String(),
 			Request: pb.ListRequest{Prefix: "müsic/"},
 			Expected: &pb.ListResponse{
 				Items: []*pb.ListResponse_Item{

@@ -302,6 +342,7 @@ func TestServiceList(t *testing.T) {
 				},
 			},
 		}, {
+			APIKey:  validAPIKey.String(),
 			Request: pb.ListRequest{Prefix: "müsic/", StartAfter: "söng1.mp3"},
 			Expected: &pb.ListResponse{
 				Items: []*pb.ListResponse_Item{

@@ -310,6 +351,7 @@ func TestServiceList(t *testing.T) {
 				},
 			},
 		}, {
+			APIKey:  validAPIKey.String(),
 			Request: pb.ListRequest{Prefix: "müsic/", EndBefore: "söng4.mp3"},
 			Expected: &pb.ListResponse{
 				Items: []*pb.ListResponse_Item{

@@ -319,6 +361,7 @@ func TestServiceList(t *testing.T) {
 				},
 			},
 		}, {
+			APIKey:  validAPIKey.String(),
 			Request: pb.ListRequest{Prefix: "müs", Recursive: true, EndBefore: "ic/söng4.mp3", Limit: 1},
 			Expected: &pb.ListResponse{
 				Items: []*pb.ListResponse_Item{
@@ -4,7 +4,6 @@
 package console
 
 import (
-	"bytes"
 	"context"
 	"crypto/rand"
 	"encoding/base64"

@@ -48,11 +47,6 @@ type APIKey [24]byte
 
 // String implements Stringer
 func (key APIKey) String() string {
-	emptyKey := APIKey{}
-	if bytes.Equal(key[:], emptyKey[:]) {
-		return ""
-	}
-
 	return base64.URLEncoding.EncodeToString(key[:])
 }
 

@@ -63,6 +57,17 @@ func APIKeyFromBytes(b []byte) *APIKey {
 	return key
 }
 
+// APIKeyFromBase64 creates new key from base64 string
+func APIKeyFromBase64(s string) (*APIKey, error) {
+	b, err := base64.URLEncoding.DecodeString(s)
+	if err != nil {
+		return nil, err
+	}
+	key := new(APIKey)
+	copy(key[:], b)
+	return key, nil
+}
+
 // CreateAPIKey creates new api key
 func CreateAPIKey() (*APIKey, error) {
 	key := new(APIKey)

@@ -1,32 +0,0 @@
-// Copyright (C) 2019 Storj Labs, Inc.
-// See LICENSE for copying information.
-
-package console
-
-import (
-	"context"
-	"time"
-
-	"github.com/skyrings/skyring-common/tools/uuid"
-)
-
-// Buckets is interface for working with bucket to project relations
-type Buckets interface {
-	// ListBuckets returns bucket list of a given project
-	ListBuckets(ctx context.Context, projectID uuid.UUID) ([]Bucket, error)
-	// GetBucket retrieves bucket info of bucket with given name
-	GetBucket(ctx context.Context, name string) (*Bucket, error)
-	// AttachBucket attaches a bucket to a project
-	AttachBucket(ctx context.Context, name string, projectID uuid.UUID) (*Bucket, error)
-	// DeattachBucket deletes bucket info for a bucket by name
-	DeattachBucket(ctx context.Context, name string) error
-}
-
-// Bucket represents bucket to project relationship
-type Bucket struct {
-	Name string
-
-	ProjectID uuid.UUID
-
-	CreatedAt time.Time
-}
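A round-trip sketch (not in the diff) tying the APIKey helpers above together: keys created from bytes encode to URL-safe base64 via String, and APIKeyFromBase64 is what the pointerdb endpoint uses to turn the request header back into a key for lookup.

package main

import (
	"fmt"

	"storj.io/storj/satellite/console"
)

func main() {
	// deterministic key from a name, as testplanet's newUplink now does
	key := console.APIKeyFromBytes([]byte("TestProject_0"))

	encoded := key.String() // URL-safe base64 of the 24 raw bytes

	decoded, err := console.APIKeyFromBase64(encoded)
	if err != nil {
		panic(err)
	}

	fmt.Println(*decoded == *key) // true: APIKey is a comparable [24]byte
}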
@@ -69,6 +69,9 @@ func TestGrapqhlMutation(t *testing.T) {
 	}
 
 	t.Run("Activate account mutation", func(t *testing.T) {
+		t.Skip("skip it until we will have activation flow ready")
+
+		//TODO(yar): skip it until we will have activation flow ready
 		activationToken, err := service.GenerateActivationToken(
 			ctx,
 			rootUser.ID,

@@ -374,6 +377,10 @@ func TestGrapqhlMutation(t *testing.T) {
 		t.Fatal(err, project)
 	}
 
+	t.Run("Activation", func(t *testing.T) {
+		t.Skip("skip it until we will have activation flow ready")
+
+		//TODO(yar): skip it until we will have activation flow ready
 		activationToken1, err := service.GenerateActivationToken(
 			ctx,
 			user1.ID,

@@ -388,6 +395,7 @@ func TestGrapqhlMutation(t *testing.T) {
 			t.Fatal(err, project)
 		}
 		user1.Email = "u1@email.net"
+	})
 
 	user2, err := service.CreateUser(authCtx, console.CreateUser{
 		UserInfo: console.UserInfo{

@@ -396,9 +404,15 @@ func TestGrapqhlMutation(t *testing.T) {
 		},
 		Password: "123a123",
 	})
+
 	if err != nil {
 		t.Fatal(err, project)
 	}
+
+	t.Run("Activation", func(t *testing.T) {
+		t.Skip("skip it until we will have activation flow ready")
+
+		//TODO(yar): skip it until we will have activation flow ready
 		activationToken2, err := service.GenerateActivationToken(
 			ctx,
 			user2.ID,

@@ -413,6 +427,7 @@ func TestGrapqhlMutation(t *testing.T) {
 			t.Fatal(err, project)
 		}
 		user2.Email = "u2@email.net"
+	})
 
 	t.Run("Add project members mutation", func(t *testing.T) {
 		query := fmt.Sprintf(
@@ -66,6 +66,10 @@ func TestGraphqlQuery(t *testing.T) {
 		t.Fatal(err)
 	}
 
+	t.Run("Activation", func(t *testing.T) {
+		t.Skip("skip it until we will have activation flow ready")
+
+		//TODO(yar): skip it until we will have activation flow ready
 		activationToken, err := service.GenerateActivationToken(
 			ctx,
 			rootUser.ID,

@@ -80,6 +84,7 @@ func TestGraphqlQuery(t *testing.T) {
 			t.Fatal(err)
 		}
 		rootUser.Email = "mtest@email.com"
+	})
 
 	token, err := service.Token(ctx, createUser.Email, createUser.Password)
 	if err != nil {

@@ -191,9 +196,15 @@ func TestGraphqlQuery(t *testing.T) {
 		},
 		Password: "123a123",
 	})
+
 	if err != nil {
 		t.Fatal(err)
 	}
+
+	t.Run("Activation", func(t *testing.T) {
+		t.Skip("skip it until we will have activation flow ready")
+
+		//TODO(yar): skip it until we will have activation flow ready
 		activationToken1, err := service.GenerateActivationToken(
 			ctx,
 			user1.ID,

@@ -209,6 +220,8 @@ func TestGraphqlQuery(t *testing.T) {
 		}
 		user1.Email = "muu1@email.com"
 
+	})
+
 	user2, err := service.CreateUser(authCtx, console.CreateUser{
 		UserInfo: console.UserInfo{
 			FirstName: "Dubas",

@@ -217,9 +230,15 @@ func TestGraphqlQuery(t *testing.T) {
 		},
 		Password: "123a123",
 	})
+
 	if err != nil {
 		t.Fatal(err)
 	}
+
+	t.Run("Activation", func(t *testing.T) {
+		t.Skip("skip it until we will have activation flow ready")
+
+		//TODO(yar): skip it until we will have activation flow ready
 		activationToken2, err := service.GenerateActivationToken(
 			ctx,
 			user2.ID,

@@ -234,6 +253,7 @@ func TestGraphqlQuery(t *testing.T) {
 			t.Fatal(err)
 		}
 		user2.Email = "muu2@email.com"
+	})
 
 	err = service.AddProjectMembers(authCtx, createdProject.ID, []string{
 		user1.Email,
@@ -15,8 +15,6 @@ type DB interface {
 	ProjectMembers() ProjectMembers
 	// APIKeys is a getter for APIKeys repository
 	APIKeys() APIKeys
-	// Buckets is a getter for Buckets repository
-	Buckets() Buckets
 
 	// CreateTables is a method for creating all tables for satellitedb
 	CreateTables() error

@@ -130,7 +130,7 @@ func TestProjectsRepository(t *testing.T) {
 
 		newProject2 := &console.Project{
 			Description: description,
-			Name:        name,
+			Name:        name + "2",
 		}
 
 		_, err = projects.Insert(ctx, newProject2)

@@ -76,6 +76,7 @@ func (s *Service) CreateUser(ctx context.Context, user CreateUser) (u *User, err
 	}
 
 	u, err = s.store.Users().Insert(ctx, &User{
+		Email:        user.Email,
 		FirstName:    user.FirstName,
 		LastName:     user.LastName,
 		PasswordHash: hash,

@@ -83,6 +84,9 @@ func (s *Service) CreateUser(ctx context.Context, user CreateUser) (u *User, err
 
 	// TODO: send "finish registration email" when email service will be ready
 	//activationToken, err := s.GenerateActivationToken(ctx, u.ID, email, u.CreatedAt.Add(tokenExpirationTime))
+	//if err != nil {
+	//	return nil, err
+	//}
 
 	return u, err
 }
@ -286,7 +286,13 @@ func New(log *zap.Logger, full *identity.FullIdentity, db DB, config *Config) (*
|
|||||||
peer.Metainfo.Database = storelogger.New(peer.Log.Named("pdb"), db)
|
peer.Metainfo.Database = storelogger.New(peer.Log.Named("pdb"), db)
|
||||||
peer.Metainfo.Service = pointerdb.NewService(peer.Log.Named("pointerdb"), peer.Metainfo.Database)
|
peer.Metainfo.Service = pointerdb.NewService(peer.Log.Named("pointerdb"), peer.Metainfo.Database)
|
||||||
peer.Metainfo.Allocation = pointerdb.NewAllocationSigner(peer.Identity, config.PointerDB.BwExpiration)
|
peer.Metainfo.Allocation = pointerdb.NewAllocationSigner(peer.Identity, config.PointerDB.BwExpiration)
|
||||||
peer.Metainfo.Endpoint = pointerdb.NewServer(peer.Log.Named("pointerdb:endpoint"), peer.Metainfo.Service, peer.Metainfo.Allocation, peer.Overlay.Service, config.PointerDB, peer.Identity)
|
peer.Metainfo.Endpoint = pointerdb.NewServer(peer.Log.Named("pointerdb:endpoint"),
|
||||||
|
peer.Metainfo.Service,
|
||||||
|
peer.Metainfo.Allocation,
|
||||||
|
peer.Overlay.Service,
|
||||||
|
config.PointerDB,
|
||||||
|
peer.Identity, peer.DB.Console().APIKeys())
|
||||||
|
|
||||||
pb.RegisterPointerDBServer(peer.Public.Server.GRPC(), peer.Metainfo.Endpoint)
|
pb.RegisterPointerDBServer(peer.Public.Server.GRPC(), peer.Metainfo.Endpoint)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -1,92 +0,0 @@
|
|||||||
// Copyright (C) 2019 Storj Labs, Inc.
|
|
||||||
// See LICENSE for copying information.
|
|
||||||
|
|
||||||
package satellitedb
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
|
|
||||||
"github.com/skyrings/skyring-common/tools/uuid"
|
|
||||||
"github.com/zeebo/errs"
|
|
||||||
|
|
||||||
"storj.io/storj/satellite/console"
|
|
||||||
dbx "storj.io/storj/satellite/satellitedb/dbx"
|
|
||||||
)
|
|
||||||
|
|
||||||
type buckets struct {
|
|
||||||
db dbx.Methods
|
|
||||||
}
|
|
||||||
|
|
||||||
// ListBuckets returns bucket list of a given project
|
|
||||||
func (buck *buckets) ListBuckets(ctx context.Context, projectID uuid.UUID) ([]console.Bucket, error) {
|
|
||||||
buckets, err := buck.db.All_BucketInfo_By_ProjectId_OrderBy_Asc_Name(
|
|
||||||
ctx,
|
|
||||||
dbx.BucketInfo_ProjectId(projectID[:]),
|
|
||||||
)
|
|
||||||
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
var consoleBuckets []console.Bucket
|
|
||||||
for _, bucket := range buckets {
|
|
||||||
consoleBucket, bucketErr := fromDBXBucket(bucket)
|
|
||||||
if err != nil {
|
|
||||||
err = errs.Combine(err, bucketErr)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
consoleBuckets = append(consoleBuckets, *consoleBucket)
|
|
||||||
}
|
|
||||||
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return consoleBuckets, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetBucket retrieves bucket info of bucket with given name
|
|
||||||
func (buck *buckets) GetBucket(ctx context.Context, name string) (*console.Bucket, error) {
|
|
||||||
bucket, err := buck.db.Get_BucketInfo_By_Name(ctx, dbx.BucketInfo_Name(name))
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return fromDBXBucket(bucket)
|
|
||||||
}
|
|
||||||
|
|
||||||
// AttachBucket attaches a bucket to a project
|
|
||||||
func (buck *buckets) AttachBucket(ctx context.Context, name string, projectID uuid.UUID) (*console.Bucket, error) {
|
|
||||||
bucket, err := buck.db.Create_BucketInfo(
|
|
||||||
ctx,
|
|
||||||
dbx.BucketInfo_ProjectId(projectID[:]),
|
|
||||||
dbx.BucketInfo_Name(name),
|
|
||||||
)
|
|
||||||
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return fromDBXBucket(bucket)
|
|
||||||
}
|
|
||||||
|
|
||||||
// DeattachBucket deletes bucket info for a bucket by name
|
|
||||||
func (buck *buckets) DeattachBucket(ctx context.Context, name string) error {
|
|
||||||
_, err := buck.db.Delete_BucketInfo_By_Name(ctx, dbx.BucketInfo_Name(name))
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// fromDBXBucket creates console.Bucket from dbx.Bucket
|
|
||||||
func fromDBXBucket(bucket *dbx.BucketInfo) (*console.Bucket, error) {
|
|
||||||
projectID, err := bytesToUUID(bucket.ProjectId)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return &console.Bucket{
|
|
||||||
ProjectID: projectID,
|
|
||||||
Name: bucket.Name,
|
|
||||||
CreatedAt: bucket.CreatedAt,
|
|
||||||
}, nil
|
|
||||||
}
|
|
@ -57,11 +57,6 @@ func (db *ConsoleDB) APIKeys() console.APIKeys {
|
|||||||
return &apikeys{db.methods}
|
return &apikeys{db.methods}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Buckets is a getter for Buckets repository
|
|
||||||
func (db *ConsoleDB) Buckets() console.Buckets {
|
|
||||||
return &buckets{db.methods}
|
|
||||||
}
|
|
||||||
|
|
||||||
// CreateTables is a method for creating all tables for satellitedb
|
// CreateTables is a method for creating all tables for satellitedb
|
||||||
func (db *ConsoleDB) CreateTables() error {
|
func (db *ConsoleDB) CreateTables() error {
|
||||||
if db.db == nil {
|
if db.db == nil {
|
||||||
|
@ -334,27 +334,3 @@ read all (
|
|||||||
where api_key.project_id = ?
|
where api_key.project_id = ?
|
||||||
orderby asc api_key.name
|
orderby asc api_key.name
|
||||||
)
|
)
|
||||||
|
|
||||||
model bucket_info (
|
|
||||||
key name
|
|
||||||
|
|
||||||
field project_id project.id cascade
|
|
||||||
|
|
||||||
field name text
|
|
||||||
|
|
||||||
field created_at timestamp ( autoinsert )
|
|
||||||
)
|
|
||||||
|
|
||||||
create bucket_info ()
|
|
||||||
delete bucket_info ( where bucket_info.name = ? )
|
|
||||||
|
|
||||||
read one (
|
|
||||||
select bucket_info
|
|
||||||
where bucket_info.name = ?
|
|
||||||
)
|
|
||||||
|
|
||||||
read all (
|
|
||||||
select bucket_info
|
|
||||||
where bucket_info.project_id = ?
|
|
||||||
orderby asc bucket_info.name
|
|
||||||
)
|
|
@@ -9,7 +9,6 @@ import (
	"database/sql"
	"errors"
	"fmt"
	"math/rand"
	"reflect"
	"regexp"
	"strconv"
@@ -19,7 +18,9 @@ import (
	"unicode"

	"github.com/lib/pq"

	"github.com/mattn/go-sqlite3"
	"math/rand"
)

// Prevent conditional imports from causing build failures
@@ -379,12 +380,6 @@ CREATE TABLE api_keys (
	UNIQUE ( key ),
	UNIQUE ( name, project_id )
);
CREATE TABLE bucket_infos (
	project_id bytea NOT NULL REFERENCES projects( id ) ON DELETE CASCADE,
	name text NOT NULL,
	created_at timestamp with time zone NOT NULL,
	PRIMARY KEY ( name )
);
CREATE TABLE project_members (
	member_id bytea NOT NULL REFERENCES users( id ) ON DELETE CASCADE,
	project_id bytea NOT NULL REFERENCES projects( id ) ON DELETE CASCADE,
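The table is keyed by name and scoped to a project through project_id, which is what the generated All_BucketInfo_By_ProjectId_OrderBy_Asc_Name query (and ultimately ListBuckets) reads. A minimal database/sql sketch of that project-scoped listing, assuming an already-open Postgres *sql.DB; the connection setup is not part of this diff:

// Sketch only: the project-scoped listing implied by the bucket_infos schema above.
package listsketch

import (
	"context"
	"database/sql"
)

func listBucketNames(ctx context.Context, db *sql.DB, projectID []byte) ([]string, error) {
	rows, err := db.QueryContext(ctx,
		`SELECT name FROM bucket_infos WHERE project_id = $1 ORDER BY name`, projectID)
	if err != nil {
		return nil, err
	}
	defer rows.Close()

	var names []string
	for rows.Next() {
		var name string
		if err := rows.Scan(&name); err != nil {
			return nil, err
		}
		names = append(names, name)
	}
	return names, rows.Err()
}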
@@ -561,12 +556,6 @@ CREATE TABLE api_keys (
	UNIQUE ( key ),
	UNIQUE ( name, project_id )
);
CREATE TABLE bucket_infos (
	project_id BLOB NOT NULL REFERENCES projects( id ) ON DELETE CASCADE,
	name TEXT NOT NULL,
	created_at TIMESTAMP NOT NULL,
	PRIMARY KEY ( name )
);
CREATE TABLE project_members (
	member_id BLOB NOT NULL REFERENCES users( id ) ON DELETE CASCADE,
	project_id BLOB NOT NULL REFERENCES projects( id ) ON DELETE CASCADE,
@@ -2170,74 +2159,6 @@ func (f ApiKey_CreatedAt_Field) value() interface{} {

func (ApiKey_CreatedAt_Field) _Column() string { return "created_at" }

type BucketInfo struct {
	ProjectId []byte
	Name      string
	CreatedAt time.Time
}

func (BucketInfo) _Table() string { return "bucket_infos" }

type BucketInfo_Update_Fields struct {
}

type BucketInfo_ProjectId_Field struct {
	_set   bool
	_null  bool
	_value []byte
}

func BucketInfo_ProjectId(v []byte) BucketInfo_ProjectId_Field {
	return BucketInfo_ProjectId_Field{_set: true, _value: v}
}

func (f BucketInfo_ProjectId_Field) value() interface{} {
	if !f._set || f._null {
		return nil
	}
	return f._value
}

func (BucketInfo_ProjectId_Field) _Column() string { return "project_id" }

type BucketInfo_Name_Field struct {
	_set   bool
	_null  bool
	_value string
}

func BucketInfo_Name(v string) BucketInfo_Name_Field {
	return BucketInfo_Name_Field{_set: true, _value: v}
}

func (f BucketInfo_Name_Field) value() interface{} {
	if !f._set || f._null {
		return nil
	}
	return f._value
}

func (BucketInfo_Name_Field) _Column() string { return "name" }

type BucketInfo_CreatedAt_Field struct {
	_set   bool
	_null  bool
	_value time.Time
}

func BucketInfo_CreatedAt(v time.Time) BucketInfo_CreatedAt_Field {
	return BucketInfo_CreatedAt_Field{_set: true, _value: v}
}

func (f BucketInfo_CreatedAt_Field) value() interface{} {
	if !f._set || f._null {
		return nil
	}
	return f._value
}

func (BucketInfo_CreatedAt_Field) _Column() string { return "created_at" }

type ProjectMember struct {
	MemberId  []byte
	ProjectId []byte
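Each generated column gets the same three-part wrapper: a constructor that marks the value as set, and a value() accessor that collapses unset or null fields to nil so the driver binds SQL NULL instead of a zero value. An illustrative, self-contained re-implementation of that shape (nothing below is part of the generated code; it only mirrors BucketInfo_Name_Field and friends):

// Illustrative re-implementation of the generated "optional field" pattern above.
package fieldsketch

// optionalString mirrors BucketInfo_Name_Field: a value plus flags that record
// whether it was set at all and whether it should be treated as SQL NULL.
type optionalString struct {
	set   bool
	null  bool
	value string
}

// newOptionalString mirrors the BucketInfo_Name(v string) constructor.
func newOptionalString(v string) optionalString {
	return optionalString{set: true, value: v}
}

// sqlValue mirrors value(): unset or null fields become nil so the database
// driver binds NULL rather than an empty string.
func (f optionalString) sqlValue() interface{} {
	if !f.set || f.null {
		return nil
	}
	return f.value
}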
@@ -2849,30 +2770,6 @@ func (obj *postgresImpl) Create_ApiKey(ctx context.Context,

}

func (obj *postgresImpl) Create_BucketInfo(ctx context.Context,
	bucket_info_project_id BucketInfo_ProjectId_Field,
	bucket_info_name BucketInfo_Name_Field) (
	bucket_info *BucketInfo, err error) {

	__now := obj.db.Hooks.Now().UTC()
	__project_id_val := bucket_info_project_id.value()
	__name_val := bucket_info_name.value()
	__created_at_val := __now

	var __embed_stmt = __sqlbundle_Literal("INSERT INTO bucket_infos ( project_id, name, created_at ) VALUES ( ?, ?, ? ) RETURNING bucket_infos.project_id, bucket_infos.name, bucket_infos.created_at")

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __project_id_val, __name_val, __created_at_val)

	bucket_info = &BucketInfo{}
	err = obj.driver.QueryRow(__stmt, __project_id_val, __name_val, __created_at_val).Scan(&bucket_info.ProjectId, &bucket_info.Name, &bucket_info.CreatedAt)
	if err != nil {
		return nil, obj.makeErr(err)
	}
	return bucket_info, nil

}

func (obj *postgresImpl) Limited_Bwagreement(ctx context.Context,
	limit int, offset int64) (
	rows []*Bwagreement, err error) {
@@ -3671,60 +3568,6 @@ func (obj *postgresImpl) All_ApiKey_By_ProjectId_OrderBy_Asc_Name(ctx context.Co

}

func (obj *postgresImpl) Get_BucketInfo_By_Name(ctx context.Context,
	bucket_info_name BucketInfo_Name_Field) (
	bucket_info *BucketInfo, err error) {

	var __embed_stmt = __sqlbundle_Literal("SELECT bucket_infos.project_id, bucket_infos.name, bucket_infos.created_at FROM bucket_infos WHERE bucket_infos.name = ?")

	var __values []interface{}
	__values = append(__values, bucket_info_name.value())

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	bucket_info = &BucketInfo{}
	err = obj.driver.QueryRow(__stmt, __values...).Scan(&bucket_info.ProjectId, &bucket_info.Name, &bucket_info.CreatedAt)
	if err != nil {
		return nil, obj.makeErr(err)
	}
	return bucket_info, nil

}

func (obj *postgresImpl) All_BucketInfo_By_ProjectId_OrderBy_Asc_Name(ctx context.Context,
	bucket_info_project_id BucketInfo_ProjectId_Field) (
	rows []*BucketInfo, err error) {

	var __embed_stmt = __sqlbundle_Literal("SELECT bucket_infos.project_id, bucket_infos.name, bucket_infos.created_at FROM bucket_infos WHERE bucket_infos.project_id = ? ORDER BY bucket_infos.name")

	var __values []interface{}
	__values = append(__values, bucket_info_project_id.value())

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	__rows, err := obj.driver.Query(__stmt, __values...)
	if err != nil {
		return nil, obj.makeErr(err)
	}
	defer __rows.Close()

	for __rows.Next() {
		bucket_info := &BucketInfo{}
		err = __rows.Scan(&bucket_info.ProjectId, &bucket_info.Name, &bucket_info.CreatedAt)
		if err != nil {
			return nil, obj.makeErr(err)
		}
		rows = append(rows, bucket_info)
	}
	if err := __rows.Err(); err != nil {
		return nil, obj.makeErr(err)
	}
	return rows, nil

}

func (obj *postgresImpl) Update_Irreparabledb_By_Segmentpath(ctx context.Context,
	irreparabledb_segmentpath Irreparabledb_Segmentpath_Field,
	update Irreparabledb_Update_Fields) (
@@ -4382,32 +4225,6 @@ func (obj *postgresImpl) Delete_ApiKey_By_Id(ctx context.Context,

}

func (obj *postgresImpl) Delete_BucketInfo_By_Name(ctx context.Context,
	bucket_info_name BucketInfo_Name_Field) (
	deleted bool, err error) {

	var __embed_stmt = __sqlbundle_Literal("DELETE FROM bucket_infos WHERE bucket_infos.name = ?")

	var __values []interface{}
	__values = append(__values, bucket_info_name.value())

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	__res, err := obj.driver.Exec(__stmt, __values...)
	if err != nil {
		return false, obj.makeErr(err)
	}

	__count, err := __res.RowsAffected()
	if err != nil {
		return false, obj.makeErr(err)
	}

	return __count > 0, nil

}

func (impl postgresImpl) isConstraintError(err error) (
	constraint string, ok bool) {
	if e, ok := err.(*pq.Error); ok {
@@ -4426,16 +4243,6 @@ func (obj *postgresImpl) deleteAll(ctx context.Context) (count int64, err error)
		return 0, obj.makeErr(err)
	}

	__count, err = __res.RowsAffected()
	if err != nil {
		return 0, obj.makeErr(err)
	}
	count += __count
	__res, err = obj.driver.Exec("DELETE FROM bucket_infos;")
	if err != nil {
		return 0, obj.makeErr(err)
	}

	__count, err = __res.RowsAffected()
	if err != nil {
		return 0, obj.makeErr(err)
@@ -4942,33 +4749,6 @@ func (obj *sqlite3Impl) Create_ApiKey(ctx context.Context,

}

func (obj *sqlite3Impl) Create_BucketInfo(ctx context.Context,
	bucket_info_project_id BucketInfo_ProjectId_Field,
	bucket_info_name BucketInfo_Name_Field) (
	bucket_info *BucketInfo, err error) {

	__now := obj.db.Hooks.Now().UTC()
	__project_id_val := bucket_info_project_id.value()
	__name_val := bucket_info_name.value()
	__created_at_val := __now

	var __embed_stmt = __sqlbundle_Literal("INSERT INTO bucket_infos ( project_id, name, created_at ) VALUES ( ?, ?, ? )")

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __project_id_val, __name_val, __created_at_val)

	__res, err := obj.driver.Exec(__stmt, __project_id_val, __name_val, __created_at_val)
	if err != nil {
		return nil, obj.makeErr(err)
	}
	__pk, err := __res.LastInsertId()
	if err != nil {
		return nil, obj.makeErr(err)
	}
	return obj.getLastBucketInfo(ctx, __pk)

}

func (obj *sqlite3Impl) Limited_Bwagreement(ctx context.Context,
	limit int, offset int64) (
	rows []*Bwagreement, err error) {
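Note the dialect split between the two Create_BucketInfo implementations: Postgres gets the inserted row back in one round trip via RETURNING, while SQLite inserts, takes LastInsertId, and re-reads the row by _rowid_ (see getLastBucketInfo further down). A condensed database/sql sketch of the two strategies, written with each dialect's native placeholders instead of the sqlbundle-rendered ?, and assuming an already-open *sql.DB per dialect:

// Sketch only: the two insert-and-return strategies visible in this diff.
package insertsketch

import (
	"context"
	"database/sql"
	"time"
)

type bucketInfo struct {
	ProjectID []byte
	Name      string
	CreatedAt time.Time
}

// insertPostgres mirrors the RETURNING-based path of the postgres implementation.
func insertPostgres(ctx context.Context, db *sql.DB, projectID []byte, name string) (bucketInfo, error) {
	var b bucketInfo
	err := db.QueryRowContext(ctx,
		`INSERT INTO bucket_infos ( project_id, name, created_at ) VALUES ( $1, $2, $3 )
		 RETURNING project_id, name, created_at`,
		projectID, name, time.Now().UTC(),
	).Scan(&b.ProjectID, &b.Name, &b.CreatedAt)
	return b, err
}

// insertSQLite mirrors the insert + LastInsertId + re-select-by-_rowid_ path.
func insertSQLite(ctx context.Context, db *sql.DB, projectID []byte, name string) (bucketInfo, error) {
	var b bucketInfo
	res, err := db.ExecContext(ctx,
		`INSERT INTO bucket_infos ( project_id, name, created_at ) VALUES ( ?, ?, ? )`,
		projectID, name, time.Now().UTC(),
	)
	if err != nil {
		return b, err
	}
	pk, err := res.LastInsertId()
	if err != nil {
		return b, err
	}
	err = db.QueryRowContext(ctx,
		`SELECT project_id, name, created_at FROM bucket_infos WHERE _rowid_ = ?`,
		pk,
	).Scan(&b.ProjectID, &b.Name, &b.CreatedAt)
	return b, err
}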
@@ -5767,60 +5547,6 @@ func (obj *sqlite3Impl) All_ApiKey_By_ProjectId_OrderBy_Asc_Name(ctx context.Con

}

func (obj *sqlite3Impl) Get_BucketInfo_By_Name(ctx context.Context,
	bucket_info_name BucketInfo_Name_Field) (
	bucket_info *BucketInfo, err error) {

	var __embed_stmt = __sqlbundle_Literal("SELECT bucket_infos.project_id, bucket_infos.name, bucket_infos.created_at FROM bucket_infos WHERE bucket_infos.name = ?")

	var __values []interface{}
	__values = append(__values, bucket_info_name.value())

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	bucket_info = &BucketInfo{}
	err = obj.driver.QueryRow(__stmt, __values...).Scan(&bucket_info.ProjectId, &bucket_info.Name, &bucket_info.CreatedAt)
	if err != nil {
		return nil, obj.makeErr(err)
	}
	return bucket_info, nil

}

func (obj *sqlite3Impl) All_BucketInfo_By_ProjectId_OrderBy_Asc_Name(ctx context.Context,
	bucket_info_project_id BucketInfo_ProjectId_Field) (
	rows []*BucketInfo, err error) {

	var __embed_stmt = __sqlbundle_Literal("SELECT bucket_infos.project_id, bucket_infos.name, bucket_infos.created_at FROM bucket_infos WHERE bucket_infos.project_id = ? ORDER BY bucket_infos.name")

	var __values []interface{}
	__values = append(__values, bucket_info_project_id.value())

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	__rows, err := obj.driver.Query(__stmt, __values...)
	if err != nil {
		return nil, obj.makeErr(err)
	}
	defer __rows.Close()

	for __rows.Next() {
		bucket_info := &BucketInfo{}
		err = __rows.Scan(&bucket_info.ProjectId, &bucket_info.Name, &bucket_info.CreatedAt)
		if err != nil {
			return nil, obj.makeErr(err)
		}
		rows = append(rows, bucket_info)
	}
	if err := __rows.Err(); err != nil {
		return nil, obj.makeErr(err)
	}
	return rows, nil

}

func (obj *sqlite3Impl) Update_Irreparabledb_By_Segmentpath(ctx context.Context,
	irreparabledb_segmentpath Irreparabledb_Segmentpath_Field,
	update Irreparabledb_Update_Fields) (
@@ -6548,32 +6274,6 @@ func (obj *sqlite3Impl) Delete_ApiKey_By_Id(ctx context.Context,

}

func (obj *sqlite3Impl) Delete_BucketInfo_By_Name(ctx context.Context,
	bucket_info_name BucketInfo_Name_Field) (
	deleted bool, err error) {

	var __embed_stmt = __sqlbundle_Literal("DELETE FROM bucket_infos WHERE bucket_infos.name = ?")

	var __values []interface{}
	__values = append(__values, bucket_info_name.value())

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, __values...)

	__res, err := obj.driver.Exec(__stmt, __values...)
	if err != nil {
		return false, obj.makeErr(err)
	}

	__count, err := __res.RowsAffected()
	if err != nil {
		return false, obj.makeErr(err)
	}

	return __count > 0, nil

}

func (obj *sqlite3Impl) getLastBwagreement(ctx context.Context,
	pk int64) (
	bwagreement *Bwagreement, err error) {
@@ -6790,24 +6490,6 @@ func (obj *sqlite3Impl) getLastApiKey(ctx context.Context,

}

func (obj *sqlite3Impl) getLastBucketInfo(ctx context.Context,
	pk int64) (
	bucket_info *BucketInfo, err error) {

	var __embed_stmt = __sqlbundle_Literal("SELECT bucket_infos.project_id, bucket_infos.name, bucket_infos.created_at FROM bucket_infos WHERE _rowid_ = ?")

	var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
	obj.logStmt(__stmt, pk)

	bucket_info = &BucketInfo{}
	err = obj.driver.QueryRow(__stmt, pk).Scan(&bucket_info.ProjectId, &bucket_info.Name, &bucket_info.CreatedAt)
	if err != nil {
		return nil, obj.makeErr(err)
	}
	return bucket_info, nil

}

func (impl sqlite3Impl) isConstraintError(err error) (
	constraint string, ok bool) {
	if e, ok := err.(sqlite3.Error); ok {
@@ -6831,16 +6513,6 @@ func (obj *sqlite3Impl) deleteAll(ctx context.Context) (count int64, err error)
		return 0, obj.makeErr(err)
	}

	__count, err = __res.RowsAffected()
	if err != nil {
		return 0, obj.makeErr(err)
	}
	count += __count
	__res, err = obj.driver.Exec("DELETE FROM bucket_infos;")
	if err != nil {
		return 0, obj.makeErr(err)
	}

	__count, err = __res.RowsAffected()
	if err != nil {
		return 0, obj.makeErr(err)
@@ -7042,16 +6714,6 @@ func (rx *Rx) All_ApiKey_By_ProjectId_OrderBy_Asc_Name(ctx context.Context,
	return tx.All_ApiKey_By_ProjectId_OrderBy_Asc_Name(ctx, api_key_project_id)
}

func (rx *Rx) All_BucketInfo_By_ProjectId_OrderBy_Asc_Name(ctx context.Context,
	bucket_info_project_id BucketInfo_ProjectId_Field) (
	rows []*BucketInfo, err error) {
	var tx *Tx
	if tx, err = rx.getTx(ctx); err != nil {
		return
	}
	return tx.All_BucketInfo_By_ProjectId_OrderBy_Asc_Name(ctx, bucket_info_project_id)
}

func (rx *Rx) All_Bwagreement(ctx context.Context) (
	rows []*Bwagreement, err error) {
	var tx *Tx
@@ -7178,18 +6840,6 @@ func (rx *Rx) Create_ApiKey(ctx context.Context,

}

func (rx *Rx) Create_BucketInfo(ctx context.Context,
	bucket_info_project_id BucketInfo_ProjectId_Field,
	bucket_info_name BucketInfo_Name_Field) (
	bucket_info *BucketInfo, err error) {
	var tx *Tx
	if tx, err = rx.getTx(ctx); err != nil {
		return
	}
	return tx.Create_BucketInfo(ctx, bucket_info_project_id, bucket_info_name)

}

func (rx *Rx) Create_Bwagreement(ctx context.Context,
	bwagreement_serialnum Bwagreement_Serialnum_Field,
	bwagreement_storage_node_id Bwagreement_StorageNodeId_Field,
@@ -7344,16 +6994,6 @@ func (rx *Rx) Delete_ApiKey_By_Id(ctx context.Context,
	return tx.Delete_ApiKey_By_Id(ctx, api_key_id)
}

func (rx *Rx) Delete_BucketInfo_By_Name(ctx context.Context,
	bucket_info_name BucketInfo_Name_Field) (
	deleted bool, err error) {
	var tx *Tx
	if tx, err = rx.getTx(ctx); err != nil {
		return
	}
	return tx.Delete_BucketInfo_By_Name(ctx, bucket_info_name)
}

func (rx *Rx) Delete_Injuredsegment_By_Id(ctx context.Context,
	injuredsegment_id Injuredsegment_Id_Field) (
	deleted bool, err error) {
@@ -7484,16 +7124,6 @@ func (rx *Rx) Get_ApiKey_By_Key(ctx context.Context,
	return tx.Get_ApiKey_By_Key(ctx, api_key_key)
}

func (rx *Rx) Get_BucketInfo_By_Name(ctx context.Context,
	bucket_info_name BucketInfo_Name_Field) (
	bucket_info *BucketInfo, err error) {
	var tx *Tx
	if tx, err = rx.getTx(ctx); err != nil {
		return
	}
	return tx.Get_BucketInfo_By_Name(ctx, bucket_info_name)
}

func (rx *Rx) Get_Irreparabledb_By_Segmentpath(ctx context.Context,
	irreparabledb_segmentpath Irreparabledb_Segmentpath_Field) (
	irreparabledb *Irreparabledb, err error) {
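All four Rx wrappers above follow the same shape: lazily obtain a shared transaction through getTx and delegate the call to the corresponding *Tx method. An illustrative, self-contained version of that pattern against database/sql (the generated code delegates to dbx-produced *Tx methods instead; the SQL and placeholder syntax here are for illustration and depend on the driver):

// Illustrative only: the lazy-transaction delegation pattern used by the Rx wrappers.
package rxsketch

import (
	"context"
	"database/sql"
)

type rx struct {
	db *sql.DB
	tx *sql.Tx
}

// getTx opens a transaction on first use and reuses it for subsequent calls,
// mirroring how the generated Rx.getTx is used above.
func (r *rx) getTx(ctx context.Context) (*sql.Tx, error) {
	if r.tx == nil {
		tx, err := r.db.BeginTx(ctx, nil)
		if err != nil {
			return nil, err
		}
		r.tx = tx
	}
	return r.tx, nil
}

// deleteBucketInfoByName shows the delegate-through-transaction shape of
// Rx.Delete_BucketInfo_By_Name; the table and column come from this diff.
func (r *rx) deleteBucketInfoByName(ctx context.Context, name string) (bool, error) {
	tx, err := r.getTx(ctx)
	if err != nil {
		return false, err
	}
	res, err := tx.ExecContext(ctx, "DELETE FROM bucket_infos WHERE name = ?", name)
	if err != nil {
		return false, err
	}
	count, err := res.RowsAffected()
	if err != nil {
		return false, err
	}
	return count > 0, nil
}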
@@ -7699,10 +7329,6 @@ type Methods interface {
		api_key_project_id ApiKey_ProjectId_Field) (
		rows []*ApiKey, err error)

	All_BucketInfo_By_ProjectId_OrderBy_Asc_Name(ctx context.Context,
		bucket_info_project_id BucketInfo_ProjectId_Field) (
		rows []*BucketInfo, err error)

	All_Bwagreement(ctx context.Context) (
		rows []*Bwagreement, err error)

@@ -7759,11 +7385,6 @@ type Methods interface {
		api_key_name ApiKey_Name_Field) (
		api_key *ApiKey, err error)

	Create_BucketInfo(ctx context.Context,
		bucket_info_project_id BucketInfo_ProjectId_Field,
		bucket_info_name BucketInfo_Name_Field) (
		bucket_info *BucketInfo, err error)

	Create_Bwagreement(ctx context.Context,
		bwagreement_serialnum Bwagreement_Serialnum_Field,
		bwagreement_storage_node_id Bwagreement_StorageNodeId_Field,
|
|||||||
api_key_id ApiKey_Id_Field) (
|
api_key_id ApiKey_Id_Field) (
|
||||||
deleted bool, err error)
|
deleted bool, err error)
|
||||||
|
|
||||||
Delete_BucketInfo_By_Name(ctx context.Context,
|
|
||||||
bucket_info_name BucketInfo_Name_Field) (
|
|
||||||
deleted bool, err error)
|
|
||||||
|
|
||||||
Delete_Injuredsegment_By_Id(ctx context.Context,
|
Delete_Injuredsegment_By_Id(ctx context.Context,
|
||||||
injuredsegment_id Injuredsegment_Id_Field) (
|
injuredsegment_id Injuredsegment_Id_Field) (
|
||||||
deleted bool, err error)
|
deleted bool, err error)
|
||||||
@@ -7900,10 +7517,6 @@ type Methods interface {
		api_key_key ApiKey_Key_Field) (
		api_key *ApiKey, err error)

	Get_BucketInfo_By_Name(ctx context.Context,
		bucket_info_name BucketInfo_Name_Field) (
		bucket_info *BucketInfo, err error)

	Get_Irreparabledb_By_Segmentpath(ctx context.Context,
		irreparabledb_segmentpath Irreparabledb_Segmentpath_Field) (
		irreparabledb *Irreparabledb, err error)
@@ -107,12 +107,6 @@ CREATE TABLE api_keys (
	UNIQUE ( key ),
	UNIQUE ( name, project_id )
);
CREATE TABLE bucket_infos (
	project_id bytea NOT NULL REFERENCES projects( id ) ON DELETE CASCADE,
	name text NOT NULL,
	created_at timestamp with time zone NOT NULL,
	PRIMARY KEY ( name )
);
CREATE TABLE project_members (
	member_id bytea NOT NULL REFERENCES users( id ) ON DELETE CASCADE,
	project_id bytea NOT NULL REFERENCES projects( id ) ON DELETE CASCADE,
@@ -107,12 +107,6 @@ CREATE TABLE api_keys (
	UNIQUE ( key ),
	UNIQUE ( name, project_id )
);
CREATE TABLE bucket_infos (
	project_id BLOB NOT NULL REFERENCES projects( id ) ON DELETE CASCADE,
	name TEXT NOT NULL,
	created_at TIMESTAMP NOT NULL,
	PRIMARY KEY ( name )
);
CREATE TABLE project_members (
	member_id BLOB NOT NULL REFERENCES users( id ) ON DELETE CASCADE,
	project_id BLOB NOT NULL REFERENCES projects( id ) ON DELETE CASCADE,
@@ -206,47 +206,6 @@ func (m *lockedAPIKeys) Update(ctx context.Context, key console.APIKeyInfo) erro
	return m.db.Update(ctx, key)
}

// Buckets is a getter for Buckets repository
func (m *lockedConsole) Buckets() console.Buckets {
	m.Lock()
	defer m.Unlock()
	return &lockedBuckets{m.Locker, m.db.Buckets()}
}

// lockedBuckets implements locking wrapper for console.Buckets
type lockedBuckets struct {
	sync.Locker
	db console.Buckets
}

// AttachBucket attaches a bucket to a project
func (m *lockedBuckets) AttachBucket(ctx context.Context, name string, projectID uuid.UUID) (*console.Bucket, error) {
	m.Lock()
	defer m.Unlock()
	return m.db.AttachBucket(ctx, name, projectID)
}

// DeattachBucket deletes bucket info for a bucket by name
func (m *lockedBuckets) DeattachBucket(ctx context.Context, name string) error {
	m.Lock()
	defer m.Unlock()
	return m.db.DeattachBucket(ctx, name)
}

// GetBucket retrieves bucket info of bucket with given name
func (m *lockedBuckets) GetBucket(ctx context.Context, name string) (*console.Bucket, error) {
	m.Lock()
	defer m.Unlock()
	return m.db.GetBucket(ctx, name)
}

// ListBuckets returns bucket list of a given project
func (m *lockedBuckets) ListBuckets(ctx context.Context, projectID uuid.UUID) ([]console.Bucket, error) {
	m.Lock()
	defer m.Unlock()
	return m.db.ListBuckets(ctx, projectID)
}

// Close is used to close db connection
func (m *lockedConsole) Close() error {
	m.Lock()