Console usage rollup api (#1664)

Yaroslav Vorobiov 2019-04-04 17:56:20 +03:00 committed by GitHub
parent 6d86610bcc
commit b38b87cb14
23 changed files with 412 additions and 139 deletions


@@ -7,23 +7,18 @@ import (
 	"github.com/graphql-go/graphql"
 
 	"storj.io/storj/bootstrap/bootstrapweb"
-	"storj.io/storj/internal/storjql"
 )
 
 // CreateSchema creates a schema for bootstrap graphql api
 func CreateSchema(service *bootstrapweb.Service) (schema graphql.Schema, err error) {
-	storjql.WithLock(func() {
-		creator := TypeCreator{}
+	creator := TypeCreator{}
 
-		err = creator.Create(service)
-		if err != nil {
-			return
-		}
+	err = creator.Create(service)
+	if err != nil {
+		return
+	}
 
-		schema, err = graphql.NewSchema(graphql.SchemaConfig{
-			Query: creator.RootQuery(),
-		})
-	})
-
-	return schema, err
+	return graphql.NewSchema(graphql.SchemaConfig{
+		Query: creator.RootQuery(),
+	})
 }

go.mod

@@ -4,7 +4,7 @@ module storj.io/storj
 require (
 	github.com/btcsuite/btcutil v0.0.0-20180706230648-ab6388e0c60a
 	github.com/garyburd/redigo v1.0.1-0.20170216214944-0d253a66e6e1 // indirect
-	github.com/graphql-go/graphql v0.7.6
+	github.com/graphql-go/graphql v0.7.9-0.20190403165646-199d20bbfed7
 	github.com/hanwen/go-fuse v0.0.0-20181027161220-c029b69a13a7
 	github.com/inconshreveable/mousetrap v1.0.0 // indirect
 	github.com/mattn/go-colorable v0.0.9 // indirect

go.sum

@@ -133,8 +133,8 @@ github.com/gorilla/mux v1.7.0 h1:tOSd0UKHQd6urX6ApfOn4XdBMY6Sh1MfxV3kmaazO+U=
 github.com/gorilla/mux v1.7.0/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
 github.com/gorilla/rpc v1.1.0 h1:marKfvVP0Gpd/jHlVBKCQ8RAoUPdX7K1Nuh6l1BNh7A=
 github.com/gorilla/rpc v1.1.0/go.mod h1:V4h9r+4sF5HnzqbwIez0fKSpANP0zlYd3qR7p36jkTQ=
-github.com/graphql-go/graphql v0.7.6 h1:3Bn1IFB5OvPoANEfu03azF8aMyks0G/H6G1XeTfYbM4=
-github.com/graphql-go/graphql v0.7.6/go.mod h1:k6yrAYQaSP59DC5UVxbgxESlmVyojThKdORUqGDGmrI=
+github.com/graphql-go/graphql v0.7.9-0.20190403165646-199d20bbfed7 h1:E45QFM7IqRdFnuyFk8GSamb42EckUSyJ55rtVB/w8VQ=
+github.com/graphql-go/graphql v0.7.9-0.20190403165646-199d20bbfed7/go.mod h1:k6yrAYQaSP59DC5UVxbgxESlmVyojThKdORUqGDGmrI=
 github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed/go.mod h1:tMWxXQ9wFIaZeTI9F+hmhFiGpFmhOHzyShyFUhRm0H4=
 github.com/hanwen/go-fuse v0.0.0-20181027161220-c029b69a13a7 h1:+INF0+TK4ga3O+6Y0Z2ftiujA13KaCO/+kHN9V6Mj4A=
 github.com/hanwen/go-fuse v0.0.0-20181027161220-c029b69a13a7/go.mod h1:unqXarDXqzAk0rt98O2tVndEPIpUgLD9+rwFisZH3Ok=


@@ -1,19 +0,0 @@
-// Copyright (C) 2018 Storj Labs, Inc.
-// See LICENSE for copying information.
-
-package storjql
-
-import (
-	"sync"
-)
-
-// mu allows to lock graphql methods, because some of them are not thread-safe
-var mu sync.Mutex
-
-// WithLock locks graphql methods, because some of them are not thread-safe
-func WithLock(fn func()) {
-	mu.Lock()
-	defer mu.Unlock()
-
-	fn()
-}


@@ -41,7 +41,7 @@ func graphqlAPIKeyInfo() *graphql.Object {
 }
 
 // graphqlCreateAPIKey creates createAPIKey graphql object
-func graphqlCreateAPIKey(types Types) *graphql.Object {
+func graphqlCreateAPIKey(types *TypeCreator) *graphql.Object {
 	return graphql.NewObject(graphql.ObjectConfig{
 		Name: CreateAPIKeyType,
 		Fields: graphql.Fields{
@@ -49,7 +49,7 @@ func graphqlCreateAPIKey(types Types) *graphql.Object {
 				Type: graphql.String,
 			},
 			APIKeyInfoType: &graphql.Field{
-				Type: types.APIKeyInfo(),
+				Type: types.apiKeyInfo,
 			},
 		},
 	})


@@ -53,15 +53,15 @@ const (
 )
 
 // rootMutation creates mutation for graphql populated by AccountsClient
-func rootMutation(log *zap.Logger, service *console.Service, mailService *mailservice.Service, types Types) *graphql.Object {
+func rootMutation(log *zap.Logger, service *console.Service, mailService *mailservice.Service, types *TypeCreator) *graphql.Object {
 	return graphql.NewObject(graphql.ObjectConfig{
 		Name: Mutation,
 		Fields: graphql.Fields{
 			CreateUserMutation: &graphql.Field{
-				Type: types.User(),
+				Type: types.user,
 				Args: graphql.FieldConfigArgument{
 					InputArg: &graphql.ArgumentConfig{
-						Type: graphql.NewNonNull(types.UserInput()),
+						Type: graphql.NewNonNull(types.userInput),
 					},
 					Secret: &graphql.ArgumentConfig{
 						Type: graphql.NewNonNull(graphql.String),
@@ -117,10 +117,10 @@ func rootMutation(log *zap.Logger, service *console.Service, mailService *mailse
 				},
 			},
 			UpdateAccountMutation: &graphql.Field{
-				Type: types.User(),
+				Type: types.user,
 				Args: graphql.FieldConfigArgument{
 					InputArg: &graphql.ArgumentConfig{
-						Type: graphql.NewNonNull(types.UserInput()),
+						Type: graphql.NewNonNull(types.userInput),
 					},
 				},
 				Resolve: func(p graphql.ResolveParams) (interface{}, error) {
@@ -142,7 +142,7 @@ func rootMutation(log *zap.Logger, service *console.Service, mailService *mailse
 				},
 			},
 			ChangePasswordMutation: &graphql.Field{
-				Type: types.User(),
+				Type: types.user,
 				Args: graphql.FieldConfigArgument{
 					FieldPassword: &graphql.ArgumentConfig{
 						Type: graphql.NewNonNull(graphql.String),
@@ -169,7 +169,7 @@ func rootMutation(log *zap.Logger, service *console.Service, mailService *mailse
 				},
 			},
 			DeleteAccountMutation: &graphql.Field{
-				Type: types.User(),
+				Type: types.user,
 				Args: graphql.FieldConfigArgument{
 					FieldPassword: &graphql.ArgumentConfig{
 						Type: graphql.NewNonNull(graphql.String),
@@ -193,10 +193,10 @@ func rootMutation(log *zap.Logger, service *console.Service, mailService *mailse
 			},
 			// creates project from input params
 			CreateProjectMutation: &graphql.Field{
-				Type: types.Project(),
+				Type: types.project,
 				Args: graphql.FieldConfigArgument{
 					InputArg: &graphql.ArgumentConfig{
-						Type: graphql.NewNonNull(types.ProjectInput()),
+						Type: graphql.NewNonNull(types.projectInput),
 					},
 				},
 				Resolve: func(p graphql.ResolveParams) (interface{}, error) {
@@ -207,7 +207,7 @@ func rootMutation(log *zap.Logger, service *console.Service, mailService *mailse
 			},
 			// deletes project by id, taken from input params
 			DeleteProjectMutation: &graphql.Field{
-				Type: types.Project(),
+				Type: types.project,
 				Args: graphql.FieldConfigArgument{
 					FieldID: &graphql.ArgumentConfig{
 						Type: graphql.NewNonNull(graphql.String),
@@ -234,7 +234,7 @@ func rootMutation(log *zap.Logger, service *console.Service, mailService *mailse
 			},
 			// updates project description
 			UpdateProjectDescriptionMutation: &graphql.Field{
-				Type: types.Project(),
+				Type: types.project,
 				Args: graphql.FieldConfigArgument{
 					FieldID: &graphql.ArgumentConfig{
 						Type: graphql.NewNonNull(graphql.String),
@@ -257,7 +257,7 @@ func rootMutation(log *zap.Logger, service *console.Service, mailService *mailse
 			},
 			// add user as member of given project
 			AddProjectMembersMutation: &graphql.Field{
-				Type: types.Project(),
+				Type: types.project,
 				Args: graphql.FieldConfigArgument{
 					FieldProjectID: &graphql.ArgumentConfig{
 						Type: graphql.NewNonNull(graphql.String),
@@ -320,7 +320,7 @@ func rootMutation(log *zap.Logger, service *console.Service, mailService *mailse
 			},
 			// delete user membership for given project
 			DeleteProjectMembersMutation: &graphql.Field{
-				Type: types.Project(),
+				Type: types.project,
 				Args: graphql.FieldConfigArgument{
 					FieldProjectID: &graphql.ArgumentConfig{
 						Type: graphql.NewNonNull(graphql.String),
@@ -353,7 +353,7 @@ func rootMutation(log *zap.Logger, service *console.Service, mailService *mailse
 			},
 			// creates new api key
 			CreateAPIKeyMutation: &graphql.Field{
-				Type: types.CreateAPIKey(),
+				Type: types.createAPIKey,
 				Args: graphql.FieldConfigArgument{
 					FieldProjectID: &graphql.ArgumentConfig{
 						Type: graphql.NewNonNull(graphql.String),
@@ -384,7 +384,7 @@ func rootMutation(log *zap.Logger, service *console.Service, mailService *mailse
 			},
 			// deletes api key
 			DeleteAPIKeysMutation: &graphql.Field{
-				Type: graphql.NewList(types.APIKeyInfo()),
+				Type: graphql.NewList(types.apiKeyInfo),
 				Args: graphql.FieldConfigArgument{
 					FieldID: &graphql.ArgumentConfig{
 						Type: graphql.NewNonNull(graphql.NewList(graphql.String)),


@@ -4,6 +4,8 @@
 package consoleql
 
 import (
+	"time"
+
 	"github.com/graphql-go/graphql"
 
 	"storj.io/storj/satellite/console"
@@ -14,6 +16,8 @@ const (
 	ProjectType = "project"
 	// ProjectInputType is a graphql type name for project input
 	ProjectInputType = "projectInput"
+	// ProjectUsageType is a graphql type name for project usage
+	ProjectUsageType = "projectUsage"
 	// FieldName is a field name for "name"
 	FieldName = "name"
 	// FieldDescription is a field name for description
@@ -22,7 +26,14 @@ const (
 	FieldMembers = "members"
 	// FieldAPIKeys is a field name for api keys
 	FieldAPIKeys = "apiKeys"
+	// FieldUsage is a field name for usage rollup
+	FieldUsage = "usage"
+	// FieldStorage is a field name for storage total
+	FieldStorage = "storage"
+	// FieldEgress is a field name for egress total
+	FieldEgress = "egress"
+	// FieldObjectsCount is a field name for objects count
+	FieldObjectsCount = "objectsCount"
 	// LimitArg is argument name for limit
 	LimitArg = "limit"
 	// OffsetArg is argument name for offset
@@ -31,10 +42,14 @@ const (
 	SearchArg = "search"
 	// OrderArg is argument name for order
 	OrderArg = "order"
+	// SinceArg marks start of the period
+	SinceArg = "since"
+	// BeforeArg marks end of the period
+	BeforeArg = "before"
 )
 
 // graphqlProject creates *graphql.Object type representation of satellite.ProjectInfo
-func graphqlProject(service *console.Service, types Types) *graphql.Object {
+func graphqlProject(service *console.Service, types *TypeCreator) *graphql.Object {
 	return graphql.NewObject(graphql.ObjectConfig{
 		Name: ProjectType,
 		Fields: graphql.Fields{
@@ -51,7 +66,7 @@ func graphqlProject(service *console.Service, types Types) *graphql.Object {
 				Type: graphql.DateTime,
 			},
 			FieldMembers: &graphql.Field{
-				Type: graphql.NewList(types.ProjectMember()),
+				Type: graphql.NewList(types.projectMember),
 				Args: graphql.FieldConfigArgument{
 					OffsetArg: &graphql.ArgumentConfig{
 						Type: graphql.NewNonNull(graphql.Int),
@@ -103,13 +118,32 @@ func graphqlProject(service *console.Service, types Types) *graphql.Object {
 				},
 			},
 			FieldAPIKeys: &graphql.Field{
-				Type: graphql.NewList(types.APIKeyInfo()),
+				Type: graphql.NewList(types.apiKeyInfo),
 				Resolve: func(p graphql.ResolveParams) (interface{}, error) {
 					project, _ := p.Source.(*console.Project)
 
 					return service.GetAPIKeysInfoByProjectID(p.Context, project.ID)
 				},
 			},
+			FieldUsage: &graphql.Field{
+				Type: types.projectUsage,
+				Args: graphql.FieldConfigArgument{
+					SinceArg: &graphql.ArgumentConfig{
+						Type: graphql.NewNonNull(graphql.DateTime),
+					},
+					BeforeArg: &graphql.ArgumentConfig{
+						Type: graphql.NewNonNull(graphql.DateTime),
+					},
+				},
+				Resolve: func(p graphql.ResolveParams) (interface{}, error) {
+					project, _ := p.Source.(*console.Project)
+
+					since := p.Args[SinceArg].(time.Time)
+					before := p.Args[BeforeArg].(time.Time)
+
+					return service.GetProjectUsage(p.Context, project.ID, since, before)
+				},
+			},
 		},
 	})
 }
@@ -129,6 +163,30 @@ func graphqlProjectInput() *graphql.InputObject {
 	})
 }
 
+// graphqlProjectUsage creates project usage graphql type
+func graphqlProjectUsage() *graphql.Object {
+	return graphql.NewObject(graphql.ObjectConfig{
+		Name: ProjectUsageType,
+		Fields: graphql.Fields{
+			FieldStorage: &graphql.Field{
+				Type: graphql.Float,
+			},
+			FieldEgress: &graphql.Field{
+				Type: graphql.Float,
+			},
+			FieldObjectsCount: &graphql.Field{
+				Type: graphql.Float,
+			},
+			SinceArg: &graphql.Field{
+				Type: graphql.DateTime,
+			},
+			BeforeArg: &graphql.Field{
+				Type: graphql.DateTime,
+			},
+		},
+	})
+}
+
 // fromMapProjectInfo creates satellite.ProjectInfo from input args
 func fromMapProjectInfo(args map[string]interface{}) (project console.ProjectInfo) {
 	project.Name, _ = args[FieldName].(string)
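
With the usage field wired into the project type above, a console client can ask for a project's rollup alongside other project data. Below is a minimal sketch (not part of the commit) of exercising it through graphql-go; the `project` query field name, the `id` argument name, the project ID, and the timestamps are assumptions used for illustration only.

	query := `query {
		project(id: "project-id-goes-here") {
			name
			usage(since: "2019-03-01T00:00:00Z", before: "2019-04-01T00:00:00Z") {
				storage
				egress
				objectsCount
				since
				before
			}
		}
	}`

	result := graphql.Do(graphql.Params{
		Schema:        schema, // built by consoleql.CreateSchema
		RequestString: query,
		Context:       ctx, // must carry console authorization, otherwise the usage resolver is rejected
	})
	if result.HasErrors() {
		// handle result.Errors
	}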


@@ -19,12 +19,12 @@
 )
 
 // graphqlProjectMember creates projectMember type
-func graphqlProjectMember(service *console.Service, types Types) *graphql.Object {
+func graphqlProjectMember(service *console.Service, types *TypeCreator) *graphql.Object {
 	return graphql.NewObject(graphql.ObjectConfig{
 		Name: ProjectMemberType,
 		Fields: graphql.Fields{
 			UserType: &graphql.Field{
-				Type: types.User(),
+				Type: types.user,
 				Resolve: func(p graphql.ResolveParams) (interface{}, error) {
 					member, _ := p.Source.(projectMember)
 					// company sub query expects pointer


@@ -24,12 +24,12 @@ const (
 )
 
 // rootQuery creates query for graphql populated by AccountsClient
-func rootQuery(service *console.Service, types Types) *graphql.Object {
+func rootQuery(service *console.Service, types *TypeCreator) *graphql.Object {
 	return graphql.NewObject(graphql.ObjectConfig{
 		Name: Query,
 		Fields: graphql.Fields{
 			UserQuery: &graphql.Field{
-				Type: types.User(),
+				Type: types.user,
 				Args: graphql.FieldConfigArgument{
 					FieldID: &graphql.ArgumentConfig{
 						Type: graphql.String,
@@ -45,7 +45,7 @@ func rootQuery(service *console.Service, types Types) *graphql.Object {
 				},
 			},
 			ProjectQuery: &graphql.Field{
-				Type: types.Project(),
+				Type: types.project,
 				Args: graphql.FieldConfigArgument{
 					FieldID: &graphql.ArgumentConfig{
 						Type: graphql.NewNonNull(graphql.String),
@@ -63,13 +63,13 @@ func rootQuery(service *console.Service, types Types) *graphql.Object {
 				},
 			},
 			MyProjectsQuery: &graphql.Field{
-				Type: graphql.NewList(types.Project()),
+				Type: graphql.NewList(types.project),
 				Resolve: func(p graphql.ResolveParams) (interface{}, error) {
 					return service.GetUsersProjects(p.Context)
 				},
 			},
 			TokenQuery: &graphql.Field{
-				Type: types.Token(),
+				Type: types.token,
 				Args: graphql.FieldConfigArgument{
 					FieldEmail: &graphql.ArgumentConfig{
 						Type: graphql.NewNonNull(graphql.String),


@@ -7,26 +7,21 @@ import (
 	"github.com/graphql-go/graphql"
 	"go.uber.org/zap"
 
-	"storj.io/storj/internal/storjql"
 	"storj.io/storj/satellite/console"
 	"storj.io/storj/satellite/mailservice"
 )
 
 // CreateSchema creates a schema for satellites console graphql api
 func CreateSchema(log *zap.Logger, service *console.Service, mailService *mailservice.Service) (schema graphql.Schema, err error) {
-	storjql.WithLock(func() {
-		creator := TypeCreator{}
+	creator := TypeCreator{}
 
-		err = creator.Create(log, service, mailService)
-		if err != nil {
-			return
-		}
+	err = creator.Create(log, service, mailService)
+	if err != nil {
+		return
+	}
 
-		schema, err = graphql.NewSchema(graphql.SchemaConfig{
-			Query:    creator.RootQuery(),
-			Mutation: creator.RootMutation(),
-		})
-	})
-
-	return schema, err
+	return graphql.NewSchema(graphql.SchemaConfig{
+		Query:    creator.RootQuery(),
+		Mutation: creator.RootMutation(),
+	})
 }


@@ -16,7 +16,7 @@
 )
 
 // graphqlToken creates *graphql.Object type that encapsulates user and token string
-func graphqlToken(service *console.Service, types Types) *graphql.Object {
+func graphqlToken(service *console.Service, types *TypeCreator) *graphql.Object {
 	return graphql.NewObject(graphql.ObjectConfig{
 		Name: TokenType,
 		Fields: graphql.Fields{
@@ -24,7 +24,7 @@ func graphqlToken(service *console.Service, types Types) *graphql.Object {
 				Type: graphql.String,
 			},
 			UserType: &graphql.Field{
-				Type: types.User(),
+				Type: types.user,
 				Resolve: func(p graphql.ResolveParams) (interface{}, error) {
 					wrapper, _ := p.Source.(tokenWrapper)


@@ -11,23 +11,6 @@ import (
 	"storj.io/storj/satellite/mailservice"
 )
 
-// Types return graphql type objects
-type Types interface {
-	RootQuery() *graphql.Object
-	RootMutation() *graphql.Object
-
-	Token() *graphql.Object
-	User() *graphql.Object
-	Project() *graphql.Object
-	ProjectMember() *graphql.Object
-	APIKeyInfo() *graphql.Object
-	CreateAPIKey() *graphql.Object
-
-	UserInput() *graphql.InputObject
-	ProjectInput() *graphql.InputObject
-}
-
 // TypeCreator handles graphql type creation and error checking
 type TypeCreator struct {
 	query *graphql.Object
@@ -37,6 +20,7 @@ type TypeCreator struct {
 	user          *graphql.Object
 	project       *graphql.Object
+	projectUsage  *graphql.Object
 	projectMember *graphql.Object
 	apiKeyInfo    *graphql.Object
 	createAPIKey  *graphql.Object
@@ -48,7 +32,7 @@
 // Create create types and check for error
 func (c *TypeCreator) Create(log *zap.Logger, service *console.Service, mailService *mailservice.Service) error {
 	// inputs
-	c.userInput = graphqlUserInput(c)
+	c.userInput = graphqlUserInput()
 	if err := c.userInput.Error(); err != nil {
 		return err
 	}
@@ -64,6 +48,11 @@
 		return err
 	}
 
+	c.projectUsage = graphqlProjectUsage()
+	if err := c.projectUsage.Error(); err != nil {
+		return err
+	}
+
 	c.apiKeyInfo = graphqlAPIKeyInfo()
 	if err := c.apiKeyInfo.Error(); err != nil {
 		return err
@@ -112,44 +101,3 @@ func (c *TypeCreator) RootQuery() *graphql.Object {
 func (c *TypeCreator) RootMutation() *graphql.Object {
 	return c.mutation
 }
-
-// Token returns *graphql.Object which encapsulates User and token string
-func (c *TypeCreator) Token() *graphql.Object {
-	return c.token
-}
-
-// User returns instance of satellite.User *graphql.Object
-func (c *TypeCreator) User() *graphql.Object {
-	return c.user
-}
-
-// APIKeyInfo returns instance of satellite.APIKeyInfo *graphql.Object
-func (c *TypeCreator) APIKeyInfo() *graphql.Object {
-	return c.apiKeyInfo
-}
-
-// CreateAPIKey encapsulates api key and key info
-// returns *graphql.Object
-func (c *TypeCreator) CreateAPIKey() *graphql.Object {
-	return c.createAPIKey
-}
-
-// Project returns instance of satellite.Project *graphql.Object
-func (c *TypeCreator) Project() *graphql.Object {
-	return c.project
-}
-
-// ProjectMember returns instance of projectMember *graphql.Object
-func (c *TypeCreator) ProjectMember() *graphql.Object {
-	return c.projectMember
-}
-
-// UserInput returns instance of UserInput *graphql.Object
-func (c *TypeCreator) UserInput() *graphql.InputObject {
-	return c.userInput
-}
-
-// ProjectInput returns instance of ProjectInfo *graphql.Object
-func (c *TypeCreator) ProjectInput() *graphql.InputObject {
-	return c.projectInput
-}


@@ -59,7 +59,7 @@ func graphqlUser() *graphql.Object {
 }
 
 // graphqlUserInput creates graphql.InputObject type needed to register/update satellite.User
-func graphqlUserInput(types Types) *graphql.InputObject {
+func graphqlUserInput() *graphql.InputObject {
 	return graphql.NewInputObject(graphql.InputObjectConfig{
 		Name: UserInputType,
 		Fields: graphql.InputObjectConfigFieldMap{


@@ -23,6 +23,8 @@ type DB interface {
 	BucketUsage() accounting.BucketUsage
 	// RegistrationTokens is a getter for RegistrationTokens repository
 	RegistrationTokens() RegistrationTokens
+	// UsageRollups is a getter for UsageRollups repository
+	UsageRollups() UsageRollups
 	// BeginTransaction is a method for opening transaction
 	BeginTx(ctx context.Context) (DBTx, error)


@@ -625,6 +625,24 @@ func (s *Service) GetAPIKeysInfoByProjectID(ctx context.Context, projectID uuid.
 	return s.store.APIKeys().GetByProjectID(ctx, projectID)
 }
 
+// GetProjectUsage retrieves project usage for a given period
+func (s *Service) GetProjectUsage(ctx context.Context, projectID uuid.UUID, since, before time.Time) (*ProjectUsage, error) {
+	var err error
+	defer mon.Task()(&ctx)(&err)
+
+	auth, err := GetAuth(ctx)
+	if err != nil {
+		return nil, err
+	}
+
+	_, err = s.isProjectMember(ctx, auth.User.ID, projectID)
+	if err != nil {
+		return nil, err
+	}
+
+	return s.store.UsageRollups().GetProjectTotal(ctx, projectID, since, before)
+}
+
 // Authorize validates token from context and returns authorized Authorization
 func (s *Service) Authorize(ctx context.Context) (a Authorization, err error) {
 	defer mon.Task()(&ctx)(&err)
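
The new method can also be called directly from Go once the context carries an authorization; a hedged usage sketch (not part of the commit; the project ID and the period are placeholders):

	since := time.Date(2019, 3, 1, 0, 0, 0, 0, time.UTC)
	before := time.Date(2019, 4, 1, 0, 0, 0, 0, time.UTC)

	usage, err := service.GetProjectUsage(ctx, project.ID, since, before)
	if err != nil {
		return err // fails when the caller is not authorized or is not a member of the project
	}
	// per the satellitedb implementation further below, Storage and ObjectsCount are
	// hour-weighted totals while Egress is a plain total for the period
	fmt.Printf("storage: %v egress: %v objects: %v\n", usage.Storage, usage.Egress, usage.ObjectsCount)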


@@ -0,0 +1,27 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.
package console
import (
"context"
"time"
"github.com/skyrings/skyring-common/tools/uuid"
)
// UsageRollups defines how console works with usage rollups
type UsageRollups interface {
GetProjectTotal(ctx context.Context, projectID uuid.UUID, since, before time.Time) (*ProjectUsage, error)
}
// ProjectUsage consist of period total storage, egress
// and objects count per hour for certain Project
type ProjectUsage struct {
Storage float64
Egress float64
ObjectsCount float64
Since time.Time
Before time.Time
}
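
The interface is deliberately small; for illustration, a test double could satisfy it with a canned value. A sketch, not part of the commit:

	// fakeUsageRollups returns a fixed ProjectUsage regardless of project or period.
	type fakeUsageRollups struct {
		usage ProjectUsage
	}

	func (f *fakeUsageRollups) GetProjectTotal(ctx context.Context, projectID uuid.UUID, since, before time.Time) (*ProjectUsage, error) {
		u := f.usage
		u.Since, u.Before = since, before
		return &u, nil
	}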


@@ -0,0 +1,17 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.
package console
import (
"fmt"
"testing"
"time"
)
func TestUsageRollups(t *testing.T) {
fmt.Println(time.Now().Format(time.RFC3339))
// 2018-04-02T18:25:11+03:00
// 2020-04-05T18:25:11+03:00
// 2019-04-03 17:16:26.822857431+00:00
}


@@ -219,8 +219,8 @@ func (db *accountingDB) SaveBucketTallies(ctx context.Context, intervalStart tim
 	for bucketID, info := range bucketTallies {
 		bucketIDComponents := storj.SplitPath(bucketID)
-		bucketName := dbx.BucketStorageTally_BucketName([]byte(bucketIDComponents[0]))
-		projectID := dbx.BucketStorageTally_ProjectId([]byte(bucketIDComponents[1]))
+		bucketName := dbx.BucketStorageTally_BucketName([]byte(bucketIDComponents[1]))
+		projectID := dbx.BucketStorageTally_ProjectId([]byte(bucketIDComponents[0]))
 		interval := dbx.BucketStorageTally_IntervalStart(intervalStart)
 		inlineBytes := dbx.BucketStorageTally_Inline(uint64(info.InlineBytes))
 		remoteBytes := dbx.BucketStorageTally_Remote(uint64(info.RemoteBytes))


@@ -51,6 +51,11 @@ func (db *ConsoleDB) RegistrationTokens() console.RegistrationTokens {
 	return &registrationTokens{db.methods}
 }
 
+// UsageRollups is a getter for console.UsageRollups repository
+func (db *ConsoleDB) UsageRollups() console.UsageRollups {
+	return &usagerollups{db.db}
+}
+
 // BeginTx is a method for opening transaction
 func (db *ConsoleDB) BeginTx(ctx context.Context) (console.DBTx, error) {
 	if db.db == nil {


@@ -444,6 +444,15 @@ read first (
 	orderby desc bucket_storage_tally.interval_start
 )
 
+read all (
+	select bucket_storage_tally
+	where bucket_storage_tally.project_id = ?
+	where bucket_storage_tally.bucket_name = ?
+	where bucket_storage_tally.interval_start >= ?
+	where bucket_storage_tally.interval_start <= ?
+	orderby desc bucket_storage_tally.interval_start
+)
+
 // --- storage node accounting tables --- //
 
 model storagenode_bandwidth_rollup (


@@ -4963,6 +4963,42 @@ func (obj *postgresImpl) First_BucketStorageTally_By_ProjectId_OrderBy_Desc_Inte
}
func (obj *postgresImpl) All_BucketStorageTally_By_ProjectId_And_BucketName_And_IntervalStart_GreaterOrEqual_And_IntervalStart_LessOrEqual_OrderBy_Desc_IntervalStart(ctx context.Context,
bucket_storage_tally_project_id BucketStorageTally_ProjectId_Field,
bucket_storage_tally_bucket_name BucketStorageTally_BucketName_Field,
bucket_storage_tally_interval_start_greater_or_equal BucketStorageTally_IntervalStart_Field,
bucket_storage_tally_interval_start_less_or_equal BucketStorageTally_IntervalStart_Field) (
rows []*BucketStorageTally, err error) {
var __embed_stmt = __sqlbundle_Literal("SELECT bucket_storage_tallies.bucket_name, bucket_storage_tallies.project_id, bucket_storage_tallies.interval_start, bucket_storage_tallies.inline, bucket_storage_tallies.remote, bucket_storage_tallies.remote_segments_count, bucket_storage_tallies.inline_segments_count, bucket_storage_tallies.object_count, bucket_storage_tallies.metadata_size FROM bucket_storage_tallies WHERE bucket_storage_tallies.project_id = ? AND bucket_storage_tallies.bucket_name = ? AND bucket_storage_tallies.interval_start >= ? AND bucket_storage_tallies.interval_start <= ? ORDER BY bucket_storage_tallies.interval_start DESC")
var __values []interface{}
__values = append(__values, bucket_storage_tally_project_id.value(), bucket_storage_tally_bucket_name.value(), bucket_storage_tally_interval_start_greater_or_equal.value(), bucket_storage_tally_interval_start_less_or_equal.value())
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
obj.logStmt(__stmt, __values...)
__rows, err := obj.driver.Query(__stmt, __values...)
if err != nil {
return nil, obj.makeErr(err)
}
defer __rows.Close()
for __rows.Next() {
bucket_storage_tally := &BucketStorageTally{}
err = __rows.Scan(&bucket_storage_tally.BucketName, &bucket_storage_tally.ProjectId, &bucket_storage_tally.IntervalStart, &bucket_storage_tally.Inline, &bucket_storage_tally.Remote, &bucket_storage_tally.RemoteSegmentsCount, &bucket_storage_tally.InlineSegmentsCount, &bucket_storage_tally.ObjectCount, &bucket_storage_tally.MetadataSize)
if err != nil {
return nil, obj.makeErr(err)
}
rows = append(rows, bucket_storage_tally)
}
if err := __rows.Err(); err != nil {
return nil, obj.makeErr(err)
}
return rows, nil
}
func (obj *postgresImpl) Find_StoragenodeBandwidthRollup_By_StoragenodeId_And_IntervalStart_And_Action(ctx context.Context,
	storagenode_bandwidth_rollup_storagenode_id StoragenodeBandwidthRollup_StoragenodeId_Field,
	storagenode_bandwidth_rollup_interval_start StoragenodeBandwidthRollup_IntervalStart_Field,

@@ -7400,6 +7436,42 @@ func (obj *sqlite3Impl) First_BucketStorageTally_By_ProjectId_OrderBy_Desc_Inter
}
func (obj *sqlite3Impl) All_BucketStorageTally_By_ProjectId_And_BucketName_And_IntervalStart_GreaterOrEqual_And_IntervalStart_LessOrEqual_OrderBy_Desc_IntervalStart(ctx context.Context,
bucket_storage_tally_project_id BucketStorageTally_ProjectId_Field,
bucket_storage_tally_bucket_name BucketStorageTally_BucketName_Field,
bucket_storage_tally_interval_start_greater_or_equal BucketStorageTally_IntervalStart_Field,
bucket_storage_tally_interval_start_less_or_equal BucketStorageTally_IntervalStart_Field) (
rows []*BucketStorageTally, err error) {
var __embed_stmt = __sqlbundle_Literal("SELECT bucket_storage_tallies.bucket_name, bucket_storage_tallies.project_id, bucket_storage_tallies.interval_start, bucket_storage_tallies.inline, bucket_storage_tallies.remote, bucket_storage_tallies.remote_segments_count, bucket_storage_tallies.inline_segments_count, bucket_storage_tallies.object_count, bucket_storage_tallies.metadata_size FROM bucket_storage_tallies WHERE bucket_storage_tallies.project_id = ? AND bucket_storage_tallies.bucket_name = ? AND bucket_storage_tallies.interval_start >= ? AND bucket_storage_tallies.interval_start <= ? ORDER BY bucket_storage_tallies.interval_start DESC")
var __values []interface{}
__values = append(__values, bucket_storage_tally_project_id.value(), bucket_storage_tally_bucket_name.value(), bucket_storage_tally_interval_start_greater_or_equal.value(), bucket_storage_tally_interval_start_less_or_equal.value())
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
obj.logStmt(__stmt, __values...)
__rows, err := obj.driver.Query(__stmt, __values...)
if err != nil {
return nil, obj.makeErr(err)
}
defer __rows.Close()
for __rows.Next() {
bucket_storage_tally := &BucketStorageTally{}
err = __rows.Scan(&bucket_storage_tally.BucketName, &bucket_storage_tally.ProjectId, &bucket_storage_tally.IntervalStart, &bucket_storage_tally.Inline, &bucket_storage_tally.Remote, &bucket_storage_tally.RemoteSegmentsCount, &bucket_storage_tally.InlineSegmentsCount, &bucket_storage_tally.ObjectCount, &bucket_storage_tally.MetadataSize)
if err != nil {
return nil, obj.makeErr(err)
}
rows = append(rows, bucket_storage_tally)
}
if err := __rows.Err(); err != nil {
return nil, obj.makeErr(err)
}
return rows, nil
}
func (obj *sqlite3Impl) Find_StoragenodeBandwidthRollup_By_StoragenodeId_And_IntervalStart_And_Action(ctx context.Context,
	storagenode_bandwidth_rollup_storagenode_id StoragenodeBandwidthRollup_StoragenodeId_Field,
	storagenode_bandwidth_rollup_interval_start StoragenodeBandwidthRollup_IntervalStart_Field,

@@ -8908,6 +8980,19 @@ func (rx *Rx) All_ApiKey_By_ProjectId_OrderBy_Asc_Name(ctx context.Context,
	return tx.All_ApiKey_By_ProjectId_OrderBy_Asc_Name(ctx, api_key_project_id)
}
func (rx *Rx) All_BucketStorageTally_By_ProjectId_And_BucketName_And_IntervalStart_GreaterOrEqual_And_IntervalStart_LessOrEqual_OrderBy_Desc_IntervalStart(ctx context.Context,
bucket_storage_tally_project_id BucketStorageTally_ProjectId_Field,
bucket_storage_tally_bucket_name BucketStorageTally_BucketName_Field,
bucket_storage_tally_interval_start_greater_or_equal BucketStorageTally_IntervalStart_Field,
bucket_storage_tally_interval_start_less_or_equal BucketStorageTally_IntervalStart_Field) (
rows []*BucketStorageTally, err error) {
var tx *Tx
if tx, err = rx.getTx(ctx); err != nil {
return
}
return tx.All_BucketStorageTally_By_ProjectId_And_BucketName_And_IntervalStart_GreaterOrEqual_And_IntervalStart_LessOrEqual_OrderBy_Desc_IntervalStart(ctx, bucket_storage_tally_project_id, bucket_storage_tally_bucket_name, bucket_storage_tally_interval_start_greater_or_equal, bucket_storage_tally_interval_start_less_or_equal)
}
func (rx *Rx) All_Node_Id(ctx context.Context) (
	rows []*Id_Row, err error) {
	var tx *Tx

@@ -9677,6 +9762,13 @@ type Methods interface {
	api_key_project_id ApiKey_ProjectId_Field) (
	rows []*ApiKey, err error)
All_BucketStorageTally_By_ProjectId_And_BucketName_And_IntervalStart_GreaterOrEqual_And_IntervalStart_LessOrEqual_OrderBy_Desc_IntervalStart(ctx context.Context,
bucket_storage_tally_project_id BucketStorageTally_ProjectId_Field,
bucket_storage_tally_bucket_name BucketStorageTally_BucketName_Field,
bucket_storage_tally_interval_start_greater_or_equal BucketStorageTally_IntervalStart_Field,
bucket_storage_tally_interval_start_less_or_equal BucketStorageTally_IntervalStart_Field) (
rows []*BucketStorageTally, err error)
	All_Node_Id(ctx context.Context) (
		rows []*Id_Row, err error)


@@ -458,6 +458,25 @@ func (m *lockedRegistrationTokens) UpdateOwner(ctx context.Context, secret conso
 	return m.db.UpdateOwner(ctx, secret, ownerID)
 }
 
+// UsageRollups is a getter for UsageRollups repository
+func (m *lockedConsole) UsageRollups() console.UsageRollups {
+	m.Lock()
+	defer m.Unlock()
+	return &lockedUsageRollups{m.Locker, m.db.UsageRollups()}
+}
+
+// lockedUsageRollups implements locking wrapper for console.UsageRollups
+type lockedUsageRollups struct {
+	sync.Locker
+	db console.UsageRollups
+}
+
+func (m *lockedUsageRollups) GetProjectTotal(ctx context.Context, projectID uuid.UUID, since time.Time, before time.Time) (*console.ProjectUsage, error) {
+	m.Lock()
+	defer m.Unlock()
+	return m.db.GetProjectTotal(ctx, projectID, since, before)
+}
+
 // Users is a getter for Users repository
 func (m *lockedConsole) Users() console.Users {
 	m.Lock()


@@ -0,0 +1,107 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.
package satellitedb
import (
"context"
"time"
"github.com/skyrings/skyring-common/tools/uuid"
"github.com/zeebo/errs"
"storj.io/storj/internal/memory"
"storj.io/storj/pkg/pb"
"storj.io/storj/satellite/console"
dbx "storj.io/storj/satellite/satellitedb/dbx"
)
// usagerollups implements console.UsageRollups
type usagerollups struct {
db *dbx.DB
}
// GetProjectTotal retrieves project usage for a given period
func (db *usagerollups) GetProjectTotal(ctx context.Context, projectID uuid.UUID, since, before time.Time) (usage *console.ProjectUsage, err error) {
storageQuery := db.db.All_BucketStorageTally_By_ProjectId_And_BucketName_And_IntervalStart_GreaterOrEqual_And_IntervalStart_LessOrEqual_OrderBy_Desc_IntervalStart
rollupsQuery := `SELECT SUM(settled), SUM(inline), action
FROM bucket_bandwidth_rollups
WHERE project_id = ? AND interval_start >= ? AND interval_start <= ?
GROUP BY action`
rollupsRows, err := db.db.QueryContext(ctx, db.db.Rebind(rollupsQuery), []byte(projectID.String()), since, before)
if err != nil {
return nil, err
}
defer func() { err = errs.Combine(err, rollupsRows.Close()) }()
var totalEgress int64
for rollupsRows.Next() {
var action pb.PieceAction
var settled, inline int64
err = rollupsRows.Scan(&settled, &inline, &action)
if err != nil {
return nil, err
}
// add values for egress
if action == pb.PieceAction_GET || action == pb.PieceAction_GET_AUDIT || action == pb.PieceAction_GET_REPAIR {
totalEgress += settled + inline
}
}
bucketsQuery := "SELECT DISTINCT bucket_name FROM bucket_bandwidth_rollups where project_id = ? and interval_start >= ? and interval_start <= ?"
bucketRows, err := db.db.QueryContext(ctx, db.db.Rebind(bucketsQuery), []byte(projectID.String()), since, before)
if err != nil {
return nil, err
}
defer func() { err = errs.Combine(err, bucketRows.Close()) }()
var buckets []string
for bucketRows.Next() {
var bucket string
err = bucketRows.Scan(&bucket)
if err != nil {
return nil, err
}
buckets = append(buckets, bucket)
}
bucketsTallies := make(map[string]*[]*dbx.BucketStorageTally)
for _, bucket := range buckets {
storageTallies, err := storageQuery(ctx,
dbx.BucketStorageTally_ProjectId([]byte(projectID.String())),
dbx.BucketStorageTally_BucketName([]byte(bucket)),
dbx.BucketStorageTally_IntervalStart(since),
dbx.BucketStorageTally_IntervalStart(before))
if err != nil {
return nil, err
}
bucketsTallies[bucket] = &storageTallies
}
usage = new(console.ProjectUsage)
usage.Egress = memory.Size(totalEgress).GB()
// sum up storage and objects
for _, tallies := range bucketsTallies {
for i := len(*tallies) - 1; i > 0; i-- {
current := (*tallies)[i]
hours := (*tallies)[i-1].IntervalStart.Sub(current.IntervalStart).Hours()
usage.Storage += memory.Size(current.Inline).GB() * hours
usage.Storage += memory.Size(current.Remote).GB() * hours
usage.ObjectsCount += float64(current.ObjectCount) * hours
}
}
usage.Since = since
usage.Before = before
return usage, nil
}
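
A quick sanity check of the accumulation loop above, with made-up numbers: the tallies come back ordered by interval_start descending, and each older tally is weighted by the gap to the tally that follows it chronologically.

	// Hypothetical tallies for one bucket, ordered by interval_start descending:
	//   tallies[0]: 12:00, inline 0 B,    remote 2 GB
	//   tallies[1]: 10:00, inline 0.5 GB, remote 1 GB
	//
	// The loop visits i = 1 only (i > 0), so current = tallies[1], the older tally:
	//   hours          = tallies[0].IntervalStart.Sub(tallies[1].IntervalStart).Hours() // 2
	//   usage.Storage += memory.Size(tallies[1].Inline).GB() * hours                    // 0.5 * 2 = 1
	//   usage.Storage += memory.Size(tallies[1].Remote).GB() * hours                    // 1   * 2 = 2
	//
	// Storage therefore ends up as 3 GB-hours (and ObjectsCount as object-hours),
	// while Egress is a plain GB total for the whole period.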