satellite/payments: project usage charges (#3512)

Yehor Butko 2019-11-15 16:27:44 +02:00 committed by GitHub
parent aa7b5b7c53
commit a8e4e9cb03
36 changed files with 894 additions and 721 deletions

View File

@ -120,7 +120,7 @@ type SatelliteSystem struct {
Accounting struct {
Tally *tally.Service
Rollup *rollup.Service
ProjectUsage *accounting.ProjectUsage
ProjectUsage *accounting.Service
}
LiveAccounting struct {

View File

@ -53,6 +53,73 @@ type StorageNodeUsage struct {
Timestamp time.Time
}
// ProjectUsage consists of the period's total storage, egress
// and object count per hour for a certain project, in bytes
type ProjectUsage struct {
Storage float64
Egress int64
ObjectCount float64
Since time.Time
Before time.Time
}
// BucketUsage consists of the total bucket usage for a period
type BucketUsage struct {
ProjectID uuid.UUID
BucketName string
Storage float64
Egress float64
ObjectCount int64
Since time.Time
Before time.Time
}
// BucketUsageCursor holds info for bucket usage
// cursor pagination
type BucketUsageCursor struct {
Search string
Limit uint
Page uint
}
// BucketUsagePage represents bucket usage page result
type BucketUsagePage struct {
BucketUsages []BucketUsage
Search string
Limit uint
Offset uint64
PageCount uint
CurrentPage uint
TotalCount uint64
}
// BucketUsageRollup is the total bucket usage info
// for a certain period
type BucketUsageRollup struct {
ProjectID uuid.UUID
BucketName []byte
RemoteStoredData float64
InlineStoredData float64
RemoteSegments float64
InlineSegments float64
ObjectCount float64
MetadataSize float64
RepairEgress float64
GetEgress float64
AuditEgress float64
Since time.Time
Before time.Time
}
// StoragenodeAccounting stores information about bandwidth and storage usage for storage nodes
//
// architecture: Database
@ -93,6 +160,9 @@ type ProjectAccounting interface {
GetStorageTotals(ctx context.Context, projectID uuid.UUID) (int64, int64, error)
// GetProjectUsageLimits returns project usage limit
GetProjectUsageLimits(ctx context.Context, projectID uuid.UUID) (memory.Size, error)
GetProjectTotal(ctx context.Context, projectID uuid.UUID, since, before time.Time) (*ProjectUsage, error)
GetBucketUsageRollups(ctx context.Context, projectID uuid.UUID, since, before time.Time) ([]BucketUsageRollup, error)
GetBucketTotals(ctx context.Context, projectID uuid.UUID, cursor BucketUsageCursor, since, before time.Time) (*BucketUsagePage, error)
}
// Cache stores live information about project storage which has not yet been synced to ProjectAccounting.
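
With this change, the three usage-rollup queries that used to live on console.UsageRollups (removed later in this diff) now hang off accounting.ProjectAccounting. As a rough, non-authoritative sketch of how a caller holding this interface might read recent usage through the new GetProjectTotal signature (the helper name, the 30-day window, and the imports — context, fmt, time, the skyring uuid package, storj.io/storj/satellite/accounting — are illustrative, not part of the commit):

    // usageReport is a hypothetical caller, not part of this commit.
    func usageReport(ctx context.Context, pdb accounting.ProjectAccounting, projectID uuid.UUID) error {
        before := time.Now()
        since := before.AddDate(0, 0, -30) // illustrative 30-day window
        usage, err := pdb.GetProjectTotal(ctx, projectID, since, before)
        if err != nil {
            return err
        }
        // Per the ProjectUsage type above: Storage and ObjectCount are hour-weighted
        // sums (byte-hours / object-hours), while Egress is plain bytes.
        fmt.Printf("storage %.0f byte-hours, egress %d bytes, objects %.0f object-hours\n",
            usage.Storage, usage.Egress, usage.ObjectCount)
        return nil
    }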

View File

@ -30,18 +30,18 @@ var (
ErrProjectUsage = errs.Class("project usage error")
)
// ProjectUsage defines project usage
// Service handles project usage related logic.
//
// architecture: Service
type ProjectUsage struct {
type Service struct {
projectAccountingDB ProjectAccounting
liveAccounting Cache
maxAlphaUsage memory.Size
}
// NewProjectUsage created new instance of project usage service
func NewProjectUsage(projectAccountingDB ProjectAccounting, liveAccounting Cache, maxAlphaUsage memory.Size) *ProjectUsage {
return &ProjectUsage{
// NewService creates a new instance of the project usage service.
func NewService(projectAccountingDB ProjectAccounting, liveAccounting Cache, maxAlphaUsage memory.Size) *Service {
return &Service{
projectAccountingDB: projectAccountingDB,
liveAccounting: liveAccounting,
maxAlphaUsage: maxAlphaUsage,
@ -52,7 +52,7 @@ func NewProjectUsage(projectAccountingDB ProjectAccounting, liveAccounting Cache
// for a project in the past month (30 days). The usage limit is (e.g 25GB) multiplied by the redundancy
// expansion factor, so that the uplinks have a raw limit.
// Ref: https://storjlabs.atlassian.net/browse/V3-1274
func (usage *ProjectUsage) ExceedsBandwidthUsage(ctx context.Context, projectID uuid.UUID, bucketID []byte) (_ bool, limit memory.Size, err error) {
func (usage *Service) ExceedsBandwidthUsage(ctx context.Context, projectID uuid.UUID, bucketID []byte) (_ bool, limit memory.Size, err error) {
defer mon.Task()(&ctx)(&err)
var group errgroup.Group
@ -91,7 +91,7 @@ func (usage *ProjectUsage) ExceedsBandwidthUsage(ctx context.Context, projectID
// for a project in the past month (30 days). The usage limit is (e.g. 25GB) multiplied by the redundancy
// expansion factor, so that the uplinks have a raw limit.
// Ref: https://storjlabs.atlassian.net/browse/V3-1274
func (usage *ProjectUsage) ExceedsStorageUsage(ctx context.Context, projectID uuid.UUID) (_ bool, limit memory.Size, err error) {
func (usage *Service) ExceedsStorageUsage(ctx context.Context, projectID uuid.UUID) (_ bool, limit memory.Size, err error) {
defer mon.Task()(&ctx)(&err)
var group errgroup.Group
@ -124,7 +124,7 @@ func (usage *ProjectUsage) ExceedsStorageUsage(ctx context.Context, projectID uu
return false, limit, nil
}
func (usage *ProjectUsage) getProjectStorageTotals(ctx context.Context, projectID uuid.UUID) (total int64, err error) {
func (usage *Service) getProjectStorageTotals(ctx context.Context, projectID uuid.UUID) (total int64, err error) {
defer mon.Task()(&ctx)(&err)
lastCountInline, lastCountRemote, err := usage.projectAccountingDB.GetStorageTotals(ctx, projectID)
@ -141,7 +141,7 @@ func (usage *ProjectUsage) getProjectStorageTotals(ctx context.Context, projectI
// AddProjectStorageUsage lets the live accounting know that the given
// project has just added inlineSpaceUsed bytes of inline space usage
// and remoteSpaceUsed bytes of remote space usage.
func (usage *ProjectUsage) AddProjectStorageUsage(ctx context.Context, projectID uuid.UUID, inlineSpaceUsed, remoteSpaceUsed int64) (err error) {
func (usage *Service) AddProjectStorageUsage(ctx context.Context, projectID uuid.UUID, inlineSpaceUsed, remoteSpaceUsed int64) (err error) {
defer mon.Task()(&ctx)(&err)
return usage.liveAccounting.AddProjectStorageUsage(ctx, projectID, inlineSpaceUsed, remoteSpaceUsed)
}

View File

@ -4,6 +4,7 @@
package accounting_test
import (
"encoding/binary"
"fmt"
"testing"
"time"
@ -290,3 +291,149 @@ func TestProjectUsageCustomLimit(t *testing.T) {
assert.Error(t, actualErr)
})
}
func TestUsageRollups(t *testing.T) {
const (
numBuckets = 5
tallyIntervals = 10
tallyInterval = time.Hour
)
satellitedbtest.Run(t, func(t *testing.T, db satellite.DB) {
ctx := testcontext.New(t)
defer ctx.Cleanup()
now := time.Now()
start := now.Add(tallyInterval * time.Duration(-tallyIntervals))
project1 := testrand.UUID()
project2 := testrand.UUID()
p1base := binary.BigEndian.Uint64(project1[:8]) >> 48
p2base := binary.BigEndian.Uint64(project2[:8]) >> 48
getValue := func(i, j int, base uint64) int64 {
a := uint64((i+1)*(j+1)) ^ base
a &^= (1 << 63)
return int64(a)
}
actions := []pb.PieceAction{
pb.PieceAction_GET,
pb.PieceAction_GET_AUDIT,
pb.PieceAction_GET_REPAIR,
}
var buckets []string
for i := 0; i < numBuckets; i++ {
bucketName := fmt.Sprintf("bucket_%d", i)
// project 1
for _, action := range actions {
value := getValue(0, i, p1base)
err := db.Orders().UpdateBucketBandwidthAllocation(ctx, project1, []byte(bucketName), action, value*6, now)
require.NoError(t, err)
err = db.Orders().UpdateBucketBandwidthSettle(ctx, project1, []byte(bucketName), action, value*3, now)
require.NoError(t, err)
err = db.Orders().UpdateBucketBandwidthInline(ctx, project1, []byte(bucketName), action, value, now)
require.NoError(t, err)
}
// project 2
for _, action := range actions {
value := getValue(1, i, p2base)
err := db.Orders().UpdateBucketBandwidthAllocation(ctx, project2, []byte(bucketName), action, value*6, now)
require.NoError(t, err)
err = db.Orders().UpdateBucketBandwidthSettle(ctx, project2, []byte(bucketName), action, value*3, now)
require.NoError(t, err)
err = db.Orders().UpdateBucketBandwidthInline(ctx, project2, []byte(bucketName), action, value, now)
require.NoError(t, err)
}
buckets = append(buckets, bucketName)
}
for i := 0; i < tallyIntervals; i++ {
interval := start.Add(tallyInterval * time.Duration(i))
bucketTallies := make(map[string]*accounting.BucketTally)
for j, bucket := range buckets {
bucketID1 := project1.String() + "/" + bucket
bucketID2 := project2.String() + "/" + bucket
value1 := getValue(i, j, p1base) * 10
value2 := getValue(i, j, p2base) * 10
tally1 := &accounting.BucketTally{
BucketName: []byte(bucket),
ProjectID: project1,
ObjectCount: value1,
InlineSegments: value1,
RemoteSegments: value1,
InlineBytes: value1,
RemoteBytes: value1,
MetadataSize: value1,
}
tally2 := &accounting.BucketTally{
BucketName: []byte(bucket),
ProjectID: project2,
ObjectCount: value2,
InlineSegments: value2,
RemoteSegments: value2,
InlineBytes: value2,
RemoteBytes: value2,
MetadataSize: value2,
}
bucketTallies[bucketID1] = tally1
bucketTallies[bucketID2] = tally2
}
err := db.ProjectAccounting().SaveTallies(ctx, interval, bucketTallies)
require.NoError(t, err)
}
usageRollups := db.ProjectAccounting()
t.Run("test project total", func(t *testing.T) {
projTotal1, err := usageRollups.GetProjectTotal(ctx, project1, start, now)
assert.NoError(t, err)
assert.NotNil(t, projTotal1)
projTotal2, err := usageRollups.GetProjectTotal(ctx, project2, start, now)
assert.NoError(t, err)
assert.NotNil(t, projTotal2)
})
t.Run("test bucket usage rollups", func(t *testing.T) {
rollups1, err := usageRollups.GetBucketUsageRollups(ctx, project1, start, now)
assert.NoError(t, err)
assert.NotNil(t, rollups1)
rollups2, err := usageRollups.GetBucketUsageRollups(ctx, project2, start, now)
assert.NoError(t, err)
assert.NotNil(t, rollups2)
})
t.Run("test bucket totals", func(t *testing.T) {
cursor := accounting.BucketUsageCursor{
Limit: 20,
Page: 1,
}
totals1, err := usageRollups.GetBucketTotals(ctx, project1, cursor, start, now)
assert.NoError(t, err)
assert.NotNil(t, totals1)
totals2, err := usageRollups.GetBucketTotals(ctx, project2, cursor, start, now)
assert.NoError(t, err)
assert.NotNil(t, totals2)
})
})
}

View File

@ -96,7 +96,7 @@ type API struct {
}
Accounting struct {
ProjectUsage *accounting.ProjectUsage
ProjectUsage *accounting.Service
}
LiveAccounting struct {
@ -223,7 +223,7 @@ func NewAPI(log *zap.Logger, full *identity.FullIdentity, db DB, pointerDB metai
{ // setup accounting project usage
log.Debug("Satellite API Process setting up accounting project usage")
peer.Accounting.ProjectUsage = accounting.NewProjectUsage(
peer.Accounting.ProjectUsage = accounting.NewService(
peer.DB.ProjectAccounting(),
peer.LiveAccounting.Cache,
config.Rollup.MaxAlphaUsage,
@ -376,7 +376,11 @@ func NewAPI(log *zap.Logger, full *identity.FullIdentity, db DB, pointerDB metai
peer.Log.Named("stripecoinpayments service"),
pc.StripeCoinPayments,
peer.DB.StripeCoinPayments(),
peer.DB.Console().Projects())
peer.DB.Console().Projects(),
peer.DB.ProjectAccounting(),
pc.PerObjectPrice,
pc.EgressPrice,
pc.TbhPrice)
peer.Payments.Accounts = service.Accounts()
peer.Payments.Inspector = stripecoinpayments.NewEndpoint(service)
@ -431,6 +435,7 @@ func NewAPI(log *zap.Logger, full *identity.FullIdentity, db DB, pointerDB metai
peer.Log.Named("console:service"),
&consoleauth.Hmac{Secret: []byte(consoleConfig.AuthTokenSecret)},
peer.DB.Console(),
peer.DB.ProjectAccounting(),
peer.DB.Rewards(),
peer.Marketing.PartnersService,
peer.Payments.Accounts,

View File

@ -79,6 +79,29 @@ func (p *Payments) AccountBalance(w http.ResponseWriter, r *http.Request) {
}
}
// ProjectsCharges returns how much money the current user will be charged for each project they own.
func (p *Payments) ProjectsCharges(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
var err error
defer mon.Task()(&ctx)(&err)
charges, err := p.service.Payments().ProjectsCharges(ctx)
if err != nil {
if console.ErrUnauthorized.Has(err) {
p.serveJSONError(w, http.StatusUnauthorized, err)
return
}
p.serveJSONError(w, http.StatusInternalServerError, err)
return
}
err = json.NewEncoder(w).Encode(charges)
if err != nil {
p.log.Error("failed to write json response", zap.Error(ErrPaymentsAPI.Wrap(err)))
}
}
// AddCreditCard is used to save new credit card and attach it to payment account.
func (p *Payments) AddCreditCard(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()

View File

@ -61,12 +61,13 @@ func TestGrapqhlMutation(t *testing.T) {
)
paymentsConfig := stripecoinpayments.Config{}
payments := stripecoinpayments.NewService(log, paymentsConfig, db.StripeCoinPayments(), db.Console().Projects())
payments := stripecoinpayments.NewService(log, paymentsConfig, db.StripeCoinPayments(), db.Console().Projects(), db.ProjectAccounting(), 0, 0, 0)
service, err := console.NewService(
log,
&consoleauth.Hmac{Secret: []byte("my-suppa-secret-key")},
db.Console(),
db.ProjectAccounting(),
db.Rewards(),
partnersService,
payments.Accounts(),

View File

@ -8,6 +8,7 @@ import (
"github.com/graphql-go/graphql"
"storj.io/storj/satellite/accounting"
"storj.io/storj/satellite/console"
)
@ -362,7 +363,7 @@ func fromMapProjectInfo(args map[string]interface{}) (project console.ProjectInf
}
// fromMapBucketUsageCursor creates console.BucketUsageCursor from input args
func fromMapBucketUsageCursor(args map[string]interface{}) (cursor console.BucketUsageCursor) {
func fromMapBucketUsageCursor(args map[string]interface{}) (cursor accounting.BucketUsageCursor) {
limit, _ := args[LimitArg].(int)
page, _ := args[PageArg].(int)

View File

@ -43,12 +43,13 @@ func TestGraphqlQuery(t *testing.T) {
)
paymentsConfig := stripecoinpayments.Config{}
payments := stripecoinpayments.NewService(log, paymentsConfig, db.StripeCoinPayments(), db.Console().Projects())
payments := stripecoinpayments.NewService(log, paymentsConfig, db.StripeCoinPayments(), db.Console().Projects(), db.ProjectAccounting(), 0, 0, 0)
service, err := console.NewService(
log,
&consoleauth.Hmac{Secret: []byte("my-suppa-secret-key")},
db.Console(),
db.ProjectAccounting(),
db.Rewards(),
partnersService,
payments.Accounts(),

View File

@ -141,6 +141,7 @@ func NewServer(logger *zap.Logger, config Config, service *console.Service, mail
paymentsRouter.HandleFunc("/cards", paymentController.MakeCreditCardDefault).Methods(http.MethodPatch)
paymentsRouter.HandleFunc("/cards", paymentController.ListCreditCards).Methods(http.MethodGet)
paymentsRouter.HandleFunc("/cards/{cardId}", paymentController.RemoveCreditCard).Methods(http.MethodDelete)
paymentsRouter.HandleFunc("/account/charges", paymentController.ProjectsCharges).Methods(http.MethodGet)
paymentsRouter.HandleFunc("/account/balance", paymentController.AccountBalance).Methods(http.MethodGet)
paymentsRouter.HandleFunc("/account", paymentController.SetupAccount).Methods(http.MethodPost)
paymentsRouter.HandleFunc("/billing-history", paymentController.BillingHistory).Methods(http.MethodGet)

View File

@ -7,36 +7,34 @@ import (
"context"
)
// DB contains access to different satellite databases
// DB contains access to different satellite databases.
//
// architecture: Database
type DB interface {
// Users is a getter for Users repository
// Users is a getter for Users repository.
Users() Users
// Projects is a getter for Projects repository
// Projects is a getter for Projects repository.
Projects() Projects
// ProjectMembers is a getter for ProjectMembers repository
// ProjectMembers is a getter for ProjectMembers repository.
ProjectMembers() ProjectMembers
// APIKeys is a getter for APIKeys repository
// APIKeys is a getter for APIKeys repository.
APIKeys() APIKeys
// RegistrationTokens is a getter for RegistrationTokens repository
// RegistrationTokens is a getter for RegistrationTokens repository.
RegistrationTokens() RegistrationTokens
// ResetPasswordTokens is a getter for ResetPasswordTokens repository
// ResetPasswordTokens is a getter for ResetPasswordTokens repository.
ResetPasswordTokens() ResetPasswordTokens
// UsageRollups is a getter for UsageRollups repository
UsageRollups() UsageRollups
// UserCredits is a getter for UserCredits repository
// UserCredits is a getter for UserCredits repository.
UserCredits() UserCredits
// BeginTransaction is a method for opening transaction
// BeginTransaction is a method for opening transaction.
BeginTx(ctx context.Context) (DBTx, error)
}
// DBTx extends Database with transaction scope
// DBTx extends Database with transaction scope.
type DBTx interface {
DB
// CommitTransaction is a method for committing and closing transaction
// CommitTransaction is a method for committing and closing transaction.
Commit() error
// RollbackTransaction is a method for rollback and closing transaction
// RollbackTransaction is a method for rollback and closing transaction.
Rollback() error
}

View File

@ -16,10 +16,12 @@ import (
type Projects interface {
// GetAll is a method for querying all projects from the database.
GetAll(ctx context.Context) ([]Project, error)
// GetCreatedBefore retrieves all projects created before provided date
// GetCreatedBefore retrieves all projects created before provided date.
GetCreatedBefore(ctx context.Context, before time.Time) ([]Project, error)
// GetByUserID is a method for querying all projects from the database by userID.
GetByUserID(ctx context.Context, userID uuid.UUID) ([]Project, error)
// GetOwn is a method for querying all projects created by current user from the database.
GetOwn(ctx context.Context, userID uuid.UUID) (_ []Project, err error)
// Get is a method for querying project from the database by id.
Get(ctx context.Context, id uuid.UUID) (*Project, error)
// Insert is a method for inserting project into the database.

View File

@ -17,6 +17,7 @@ import (
"storj.io/storj/pkg/auth"
"storj.io/storj/pkg/macaroon"
"storj.io/storj/satellite/accounting"
"storj.io/storj/satellite/console/consoleauth"
"storj.io/storj/satellite/payments"
"storj.io/storj/satellite/rewards"
@ -25,13 +26,11 @@ import (
var mon = monkit.Package()
const (
// maxLimit specifies the limit for all paged queries
// maxLimit specifies the limit for all paged queries.
maxLimit = 50
tokenExpirationTime = 24 * time.Hour
// DefaultPasswordCost is the hashing complexity
DefaultPasswordCost = bcrypt.DefaultCost
// TestPasswordCost is the hashing complexity to use for testing
// TestPasswordCost is the hashing complexity to use for testing.
TestPasswordCost = bcrypt.MinCost
)
@ -53,13 +52,13 @@ const (
projLimitVanguardErrMsg = "Sorry, during the Vanguard release you have a limited number of projects"
)
// Error describes internal console error
// Error describes internal console error.
var Error = errs.Class("service error")
// ErrNoMembership is error type of not belonging to a specific project
// ErrNoMembership is error type of not belonging to a specific project.
var ErrNoMembership = errs.Class("no membership error")
// ErrTokenExpiration is error type of token reached expiration time
// ErrTokenExpiration is error type of token reached expiration time.
var ErrTokenExpiration = errs.Class("token expiration error")
// Service is handling accounts related logic
@ -68,11 +67,12 @@ var ErrTokenExpiration = errs.Class("token expiration error")
type Service struct {
Signer
log *zap.Logger
store DB
rewards rewards.DB
partners *rewards.PartnersService
accounts payments.Accounts
log *zap.Logger
store DB
projectAccounting accounting.ProjectAccounting
rewards rewards.DB
partners *rewards.PartnersService
accounts payments.Accounts
passwordCost int
}
@ -82,8 +82,8 @@ type PaymentsService struct {
service *Service
}
// NewService returns new instance of Service
func NewService(log *zap.Logger, signer Signer, store DB, rewards rewards.DB, partners *rewards.PartnersService, accounts payments.Accounts, passwordCost int) (*Service, error) {
// NewService returns new instance of Service.
func NewService(log *zap.Logger, signer Signer, store DB, projectAccounting accounting.ProjectAccounting, rewards rewards.DB, partners *rewards.PartnersService, accounts payments.Accounts, passwordCost int) (*Service, error) {
if signer == nil {
return nil, errs.New("signer can't be nil")
}
@ -98,13 +98,14 @@ func NewService(log *zap.Logger, signer Signer, store DB, rewards rewards.DB, pa
}
return &Service{
log: log,
Signer: signer,
store: store,
rewards: rewards,
partners: partners,
accounts: accounts,
passwordCost: passwordCost,
log: log,
Signer: signer,
store: store,
projectAccounting: projectAccounting,
rewards: rewards,
partners: partners,
accounts: accounts,
passwordCost: passwordCost,
}, nil
}
@ -161,6 +162,18 @@ func (payments PaymentsService) MakeCreditCardDefault(ctx context.Context, cardI
return payments.service.accounts.CreditCards().MakeDefault(ctx, auth.User.ID, cardID)
}
// ProjectsCharges returns how much money the current user will be charged for each project they own.
func (payments PaymentsService) ProjectsCharges(ctx context.Context) (_ []payments.ProjectCharge, err error) {
defer mon.Task()(&ctx)(&err)
auth, err := GetAuth(ctx)
if err != nil {
return nil, err
}
return payments.service.accounts.ProjectCharges(ctx, auth.User.ID)
}
// ListCreditCards returns a list of credit cards for a given payment account.
func (payments PaymentsService) ListCreditCards(ctx context.Context) (_ []payments.CreditCard, err error) {
defer mon.Task()(&ctx)(&err)
@ -497,7 +510,7 @@ func (s *Service) ResetPassword(ctx context.Context, resetPasswordToken, passwor
}
if err = s.store.ResetPasswordTokens().Delete(ctx, token.Secret); err != nil {
return err
return Error.Wrap(err)
}
return nil
@ -1115,7 +1128,7 @@ func (s *Service) GetAPIKeys(ctx context.Context, projectID uuid.UUID, cursor AP
}
// GetProjectUsage retrieves project usage for a given period
func (s *Service) GetProjectUsage(ctx context.Context, projectID uuid.UUID, since, before time.Time) (_ *ProjectUsage, err error) {
func (s *Service) GetProjectUsage(ctx context.Context, projectID uuid.UUID, since, before time.Time) (_ *accounting.ProjectUsage, err error) {
defer mon.Task()(&ctx)(&err)
auth, err := GetAuth(ctx)
@ -1125,10 +1138,10 @@ func (s *Service) GetProjectUsage(ctx context.Context, projectID uuid.UUID, sinc
_, err = s.isProjectMember(ctx, auth.User.ID, projectID)
if err != nil {
return nil, err
return nil, ErrUnauthorized.Wrap(err)
}
projectUsage, err := s.store.UsageRollups().GetProjectTotal(ctx, projectID, since, before)
projectUsage, err := s.projectAccounting.GetProjectTotal(ctx, projectID, since, before)
if err != nil {
return nil, Error.Wrap(err)
}
@ -1137,7 +1150,7 @@ func (s *Service) GetProjectUsage(ctx context.Context, projectID uuid.UUID, sinc
}
// GetBucketTotals retrieves paged bucket total usages since project creation
func (s *Service) GetBucketTotals(ctx context.Context, projectID uuid.UUID, cursor BucketUsageCursor, before time.Time) (_ *BucketUsagePage, err error) {
func (s *Service) GetBucketTotals(ctx context.Context, projectID uuid.UUID, cursor accounting.BucketUsageCursor, before time.Time) (_ *accounting.BucketUsagePage, err error) {
defer mon.Task()(&ctx)(&err)
auth, err := GetAuth(ctx)
@ -1147,14 +1160,19 @@ func (s *Service) GetBucketTotals(ctx context.Context, projectID uuid.UUID, curs
isMember, err := s.isProjectMember(ctx, auth.User.ID, projectID)
if err != nil {
return nil, err
return nil, ErrUnauthorized.Wrap(err)
}
return s.store.UsageRollups().GetBucketTotals(ctx, projectID, cursor, isMember.project.CreatedAt, before)
usage, err := s.projectAccounting.GetBucketTotals(ctx, projectID, cursor, isMember.project.CreatedAt, before)
if err != nil {
return nil, Error.Wrap(err)
}
return usage, nil
}
// GetBucketUsageRollups retrieves summed usage rollups for every bucket of particular project for a given period
func (s *Service) GetBucketUsageRollups(ctx context.Context, projectID uuid.UUID, since, before time.Time) (_ []BucketUsageRollup, err error) {
func (s *Service) GetBucketUsageRollups(ctx context.Context, projectID uuid.UUID, since, before time.Time) (_ []accounting.BucketUsageRollup, err error) {
defer mon.Task()(&ctx)(&err)
auth, err := GetAuth(ctx)
@ -1164,10 +1182,10 @@ func (s *Service) GetBucketUsageRollups(ctx context.Context, projectID uuid.UUID
_, err = s.isProjectMember(ctx, auth.User.ID, projectID)
if err != nil {
return nil, err
return nil, ErrUnauthorized.Wrap(err)
}
result, err := s.store.UsageRollups().GetBucketUsageRollups(ctx, projectID, since, before)
result, err := s.projectAccounting.GetBucketUsageRollups(ctx, projectID, since, before)
if err != nil {
return nil, err
}
@ -1221,7 +1239,7 @@ func (s *Service) checkProjectLimit(ctx context.Context, userID uuid.UUID) (err
return Error.Wrap(err)
}
if len(projects) >= registrationToken.ProjectLimit {
return errs.New(projLimitVanguardErrMsg)
return ErrUnauthorized.Wrap(errs.New(projLimitVanguardErrMsg))
}
return nil

View File

@ -1,87 +0,0 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.
package console
import (
"context"
"time"
"github.com/skyrings/skyring-common/tools/uuid"
)
// UsageRollups defines how console works with usage rollups
//
// architecture: Database
type UsageRollups interface {
GetProjectTotal(ctx context.Context, projectID uuid.UUID, since, before time.Time) (*ProjectUsage, error)
GetBucketUsageRollups(ctx context.Context, projectID uuid.UUID, since, before time.Time) ([]BucketUsageRollup, error)
GetBucketTotals(ctx context.Context, projectID uuid.UUID, cursor BucketUsageCursor, since, before time.Time) (*BucketUsagePage, error)
}
// ProjectUsage consist of period total storage, egress
// and objects count per hour for certain Project
type ProjectUsage struct {
Storage float64
Egress float64
ObjectCount float64
Since time.Time
Before time.Time
}
// BucketUsage consist of total bucket usage for period
type BucketUsage struct {
ProjectID uuid.UUID
BucketName string
Storage float64
Egress float64
ObjectCount int64
Since time.Time
Before time.Time
}
// BucketUsageCursor holds info for bucket usage
// cursor pagination
type BucketUsageCursor struct {
Search string
Limit uint
Page uint
}
// BucketUsagePage represents bucket usage page result
type BucketUsagePage struct {
BucketUsages []BucketUsage
Search string
Limit uint
Offset uint64
PageCount uint
CurrentPage uint
TotalCount uint64
}
// BucketUsageRollup is total bucket usage info
// for certain period
type BucketUsageRollup struct {
ProjectID uuid.UUID
BucketName []byte
RemoteStoredData float64
InlineStoredData float64
RemoteSegments float64
InlineSegments float64
ObjectCount float64
MetadataSize float64
RepairEgress float64
GetEgress float64
AuditEgress float64
Since time.Time
Before time.Time
}

View File

@ -1,168 +0,0 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.
package console_test
import (
"encoding/binary"
"fmt"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"storj.io/storj/pkg/pb"
"storj.io/storj/private/testcontext"
"storj.io/storj/private/testrand"
"storj.io/storj/satellite"
"storj.io/storj/satellite/accounting"
"storj.io/storj/satellite/console"
"storj.io/storj/satellite/satellitedb/satellitedbtest"
)
func TestUsageRollups(t *testing.T) {
const (
numBuckets = 5
tallyIntervals = 10
tallyInterval = time.Hour
)
satellitedbtest.Run(t, func(t *testing.T, db satellite.DB) {
ctx := testcontext.New(t)
defer ctx.Cleanup()
now := time.Now()
start := now.Add(tallyInterval * time.Duration(-tallyIntervals))
project1 := testrand.UUID()
project2 := testrand.UUID()
p1base := binary.BigEndian.Uint64(project1[:8]) >> 48
p2base := binary.BigEndian.Uint64(project2[:8]) >> 48
getValue := func(i, j int, base uint64) int64 {
a := uint64((i+1)*(j+1)) ^ base
a &^= (1 << 63)
return int64(a)
}
actions := []pb.PieceAction{
pb.PieceAction_GET,
pb.PieceAction_GET_AUDIT,
pb.PieceAction_GET_REPAIR,
}
var buckets []string
for i := 0; i < numBuckets; i++ {
bucketName := fmt.Sprintf("bucket_%d", i)
// project 1
for _, action := range actions {
value := getValue(0, i, p1base)
err := db.Orders().UpdateBucketBandwidthAllocation(ctx, project1, []byte(bucketName), action, value*6, now)
require.NoError(t, err)
err = db.Orders().UpdateBucketBandwidthSettle(ctx, project1, []byte(bucketName), action, value*3, now)
require.NoError(t, err)
err = db.Orders().UpdateBucketBandwidthInline(ctx, project1, []byte(bucketName), action, value, now)
require.NoError(t, err)
}
// project 2
for _, action := range actions {
value := getValue(1, i, p2base)
err := db.Orders().UpdateBucketBandwidthAllocation(ctx, project2, []byte(bucketName), action, value*6, now)
require.NoError(t, err)
err = db.Orders().UpdateBucketBandwidthSettle(ctx, project2, []byte(bucketName), action, value*3, now)
require.NoError(t, err)
err = db.Orders().UpdateBucketBandwidthInline(ctx, project2, []byte(bucketName), action, value, now)
require.NoError(t, err)
}
buckets = append(buckets, bucketName)
}
for i := 0; i < tallyIntervals; i++ {
interval := start.Add(tallyInterval * time.Duration(i))
bucketTallies := make(map[string]*accounting.BucketTally)
for j, bucket := range buckets {
bucketID1 := project1.String() + "/" + bucket
bucketID2 := project2.String() + "/" + bucket
value1 := getValue(i, j, p1base) * 10
value2 := getValue(i, j, p2base) * 10
tally1 := &accounting.BucketTally{
BucketName: []byte(bucket),
ProjectID: project1,
ObjectCount: value1,
InlineSegments: value1,
RemoteSegments: value1,
InlineBytes: value1,
RemoteBytes: value1,
MetadataSize: value1,
}
tally2 := &accounting.BucketTally{
BucketName: []byte(bucket),
ProjectID: project2,
ObjectCount: value2,
InlineSegments: value2,
RemoteSegments: value2,
InlineBytes: value2,
RemoteBytes: value2,
MetadataSize: value2,
}
bucketTallies[bucketID1] = tally1
bucketTallies[bucketID2] = tally2
}
err := db.ProjectAccounting().SaveTallies(ctx, interval, bucketTallies)
require.NoError(t, err)
}
usageRollups := db.Console().UsageRollups()
t.Run("test project total", func(t *testing.T) {
projTotal1, err := usageRollups.GetProjectTotal(ctx, project1, start, now)
assert.NoError(t, err)
assert.NotNil(t, projTotal1)
projTotal2, err := usageRollups.GetProjectTotal(ctx, project2, start, now)
assert.NoError(t, err)
assert.NotNil(t, projTotal2)
})
t.Run("test bucket usage rollups", func(t *testing.T) {
rollups1, err := usageRollups.GetBucketUsageRollups(ctx, project1, start, now)
assert.NoError(t, err)
assert.NotNil(t, rollups1)
rollups2, err := usageRollups.GetBucketUsageRollups(ctx, project2, start, now)
assert.NoError(t, err)
assert.NotNil(t, rollups2)
})
t.Run("test bucket totals", func(t *testing.T) {
cursor := console.BucketUsageCursor{
Limit: 20,
Page: 1,
}
totals1, err := usageRollups.GetBucketTotals(ctx, project1, cursor, start, now)
assert.NoError(t, err)
assert.NotNil(t, totals1)
totals2, err := usageRollups.GetBucketTotals(ctx, project2, cursor, start, now)
assert.NoError(t, err)
assert.NotNil(t, totals2)
})
})
}

View File

@ -90,7 +90,7 @@ type Core struct {
Accounting struct {
Tally *tally.Service
Rollup *rollup.Service
ProjectUsage *accounting.ProjectUsage
ProjectUsage *accounting.Service
}
LiveAccounting struct {
@ -155,7 +155,7 @@ func New(log *zap.Logger, full *identity.FullIdentity, db DB, pointerDB metainfo
{ // setup accounting project usage
log.Debug("Setting up accounting project usage")
peer.Accounting.ProjectUsage = accounting.NewProjectUsage(
peer.Accounting.ProjectUsage = accounting.NewService(
peer.DB.ProjectAccounting(),
peer.LiveAccounting.Cache,
config.Rollup.MaxAlphaUsage,
@ -300,7 +300,11 @@ func New(log *zap.Logger, full *identity.FullIdentity, db DB, pointerDB metainfo
peer.Log.Named("stripecoinpayments service"),
pc.StripeCoinPayments,
peer.DB.StripeCoinPayments(),
peer.DB.Console().Projects())
peer.DB.Console().Projects(),
peer.DB.ProjectAccounting(),
pc.PerObjectPrice,
pc.EgressPrice,
pc.TbhPrice)
peer.Payments.Accounts = service.Accounts()

View File

@ -72,7 +72,7 @@ type Endpoint struct {
overlay *overlay.Service
partnerinfo attribution.DB
peerIdentities overlay.PeerIdentities
projectUsage *accounting.ProjectUsage
projectUsage *accounting.Service
apiKeys APIKeys
createRequests *createRequests
requiredRSConfig RSConfig
@ -82,7 +82,7 @@ type Endpoint struct {
// NewEndpoint creates new metainfo endpoint instance
func NewEndpoint(log *zap.Logger, metainfo *Service, orders *orders.Service, cache *overlay.Service, partnerinfo attribution.DB, peerIdentities overlay.PeerIdentities,
apiKeys APIKeys, projectUsage *accounting.ProjectUsage, rsConfig RSConfig, satellite signing.Signer, maxCommitInterval time.Duration) *Endpoint {
apiKeys APIKeys, projectUsage *accounting.Service, rsConfig RSConfig, satellite signing.Signer, maxCommitInterval time.Duration) *Endpoint {
// TODO do something with too many params
return &Endpoint{
log: log,

View File

@ -22,6 +22,9 @@ type Accounts interface {
// Balance returns an integer amount in cents that represents the current balance of payment account.
Balance(ctx context.Context, userID uuid.UUID) (int64, error)
// ProjectCharges returns how much money the current user will be charged for each project.
ProjectCharges(ctx context.Context, userID uuid.UUID) ([]ProjectCharge, error)
// CreditCards exposes all needed functionality to manage account credit cards.
CreditCards() CreditCards

View File

@ -75,6 +75,13 @@ func (accounts *accounts) Balance(ctx context.Context, userID uuid.UUID) (_ int6
return 0, nil
}
// ProjectCharges returns how much money the current user will be charged for each project.
func (accounts *accounts) ProjectCharges(ctx context.Context, userID uuid.UUID) (charges []payments.ProjectCharge, err error) {
defer mon.Task()(&ctx, userID)(&err)
return []payments.ProjectCharge{}, nil
}
// List returns a list of credit cards for a given payment account.
func (creditCards *creditCards) List(ctx context.Context, userID uuid.UUID) (_ []payments.CreditCard, err error) {
defer mon.Task()(&ctx, userID)(&err)

View File

@ -3,10 +3,15 @@
package paymentsconfig
import "storj.io/storj/satellite/payments/stripecoinpayments"
import (
"storj.io/storj/satellite/payments/stripecoinpayments"
)
// Config defines global payments config.
type Config struct {
Provider string `help:"payments provider to use" default:""`
StripeCoinPayments stripecoinpayments.Config
PerObjectPrice int64 `help:"price in cents the user should pay for each object stored in the network" devDefault:"0" releaseDefault:"0"`
EgressPrice int64 `help:"price in cents the user should pay for each TB of egress" devDefault:"0" releaseDefault:"0"`
TbhPrice int64 `help:"price in cents the user should pay for storing each TB per hour" devDefault:"0" releaseDefault:"0"`
}

View File

@ -0,0 +1,19 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.
package payments
import (
"github.com/skyrings/skyring-common/tools/uuid"
)
// ProjectCharge shows how much money the current project will be charged at the end of the month.
type ProjectCharge struct {
ProjectID uuid.UUID `json:"projectId"`
// StorageGbHrs shows how many cents we should pay for storing GB*Hrs.
StorageGbHrs int64
// Egress shows how many cents we should pay for Egress.
Egress int64
// ObjectCount shows how many cents we should pay for object count.
ObjectCount int64
}
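
Not part of the commit, but for orientation: only ProjectID carries a json tag, so StorageGbHrs, Egress and ObjectCount marshal under their Go field names as integer cents. Assuming the skyring uuid type renders as its canonical string (it would encode differently if it lacks a MarshalJSON method), an entry returned by the new /account/charges handler would look roughly like this, with illustrative values:

    [
      {
        "projectId": "11111111-2222-3333-4444-555555555555",
        "StorageGbHrs": 986,
        "Egress": 5000,
        "ObjectCount": 168
      }
    ]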

View File

@ -5,13 +5,19 @@ package stripecoinpayments
import (
"context"
"time"
"github.com/skyrings/skyring-common/tools/uuid"
"github.com/stripe/stripe-go"
"storj.io/storj/private/date"
"storj.io/storj/private/memory"
"storj.io/storj/satellite/payments"
)
// ensures that accounts implements payments.Accounts.
var _ payments.Accounts = (*accounts)(nil)
// accounts is an implementation of payments.Accounts.
type accounts struct {
service *Service
@ -67,6 +73,38 @@ func (accounts *accounts) Balance(ctx context.Context, userID uuid.UUID) (_ int6
return c.Balance, nil
}
// ProjectCharges returns how much money the current user will be charged for each project.
func (accounts *accounts) ProjectCharges(ctx context.Context, userID uuid.UUID) (charges []payments.ProjectCharge, err error) {
defer mon.Task()(&ctx, userID)(&err)
// to return empty slice instead of nil if there are no projects
charges = make([]payments.ProjectCharge, 0)
projects, err := accounts.service.projectsDB.GetOwn(ctx, userID)
if err != nil {
return nil, Error.Wrap(err)
}
start, end := date.MonthBoundary(time.Now().UTC())
for _, project := range projects {
usage, err := accounts.service.usageDB.GetProjectTotal(ctx, project.ID, start, end)
if err != nil {
return charges, Error.Wrap(err)
}
charges = append(charges, payments.ProjectCharge{
ProjectID: project.ID,
Egress: usage.Egress / int64(memory.TB) * accounts.service.EgressPrice,
// TODO: check precision
ObjectCount: int64(usage.ObjectCount) * accounts.service.PerObjectPrice,
StorageGbHrs: int64(usage.Storage) / int64(memory.TB) * accounts.service.TBhPrice,
})
}
return charges, nil
}
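
To make the cents arithmetic above concrete, here is a standalone, stdlib-only restatement with made-up prices and usage figures; the real prices come from paymentsconfig (see the Config change later in this diff), and tb is assumed to match the decimal memory.TB. Note that the divisions are integer divisions, so sub-TB remainders truncate.

    package main

    import "fmt"

    func main() {
        const (
            perObjectPrice = int64(1)    // cents per object(-hour), hypothetical
            egressPrice    = int64(5000) // cents per TB of egress, hypothetical
            tbhPrice       = int64(2)    // cents per TB-hour of storage, hypothetical
            tb             = int64(1e12) // decimal terabyte, assumed to match memory.TB
        )

        egressBytes := int64(3) * tb            // 3 TB of egress for the period
        storageByteHours := float64(720) * 1e12 // 1 TB held for 720 hours
        objectHours := float64(10000)           // ObjectCount as accumulated by GetProjectTotal

        egressCents := egressBytes / tb * egressPrice           // 15000 cents
        objectCents := int64(objectHours) * perObjectPrice      // 10000 cents
        storageCents := int64(storageByteHours) / tb * tbhPrice // 1440 cents

        fmt.Println(egressCents, objectCents, storageCents)
    }

With these numbers the example project accrues $150.00 of egress, $100.00 for objects and $14.40 for storage; in this sketch anything under a full TB of egress or TB-hour of storage truncates to zero.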
// StorjTokens exposes all storj token related functionality.
func (accounts *accounts) StorjTokens() payments.StorjTokens {
return &storjTokens{service: accounts.service}

View File

@ -14,6 +14,7 @@ import (
"go.uber.org/zap"
"gopkg.in/spacemonkeygo/monkit.v2"
"storj.io/storj/satellite/accounting"
"storj.io/storj/satellite/console"
"storj.io/storj/satellite/payments"
"storj.io/storj/satellite/payments/coinpayments"
@ -26,6 +27,10 @@ var (
mon = monkit.Package()
)
// $0.013689253935661 is the price per TBh for storage,
// $50 per TB of egress,
// $0.00000168 per object.
// Config stores needed information for payment service initialization.
type Config struct {
StripeSecretKey string `help:"stripe API secret key" default:""`
@ -39,15 +44,20 @@ type Config struct {
//
// architecture: Service
type Service struct {
log *zap.Logger
db DB
projectsDB console.Projects
stripeClient *client.API
coinPayments *coinpayments.Client
log *zap.Logger
db DB
config Config
projectsDB console.Projects
usageDB accounting.ProjectAccounting
stripeClient *client.API
coinPayments *coinpayments.Client
PerObjectPrice int64
EgressPrice int64
TBhPrice int64
}
// NewService creates a Service instance.
func NewService(log *zap.Logger, config Config, db DB, projectsDB console.Projects) *Service {
func NewService(log *zap.Logger, config Config, db DB, projectsDB console.Projects, usageDB accounting.ProjectAccounting, perObjectPrice, egressPrice, tbhPrice int64) *Service {
stripeClient := client.New(config.StripeSecretKey, nil)
coinPaymentsClient := coinpayments.NewClient(
@ -58,11 +68,16 @@ func NewService(log *zap.Logger, config Config, db DB, projectsDB console.Projec
)
return &Service{
log: log,
db: db,
projectsDB: projectsDB,
stripeClient: stripeClient,
coinPayments: coinPaymentsClient,
log: log,
db: db,
config: config,
projectsDB: projectsDB,
usageDB: usageDB,
stripeClient: stripeClient,
coinPayments: coinPaymentsClient,
TBhPrice: tbhPrice,
PerObjectPrice: perObjectPrice,
EgressPrice: egressPrice,
}
}

View File

@ -15,7 +15,7 @@ import (
// ensures that ConsoleDB implements console.DB.
var _ console.DB = (*ConsoleDB)(nil)
// ConsoleDB contains access to different satellite databases
// ConsoleDB contains access to different satellite databases.
type ConsoleDB struct {
db *dbx.DB
tx *dbx.Tx
@ -23,47 +23,42 @@ type ConsoleDB struct {
methods dbx.Methods
}
// Users is getter a for Users repository
// Users is a getter for Users repository.
func (db *ConsoleDB) Users() console.Users {
return &users{db.methods}
}
// Projects is a getter for Projects repository
// Projects is a getter for Projects repository.
func (db *ConsoleDB) Projects() console.Projects {
return &projects{db.methods}
}
// ProjectMembers is a getter for ProjectMembers repository
// ProjectMembers is a getter for ProjectMembers repository.
func (db *ConsoleDB) ProjectMembers() console.ProjectMembers {
return &projectMembers{db.methods, db.db}
}
// APIKeys is a getter for APIKeys repository
// APIKeys is a getter for APIKeys repository.
func (db *ConsoleDB) APIKeys() console.APIKeys {
return &apikeys{db.methods, db.db}
}
// RegistrationTokens is a getter for RegistrationTokens repository
// RegistrationTokens is a getter for RegistrationTokens repository.
func (db *ConsoleDB) RegistrationTokens() console.RegistrationTokens {
return &registrationTokens{db.methods}
}
// ResetPasswordTokens is a getter for ResetPasswordTokens repository
// ResetPasswordTokens is a getter for ResetPasswordTokens repository.
func (db *ConsoleDB) ResetPasswordTokens() console.ResetPasswordTokens {
return &resetPasswordTokens{db.methods}
}
// UsageRollups is a getter for console.UsageRollups repository
func (db *ConsoleDB) UsageRollups() console.UsageRollups {
return &usagerollups{db.db}
}
// UserCredits is a getter for console.UserCredits repository
// UserCredits is a getter for console.UserCredits repository.
func (db *ConsoleDB) UserCredits() console.UserCredits {
return &usercredits{db.db, db.tx}
}
// BeginTx is a method for opening transaction
// BeginTx is a method for opening transaction.
func (db *ConsoleDB) BeginTx(ctx context.Context) (console.DBTx, error) {
if db.db == nil {
return nil, errs.New("DB is not initialized!")
@ -84,12 +79,12 @@ func (db *ConsoleDB) BeginTx(ctx context.Context) (console.DBTx, error) {
}, nil
}
// DBTx extends Database with transaction scope
// DBTx extends Database with transaction scope.
type DBTx struct {
*ConsoleDB
}
// Commit is a method for committing and closing transaction
// Commit is a method for committing and closing transaction.
func (db *DBTx) Commit() error {
if db.tx == nil {
return errs.New("begin transaction before commit it!")
@ -98,7 +93,7 @@ func (db *DBTx) Commit() error {
return db.tx.Commit()
}
// Rollback is a method for rollback and closing transaction
// Rollback is a method for rollback and closing transaction.
func (db *DBTx) Rollback() error {
if db.tx == nil {
return errs.New("begin transaction before rollback it!")

View File

@ -285,6 +285,11 @@ read all (
where project.created_at < ?
orderby asc project.created_at
)
read all (
select project
where project.owner_id = ?
orderby asc project.created_at
)
read all (
select project
join project.id = project_member.project_id

View File

@ -7244,6 +7244,39 @@ func (obj *postgresImpl) All_Project_By_CreatedAt_Less_OrderBy_Asc_CreatedAt(ctx
}
func (obj *postgresImpl) All_Project_By_OwnerId_OrderBy_Asc_CreatedAt(ctx context.Context,
project_owner_id Project_OwnerId_Field) (
rows []*Project, err error) {
var __embed_stmt = __sqlbundle_Literal("SELECT projects.id, projects.name, projects.description, projects.usage_limit, projects.partner_id, projects.owner_id, projects.created_at FROM projects WHERE projects.owner_id = ? ORDER BY projects.created_at")
var __values []interface{}
__values = append(__values, project_owner_id.value())
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
obj.logStmt(__stmt, __values...)
__rows, err := obj.driver.Query(__stmt, __values...)
if err != nil {
return nil, obj.makeErr(err)
}
defer __rows.Close()
for __rows.Next() {
project := &Project{}
err = __rows.Scan(&project.Id, &project.Name, &project.Description, &project.UsageLimit, &project.PartnerId, &project.OwnerId, &project.CreatedAt)
if err != nil {
return nil, obj.makeErr(err)
}
rows = append(rows, project)
}
if err := __rows.Err(); err != nil {
return nil, obj.makeErr(err)
}
return rows, nil
}
func (obj *postgresImpl) All_Project_By_ProjectMember_MemberId_OrderBy_Asc_Project_Name(ctx context.Context,
project_member_member_id ProjectMember_MemberId_Field) (
rows []*Project, err error) {
@ -10576,6 +10609,16 @@ func (rx *Rx) All_Project_By_CreatedAt_Less_OrderBy_Asc_CreatedAt(ctx context.Co
return tx.All_Project_By_CreatedAt_Less_OrderBy_Asc_CreatedAt(ctx, project_created_at_less)
}
func (rx *Rx) All_Project_By_OwnerId_OrderBy_Asc_CreatedAt(ctx context.Context,
project_owner_id Project_OwnerId_Field) (
rows []*Project, err error) {
var tx *Tx
if tx, err = rx.getTx(ctx); err != nil {
return
}
return tx.All_Project_By_OwnerId_OrderBy_Asc_CreatedAt(ctx, project_owner_id)
}
func (rx *Rx) All_Project_By_ProjectMember_MemberId_OrderBy_Asc_Project_Name(ctx context.Context,
project_member_member_id ProjectMember_MemberId_Field) (
rows []*Project, err error) {
@ -11927,6 +11970,10 @@ type Methods interface {
project_created_at_less Project_CreatedAt_Field) (
rows []*Project, err error)
All_Project_By_OwnerId_OrderBy_Asc_CreatedAt(ctx context.Context,
project_owner_id Project_OwnerId_Field) (
rows []*Project, err error)
All_Project_By_ProjectMember_MemberId_OrderBy_Asc_Project_Name(ctx context.Context,
project_member_member_id ProjectMember_MemberId_Field) (
rows []*Project, err error)

View File

@ -9,6 +9,7 @@ import (
"time"
"github.com/skyrings/skyring-common/tools/uuid"
"github.com/zeebo/errs"
"storj.io/storj/pkg/pb"
"storj.io/storj/private/memory"
@ -143,3 +144,344 @@ func (db *ProjectAccounting) GetProjectUsageLimits(ctx context.Context, projectI
}
return memory.Size(project.UsageLimit), nil
}
// GetProjectTotal retrieves project usage for a given period
func (db *ProjectAccounting) GetProjectTotal(ctx context.Context, projectID uuid.UUID, since, before time.Time) (usage *accounting.ProjectUsage, err error) {
defer mon.Task()(&ctx)(&err)
since = timeTruncateDown(since)
storageQuery := db.db.All_BucketStorageTally_By_ProjectId_And_BucketName_And_IntervalStart_GreaterOrEqual_And_IntervalStart_LessOrEqual_OrderBy_Desc_IntervalStart
roullupsQuery := db.db.Rebind(`SELECT SUM(settled), SUM(inline), action
FROM bucket_bandwidth_rollups
WHERE project_id = ? AND interval_start >= ? AND interval_start <= ?
GROUP BY action`)
rollupsRows, err := db.db.QueryContext(ctx, roullupsQuery, projectID[:], since, before)
if err != nil {
return nil, err
}
defer func() { err = errs.Combine(err, rollupsRows.Close()) }()
var totalEgress int64
for rollupsRows.Next() {
var action pb.PieceAction
var settled, inline int64
err = rollupsRows.Scan(&settled, &inline, &action)
if err != nil {
return nil, err
}
// add values for egress
if action == pb.PieceAction_GET || action == pb.PieceAction_GET_AUDIT || action == pb.PieceAction_GET_REPAIR {
totalEgress += settled + inline
}
}
buckets, err := db.getBuckets(ctx, projectID, since, before)
if err != nil {
return nil, err
}
bucketsTallies := make(map[string]*[]*dbx.BucketStorageTally)
for _, bucket := range buckets {
storageTallies, err := storageQuery(ctx,
dbx.BucketStorageTally_ProjectId(projectID[:]),
dbx.BucketStorageTally_BucketName([]byte(bucket)),
dbx.BucketStorageTally_IntervalStart(since),
dbx.BucketStorageTally_IntervalStart(before))
if err != nil {
return nil, err
}
bucketsTallies[bucket] = &storageTallies
}
usage = new(accounting.ProjectUsage)
usage.Egress = memory.Size(totalEgress).Int64()
// sum up storage and objects
for _, tallies := range bucketsTallies {
for i := len(*tallies) - 1; i > 0; i-- {
current := (*tallies)[i]
hours := (*tallies)[i-1].IntervalStart.Sub(current.IntervalStart).Hours()
usage.Storage += memory.Size(current.Inline).Float64() * hours
usage.Storage += memory.Size(current.Remote).Float64() * hours
usage.ObjectCount += float64(current.ObjectCount) * hours
}
}
usage.Since = since
usage.Before = before
return usage, nil
}
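
The storage and object figures above are hour-weighted: tallies come back newest-first, each tally is charged for the hours until the next (more recent) tally, and the newest tally itself is skipped because there is no later tally to measure against. A tiny stdlib-only illustration of that accumulation (not from the commit):

    package main

    import (
        "fmt"
        "time"
    )

    // tallyPoint stands in for a bucket storage tally: when it was taken and how many bytes were stored.
    type tallyPoint struct {
        at    time.Time
        bytes float64
    }

    // byteHours expects tallies ordered newest-first, mirroring the query above.
    func byteHours(tallies []tallyPoint) float64 {
        var total float64
        for i := len(tallies) - 1; i > 0; i-- {
            hours := tallies[i-1].at.Sub(tallies[i].at).Hours()
            total += tallies[i].bytes * hours
        }
        return total
    }

    func main() {
        now := time.Now()
        tallies := []tallyPoint{
            {at: now, bytes: 3000},                     // newest, skipped
            {at: now.Add(-1 * time.Hour), bytes: 2000}, // charged for 1 hour
            {at: now.Add(-3 * time.Hour), bytes: 1000}, // charged for 2 hours
        }
        fmt.Println(byteHours(tallies)) // 1000*2 + 2000*1 = 4000 byte-hours
    }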
// GetBucketUsageRollups retrieves summed usage rollups for every bucket of a particular project for a given period
func (db *ProjectAccounting) GetBucketUsageRollups(ctx context.Context, projectID uuid.UUID, since, before time.Time) (_ []accounting.BucketUsageRollup, err error) {
defer mon.Task()(&ctx)(&err)
since = timeTruncateDown(since)
buckets, err := db.getBuckets(ctx, projectID, since, before)
if err != nil {
return nil, err
}
roullupsQuery := db.db.Rebind(`SELECT SUM(settled), SUM(inline), action
FROM bucket_bandwidth_rollups
WHERE project_id = ? AND bucket_name = ? AND interval_start >= ? AND interval_start <= ?
GROUP BY action`)
storageQuery := db.db.All_BucketStorageTally_By_ProjectId_And_BucketName_And_IntervalStart_GreaterOrEqual_And_IntervalStart_LessOrEqual_OrderBy_Desc_IntervalStart
var bucketUsageRollups []accounting.BucketUsageRollup
for _, bucket := range buckets {
bucketRollup := accounting.BucketUsageRollup{
ProjectID: projectID,
BucketName: []byte(bucket),
Since: since,
Before: before,
}
// get bucket_bandwidth_rollups
rollupsRows, err := db.db.QueryContext(ctx, roullupsQuery, projectID[:], []byte(bucket), since, before)
if err != nil {
return nil, err
}
defer func() { err = errs.Combine(err, rollupsRows.Close()) }()
// fill egress
for rollupsRows.Next() {
var action pb.PieceAction
var settled, inline int64
err = rollupsRows.Scan(&settled, &inline, &action)
if err != nil {
return nil, err
}
switch action {
case pb.PieceAction_GET:
bucketRollup.GetEgress += memory.Size(settled + inline).GB()
case pb.PieceAction_GET_AUDIT:
bucketRollup.AuditEgress += memory.Size(settled + inline).GB()
case pb.PieceAction_GET_REPAIR:
bucketRollup.RepairEgress += memory.Size(settled + inline).GB()
default:
continue
}
}
bucketStorageTallies, err := storageQuery(ctx,
dbx.BucketStorageTally_ProjectId(projectID[:]),
dbx.BucketStorageTally_BucketName([]byte(bucket)),
dbx.BucketStorageTally_IntervalStart(since),
dbx.BucketStorageTally_IntervalStart(before))
if err != nil {
return nil, err
}
// fill metadata, objects and stored data
// hours calculated from previous tallies,
// so we skip the most recent one
for i := len(bucketStorageTallies) - 1; i > 0; i-- {
current := bucketStorageTallies[i]
hours := bucketStorageTallies[i-1].IntervalStart.Sub(current.IntervalStart).Hours()
bucketRollup.RemoteStoredData += memory.Size(current.Remote).GB() * hours
bucketRollup.InlineStoredData += memory.Size(current.Inline).GB() * hours
bucketRollup.MetadataSize += memory.Size(current.MetadataSize).GB() * hours
bucketRollup.RemoteSegments += float64(current.RemoteSegmentsCount) * hours
bucketRollup.InlineSegments += float64(current.InlineSegmentsCount) * hours
bucketRollup.ObjectCount += float64(current.ObjectCount) * hours
}
bucketUsageRollups = append(bucketUsageRollups, bucketRollup)
}
return bucketUsageRollups, nil
}
// GetBucketTotals retrieves bucket usage totals for a period of time
func (db *ProjectAccounting) GetBucketTotals(ctx context.Context, projectID uuid.UUID, cursor accounting.BucketUsageCursor, since, before time.Time) (_ *accounting.BucketUsagePage, err error) {
defer mon.Task()(&ctx)(&err)
since = timeTruncateDown(since)
search := cursor.Search + "%"
if cursor.Limit > 50 {
cursor.Limit = 50
}
if cursor.Page == 0 {
return nil, errs.New("page can not be 0")
}
page := &accounting.BucketUsagePage{
Search: cursor.Search,
Limit: cursor.Limit,
Offset: uint64((cursor.Page - 1) * cursor.Limit),
}
countQuery := db.db.Rebind(`SELECT COUNT(DISTINCT bucket_name)
FROM bucket_bandwidth_rollups
WHERE project_id = ? AND interval_start >= ? AND interval_start <= ?
AND CAST(bucket_name as TEXT) LIKE ?`)
countRow := db.db.QueryRowContext(ctx,
countQuery,
projectID[:],
since, before,
search)
err = countRow.Scan(&page.TotalCount)
if err != nil {
return nil, err
}
if page.TotalCount == 0 {
return page, nil
}
if page.Offset > page.TotalCount-1 {
return nil, errs.New("page is out of range")
}
bucketsQuery := db.db.Rebind(`SELECT DISTINCT bucket_name
FROM bucket_bandwidth_rollups
WHERE project_id = ? AND interval_start >= ? AND interval_start <= ?
AND CAST(bucket_name as TEXT) LIKE ?
ORDER BY bucket_name ASC
LIMIT ? OFFSET ?`)
bucketRows, err := db.db.QueryContext(ctx,
bucketsQuery,
projectID[:],
since, before,
search,
page.Limit,
page.Offset)
if err != nil {
return nil, err
}
defer func() { err = errs.Combine(err, bucketRows.Close()) }()
var buckets []string
for bucketRows.Next() {
var bucket string
err = bucketRows.Scan(&bucket)
if err != nil {
return nil, err
}
buckets = append(buckets, bucket)
}
roullupsQuery := db.db.Rebind(`SELECT SUM(settled), SUM(inline), action
FROM bucket_bandwidth_rollups
WHERE project_id = ? AND bucket_name = ? AND interval_start >= ? AND interval_start <= ?
GROUP BY action`)
storageQuery := db.db.Rebind(`SELECT inline, remote, object_count
FROM bucket_storage_tallies
WHERE project_id = ? AND bucket_name = ? AND interval_start >= ? AND interval_start <= ?
ORDER BY interval_start DESC
LIMIT 1`)
var bucketUsages []accounting.BucketUsage
for _, bucket := range buckets {
bucketUsage := accounting.BucketUsage{
ProjectID: projectID,
BucketName: bucket,
Since: since,
Before: before,
}
// get bucket_bandwidth_rollups
rollupsRows, err := db.db.QueryContext(ctx, roullupsQuery, projectID[:], []byte(bucket), since, before)
if err != nil {
return nil, err
}
defer func() { err = errs.Combine(err, rollupsRows.Close()) }()
var totalEgress int64
for rollupsRows.Next() {
var action pb.PieceAction
var settled, inline int64
err = rollupsRows.Scan(&settled, &inline, &action)
if err != nil {
return nil, err
}
// add values for egress
if action == pb.PieceAction_GET || action == pb.PieceAction_GET_AUDIT || action == pb.PieceAction_GET_REPAIR {
totalEgress += settled + inline
}
}
bucketUsage.Egress = memory.Size(totalEgress).GB()
storageRow := db.db.QueryRowContext(ctx, storageQuery, projectID[:], []byte(bucket), since, before)
if err != nil {
return nil, err
}
var inline, remote, objectCount int64
err = storageRow.Scan(&inline, &remote, &objectCount)
if err != nil {
if err != sql.ErrNoRows {
return nil, err
}
}
// fill storage and object count
bucketUsage.Storage = memory.Size(inline + remote).GB()
bucketUsage.ObjectCount = objectCount
bucketUsages = append(bucketUsages, bucketUsage)
}
page.PageCount = uint(page.TotalCount / uint64(cursor.Limit))
if page.TotalCount%uint64(cursor.Limit) != 0 {
page.PageCount++
}
page.BucketUsages = bucketUsages
page.CurrentPage = cursor.Page
return page, nil
}
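
To make the cursor math above concrete: with cursor.Limit = 20, cursor.Page = 3 and 53 matching buckets, Offset = (3-1)*20 = 40, PageCount = 53/20 rounded up = 3, and the returned page carries the remaining 13 buckets; a Page of 0, or an Offset beyond TotalCount-1, is rejected with the errors above.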
// getBuckets lists all buckets of a certain project for a given period
func (db *ProjectAccounting) getBuckets(ctx context.Context, projectID uuid.UUID, since, before time.Time) (_ []string, err error) {
defer mon.Task()(&ctx)(&err)
bucketsQuery := db.db.Rebind(`SELECT DISTINCT bucket_name
FROM bucket_bandwidth_rollups
WHERE project_id = ? AND interval_start >= ? AND interval_start <= ?`)
bucketRows, err := db.db.QueryContext(ctx, bucketsQuery, projectID[:], since, before)
if err != nil {
return nil, err
}
defer func() { err = errs.Combine(err, bucketRows.Close()) }()
var buckets []string
for bucketRows.Next() {
var bucket string
err = bucketRows.Scan(&bucket)
if err != nil {
return nil, err
}
buckets = append(buckets, bucket)
}
return buckets, nil
}
// timeTruncateDown truncates down to the hour, to stay in sync with the orders endpoint
func timeTruncateDown(t time.Time) time.Time {
return time.Date(t.Year(), t.Month(), t.Day(), t.Hour(), 0, 0, 0, t.Location())
}
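
For example, the commit's own timestamp 2019-11-15 16:27:44 +02:00 truncates to 2019-11-15 16:00:00 +02:00, so a range start part-way through an hour is pulled back to the top of that hour.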

View File

@ -34,6 +34,18 @@ func (projects *projects) GetAll(ctx context.Context) (_ []console.Project, err
return projectsFromDbxSlice(ctx, projectsDbx)
}
// GetOwn is a method for querying all projects created by current user from the database.
func (projects *projects) GetOwn(ctx context.Context, userID uuid.UUID) (_ []console.Project, err error) {
defer mon.Task()(&ctx)(&err)
projectsDbx, err := projects.db.All_Project_By_OwnerId_OrderBy_Asc_CreatedAt(ctx, dbx.Project_OwnerId(userID[:]))
if err != nil {
return nil, err
}
return projectsFromDbxSlice(ctx, projectsDbx)
}
// GetCreatedBefore retrieves all projects created before provided date
func (projects *projects) GetCreatedBefore(ctx context.Context, before time.Time) (_ []console.Project, err error) {
defer mon.Task()(&ctx)(&err)

View File

@ -1,367 +0,0 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.
package satellitedb
import (
"context"
"database/sql"
"time"
"github.com/skyrings/skyring-common/tools/uuid"
"github.com/zeebo/errs"
"storj.io/storj/pkg/pb"
"storj.io/storj/private/memory"
"storj.io/storj/satellite/console"
dbx "storj.io/storj/satellite/satellitedb/dbx"
)
// ensures that usagerollups implements console.UsageRollups.
var _ console.UsageRollups = (*usagerollups)(nil)
// usagerollups implements console.UsageRollups
type usagerollups struct {
db *dbx.DB
}
// GetProjectTotal retrieves project usage for a given period
func (db *usagerollups) GetProjectTotal(ctx context.Context, projectID uuid.UUID, since, before time.Time) (usage *console.ProjectUsage, err error) {
defer mon.Task()(&ctx)(&err)
since = timeTruncateDown(since)
storageQuery := db.db.All_BucketStorageTally_By_ProjectId_And_BucketName_And_IntervalStart_GreaterOrEqual_And_IntervalStart_LessOrEqual_OrderBy_Desc_IntervalStart
roullupsQuery := db.db.Rebind(`SELECT SUM(settled), SUM(inline), action
FROM bucket_bandwidth_rollups
WHERE project_id = ? AND interval_start >= ? AND interval_start <= ?
GROUP BY action`)
rollupsRows, err := db.db.QueryContext(ctx, roullupsQuery, projectID[:], since, before)
if err != nil {
return nil, err
}
defer func() { err = errs.Combine(err, rollupsRows.Close()) }()
var totalEgress int64
for rollupsRows.Next() {
var action pb.PieceAction
var settled, inline int64
err = rollupsRows.Scan(&settled, &inline, &action)
if err != nil {
return nil, err
}
// add values for egress
if action == pb.PieceAction_GET || action == pb.PieceAction_GET_AUDIT || action == pb.PieceAction_GET_REPAIR {
totalEgress += settled + inline
}
}
buckets, err := db.getBuckets(ctx, projectID, since, before)
if err != nil {
return nil, err
}
bucketsTallies := make(map[string]*[]*dbx.BucketStorageTally)
for _, bucket := range buckets {
storageTallies, err := storageQuery(ctx,
dbx.BucketStorageTally_ProjectId(projectID[:]),
dbx.BucketStorageTally_BucketName([]byte(bucket)),
dbx.BucketStorageTally_IntervalStart(since),
dbx.BucketStorageTally_IntervalStart(before))
if err != nil {
return nil, err
}
bucketsTallies[bucket] = &storageTallies
}
usage = new(console.ProjectUsage)
usage.Egress = memory.Size(totalEgress).GB()
// sum up storage and objects
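// hours are taken from the gap between consecutive tallies (ordered newest
// first), so the most recent tally is skipped; storage accumulates in
// GB-hours and objects in object-hours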
for _, tallies := range bucketsTallies {
for i := len(*tallies) - 1; i > 0; i-- {
current := (*tallies)[i]
hours := (*tallies)[i-1].IntervalStart.Sub(current.IntervalStart).Hours()
usage.Storage += memory.Size(current.Inline).GB() * hours
usage.Storage += memory.Size(current.Remote).GB() * hours
usage.ObjectCount += float64(current.ObjectCount) * hours
}
}
usage.Since = since
usage.Before = before
return usage, nil
}
// GetBucketUsageRollups retrieves summed usage rollups for every bucket of a particular project for a given period
func (db *usagerollups) GetBucketUsageRollups(ctx context.Context, projectID uuid.UUID, since, before time.Time) (_ []console.BucketUsageRollup, err error) {
defer mon.Task()(&ctx)(&err)
since = timeTruncateDown(since)
buckets, err := db.getBuckets(ctx, projectID, since, before)
if err != nil {
return nil, err
}
roullupsQuery := db.db.Rebind(`SELECT SUM(settled), SUM(inline), action
FROM bucket_bandwidth_rollups
WHERE project_id = ? AND bucket_name = ? AND interval_start >= ? AND interval_start <= ?
GROUP BY action`)
storageQuery := db.db.All_BucketStorageTally_By_ProjectId_And_BucketName_And_IntervalStart_GreaterOrEqual_And_IntervalStart_LessOrEqual_OrderBy_Desc_IntervalStart
var bucketUsageRollups []console.BucketUsageRollup
for _, bucket := range buckets {
bucketRollup := console.BucketUsageRollup{
ProjectID: projectID,
BucketName: []byte(bucket),
Since: since,
Before: before,
}
// get bucket_bandwidth_rollups
rollupsRows, err := db.db.QueryContext(ctx, roullupsQuery, projectID[:], []byte(bucket), since, before)
if err != nil {
return nil, err
}
defer func() { err = errs.Combine(err, rollupsRows.Close()) }()
// fill egress
for rollupsRows.Next() {
var action pb.PieceAction
var settled, inline int64
err = rollupsRows.Scan(&settled, &inline, &action)
if err != nil {
return nil, err
}
switch action {
case pb.PieceAction_GET:
bucketRollup.GetEgress += memory.Size(settled + inline).GB()
case pb.PieceAction_GET_AUDIT:
bucketRollup.AuditEgress += memory.Size(settled + inline).GB()
case pb.PieceAction_GET_REPAIR:
bucketRollup.RepairEgress += memory.Size(settled + inline).GB()
default:
continue
}
}
bucketStorageTallies, err := storageQuery(ctx,
dbx.BucketStorageTally_ProjectId(projectID[:]),
dbx.BucketStorageTally_BucketName([]byte(bucket)),
dbx.BucketStorageTally_IntervalStart(since),
dbx.BucketStorageTally_IntervalStart(before))
if err != nil {
return nil, err
}
// fill metadata, objects and stored data
// hours calculated from previous tallies,
// so we skip the most recent one
for i := len(bucketStorageTallies) - 1; i > 0; i-- {
current := bucketStorageTallies[i]
hours := bucketStorageTallies[i-1].IntervalStart.Sub(current.IntervalStart).Hours()
bucketRollup.RemoteStoredData += memory.Size(current.Remote).GB() * hours
bucketRollup.InlineStoredData += memory.Size(current.Inline).GB() * hours
bucketRollup.MetadataSize += memory.Size(current.MetadataSize).GB() * hours
bucketRollup.RemoteSegments += float64(current.RemoteSegmentsCount) * hours
bucketRollup.InlineSegments += float64(current.InlineSegmentsCount) * hours
bucketRollup.ObjectCount += float64(current.ObjectCount) * hours
}
bucketUsageRollups = append(bucketUsageRollups, bucketRollup)
}
return bucketUsageRollups, nil
}
// GetBucketTotals retrieves bucket usage totals for a given period of time
func (db *usagerollups) GetBucketTotals(ctx context.Context, projectID uuid.UUID, cursor console.BucketUsageCursor, since, before time.Time) (_ *console.BucketUsagePage, err error) {
defer mon.Task()(&ctx)(&err)
since = timeTruncateDown(since)
search := cursor.Search + "%"
if cursor.Limit > 50 {
cursor.Limit = 50
}
if cursor.Page == 0 {
return nil, errs.New("page can not be 0")
}
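// e.g. (illustrative numbers only) limit 10 and page 3 give offset (3-1)*10 = 20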
page := &console.BucketUsagePage{
Search: cursor.Search,
Limit: cursor.Limit,
Offset: uint64((cursor.Page - 1) * cursor.Limit),
}
countQuery := db.db.Rebind(`SELECT COUNT(DISTINCT bucket_name)
FROM bucket_bandwidth_rollups
WHERE project_id = ? AND interval_start >= ? AND interval_start <= ?
AND CAST(bucket_name as TEXT) LIKE ?`)
countRow := db.db.QueryRowContext(ctx,
countQuery,
projectID[:],
since, before,
search)
err = countRow.Scan(&page.TotalCount)
if err != nil {
return nil, err
}
if page.TotalCount == 0 {
return page, nil
}
if page.Offset > page.TotalCount-1 {
return nil, errs.New("page is out of range")
}
bucketsQuery := db.db.Rebind(`SELECT DISTINCT bucket_name
FROM bucket_bandwidth_rollups
WHERE project_id = ? AND interval_start >= ? AND interval_start <= ?
AND CAST(bucket_name as TEXT) LIKE ?
ORDER BY bucket_name ASC
LIMIT ? OFFSET ?`)
bucketRows, err := db.db.QueryContext(ctx,
bucketsQuery,
projectID[:],
since, before,
search,
page.Limit,
page.Offset)
if err != nil {
return nil, err
}
defer func() { err = errs.Combine(err, bucketRows.Close()) }()
var buckets []string
for bucketRows.Next() {
var bucket string
err = bucketRows.Scan(&bucket)
if err != nil {
return nil, err
}
buckets = append(buckets, bucket)
}
roullupsQuery := db.db.Rebind(`SELECT SUM(settled), SUM(inline), action
FROM bucket_bandwidth_rollups
WHERE project_id = ? AND bucket_name = ? AND interval_start >= ? AND interval_start <= ?
GROUP BY action`)
storageQuery := db.db.Rebind(`SELECT inline, remote, object_count
FROM bucket_storage_tallies
WHERE project_id = ? AND bucket_name = ? AND interval_start >= ? AND interval_start <= ?
ORDER BY interval_start DESC
LIMIT 1`)
var bucketUsages []console.BucketUsage
for _, bucket := range buckets {
bucketUsage := console.BucketUsage{
ProjectID: projectID,
BucketName: bucket,
Since: since,
Before: before,
}
// get bucket_bandwidth_rollups
rollupsRows, err := db.db.QueryContext(ctx, roullupsQuery, projectID[:], []byte(bucket), since, before)
if err != nil {
return nil, err
}
defer func() { err = errs.Combine(err, rollupsRows.Close()) }()
var totalEgress int64
for rollupsRows.Next() {
var action pb.PieceAction
var settled, inline int64
err = rollupsRows.Scan(&settled, &inline, &action)
if err != nil {
return nil, err
}
// add values for egress
if action == pb.PieceAction_GET || action == pb.PieceAction_GET_AUDIT || action == pb.PieceAction_GET_REPAIR {
totalEgress += settled + inline
}
}
bucketUsage.Egress = memory.Size(totalEgress).GB()
storageRow := db.db.QueryRowContext(ctx, storageQuery, projectID[:], []byte(bucket), since, before)
if err != nil {
return nil, err
}
var inline, remote, objectCount int64
err = storageRow.Scan(&inline, &remote, &objectCount)
if err != nil {
if err != sql.ErrNoRows {
return nil, err
}
}
// fill storage and object count
bucketUsage.Storage = memory.Size(inline + remote).GB()
bucketUsage.ObjectCount = objectCount
bucketUsages = append(bucketUsages, bucketUsage)
}
page.PageCount = uint(page.TotalCount / uint64(cursor.Limit))
if page.TotalCount%uint64(cursor.Limit) != 0 {
page.PageCount++
}
page.BucketUsages = bucketUsages
page.CurrentPage = cursor.Page
return page, nil
}
// getBuckets lists all buckets of a certain project for the given period
func (db *usagerollups) getBuckets(ctx context.Context, projectID uuid.UUID, since, before time.Time) (_ []string, err error) {
defer mon.Task()(&ctx)(&err)
bucketsQuery := db.db.Rebind(`SELECT DISTINCT bucket_name
FROM bucket_bandwidth_rollups
WHERE project_id = ? AND interval_start >= ? AND interval_start <= ?`)
bucketRows, err := db.db.QueryContext(ctx, bucketsQuery, projectID[:], since, before)
if err != nil {
return nil, err
}
defer func() { err = errs.Combine(err, bucketRows.Close()) }()
var buckets []string
for bucketRows.Next() {
var bucket string
err = bucketRows.Scan(&bucket)
if err != nil {
return nil, err
}
buckets = append(buckets, bucket)
}
return buckets, nil
}
// timeTruncateDown truncates down to the start of the hour to stay in sync with the orders endpoint
func timeTruncateDown(t time.Time) time.Time {
return time.Date(t.Year(), t.Month(), t.Day(), t.Hour(), 0, 0, 0, t.Location())
}

View File

@ -334,6 +334,12 @@ identity.key-path: /root/.local/share/storj/identity/satellite/identity.key
# number of update requests to process per transaction
# overlay.update-stats-batch-size: 100
# price in cents user should pay for each TB of egress
# payments.egress-price: 0
# price in cents user should pay for storing each object in the network
# payments.per-object-price: 0
# payments provider to use
# payments.provider: ""
@ -352,6 +358,9 @@ identity.key-path: /root/.local/share/storj/identity/satellite/identity.key
# amount of time we wait before running next transaction update loop
# payments.stripe-coin-payments.transaction-update-interval: 30m0s
# price in cents user should pay for storing each TB per hour
# payments.tbh-price: 0
# time limit for downloading pieces from a node for repair
# repairer.download-timeout: 5m0s
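# Example only (illustrative prices, not suggested defaults): charging $45 per
# TB of egress, $0.22 per object and $0.10 per TB-hour of storage would mean
# setting the payments prices shown above to:
#
#   payments.egress-price: 4500
#   payments.per-object-price: 22
#   payments.tbh-price: 10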

View File

@ -1,2 +1,2 @@
VUE_APP_STRIPE_PUBLIC_KEY=pk_test_
VUE_APP_STRIPE_PUBLIC_KEY=pk_test_HMWSkLXmYBkQYeO9VRVQ8eHf00zrxjgwMh
VUE_APP_ENDPOINT_URL=/api/v0/graphql

View File

@ -54,6 +54,24 @@ export class PaymentsHttpApi implements PaymentsApi {
throw new Error('can not setup account');
}
public async projectsCharges(): Promise<any> {
const path = `${this.ROOT_PATH}/account/charges`;
const response = await this.client.get(path);
if (!response.ok) {
if (response.status === 401) {
throw new ErrorUnauthorized();
}
throw new Error('can not get projects charges');
}
// TODO: finish mapping
const charges = await response.json();
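// A possible mapping for the TODO above (hypothetical sketch: the ProjectCharge
// model and the response field names are assumptions, not the actual API), e.g.
//
//   return charges.map(charge => new ProjectCharge(
//       charge.projectId,
//       charge.storage,
//       charge.egress,
//       charge.objectCount,
//   ));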
return [];
}
/**
* Add credit card
* @param token - stripe token used to add a credit card as a payment method

View File

@ -104,10 +104,12 @@ export default class UsageReport extends Vue {
return DateFormat.getUSDate(this.$store.state.usageModule.endDate, '/');
}
// TODO: update bytes to GB
public get storage(): string {
return this.$store.state.usageModule.projectUsage.storage.toPrecision(5);
}
// TODO: update bytes to GB
public get egress(): string {
return this.$store.state.usageModule.projectUsage.egress.toPrecision(5);
}
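// One way to resolve the TODOs above (a sketch only; it assumes the store keeps
// raw byte counts and that decimal gigabytes are wanted):
//
//   const toGB = (bytes: number): string => (bytes / Math.pow(10, 9)).toPrecision(5);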

View File

@ -20,6 +20,11 @@ export interface PaymentsApi {
*/
getBalance(): Promise<number>;
/**
* Fetch usage charges for the user's projects.
*/
projectsCharges(): Promise<any>;
/**
* Add credit card
* @param token - stripe token used to add a credit card as a payment method

View File

@ -1,7 +1,7 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.
// ProjectUsage sums usage for given period
// ProjectUsage sums usage for given period.
export class ProjectUsage {
public storage: number;
public egress: number;

View File

@ -25,6 +25,7 @@ import DashboardHeader from '@/components/header/HeaderArea.vue';
import NavigationArea from '@/components/navigation/NavigationArea.vue';
import { ErrorUnauthorized } from '@/api/errors/ErrorUnauthorized';
import { PaymentsHttpApi } from '@/api/payments';
import { RouteConfig } from '@/router';
import { BUCKET_ACTIONS } from '@/store/modules/buckets';
import { PAYMENTS_ACTIONS } from '@/store/modules/payments';
@ -73,6 +74,7 @@ export default class DashboardArea extends Vue {
await this.$store.dispatch(GET_BALANCE);
await this.$store.dispatch(GET_CREDIT_CARDS);
await this.$store.dispatch(GET_BILLING_HISTORY);
new PaymentsHttpApi().projectsCharges();
} catch (error) {
if (error instanceof ErrorUnauthorized) {
AuthToken.remove();