Merge remote-tracking branch 'origin/main' into multipart-upload

Conflicts:
	go.mod
	go.sum
	satellite/metainfo/metainfo.go

Change-Id: Ib5c49f3c911c58319855a171f9ce73657da976d9
commit 6dff40f5c5 by Kaloyan Raev, 2021-01-14 14:33:00 +02:00
41 changed files with 1580 additions and 552 deletions


@ -358,5 +358,5 @@ diagrams-graphml:
.PHONY: bump-dependencies
bump-dependencies:
go get storj.io/common@main storj.io/private@main storj.io/uplink@main
go get storj.io/common@main storj.io/private@main storj.io/uplink@multipart-upload
go mod tidy


@ -13,7 +13,7 @@ import (
"github.com/spf13/cobra"
"github.com/zeebo/errs"
"golang.org/x/crypto/ssh/terminal"
"golang.org/x/term"
"storj.io/common/storj"
)
@ -143,7 +143,7 @@ Enter your encryption passphrase: `)
if err != nil {
return "", err
}
encKey, err := terminal.ReadPassword(int(os.Stdin.Fd()))
encKey, err := term.ReadPassword(int(os.Stdin.Fd()))
if err != nil {
return "", err
}
@ -160,7 +160,7 @@ Enter your encryption passphrase: `)
if err != nil {
return "", err
}
repeatedEncKey, err := terminal.ReadPassword(int(os.Stdin.Fd()))
repeatedEncKey, err := term.ReadPassword(int(os.Stdin.Fd()))
if err != nil {
return "", err
}
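golang.org/x/term is the maintained home of ReadPassword, with the same signature as the old golang.org/x/crypto/ssh/terminal function, so the calls above migrate one-for-one. A minimal sketch of the migrated prompt (the helper name is hypothetical):

package example

import (
	"fmt"
	"os"

	"golang.org/x/term" // replaces golang.org/x/crypto/ssh/terminal
)

// readPassphrase is a hypothetical helper mirroring the prompts above.
func readPassphrase(prompt string) (string, error) {
	fmt.Print(prompt)
	// term.ReadPassword keeps the signature of the old terminal.ReadPassword.
	encKey, err := term.ReadPassword(int(os.Stdin.Fd()))
	fmt.Println()
	if err != nil {
		return "", err
	}
	return string(encKey), nil
}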


@ -27,12 +27,12 @@ func isRunCmd() bool {
}
func main() {
isInteractive, err := svc.IsAnInteractiveSession()
isService, err := svc.IsWindowsService()
if err != nil {
zap.L().Fatal("Failed to determine if session is interactive.", zap.Error(err))
}
if isInteractive || !isRunCmd() {
if !isService || !isRunCmd() {
process.Exec(rootCmd)
return
}


@ -26,13 +26,11 @@ import (
)
func init() {
// Check if session is interactive
interactive, err := svc.IsAnInteractiveSession()
isService, err := svc.IsWindowsService()
if err != nil {
zap.L().Fatal("Failed to determine if session is interactive.", zap.Error(err))
zap.L().Fatal("Failed to determine if session is a service.", zap.Error(err))
}
if interactive {
if !isService {
return
}
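svc.IsWindowsService reports true when the process runs as a Windows service, the inverse of the deprecated svc.IsAnInteractiveSession, which is why the conditions above are negated. A self-contained sketch of the new check (the helper name is hypothetical; such a file would carry a _windows.go suffix):

package example

import (
	"log"

	"golang.org/x/sys/windows/svc"
)

// shouldRunAsConsole is a hypothetical helper showing the inverted condition.
func shouldRunAsConsole() bool {
	isService, err := svc.IsWindowsService()
	if err != nil {
		log.Fatalf("failed to determine if session is a service: %v", err)
	}
	// A non-service session takes the plain console code path.
	return !isService
}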

go.mod

@ -5,7 +5,7 @@ go 1.13
require (
github.com/alessio/shellescape v1.2.2
github.com/alicebob/miniredis/v2 v2.13.3
github.com/btcsuite/btcutil v1.0.3-0.20201124182144-4031bdc69ded
github.com/btcsuite/btcutil v1.0.3-0.20201208143702-a53e38424cce
github.com/calebcase/tmpfile v1.0.2
github.com/cheggaaa/pb/v3 v3.0.5
github.com/fatih/color v1.9.0
@ -39,16 +39,17 @@ require (
github.com/zeebo/errs v1.2.2
go.etcd.io/bbolt v1.3.5
go.uber.org/zap v1.16.0
golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a
golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad
golang.org/x/net v0.0.0-20200707034311-ab3426394381 // indirect
golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208
golang.org/x/sys v0.0.0-20200929083018-4d22bbb62b3c
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a
golang.org/x/sys v0.0.0-20210112091331-59c308dcf3cc
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221
golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e
google.golang.org/api v0.20.0 // indirect
google.golang.org/protobuf v1.25.0 // indirect
storj.io/common v0.0.0-20210104180112-e8500e1c37a0
storj.io/common v0.0.0-20210113135631-07a5dc68dc1c
storj.io/drpc v0.0.16
storj.io/monkit-jaeger v0.0.0-20200518165323-80778fc3f91b
storj.io/private v0.0.0-20201126162939-6fbb1e924f51
storj.io/uplink v1.4.4-0.20210104090336-abe239c20ec8
storj.io/private v0.0.0-20210108233641-2ba1ef686d1f
storj.io/uplink v1.4.5-0.20210114104337-ce4ca047ab1f
)

go.sum

@ -61,6 +61,8 @@ github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d/go.mod h1:+5NJ2+q
github.com/btcsuite/btcutil v1.0.1/go.mod h1:j9HUFwoQRsZL3V4n+qG+CUnEGHOarIxfC3Le2Yhbcts=
github.com/btcsuite/btcutil v1.0.3-0.20201124182144-4031bdc69ded h1:WcPFZzCIqGt/TdFJHsOiX5dIlB/MUzrftltMhpjzfA8=
github.com/btcsuite/btcutil v1.0.3-0.20201124182144-4031bdc69ded/go.mod h1:0DVlHczLPewLcPGEIeUEzfOJhqGPQ0mJJRDBtD307+o=
github.com/btcsuite/btcutil v1.0.3-0.20201208143702-a53e38424cce h1:YtWJF7RHm2pYCvA5t0RPmAaLUhREsKuKd+SLhxFbFeQ=
github.com/btcsuite/btcutil v1.0.3-0.20201208143702-a53e38424cce/go.mod h1:0DVlHczLPewLcPGEIeUEzfOJhqGPQ0mJJRDBtD307+o=
github.com/btcsuite/go-socks v0.0.0-20170105172521-4720035b7bfd/go.mod h1:HHNXQzUsZCxOoE+CPiyCTO6x34Zs86zZUiwtpXoGdtg=
github.com/btcsuite/goleveldb v0.0.0-20160330041536-7834afc9e8cd/go.mod h1:F+uVaaLLH7j4eDXPRvw78tMflu7Ie2bzYOH4Y8rRKBY=
github.com/btcsuite/snappy-go v0.0.0-20151229074030-0bdef8d06723/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc=
@ -572,8 +574,8 @@ golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975/go.mod h1:LzIPMQfyMNhhGPh
golang.org/x/crypto v0.0.0-20200323165209-0ec3e9974c59/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200604202706-70a84ac30bf9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a h1:vclmkQCjlDX5OydZ9wv8rBCcS0QyQY66Mpf/7BZbInM=
golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad h1:DN0cp81fZ3njFcrLCytUHRSUkqBjfTo4Tx9RJTWs0EY=
golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
@ -637,8 +639,8 @@ golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208 h1:qwRHBd0NqMbJxfbotnDhm2ByMI1Shq4Y6oRJo21SGJA=
golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a h1:DcqTD9SDLc+1P/r1EmRBwnVsrOwW+kk2vWf9n+1sGhs=
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@ -676,8 +678,10 @@ golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200610111108-226ff32320da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200929083018-4d22bbb62b3c h1:/h0vtH0PyU0xAoZJVcRw1k0Ng+U0JAy3QDiFmppIlIE=
golang.org/x/sys v0.0.0-20200929083018-4d22bbb62b3c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210112091331-59c308dcf3cc h1:y0Og6AYdwus7SIAnKnDxjc4gJetRiYEWOx4AKbOeyEI=
golang.org/x/sys v0.0.0-20210112091331-59c308dcf3cc/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221 h1:/ZHdbVpdR/jk3g30/d4yUL0JU9kksj8+F/bnQUVLGDM=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
@ -808,17 +812,16 @@ honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
storj.io/common v0.0.0-20200424175742-65ac59022f4f/go.mod h1:pZyXiIE7bGETIRXtfs0nICqMwp7PM8HqnDuyUeldNA0=
storj.io/common v0.0.0-20201026135900-1aaeec90670b/go.mod h1:GqdmNf3fLm2UZX/7Zr0BLFCJ4gFjgm6eHrk/fnmr5jQ=
storj.io/common v0.0.0-20201210184814-6206aefd1d48 h1:bxIYHG96eFQNsEazsICfiEHjFwo1YqqbXkGfg72d2mg=
storj.io/common v0.0.0-20201210184814-6206aefd1d48/go.mod h1:6sepaQTRLuygvA+GNPzdgRPOB1+wFfjde76KBWofbMY=
storj.io/common v0.0.0-20210104180112-e8500e1c37a0 h1:3EisqXNx2mjd1g+oBSlz1z5s3X7UFc+pXst2aNN/1m8=
storj.io/common v0.0.0-20210104180112-e8500e1c37a0/go.mod h1:GhZn7vlakLMJBMePwaMvaNUS45FhqMTVWzAn7dZxLOg=
storj.io/common v0.0.0-20210113135631-07a5dc68dc1c h1:07A5QJMYYYQrOQv51j6RiOTstzMh7OnbqTZGZljp9/M=
storj.io/common v0.0.0-20210113135631-07a5dc68dc1c/go.mod h1:KhVByBTvjV2rsaUQsft0pKgBRRMvCcY1JsDqt6BWr3I=
storj.io/drpc v0.0.11/go.mod h1:TiFc2obNjL9/3isMW1Rpxjy8V9uE0B2HMeMFGiiI7Iw=
storj.io/drpc v0.0.14/go.mod h1:82nfl+6YwRwF6UG31cEWWUqv/FaKvP5SGqUvoqTxCMA=
storj.io/drpc v0.0.16 h1:9sxypc5lKi/0D69cR21BR0S21+IvXfON8L5nXMVNTwQ=
storj.io/drpc v0.0.16/go.mod h1:zdmQ93nx4Z35u11pQ+GAnBy4DGOK3HJCSOfeh2RryTo=
storj.io/monkit-jaeger v0.0.0-20200518165323-80778fc3f91b h1:Bbg9JCtY6l3HrDxs3BXzT2UYnYCBLqNi6i84Y8QIPUs=
storj.io/monkit-jaeger v0.0.0-20200518165323-80778fc3f91b/go.mod h1:gj4vuCeyCRjRmH8LIrgoyU9Dc9uR6H+/GcDUXmTbf80=
storj.io/private v0.0.0-20201126162939-6fbb1e924f51 h1:3aNbTNJeZ00cnzgYFdGyBUxtBEYqBCEzvk6Svh0gIwc=
storj.io/private v0.0.0-20201126162939-6fbb1e924f51/go.mod h1:3KcGiA7phL3a0HUCe5ar90SlIU3iFb8hKInaEZQ5P7o=
storj.io/uplink v1.4.4-0.20210104090336-abe239c20ec8 h1:FEqhyAyUuE86cf4UX5jHmlkKKsk/dQ/YKlmyPm4fNCE=
storj.io/uplink v1.4.4-0.20210104090336-abe239c20ec8/go.mod h1:raBVCBf1/DwfkFNzKqjSKLPysk9+o8Ubt/LJIO9TVBw=
storj.io/private v0.0.0-20210108233641-2ba1ef686d1f h1:ctEwD9AsWR8MGv+hKxATjsu114lOPuL2wL7fqO2qusg=
storj.io/private v0.0.0-20210108233641-2ba1ef686d1f/go.mod h1:3KcGiA7phL3a0HUCe5ar90SlIU3iFb8hKInaEZQ5P7o=
storj.io/uplink v1.4.5-0.20210114104337-ce4ca047ab1f h1:jp17GoEKBmo/JvLUQHAbO8E9AjMEfwuabYoUrYI33us=
storj.io/uplink v1.4.5-0.20210114104337-ce4ca047ab1f/go.mod h1:raBVCBf1/DwfkFNzKqjSKLPysk9+o8Ubt/LJIO9TVBw=


@ -49,6 +49,7 @@ type NodeInfo struct {
DiskSpaceUsed int64
DiskSpaceLeft int64
BandwidthUsed int64
TotalEarned int64
}
// NodeInfoSatellite contains satellite specific node internal state.


@ -111,6 +111,7 @@ func (service *Service) ListInfos(ctx context.Context) (_ []NodeInfo, err error)
nodeClient := multinodepb.NewDRPCNodeClient(conn)
storageClient := multinodepb.NewDRPCStorageClient(conn)
bandwidthClient := multinodepb.NewDRPCBandwidthClient(conn)
payoutClient := multinodepb.NewDRPCPayoutClient(conn)
header := &multinodepb.RequestHeader{
ApiKey: node.APISecret,
@ -131,6 +132,11 @@ func (service *Service) ListInfos(ctx context.Context) (_ []NodeInfo, err error)
return NodeInfo{}, Error.Wrap(err)
}
earned, err := payoutClient.Earned(ctx, &multinodepb.EarnedRequest{Header: header})
if err != nil {
return NodeInfo{}, Error.Wrap(err)
}
bandwidthSummaryRequest := &multinodepb.BandwidthMonthSummaryRequest{
Header: header,
}
@ -147,6 +153,7 @@ func (service *Service) ListInfos(ctx context.Context) (_ []NodeInfo, err error)
DiskSpaceUsed: diskSpace.GetUsedPieces() + diskSpace.GetUsedTrash(),
DiskSpaceLeft: diskSpace.GetAvailable(),
BandwidthUsed: bandwidthSummary.GetUsed(),
TotalEarned: earned.Total,
}, nil
}()
if err != nil {


@ -24,7 +24,7 @@ var _ = time.Kitchen
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
type RequestHeader struct {
ApiKey []byte `protobuf:"bytes,1,opt,name=api_key,json=apiKey,proto3" json:"api_key,omitempty"`
@ -692,6 +692,82 @@ func (m *TrustedSatellitesResponse_NodeURL) GetAddress() string {
return ""
}
type EarnedRequest struct {
Header *RequestHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *EarnedRequest) Reset() { *m = EarnedRequest{} }
func (m *EarnedRequest) String() string { return proto.CompactTextString(m) }
func (*EarnedRequest) ProtoMessage() {}
func (*EarnedRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_9a45fd79b06f3a1b, []int{13}
}
func (m *EarnedRequest) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_EarnedRequest.Unmarshal(m, b)
}
func (m *EarnedRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_EarnedRequest.Marshal(b, m, deterministic)
}
func (m *EarnedRequest) XXX_Merge(src proto.Message) {
xxx_messageInfo_EarnedRequest.Merge(m, src)
}
func (m *EarnedRequest) XXX_Size() int {
return xxx_messageInfo_EarnedRequest.Size(m)
}
func (m *EarnedRequest) XXX_DiscardUnknown() {
xxx_messageInfo_EarnedRequest.DiscardUnknown(m)
}
var xxx_messageInfo_EarnedRequest proto.InternalMessageInfo
func (m *EarnedRequest) GetHeader() *RequestHeader {
if m != nil {
return m.Header
}
return nil
}
type EarnedResponse struct {
Total int64 `protobuf:"varint,1,opt,name=total,proto3" json:"total,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *EarnedResponse) Reset() { *m = EarnedResponse{} }
func (m *EarnedResponse) String() string { return proto.CompactTextString(m) }
func (*EarnedResponse) ProtoMessage() {}
func (*EarnedResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_9a45fd79b06f3a1b, []int{14}
}
func (m *EarnedResponse) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_EarnedResponse.Unmarshal(m, b)
}
func (m *EarnedResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_EarnedResponse.Marshal(b, m, deterministic)
}
func (m *EarnedResponse) XXX_Merge(src proto.Message) {
xxx_messageInfo_EarnedResponse.Merge(m, src)
}
func (m *EarnedResponse) XXX_Size() int {
return xxx_messageInfo_EarnedResponse.Size(m)
}
func (m *EarnedResponse) XXX_DiscardUnknown() {
xxx_messageInfo_EarnedResponse.DiscardUnknown(m)
}
var xxx_messageInfo_EarnedResponse proto.InternalMessageInfo
func (m *EarnedResponse) GetTotal() int64 {
if m != nil {
return m.Total
}
return 0
}
func init() {
proto.RegisterType((*RequestHeader)(nil), "multinode.RequestHeader")
proto.RegisterType((*DiskSpaceRequest)(nil), "multinode.DiskSpaceRequest")
@ -709,61 +785,66 @@ func init() {
proto.RegisterType((*TrustedSatellitesRequest)(nil), "multinode.TrustedSatellitesRequest")
proto.RegisterType((*TrustedSatellitesResponse)(nil), "multinode.TrustedSatellitesResponse")
proto.RegisterType((*TrustedSatellitesResponse_NodeURL)(nil), "multinode.TrustedSatellitesResponse.NodeURL")
proto.RegisterType((*EarnedRequest)(nil), "multinode.EarnedRequest")
proto.RegisterType((*EarnedResponse)(nil), "multinode.EarnedResponse")
}
func init() { proto.RegisterFile("multinode.proto", fileDescriptor_9a45fd79b06f3a1b) }
var fileDescriptor_9a45fd79b06f3a1b = []byte{
// 776 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x54, 0xd1, 0x6e, 0xe3, 0x44,
0x14, 0xc5, 0x6d, 0xe2, 0x34, 0x37, 0xa1, 0x6d, 0x06, 0x24, 0x5c, 0x93, 0x34, 0x95, 0x5b, 0xa9,
0x41, 0x20, 0x07, 0xd2, 0x27, 0x24, 0x1e, 0x20, 0x54, 0xa5, 0x11, 0x01, 0x8a, 0x53, 0x78, 0x00,
0xa9, 0xd1, 0x24, 0x9e, 0x26, 0xa6, 0x8e, 0xc7, 0x78, 0xc6, 0x81, 0xfe, 0x05, 0x7f, 0x84, 0xf6,
0x65, 0xb5, 0xdf, 0xb0, 0x0f, 0xdd, 0xcf, 0xd8, 0xd7, 0x95, 0x67, 0x26, 0x8e, 0xd3, 0x26, 0xd9,
0x55, 0xf6, 0xcd, 0x73, 0xee, 0xb9, 0xe7, 0x5c, 0xdf, 0xb9, 0x73, 0x61, 0x6f, 0x12, 0xfb, 0xdc,
0x0b, 0xa8, 0x4b, 0xec, 0x30, 0xa2, 0x9c, 0xa2, 0x62, 0x0a, 0x98, 0x30, 0xa2, 0x23, 0x2a, 0x61,
0xb3, 0x3e, 0xa2, 0x74, 0xe4, 0x93, 0xa6, 0x38, 0x0d, 0xe2, 0xdb, 0x26, 0xf7, 0x26, 0x84, 0x71,
0x3c, 0x09, 0x25, 0xc1, 0x6a, 0xc0, 0x87, 0x0e, 0xf9, 0x3b, 0x26, 0x8c, 0x5f, 0x12, 0xec, 0x92,
0x08, 0x7d, 0x02, 0x05, 0x1c, 0x7a, 0xfd, 0x3b, 0x72, 0x6f, 0x68, 0x47, 0x5a, 0xa3, 0xec, 0xe8,
0x38, 0xf4, 0x7e, 0x24, 0xf7, 0xd6, 0x39, 0xec, 0x9f, 0x7b, 0xec, 0xae, 0x17, 0xe2, 0x21, 0x51,
0x29, 0xe8, 0x4b, 0xd0, 0xc7, 0x22, 0x4d, 0x70, 0x4b, 0x2d, 0xc3, 0x9e, 0xd7, 0xb5, 0x20, 0xeb,
0x28, 0x9e, 0xf5, 0xbf, 0x06, 0x95, 0x8c, 0x0c, 0x0b, 0x69, 0xc0, 0x08, 0xaa, 0x42, 0x11, 0xfb,
0x3e, 0x1d, 0x62, 0x4e, 0x5c, 0x21, 0xb5, 0xed, 0xcc, 0x01, 0x54, 0x87, 0x52, 0xcc, 0x88, 0xdb,
0x0f, 0x3d, 0x32, 0x24, 0xcc, 0xd8, 0x12, 0x71, 0x48, 0xa0, 0x2b, 0x81, 0xa0, 0x1a, 0x88, 0x53,
0x9f, 0x47, 0x98, 0x8d, 0x8d, 0x6d, 0x99, 0x9f, 0x20, 0xd7, 0x09, 0x80, 0x10, 0xe4, 0x6e, 0x23,
0x42, 0x8c, 0x9c, 0x08, 0x88, 0x6f, 0xe1, 0x38, 0xc5, 0x9e, 0x8f, 0x07, 0x3e, 0x31, 0xf2, 0xca,
0x71, 0x06, 0x20, 0x13, 0x76, 0xe8, 0x94, 0x44, 0x89, 0x84, 0xa1, 0x8b, 0x60, 0x7a, 0xb6, 0xae,
0xa0, 0xda, 0xc6, 0x81, 0xfb, 0x8f, 0xe7, 0xf2, 0xf1, 0x4f, 0x34, 0xe0, 0xe3, 0x5e, 0x3c, 0x99,
0xe0, 0xe8, 0x7e, 0xf3, 0x9e, 0x9c, 0x41, 0x6d, 0x85, 0xa2, 0x6a, 0x0f, 0x82, 0x9c, 0x28, 0x45,
0x76, 0x46, 0x7c, 0x5b, 0x6d, 0xd8, 0xfd, 0x9d, 0x44, 0xcc, 0xa3, 0xc1, 0xe6, 0xc6, 0x9f, 0xc3,
0x5e, 0xaa, 0xa1, 0xac, 0x0c, 0x28, 0x4c, 0x25, 0x24, 0x54, 0x8a, 0xce, 0xec, 0x68, 0x5d, 0x00,
0xea, 0x62, 0xc6, 0xbf, 0xa7, 0x01, 0xc7, 0x43, 0xbe, 0xb9, 0xe9, 0x0d, 0x7c, 0xb4, 0xa0, 0xa3,
0x8c, 0x7f, 0x80, 0xb2, 0x8f, 0x19, 0xef, 0x0f, 0x25, 0xae, 0xe4, 0x4c, 0x5b, 0x0e, 0xb0, 0x3d,
0x1b, 0x60, 0xfb, 0x7a, 0x36, 0xc0, 0xed, 0x9d, 0x17, 0x0f, 0xf5, 0x0f, 0xfe, 0x7b, 0x55, 0xd7,
0x9c, 0x92, 0x3f, 0x17, 0xb4, 0xfe, 0x85, 0x8a, 0x43, 0xc2, 0x98, 0x63, 0xfe, 0x3e, 0xbd, 0x41,
0x5f, 0x41, 0x99, 0x61, 0x4e, 0x7c, 0xdf, 0xe3, 0xa4, 0xef, 0xb9, 0x62, 0xea, 0xca, 0xed, 0xdd,
0xc4, 0xf3, 0xe5, 0x43, 0x5d, 0xff, 0x99, 0xba, 0xa4, 0x73, 0xee, 0x94, 0x52, 0x4e, 0xc7, 0xb5,
0x5e, 0x6b, 0x80, 0xb2, 0xd6, 0xea, 0xcf, 0xbe, 0x01, 0x9d, 0x06, 0xbe, 0x17, 0x10, 0xe5, 0x7d,
0xb2, 0xe0, 0xfd, 0x98, 0x6e, 0xff, 0x22, 0xb8, 0x8e, 0xca, 0x41, 0x5f, 0x43, 0x1e, 0xc7, 0xae,
0xc7, 0x45, 0x01, 0xa5, 0xd6, 0xf1, 0xfa, 0xe4, 0xef, 0x12, 0xaa, 0x23, 0x33, 0xcc, 0x43, 0xd0,
0xa5, 0x18, 0xfa, 0x18, 0xf2, 0x6c, 0x48, 0x23, 0x59, 0x81, 0xe6, 0xc8, 0x83, 0x79, 0x09, 0x79,
0xc1, 0x5f, 0x1e, 0x46, 0x9f, 0xc1, 0x3e, 0x8b, 0x59, 0x48, 0x82, 0xe4, 0xfa, 0xfb, 0x92, 0xb0,
0x25, 0x08, 0x7b, 0x73, 0xbc, 0x97, 0xc0, 0x56, 0x17, 0x8c, 0xeb, 0x28, 0x66, 0x9c, 0xb8, 0xbd,
0x59, 0x3f, 0xd8, 0xe6, 0x13, 0xf2, 0x5c, 0x83, 0x83, 0x25, 0x72, 0xaa, 0x9d, 0x7f, 0x02, 0xe2,
0x32, 0xd8, 0x4f, 0x9b, 0xcf, 0x0c, 0xed, 0x68, 0xbb, 0x51, 0x6a, 0x7d, 0x91, 0xd1, 0x5e, 0xa9,
0x60, 0x27, 0x77, 0xf7, 0x9b, 0xd3, 0x75, 0x2a, 0xfc, 0x31, 0xc5, 0xec, 0x42, 0x41, 0x45, 0xd1,
0x29, 0x14, 0x12, 0x9d, 0xe4, 0xee, 0xb5, 0xa5, 0x77, 0xaf, 0x27, 0xe1, 0x8e, 0x9b, 0x3c, 0x19,
0xec, 0xba, 0x11, 0x61, 0x72, 0x35, 0x15, 0x9d, 0xd9, 0xb1, 0xf5, 0x2b, 0x14, 0x7a, 0x9c, 0x46,
0x78, 0x44, 0xd0, 0x05, 0x14, 0xd3, 0xb5, 0x87, 0x3e, 0xcd, 0x94, 0xf9, 0x78, 0xa7, 0x9a, 0xd5,
0xe5, 0x41, 0x59, 0x7b, 0x2b, 0x80, 0x62, 0xba, 0x2b, 0x10, 0x86, 0x72, 0x76, 0x5f, 0xa0, 0xd3,
0x4c, 0xea, 0xba, 0x1d, 0x65, 0x36, 0xde, 0x4e, 0x54, 0x7e, 0xcf, 0xb6, 0x20, 0x97, 0xfc, 0x2f,
0xfa, 0x16, 0x0a, 0x6a, 0x57, 0xa0, 0x83, 0x4c, 0xf6, 0xe2, 0x0e, 0x32, 0xcd, 0x65, 0x21, 0x75,
0x71, 0x5d, 0x28, 0x65, 0x1e, 0x3e, 0xaa, 0x65, 0xa8, 0x4f, 0x17, 0x8b, 0x79, 0xb8, 0x2a, 0xac,
0xd4, 0x3a, 0x00, 0xf3, 0xf9, 0x47, 0xd5, 0x15, 0xcf, 0x42, 0x6a, 0xd5, 0xd6, 0x3e, 0x1a, 0x74,
0x03, 0x95, 0x27, 0xc3, 0x82, 0x8e, 0xd7, 0x8f, 0x92, 0x14, 0x3e, 0x79, 0x97, 0x79, 0x6b, 0x9f,
0xfc, 0x61, 0x31, 0x4e, 0xa3, 0xbf, 0x6c, 0x8f, 0x36, 0xc5, 0x47, 0x33, 0x8c, 0xbc, 0x29, 0xe6,
0xa4, 0x99, 0x66, 0x87, 0x83, 0x81, 0x2e, 0x56, 0xdc, 0xd9, 0x9b, 0x00, 0x00, 0x00, 0xff, 0xff,
0x0a, 0x7f, 0x78, 0x13, 0xdb, 0x07, 0x00, 0x00,
// 825 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x54, 0xd1, 0x6e, 0xeb, 0x44,
0x10, 0xc5, 0x6d, 0xe2, 0xdc, 0x4c, 0x72, 0xdb, 0x9b, 0xe5, 0x4a, 0xb8, 0x26, 0xb9, 0xa9, 0xdc,
0x8a, 0x06, 0x81, 0x1c, 0x48, 0x9f, 0x90, 0x40, 0xa2, 0xa1, 0xb4, 0x8d, 0x08, 0x50, 0x9c, 0xc2,
0x03, 0x48, 0x8d, 0x36, 0xf1, 0x36, 0x31, 0x75, 0xbc, 0xc6, 0xbb, 0x0e, 0xe4, 0x2f, 0xf8, 0x23,
0xc4, 0x0b, 0xe2, 0x1b, 0x78, 0x28, 0x9f, 0xc1, 0x2b, 0xf2, 0xee, 0xc6, 0x71, 0xda, 0x24, 0xa0,
0xdc, 0x37, 0xcf, 0xcc, 0x99, 0x73, 0xd6, 0xb3, 0xb3, 0x07, 0xf6, 0x27, 0xb1, 0xcf, 0xbd, 0x80,
0xba, 0xc4, 0x0e, 0x23, 0xca, 0x29, 0x2a, 0xa6, 0x09, 0x13, 0x46, 0x74, 0x44, 0x65, 0xda, 0xac,
0x8f, 0x28, 0x1d, 0xf9, 0xa4, 0x29, 0xa2, 0x41, 0x7c, 0xd7, 0xe4, 0xde, 0x84, 0x30, 0x8e, 0x27,
0xa1, 0x04, 0x58, 0x0d, 0x78, 0xee, 0x90, 0x9f, 0x62, 0xc2, 0xf8, 0x15, 0xc1, 0x2e, 0x89, 0xd0,
0x5b, 0x50, 0xc0, 0xa1, 0xd7, 0xbf, 0x27, 0x33, 0x43, 0x3b, 0xd4, 0x1a, 0x65, 0x47, 0xc7, 0xa1,
0xf7, 0x05, 0x99, 0x59, 0xe7, 0xf0, 0xe2, 0xdc, 0x63, 0xf7, 0xbd, 0x10, 0x0f, 0x89, 0x6a, 0x41,
0x1f, 0x80, 0x3e, 0x16, 0x6d, 0x02, 0x5b, 0x6a, 0x19, 0xf6, 0xe2, 0x5c, 0x4b, 0xb4, 0x8e, 0xc2,
0x59, 0xbf, 0x69, 0x50, 0xc9, 0xd0, 0xb0, 0x90, 0x06, 0x8c, 0xa0, 0x2a, 0x14, 0xb1, 0xef, 0xd3,
0x21, 0xe6, 0xc4, 0x15, 0x54, 0xbb, 0xce, 0x22, 0x81, 0xea, 0x50, 0x8a, 0x19, 0x71, 0xfb, 0xa1,
0x47, 0x86, 0x84, 0x19, 0x3b, 0xa2, 0x0e, 0x49, 0xea, 0x5a, 0x64, 0x50, 0x0d, 0x44, 0xd4, 0xe7,
0x11, 0x66, 0x63, 0x63, 0x57, 0xf6, 0x27, 0x99, 0x9b, 0x24, 0x81, 0x10, 0xe4, 0xee, 0x22, 0x42,
0x8c, 0x9c, 0x28, 0x88, 0x6f, 0xa1, 0x38, 0xc5, 0x9e, 0x8f, 0x07, 0x3e, 0x31, 0xf2, 0x4a, 0x71,
0x9e, 0x40, 0x26, 0x3c, 0xa3, 0x53, 0x12, 0x25, 0x14, 0x86, 0x2e, 0x8a, 0x69, 0x6c, 0x5d, 0x43,
0xb5, 0x8d, 0x03, 0xf7, 0x67, 0xcf, 0xe5, 0xe3, 0x2f, 0x69, 0xc0, 0xc7, 0xbd, 0x78, 0x32, 0xc1,
0xd1, 0x6c, 0xfb, 0x99, 0x9c, 0x42, 0x6d, 0x0d, 0xa3, 0x1a, 0x0f, 0x82, 0x9c, 0x38, 0x8a, 0x9c,
0x8c, 0xf8, 0xb6, 0xda, 0xb0, 0xf7, 0x1d, 0x89, 0x98, 0x47, 0x83, 0xed, 0x85, 0xdf, 0x83, 0xfd,
0x94, 0x43, 0x49, 0x19, 0x50, 0x98, 0xca, 0x94, 0x60, 0x29, 0x3a, 0xf3, 0xd0, 0xba, 0x00, 0xd4,
0xc5, 0x8c, 0x7f, 0x46, 0x03, 0x8e, 0x87, 0x7c, 0x7b, 0xd1, 0x5b, 0x78, 0x73, 0x89, 0x47, 0x09,
0x5f, 0x42, 0xd9, 0xc7, 0x8c, 0xf7, 0x87, 0x32, 0xaf, 0xe8, 0x4c, 0x5b, 0x2e, 0xb0, 0x3d, 0x5f,
0x60, 0xfb, 0x66, 0xbe, 0xc0, 0xed, 0x67, 0x7f, 0x3e, 0xd4, 0xdf, 0xf8, 0xf5, 0xef, 0xba, 0xe6,
0x94, 0xfc, 0x05, 0xa1, 0xf5, 0x0b, 0x54, 0x1c, 0x12, 0xc6, 0x1c, 0xf3, 0xd7, 0x99, 0x0d, 0xfa,
0x10, 0xca, 0x0c, 0x73, 0xe2, 0xfb, 0x1e, 0x27, 0x7d, 0xcf, 0x15, 0x5b, 0x57, 0x6e, 0xef, 0x25,
0x9a, 0x7f, 0x3d, 0xd4, 0xf5, 0xaf, 0xa8, 0x4b, 0x3a, 0xe7, 0x4e, 0x29, 0xc5, 0x74, 0x5c, 0xeb,
0x1f, 0x0d, 0x50, 0x56, 0x5a, 0xfd, 0xd9, 0xc7, 0xa0, 0xd3, 0xc0, 0xf7, 0x02, 0xa2, 0xb4, 0x8f,
0x97, 0xb4, 0x1f, 0xc3, 0xed, 0xaf, 0x05, 0xd6, 0x51, 0x3d, 0xe8, 0x23, 0xc8, 0xe3, 0xd8, 0xf5,
0xb8, 0x38, 0x40, 0xa9, 0x75, 0xb4, 0xb9, 0xf9, 0x2c, 0x81, 0x3a, 0xb2, 0xc3, 0x7c, 0x05, 0xba,
0x24, 0x43, 0x2f, 0x21, 0xcf, 0x86, 0x34, 0x92, 0x27, 0xd0, 0x1c, 0x19, 0x98, 0x57, 0x90, 0x17,
0xf8, 0xd5, 0x65, 0xf4, 0x2e, 0xbc, 0x60, 0x31, 0x0b, 0x49, 0x90, 0x5c, 0x7f, 0x5f, 0x02, 0x76,
0x04, 0x60, 0x7f, 0x91, 0xef, 0x25, 0x69, 0xab, 0x0b, 0xc6, 0x4d, 0x14, 0x33, 0x4e, 0xdc, 0xde,
0x7c, 0x1e, 0x6c, 0xfb, 0x0d, 0xf9, 0x43, 0x83, 0x83, 0x15, 0x74, 0x6a, 0x9c, 0x3f, 0x00, 0xe2,
0xb2, 0xd8, 0x4f, 0x87, 0xcf, 0x0c, 0xed, 0x70, 0xb7, 0x51, 0x6a, 0xbd, 0x9f, 0xe1, 0x5e, 0xcb,
0x60, 0x27, 0x77, 0xf7, 0xad, 0xd3, 0x75, 0x2a, 0xfc, 0x31, 0xc4, 0xec, 0x42, 0x41, 0x55, 0xd1,
0x09, 0x14, 0x12, 0x9e, 0xe4, 0xee, 0xb5, 0x95, 0x77, 0xaf, 0x27, 0xe5, 0x8e, 0x9b, 0x3c, 0x19,
0xec, 0xba, 0x11, 0x61, 0xd2, 0x9a, 0x8a, 0xce, 0x3c, 0xb4, 0xce, 0xe0, 0xf9, 0xe7, 0x38, 0x0a,
0x88, 0xbb, 0xfd, 0x2c, 0xde, 0x81, 0xbd, 0x39, 0x85, 0xfa, 0xff, 0x97, 0x90, 0xe7, 0x94, 0x63,
0x5f, 0xb9, 0x81, 0x0c, 0x5a, 0xdf, 0x40, 0xa1, 0xc7, 0x69, 0x84, 0x47, 0x04, 0x5d, 0x40, 0x31,
0x75, 0x58, 0xf4, 0x76, 0x46, 0xe1, 0xb1, 0x7d, 0x9b, 0xd5, 0xd5, 0x45, 0x29, 0xd4, 0x0a, 0xa0,
0x98, 0xda, 0x12, 0xc2, 0x50, 0xce, 0x5a, 0x13, 0x3a, 0xc9, 0xb4, 0x6e, 0xb2, 0x43, 0xb3, 0xf1,
0xdf, 0x40, 0xa5, 0xf7, 0xfb, 0x0e, 0xe4, 0x92, 0xd1, 0xa2, 0x4f, 0xa1, 0xa0, 0x6c, 0x09, 0x1d,
0x64, 0xba, 0x97, 0xed, 0xce, 0x34, 0x57, 0x95, 0xd4, 0x8c, 0xba, 0x50, 0xca, 0x78, 0x0c, 0xaa,
0x65, 0xa0, 0x4f, 0x3d, 0xcc, 0x7c, 0xb5, 0xae, 0xac, 0xd8, 0x3a, 0x00, 0x8b, 0xa7, 0x86, 0xaa,
0x6b, 0x5e, 0xa0, 0xe4, 0xaa, 0x6d, 0x7c, 0x9f, 0xe8, 0x16, 0x2a, 0x4f, 0xf6, 0x12, 0x1d, 0x6d,
0xde, 0x5a, 0x49, 0x7c, 0xfc, 0x7f, 0x56, 0xbb, 0x75, 0x09, 0xfa, 0x35, 0x9e, 0xd1, 0x98, 0xa3,
0x4f, 0x40, 0x97, 0x8b, 0x83, 0xb2, 0x4b, 0xb6, 0xb4, 0x8e, 0xe6, 0xc1, 0x8a, 0x8a, 0x24, 0x6a,
0x1f, 0x7f, 0x6f, 0x31, 0x4e, 0xa3, 0x1f, 0x6d, 0x8f, 0x36, 0xc5, 0x47, 0x33, 0x8c, 0xbc, 0x29,
0xe6, 0xa4, 0x99, 0xb6, 0x84, 0x83, 0x81, 0x2e, 0x6c, 0xf9, 0xf4, 0xdf, 0x00, 0x00, 0x00, 0xff,
0xff, 0xd9, 0x0f, 0xd3, 0x9c, 0x8f, 0x08, 0x00, 0x00,
}
// --- DRPC BEGIN ---
@ -1083,4 +1164,73 @@ func (x *drpcNodeTrustedSatellitesStream) SendAndClose(m *TrustedSatellitesRespo
return x.CloseSend()
}
type DRPCPayoutClient interface {
DRPCConn() drpc.Conn
Earned(ctx context.Context, in *EarnedRequest) (*EarnedResponse, error)
}
type drpcPayoutClient struct {
cc drpc.Conn
}
func NewDRPCPayoutClient(cc drpc.Conn) DRPCPayoutClient {
return &drpcPayoutClient{cc}
}
func (c *drpcPayoutClient) DRPCConn() drpc.Conn { return c.cc }
func (c *drpcPayoutClient) Earned(ctx context.Context, in *EarnedRequest) (*EarnedResponse, error) {
out := new(EarnedResponse)
err := c.cc.Invoke(ctx, "/multinode.Payout/Earned", in, out)
if err != nil {
return nil, err
}
return out, nil
}
type DRPCPayoutServer interface {
Earned(context.Context, *EarnedRequest) (*EarnedResponse, error)
}
type DRPCPayoutDescription struct{}
func (DRPCPayoutDescription) NumMethods() int { return 1 }
func (DRPCPayoutDescription) Method(n int) (string, drpc.Receiver, interface{}, bool) {
switch n {
case 0:
return "/multinode.Payout/Earned",
func(srv interface{}, ctx context.Context, in1, in2 interface{}) (drpc.Message, error) {
return srv.(DRPCPayoutServer).
Earned(
ctx,
in1.(*EarnedRequest),
)
}, DRPCPayoutServer.Earned, true
default:
return "", nil, nil, false
}
}
func DRPCRegisterPayout(mux drpc.Mux, impl DRPCPayoutServer) error {
return mux.Register(impl, DRPCPayoutDescription{})
}
type DRPCPayout_EarnedStream interface {
drpc.Stream
SendAndClose(*EarnedResponse) error
}
type drpcPayoutEarnedStream struct {
drpc.Stream
}
func (x *drpcPayoutEarnedStream) SendAndClose(m *EarnedResponse) error {
if err := x.MsgSend(m); err != nil {
return err
}
return x.CloseSend()
}
// --- DRPC END ---


@ -95,3 +95,15 @@ message TrustedSatellitesResponse {
repeated NodeURL trusted_satellites = 1;
}
service Payout {
rpc Earned(EarnedRequest) returns (EarnedResponse);
}
message EarnedRequest {
RequestHeader header = 1;
}
message EarnedResponse {
int64 total = 1;
}
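A hedged client-side sketch of consuming the new Payout service through the generated DRPC bindings, mirroring how ListInfos wires it up earlier in this commit (the multinodepb import path is an assumption based on the surrounding code):

package example

import (
	"context"

	"storj.io/drpc"
	"storj.io/storj/private/multinodepb" // assumed path of the bindings above
)

// totalEarned is a hypothetical helper querying the new Earned RPC.
func totalEarned(ctx context.Context, conn drpc.Conn, apiSecret []byte) (int64, error) {
	payoutClient := multinodepb.NewDRPCPayoutClient(conn)
	resp, err := payoutClient.Earned(ctx, &multinodepb.EarnedRequest{
		Header: &multinodepb.RequestHeader{ApiKey: apiSecret},
	})
	if err != nil {
		return 0, err
	}
	return resp.Total, nil
}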


@ -6,6 +6,8 @@ package accounting
import (
"time"
"github.com/zeebo/errs"
"storj.io/common/storj"
)
@ -19,6 +21,20 @@ const (
LastRollup = "LastRollup"
)
var (
// ErrInvalidArgument is returned when a function argument has an invalid
// business domain value.
ErrInvalidArgument = errs.Class("invalid argument")
// ErrSystemOrNetError is returned when the used storage backend returns an
// internal system or network error.
ErrSystemOrNetError = errs.Class("backend system error")
// ErrKeyNotFound is returned when the key is not found in the cache.
ErrKeyNotFound = errs.Class("key not found")
// ErrUnexpectedValue is returned when the cache holds a value that is
// unexpected according to the business domain.
ErrUnexpectedValue = errs.Class("unexpected value")
)
// CSVRow represents data from QueryPaymentInfo without exposing dbx.
type CSVRow struct {
NodeID storj.NodeID


@ -203,12 +203,40 @@ type ProjectAccounting interface {
// Cache stores live information about project storage which has not yet been synced to ProjectAccounting.
//
// All the implementations must follow the convention of returning errors of one
// of the classes defined in this package.
//
// All the methods return:
//
// ErrInvalidArgument: an implementation may return this if some parameter
// contains a value which isn't accepted; nonetheless, not all implementations
// impose the same constraints on parameters.
//
// ErrSystemOrNetError: any method will return this if there is an error with
// the underlying system or the network.
//
// ErrKeyNotFound: returned when a key is not found.
//
// ErrUnexpectedValue: returned when a key or value stored in the underlying
// system isn't of the expected format or type according to the business domain.
//
// architecture: Database
type Cache interface {
// GetProjectStorageUsage returns the project's storage usage.
GetProjectStorageUsage(ctx context.Context, projectID uuid.UUID) (totalUsed int64, err error)
// GetProjectBandwidthUsage returns the project's bandwidth usage.
GetProjectBandwidthUsage(ctx context.Context, projectID uuid.UUID, now time.Time) (currentUsed int64, err error)
// UpdateProjectBandwidthUsage updates the project's bandwidth usage,
// increasing it by increment. The projectID entry is created when it doesn't
// exist, hence this method never returns errors of the ErrKeyNotFound class.
UpdateProjectBandwidthUsage(ctx context.Context, projectID uuid.UUID, increment int64, ttl time.Duration, now time.Time) error
// AddProjectStorageUsage adds spaceUsed to the project's storage usage. The
// projectID entry is created when it doesn't exist, hence this method never
// returns ErrKeyNotFound.
AddProjectStorageUsage(ctx context.Context, projectID uuid.UUID, spaceUsed int64) error
// GetAllProjectTotals returns the total storage used by each project.
GetAllProjectTotals(ctx context.Context) (map[uuid.UUID]int64, error)
// Close closes the client, releasing any open resources. Once it's called, no
// other method must be called.
Close() error
}
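A minimal caller sketch of the error-class contract just documented (the helper name is hypothetical): ErrKeyNotFound maps to "no usage recorded yet", while the other classes propagate.

package example

import (
	"context"

	"storj.io/common/uuid"
	"storj.io/storj/satellite/accounting"
)

// storageUsageOrZero is a hypothetical helper illustrating the error classes.
func storageUsageOrZero(ctx context.Context, cache accounting.Cache, projectID uuid.UUID) (int64, error) {
	used, err := cache.GetProjectStorageUsage(ctx, projectID)
	switch {
	case err == nil:
		return used, nil
	case accounting.ErrKeyNotFound.Has(err):
		return 0, nil // nothing tallied for this project yet
	default:
		return 0, err // ErrInvalidArgument, ErrSystemOrNetError, ErrUnexpectedValue
	}
}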


@ -28,6 +28,15 @@ type Config struct {
// NewCache creates a new accounting.Cache instance with the backend type
// specified in the provided config.
//
// The cache instance may be returned despite an accounting.ErrSystemOrNetError
// error, because some backends reconnect on each operation if the connection
// was not established or was dropped; that can happen at the moment of
// instantiation, and the cache will work once the backend system becomes
// reachable later on.
// For this reason, components that use the cache should keep operating, with
// degraded service, even while the backend is not responding successfully.
func NewCache(log *zap.Logger, config Config) (accounting.Cache, error) {
parts := strings.SplitN(config.StorageBackend, ":", 2)
var backendType string
@ -38,7 +47,7 @@ func NewCache(log *zap.Logger, config Config) (accounting.Cache, error) {
backendType = parts[0]
switch backendType {
case "redis":
return newRedisLiveAccounting(log, config.StorageBackend)
return newRedisLiveAccounting(config.StorageBackend)
default:
return nil, Error.New("unrecognized live accounting backend specifier %q. Currently only redis is supported", backendType)
}
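A sketch of the caller pattern that comment implies, keeping the instance when the only failure is a (possibly transient) backend error; the wrapper name is an assumption:

package example

import (
	"go.uber.org/zap"

	"storj.io/storj/satellite/accounting"
	"storj.io/storj/satellite/accounting/live"
)

// newLiveAccounting is a hypothetical wrapper that runs degraded on backend errors.
func newLiveAccounting(log *zap.Logger, config live.Config) (accounting.Cache, error) {
	cache, err := live.NewCache(log.Named("live-accounting"), config)
	if err != nil {
		if cache == nil || !accounting.ErrSystemOrNetError.Has(err) {
			return nil, err // misconfiguration: bad address, unsupported backend
		}
		// Backend unreachable right now; keep the instance and run degraded.
		log.Warn("live accounting backend not responding", zap.Error(err))
	}
	return cache, nil
}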


@ -7,6 +7,7 @@ import (
"context"
"math/rand"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
@ -21,7 +22,7 @@ import (
"storj.io/storj/storage/redis/redisserver"
)
func TestLiveAccountingCache(t *testing.T) {
func TestAddGetProjectStorageAndBandwidthUsage(t *testing.T) {
tests := []struct {
backend string
}{
@ -32,123 +33,67 @@ func TestLiveAccountingCache(t *testing.T) {
ctx := testcontext.New(t)
defer ctx.Cleanup()
redis, err := redisserver.Mini(ctx)
redis, err := redisserver.Start(ctx)
require.NoError(t, err)
defer ctx.Check(redis.Close)
for _, tt := range tests {
var config live.Config
if tt.backend == "redis" {
config = live.Config{
StorageBackend: "redis://" + redis.Addr() + "?db=0",
}
}
tt := tt
t.Run(tt.backend, func(t *testing.T) {
ctx := testcontext.New(t)
cache, err := live.NewCache(zaptest.NewLogger(t).Named("live-accounting"), config)
require.NoError(t, err)
projectIDs, sum, err := populateCache(ctx, cache)
require.NoError(t, err)
// make sure all of the "projects" got all space updates and got right totals
for _, projID := range projectIDs {
spaceUsed, err := cache.GetProjectStorageUsage(ctx, projID)
require.NoError(t, err)
assert.Equalf(t, sum, spaceUsed, "projectID %v", projID)
}
negativeVal := int64(-100)
sum += negativeVal
for _, projID := range projectIDs {
err = cache.AddProjectStorageUsage(ctx, projID, negativeVal)
require.NoError(t, err)
spaceUsed, err := cache.GetProjectStorageUsage(ctx, projID)
require.NoError(t, err)
assert.EqualValues(t, sum, spaceUsed)
}
}
}
func TestRedisCacheConcurrency(t *testing.T) {
ctx := testcontext.New(t)
defer ctx.Cleanup()
redis, err := redisserver.Mini(ctx)
require.NoError(t, err)
defer ctx.Check(redis.Close)
config := live.Config{
StorageBackend: "redis://" + redis.Addr() + "?db=0",
}
cache, err := live.NewCache(zaptest.NewLogger(t).Named("live-accounting"), config)
require.NoError(t, err)
projectID := testrand.UUID()
const (
numConcurrent = 100
spaceUsed = 10
)
expectedSum := spaceUsed * numConcurrent
var group errgroup.Group
for i := 0; i < numConcurrent; i++ {
group.Go(func() error {
return cache.AddProjectStorageUsage(ctx, projectID, spaceUsed)
})
}
require.NoError(t, group.Wait())
total, err := cache.GetProjectStorageUsage(ctx, projectID)
require.NoError(t, err)
require.EqualValues(t, expectedSum, total)
}
func populateCache(ctx context.Context, cache accounting.Cache) (projectIDs []uuid.UUID, sum int64, _ error) {
const (
valuesListSize = 10
valueMultiplier = 4096
numProjects = 100
)
// make a largish list of varying values
someValues := make([]int64, valuesListSize)
for i := range someValues {
someValues[i] = int64((i + 1) * valueMultiplier)
sum += someValues[i]
}
// make up some project IDs
projectIDs = make([]uuid.UUID, numProjects)
for i := range projectIDs {
projectIDs[i] = testrand.UUID()
}
// send lots of space used updates for all of these projects to the live
// accounting store.
errg, ctx := errgroup.WithContext(context.Background())
for _, projID := range projectIDs {
projID := projID
errg.Go(func() error {
// have each project sending the values in a different order
myValues := make([]int64, valuesListSize)
copy(myValues, someValues)
rand.Shuffle(valuesListSize, func(v1, v2 int) {
myValues[v1], myValues[v2] = myValues[v2], myValues[v1]
})
for _, val := range myValues {
if err := cache.AddProjectStorageUsage(ctx, projID, val); err != nil {
return err
var config live.Config
if tt.backend == "redis" {
config = live.Config{
StorageBackend: "redis://" + redis.Addr() + "?db=0",
}
}
return nil
cache, err := live.NewCache(zaptest.NewLogger(t).Named("live-accounting"), config)
require.NoError(t, err)
defer ctx.Check(cache.Close)
populatedData, err := populateCache(ctx, cache)
require.NoError(t, err)
// make sure all of the "projects" got all space updates and got right totals
for _, pdata := range populatedData {
pdata := pdata
t.Run("storage", func(t *testing.T) {
spaceUsed, err := cache.GetProjectStorageUsage(ctx, pdata.projectID)
require.NoError(t, err)
assert.Equalf(t, pdata.storageSum, spaceUsed, "projectID %v", pdata.projectID)
// update it again and check
negativeVal := -(rand.Int63n(pdata.storageSum) + 1)
pdata.storageSum += negativeVal
err = cache.AddProjectStorageUsage(ctx, pdata.projectID, negativeVal)
require.NoError(t, err)
spaceUsed, err = cache.GetProjectStorageUsage(ctx, pdata.projectID)
require.NoError(t, err)
assert.EqualValues(t, pdata.storageSum, spaceUsed)
})
t.Run("bandwidth", func(t *testing.T) {
bandwidthUsed, err := cache.GetProjectBandwidthUsage(ctx, pdata.projectID, pdata.bandwidthNow)
require.NoError(t, err)
assert.Equalf(t, pdata.bandwidthSum, bandwidthUsed, "projectID %v", pdata.projectID)
// update it again and check
negativeVal := -(rand.Int63n(pdata.bandwidthSum) + 1)
pdata.bandwidthSum += negativeVal
err = cache.UpdateProjectBandwidthUsage(ctx, pdata.projectID, negativeVal, time.Second*2, pdata.bandwidthNow)
require.NoError(t, err)
bandwidthUsed, err = cache.GetProjectBandwidthUsage(ctx, pdata.projectID, pdata.bandwidthNow)
require.NoError(t, err)
assert.EqualValues(t, pdata.bandwidthSum, bandwidthUsed)
})
}
})
}
return projectIDs, sum, errg.Wait()
}
func TestGetAllProjectTotals(t *testing.T) {
@ -162,37 +107,162 @@ func TestGetAllProjectTotals(t *testing.T) {
ctx := testcontext.New(t)
defer ctx.Cleanup()
redis, err := redisserver.Mini(ctx)
redis, err := redisserver.Start(ctx)
require.NoError(t, err)
defer ctx.Check(redis.Close)
for _, tt := range tests {
var config live.Config
if tt.backend == "redis" {
config = live.Config{
StorageBackend: "redis://" + redis.Addr() + "?db=0",
tt := tt
t.Run(tt.backend, func(t *testing.T) {
ctx := testcontext.New(t)
var config live.Config
if tt.backend == "redis" {
config = live.Config{
StorageBackend: "redis://" + redis.Addr() + "?db=0",
}
}
}
cache, err := live.NewCache(zaptest.NewLogger(t).Named("live-accounting"), config)
require.NoError(t, err)
projectIDs := make([]uuid.UUID, 1000)
for i := range projectIDs {
projectIDs[i] = testrand.UUID()
err := cache.AddProjectStorageUsage(ctx, projectIDs[i], int64(i))
cache, err := live.NewCache(zaptest.NewLogger(t).Named("live-accounting"), config)
require.NoError(t, err)
}
defer ctx.Check(cache.Close)
projectTotals, err := cache.GetAllProjectTotals(ctx)
require.NoError(t, err)
require.Len(t, projectTotals, len(projectIDs))
projectIDs := make([]uuid.UUID, 1000)
for i := range projectIDs {
projectIDs[i] = testrand.UUID()
err := cache.AddProjectStorageUsage(ctx, projectIDs[i], int64(i))
require.NoError(t, err)
}
// make sure each project ID and total was received
for _, projID := range projectIDs {
total, err := cache.GetProjectStorageUsage(ctx, projID)
projectTotals, err := cache.GetAllProjectTotals(ctx)
require.NoError(t, err)
assert.Equal(t, total, projectTotals[projID])
}
require.Len(t, projectTotals, len(projectIDs))
// make sure each project ID and total was received
for _, projID := range projectIDs {
total, err := cache.GetProjectStorageUsage(ctx, projID)
require.NoError(t, err)
assert.Equal(t, total, projectTotals[projID])
}
})
}
}
func TestLiveAccountingCache_ProjectBandwidthUsage_expiration(t *testing.T) {
tests := []struct {
backend string
}{
{
backend: "redis",
},
}
ctx := testcontext.New(t)
defer ctx.Cleanup()
redis, err := redisserver.Start(ctx)
require.NoError(t, err)
defer ctx.Check(redis.Close)
for _, tt := range tests {
tt := tt
t.Run(tt.backend, func(t *testing.T) {
ctx := testcontext.New(t)
var config live.Config
if tt.backend == "redis" {
config = live.Config{
StorageBackend: "redis://" + redis.Addr() + "?db=0",
}
}
cache, err := live.NewCache(zaptest.NewLogger(t).Named("live-accounting"), config)
require.NoError(t, err)
defer ctx.Check(cache.Close)
var (
projectID = testrand.UUID()
now = time.Now()
)
err = cache.UpdateProjectBandwidthUsage(ctx, projectID, rand.Int63n(4096)+1, time.Second, now)
require.NoError(t, err)
if tt.backend == "redis" {
redis.TestingFastForward(time.Second)
}
time.Sleep(2 * time.Second)
_, err = cache.GetProjectBandwidthUsage(ctx, projectID, now)
require.Error(t, err)
})
}
}
type populateCacheData struct {
projectID uuid.UUID
storageSum int64
bandwidthSum int64
bandwidthNow time.Time
}
func populateCache(ctx context.Context, cache accounting.Cache) ([]populateCacheData, error) {
var (
valuesListSize = rand.Intn(10) + 10
numProjects = rand.Intn(100) + 100
valueStorageMultiplier = rand.Int63n(4095) + 1
valueBandwidthMultiplier = rand.Int63n(4095) + 1
)
// make a largish list of varying values
baseValues := make([]int64, valuesListSize)
for i := range baseValues {
baseValues[i] = rand.Int63n(int64(valuesListSize)) + 1
}
// make up some project IDs
populatedData := make([]populateCacheData, numProjects)
for i := range populatedData {
populatedData[i] = populateCacheData{
projectID: testrand.UUID(),
}
}
// send lots of space used updates for all of these projects to the live
// accounting store.
errg, ctx := errgroup.WithContext(context.Background())
for i, pdata := range populatedData {
var (
i = i
projID = pdata.projectID
)
errg.Go(func() error {
// have each project sending the values in a different order
myValues := make([]int64, valuesListSize)
copy(myValues, baseValues)
rand.Shuffle(valuesListSize, func(v1, v2 int) {
myValues[v1], myValues[v2] = myValues[v2], myValues[v1]
})
now := time.Now()
populatedData[i].bandwidthNow = now
for _, val := range myValues {
storageVal := val * valueStorageMultiplier
populatedData[i].storageSum += storageVal
if err := cache.AddProjectStorageUsage(ctx, projID, storageVal); err != nil {
return err
}
bandwidthVal := val * valueBandwidthMultiplier
populatedData[i].bandwidthSum += bandwidthVal
if err := cache.UpdateProjectBandwidthUsage(ctx, projID, bandwidthVal, time.Hour, now); err != nil {
return err
}
}
return nil
})
}
return populatedData, errg.Wait()
}


@ -5,73 +5,90 @@ package live
import (
"context"
"errors"
"fmt"
"net/url"
"strconv"
"strings"
"time"
"go.uber.org/zap"
"github.com/go-redis/redis"
"storj.io/common/uuid"
"storj.io/storj/storage"
"storj.io/storj/storage/redis"
"storj.io/storj/satellite/accounting"
)
type redisLiveAccounting struct {
log *zap.Logger
client *redis.Client
}
func newRedisLiveAccounting(log *zap.Logger, address string) (*redisLiveAccounting, error) {
client, err := redis.NewClientFrom(address)
// newRedisLiveAccounting returns a redisLiveAccounting cache instance.
//
// It returns accounting.ErrInvalidArgument if the connection address is invalid
// according to Redis.
//
// The function pings the Redis server to verify connectivity, but if the ping
// fails it still returns an instance together with
// accounting.ErrSystemOrNetError, because Redis may not be operative at this
// precise moment yet may be for future method calls, as the client
// automatically reconnects.
func newRedisLiveAccounting(address string) (*redisLiveAccounting, error) {
redisurl, err := url.Parse(address)
if err != nil {
return nil, Error.Wrap(err)
return nil, accounting.ErrInvalidArgument.New("address: invalid URL; %w", err)
}
return &redisLiveAccounting{
log: log,
if redisurl.Scheme != "redis" {
return nil, accounting.ErrInvalidArgument.New("address: not a redis:// formatted address")
}
q := redisurl.Query()
db := q.Get("db")
if db == "" {
return nil, accounting.ErrInvalidArgument.New("address: a database number has to be specified")
}
dbn, err := strconv.Atoi(db)
if err != nil {
return nil, accounting.ErrInvalidArgument.New("address: invalid database number %s", db)
}
client := redis.NewClient(&redis.Options{
Addr: redisurl.Host,
Password: q.Get("password"),
DB: dbn,
})
cache := &redisLiveAccounting{
client: client,
}, nil
}
// ping here to verify we are able to connect to Redis with the initialized client.
if err := client.Ping().Err(); err != nil {
return cache, accounting.ErrSystemOrNetError.New("Redis ping failed: %w", err)
}
return cache, nil
}
// GetProjectStorageUsage gets inline and remote storage totals for a given
// project, back to the time of the last accounting tally.
func (cache *redisLiveAccounting) GetProjectStorageUsage(ctx context.Context, projectID uuid.UUID) (totalUsed int64, err error) {
defer mon.Task()(&ctx, projectID)(&err)
val, err := cache.client.Get(ctx, projectID[:])
if err != nil {
if storage.ErrKeyNotFound.Has(err) {
return 0, nil
}
return 0, Error.Wrap(err)
}
intval, err := strconv.ParseInt(string([]byte(val)), 10, 64)
return intval, Error.Wrap(err)
}
// createBandwidthProjectIDKey creates the bandwidth project key.
// The current month is combined with projectID to create a prefix.
func createBandwidthProjectIDKey(projectID uuid.UUID, now time.Time) []byte {
// Add current month as prefix
_, month, _ := now.Date()
key := append(projectID[:], byte(int(month)))
return append(key, []byte(":bandwidth")...)
return cache.getInt64(ctx, string(projectID[:]))
}
// GetProjectBandwidthUsage returns the current bandwidth usage
// from specific project.
func (cache *redisLiveAccounting) GetProjectBandwidthUsage(ctx context.Context, projectID uuid.UUID, now time.Time) (currentUsed int64, err error) {
val, err := cache.client.Get(ctx, createBandwidthProjectIDKey(projectID, now))
if err != nil {
return 0, err
}
intval, err := strconv.ParseInt(string([]byte(val)), 10, 64)
return intval, Error.Wrap(err)
defer mon.Task()(&ctx, projectID, now)(&err)
return cache.getInt64(ctx, createBandwidthProjectIDKey(projectID, now))
}
// UpdateProjectBandwidthUsage increments the bandwidth cache key value.
func (cache *redisLiveAccounting) UpdateProjectBandwidthUsage(ctx context.Context, projectID uuid.UUID, increment int64, ttl time.Duration, now time.Time) (err error) {
mon.Task()(&ctx, projectID, increment, ttl, now)(&err)
// The following script will increment the cache key
// by a specific value. If the key does not exist, it is
@ -80,18 +97,21 @@ func (cache *redisLiveAccounting) UpdateProjectBandwidthUsage(ctx context.Contex
// To achieve this we compare the increment and key value,
// if they are equal its the first iteration.
// More details on rate limiter section: https://redis.io/commands/incr
script := fmt.Sprintf(`local current
current = redis.call("incrby", KEYS[1], "%d")
if tonumber(current) == "%d" then
if tonumber(current) == %d then
redis.call("expire",KEYS[1], %d)
end
return current
`, increment, increment, int(ttl.Seconds()))
key := createBandwidthProjectIDKey(projectID, now)
err = cache.client.Eval(script, []string{key}).Err()
if err != nil {
return accounting.ErrSystemOrNetError.New("Redis eval failed: %w", err)
}
return cache.client.Eval(ctx, script, []string{string(key)})
return nil
}
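The net effect, as a hedged usage sketch: the first increment in a window both creates the key and arms its TTL, while later increments only add (the helper name is hypothetical):

package example

import (
	"context"
	"time"

	"storj.io/common/uuid"
	"storj.io/storj/satellite/accounting"
)

// demoBandwidthTTL is a hypothetical helper showing the expiration behavior.
func demoBandwidthTTL(ctx context.Context, cache accounting.Cache, projectID uuid.UUID) (int64, error) {
	now := time.Now()
	// First call: INCRBY creates the key, the script sees current == increment
	// and sets EXPIRE.
	if err := cache.UpdateProjectBandwidthUsage(ctx, projectID, 100, time.Hour, now); err != nil {
		return 0, err
	}
	// Second call: INCRBY only; the TTL armed above keeps ticking.
	if err := cache.UpdateProjectBandwidthUsage(ctx, projectID, 50, time.Hour, now); err != nil {
		return 0, err
	}
	// Reports 150 until the hour elapses, then ErrKeyNotFound.
	return cache.GetProjectBandwidthUsage(ctx, projectID, now)
}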
// AddProjectStorageUsage lets the live accounting know that the given
@ -99,37 +119,92 @@ func (cache *redisLiveAccounting) UpdateProjectBandwidthUsage(ctx context.Contex
// perspective; i.e. segment size).
func (cache *redisLiveAccounting) AddProjectStorageUsage(ctx context.Context, projectID uuid.UUID, spaceUsed int64) (err error) {
defer mon.Task()(&ctx, projectID, spaceUsed)(&err)
return cache.client.IncrBy(ctx, projectID[:], spaceUsed)
_, err = cache.client.IncrBy(string(projectID[:]), spaceUsed).Result()
if err != nil {
return accounting.ErrSystemOrNetError.New("Redis incrby failed: %w", err)
}
return nil
}
// GetAllProjectTotals iterates through the live accounting DB and returns a map of project IDs and totals.
//
// TODO (https://storjlabs.atlassian.net/browse/IN-173): see if it is possible
// to get key/value pairs with one single call.
func (cache *redisLiveAccounting) GetAllProjectTotals(ctx context.Context) (_ map[uuid.UUID]int64, err error) {
defer mon.Task()(&ctx)(&err)
projects := make(map[uuid.UUID]int64)
it := cache.client.Scan(0, "*", 0).Iterator()
for it.Next() {
key := it.Val()
err = cache.client.Iterate(ctx, storage.IterateOptions{Recurse: true}, func(ctx context.Context, it storage.Iterator) error {
var item storage.ListItem
for it.Next(ctx, &item) {
if item.Key == nil {
return Error.New("nil key")
}
id := new(uuid.UUID)
copy(id[:], item.Key[:])
intval, err := strconv.ParseInt(string([]byte(item.Value)), 10, 64)
if err != nil {
return Error.New("could not get total for project %s", id.String())
}
if !strings.HasSuffix(item.Key.String(), "bandwidth") {
projects[*id] = intval
}
// skip bandwidth keys
if strings.HasSuffix(key, "bandwidth") {
continue
}
return nil
})
return projects, err
projectID, err := uuid.FromBytes([]byte(key))
if err != nil {
return nil, accounting.ErrUnexpectedValue.New("cannot parse the key as UUID; key=%q", key)
}
if _, seen := projects[projectID]; seen {
continue
}
val, err := cache.getInt64(ctx, key)
if err != nil {
if accounting.ErrKeyNotFound.Has(err) {
continue
}
return nil, err
}
projects[projectID] = val
}
return projects, nil
}
// Close the DB connection.
func (cache *redisLiveAccounting) Close() error {
return cache.client.Close()
err := cache.client.Close()
if err != nil {
return accounting.ErrSystemOrNetError.New("Redis close failed: %w", err)
}
return nil
}
func (cache *redisLiveAccounting) getInt64(ctx context.Context, key string) (_ int64, err error) {
defer mon.Task()(&ctx)(&err)
val, err := cache.client.Get(key).Bytes()
if err != nil {
if errors.Is(err, redis.Nil) {
return 0, accounting.ErrKeyNotFound.New("%q", key)
}
return 0, accounting.ErrSystemOrNetError.New("Redis get failed: %w", err)
}
intval, err := strconv.ParseInt(string(val), 10, 64)
if err != nil {
return 0, accounting.ErrUnexpectedValue.New("cannot parse the value as int64; key=%q val=%q", key, val)
}
return intval, nil
}
// createBandwidthProjectIDKey creates the bandwidth project key.
// The current month is combined with projectID to create a prefix.
func createBandwidthProjectIDKey(projectID uuid.UUID, now time.Time) string {
// Add current month as prefix
_, month, _ := now.Date()
key := append(projectID[:], byte(int(month)))
return string(key) + ":bandwidth"
}
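For illustration, the key layout produced above, spelled out with example values (an in-package sketch; the helper name and date are assumptions):

// exampleBandwidthKey shows the layout: 16 raw project ID bytes, one month
// byte (1..12), then the literal suffix, e.g. projectID[:] + 0x01 + ":bandwidth"
// for any date in January.
func exampleBandwidthKey(projectID uuid.UUID) string {
	jan := time.Date(2021, time.January, 14, 0, 0, 0, 0, time.UTC)
	key := createBandwidthProjectIDKey(projectID, jan)
	// len(key) == 16 + 1 + len(":bandwidth") == 27
	return key
}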


@ -13,7 +13,6 @@ import (
"storj.io/common/memory"
"storj.io/common/uuid"
"storj.io/storj/storage"
)
var mon = monkit.Package()
@ -48,6 +47,10 @@ func NewService(projectAccountingDB ProjectAccounting, liveAccounting Cache, lim
// ExceedsBandwidthUsage returns true if the bandwidth usage limits have been exceeded
// for a project in the past month (30 days). The usage limit (e.g. 25GB) is multiplied by the redundancy
// expansion factor, so that the uplinks have a raw limit.
//
// Among others, it can return any of the errors returned by
// storj.io/storj/satellite/accounting.Cache except ErrKeyNotFound, wrapped by
// ErrProjectUsage.
func (usage *Service) ExceedsBandwidthUsage(ctx context.Context, projectID uuid.UUID) (_ bool, limit memory.Size, err error) {
defer mon.Task()(&ctx)(&err)
@ -65,10 +68,9 @@ func (usage *Service) ExceedsBandwidthUsage(ctx context.Context, projectID uuid.
// Get the current bandwidth usage from cache.
bandwidthUsage, err = usage.liveAccounting.GetProjectBandwidthUsage(ctx, projectID, usage.nowFn())
if err != nil {
// Verify If the cache key was not found
if storage.ErrKeyNotFound.Has(err) {
if ErrKeyNotFound.Has(err) {
// Get current bandwidth value from database.
now := usage.nowFn()
@ -133,10 +135,17 @@ func (usage *Service) ExceedsStorageUsage(ctx context.Context, projectID uuid.UU
}
// GetProjectStorageTotals returns total amount of storage used by project.
//
// It can return any of the errors returned by
// storj.io/storj/satellite/accounting.Cache.GetProjectStorageUsage except
// ErrKeyNotFound, wrapped by ErrProjectUsage.
func (usage *Service) GetProjectStorageTotals(ctx context.Context, projectID uuid.UUID) (total int64, err error) {
defer mon.Task()(&ctx, projectID)(&err)
total, err = usage.liveAccounting.GetProjectStorageUsage(ctx, projectID)
if ErrKeyNotFound.Has(err) {
return 0, nil
}
return total, ErrProjectUsage.Wrap(err)
}
@ -181,11 +190,19 @@ func (usage *Service) UpdateProjectLimits(ctx context.Context, projectID uuid.UU
}
// GetProjectBandwidthUsage gets the current bandwidth usage from cache.
//
// It can return any of the errors returned by
// storj.io/storj/satellite/accounting.Cache.GetProjectBandwidthUsage, wrapped
// by ErrProjectUsage.
func (usage *Service) GetProjectBandwidthUsage(ctx context.Context, projectID uuid.UUID) (currentUsed int64, err error) {
return usage.liveAccounting.GetProjectBandwidthUsage(ctx, projectID, usage.nowFn())
}
// UpdateProjectBandwidthUsage increments the bandwidth cache key for a specific project.
//
// It can return any of the errors returned by
// storj.io/storj/satellite/accounting.Cache.UpdateProjectBandwidthUsage,
// wrapped by ErrProjectUsage.
func (usage *Service) UpdateProjectBandwidthUsage(ctx context.Context, projectID uuid.UUID, increment int64) (err error) {
return usage.liveAccounting.UpdateProjectBandwidthUsage(ctx, projectID, increment, usage.bandwidthCacheTTL, usage.nowFn())
}
@ -193,6 +210,10 @@ func (usage *Service) UpdateProjectBandwidthUsage(ctx context.Context, projectID
// AddProjectStorageUsage lets the live accounting know that the given
// project has just added spaceUsed bytes of storage (from the user's
// perspective; i.e. segment size).
//
// It can return any of the errors returned by
// storj.io/storj/satellite/accounting.Cache.AddProjectStorageUsage, wrapped by
// ErrProjectUsage.
func (usage *Service) AddProjectStorageUsage(ctx context.Context, projectID uuid.UUID, spaceUsed int64) (err error) {
defer mon.Task()(&ctx, projectID)(&err)
return usage.liveAccounting.AddProjectStorageUsage(ctx, projectID, spaceUsed)
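A hedged sketch of an endpoint-side caller honoring the documented contract: ErrKeyNotFound never escapes these service methods, so only wrapped backend failures remain (the helper name and status mapping are illustrative):

package example

import (
	"context"

	"storj.io/common/rpc/rpcstatus"
	"storj.io/common/uuid"
	"storj.io/storj/satellite/accounting"
)

// enforceBandwidth is a hypothetical helper mapping the errors to RPC statuses.
func enforceBandwidth(ctx context.Context, usage *accounting.Service, projectID uuid.UUID) error {
	exceeded, limit, err := usage.ExceedsBandwidthUsage(ctx, projectID)
	if err != nil {
		// An ErrProjectUsage-wrapped backend failure, per the contract above.
		return rpcstatus.Error(rpcstatus.Internal, err.Error())
	}
	if exceeded {
		return rpcstatus.Error(rpcstatus.ResourceExhausted, "exceeded bandwidth limit "+limit.String())
	}
	return nil
}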


@ -65,8 +65,9 @@ type API struct {
Servers *lifecycle.Group
Services *lifecycle.Group
Dialer rpc.Dialer
Server *server.Server
Dialer rpc.Dialer
Server *server.Server
ExternalAddress string
Version struct {
Chore *checker.Chore
@ -172,10 +173,12 @@ func NewAPI(log *zap.Logger, full *identity.FullIdentity, db DB,
pointerDB metainfo.PointerDB, metabaseDB metainfo.MetabaseDB, revocationDB extensions.RevocationDB,
liveAccounting accounting.Cache, rollupsWriteCache *orders.RollupsWriteCache,
config *Config, versionInfo version.Info, atomicLogLevel *zap.AtomicLevel) (*API, error) {
peer := &API{
Log: log,
Identity: full,
DB: db,
Log: log,
Identity: full,
DB: db,
ExternalAddress: config.Contact.ExternalAddress,
Servers: lifecycle.NewGroup(log.Named("servers")),
Services: lifecycle.NewGroup(log.Named("services")),
@ -235,6 +238,11 @@ func NewAPI(log *zap.Logger, full *identity.FullIdentity, db DB,
return nil, errs.Combine(err, peer.Close())
}
if peer.ExternalAddress == "" {
// not ideal, but better than nothing
peer.ExternalAddress = peer.Server.Addr().String()
}
peer.Servers.Add(lifecycle.Item{
Name: "server",
Run: func(ctx context.Context) error {
@ -267,11 +275,6 @@ func NewAPI(log *zap.Logger, full *identity.FullIdentity, db DB,
}
{ // setup contact service
c := config.Contact
if c.ExternalAddress == "" {
c.ExternalAddress = peer.Server.Addr().String()
}
pbVersion, err := versionInfo.Proto()
if err != nil {
return nil, errs.Combine(err, peer.Close())
@ -281,7 +284,7 @@ func NewAPI(log *zap.Logger, full *identity.FullIdentity, db DB,
Node: pb.Node{
Id: peer.ID(),
Address: &pb.NodeAddress{
Address: c.ExternalAddress,
Address: peer.Addr(),
},
},
Type: pb.NodeType_SATELLITE,
@ -718,7 +721,7 @@ func (peer *API) ID() storj.NodeID { return peer.Identity.ID }
// Addr returns the public address.
func (peer *API) Addr() string {
return peer.Contact.Service.Local().Node.Address.Address
return peer.ExternalAddress
}
// URL returns the storj.NodeURL.


@ -33,6 +33,7 @@ import (
"storj.io/common/storj"
"storj.io/common/uuid"
"storj.io/storj/private/web"
"storj.io/storj/satellite/accounting"
"storj.io/storj/satellite/console"
"storj.io/storj/satellite/console/consoleauth"
"storj.io/storj/satellite/console/consoleweb/consoleapi"
@ -607,6 +608,7 @@ func (server *Server) projectUsageLimitsHandler(w http.ResponseWriter, r *http.R
Error string `json:"error"`
}
// N.B. we are probably leaking internal details to the client
jsonError.Error = err.Error()
if err := json.NewEncoder(w).Encode(jsonError); err != nil {
@ -618,6 +620,8 @@ func (server *Server) projectUsageLimitsHandler(w http.ResponseWriter, r *http.R
switch {
case console.ErrUnauthorized.Has(err):
handleError(http.StatusUnauthorized, err)
case accounting.ErrInvalidArgument.Has(err):
handleError(http.StatusBadRequest, err)
default:
handleError(http.StatusInternalServerError, err)
}


@ -1514,6 +1514,9 @@ func (s *Service) GetBucketUsageRollups(ctx context.Context, projectID uuid.UUID
}
// GetProjectUsageLimits returns project limits and current usage.
//
// Among others, it can return any of the errors returned by
// storj.io/storj/satellite/accounting.Service, wrapped by Error.
func (s *Service) GetProjectUsageLimits(ctx context.Context, projectID uuid.UUID) (_ *ProjectUsageLimits, err error) {
defer mon.Task()(&ctx)(&err)


@ -70,13 +70,13 @@ func (loc BucketLocation) Verify() error {
// ParseCompactBucketPrefix parses BucketPrefix.
func ParseCompactBucketPrefix(compactPrefix []byte) (BucketLocation, error) {
if len(compactPrefix) < 16 {
if len(compactPrefix) < len(uuid.UUID{}) {
return BucketLocation{}, Error.New("invalid prefix %q", compactPrefix)
}
var loc BucketLocation
copy(loc.ProjectID[:], compactPrefix)
loc.BucketName = string(compactPrefix[16:])
loc.BucketName = string(compactPrefix[len(loc.ProjectID):])
return loc, nil
}
@ -87,7 +87,7 @@ func (loc BucketLocation) Prefix() BucketPrefix {
// CompactPrefix converts bucket location into bucket prefix with compact project ID.
func (loc BucketLocation) CompactPrefix() []byte {
xs := make([]byte, 0, 16+len(loc.BucketName))
xs := make([]byte, 0, len(loc.ProjectID)+len(loc.BucketName))
xs = append(xs, loc.ProjectID[:]...)
xs = append(xs, []byte(loc.BucketName)...)
return xs

View File
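Both hunks above replace the magic 16 with len(uuid.UUID{}) / len(loc.ProjectID); the encoding itself is unchanged: 16 project-ID bytes immediately followed by the raw bucket name. A round-trip sketch under that assumption (someProjectID is a placeholder):

	// Round-trip sketch for the functions above; illustrative only.
	loc := BucketLocation{ProjectID: someProjectID, BucketName: "photos"}
	compact := loc.CompactPrefix() // 16 ID bytes + "photos"
	parsed, err := ParseCompactBucketPrefix(compact)
	// err == nil and parsed == loc: exactly len(uuid.UUID{}) == 16 bytes
	// of project ID precede the bucket name.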

@ -1054,8 +1054,7 @@ func (endpoint *Endpoint) GetObjectIPs(ctx context.Context, req *pb.ObjectGetIPs
more := true
cursor := metabase.SegmentPosition{}
var nodeIDs []storj.NodeID
pieceCountByNodeID := map[storj.NodeID]int64{}
for more {
list, err := endpoint.metainfo.metabaseDB.ListSegments(ctx, metabase.ListSegments{
StreamID: object.StreamID,
@ -1068,27 +1067,43 @@ func (endpoint *Endpoint) GetObjectIPs(ctx context.Context, req *pb.ObjectGetIPs
for _, segment := range list.Segments {
for _, piece := range segment.Pieces {
nodeIDs = append(nodeIDs, piece.StorageNode)
pieceCountByNodeID[piece.StorageNode]++
}
cursor = segment.Position
}
more = list.More
}
nodes, err := endpoint.overlay.GetOnlineNodesForGetDelete(ctx, nodeIDs)
nodeIDs := make([]storj.NodeID, 0, len(pieceCountByNodeID))
for nodeID := range pieceCountByNodeID {
nodeIDs = append(nodeIDs, nodeID)
}
nodeIPMap, err := endpoint.overlay.GetNodeIPs(ctx, nodeIDs)
if err != nil {
return nil, rpcstatus.Error(rpcstatus.Internal, err.Error())
}
resp = &pb.ObjectGetIPsResponse{}
for _, node := range nodes {
address := node.Address.GetAddress()
if address != "" {
resp.Ips = append(resp.Ips, []byte(address))
nodeIPs := make([][]byte, 0, len(nodeIPMap))
pieceCount := int64(0)
reliablePieceCount := int64(0)
for nodeID, count := range pieceCountByNodeID {
pieceCount += count
ip, reliable := nodeIPMap[nodeID]
if !reliable {
continue
}
nodeIPs = append(nodeIPs, []byte(ip))
reliablePieceCount += count
}
return resp, nil
return &pb.ObjectGetIPsResponse{
Ips: nodeIPs,
SegmentCount: int64(object.SegmentCount),
ReliablePieceCount: reliablePieceCount,
PieceCount: pieceCount,
}, nil
}
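To spell out the new response arithmetic: every piece of every listed segment counts toward PieceCount, while only pieces whose node is still present in the selection cache (and therefore considered reliable) contribute an IP and add to ReliablePieceCount. A toy version of the same fold, with plain strings standing in for node IDs:

	// Toy aggregation mirroring the loop above; nodeIPMap plays the role
	// of the overlay.GetNodeIPs result.
	pieceCountByNode := map[string]int64{"a": 3, "b": 2}
	nodeIPMap := map[string]string{"a": "10.0.0.1:7777"} // "b" is not reliable
	var ips []string
	var pieceCount, reliablePieceCount int64
	for node, count := range pieceCountByNode {
		pieceCount += count // ends at 5
		ip, ok := nodeIPMap[node]
		if !ok {
			continue
		}
		ips = append(ips, ip)
		reliablePieceCount += count // ends at 3
	}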
// BeginSegment begins segment uploading.
@ -1116,16 +1131,8 @@ func (endpoint *Endpoint) BeginSegment(ctx context.Context, req *pb.SegmentBegin
return nil, rpcstatus.Error(rpcstatus.InvalidArgument, "segment index must be greater than 0")
}
exceeded, limit, err := endpoint.projectUsage.ExceedsStorageUsage(ctx, keyInfo.ProjectID)
if err != nil {
endpoint.log.Error("Retrieving project storage totals failed.", zap.Error(err))
}
if exceeded {
endpoint.log.Error("Monthly storage limit exceeded.",
zap.Stringer("Limit", limit),
zap.Stringer("Project ID", keyInfo.ProjectID),
)
return nil, rpcstatus.Error(rpcstatus.ResourceExhausted, "Exceeded Usage Limit")
if err := endpoint.checkExceedsStorageUsage(ctx, keyInfo.ProjectID); err != nil {
return nil, err
}
redundancy, err := eestream.NewRedundancyStrategyFromProto(streamID.Redundancy)
@ -1258,16 +1265,8 @@ func (endpoint *Endpoint) commitSegment(ctx context.Context, req *pb.SegmentComm
// return nil, nil, err
// }
exceeded, limit, err := endpoint.projectUsage.ExceedsStorageUsage(ctx, keyInfo.ProjectID)
if err != nil {
return nil, nil, rpcstatus.Error(rpcstatus.Internal, err.Error())
}
if exceeded {
endpoint.log.Error("The project limit of storage and bandwidth has been exceeded",
zap.Int64("limit", limit.Int64()),
zap.Stringer("Project ID", keyInfo.ProjectID),
)
return nil, nil, rpcstatus.Error(rpcstatus.ResourceExhausted, "Exceeded Usage Limit")
if err := endpoint.checkExceedsStorageUsage(ctx, keyInfo.ProjectID); err != nil {
return nil, nil, err
}
pieces := metabase.Pieces{}
@ -1305,12 +1304,13 @@ func (endpoint *Endpoint) commitSegment(ctx context.Context, req *pb.SegmentComm
}
if err := endpoint.projectUsage.AddProjectStorageUsage(ctx, keyInfo.ProjectID, segmentSize); err != nil {
endpoint.log.Error("Could not track new storage usage by project",
// log it and continue. it's most likely our own fault that we couldn't
// track it, and the only thing that will be affected is our per-project
// storage limits.
endpoint.log.Error("Could not track new project's storage usage",
zap.Stringer("Project ID", keyInfo.ProjectID),
zap.Error(err),
)
// but continue. it's most likely our own fault that we couldn't track it, and the only thing
// that will be affected is our per-project bandwidth and storage limits.
}
id, err := uuid.FromBytes(streamID.StreamId)
@ -1394,22 +1394,18 @@ func (endpoint *Endpoint) makeInlineSegment(ctx context.Context, req *pb.Segment
return nil, nil, rpcstatus.Error(rpcstatus.InvalidArgument, fmt.Sprintf("inline segment size cannot be larger than %s", endpoint.config.MaxInlineSegmentSize))
}
exceeded, limit, err := endpoint.projectUsage.ExceedsStorageUsage(ctx, keyInfo.ProjectID)
if err != nil {
return nil, nil, rpcstatus.Error(rpcstatus.Internal, err.Error())
}
if exceeded {
endpoint.log.Error("Monthly storage limit exceeded.",
zap.Stringer("Limit", limit),
zap.Stringer("Project ID", keyInfo.ProjectID),
)
return nil, nil, rpcstatus.Error(rpcstatus.ResourceExhausted, "Exceeded Usage Limit")
if err := endpoint.checkExceedsStorageUsage(ctx, keyInfo.ProjectID); err != nil {
return nil, nil, err
}
if err := endpoint.projectUsage.AddProjectStorageUsage(ctx, keyInfo.ProjectID, inlineUsed); err != nil {
endpoint.log.Error("Could not track new storage usage.", zap.Stringer("Project ID", keyInfo.ProjectID), zap.Error(err))
// but continue. it's most likely our own fault that we couldn't track it, and the only thing
// that will be affected is our per-project bandwidth and storage limits.
// log it and continue. it's most likely our own fault that we couldn't
// track it, and the only thing that will be affected is our per-project
// bandwidth and storage limits.
endpoint.log.Error("Could not track new project's storage usage",
zap.Stringer("Project ID", keyInfo.ProjectID),
zap.Error(err),
)
}
id, err := uuid.FromBytes(streamID.StreamId)
@ -1532,6 +1528,11 @@ func (endpoint *Endpoint) ListSegments(ctx context.Context, req *pb.SegmentListR
}, nil
}
// GetPendingObjects returns pending objects.
func (endpoint *Endpoint) GetPendingObjects(ctx context.Context, req *pb.GetPendingObjectsRequest) (resp *pb.GetPendingObjectsResponse, err error) {
return nil, rpcstatus.Error(rpcstatus.Unimplemented, "not implemented")
}
// DownloadSegment returns data necessary to download segment.
func (endpoint *Endpoint) DownloadSegment(ctx context.Context, req *pb.SegmentDownloadRequest) (resp *pb.SegmentDownloadResponse, err error) {
defer mon.Task()(&ctx)(&err)
@ -1553,12 +1554,10 @@ func (endpoint *Endpoint) DownloadSegment(ctx context.Context, req *pb.SegmentDo
bucket := metabase.BucketLocation{ProjectID: keyInfo.ProjectID, BucketName: string(streamID.Bucket)}
exceeded, limit, err := endpoint.projectUsage.ExceedsBandwidthUsage(ctx, keyInfo.ProjectID)
if err != nil {
endpoint.log.Error("Retrieving project bandwidth total failed.", zap.Error(err))
}
if exceeded {
endpoint.log.Error("Monthly bandwidth limit exceeded.",
if exceeded, limit, err := endpoint.projectUsage.ExceedsBandwidthUsage(ctx, keyInfo.ProjectID); err != nil {
endpoint.log.Error("Retrieving project bandwidth total failed; bandwidth limit won't be enforced", zap.Error(err))
} else if exceeded {
endpoint.log.Error("Monthly bandwidth limit exceeded",
zap.Stringer("Limit", limit),
zap.Stringer("Project ID", keyInfo.ProjectID),
)
@ -1599,7 +1598,13 @@ func (endpoint *Endpoint) DownloadSegment(ctx context.Context, req *pb.SegmentDo
// Update the current bandwidth cache value incrementing the SegmentSize.
err = endpoint.projectUsage.UpdateProjectBandwidthUsage(ctx, keyInfo.ProjectID, int64(segment.EncryptedSize))
if err != nil {
return nil, rpcstatus.Error(rpcstatus.Internal, err.Error())
// log it and continue. it's most likely our own fault that we couldn't
// track it, and the only thing that will be affected is our per-project
// bandwidth limits.
endpoint.log.Error("Could not track the new project's bandwidth usage",
zap.Stringer("Project ID", keyInfo.ProjectID),
zap.Error(err),
)
}
encryptedKeyNonce, err := storj.NonceFromBytes(segment.EncryptedKeyNonce)
@ -2056,6 +2061,26 @@ func (endpoint *Endpoint) RevokeAPIKey(ctx context.Context, req *pb.RevokeAPIKey
return &pb.RevokeAPIKeyResponse{}, nil
}
func (endpoint *Endpoint) checkExceedsStorageUsage(ctx context.Context, projectID uuid.UUID) (err error) {
defer mon.Task()(&ctx)(&err)
exceeded, limit, err := endpoint.projectUsage.ExceedsStorageUsage(ctx, projectID)
if err != nil {
endpoint.log.Error(
"Retrieving project storage totals failed; storage usage limit won't be enforced",
zap.Error(err),
)
} else if exceeded {
endpoint.log.Error("Monthly storage limit exceeded",
zap.Stringer("Limit", limit),
zap.Stringer("Project ID", projectID),
)
return rpcstatus.Error(rpcstatus.ResourceExhausted, "Exceeded Usage Limit")
}
return nil
}
// CreatePath creates a segment key.
func CreatePath(ctx context.Context, projectID uuid.UUID, segmentIndex uint32, bucket, path []byte) (_ metabase.SegmentLocation, err error) {
// TODO rename to CreateLocation

View File
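A pattern the metainfo hunks converge on: usage enforcement now fails open. When the accounting lookup itself errors, the endpoint logs and keeps serving; only an affirmative "exceeded" answer turns into ResourceExhausted, and failures to record new usage are likewise logged and swallowed. Condensed shape (illustrative, not a real API; mirrors checkExceedsStorageUsage and the tracking calls above):

	// Illustrative fail-open shape; usage and log stand in for the
	// endpoint's projectUsage and logger.
	func enforceFailOpen(ctx context.Context, projectID uuid.UUID) error {
		exceeded, limit, err := usage.ExceedsStorageUsage(ctx, projectID)
		if err != nil {
			log.Error("limit lookup failed; limit won't be enforced", zap.Error(err))
			return nil // serve the request anyway
		}
		if exceeded {
			log.Error("limit exceeded",
				zap.Stringer("Limit", limit),
				zap.Stringer("Project ID", projectID),
			)
			return rpcstatus.Error(rpcstatus.ResourceExhausted, "Exceeded Usage Limit")
		}
		return nil
	}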

@ -22,6 +22,8 @@ type State struct {
stats Stats
// netByID returns subnet based on storj.NodeID
netByID map[storj.NodeID]string
// ipPortByID returns ip:port based on storj.NodeID
ipPortByID map[storj.NodeID]string
// nonDistinct contains selectors for non-distinct selection.
nonDistinct struct {
Reputable SelectByID
@ -57,11 +59,14 @@ func NewState(reputableNodes, newNodes []*Node) *State {
state := &State{}
state.netByID = map[storj.NodeID]string{}
state.ipPortByID = map[storj.NodeID]string{}
for _, node := range reputableNodes {
state.netByID[node.ID] = node.LastNet
state.ipPortByID[node.ID] = node.LastIPPort
}
for _, node := range newNodes {
state.netByID[node.ID] = node.LastNet
state.ipPortByID[node.ID] = node.LastIPPort
}
state.nonDistinct.Reputable = SelectByID(reputableNodes)
@ -135,6 +140,18 @@ func (state *State) Select(ctx context.Context, request Request) (_ []*Node, err
return selected, nil
}
// IPs returns node ip:port for nodes that are in state.
func (state *State) IPs(ctx context.Context, nodes []storj.NodeID) map[storj.NodeID]string {
defer mon.Task()(&ctx)(nil)
xs := make(map[storj.NodeID]string, len(nodes))
for _, nodeID := range nodes {
if ip, exists := state.ipPortByID[nodeID]; exists {
xs[nodeID] = ip
}
}
return xs
}
// Stats returns state information.
func (state *State) Stats() Stats {
state.mu.RLock()

View File

@ -203,3 +203,27 @@ next:
return xs
}
func TestState_IPs(t *testing.T) {
ctx := testcontext.New(t)
defer ctx.Cleanup()
reputableNodes := createRandomNodes(2, "1.0.1")
newNodes := createRandomNodes(2, "1.0.3")
state := nodeselection.NewState(reputableNodes, newNodes)
nodeIPs := state.IPs(ctx, nil)
require.Equal(t, map[storj.NodeID]string{}, nodeIPs)
missing := storj.NodeID{}
nodeIPs = state.IPs(ctx, []storj.NodeID{
reputableNodes[0].ID,
newNodes[1].ID,
missing,
})
require.Equal(t, map[storj.NodeID]string{
reputableNodes[0].ID: "1.0.1.0:8080",
newNodes[1].ID: "1.0.3.1:8080",
}, nodeIPs)
}

View File

@ -174,8 +174,6 @@ func (signer *Signer) Sign(ctx context.Context, node storj.NodeURL, pieceNum int
OrderCreation: signer.OrderCreation,
OrderExpiration: signer.OrderExpiration,
SatelliteAddress: nil,
EncryptedMetadataKeyId: signer.EncryptedMetadataKeyID,
EncryptedMetadata: signer.EncryptedMetadata,
}

View File

@ -118,6 +118,26 @@ func (cache *NodeSelectionCache) GetNodes(ctx context.Context, req FindStorageNo
return convNodesToSelectedNodes(selected), err
}
// GetNodeIPs gets the last node ip:port from the cache, refreshing when needed.
func (cache *NodeSelectionCache) GetNodeIPs(ctx context.Context, nodes []storj.NodeID) (_ map[storj.NodeID]string, err error) {
defer mon.Task()(&ctx)(&err)
cache.mu.RLock()
lastRefresh := cache.lastRefresh
state := cache.state
cache.mu.RUnlock()
// if the cache is stale, refresh it before looking up the IPs
if state == nil || time.Since(lastRefresh) > cache.staleness {
state, err = cache.refresh(ctx)
if err != nil {
return nil, err
}
}
return state.IPs(ctx, nodes), nil
}
// Size returns how many reputable nodes and new nodes are in the cache.
func (cache *NodeSelectionCache) Size() (reputableNodeCount int, newNodeCount int) {
cache.mu.RLock()

View File
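From the caller's side, GetNodeIPs follows the same staleness contract as GetNodes: a stale or empty cache triggers a synchronous refresh, and node IDs absent from the refreshed state are simply missing from the returned map rather than producing an error. Sketch (assumes an initialized cache):

	// Illustrative usage; cache is an initialized *NodeSelectionCache.
	ips, err := cache.GetNodeIPs(ctx, []storj.NodeID{id1, id2})
	if err != nil {
		return err // the refresh failed while the cache was stale
	}
	if ip, ok := ips[id1]; ok {
		_ = ip // node is known to the cache; ok == false means it isn't
	}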

@ -87,11 +87,6 @@ type DB interface {
// GetNodesNetwork returns the /24 subnet for each storage node, order is not guaranteed.
GetNodesNetwork(ctx context.Context, nodeIDs []storj.NodeID) (nodeNets []string, err error)
// GetSuccesfulNodesNotCheckedInSince returns all nodes whose last check-in was successful but that haven't checked in within the given duration.
GetSuccesfulNodesNotCheckedInSince(ctx context.Context, duration time.Duration) (nodeAddresses []NodeLastContact, err error)
// GetOfflineNodesLimited returns a list of the first N offline nodes ordered by least recently contacted.
GetOfflineNodesLimited(ctx context.Context, limit int) ([]NodeLastContact, error)
// DisqualifyNode disqualifies a storage node.
DisqualifyNode(ctx context.Context, nodeID storj.NodeID) (err error)
@ -319,6 +314,12 @@ func (service *Service) GetOnlineNodesForGetDelete(ctx context.Context, nodeIDs
return service.db.GetOnlineNodesForGetDelete(ctx, nodeIDs, service.config.Node.OnlineWindow)
}
// GetNodeIPs returns a map of node ip:port for the supplied nodeIDs.
func (service *Service) GetNodeIPs(ctx context.Context, nodeIDs []storj.NodeID) (_ map[storj.NodeID]string, err error) {
defer mon.Task()(&ctx)(&err)
return service.SelectionCache.GetNodeIPs(ctx, nodeIDs)
}
// IsOnline checks if a node is 'online' based on the collected statistics.
func (service *Service) IsOnline(node *NodeDossier) bool {
return time.Since(node.Reputation.LastContactSuccess) < service.config.Node.OnlineWindow
@ -491,13 +492,6 @@ func (service *Service) UpdateCheckIn(ctx context.Context, node NodeCheckInInfo,
return service.db.UpdateCheckIn(ctx, node, timestamp, service.config.Node)
}
// GetSuccesfulNodesNotCheckedInSince returns all nodes whose last check-in was successful but that haven't checked in within the given duration.
func (service *Service) GetSuccesfulNodesNotCheckedInSince(ctx context.Context, duration time.Duration) (nodeLastContacts []NodeLastContact, err error) {
defer mon.Task()(&ctx)(&err)
return service.db.GetSuccesfulNodesNotCheckedInSince(ctx, duration)
}
// GetMissingPieces returns the list of offline nodes.
func (service *Service) GetMissingPieces(ctx context.Context, pieces metabase.Pieces) (missingPieces []uint16, err error) {
defer mon.Task()(&ctx)(&err)
@ -526,12 +520,6 @@ func (service *Service) DisqualifyNode(ctx context.Context, nodeID storj.NodeID)
return service.db.DisqualifyNode(ctx, nodeID)
}
// GetOfflineNodesLimited returns a list of the first N offline nodes ordered by least recently contacted.
func (service *Service) GetOfflineNodesLimited(ctx context.Context, limit int) (offlineNodes []NodeLastContact, err error) {
defer mon.Task()(&ctx)(&err)
return service.db.GetOfflineNodesLimited(ctx, limit)
}
// ResolveIPAndNetwork resolves the target address and determines its IP and its /24 subnet (IPv4) or /64 subnet (IPv6).
func ResolveIPAndNetwork(ctx context.Context, target string) (ipPort, network string, err error) {
defer mon.Task()(&ctx)(&err)

View File

@ -645,101 +645,6 @@ func TestUpdateCheckIn(t *testing.T) {
})
}
func TestCache_DowntimeTracking(t *testing.T) {
satellitedbtest.Run(t, func(ctx *testcontext.Context, t *testing.T, db satellite.DB) {
cache := db.OverlayCache()
defaults := overlay.NodeSelectionConfig{}
totalNodes := 10
allIDs := make(storj.NodeIDList, totalNodes)
// put nodes in cache
for i := 0; i < totalNodes; i++ {
newID := testrand.NodeID()
addr := fmt.Sprintf("127.0.%d.0:8080", 0)
lastNet := fmt.Sprintf("127.0.%d", 0)
d := overlay.NodeCheckInInfo{
NodeID: newID,
Address: &pb.NodeAddress{Address: addr, Transport: pb.NodeTransport_TCP_TLS_GRPC},
LastIPPort: addr,
LastNet: lastNet,
Version: &pb.NodeVersion{Version: "v1.0.0"},
Capacity: &pb.NodeCapacity{},
IsUp: true,
}
err := cache.UpdateCheckIn(ctx, d, time.Now().UTC(), defaults)
require.NoError(t, err)
allIDs[i] = newID
// make half of the nodes (0, 2, 4, 6, 8) offline + not disqualified
if i%2 == 0 {
_, err := cache.UpdateUptime(ctx, newID, false)
require.NoError(t, err)
}
// make first node (0) offline + disqualified
if i == 0 {
_, err := cache.UpdateUptime(ctx, newID, false)
require.NoError(t, err)
err = cache.DisqualifyNode(ctx, newID)
require.NoError(t, err)
}
}
nodes, err := cache.GetOfflineNodesLimited(ctx, 10)
require.NoError(t, err)
require.Len(t, nodes, 4)
// order of nodes should be least recently checked first
require.Equal(t, allIDs[2], nodes[0].URL.ID)
require.Equal(t, allIDs[4], nodes[1].URL.ID)
require.Equal(t, allIDs[6], nodes[2].URL.ID)
require.Equal(t, allIDs[8], nodes[3].URL.ID)
// test with limit
nodes, err = cache.GetOfflineNodesLimited(ctx, 2)
require.NoError(t, err)
require.Len(t, nodes, 2)
// order of nodes should be least recently checked first
require.Equal(t, allIDs[2], nodes[0].URL.ID)
require.Equal(t, allIDs[4], nodes[1].URL.ID)
})
}
func TestGetSuccesfulNodesNotCheckedInSince(t *testing.T) {
satellitedbtest.Run(t, func(ctx *testcontext.Context, t *testing.T, db satellite.DB) { // setup
info1 := getNodeInfo(testrand.NodeID())
info2 := getNodeInfo(testrand.NodeID())
{ // check-in the nodes, which should add them
twoHoursAgo := time.Now().Add(-2 * time.Hour)
err := db.OverlayCache().UpdateCheckIn(ctx, info1, twoHoursAgo, overlay.NodeSelectionConfig{})
require.NoError(t, err)
err = db.OverlayCache().UpdateCheckIn(ctx, info2, twoHoursAgo, overlay.NodeSelectionConfig{})
require.NoError(t, err)
// update uptime so that node 2 has a last contact failure > last contact success
_, err = db.OverlayCache().UpdateUptime(ctx, info2.NodeID, false)
require.NoError(t, err)
// should just get 1 node
nodeLastContacts, err := db.OverlayCache().GetSuccesfulNodesNotCheckedInSince(ctx, time.Duration(0))
require.NoError(t, err)
require.Len(t, nodeLastContacts, 1)
require.WithinDuration(t, twoHoursAgo, nodeLastContacts[0].LastContactSuccess, time.Second)
require.True(t, nodeLastContacts[0].LastContactFailure.IsZero())
}
{ // check-in again with current time
err := db.OverlayCache().UpdateCheckIn(ctx, info1, time.Now(), overlay.NodeSelectionConfig{})
require.NoError(t, err)
nodeLastContacts, err := db.OverlayCache().GetSuccesfulNodesNotCheckedInSince(ctx, time.Minute)
require.NoError(t, err)
require.Len(t, nodeLastContacts, 0)
}
})
}
// TestSuspendedSelection ensures that suspended nodes are not selected by SelectStorageNodes.
func TestSuspendedSelection(t *testing.T) {
totalNodes := 10

View File

@ -154,8 +154,8 @@ model node (
field audit_success_count int64 ( updatable, default 0 )
field total_audit_count int64 ( updatable, default 0 )
field vetted_at timestamp ( updatable, nullable )
field uptime_success_count int64 ( updatable )
field total_uptime_count int64 ( updatable )
field uptime_success_count int64 ( updatable, default 0 )
field total_uptime_count int64 ( updatable, default 0 )
field created_at timestamp ( autoinsert, default current_timestamp )
field updated_at timestamp ( autoinsert, autoupdate, default current_timestamp )

View File

@ -460,8 +460,8 @@ CREATE TABLE nodes (
audit_success_count bigint NOT NULL DEFAULT 0,
total_audit_count bigint NOT NULL DEFAULT 0,
vetted_at timestamp with time zone,
uptime_success_count bigint NOT NULL,
total_uptime_count bigint NOT NULL,
uptime_success_count bigint NOT NULL DEFAULT 0,
total_uptime_count bigint NOT NULL DEFAULT 0,
created_at timestamp with time zone NOT NULL DEFAULT current_timestamp,
updated_at timestamp with time zone NOT NULL DEFAULT current_timestamp,
last_contact_success timestamp with time zone NOT NULL DEFAULT 'epoch',
@ -1012,8 +1012,8 @@ CREATE TABLE nodes (
audit_success_count bigint NOT NULL DEFAULT 0,
total_audit_count bigint NOT NULL DEFAULT 0,
vetted_at timestamp with time zone,
uptime_success_count bigint NOT NULL,
total_uptime_count bigint NOT NULL,
uptime_success_count bigint NOT NULL DEFAULT 0,
total_uptime_count bigint NOT NULL DEFAULT 0,
created_at timestamp with time zone NOT NULL DEFAULT current_timestamp,
updated_at timestamp with time zone NOT NULL DEFAULT current_timestamp,
last_contact_success timestamp with time zone NOT NULL DEFAULT 'epoch',
@ -3302,6 +3302,8 @@ type Node_Create_Fields struct {
AuditSuccessCount Node_AuditSuccessCount_Field
TotalAuditCount Node_TotalAuditCount_Field
VettedAt Node_VettedAt_Field
UptimeSuccessCount Node_UptimeSuccessCount_Field
TotalUptimeCount Node_TotalUptimeCount_Field
LastContactSuccess Node_LastContactSuccess_Field
LastContactFailure Node_LastContactFailure_Field
Contained Node_Contained_Field
@ -9105,8 +9107,6 @@ func (obj *pgxImpl) CreateNoReturn_Node(ctx context.Context,
node_last_net Node_LastNet_Field,
node_email Node_Email_Field,
node_wallet Node_Wallet_Field,
node_uptime_success_count Node_UptimeSuccessCount_Field,
node_total_uptime_count Node_TotalUptimeCount_Field,
optional Node_Create_Fields) (
err error) {
defer mon.Task()(&ctx)(&err)
@ -9116,8 +9116,6 @@ func (obj *pgxImpl) CreateNoReturn_Node(ctx context.Context,
__email_val := node_email.value()
__wallet_val := node_wallet.value()
__vetted_at_val := optional.VettedAt.value()
__uptime_success_count_val := node_uptime_success_count.value()
__total_uptime_count_val := node_total_uptime_count.value()
__disqualified_val := optional.Disqualified.value()
__suspended_val := optional.Suspended.value()
__unknown_audit_suspended_val := optional.UnknownAuditSuspended.value()
@ -9127,14 +9125,14 @@ func (obj *pgxImpl) CreateNoReturn_Node(ctx context.Context,
__exit_loop_completed_at_val := optional.ExitLoopCompletedAt.value()
__exit_finished_at_val := optional.ExitFinishedAt.value()
var __columns = &__sqlbundle_Hole{SQL: __sqlbundle_Literal("id, last_net, last_ip_port, email, wallet, vetted_at, uptime_success_count, total_uptime_count, disqualified, suspended, unknown_audit_suspended, offline_suspended, under_review, exit_initiated_at, exit_loop_completed_at, exit_finished_at")}
var __placeholders = &__sqlbundle_Hole{SQL: __sqlbundle_Literal("?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?")}
var __columns = &__sqlbundle_Hole{SQL: __sqlbundle_Literal("id, last_net, last_ip_port, email, wallet, vetted_at, disqualified, suspended, unknown_audit_suspended, offline_suspended, under_review, exit_initiated_at, exit_loop_completed_at, exit_finished_at")}
var __placeholders = &__sqlbundle_Hole{SQL: __sqlbundle_Literal("?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?")}
var __clause = &__sqlbundle_Hole{SQL: __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("("), __columns, __sqlbundle_Literal(") VALUES ("), __placeholders, __sqlbundle_Literal(")")}}}
var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("INSERT INTO nodes "), __clause}}
var __values []interface{}
__values = append(__values, __id_val, __last_net_val, __last_ip_port_val, __email_val, __wallet_val, __vetted_at_val, __uptime_success_count_val, __total_uptime_count_val, __disqualified_val, __suspended_val, __unknown_audit_suspended_val, __offline_suspended_val, __under_review_val, __exit_initiated_at_val, __exit_loop_completed_at_val, __exit_finished_at_val)
__values = append(__values, __id_val, __last_net_val, __last_ip_port_val, __email_val, __wallet_val, __vetted_at_val, __disqualified_val, __suspended_val, __unknown_audit_suspended_val, __offline_suspended_val, __under_review_val, __exit_initiated_at_val, __exit_loop_completed_at_val, __exit_finished_at_val)
__optional_columns := __sqlbundle_Literals{Join: ", "}
__optional_placeholders := __sqlbundle_Literals{Join: ", "}
@ -9217,6 +9215,18 @@ func (obj *pgxImpl) CreateNoReturn_Node(ctx context.Context,
__optional_placeholders.SQLs = append(__optional_placeholders.SQLs, __sqlbundle_Literal("?"))
}
if optional.UptimeSuccessCount._set {
__values = append(__values, optional.UptimeSuccessCount.value())
__optional_columns.SQLs = append(__optional_columns.SQLs, __sqlbundle_Literal("uptime_success_count"))
__optional_placeholders.SQLs = append(__optional_placeholders.SQLs, __sqlbundle_Literal("?"))
}
if optional.TotalUptimeCount._set {
__values = append(__values, optional.TotalUptimeCount.value())
__optional_columns.SQLs = append(__optional_columns.SQLs, __sqlbundle_Literal("total_uptime_count"))
__optional_placeholders.SQLs = append(__optional_placeholders.SQLs, __sqlbundle_Literal("?"))
}
if optional.LastContactSuccess._set {
__values = append(__values, optional.LastContactSuccess.value())
__optional_columns.SQLs = append(__optional_columns.SQLs, __sqlbundle_Literal("last_contact_success"))
@ -15858,8 +15868,6 @@ func (obj *pgxcockroachImpl) CreateNoReturn_Node(ctx context.Context,
node_last_net Node_LastNet_Field,
node_email Node_Email_Field,
node_wallet Node_Wallet_Field,
node_uptime_success_count Node_UptimeSuccessCount_Field,
node_total_uptime_count Node_TotalUptimeCount_Field,
optional Node_Create_Fields) (
err error) {
defer mon.Task()(&ctx)(&err)
@ -15869,8 +15877,6 @@ func (obj *pgxcockroachImpl) CreateNoReturn_Node(ctx context.Context,
__email_val := node_email.value()
__wallet_val := node_wallet.value()
__vetted_at_val := optional.VettedAt.value()
__uptime_success_count_val := node_uptime_success_count.value()
__total_uptime_count_val := node_total_uptime_count.value()
__disqualified_val := optional.Disqualified.value()
__suspended_val := optional.Suspended.value()
__unknown_audit_suspended_val := optional.UnknownAuditSuspended.value()
@ -15880,14 +15886,14 @@ func (obj *pgxcockroachImpl) CreateNoReturn_Node(ctx context.Context,
__exit_loop_completed_at_val := optional.ExitLoopCompletedAt.value()
__exit_finished_at_val := optional.ExitFinishedAt.value()
var __columns = &__sqlbundle_Hole{SQL: __sqlbundle_Literal("id, last_net, last_ip_port, email, wallet, vetted_at, uptime_success_count, total_uptime_count, disqualified, suspended, unknown_audit_suspended, offline_suspended, under_review, exit_initiated_at, exit_loop_completed_at, exit_finished_at")}
var __placeholders = &__sqlbundle_Hole{SQL: __sqlbundle_Literal("?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?")}
var __columns = &__sqlbundle_Hole{SQL: __sqlbundle_Literal("id, last_net, last_ip_port, email, wallet, vetted_at, disqualified, suspended, unknown_audit_suspended, offline_suspended, under_review, exit_initiated_at, exit_loop_completed_at, exit_finished_at")}
var __placeholders = &__sqlbundle_Hole{SQL: __sqlbundle_Literal("?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?")}
var __clause = &__sqlbundle_Hole{SQL: __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("("), __columns, __sqlbundle_Literal(") VALUES ("), __placeholders, __sqlbundle_Literal(")")}}}
var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("INSERT INTO nodes "), __clause}}
var __values []interface{}
__values = append(__values, __id_val, __last_net_val, __last_ip_port_val, __email_val, __wallet_val, __vetted_at_val, __uptime_success_count_val, __total_uptime_count_val, __disqualified_val, __suspended_val, __unknown_audit_suspended_val, __offline_suspended_val, __under_review_val, __exit_initiated_at_val, __exit_loop_completed_at_val, __exit_finished_at_val)
__values = append(__values, __id_val, __last_net_val, __last_ip_port_val, __email_val, __wallet_val, __vetted_at_val, __disqualified_val, __suspended_val, __unknown_audit_suspended_val, __offline_suspended_val, __under_review_val, __exit_initiated_at_val, __exit_loop_completed_at_val, __exit_finished_at_val)
__optional_columns := __sqlbundle_Literals{Join: ", "}
__optional_placeholders := __sqlbundle_Literals{Join: ", "}
@ -15970,6 +15976,18 @@ func (obj *pgxcockroachImpl) CreateNoReturn_Node(ctx context.Context,
__optional_placeholders.SQLs = append(__optional_placeholders.SQLs, __sqlbundle_Literal("?"))
}
if optional.UptimeSuccessCount._set {
__values = append(__values, optional.UptimeSuccessCount.value())
__optional_columns.SQLs = append(__optional_columns.SQLs, __sqlbundle_Literal("uptime_success_count"))
__optional_placeholders.SQLs = append(__optional_placeholders.SQLs, __sqlbundle_Literal("?"))
}
if optional.TotalUptimeCount._set {
__values = append(__values, optional.TotalUptimeCount.value())
__optional_columns.SQLs = append(__optional_columns.SQLs, __sqlbundle_Literal("total_uptime_count"))
__optional_placeholders.SQLs = append(__optional_placeholders.SQLs, __sqlbundle_Literal("?"))
}
if optional.LastContactSuccess._set {
__values = append(__values, optional.LastContactSuccess.value())
__optional_columns.SQLs = append(__optional_columns.SQLs, __sqlbundle_Literal("last_contact_success"))
@ -22814,15 +22832,13 @@ func (rx *Rx) CreateNoReturn_Node(ctx context.Context,
node_last_net Node_LastNet_Field,
node_email Node_Email_Field,
node_wallet Node_Wallet_Field,
node_uptime_success_count Node_UptimeSuccessCount_Field,
node_total_uptime_count Node_TotalUptimeCount_Field,
optional Node_Create_Fields) (
err error) {
var tx *Tx
if tx, err = rx.getTx(ctx); err != nil {
return
}
return tx.CreateNoReturn_Node(ctx, node_id, node_last_net, node_email, node_wallet, node_uptime_success_count, node_total_uptime_count, optional)
return tx.CreateNoReturn_Node(ctx, node_id, node_last_net, node_email, node_wallet, optional)
}
@ -24458,8 +24474,6 @@ type Methods interface {
node_last_net Node_LastNet_Field,
node_email Node_Email_Field,
node_wallet Node_Wallet_Field,
node_uptime_success_count Node_UptimeSuccessCount_Field,
node_total_uptime_count Node_TotalUptimeCount_Field,
optional Node_Create_Fields) (
err error)

View File
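For callers of the generated layer, the uptime counts move from required positional arguments to optional Node_Create_Fields that fall back to the new column defaults. A hedged before/after sketch of a create call (assumes the usual dbx field constructors):

	// Before (removed signature):
	//   db.CreateNoReturn_Node(ctx, id, lastNet, email, wallet,
	//       dbx.Node_UptimeSuccessCount(0), dbx.Node_TotalUptimeCount(0), optional)
	// After: omit the counts to take the DEFAULT 0, or set them explicitly.
	err := db.CreateNoReturn_Node(ctx, id, lastNet, email, wallet,
		dbx.Node_Create_Fields{
			UptimeSuccessCount: dbx.Node_UptimeSuccessCount(1),
			TotalUptimeCount:   dbx.Node_TotalUptimeCount(1),
		})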

@ -140,8 +140,8 @@ CREATE TABLE nodes (
audit_success_count bigint NOT NULL DEFAULT 0,
total_audit_count bigint NOT NULL DEFAULT 0,
vetted_at timestamp with time zone,
uptime_success_count bigint NOT NULL,
total_uptime_count bigint NOT NULL,
uptime_success_count bigint NOT NULL DEFAULT 0,
total_uptime_count bigint NOT NULL DEFAULT 0,
created_at timestamp with time zone NOT NULL DEFAULT current_timestamp,
updated_at timestamp with time zone NOT NULL DEFAULT current_timestamp,
last_contact_success timestamp with time zone NOT NULL DEFAULT 'epoch',

View File

@ -140,8 +140,8 @@ CREATE TABLE nodes (
audit_success_count bigint NOT NULL DEFAULT 0,
total_audit_count bigint NOT NULL DEFAULT 0,
vetted_at timestamp with time zone,
uptime_success_count bigint NOT NULL,
total_uptime_count bigint NOT NULL,
uptime_success_count bigint NOT NULL DEFAULT 0,
total_uptime_count bigint NOT NULL DEFAULT 0,
created_at timestamp with time zone NOT NULL DEFAULT current_timestamp,
updated_at timestamp with time zone NOT NULL DEFAULT current_timestamp,
last_contact_success timestamp with time zone NOT NULL DEFAULT 'epoch',

View File

@ -1167,6 +1167,15 @@ func (db *satelliteDB) PostgresMigration() *migrate.Migration {
`DROP TABLE nodes_offline_times;`,
},
},
{
DB: &db.migrationDB,
Description: "set default on uptime count columns",
Version: 139,
Action: migrate.SQL{
`ALTER TABLE nodes ALTER COLUMN uptime_success_count SET DEFAULT 0;`,
`ALTER TABLE nodes ALTER COLUMN total_uptime_count SET DEFAULT 0;`,
},
},
},
}
}

View File
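Migration step 139 only alters column defaults, which is what allows CreateNoReturn_Node above to omit the counts; changing a default does not rewrite existing rows on either backend. A hedged verification sketch against a migrated database (illustrative, not part of the patch; connection setup and error handling elided):

	// After step 139 an insert may omit the uptime counts and they read
	// back as 0 via the new DEFAULT.
	_, err := db.ExecContext(ctx,
		`INSERT INTO nodes ( id, last_net, email, wallet ) VALUES ( $1, '', '', '' )`, id)
	var total int64
	err = db.QueryRowContext(ctx,
		`SELECT total_uptime_count FROM nodes WHERE id = $1`, id).Scan(&total)
	// total == 0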

@ -1003,38 +1003,6 @@ func (cache *overlaycache) UpdateExitStatus(ctx context.Context, request *overla
return convertDBNode(ctx, dbNode)
}
// GetSuccesfulNodesNotCheckedInSince returns all nodes whose last check-in was successful but that haven't checked in within the given duration.
func (cache *overlaycache) GetSuccesfulNodesNotCheckedInSince(ctx context.Context, duration time.Duration) (nodeLastContacts []overlay.NodeLastContact, err error) {
// get successful nodes that have not checked in within the given duration
defer mon.Task()(&ctx)(&err)
dbxNodes, err := cache.db.DB.All_Node_Id_Node_Address_Node_LastIpPort_Node_LastContactSuccess_Node_LastContactFailure_By_LastContactSuccess_Less_And_LastContactSuccess_Greater_LastContactFailure_And_Disqualified_Is_Null_OrderBy_Asc_LastContactSuccess(
ctx, dbx.Node_LastContactSuccess(time.Now().UTC().Add(-duration)))
if err != nil {
return nil, Error.Wrap(err)
}
for _, node := range dbxNodes {
nodeID, err := storj.NodeIDFromBytes(node.Id)
if err != nil {
return nil, err
}
nodeLastContact := overlay.NodeLastContact{
URL: storj.NodeURL{ID: nodeID, Address: node.Address},
LastContactSuccess: node.LastContactSuccess.UTC(),
LastContactFailure: node.LastContactFailure.UTC(),
}
if node.LastIpPort != nil {
nodeLastContact.LastIPPort = *node.LastIpPort
}
nodeLastContacts = append(nodeLastContacts, nodeLastContact)
}
return nodeLastContacts, nil
}
func populateExitStatusFields(req *overlay.ExitStatusRequest) dbx.Node_Update_Fields {
dbxUpdateFields := dbx.Node_Update_Fields{}
@ -1052,36 +1020,6 @@ func populateExitStatusFields(req *overlay.ExitStatusRequest) dbx.Node_Update_Fi
return dbxUpdateFields
}
// GetOfflineNodesLimited returns a list of the first N offline nodes ordered by least recently contacted.
func (cache *overlaycache) GetOfflineNodesLimited(ctx context.Context, limit int) (nodeLastContacts []overlay.NodeLastContact, err error) {
defer mon.Task()(&ctx)(&err)
dbxNodes, err := cache.db.DB.Limited_Node_Id_Node_Address_Node_LastIpPort_Node_LastContactSuccess_Node_LastContactFailure_By_LastContactSuccess_Less_LastContactFailure_And_Disqualified_Is_Null_OrderBy_Asc_LastContactFailure(
ctx, limit, 0)
if err != nil {
return nil, Error.Wrap(err)
}
for _, node := range dbxNodes {
nodeID, err := storj.NodeIDFromBytes(node.Id)
if err != nil {
return nil, err
}
nodeLastContact := overlay.NodeLastContact{
URL: storj.NodeURL{ID: nodeID, Address: node.Address},
LastContactSuccess: node.LastContactSuccess.UTC(),
LastContactFailure: node.LastContactFailure.UTC(),
}
if node.LastIpPort != nil {
nodeLastContact.LastIPPort = *node.LastIpPort
}
nodeLastContacts = append(nodeLastContacts, nodeLastContact)
}
return nodeLastContacts, nil
}
func convertDBNode(ctx context.Context, info *dbx.Node) (_ *overlay.NodeDossier, err error) {
if info == nil {
return nil, Error.New("missing info")

View File

@ -0,0 +1,571 @@
-- AUTOGENERATED BY storj.io/dbx
-- DO NOT EDIT
CREATE TABLE accounting_rollups (
node_id bytea NOT NULL,
start_time timestamp with time zone NOT NULL,
put_total bigint NOT NULL,
get_total bigint NOT NULL,
get_audit_total bigint NOT NULL,
get_repair_total bigint NOT NULL,
put_repair_total bigint NOT NULL,
at_rest_total double precision NOT NULL,
PRIMARY KEY ( node_id, start_time )
);
CREATE TABLE accounting_timestamps (
name text NOT NULL,
value timestamp with time zone NOT NULL,
PRIMARY KEY ( name )
);
CREATE TABLE audit_histories (
node_id bytea NOT NULL,
history bytea NOT NULL,
PRIMARY KEY ( node_id )
);
CREATE TABLE bucket_bandwidth_rollups (
bucket_name bytea NOT NULL,
project_id bytea NOT NULL,
interval_start timestamp with time zone NOT NULL,
interval_seconds integer NOT NULL,
action integer NOT NULL,
inline bigint NOT NULL,
allocated bigint NOT NULL,
settled bigint NOT NULL,
PRIMARY KEY ( bucket_name, project_id, interval_start, action )
);
CREATE TABLE bucket_storage_tallies (
bucket_name bytea NOT NULL,
project_id bytea NOT NULL,
interval_start timestamp with time zone NOT NULL,
inline bigint NOT NULL,
remote bigint NOT NULL,
remote_segments_count integer NOT NULL,
inline_segments_count integer NOT NULL,
object_count integer NOT NULL,
metadata_size bigint NOT NULL,
PRIMARY KEY ( bucket_name, project_id, interval_start )
);
CREATE TABLE coinpayments_transactions (
id text NOT NULL,
user_id bytea NOT NULL,
address text NOT NULL,
amount bytea NOT NULL,
received bytea NOT NULL,
status integer NOT NULL,
key text NOT NULL,
timeout integer NOT NULL,
created_at timestamp with time zone NOT NULL,
PRIMARY KEY ( id )
);
CREATE TABLE consumed_serials (
storage_node_id bytea NOT NULL,
serial_number bytea NOT NULL,
expires_at timestamp with time zone NOT NULL,
PRIMARY KEY ( storage_node_id, serial_number )
);
CREATE TABLE coupons (
id bytea NOT NULL,
user_id bytea NOT NULL,
amount bigint NOT NULL,
description text NOT NULL,
type integer NOT NULL,
status integer NOT NULL,
duration bigint NOT NULL,
created_at timestamp with time zone NOT NULL,
PRIMARY KEY ( id )
);
CREATE TABLE coupon_usages (
coupon_id bytea NOT NULL,
amount bigint NOT NULL,
status integer NOT NULL,
period timestamp with time zone NOT NULL,
PRIMARY KEY ( coupon_id, period )
);
CREATE TABLE graceful_exit_progress (
node_id bytea NOT NULL,
bytes_transferred bigint NOT NULL,
pieces_transferred bigint NOT NULL DEFAULT 0,
pieces_failed bigint NOT NULL DEFAULT 0,
updated_at timestamp with time zone NOT NULL,
PRIMARY KEY ( node_id )
);
CREATE TABLE graceful_exit_transfer_queue (
node_id bytea NOT NULL,
path bytea NOT NULL,
piece_num integer NOT NULL,
root_piece_id bytea,
durability_ratio double precision NOT NULL,
queued_at timestamp with time zone NOT NULL,
requested_at timestamp with time zone,
last_failed_at timestamp with time zone,
last_failed_code integer,
failed_count integer,
finished_at timestamp with time zone,
order_limit_send_count integer NOT NULL DEFAULT 0,
PRIMARY KEY ( node_id, path, piece_num )
);
CREATE TABLE injuredsegments (
path bytea NOT NULL,
data bytea NOT NULL,
attempted timestamp with time zone,
updated_at timestamp with time zone NOT NULL DEFAULT current_timestamp,
segment_health double precision NOT NULL DEFAULT 1,
PRIMARY KEY ( path )
);
CREATE TABLE irreparabledbs (
segmentpath bytea NOT NULL,
segmentdetail bytea NOT NULL,
pieces_lost_count bigint NOT NULL,
seg_damaged_unix_sec bigint NOT NULL,
repair_attempt_count bigint NOT NULL,
PRIMARY KEY ( segmentpath )
);
CREATE TABLE nodes (
id bytea NOT NULL,
address text NOT NULL DEFAULT '',
last_net text NOT NULL,
last_ip_port text,
protocol integer NOT NULL DEFAULT 0,
type integer NOT NULL DEFAULT 0,
email text NOT NULL,
wallet text NOT NULL,
free_disk bigint NOT NULL DEFAULT -1,
piece_count bigint NOT NULL DEFAULT 0,
major bigint NOT NULL DEFAULT 0,
minor bigint NOT NULL DEFAULT 0,
patch bigint NOT NULL DEFAULT 0,
hash text NOT NULL DEFAULT '',
timestamp timestamp with time zone NOT NULL DEFAULT '0001-01-01 00:00:00+00',
release boolean NOT NULL DEFAULT false,
latency_90 bigint NOT NULL DEFAULT 0,
audit_success_count bigint NOT NULL DEFAULT 0,
total_audit_count bigint NOT NULL DEFAULT 0,
vetted_at timestamp with time zone,
uptime_success_count bigint NOT NULL DEFAULT 0,
total_uptime_count bigint NOT NULL DEFAULT 0,
created_at timestamp with time zone NOT NULL DEFAULT current_timestamp,
updated_at timestamp with time zone NOT NULL DEFAULT current_timestamp,
last_contact_success timestamp with time zone NOT NULL DEFAULT 'epoch',
last_contact_failure timestamp with time zone NOT NULL DEFAULT 'epoch',
contained boolean NOT NULL DEFAULT false,
disqualified timestamp with time zone,
suspended timestamp with time zone,
unknown_audit_suspended timestamp with time zone,
offline_suspended timestamp with time zone,
under_review timestamp with time zone,
online_score double precision NOT NULL DEFAULT 1,
audit_reputation_alpha double precision NOT NULL DEFAULT 1,
audit_reputation_beta double precision NOT NULL DEFAULT 0,
unknown_audit_reputation_alpha double precision NOT NULL DEFAULT 1,
unknown_audit_reputation_beta double precision NOT NULL DEFAULT 0,
uptime_reputation_alpha double precision NOT NULL DEFAULT 1,
uptime_reputation_beta double precision NOT NULL DEFAULT 0,
exit_initiated_at timestamp with time zone,
exit_loop_completed_at timestamp with time zone,
exit_finished_at timestamp with time zone,
exit_success boolean NOT NULL DEFAULT false,
PRIMARY KEY ( id )
);
CREATE TABLE node_api_versions (
id bytea NOT NULL,
api_version integer NOT NULL,
created_at timestamp with time zone NOT NULL,
updated_at timestamp with time zone NOT NULL,
PRIMARY KEY ( id )
);
CREATE TABLE offers (
id serial NOT NULL,
name text NOT NULL,
description text NOT NULL,
award_credit_in_cents integer NOT NULL DEFAULT 0,
invitee_credit_in_cents integer NOT NULL DEFAULT 0,
award_credit_duration_days integer,
invitee_credit_duration_days integer,
redeemable_cap integer,
expires_at timestamp with time zone NOT NULL,
created_at timestamp with time zone NOT NULL,
status integer NOT NULL,
type integer NOT NULL,
PRIMARY KEY ( id )
);
CREATE TABLE peer_identities (
node_id bytea NOT NULL,
leaf_serial_number bytea NOT NULL,
chain bytea NOT NULL,
updated_at timestamp with time zone NOT NULL,
PRIMARY KEY ( node_id )
);
CREATE TABLE pending_audits (
node_id bytea NOT NULL,
piece_id bytea NOT NULL,
stripe_index bigint NOT NULL,
share_size bigint NOT NULL,
expected_share_hash bytea NOT NULL,
reverify_count bigint NOT NULL,
path bytea NOT NULL,
PRIMARY KEY ( node_id )
);
CREATE TABLE pending_serial_queue (
storage_node_id bytea NOT NULL,
bucket_id bytea NOT NULL,
serial_number bytea NOT NULL,
action integer NOT NULL,
settled bigint NOT NULL,
expires_at timestamp with time zone NOT NULL,
PRIMARY KEY ( storage_node_id, bucket_id, serial_number )
);
CREATE TABLE projects (
id bytea NOT NULL,
name text NOT NULL,
description text NOT NULL,
usage_limit bigint,
bandwidth_limit bigint,
rate_limit integer,
max_buckets integer,
partner_id bytea,
owner_id bytea NOT NULL,
created_at timestamp with time zone NOT NULL,
PRIMARY KEY ( id )
);
CREATE TABLE project_bandwidth_rollups (
project_id bytea NOT NULL,
interval_month date NOT NULL,
egress_allocated bigint NOT NULL,
PRIMARY KEY ( project_id, interval_month )
);
CREATE TABLE registration_tokens (
secret bytea NOT NULL,
owner_id bytea,
project_limit integer NOT NULL,
created_at timestamp with time zone NOT NULL,
PRIMARY KEY ( secret ),
UNIQUE ( owner_id )
);
CREATE TABLE reported_serials (
expires_at timestamp with time zone NOT NULL,
storage_node_id bytea NOT NULL,
bucket_id bytea NOT NULL,
action integer NOT NULL,
serial_number bytea NOT NULL,
settled bigint NOT NULL,
observed_at timestamp with time zone NOT NULL,
PRIMARY KEY ( expires_at, storage_node_id, bucket_id, action, serial_number )
);
CREATE TABLE reset_password_tokens (
secret bytea NOT NULL,
owner_id bytea NOT NULL,
created_at timestamp with time zone NOT NULL,
PRIMARY KEY ( secret ),
UNIQUE ( owner_id )
);
CREATE TABLE revocations (
revoked bytea NOT NULL,
api_key_id bytea NOT NULL,
PRIMARY KEY ( revoked )
);
CREATE TABLE serial_numbers (
id serial NOT NULL,
serial_number bytea NOT NULL,
bucket_id bytea NOT NULL,
expires_at timestamp with time zone NOT NULL,
PRIMARY KEY ( id )
);
CREATE TABLE storagenode_bandwidth_rollups (
storagenode_id bytea NOT NULL,
interval_start timestamp with time zone NOT NULL,
interval_seconds integer NOT NULL,
action integer NOT NULL,
allocated bigint DEFAULT 0,
settled bigint NOT NULL,
PRIMARY KEY ( storagenode_id, interval_start, action )
);
CREATE TABLE storagenode_bandwidth_rollups_phase2 (
storagenode_id bytea NOT NULL,
interval_start timestamp with time zone NOT NULL,
interval_seconds integer NOT NULL,
action integer NOT NULL,
allocated bigint DEFAULT 0,
settled bigint NOT NULL,
PRIMARY KEY ( storagenode_id, interval_start, action )
);
CREATE TABLE storagenode_payments (
id bigserial NOT NULL,
created_at timestamp with time zone NOT NULL,
node_id bytea NOT NULL,
period text NOT NULL,
amount bigint NOT NULL,
receipt text,
notes text,
PRIMARY KEY ( id )
);
CREATE TABLE storagenode_paystubs (
period text NOT NULL,
node_id bytea NOT NULL,
created_at timestamp with time zone NOT NULL,
codes text NOT NULL,
usage_at_rest double precision NOT NULL,
usage_get bigint NOT NULL,
usage_put bigint NOT NULL,
usage_get_repair bigint NOT NULL,
usage_put_repair bigint NOT NULL,
usage_get_audit bigint NOT NULL,
comp_at_rest bigint NOT NULL,
comp_get bigint NOT NULL,
comp_put bigint NOT NULL,
comp_get_repair bigint NOT NULL,
comp_put_repair bigint NOT NULL,
comp_get_audit bigint NOT NULL,
surge_percent bigint NOT NULL,
held bigint NOT NULL,
owed bigint NOT NULL,
disposed bigint NOT NULL,
paid bigint NOT NULL,
PRIMARY KEY ( period, node_id )
);
CREATE TABLE storagenode_storage_tallies (
node_id bytea NOT NULL,
interval_end_time timestamp with time zone NOT NULL,
data_total double precision NOT NULL,
PRIMARY KEY ( interval_end_time, node_id )
);
CREATE TABLE stripe_customers (
user_id bytea NOT NULL,
customer_id text NOT NULL,
created_at timestamp with time zone NOT NULL,
PRIMARY KEY ( user_id ),
UNIQUE ( customer_id )
);
CREATE TABLE stripecoinpayments_invoice_project_records (
id bytea NOT NULL,
project_id bytea NOT NULL,
storage double precision NOT NULL,
egress bigint NOT NULL,
objects bigint NOT NULL,
period_start timestamp with time zone NOT NULL,
period_end timestamp with time zone NOT NULL,
state integer NOT NULL,
created_at timestamp with time zone NOT NULL,
PRIMARY KEY ( id ),
UNIQUE ( project_id, period_start, period_end )
);
CREATE TABLE stripecoinpayments_tx_conversion_rates (
tx_id text NOT NULL,
rate bytea NOT NULL,
created_at timestamp with time zone NOT NULL,
PRIMARY KEY ( tx_id )
);
CREATE TABLE users (
id bytea NOT NULL,
email text NOT NULL,
normalized_email text NOT NULL,
full_name text NOT NULL,
short_name text,
password_hash bytea NOT NULL,
status integer NOT NULL,
partner_id bytea,
created_at timestamp with time zone NOT NULL,
project_limit integer NOT NULL DEFAULT 0,
PRIMARY KEY ( id )
);
CREATE TABLE value_attributions (
project_id bytea NOT NULL,
bucket_name bytea NOT NULL,
partner_id bytea NOT NULL,
last_updated timestamp with time zone NOT NULL,
PRIMARY KEY ( project_id, bucket_name )
);
CREATE TABLE api_keys (
id bytea NOT NULL,
project_id bytea NOT NULL REFERENCES projects( id ) ON DELETE CASCADE,
head bytea NOT NULL,
name text NOT NULL,
secret bytea NOT NULL,
partner_id bytea,
created_at timestamp with time zone NOT NULL,
PRIMARY KEY ( id ),
UNIQUE ( head ),
UNIQUE ( name, project_id )
);
CREATE TABLE bucket_metainfos (
id bytea NOT NULL,
project_id bytea NOT NULL REFERENCES projects( id ),
name bytea NOT NULL,
partner_id bytea,
path_cipher integer NOT NULL,
created_at timestamp with time zone NOT NULL,
default_segment_size integer NOT NULL,
default_encryption_cipher_suite integer NOT NULL,
default_encryption_block_size integer NOT NULL,
default_redundancy_algorithm integer NOT NULL,
default_redundancy_share_size integer NOT NULL,
default_redundancy_required_shares integer NOT NULL,
default_redundancy_repair_shares integer NOT NULL,
default_redundancy_optimal_shares integer NOT NULL,
default_redundancy_total_shares integer NOT NULL,
PRIMARY KEY ( id ),
UNIQUE ( name, project_id ),
UNIQUE ( project_id, name )
);
CREATE TABLE project_members (
member_id bytea NOT NULL REFERENCES users( id ) ON DELETE CASCADE,
project_id bytea NOT NULL REFERENCES projects( id ) ON DELETE CASCADE,
created_at timestamp with time zone NOT NULL,
PRIMARY KEY ( member_id, project_id )
);
CREATE TABLE stripecoinpayments_apply_balance_intents (
tx_id text NOT NULL REFERENCES coinpayments_transactions( id ) ON DELETE CASCADE,
state integer NOT NULL,
created_at timestamp with time zone NOT NULL,
PRIMARY KEY ( tx_id )
);
CREATE TABLE used_serials (
serial_number_id integer NOT NULL REFERENCES serial_numbers( id ) ON DELETE CASCADE,
storage_node_id bytea NOT NULL,
PRIMARY KEY ( serial_number_id, storage_node_id )
);
CREATE TABLE user_credits (
id serial NOT NULL,
user_id bytea NOT NULL REFERENCES users( id ) ON DELETE CASCADE,
offer_id integer NOT NULL REFERENCES offers( id ),
referred_by bytea REFERENCES users( id ) ON DELETE SET NULL,
type text NOT NULL,
credits_earned_in_cents integer NOT NULL,
credits_used_in_cents integer NOT NULL,
expires_at timestamp with time zone NOT NULL,
created_at timestamp with time zone NOT NULL,
PRIMARY KEY ( id ),
UNIQUE ( id, offer_id )
);
CREATE INDEX accounting_rollups_start_time_index ON accounting_rollups ( start_time );
CREATE INDEX bucket_bandwidth_rollups_project_id_action_interval_index ON bucket_bandwidth_rollups ( project_id, action, interval_start );
CREATE INDEX bucket_bandwidth_rollups_action_interval_project_id_index ON bucket_bandwidth_rollups ( action, interval_start, project_id );
CREATE INDEX bucket_storage_tallies_project_id_index ON bucket_storage_tallies ( project_id );
CREATE INDEX consumed_serials_expires_at_index ON consumed_serials ( expires_at );
CREATE INDEX graceful_exit_transfer_queue_nid_dr_qa_fa_lfa_index ON graceful_exit_transfer_queue ( node_id, durability_ratio, queued_at, finished_at, last_failed_at );
CREATE INDEX injuredsegments_attempted_index ON injuredsegments ( attempted );
CREATE INDEX injuredsegments_segment_health_index ON injuredsegments ( segment_health );
CREATE INDEX injuredsegments_updated_at_index ON injuredsegments ( updated_at );
CREATE INDEX node_last_ip ON nodes ( last_net );
CREATE INDEX nodes_dis_unk_exit_fin_last_success_index ON nodes ( disqualified, unknown_audit_suspended, exit_finished_at, last_contact_success );
CREATE UNIQUE INDEX serial_number_index ON serial_numbers ( serial_number );
CREATE INDEX serial_numbers_expires_at_index ON serial_numbers ( expires_at );
CREATE INDEX storagenode_bandwidth_rollups_interval_start_index ON storagenode_bandwidth_rollups ( interval_start );
CREATE INDEX storagenode_payments_node_id_period_index ON storagenode_payments ( node_id, period );
CREATE INDEX storagenode_paystubs_node_id_index ON storagenode_paystubs ( node_id );
CREATE INDEX storagenode_storage_tallies_node_id_index ON storagenode_storage_tallies ( node_id );
CREATE UNIQUE INDEX credits_earned_user_id_offer_id ON user_credits ( id, offer_id );
INSERT INTO "accounting_rollups"("node_id", "start_time", "put_total", "get_total", "get_audit_total", "get_repair_total", "put_repair_total", "at_rest_total") VALUES (E'\\367M\\177\\251]t/\\022\\256\\214\\265\\025\\224\\204:\\217\\212\\0102<\\321\\374\\020&\\271Qc\\325\\261\\354\\246\\233'::bytea, '2019-02-09 00:00:00+00', 3000, 6000, 9000, 12000, 0, 15000);
INSERT INTO "accounting_timestamps" VALUES ('LastAtRestTally', '0001-01-01 00:00:00+00');
INSERT INTO "accounting_timestamps" VALUES ('LastRollup', '0001-01-01 00:00:00+00');
INSERT INTO "accounting_timestamps" VALUES ('LastBandwidthTally', '0001-01-01 00:00:00+00');
INSERT INTO "nodes"("id", "address", "last_net", "protocol", "type", "email", "wallet", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "uptime_success_count", "total_uptime_count", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "suspended", "audit_reputation_alpha", "audit_reputation_beta", "unknown_audit_reputation_alpha", "unknown_audit_reputation_beta", "uptime_reputation_alpha", "uptime_reputation_beta", "exit_success", "online_score") VALUES (E'\\153\\313\\233\\074\\327\\177\\136\\070\\346\\001', '127.0.0.1:55516', '', 0, 4, '', '', -1, 0, 0, 1, 0, '', 'epoch', false, 0, 0, 5, 0, 5, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, NULL, 50, 0, 1, 0, 100, 5, false, 1);
INSERT INTO "nodes"("id", "address", "last_net", "protocol", "type", "email", "wallet", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "uptime_success_count", "total_uptime_count", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "suspended", "audit_reputation_alpha", "audit_reputation_beta", "unknown_audit_reputation_alpha", "unknown_audit_reputation_beta", "uptime_reputation_alpha", "uptime_reputation_beta", "exit_success", "online_score") VALUES (E'\\006\\223\\250R\\221\\005\\365\\377v>0\\266\\365\\216\\255?\\347\\244\\371?2\\264\\262\\230\\007<\\001\\262\\263\\237\\247n', '127.0.0.1:55518', '', 0, 4, '', '', -1, 0, 0, 1, 0, '', 'epoch', false, 0, 0, 0, 3, 3, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, NULL, 50, 0, 1, 0, 100, 0, false, 1);
INSERT INTO "nodes"("id", "address", "last_net", "protocol", "type", "email", "wallet", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "uptime_success_count", "total_uptime_count", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "suspended", "audit_reputation_alpha", "audit_reputation_beta", "unknown_audit_reputation_alpha", "unknown_audit_reputation_beta", "uptime_reputation_alpha", "uptime_reputation_beta", "exit_success", "online_score") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014', '127.0.0.1:55517', '', 0, 4, '', '', -1, 0, 0, 1, 0, '', 'epoch', false, 0, 0, 0, 0, 0, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, NULL, 50, 0, 1, 0, 100, 0, false, 1);
INSERT INTO "nodes"("id", "address", "last_net", "protocol", "type", "email", "wallet", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "uptime_success_count", "total_uptime_count", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "suspended", "audit_reputation_alpha", "audit_reputation_beta", "unknown_audit_reputation_alpha", "unknown_audit_reputation_beta", "uptime_reputation_alpha", "uptime_reputation_beta", "exit_success", "online_score") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\015', '127.0.0.1:55519', '', 0, 4, '', '', -1, 0, 0, 1, 0, '', 'epoch', false, 0, 1, 2, 1, 2, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, NULL, 50, 0, 1, 0, 100, 1, false, 1);
INSERT INTO "nodes"("id", "address", "last_net", "protocol", "type", "email", "wallet", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "uptime_success_count", "total_uptime_count", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "suspended", "audit_reputation_alpha", "audit_reputation_beta", "unknown_audit_reputation_alpha", "unknown_audit_reputation_beta", "uptime_reputation_alpha", "uptime_reputation_beta", "exit_success", "vetted_at", "online_score") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\016', '127.0.0.1:55520', '', 0, 4, '', '', -1, 0, 0, 1, 0, '', 'epoch', false, 0, 300, 400, 300, 400, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, NULL, 300, 0, 1, 0, 300, 100, false, '2020-03-18 12:00:00.000000+00', 1);
INSERT INTO "nodes"("id", "address", "last_net", "protocol", "type", "email", "wallet", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "uptime_success_count", "total_uptime_count", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "suspended", "audit_reputation_alpha", "audit_reputation_beta", "unknown_audit_reputation_alpha", "unknown_audit_reputation_beta", "uptime_reputation_alpha", "uptime_reputation_beta", "exit_success", "online_score") VALUES (E'\\154\\313\\233\\074\\327\\177\\136\\070\\346\\001', '127.0.0.1:55516', '', 0, 4, '', '', -1, 0, 0, 1, 0, '', 'epoch', false, 0, 0, 5, 0, 5, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, NULL, 50, 0, 75, 25, 100, 5, false, 1);
INSERT INTO "nodes"("id", "address", "last_net", "last_ip_port", "protocol", "type", "email", "wallet", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "uptime_success_count", "total_uptime_count", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "suspended", "audit_reputation_alpha", "audit_reputation_beta", "unknown_audit_reputation_alpha", "unknown_audit_reputation_beta", "uptime_reputation_alpha", "uptime_reputation_beta", "exit_success", "online_score") VALUES (E'\\154\\313\\233\\074\\327\\177\\136\\070\\346\\002', '127.0.0.1:55516', '127.0.0.0', '127.0.0.1:55516', 0, 4, '', '', -1, 0, 0, 1, 0, '', 'epoch', false, 0, 0, 5, 0, 5, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, NULL, 50, 0, 75, 25, 100, 5, false, 1);
INSERT INTO "users"("id", "full_name", "short_name", "email", "normalized_email", "password_hash", "status", "partner_id", "created_at") VALUES (E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 'Noahson', 'William', '1email1@mail.test', '1EMAIL1@MAIL.TEST', E'some_readable_hash'::bytea, 1, NULL, '2019-02-14 08:28:24.614594+00');
INSERT INTO "projects"("id", "name", "description", "usage_limit", "bandwidth_limit", "max_buckets", "partner_id", "owner_id", "created_at") VALUES (E'\\022\\217/\\014\\376!K\\023\\276\\031\\311}m\\236\\205\\300'::bytea, 'ProjectName', 'projects description', NULL, NULL, NULL, NULL, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, '2019-02-14 08:28:24.254934+00');
INSERT INTO "projects"("id", "name", "description", "usage_limit", "bandwidth_limit", "max_buckets", "partner_id", "owner_id", "created_at") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea, 'projName1', 'Test project 1', NULL, NULL, NULL, NULL, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, '2019-02-14 08:28:24.636949+00');
INSERT INTO "project_members"("member_id", "project_id", "created_at") VALUES (E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea, '2019-02-14 08:28:24.677953+00');
INSERT INTO "project_members"("member_id", "project_id", "created_at") VALUES (E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, E'\\022\\217/\\014\\376!K\\023\\276\\031\\311}m\\236\\205\\300'::bytea, '2019-02-13 08:28:24.677953+00');
INSERT INTO "irreparabledbs" ("segmentpath", "segmentdetail", "pieces_lost_count", "seg_damaged_unix_sec", "repair_attempt_count") VALUES ('\x49616d5365676d656e746b6579696e666f30', '\x49616d5365676d656e7464657461696c696e666f30', 10, 1550159554, 10);
INSERT INTO "registration_tokens" ("secret", "owner_id", "project_limit", "created_at") VALUES (E'\\070\\127\\144\\013\\332\\344\\102\\376\\306\\056\\303\\130\\106\\132\\321\\276\\321\\274\\170\\264\\054\\333\\221\\116\\154\\221\\335\\070\\220\\146\\344\\216'::bytea, null, 1, '2019-02-14 08:28:24.677953+00');
INSERT INTO "serial_numbers" ("id", "serial_number", "bucket_id", "expires_at") VALUES (1, E'0123456701234567'::bytea, E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014/testbucket'::bytea, '2019-03-06 08:28:24.677953+00');
INSERT INTO "used_serials" ("serial_number_id", "storage_node_id") VALUES (1, E'\\006\\223\\250R\\221\\005\\365\\377v>0\\266\\365\\216\\255?\\347\\244\\371?2\\264\\262\\230\\007<\\001\\262\\263\\237\\247n');
INSERT INTO "storagenode_bandwidth_rollups" ("storagenode_id", "interval_start", "interval_seconds", "action", "allocated", "settled") VALUES (E'\\006\\223\\250R\\221\\005\\365\\377v>0\\266\\365\\216\\255?\\347\\244\\371?2\\264\\262\\230\\007<\\001\\262\\263\\237\\247n', '2019-03-06 08:00:00.000000' AT TIME ZONE current_setting('TIMEZONE'), 3600, 1, 1024, 2024);
INSERT INTO "storagenode_storage_tallies" VALUES (E'\\3510\\323\\225"~\\036<\\342\\330m\\0253Jhr\\246\\233K\\246#\\2303\\351\\256\\275j\\212UM\\362\\207', '2019-02-14 08:16:57.812849+00', 1000);
INSERT INTO "bucket_bandwidth_rollups" ("bucket_name", "project_id", "interval_start", "interval_seconds", "action", "inline", "allocated", "settled") VALUES (E'testbucket'::bytea, E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea,'2019-03-06 08:00:00.000000' AT TIME ZONE current_setting('TIMEZONE'), 3600, 1, 1024, 2024, 3024);
INSERT INTO "bucket_storage_tallies" ("bucket_name", "project_id", "interval_start", "inline", "remote", "remote_segments_count", "inline_segments_count", "object_count", "metadata_size") VALUES (E'testbucket'::bytea, E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea,'2019-03-06 08:00:00.000000' AT TIME ZONE current_setting('TIMEZONE'), 4024, 5024, 0, 0, 0, 0);
INSERT INTO "bucket_bandwidth_rollups" ("bucket_name", "project_id", "interval_start", "interval_seconds", "action", "inline", "allocated", "settled") VALUES (E'testbucket'::bytea, E'\\170\\160\\157\\370\\274\\366\\113\\364\\272\\235\\301\\243\\321\\102\\321\\136'::bytea,'2019-03-06 08:00:00.000000' AT TIME ZONE current_setting('TIMEZONE'), 3600, 1, 1024, 2024, 3024);
INSERT INTO "bucket_storage_tallies" ("bucket_name", "project_id", "interval_start", "inline", "remote", "remote_segments_count", "inline_segments_count", "object_count", "metadata_size") VALUES (E'testbucket'::bytea, E'\\170\\160\\157\\370\\274\\366\\113\\364\\272\\235\\301\\243\\321\\102\\321\\136'::bytea,'2019-03-06 08:00:00.000000' AT TIME ZONE current_setting('TIMEZONE'), 4024, 5024, 0, 0, 0, 0);
INSERT INTO "reset_password_tokens" ("secret", "owner_id", "created_at") VALUES (E'\\070\\127\\144\\013\\332\\344\\102\\376\\306\\056\\303\\130\\106\\132\\321\\276\\321\\274\\170\\264\\054\\333\\221\\116\\154\\221\\335\\070\\220\\146\\344\\216'::bytea, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, '2019-05-08 08:28:24.677953+00');
INSERT INTO "offers" ("id", "name", "description", "award_credit_in_cents", "invitee_credit_in_cents", "expires_at", "created_at", "status", "type", "award_credit_duration_days", "invitee_credit_duration_days") VALUES (1, 'Default referral offer', 'Is active when no other active referral offer', 300, 600, '2119-03-14 08:28:24.636949+00', '2019-07-14 08:28:24.636949+00', 1, 2, 365, 14);
INSERT INTO "offers" ("id", "name", "description", "award_credit_in_cents", "invitee_credit_in_cents", "expires_at", "created_at", "status", "type", "award_credit_duration_days", "invitee_credit_duration_days") VALUES (2, 'Default free credit offer', 'Is active when no active free credit offer', 0, 300, '2119-03-14 08:28:24.636949+00', '2019-07-14 08:28:24.636949+00', 1, 1, NULL, 14);
INSERT INTO "api_keys" ("id", "project_id", "head", "name", "secret", "partner_id", "created_at") VALUES (E'\\334/\\302;\\225\\355O\\323\\276f\\247\\354/6\\241\\033'::bytea, E'\\022\\217/\\014\\376!K\\023\\276\\031\\311}m\\236\\205\\300'::bytea, E'\\111\\142\\147\\304\\132\\375\\070\\163\\270\\160\\251\\370\\126\\063\\351\\037\\257\\071\\143\\375\\351\\320\\253\\232\\220\\260\\075\\173\\306\\307\\115\\136'::bytea, 'key 2', E'\\254\\011\\315\\333\\273\\365\\001\\071\\024\\154\\253\\332\\301\\216\\361\\074\\221\\367\\251\\231\\274\\333\\300\\367\\001\\272\\327\\111\\315\\123\\042\\016'::bytea, NULL, '2019-02-14 08:28:24.267934+00');
INSERT INTO "value_attributions" ("project_id", "bucket_name", "partner_id", "last_updated") VALUES (E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, E''::bytea, E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea,'2019-02-14 08:07:31.028103+00');
INSERT INTO "user_credits" ("id", "user_id", "offer_id", "referred_by", "credits_earned_in_cents", "credits_used_in_cents", "type", "expires_at", "created_at") VALUES (1, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 1, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 200, 0, 'invalid', '2019-10-01 08:28:24.267934+00', '2019-06-01 08:28:24.267934+00');
INSERT INTO "bucket_metainfos" ("id", "project_id", "name", "partner_id", "created_at", "path_cipher", "default_segment_size", "default_encryption_cipher_suite", "default_encryption_block_size", "default_redundancy_algorithm", "default_redundancy_share_size", "default_redundancy_required_shares", "default_redundancy_repair_shares", "default_redundancy_optimal_shares", "default_redundancy_total_shares") VALUES (E'\\334/\\302;\\225\\355O\\323\\276f\\247\\354/6\\241\\033'::bytea, E'\\022\\217/\\014\\376!K\\023\\276\\031\\311}m\\236\\205\\300'::bytea, E'testbucketuniquename'::bytea, NULL, '2019-06-14 08:28:24.677953+00', 1, 65536, 1, 8192, 1, 4096, 4, 6, 8, 10);
INSERT INTO "pending_audits" ("node_id", "piece_id", "stripe_index", "share_size", "expected_share_hash", "reverify_count", "path") VALUES (E'\\153\\313\\233\\074\\327\\177\\136\\070\\346\\001'::bytea, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 5, 1024, E'\\070\\127\\144\\013\\332\\344\\102\\376\\306\\056\\303\\130\\106\\132\\321\\276\\321\\274\\170\\264\\054\\333\\221\\116\\154\\221\\335\\070\\220\\146\\344\\216'::bytea, 1, 'not null');
INSERT INTO "peer_identities" VALUES (E'\\334/\\302;\\225\\355O\\323\\276f\\247\\354/6\\241\\033'::bytea, E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, '2019-02-14 08:07:31.335028+00');
INSERT INTO "graceful_exit_progress" ("node_id", "bytes_transferred", "pieces_transferred", "pieces_failed", "updated_at") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\016', 1000000000000000, 0, 0, '2019-09-12 10:07:31.028103+00');
INSERT INTO "graceful_exit_transfer_queue" ("node_id", "path", "piece_num", "durability_ratio", "queued_at", "requested_at", "last_failed_at", "last_failed_code", "failed_count", "finished_at", "order_limit_send_count") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\016', E'f8419768-5baa-4901-b3ba-62808013ec45/s0/test3/\\240\\243\\223n \\334~b}\\2624)\\250m\\201\\202\\235\\276\\361\\3304\\323\\352\\311\\361\\353;\\326\\311', 8, 1.0, '2019-09-12 10:07:31.028103+00', '2019-09-12 10:07:32.028103+00', null, null, 0, '2019-09-12 10:07:33.028103+00', 0);
INSERT INTO "graceful_exit_transfer_queue" ("node_id", "path", "piece_num", "durability_ratio", "queued_at", "requested_at", "last_failed_at", "last_failed_code", "failed_count", "finished_at", "order_limit_send_count") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\016', E'f8419768-5baa-4901-b3ba-62808013ec45/s0/test3/\\240\\243\\223n \\334~b}\\2624)\\250m\\201\\202\\235\\276\\361\\3304\\323\\352\\311\\361\\353;\\326\\312', 8, 1.0, '2019-09-12 10:07:31.028103+00', '2019-09-12 10:07:32.028103+00', null, null, 0, '2019-09-12 10:07:33.028103+00', 0);
INSERT INTO "stripe_customers" ("user_id", "customer_id", "created_at") VALUES (E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 'stripe_id', '2019-06-01 08:28:24.267934+00');
INSERT INTO "graceful_exit_transfer_queue" ("node_id", "path", "piece_num", "durability_ratio", "queued_at", "requested_at", "last_failed_at", "last_failed_code", "failed_count", "finished_at", "order_limit_send_count") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\016', E'f8419768-5baa-4901-b3ba-62808013ec45/s0/test3/\\240\\243\\223n \\334~b}\\2624)\\250m\\201\\202\\235\\276\\361\\3304\\323\\352\\311\\361\\353;\\326\\311', 9, 1.0, '2019-09-12 10:07:31.028103+00', '2019-09-12 10:07:32.028103+00', null, null, 0, '2019-09-12 10:07:33.028103+00', 0);
INSERT INTO "graceful_exit_transfer_queue" ("node_id", "path", "piece_num", "durability_ratio", "queued_at", "requested_at", "last_failed_at", "last_failed_code", "failed_count", "finished_at", "order_limit_send_count") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\016', E'f8419768-5baa-4901-b3ba-62808013ec45/s0/test3/\\240\\243\\223n \\334~b}\\2624)\\250m\\201\\202\\235\\276\\361\\3304\\323\\352\\311\\361\\353;\\326\\312', 9, 1.0, '2019-09-12 10:07:31.028103+00', '2019-09-12 10:07:32.028103+00', null, null, 0, '2019-09-12 10:07:33.028103+00', 0);
INSERT INTO "stripecoinpayments_invoice_project_records"("id", "project_id", "storage", "egress", "objects", "period_start", "period_end", "state", "created_at") VALUES (E'\\022\\217/\\014\\376!K\\023\\276\\031\\311}m\\236\\205\\300'::bytea, E'\\021\\217/\\014\\376!K\\023\\276\\031\\311}m\\236\\205\\300'::bytea, 0, 0, 0, '2019-06-01 08:28:24.267934+00', '2019-06-01 08:28:24.267934+00', 0, '2019-06-01 08:28:24.267934+00');
INSERT INTO "graceful_exit_transfer_queue" ("node_id", "path", "piece_num", "root_piece_id", "durability_ratio", "queued_at", "requested_at", "last_failed_at", "last_failed_code", "failed_count", "finished_at", "order_limit_send_count") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\016', E'f8419768-5baa-4901-b3ba-62808013ec45/s0/test3/\\240\\243\\223n \\334~b}\\2624)\\250m\\201\\202\\235\\276\\361\\3304\\323\\352\\311\\361\\353;\\326\\311', 10, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 1.0, '2019-09-12 10:07:31.028103+00', '2019-09-12 10:07:32.028103+00', null, null, 0, '2019-09-12 10:07:33.028103+00', 0);
INSERT INTO "stripecoinpayments_tx_conversion_rates" ("tx_id", "rate", "created_at") VALUES ('tx_id', E'\\363\\311\\033w\\222\\303Ci,'::bytea, '2019-06-01 08:28:24.267934+00');
INSERT INTO "coinpayments_transactions" ("id", "user_id", "address", "amount", "received", "status", "key", "timeout", "created_at") VALUES ('tx_id', E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 'address', E'\\363\\311\\033w'::bytea, E'\\363\\311\\033w'::bytea, 1, 'key', 60, '2019-06-01 08:28:24.267934+00');
INSERT INTO "storagenode_bandwidth_rollups" ("storagenode_id", "interval_start", "interval_seconds", "action", "settled") VALUES (E'\\006\\223\\250R\\221\\005\\365\\377v>0\\266\\365\\216\\255?\\347\\244\\371?2\\264\\262\\230\\007<\\001\\262\\263\\237\\247n', '2020-01-11 08:00:00.000000' AT TIME ZONE current_setting('TIMEZONE'), 3600, 1, 2024);
INSERT INTO "coupons" ("id", "user_id", "amount", "description", "type", "status", "duration", "created_at") VALUES (E'\\362\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 50, 'description', 0, 0, 2, '2019-06-01 08:28:24.267934+00');
INSERT INTO "coupon_usages" ("coupon_id", "amount", "status", "period") VALUES (E'\\362\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea, 22, 0, '2019-06-01 09:28:24.267934+00');
INSERT INTO "reported_serials" ("expires_at", "storage_node_id", "bucket_id", "action", "serial_number", "settled", "observed_at") VALUES ('2020-01-11 08:00:00.000000+00', E'\\006\\223\\250R\\221\\005\\365\\377v>0\\266\\365\\216\\255?\\347\\244\\371?2\\264\\262\\230\\007<\\001\\262\\263\\237\\247n', E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014/testbucket'::bytea, 1, E'0123456701234567'::bytea, 100, '2020-01-11 08:00:00.000000+00');
INSERT INTO "stripecoinpayments_apply_balance_intents" ("tx_id", "state", "created_at") VALUES ('tx_id', 0, '2019-06-01 08:28:24.267934+00');
INSERT INTO "projects"("id", "name", "description", "usage_limit", "bandwidth_limit", "max_buckets", "rate_limit", "partner_id", "owner_id", "created_at") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\347'::bytea, 'projName1', 'Test project 1', NULL, NULL, NULL, 2000000, NULL, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, '2020-01-15 08:28:24.636949+00');
INSERT INTO "pending_serial_queue" ("storage_node_id", "bucket_id", "serial_number", "action", "settled", "expires_at") VALUES (E'\\006\\223\\250R\\221\\005\\365\\377v>0\\266\\365\\216\\255?\\347\\244\\371?2\\264\\262\\230\\007<\\001\\262\\263\\237\\247n', E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014/testbucket'::bytea, E'5123456701234567'::bytea, 1, 100, '2020-01-11 08:00:00.000000+00');
INSERT INTO "consumed_serials" ("storage_node_id", "serial_number", "expires_at") VALUES (E'\\006\\223\\250R\\221\\005\\365\\377v>0\\266\\365\\216\\255?\\347\\244\\371?2\\264\\262\\230\\007<\\001\\262\\263\\237\\247n', E'1234567012345678'::bytea, '2020-01-12 08:00:00.000000+00');
INSERT INTO "injuredsegments" ("path", "data", "segment_health", "updated_at") VALUES ('0', '\x0a0130120100', 1.0, '2020-09-01 00:00:00.000000+00');
INSERT INTO "injuredsegments" ("path", "data", "segment_health", "updated_at") VALUES ('here''s/a/great/path', '\x0a136865726527732f612f67726561742f70617468120a0102030405060708090a', 1.0, '2020-09-01 00:00:00.000000+00');
INSERT INTO "injuredsegments" ("path", "data", "segment_health", "updated_at") VALUES ('yet/another/cool/path', '\x0a157965742f616e6f746865722f636f6f6c2f70617468120a0102030405060708090a', 1.0, '2020-09-01 00:00:00.000000+00');
INSERT INTO "injuredsegments" ("path", "data", "segment_health", "updated_at") VALUES ('/this/is/a/new/path', '\x0a23736f2f6d616e792f69636f6e69632f70617468732f746f2f63686f6f73652f66726f6d120a0102030405060708090a', 1.0, '2020-09-01 00:00:00.000000+00');
INSERT INTO "injuredsegments" ("path", "data", "segment_health", "updated_at") VALUES ('/some/path/1/23/4', '\x0a23736f2f6d618e792f69636f6e69632f70617468732f746f2f63686f6f73652f66726f6d120a0102030405060708090a', 0.2, '2020-09-01 00:00:00.000000+00');
INSERT INTO "project_bandwidth_rollups"("project_id", "interval_month", egress_allocated) VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\347'::bytea, '2020-04-01', 10000);
INSERT INTO "projects"("id", "name", "description", "usage_limit", "bandwidth_limit", "max_buckets","rate_limit", "partner_id", "owner_id", "created_at") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\345'::bytea, 'egress101', 'High Bandwidth Project', NULL, NULL, NULL, 2000000, NULL, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, '2020-05-15 08:46:24.000000+00');
INSERT INTO "storagenode_paystubs"("period", "node_id", "created_at", "codes", "usage_at_rest", "usage_get", "usage_put", "usage_get_repair", "usage_put_repair", "usage_get_audit", "comp_at_rest", "comp_get", "comp_put", "comp_get_repair", "comp_put_repair", "comp_get_audit", "surge_percent", "held", "owed", "disposed", "paid") VALUES ('2020-01', '\xf2a3b4c4dfdf7221310382fd5db5aa73e1d227d6df09734ec4e5305000000000', '2020-04-07T20:14:21.479141Z', '', 1327959864508416, 294054066688, 159031363328, 226751, 0, 836608, 2861984, 5881081, 0, 226751, 0, 8, 300, 0, 26909472, 0, 26909472);
INSERT INTO "nodes"("id", "address", "last_net", "protocol", "type", "email", "wallet", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "uptime_success_count", "total_uptime_count", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "suspended", "audit_reputation_alpha", "audit_reputation_beta", "unknown_audit_reputation_alpha", "unknown_audit_reputation_beta", "uptime_reputation_alpha", "uptime_reputation_beta", "exit_success", "unknown_audit_suspended", "offline_suspended", "under_review") VALUES (E'\\153\\313\\233\\074\\327\\255\\136\\070\\346\\001', '127.0.0.1:55516', '', 0, 4, '', '', -1, 0, 0, 1, 0, '', 'epoch', false, 0, 0, 5, 0, 5, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, NULL, 50, 0, 1, 0, 100, 5, false, '2019-02-14 08:07:31.108963+00', '2019-02-14 08:07:31.108963+00', '2019-02-14 08:07:31.108963+00');
INSERT INTO "audit_histories" ("node_id", "history") VALUES (E'\\153\\313\\233\\074\\327\\177\\136\\070\\346\\001', '\x0a23736f2f6d616e792f69636f6e69632f70617468732f746f2f63686f6f73652f66726f6d120a0102030405060708090a');
INSERT INTO "node_api_versions"("id", "api_version", "created_at", "updated_at") VALUES (E'\\153\\313\\233\\074\\327\\177\\136\\070\\346\\001', 1, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00');
INSERT INTO "node_api_versions"("id", "api_version", "created_at", "updated_at") VALUES (E'\\006\\223\\250R\\221\\005\\365\\377v>0\\266\\365\\216\\255?\\347\\244\\371?2\\264\\262\\230\\007<\\001\\262\\263\\237\\247n', 2, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00');
INSERT INTO "node_api_versions"("id", "api_version", "created_at", "updated_at") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014', 3, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00');
INSERT INTO "projects"("id", "name", "description", "usage_limit", "bandwidth_limit", "rate_limit", "partner_id", "owner_id", "created_at", "max_buckets") VALUES (E'300\\273|\\342N\\347\\347\\363\\342\\363\\371>+F\\256\\263'::bytea, 'egress102', 'High Bandwidth Project 2', NULL, NULL, 2000000, NULL, E'265\\343U\\303\\312\\312\\363\\311\\033w\\222\\303Ci",'::bytea, '2020-05-15 08:46:24.000000+00', 1000);
INSERT INTO "projects"("id", "name", "description", "usage_limit", "bandwidth_limit", "rate_limit", "partner_id", "owner_id", "created_at", "max_buckets") VALUES (E'300\\273|\\342N\\347\\347\\363\\342\\363\\371>+F\\255\\244'::bytea, 'egress103', 'High Bandwidth Project 3', NULL, NULL, 2000000, NULL, E'265\\343U\\303\\312\\312\\363\\311\\033w\\222\\303Ci",'::bytea, '2020-05-15 08:46:24.000000+00', 1000);
INSERT INTO "projects"("id", "name", "description", "usage_limit", "bandwidth_limit", "rate_limit", "partner_id", "owner_id", "created_at", "max_buckets") VALUES (E'300\\273|\\342N\\347\\347\\363\\342\\363\\371>+F\\253\\231'::bytea, 'Limit Test 1', 'This project is above the default', 50000000001, 50000000001, 2000000, NULL, E'265\\343U\\303\\312\\312\\363\\311\\033w\\222\\303Ci",'::bytea, '2020-10-14 10:10:10.000000+00', 101);
INSERT INTO "projects"("id", "name", "description", "usage_limit", "bandwidth_limit", "rate_limit", "partner_id", "owner_id", "created_at", "max_buckets") VALUES (E'300\\273|\\342N\\347\\347\\363\\342\\363\\371>+F\\252\\230'::bytea, 'Limit Test 2', 'This project is below the default', NULL, NULL, 2000000, NULL, E'265\\343U\\303\\312\\312\\363\\311\\033w\\222\\303Ci",'::bytea, '2020-10-14 10:10:11.000000+00', NULL);
INSERT INTO "storagenode_bandwidth_rollups_phase2" ("storagenode_id", "interval_start", "interval_seconds", "action", "allocated", "settled") VALUES (E'\\006\\223\\250R\\221\\005\\365\\377v>0\\266\\365\\216\\255?\\347\\244\\371?2\\264\\262\\230\\007<\\001\\262\\263\\237\\247n', '2019-03-06 08:00:00.000000' AT TIME ZONE current_setting('TIMEZONE'), 3600, 1, 1024, 2024);
-- NEW DATA --
INSERT INTO "nodes"("id", "address", "last_net", "protocol", "type", "email", "wallet", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "suspended", "audit_reputation_alpha", "audit_reputation_beta", "unknown_audit_reputation_alpha", "unknown_audit_reputation_beta", "uptime_reputation_alpha", "uptime_reputation_beta", "exit_success", "online_score") VALUES (E'\\363\\341\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\016', '127.0.0.1:55516', '', 0, 4, '', '', -1, 0, 0, 1, 0, '', 'epoch', false, 0, 0, 5, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, NULL, 50, 0, 1, 0, 100, 5, false, 1);


@@ -36,6 +36,13 @@ const (
type Server interface {
	Addr() string
	Close() error
	// TestingFastForward advances the server's expiration clock to enforce
	// the TTL of keys in implementations that do not exercise expiration by
	// themselves (e.g. miniredis). This method is a no-op in implementations
	// which support expiration as usual.
	//
	// All keys whose TTL minus d becomes <= 0 are removed.
	TestingFastForward(d time.Duration)
}
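For context, the new TestingFastForward hook lets a test expire keys deterministically instead of sleeping. A minimal sketch of such a test, assuming a testredis.Mini constructor and the go-redis client already used in this package (both names are assumptions, not part of this diff):

package testredis_test

import (
	"context"
	"testing"
	"time"

	"github.com/go-redis/redis"

	"storj.io/storj/private/testredis"
)

func TestExpiration(t *testing.T) {
	ctx := context.Background()

	// testredis.Mini is assumed here as the miniredis-backed constructor.
	server, err := testredis.Mini(ctx)
	if err != nil {
		t.Fatal(err)
	}
	defer func() { _ = server.Close() }()

	client := redis.NewClient(&redis.Options{Addr: server.Addr()})
	if err := client.Set("key", "value", time.Minute).Err(); err != nil {
		t.Fatal(err)
	}

	// Advance the fake clock so the TTL elapses without real waiting.
	server.TestingFastForward(time.Minute)

	if err := client.Get("key").Err(); err != redis.Nil {
		t.Fatalf("expected key to be expired, got %v", err)
	}
}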
func freeport() (addr string, port int) {
@@ -151,8 +158,16 @@ type process struct {
	close func()
}

func (process *process) Addr() string { return process.addr }
func (process *process) Close() error { process.close(); return nil }

func (process *process) Addr() string {
	return process.addr
}

func (process *process) Close() error {
	process.close()
	return nil
}

func (process *process) TestingFastForward(_ time.Duration) {}
func pingServer(addr string) error {
	client := redis.NewClient(&redis.Options{Addr: addr, DB: 1})
@@ -184,3 +199,7 @@ func (s *miniserver) Close() error {
	s.Miniredis.Close()
	return nil
}

func (s *miniserver) TestingFastForward(d time.Duration) {
	s.FastForward(d)
}


@@ -0,0 +1,53 @@
// Copyright (C) 2021 Storj Labs, Inc.
// See LICENSE for copying information.

package multinode

import (
	"context"

	"go.uber.org/zap"

	"storj.io/common/rpc/rpcstatus"
	"storj.io/storj/private/multinodepb"
	"storj.io/storj/storagenode/apikeys"
	"storj.io/storj/storagenode/payout"
)

var _ multinodepb.DRPCPayoutServer = (*PayoutEndpoint)(nil)

// PayoutEndpoint implements multinode payout endpoint.
//
// architecture: Endpoint
type PayoutEndpoint struct {
	log     *zap.Logger
	apiKeys *apikeys.Service
	db      payout.DB
}

// NewPayoutEndpoint creates new multinode payout endpoint.
func NewPayoutEndpoint(log *zap.Logger, apiKeys *apikeys.Service, db payout.DB) *PayoutEndpoint {
	return &PayoutEndpoint{
		log:     log,
		apiKeys: apiKeys,
		db:      db,
	}
}

// Earned returns total earned amount.
func (payout *PayoutEndpoint) Earned(ctx context.Context, req *multinodepb.EarnedRequest) (_ *multinodepb.EarnedResponse, err error) {
	defer mon.Task()(&ctx)(&err)

	if err = authenticate(ctx, payout.apiKeys, req.GetHeader()); err != nil {
		return nil, rpcstatus.Wrap(rpcstatus.Unauthenticated, err)
	}

	earned, err := payout.db.GetTotalEarned(ctx)
	if err != nil {
		return nil, rpcstatus.Wrap(rpcstatus.Internal, err)
	}

	return &multinodepb.EarnedResponse{
		Total: earned,
	}, nil
}
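For illustration, a caller built from the generated DRPC bindings could invoke the new Earned endpoint roughly as follows. This is a sketch: the RequestHeader/ApiKey field names and the helper's shape are assumptions; only the Earned method and the EarnedRequest/EarnedResponse types come from this diff.

import (
	"context"

	"storj.io/drpc"
	"storj.io/storj/private/multinodepb"
)

// totalEarned is a hypothetical caller-side helper, not part of this diff.
func totalEarned(ctx context.Context, conn drpc.Conn, apiKey []byte) (int64, error) {
	client := multinodepb.NewDRPCPayoutClient(conn)
	resp, err := client.Earned(ctx, &multinodepb.EarnedRequest{
		Header: &multinodepb.RequestHeader{ApiKey: apiKey}, // field names assumed
	})
	if err != nil {
		return 0, err
	}
	return resp.Total, nil
}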


@@ -34,6 +34,8 @@ type DB interface {
	StorePayment(ctx context.Context, payment Payment) error
	// GetReceipt retrieves receipt for specific satellite and period.
	GetReceipt(ctx context.Context, satelliteID storj.NodeID, period string) (string, error)
	// GetTotalEarned returns the total amount the node has earned across all paystubs.
	GetTotalEarned(ctx context.Context) (_ int64, err error)
}
// ErrNoPayStubForPeriod represents errors from the payout database.


@@ -250,7 +250,6 @@ func (service *Service) AllSatellitesPayoutPeriod(ctx context.Context, period st
			if !ErrNoPayStubForPeriod.Has(err) {
				return nil, ErrPayoutService.Wrap(err)
			}
			receipt = "no receipt for this period"
		}
		stats, err := service.reputationDB.Get(ctx, satelliteIDs[i])


@@ -393,3 +393,34 @@ func (db *payoutDB) GetReceipt(ctx context.Context, satelliteID storj.NodeID, pe
	return receipt, nil
}

// GetTotalEarned returns the total earned value for the node from all paystubs.
func (db *payoutDB) GetTotalEarned(ctx context.Context) (_ int64, err error) {
	defer mon.Task()(&ctx)(&err)

	query := `SELECT comp_at_rest, comp_get, comp_get_repair, comp_get_audit FROM paystubs`

	rows, err := db.QueryContext(ctx, query)
	if err != nil {
		return 0, err
	}
	defer func() { err = errs.Combine(err, rows.Close()) }()

	var totalEarned int64

	for rows.Next() {
		var compAtRest, compGet, compGetRepair, compGetAudit int64

		err := rows.Scan(&compAtRest, &compGet, &compGetRepair, &compGetAudit)
		if err != nil {
			return 0, ErrPayout.Wrap(err)
		}

		totalEarned += compGetAudit + compGet + compGetRepair + compAtRest
	}
	if err = rows.Err(); err != nil {
		return 0, ErrPayout.Wrap(err)
	}

	return totalEarned, nil
}
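A note on the implementation above: it fetches every paystub row and sums in Go. The same total could be computed by the database in a single aggregate query; a sketch of that variant, under the assumption that the four compensation columns selected above are the only ones that should count toward earnings:

// Equivalent single-query variant (sketch, not part of this diff).
func (db *payoutDB) getTotalEarnedAggregate(ctx context.Context) (_ int64, err error) {
	defer mon.Task()(&ctx)(&err)

	// COALESCE keeps the result at zero when the paystubs table is empty.
	query := `SELECT COALESCE(SUM(comp_at_rest + comp_get + comp_get_repair + comp_get_audit), 0) FROM paystubs`

	var totalEarned int64
	if err := db.QueryRowContext(ctx, query).Scan(&totalEarned); err != nil {
		return 0, ErrPayout.Wrap(err)
	}
	return totalEarned, nil
}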

web/multinode/src/app/types/svg.d.ts (new file)

@@ -0,0 +1,7 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.

declare module '*.svg' {
    const content: any;
    export default content;
}

web/multinode/src/app/types/vue.d.ts (new file)

@@ -0,0 +1,7 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.

declare module '*.vue' {
    import Vue from 'vue';
    export default Vue;
}