Merge remote-tracking branch 'origin/main' into multipart-upload

Change-Id: I075aaff42ca3f5dc538356cedfccd5939c75e791
This commit is contained in:
Michał Niewrzał 2021-02-11 11:46:16 +01:00
commit 908a96ae30
193 changed files with 20912 additions and 5129 deletions

73
.clabot
View File

@ -1,73 +0,0 @@
{
"message": "Thank you for your pull request and welcome to our community. We require contributors to sign our [Contributor License Agreement](https://docs.google.com/forms/d/e/1FAIpQLSdVzD5W8rx-J_jLaPuG31nbOzS8yhNIIu4yHvzonji6NeZ4ig/viewform), and we don't seem to have the users {{usersWithoutCLA}} on file. Once you have signed the CLA, please let us know, so we can manually review and add you to the approved contributors list.",
"contributors": [
"aleitner",
"aligeti",
"Barterio",
"brimstone",
"bryanchriswhite",
"cam-a",
"coyle",
"crawter",
"dylanlott",
"egonelbre",
"fadila82",
"iglesiasbrandon",
"jenlij",
"jhagans3",
"jtolio",
"kaloyan-raev",
"littleskunk",
"mniewrzal",
"mobyvb",
"navillasa",
"nfarah86",
"NikolaiYurchenko",
"phutchins",
"rikysya",
"stefanbenten",
"thepaul",
"wthorp",
"RichardLitt",
"fuskovic",
"keleffew",
"oCroso",
"pgerbes1",
"JessicaGreben",
"benjaminsirb",
"simongui",
"ifraixedes",
"VinozzZ",
"zeebo",
"barlock",
"sndrr",
"BlackDuck888",
"3bl3gamer",
"ethanadams",
"ReneSmeekes",
"VitaliiShpital",
"isaachess",
"azdagron",
"phthano",
"nerdatwork",
"kmozurkewich",
"TopperDEL",
"kristaxox",
"calebcase",
"mbouzi",
"AlexeyALeonov",
"Qweder93",
"cpustejovsky",
"grafael",
"ihaid",
"montyanderson",
"sixcorners",
"alexottoboni",
"dominickmarino",
"hectorj2f",
"nergdron",
"Doom4535",
"harrymaurya05",
"gregoirevda"
]
}

View File

@ -3,7 +3,6 @@ ARG DOCKER_ARCH
FROM node:14.15.3 as ui
WORKDIR /app
COPY web/satellite/ /app
COPY web/marketing/ /app/marketing
# Need to clean up (or ignore) local folders like node_modules, etc...
RUN npm install
RUN npm run build
@ -24,7 +23,6 @@ EXPOSE 10100
WORKDIR /app
COPY --from=ui /app/static /app/static
COPY --from=ui /app/dist /app/dist
COPY --from=ui /app/marketing /app/marketing
COPY --from=ca-cert /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/ca-certificates.crt
COPY release/${TAG}/wasm/access.wasm /app/static/wasm/
COPY release/${TAG}/wasm/wasm_exec.js /app/static/wasm/

View File

@ -95,11 +95,12 @@ func generateInvoicesCSV(ctx context.Context, period compensation.Period, out io
}
invoice := compensation.Invoice{
Period: period,
NodeID: compensation.NodeID(usage.NodeID),
NodeWallet: node.Operator.Wallet,
NodeAddress: nodeAddress,
NodeLastIP: nodeLastIP,
Period: period,
NodeID: compensation.NodeID(usage.NodeID),
NodeWallet: node.Operator.Wallet,
NodeWalletFeatures: node.Operator.WalletFeatures,
NodeAddress: nodeAddress,
NodeLastIP: nodeLastIP,
}
if err := invoice.MergeNodeInfo(nodeInfo); err != nil {

View File

@ -58,11 +58,10 @@ const (
storagenodePeer = 3
// Endpoints.
publicRPC = 0
privateRPC = 1
publicHTTP = 2
privateHTTP = 3
debugHTTP = 9
publicRPC = 0
privateRPC = 1
publicHTTP = 2
debugHTTP = 9
// Satellite specific constants.
redisPort = 4
@ -337,10 +336,6 @@ func newNetwork(flags *Flags) (*Processes, error) {
"--console.open-registration-enabled",
"--console.rate-limit.burst", "100",
"--marketing.base-url", "",
"--marketing.address", net.JoinHostPort(host, port(satellitePeer, i, privateHTTP)),
"--marketing.static-dir", filepath.Join(storjRoot, "web/marketing/"),
"--server.address", apiProcess.Address,
"--server.private-address", net.JoinHostPort(host, port(satellitePeer, i, privateRPC)),

View File

@ -202,13 +202,14 @@ func createURL(newAccessData string, sharePrefixes []sharePrefixExtension) (err
fmt.Println("=========== BROWSER URL ==================================================================")
fmt.Println("REMINDER : Object key must end in '/' when trying to share recursively")
var printFormat string
if p.Path() == "" || !sharePrefixes[0].hasTrailingSlash { // Check if the path is empty (aka sharing the entire bucket) or the path is not a directory or an object that ends in "/".
printFormat = "URL : %s/%s/%s/%s\n"
} else {
printFormat = "URL : %s/%s/%s/%s/\n"
path := p.Path()
// If we're not sharing the entire bucket (the path is empty)
// and the requested share prefix has a trailing slash, then
// make sure to append a trailing slash to the URL.
if path != "" && sharePrefixes[0].hasTrailingSlash {
path += "/"
}
fmt.Printf(printFormat, shareCfg.BaseURL, url.PathEscape(newAccessData), p.Bucket(), p.Path())
fmt.Printf("URL : %s/s/%s/%s/%s\n", shareCfg.BaseURL, url.PathEscape(newAccessData), p.Bucket(), path)
return nil
}

9
go.mod
View File

@ -10,7 +10,7 @@ require (
github.com/cheggaaa/pb/v3 v3.0.5
github.com/fatih/color v1.9.0
github.com/go-redis/redis v6.15.9+incompatible
github.com/gogo/protobuf v1.3.1
github.com/gogo/protobuf v1.3.2
github.com/golang-migrate/migrate/v4 v4.7.0
github.com/google/go-cmp v0.5.2
github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3 // indirect
@ -44,12 +44,11 @@ require (
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221
golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e
golang.org/x/tools v0.0.0-20200923182640-463111b69878 // indirect
google.golang.org/api v0.20.0 // indirect
google.golang.org/protobuf v1.25.0 // indirect
storj.io/common v0.0.0-20210203121719-6c48157d3f5f
storj.io/common v0.0.0-20210208122718-577b1f8a0a0f
storj.io/drpc v0.0.16
storj.io/monkit-jaeger v0.0.0-20200518165323-80778fc3f91b
storj.io/monkit-jaeger v0.0.0-20210205021559-85f08034688c
storj.io/private v0.0.0-20210203200143-9d2ec06f0d3c
storj.io/uplink v1.4.6-0.20210201122710-48b82ce14a37
storj.io/uplink v1.4.6-0.20210209192332-b38c7a40bcd4
)

34
go.sum
View File

@ -172,8 +172,8 @@ github.com/gofrs/uuid v3.2.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRx
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
github.com/gogo/protobuf v1.3.1 h1:DqDEcV5aeaTmdFBePNpYsp3FlcVH/2ISVVM9Qf8PSls=
github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
github.com/golang-migrate/migrate/v4 v4.7.0 h1:gONcHxHApDTKXDyLH/H97gEHmpu1zcnnbAaq2zgrPrs=
github.com/golang-migrate/migrate/v4 v4.7.0/go.mod h1:Qvut3N4xKWjoH3sokBccML6WyHSnggXm/DvMMnTsQIc=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58=
@ -351,7 +351,7 @@ github.com/jtolds/tracetagger/v2 v2.0.0-rc5/go.mod h1:61Fh+XhbBONy+RsqkA+xTtmaFb
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0/go.mod h1:1NbS8ALrpOvjt0rHPNLyCIeMtbizbir8U//inJ+zuB8=
github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
@ -580,6 +580,7 @@ github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c/go.mod h1:lB8K/P019DLNhe
github.com/xdg/stringprep v1.0.0/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y=
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/gopher-lua v0.0.0-20191220021717-ab39c6098bdb h1:ZkM6LRnq40pR1Ox0hTHlnpkcOTuFIDQpZ1IN8rKKhX0=
github.com/yuin/gopher-lua v0.0.0-20191220021717-ab39c6098bdb/go.mod h1:gqRgreBUhTSL0GeU64rtZ3Uq3wtjOa/TB2YfrtkCbVQ=
@ -674,6 +675,7 @@ golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCc
golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0 h1:RM4zey1++hCTbCVQfnWeKs9/IEsaBLA8vTkd0WVtmH4=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@ -701,10 +703,11 @@ golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLL
golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20200822124328-c89045814202 h1:VvcQYSHwXgi7W+TpUR6A9g6Up98WAHf3f/ulnJ62IyA=
golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20201021035429-f5854403a974 h1:IX6qOQeG5uLjB/hjjwjedwfjND0hgjPMMyO1RoIXQNI=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20181106182150-f42d05182288/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
@ -722,7 +725,7 @@ golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a h1:DcqTD9SDLc+1P/r1EmRBwnVsrOwW+kk2vWf9n+1sGhs=
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@ -764,6 +767,7 @@ golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200610111108-226ff32320da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201231184435-2d18734c6014/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210112091331-59c308dcf3cc/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c h1:VwygUrnw9jn88c4u8GD3rZQbqrP/tgas88tPUbBxQrk=
@ -785,7 +789,6 @@ golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGm
golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20181030000716-a0a13e073c7b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
@ -810,8 +813,9 @@ golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtn
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200923182640-463111b69878 h1:VUw1+Jf6KJPf82mbTQMia6HCnNMv2BbAipkEZ4KTcqQ=
golang.org/x/tools v0.0.0-20200923182640-463111b69878/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU=
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a h1:CB3a9Nez8M13wwlr/E2YtwoU+qYHKfC+JrDa45RXXoQ=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/xerrors v0.0.0-20190410155217-1f06c39b4373/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@ -917,17 +921,17 @@ sourcegraph.com/sourcegraph/go-diff v0.5.0/go.mod h1:kuch7UrkMzY0X+p9CRK03kfuPQ2
sourcegraph.com/sqs/pbtypes v0.0.0-20180604144634-d3ebe8f20ae4/go.mod h1:ketZ/q3QxT9HOBeFhu6RdvsftgpsbFHBF5Cas6cDKZ0=
storj.io/common v0.0.0-20200424175742-65ac59022f4f/go.mod h1:pZyXiIE7bGETIRXtfs0nICqMwp7PM8HqnDuyUeldNA0=
storj.io/common v0.0.0-20201026135900-1aaeec90670b/go.mod h1:GqdmNf3fLm2UZX/7Zr0BLFCJ4gFjgm6eHrk/fnmr5jQ=
storj.io/common v0.0.0-20210119231202-8321551aa24d h1:lOLCRtsKISuZlK2lBI5O0uBAc44mp/yO3CtUTXNNSUc=
storj.io/common v0.0.0-20210119231202-8321551aa24d/go.mod h1:KhVByBTvjV2rsaUQsft0pKgBRRMvCcY1JsDqt6BWr3I=
storj.io/common v0.0.0-20210203121719-6c48157d3f5f h1:k0XvINvUag6E3v58QmknmvpgQMBPPNlC9OCUC537XcI=
storj.io/common v0.0.0-20210203121719-6c48157d3f5f/go.mod h1:KhVByBTvjV2rsaUQsft0pKgBRRMvCcY1JsDqt6BWr3I=
storj.io/common v0.0.0-20210208122718-577b1f8a0a0f h1:O2/ia55Q/xhMBJ/WgeTQBEST7h8IWXZE4FEQyiM+RYc=
storj.io/common v0.0.0-20210208122718-577b1f8a0a0f/go.mod h1:b8XP/TdW8OyTZ/J2BDFOIE9KojSUNZgImBFZI99zS04=
storj.io/drpc v0.0.11/go.mod h1:TiFc2obNjL9/3isMW1Rpxjy8V9uE0B2HMeMFGiiI7Iw=
storj.io/drpc v0.0.14/go.mod h1:82nfl+6YwRwF6UG31cEWWUqv/FaKvP5SGqUvoqTxCMA=
storj.io/drpc v0.0.16 h1:9sxypc5lKi/0D69cR21BR0S21+IvXfON8L5nXMVNTwQ=
storj.io/drpc v0.0.16/go.mod h1:zdmQ93nx4Z35u11pQ+GAnBy4DGOK3HJCSOfeh2RryTo=
storj.io/monkit-jaeger v0.0.0-20200518165323-80778fc3f91b h1:Bbg9JCtY6l3HrDxs3BXzT2UYnYCBLqNi6i84Y8QIPUs=
storj.io/monkit-jaeger v0.0.0-20200518165323-80778fc3f91b/go.mod h1:gj4vuCeyCRjRmH8LIrgoyU9Dc9uR6H+/GcDUXmTbf80=
storj.io/monkit-jaeger v0.0.0-20210205021559-85f08034688c h1:6B1nHL8pGEjxzAHoADZBNpYAqLfpqEtmji1YgU4ByDA=
storj.io/monkit-jaeger v0.0.0-20210205021559-85f08034688c/go.mod h1:gj4vuCeyCRjRmH8LIrgoyU9Dc9uR6H+/GcDUXmTbf80=
storj.io/private v0.0.0-20210203200143-9d2ec06f0d3c h1:9sLvfSIZgUhw98J8/3FBOVVJ+huhgYedhYpbrLbE+uk=
storj.io/private v0.0.0-20210203200143-9d2ec06f0d3c/go.mod h1:VHaDkpBka3Pp5rXqFSDHbEmzMaFFW4BYrXJfGIN1Udo=
storj.io/uplink v1.4.6-0.20210201122710-48b82ce14a37 h1:cpmjAqILEM/h0ttabvkvj0YFxarjtG8hTZ7zw2MHpjY=
storj.io/uplink v1.4.6-0.20210201122710-48b82ce14a37/go.mod h1:6a95Ux48DWIhFDaNo3fV3ehyfD9lX//fGK9JiIdFbXo=
storj.io/uplink v1.4.6-0.20210209192332-b38c7a40bcd4 h1:6sNuRj9xZO3iDlFDd42pIECXSG3IIfn4+vVkN+Qtjpo=
storj.io/uplink v1.4.6-0.20210209192332-b38c7a40bcd4/go.mod h1:IXHjzdYHnFChyTE7YSD7UlSz2jyAMlFFFeFoQEgsCmg=

View File

@ -0,0 +1,68 @@
// Copyright (C) 2021 Storj Labs, Inc.
// See LICENSE for copying information.
package controllers
import (
"encoding/json"
"net/http"
"github.com/zeebo/errs"
"go.uber.org/zap"
"storj.io/storj/multinode/payouts"
)
var (
	// ErrPayouts is an internal error type for payouts web api controller.
	ErrPayouts = errs.Class("payouts web api controller error")
)

// Payouts is a web api controller serving payouts-related endpoints.
type Payouts struct {
	log     *zap.Logger      // controller-scoped logger
	service *payouts.Service // domain service that aggregates payout data from nodes
}
// NewPayouts is a constructor for Payouts.
func NewPayouts(log *zap.Logger, service *payouts.Service) *Payouts {
	controller := new(Payouts)
	controller.log = log
	controller.service = service
	return controller
}
// GetAllNodesTotalEarned handles retrieval of the total earned amount
// across all tracked nodes, written to the response as a JSON number.
func (controller *Payouts) GetAllNodesTotalEarned(w http.ResponseWriter, r *http.Request) {
	ctx := r.Context()
	var err error
	defer mon.Task()(&ctx)(&err)

	earned, err := controller.service.GetAllNodesAllTimeEarned(ctx)
	if err != nil {
		controller.log.Error("all node total earned internal error", zap.Error(err))
		controller.serveError(w, http.StatusInternalServerError, ErrPayouts.Wrap(err))
		return
	}

	// Declare the payload type explicitly; otherwise net/http sniffs the
	// body and serves a bare JSON number as text/plain.
	w.Header().Set("Content-Type", "application/json")
	if err = json.NewEncoder(w).Encode(earned); err != nil {
		controller.log.Error("failed to write json response", zap.Error(err))
		return
	}
}
// serveError sets the HTTP status code and sends the error as a JSON body
// of the form {"error": "..."}.
func (controller *Payouts) serveError(w http.ResponseWriter, status int, err error) {
	// Headers must be set before WriteHeader; anything added afterwards is
	// silently ignored by net/http.
	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(status)

	var response struct {
		Error string `json:"error"`
	}
	response.Error = err.Error()

	if err := json.NewEncoder(w).Encode(response); err != nil {
		controller.log.Error("failed to write json error response", zap.Error(err))
	}
}

View File

@ -17,6 +17,7 @@ import (
"storj.io/storj/multinode/console/controllers"
"storj.io/storj/multinode/nodes"
"storj.io/storj/multinode/payouts"
)
var (
@ -36,8 +37,9 @@ type Config struct {
type Server struct {
log *zap.Logger
config Config
nodes *nodes.Service
config Config
nodes *nodes.Service
payouts *payouts.Service
listener net.Listener
http http.Server
@ -46,12 +48,13 @@ type Server struct {
}
// NewServer returns new instance of Multinode Dashboard http server.
func NewServer(log *zap.Logger, config Config, nodes *nodes.Service, listener net.Listener) (*Server, error) {
func NewServer(log *zap.Logger, config Config, nodes *nodes.Service, payouts *payouts.Service, listener net.Listener) (*Server, error) {
server := Server{
log: log,
config: config,
nodes: nodes,
listener: listener,
payouts: payouts,
}
router := mux.NewRouter()
@ -70,6 +73,10 @@ func NewServer(log *zap.Logger, config Config, nodes *nodes.Service, listener ne
nodesRouter.HandleFunc("/{id}", nodesController.UpdateName).Methods(http.MethodPatch)
nodesRouter.HandleFunc("/{id}", nodesController.Delete).Methods(http.MethodDelete)
payoutsController := controllers.NewPayouts(server.log, server.payouts)
payoutsRouter := apiRouter.PathPrefix("/payouts").Subrouter()
payoutsRouter.HandleFunc("/total-earned", payoutsController.GetAllNodesTotalEarned).Methods(http.MethodGet)
if server.config.StaticDir != "" {
router.PathPrefix("/static/").Handler(http.StripPrefix("/static", fs))
router.PathPrefix("/").HandlerFunc(server.appHandler)

View File

@ -0,0 +1,14 @@
// Copyright (C) 2021 Storj Labs, Inc.
// See LICENSE for copying information.
package payouts
import (
"storj.io/common/storj"
)
// SatelliteSummary contains a satellite id together with the amount earned
// on that satellite. The unit of Earned is whatever the payout RPC returns
// (presumably the smallest currency unit — confirm against the node API).
type SatelliteSummary struct {
	SatelliteID storj.NodeID `json:"satelliteID"`
	Earned      int64        `json:"earned"`
}

View File

@ -0,0 +1,165 @@
// Copyright (C) 2021 Storj Labs, Inc.
// See LICENSE for copying information.
package payouts
import (
"context"
"github.com/spacemonkeygo/monkit/v3"
"github.com/zeebo/errs"
"go.uber.org/zap"
"storj.io/common/rpc"
"storj.io/common/storj"
"storj.io/storj/multinode/nodes"
"storj.io/storj/private/multinodepb"
)
var (
	mon = monkit.Package()

	// Error is an error class for payouts service error.
	Error = errs.Class("payouts service error")
)

// Service exposes all payouts related logic.
//
// architecture: Service
type Service struct {
	log    *zap.Logger
	dialer rpc.Dialer // used to open DRPC connections to individual storage nodes
	nodes  nodes.DB   // provides the list of nodes to query (see List usage below)
}
// NewService creates new instance of Service.
func NewService(log *zap.Logger, dialer rpc.Dialer, nodes nodes.DB) *Service {
	service := new(Service)
	service.log = log
	service.dialer = dialer
	service.nodes = nodes
	return service
}
// GetAllNodesAllTimeEarned retrieves all nodes earned amount for all time.
// Unreachable nodes are logged and skipped, so the result is a best-effort
// sum over the nodes that responded.
func (service *Service) GetAllNodesAllTimeEarned(ctx context.Context) (earned int64, err error) {
	defer mon.Task()(&ctx)(&err)

	storageNodes, err := service.nodes.List(ctx)
	if err != nil {
		return 0, Error.Wrap(err)
	}

	var total int64
	for _, storageNode := range storageNodes {
		amount, amountErr := service.getAmount(ctx, storageNode)
		if amountErr != nil {
			service.log.Error("failed to getAmount", zap.Error(amountErr))
			continue
		}
		total += amount
	}

	return total, nil
}
// GetAllNodesEarnedOnSatellite retrieves all nodes earned amount for all
// time, grouped per satellite. Unreachable nodes are logged and skipped.
// The result preserves the ordering produced by NodeIDList.Unique over the
// satellite ids in the order they were first reported.
func (service *Service) GetAllNodesEarnedOnSatellite(ctx context.Context) (earned []SatelliteSummary, err error) {
	defer mon.Task()(&ctx)(&err)

	storageNodes, err := service.nodes.List(ctx)
	if err != nil {
		return nil, Error.Wrap(err)
	}

	// Accumulate totals per satellite in one pass instead of the previous
	// satellites × nodes × entries nested matching loops.
	var listSatellites storj.NodeIDList
	totals := make(map[storj.NodeID]int64)

	for _, node := range storageNodes {
		earnedPerSatellite, err := service.getEarnedOnSatellite(ctx, node)
		if err != nil {
			service.log.Error("failed to getEarnedFromSatellite", zap.Error(err))
			continue
		}
		for _, entry := range earnedPerSatellite.EarnedSatellite {
			listSatellites = append(listSatellites, entry.SatelliteId)
			totals[entry.SatelliteId] += entry.Total
		}
	}

	// Keep the original contract: an empty (non-nil) slice when no
	// satellite reported anything.
	if listSatellites == nil {
		return []SatelliteSummary{}, nil
	}

	uniqueSatelliteIDs := listSatellites.Unique()
	earned = make([]SatelliteSummary, 0, len(uniqueSatelliteIDs))
	for _, satelliteID := range uniqueSatelliteIDs {
		earned = append(earned, SatelliteSummary{
			SatelliteID: satelliteID,
			Earned:      totals[satelliteID],
		})
	}

	return earned, nil
}
// getAmount dials a single node and asks it for its total earned amount.
func (service *Service) getAmount(ctx context.Context, node nodes.Node) (_ int64, err error) {
	nodeURL := storj.NodeURL{ID: node.ID, Address: node.PublicAddress}
	conn, err := service.dialer.DialNodeURL(ctx, nodeURL)
	if err != nil {
		return 0, Error.Wrap(err)
	}
	// Fold any close error into the returned error.
	defer func() { err = errs.Combine(err, conn.Close()) }()

	client := multinodepb.NewDRPCPayoutClient(conn)
	request := &multinodepb.EarnedRequest{
		Header: &multinodepb.RequestHeader{ApiKey: node.APISecret},
	}
	amount, err := client.Earned(ctx, request)
	if err != nil {
		return 0, Error.Wrap(err)
	}
	return amount.Total, nil
}
// getEarnedOnSatellite queries a single node for its all-time earnings
// broken down per satellite.
func (service *Service) getEarnedOnSatellite(ctx context.Context, node nodes.Node) (_ multinodepb.EarnedPerSatelliteResponse, err error) {
	nodeURL := storj.NodeURL{ID: node.ID, Address: node.PublicAddress}
	conn, err := service.dialer.DialNodeURL(ctx, nodeURL)
	if err != nil {
		return multinodepb.EarnedPerSatelliteResponse{}, Error.Wrap(err)
	}
	// Fold any close error into the returned error.
	defer func() { err = errs.Combine(err, conn.Close()) }()

	client := multinodepb.NewDRPCPayoutClient(conn)
	request := &multinodepb.EarnedPerSatelliteRequest{
		Header: &multinodepb.RequestHeader{ApiKey: node.APISecret},
	}
	response, err := client.EarnedPerSatellite(ctx, request)
	if err != nil {
		return multinodepb.EarnedPerSatelliteResponse{}, Error.Wrap(err)
	}
	return *response, nil
}

View File

@ -18,6 +18,7 @@ import (
"storj.io/storj/multinode/console"
"storj.io/storj/multinode/console/server"
"storj.io/storj/multinode/nodes"
"storj.io/storj/multinode/payouts"
"storj.io/storj/private/lifecycle"
)
@ -64,6 +65,11 @@ type Peer struct {
Service *nodes.Service
}
// contains logic of payouts domain.
Payouts struct {
Service *payouts.Service
}
// Web server with web UI.
Console struct {
Listener net.Listener
@ -102,6 +108,14 @@ func New(log *zap.Logger, full *identity.FullIdentity, config Config, db DB) (_
)
}
{ // payouts setup
peer.Payouts.Service = payouts.NewService(
peer.Log.Named("payouts:service"),
peer.Dialer,
peer.DB.Nodes(),
)
}
{ // console setup
peer.Console.Listener, err = net.Listen("tcp", config.Console.Address)
if err != nil {
@ -112,6 +126,7 @@ func New(log *zap.Logger, full *identity.FullIdentity, config Config, db DB) (_
peer.Log.Named("console:endpoint"),
config.Console,
peer.Nodes.Service,
peer.Payouts.Service,
peer.Console.Listener,
)
if err != nil {

View File

@ -9,7 +9,9 @@ import (
"database/sql/driver"
"errors"
"io"
"net"
"strings"
"syscall"
"github.com/jackc/pgx/v4/stdlib"
"github.com/zeebo/errs"
@ -331,6 +333,26 @@ func translateName(name string) string {
// NeedsRetry checks if the error code means a retry is needed,
// borrowed from code in crdb.
func NeedsRetry(err error) bool {
if errors.Is(err, io.EOF) || errors.Is(err, io.ErrUnexpectedEOF) {
mon.Event("crdb_error_eof")
// Currently we don't retry with EOF because it's unclear if
// a query succeeded or failed.
return false
}
if errors.Is(err, syscall.ECONNRESET) {
mon.Event("crdb_error_conn_reset_needed_retry")
return true
}
if errors.Is(err, syscall.ECONNREFUSED) {
mon.Event("crdb_error_conn_refused_needed_retry")
return true
}
var netErr net.Error
if errors.As(err, &netErr) {
mon.Event("crdb_net_error_needed_retry")
return true
}
code := pgerrcode.FromError(err)
// 57P01 occurs when a CRDB node rejoins the cluster but is not ready to accept connections

View File

@ -24,7 +24,7 @@ var _ = time.Kitchen
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
type RequestHeader struct {
ApiKey []byte `protobuf:"bytes,1,opt,name=api_key,json=apiKey,proto3" json:"api_key,omitempty"`
@ -768,6 +768,121 @@ func (m *EarnedResponse) GetTotal() int64 {
return 0
}
// EarnedPerSatelliteRequest is the protoc-generated request message for the
// EarnedPerSatellite RPC. NOTE(review): generated code — regenerate from the
// .proto file instead of editing by hand.
type EarnedPerSatelliteRequest struct {
	Header               *RequestHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
	XXX_NoUnkeyedLiteral struct{}       `json:"-"`
	XXX_unrecognized     []byte         `json:"-"`
	XXX_sizecache        int32          `json:"-"`
}

func (m *EarnedPerSatelliteRequest) Reset()         { *m = EarnedPerSatelliteRequest{} }
func (m *EarnedPerSatelliteRequest) String() string { return proto.CompactTextString(m) }
func (*EarnedPerSatelliteRequest) ProtoMessage()    {}
func (*EarnedPerSatelliteRequest) Descriptor() ([]byte, []int) {
	return fileDescriptor_9a45fd79b06f3a1b, []int{15}
}
func (m *EarnedPerSatelliteRequest) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_EarnedPerSatelliteRequest.Unmarshal(m, b)
}
func (m *EarnedPerSatelliteRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_EarnedPerSatelliteRequest.Marshal(b, m, deterministic)
}
func (m *EarnedPerSatelliteRequest) XXX_Merge(src proto.Message) {
	xxx_messageInfo_EarnedPerSatelliteRequest.Merge(m, src)
}
func (m *EarnedPerSatelliteRequest) XXX_Size() int {
	return xxx_messageInfo_EarnedPerSatelliteRequest.Size(m)
}
func (m *EarnedPerSatelliteRequest) XXX_DiscardUnknown() {
	xxx_messageInfo_EarnedPerSatelliteRequest.DiscardUnknown(m)
}

var xxx_messageInfo_EarnedPerSatelliteRequest proto.InternalMessageInfo

// GetHeader returns the request header, or nil for a nil receiver.
func (m *EarnedPerSatelliteRequest) GetHeader() *RequestHeader {
	if m != nil {
		return m.Header
	}
	return nil
}
// EarnedPerSatelliteResponse is the protoc-generated response message for the
// EarnedPerSatellite RPC: one EarnedSatellite entry per satellite.
// NOTE(review): generated code — regenerate from the .proto file instead of
// editing by hand.
type EarnedPerSatelliteResponse struct {
	EarnedSatellite      []*EarnedSatellite `protobuf:"bytes,1,rep,name=earned_satellite,json=earnedSatellite,proto3" json:"earned_satellite,omitempty"`
	XXX_NoUnkeyedLiteral struct{}           `json:"-"`
	XXX_unrecognized     []byte             `json:"-"`
	XXX_sizecache        int32              `json:"-"`
}

func (m *EarnedPerSatelliteResponse) Reset()         { *m = EarnedPerSatelliteResponse{} }
func (m *EarnedPerSatelliteResponse) String() string { return proto.CompactTextString(m) }
func (*EarnedPerSatelliteResponse) ProtoMessage()    {}
func (*EarnedPerSatelliteResponse) Descriptor() ([]byte, []int) {
	return fileDescriptor_9a45fd79b06f3a1b, []int{16}
}
func (m *EarnedPerSatelliteResponse) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_EarnedPerSatelliteResponse.Unmarshal(m, b)
}
func (m *EarnedPerSatelliteResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_EarnedPerSatelliteResponse.Marshal(b, m, deterministic)
}
func (m *EarnedPerSatelliteResponse) XXX_Merge(src proto.Message) {
	xxx_messageInfo_EarnedPerSatelliteResponse.Merge(m, src)
}
func (m *EarnedPerSatelliteResponse) XXX_Size() int {
	return xxx_messageInfo_EarnedPerSatelliteResponse.Size(m)
}
func (m *EarnedPerSatelliteResponse) XXX_DiscardUnknown() {
	xxx_messageInfo_EarnedPerSatelliteResponse.DiscardUnknown(m)
}

var xxx_messageInfo_EarnedPerSatelliteResponse proto.InternalMessageInfo

// GetEarnedSatellite returns the per-satellite entries, or nil for a nil
// receiver.
func (m *EarnedPerSatelliteResponse) GetEarnedSatellite() []*EarnedSatellite {
	if m != nil {
		return m.EarnedSatellite
	}
	return nil
}
// EarnedSatellite is the protoc-generated message pairing a satellite id with
// the total earned on it. NOTE(review): generated code — regenerate from the
// .proto file instead of editing by hand.
type EarnedSatellite struct {
	Total                int64  `protobuf:"varint,1,opt,name=total,proto3" json:"total,omitempty"`
	SatelliteId          NodeID `protobuf:"bytes,2,opt,name=satellite_id,json=satelliteId,proto3,customtype=NodeID" json:"satellite_id"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

func (m *EarnedSatellite) Reset()         { *m = EarnedSatellite{} }
func (m *EarnedSatellite) String() string { return proto.CompactTextString(m) }
func (*EarnedSatellite) ProtoMessage()    {}
func (*EarnedSatellite) Descriptor() ([]byte, []int) {
	return fileDescriptor_9a45fd79b06f3a1b, []int{17}
}
func (m *EarnedSatellite) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_EarnedSatellite.Unmarshal(m, b)
}
func (m *EarnedSatellite) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_EarnedSatellite.Marshal(b, m, deterministic)
}
func (m *EarnedSatellite) XXX_Merge(src proto.Message) {
	xxx_messageInfo_EarnedSatellite.Merge(m, src)
}
func (m *EarnedSatellite) XXX_Size() int {
	return xxx_messageInfo_EarnedSatellite.Size(m)
}
func (m *EarnedSatellite) XXX_DiscardUnknown() {
	xxx_messageInfo_EarnedSatellite.DiscardUnknown(m)
}

var xxx_messageInfo_EarnedSatellite proto.InternalMessageInfo

// GetTotal returns the total earned, or 0 for a nil receiver.
func (m *EarnedSatellite) GetTotal() int64 {
	if m != nil {
		return m.Total
	}
	return 0
}
func init() {
proto.RegisterType((*RequestHeader)(nil), "multinode.RequestHeader")
proto.RegisterType((*DiskSpaceRequest)(nil), "multinode.DiskSpaceRequest")
@ -787,64 +902,71 @@ func init() {
proto.RegisterType((*TrustedSatellitesResponse_NodeURL)(nil), "multinode.TrustedSatellitesResponse.NodeURL")
proto.RegisterType((*EarnedRequest)(nil), "multinode.EarnedRequest")
proto.RegisterType((*EarnedResponse)(nil), "multinode.EarnedResponse")
proto.RegisterType((*EarnedPerSatelliteRequest)(nil), "multinode.EarnedPerSatelliteRequest")
proto.RegisterType((*EarnedPerSatelliteResponse)(nil), "multinode.EarnedPerSatelliteResponse")
proto.RegisterType((*EarnedSatellite)(nil), "multinode.EarnedSatellite")
}
// init registers the gzipped multinode.proto file descriptor with the
// proto runtime under its canonical file name.
func init() { proto.RegisterFile("multinode.proto", fileDescriptor_9a45fd79b06f3a1b) }
var fileDescriptor_9a45fd79b06f3a1b = []byte{
// 825 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x54, 0xd1, 0x6e, 0xeb, 0x44,
0x10, 0xc5, 0x6d, 0xe2, 0xdc, 0x4c, 0x72, 0xdb, 0x9b, 0xe5, 0x4a, 0xb8, 0x26, 0xb9, 0xa9, 0xdc,
0x8a, 0x06, 0x81, 0x1c, 0x48, 0x9f, 0x90, 0x40, 0xa2, 0xa1, 0xb4, 0x8d, 0x08, 0x50, 0x9c, 0xc2,
0x03, 0x48, 0x8d, 0x36, 0xf1, 0x36, 0x31, 0x75, 0xbc, 0xc6, 0xbb, 0x0e, 0xe4, 0x2f, 0xf8, 0x23,
0xc4, 0x0b, 0xe2, 0x1b, 0x78, 0x28, 0x9f, 0xc1, 0x2b, 0xf2, 0xee, 0xc6, 0x71, 0xda, 0x24, 0xa0,
0xdc, 0x37, 0xcf, 0xcc, 0x99, 0x73, 0xd6, 0xb3, 0xb3, 0x07, 0xf6, 0x27, 0xb1, 0xcf, 0xbd, 0x80,
0xba, 0xc4, 0x0e, 0x23, 0xca, 0x29, 0x2a, 0xa6, 0x09, 0x13, 0x46, 0x74, 0x44, 0x65, 0xda, 0xac,
0x8f, 0x28, 0x1d, 0xf9, 0xa4, 0x29, 0xa2, 0x41, 0x7c, 0xd7, 0xe4, 0xde, 0x84, 0x30, 0x8e, 0x27,
0xa1, 0x04, 0x58, 0x0d, 0x78, 0xee, 0x90, 0x9f, 0x62, 0xc2, 0xf8, 0x15, 0xc1, 0x2e, 0x89, 0xd0,
0x5b, 0x50, 0xc0, 0xa1, 0xd7, 0xbf, 0x27, 0x33, 0x43, 0x3b, 0xd4, 0x1a, 0x65, 0x47, 0xc7, 0xa1,
0xf7, 0x05, 0x99, 0x59, 0xe7, 0xf0, 0xe2, 0xdc, 0x63, 0xf7, 0xbd, 0x10, 0x0f, 0x89, 0x6a, 0x41,
0x1f, 0x80, 0x3e, 0x16, 0x6d, 0x02, 0x5b, 0x6a, 0x19, 0xf6, 0xe2, 0x5c, 0x4b, 0xb4, 0x8e, 0xc2,
0x59, 0xbf, 0x69, 0x50, 0xc9, 0xd0, 0xb0, 0x90, 0x06, 0x8c, 0xa0, 0x2a, 0x14, 0xb1, 0xef, 0xd3,
0x21, 0xe6, 0xc4, 0x15, 0x54, 0xbb, 0xce, 0x22, 0x81, 0xea, 0x50, 0x8a, 0x19, 0x71, 0xfb, 0xa1,
0x47, 0x86, 0x84, 0x19, 0x3b, 0xa2, 0x0e, 0x49, 0xea, 0x5a, 0x64, 0x50, 0x0d, 0x44, 0xd4, 0xe7,
0x11, 0x66, 0x63, 0x63, 0x57, 0xf6, 0x27, 0x99, 0x9b, 0x24, 0x81, 0x10, 0xe4, 0xee, 0x22, 0x42,
0x8c, 0x9c, 0x28, 0x88, 0x6f, 0xa1, 0x38, 0xc5, 0x9e, 0x8f, 0x07, 0x3e, 0x31, 0xf2, 0x4a, 0x71,
0x9e, 0x40, 0x26, 0x3c, 0xa3, 0x53, 0x12, 0x25, 0x14, 0x86, 0x2e, 0x8a, 0x69, 0x6c, 0x5d, 0x43,
0xb5, 0x8d, 0x03, 0xf7, 0x67, 0xcf, 0xe5, 0xe3, 0x2f, 0x69, 0xc0, 0xc7, 0xbd, 0x78, 0x32, 0xc1,
0xd1, 0x6c, 0xfb, 0x99, 0x9c, 0x42, 0x6d, 0x0d, 0xa3, 0x1a, 0x0f, 0x82, 0x9c, 0x38, 0x8a, 0x9c,
0x8c, 0xf8, 0xb6, 0xda, 0xb0, 0xf7, 0x1d, 0x89, 0x98, 0x47, 0x83, 0xed, 0x85, 0xdf, 0x83, 0xfd,
0x94, 0x43, 0x49, 0x19, 0x50, 0x98, 0xca, 0x94, 0x60, 0x29, 0x3a, 0xf3, 0xd0, 0xba, 0x00, 0xd4,
0xc5, 0x8c, 0x7f, 0x46, 0x03, 0x8e, 0x87, 0x7c, 0x7b, 0xd1, 0x5b, 0x78, 0x73, 0x89, 0x47, 0x09,
0x5f, 0x42, 0xd9, 0xc7, 0x8c, 0xf7, 0x87, 0x32, 0xaf, 0xe8, 0x4c, 0x5b, 0x2e, 0xb0, 0x3d, 0x5f,
0x60, 0xfb, 0x66, 0xbe, 0xc0, 0xed, 0x67, 0x7f, 0x3e, 0xd4, 0xdf, 0xf8, 0xf5, 0xef, 0xba, 0xe6,
0x94, 0xfc, 0x05, 0xa1, 0xf5, 0x0b, 0x54, 0x1c, 0x12, 0xc6, 0x1c, 0xf3, 0xd7, 0x99, 0x0d, 0xfa,
0x10, 0xca, 0x0c, 0x73, 0xe2, 0xfb, 0x1e, 0x27, 0x7d, 0xcf, 0x15, 0x5b, 0x57, 0x6e, 0xef, 0x25,
0x9a, 0x7f, 0x3d, 0xd4, 0xf5, 0xaf, 0xa8, 0x4b, 0x3a, 0xe7, 0x4e, 0x29, 0xc5, 0x74, 0x5c, 0xeb,
0x1f, 0x0d, 0x50, 0x56, 0x5a, 0xfd, 0xd9, 0xc7, 0xa0, 0xd3, 0xc0, 0xf7, 0x02, 0xa2, 0xb4, 0x8f,
0x97, 0xb4, 0x1f, 0xc3, 0xed, 0xaf, 0x05, 0xd6, 0x51, 0x3d, 0xe8, 0x23, 0xc8, 0xe3, 0xd8, 0xf5,
0xb8, 0x38, 0x40, 0xa9, 0x75, 0xb4, 0xb9, 0xf9, 0x2c, 0x81, 0x3a, 0xb2, 0xc3, 0x7c, 0x05, 0xba,
0x24, 0x43, 0x2f, 0x21, 0xcf, 0x86, 0x34, 0x92, 0x27, 0xd0, 0x1c, 0x19, 0x98, 0x57, 0x90, 0x17,
0xf8, 0xd5, 0x65, 0xf4, 0x2e, 0xbc, 0x60, 0x31, 0x0b, 0x49, 0x90, 0x5c, 0x7f, 0x5f, 0x02, 0x76,
0x04, 0x60, 0x7f, 0x91, 0xef, 0x25, 0x69, 0xab, 0x0b, 0xc6, 0x4d, 0x14, 0x33, 0x4e, 0xdc, 0xde,
0x7c, 0x1e, 0x6c, 0xfb, 0x0d, 0xf9, 0x43, 0x83, 0x83, 0x15, 0x74, 0x6a, 0x9c, 0x3f, 0x00, 0xe2,
0xb2, 0xd8, 0x4f, 0x87, 0xcf, 0x0c, 0xed, 0x70, 0xb7, 0x51, 0x6a, 0xbd, 0x9f, 0xe1, 0x5e, 0xcb,
0x60, 0x27, 0x77, 0xf7, 0xad, 0xd3, 0x75, 0x2a, 0xfc, 0x31, 0xc4, 0xec, 0x42, 0x41, 0x55, 0xd1,
0x09, 0x14, 0x12, 0x9e, 0xe4, 0xee, 0xb5, 0x95, 0x77, 0xaf, 0x27, 0xe5, 0x8e, 0x9b, 0x3c, 0x19,
0xec, 0xba, 0x11, 0x61, 0xd2, 0x9a, 0x8a, 0xce, 0x3c, 0xb4, 0xce, 0xe0, 0xf9, 0xe7, 0x38, 0x0a,
0x88, 0xbb, 0xfd, 0x2c, 0xde, 0x81, 0xbd, 0x39, 0x85, 0xfa, 0xff, 0x97, 0x90, 0xe7, 0x94, 0x63,
0x5f, 0xb9, 0x81, 0x0c, 0x5a, 0xdf, 0x40, 0xa1, 0xc7, 0x69, 0x84, 0x47, 0x04, 0x5d, 0x40, 0x31,
0x75, 0x58, 0xf4, 0x76, 0x46, 0xe1, 0xb1, 0x7d, 0x9b, 0xd5, 0xd5, 0x45, 0x29, 0xd4, 0x0a, 0xa0,
0x98, 0xda, 0x12, 0xc2, 0x50, 0xce, 0x5a, 0x13, 0x3a, 0xc9, 0xb4, 0x6e, 0xb2, 0x43, 0xb3, 0xf1,
0xdf, 0x40, 0xa5, 0xf7, 0xfb, 0x0e, 0xe4, 0x92, 0xd1, 0xa2, 0x4f, 0xa1, 0xa0, 0x6c, 0x09, 0x1d,
0x64, 0xba, 0x97, 0xed, 0xce, 0x34, 0x57, 0x95, 0xd4, 0x8c, 0xba, 0x50, 0xca, 0x78, 0x0c, 0xaa,
0x65, 0xa0, 0x4f, 0x3d, 0xcc, 0x7c, 0xb5, 0xae, 0xac, 0xd8, 0x3a, 0x00, 0x8b, 0xa7, 0x86, 0xaa,
0x6b, 0x5e, 0xa0, 0xe4, 0xaa, 0x6d, 0x7c, 0x9f, 0xe8, 0x16, 0x2a, 0x4f, 0xf6, 0x12, 0x1d, 0x6d,
0xde, 0x5a, 0x49, 0x7c, 0xfc, 0x7f, 0x56, 0xbb, 0x75, 0x09, 0xfa, 0x35, 0x9e, 0xd1, 0x98, 0xa3,
0x4f, 0x40, 0x97, 0x8b, 0x83, 0xb2, 0x4b, 0xb6, 0xb4, 0x8e, 0xe6, 0xc1, 0x8a, 0x8a, 0x24, 0x6a,
0x1f, 0x7f, 0x6f, 0x31, 0x4e, 0xa3, 0x1f, 0x6d, 0x8f, 0x36, 0xc5, 0x47, 0x33, 0x8c, 0xbc, 0x29,
0xe6, 0xa4, 0x99, 0xb6, 0x84, 0x83, 0x81, 0x2e, 0x6c, 0xf9, 0xf4, 0xdf, 0x00, 0x00, 0x00, 0xff,
0xff, 0xd9, 0x0f, 0xd3, 0x9c, 0x8f, 0x08, 0x00, 0x00,
// 895 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x55, 0x5f, 0x73, 0xdb, 0x44,
0x10, 0x47, 0x49, 0x2c, 0xd7, 0x6b, 0x37, 0x4e, 0x8e, 0xce, 0xa0, 0x88, 0xa4, 0xee, 0xa8, 0x81,
0x86, 0x81, 0x91, 0xc1, 0x7d, 0x62, 0x06, 0x66, 0xa8, 0x49, 0x4b, 0x33, 0xb8, 0x10, 0xe4, 0xc0,
0x43, 0x99, 0xa9, 0xe7, 0x62, 0x6d, 0x1d, 0x51, 0x59, 0x27, 0x74, 0xa7, 0x40, 0xbe, 0x05, 0x9f,
0x83, 0x2f, 0xc1, 0xf0, 0xc2, 0xf0, 0x19, 0x78, 0x28, 0x1f, 0x83, 0x57, 0x46, 0x77, 0x27, 0x59,
0x8e, 0x65, 0x87, 0x31, 0x6f, 0xda, 0xdd, 0xdf, 0xfe, 0x76, 0x6f, 0xff, 0x09, 0xda, 0xd3, 0x34,
0x14, 0x41, 0xc4, 0x7c, 0x74, 0xe3, 0x84, 0x09, 0x46, 0x1a, 0x85, 0xc2, 0x86, 0x09, 0x9b, 0x30,
0xa5, 0xb6, 0x3b, 0x13, 0xc6, 0x26, 0x21, 0x76, 0xa5, 0x74, 0x9e, 0xbe, 0xec, 0x8a, 0x60, 0x8a,
0x5c, 0xd0, 0x69, 0xac, 0x00, 0xce, 0x11, 0xdc, 0xf6, 0xf0, 0xc7, 0x14, 0xb9, 0x78, 0x8a, 0xd4,
0xc7, 0x84, 0xbc, 0x05, 0x75, 0x1a, 0x07, 0xa3, 0x57, 0x78, 0x65, 0x19, 0xf7, 0x8c, 0xa3, 0x96,
0x67, 0xd2, 0x38, 0xf8, 0x12, 0xaf, 0x9c, 0x63, 0xd8, 0x39, 0x0e, 0xf8, 0xab, 0x61, 0x4c, 0xc7,
0xa8, 0x5d, 0xc8, 0x87, 0x60, 0x5e, 0x48, 0x37, 0x89, 0x6d, 0xf6, 0x2c, 0x77, 0x96, 0xd7, 0x1c,
0xad, 0xa7, 0x71, 0xce, 0x6f, 0x06, 0xec, 0x96, 0x68, 0x78, 0xcc, 0x22, 0x8e, 0x64, 0x1f, 0x1a,
0x34, 0x0c, 0xd9, 0x98, 0x0a, 0xf4, 0x25, 0xd5, 0xa6, 0x37, 0x53, 0x90, 0x0e, 0x34, 0x53, 0x8e,
0xfe, 0x28, 0x0e, 0x70, 0x8c, 0xdc, 0xda, 0x90, 0x76, 0xc8, 0x54, 0xa7, 0x52, 0x43, 0x0e, 0x40,
0x4a, 0x23, 0x91, 0x50, 0x7e, 0x61, 0x6d, 0x2a, 0xff, 0x4c, 0x73, 0x96, 0x29, 0x08, 0x81, 0xad,
0x97, 0x09, 0xa2, 0xb5, 0x25, 0x0d, 0xf2, 0x5b, 0x46, 0xbc, 0xa4, 0x41, 0x48, 0xcf, 0x43, 0xb4,
0x6a, 0x3a, 0x62, 0xae, 0x20, 0x36, 0xdc, 0x62, 0x97, 0x98, 0x64, 0x14, 0x96, 0x29, 0x8d, 0x85,
0xec, 0x9c, 0xc2, 0x7e, 0x9f, 0x46, 0xfe, 0x4f, 0x81, 0x2f, 0x2e, 0x9e, 0xb1, 0x48, 0x5c, 0x0c,
0xd3, 0xe9, 0x94, 0x26, 0x57, 0xeb, 0xd7, 0xe4, 0x21, 0x1c, 0x2c, 0x61, 0xd4, 0xe5, 0x21, 0xb0,
0x25, 0x53, 0x51, 0x95, 0x91, 0xdf, 0x4e, 0x1f, 0xb6, 0xbf, 0xc3, 0x84, 0x07, 0x2c, 0x5a, 0x3f,
0xf0, 0xfb, 0xd0, 0x2e, 0x38, 0x74, 0x28, 0x0b, 0xea, 0x97, 0x4a, 0x25, 0x59, 0x1a, 0x5e, 0x2e,
0x3a, 0x4f, 0x80, 0x0c, 0x28, 0x17, 0x9f, 0xb3, 0x48, 0xd0, 0xb1, 0x58, 0x3f, 0xe8, 0x0b, 0x78,
0x73, 0x8e, 0x47, 0x07, 0xfe, 0x02, 0x5a, 0x21, 0xe5, 0x62, 0x34, 0x56, 0x7a, 0x4d, 0x67, 0xbb,
0x6a, 0x80, 0xdd, 0x7c, 0x80, 0xdd, 0xb3, 0x7c, 0x80, 0xfb, 0xb7, 0xfe, 0x7c, 0xdd, 0x79, 0xe3,
0x97, 0xbf, 0x3b, 0x86, 0xd7, 0x0c, 0x67, 0x84, 0xce, 0xcf, 0xb0, 0xeb, 0x61, 0x9c, 0x0a, 0x2a,
0xfe, 0x4f, 0x6d, 0xc8, 0x47, 0xd0, 0xe2, 0x54, 0x60, 0x18, 0x06, 0x02, 0x47, 0x81, 0x2f, 0xa7,
0xae, 0xd5, 0xdf, 0xce, 0x62, 0xfe, 0xf5, 0xba, 0x63, 0x7e, 0xc5, 0x7c, 0x3c, 0x39, 0xf6, 0x9a,
0x05, 0xe6, 0xc4, 0x77, 0xfe, 0x31, 0x80, 0x94, 0x43, 0xeb, 0x97, 0x7d, 0x02, 0x26, 0x8b, 0xc2,
0x20, 0x42, 0x1d, 0xfb, 0x70, 0x2e, 0xf6, 0x75, 0xb8, 0xfb, 0xb5, 0xc4, 0x7a, 0xda, 0x87, 0x7c,
0x0c, 0x35, 0x9a, 0xfa, 0x81, 0x90, 0x09, 0x34, 0x7b, 0xf7, 0x57, 0x3b, 0x3f, 0xca, 0xa0, 0x9e,
0xf2, 0xb0, 0xef, 0x82, 0xa9, 0xc8, 0xc8, 0x1d, 0xa8, 0xf1, 0x31, 0x4b, 0x54, 0x06, 0x86, 0xa7,
0x04, 0xfb, 0x29, 0xd4, 0x24, 0xbe, 0xda, 0x4c, 0xde, 0x83, 0x1d, 0x9e, 0xf2, 0x18, 0xa3, 0xac,
0xfd, 0x23, 0x05, 0xd8, 0x90, 0x80, 0xf6, 0x4c, 0x3f, 0xcc, 0xd4, 0xce, 0x00, 0xac, 0xb3, 0x24,
0xe5, 0x02, 0xfd, 0x61, 0x5e, 0x0f, 0xbe, 0xfe, 0x84, 0xfc, 0x61, 0xc0, 0x5e, 0x05, 0x9d, 0x2e,
0xe7, 0xf7, 0x40, 0x84, 0x32, 0x8e, 0x8a, 0xe2, 0x73, 0xcb, 0xb8, 0xb7, 0x79, 0xd4, 0xec, 0x7d,
0x50, 0xe2, 0x5e, 0xca, 0xe0, 0x66, 0xbd, 0xfb, 0xd6, 0x1b, 0x78, 0xbb, 0xe2, 0x3a, 0xc4, 0x1e,
0x40, 0x5d, 0x5b, 0xc9, 0x03, 0xa8, 0x67, 0x3c, 0x59, 0xef, 0x8d, 0xca, 0xde, 0x9b, 0x99, 0xf9,
0xc4, 0xcf, 0x56, 0x86, 0xfa, 0x7e, 0x82, 0x5c, 0x9d, 0xa6, 0x86, 0x97, 0x8b, 0xce, 0x23, 0xb8,
0xfd, 0x98, 0x26, 0x11, 0xfa, 0xeb, 0xd7, 0xe2, 0x5d, 0xd8, 0xce, 0x29, 0xf4, 0xfb, 0xef, 0x40,
0x4d, 0x30, 0x41, 0x43, 0x7d, 0x0d, 0x94, 0xe0, 0x3c, 0x83, 0x3d, 0x85, 0x3b, 0xc5, 0xa4, 0x78,
0xcf, 0xfa, 0x61, 0xc7, 0x60, 0x57, 0xd1, 0xe9, 0x14, 0x1e, 0xc3, 0x0e, 0x4a, 0xeb, 0xac, 0x03,
0xba, 0x01, 0x76, 0x89, 0x59, 0x11, 0xcc, 0xbc, 0xdb, 0x38, 0xaf, 0x70, 0x9e, 0x43, 0xfb, 0x1a,
0xa6, 0xfa, 0x71, 0x6b, 0xec, 0x62, 0xef, 0x1b, 0xa8, 0x0f, 0x05, 0x4b, 0xe8, 0x04, 0xc9, 0x13,
0x68, 0x14, 0x7f, 0x1c, 0xf2, 0x76, 0x29, 0xc1, 0xeb, 0xbf, 0x33, 0x7b, 0xbf, 0xda, 0xa8, 0x5e,
0xdd, 0x8b, 0xa0, 0x51, 0x9c, 0x69, 0x42, 0xa1, 0x55, 0x3e, 0xd5, 0xe4, 0x41, 0xc9, 0x75, 0xd5,
0xef, 0xc1, 0x3e, 0xba, 0x19, 0xa8, 0xe3, 0xfd, 0xbe, 0x01, 0x5b, 0xd9, 0xd3, 0xc8, 0x67, 0x50,
0xd7, 0x67, 0x9a, 0xec, 0x95, 0xbc, 0xe7, 0xcf, 0xbf, 0x6d, 0x57, 0x99, 0x74, 0xc3, 0x06, 0xd0,
0x2c, 0xdd, 0x5c, 0x72, 0x50, 0x82, 0x2e, 0xde, 0x74, 0xfb, 0xee, 0x32, 0xb3, 0x66, 0x3b, 0x01,
0x98, 0x9d, 0x1e, 0xb2, 0xbf, 0xe4, 0x22, 0x29, 0xae, 0x83, 0x95, 0xf7, 0x8a, 0xbc, 0x80, 0xdd,
0x85, 0x3d, 0x25, 0xf7, 0x57, 0x6f, 0xb1, 0x22, 0x3e, 0xfc, 0x2f, 0xab, 0xde, 0xfb, 0xd5, 0x00,
0xf3, 0x94, 0x5e, 0xb1, 0x54, 0x90, 0x4f, 0xc1, 0x54, 0xd3, 0x46, 0xac, 0x85, 0x21, 0xcd, 0x49,
0xf7, 0x2a, 0x2c, 0x3a, 0x53, 0x0a, 0x64, 0x71, 0x23, 0xc8, 0xe1, 0x82, 0x43, 0xc5, 0xfe, 0xd9,
0xef, 0xdc, 0x80, 0x52, 0x21, 0xfa, 0x87, 0xcf, 0x1d, 0x2e, 0x58, 0xf2, 0x83, 0x1b, 0xb0, 0xae,
0xfc, 0xe8, 0xc6, 0x49, 0x70, 0x49, 0x05, 0x76, 0x0b, 0xf7, 0xf8, 0xfc, 0xdc, 0x94, 0xbf, 0xc2,
0x87, 0xff, 0x06, 0x00, 0x00, 0xff, 0xff, 0xdd, 0x3f, 0xac, 0xd3, 0x03, 0x0a, 0x00, 0x00,
}
// --- DRPC BEGIN ---
@ -1168,6 +1290,7 @@ type DRPCPayoutClient interface {
DRPCConn() drpc.Conn
Earned(ctx context.Context, in *EarnedRequest) (*EarnedResponse, error)
EarnedPerSatellite(ctx context.Context, in *EarnedPerSatelliteRequest) (*EarnedPerSatelliteResponse, error)
}
type drpcPayoutClient struct {
@ -1189,13 +1312,23 @@ func (c *drpcPayoutClient) Earned(ctx context.Context, in *EarnedRequest) (*Earn
return out, nil
}
// EarnedPerSatellite invokes the Payout.EarnedPerSatellite RPC and
// returns the decoded per-satellite earnings response.
func (c *drpcPayoutClient) EarnedPerSatellite(ctx context.Context, in *EarnedPerSatelliteRequest) (*EarnedPerSatelliteResponse, error) {
	out := new(EarnedPerSatelliteResponse)
	err := c.cc.Invoke(ctx, "/multinode.Payout/EarnedPerSatellite", in, out)
	if err != nil {
		return nil, err
	}
	return out, nil
}
// DRPCPayoutServer is the server-side interface for the multinode Payout
// service: a total-earnings query plus a per-satellite earnings breakdown.
type DRPCPayoutServer interface {
	Earned(context.Context, *EarnedRequest) (*EarnedResponse, error)
	EarnedPerSatellite(context.Context, *EarnedPerSatelliteRequest) (*EarnedPerSatelliteResponse, error)
}
// DRPCPayoutDescription describes the Payout service for the DRPC
// server registration machinery.
type DRPCPayoutDescription struct{}

// NumMethods returns the number of methods on the Payout service:
// Earned and EarnedPerSatellite.
//
// The diff artifact that left two NumMethods definitions (return 1 and
// return 2) is resolved here to the post-merge value 2, matching the
// two cases handled by the Method dispatch switch.
func (DRPCPayoutDescription) NumMethods() int { return 2 }
func (DRPCPayoutDescription) Method(n int) (string, drpc.Receiver, interface{}, bool) {
switch n {
@ -1208,6 +1341,15 @@ func (DRPCPayoutDescription) Method(n int) (string, drpc.Receiver, interface{},
in1.(*EarnedRequest),
)
}, DRPCPayoutServer.Earned, true
case 1:
return "/multinode.Payout/EarnedPerSatellite",
func(srv interface{}, ctx context.Context, in1, in2 interface{}) (drpc.Message, error) {
return srv.(DRPCPayoutServer).
EarnedPerSatellite(
ctx,
in1.(*EarnedPerSatelliteRequest),
)
}, DRPCPayoutServer.EarnedPerSatellite, true
default:
return "", nil, nil, false
}
@ -1233,4 +1375,20 @@ func (x *drpcPayoutEarnedStream) SendAndClose(m *EarnedResponse) error {
return x.CloseSend()
}
// DRPCPayout_EarnedPerSatelliteStream is the server-side stream for the
// EarnedPerSatellite RPC; the single response is delivered via SendAndClose.
type DRPCPayout_EarnedPerSatelliteStream interface {
	drpc.Stream
	SendAndClose(*EarnedPerSatelliteResponse) error
}

// drpcPayoutEarnedPerSatelliteStream adapts a raw drpc.Stream to the
// typed stream interface above.
type drpcPayoutEarnedPerSatelliteStream struct {
	drpc.Stream
}

// SendAndClose sends the final response and closes the send side of the stream.
func (x *drpcPayoutEarnedPerSatelliteStream) SendAndClose(m *EarnedPerSatelliteResponse) error {
	if err := x.MsgSend(m); err != nil {
		return err
	}
	return x.CloseSend()
}
// --- DRPC END ---

View File

@ -98,6 +98,7 @@ message TrustedSatellitesResponse {
service Payout {
rpc Earned(EarnedRequest) returns (EarnedResponse);
rpc EarnedPerSatellite(EarnedPerSatelliteRequest) returns (EarnedPerSatelliteResponse);
}
message EarnedRequest {
@ -107,3 +108,14 @@ message EarnedRequest {
message EarnedResponse {
int64 total = 1;
}
// EarnedPerSatelliteRequest asks for earnings broken down per satellite.
message EarnedPerSatelliteRequest {
    RequestHeader header = 1;
}

// EarnedPerSatelliteResponse carries one EarnedSatellite entry per satellite.
message EarnedPerSatelliteResponse {
    repeated EarnedSatellite earned_satellite = 1;
}

// EarnedSatellite pairs an earned total with the satellite it came from.
// satellite_id maps to the non-nullable NodeID custom type in Go.
message EarnedSatellite {
    int64 total = 1;
    bytes satellite_id = 2 [(gogoproto.customtype) = "NodeID", (gogoproto.nullable) = false];
}

View File

@ -0,0 +1,56 @@
// Copyright (C) 2021 Storj Labs, Inc.
// See LICENSE for copying information.
package nodeoperator
import (
"strings"
"github.com/zeebo/errs"
)
// DefaultWalletFeaturesValidation contains default wallet features list validation config.
// '|' is reserved because it is the list separator used when features are
// serialized; ',' is presumably reserved to keep CSV fields intact — TODO confirm.
var DefaultWalletFeaturesValidation = WalletFeaturesValidation{
	MaxListLength:      5,
	MaxFeatureLength:   15,
	ReservedCharacters: []rune{',', '|'},
}

// WalletFeatureValidationError wallet feature validation errors class.
var WalletFeatureValidationError = errs.Class("wallet feature validation error")

// WalletFeaturesValidation contains config for wallet feature validation.
type WalletFeaturesValidation struct {
	MaxListLength      int    // maximum number of features in a list
	MaxFeatureLength   int    // maximum length of one feature (measured in bytes by Validate)
	ReservedCharacters []rune // characters a feature must not contain
}
// Validate checks a wallet features list against the configured limits.
// An empty list is always valid. Otherwise every violation — list too
// long, feature too long, reserved character present — is collected into
// one group and returned wrapped in WalletFeatureValidationError; nil is
// returned when no violation is found.
func (validation *WalletFeaturesValidation) Validate(features []string) error {
	if len(features) == 0 {
		return nil
	}

	var group errs.Group

	if count := len(features); count > validation.MaxListLength {
		group.Add(
			errs.New("features list exceeds maximum length, %d > %d", count, validation.MaxListLength))
	}

	for _, feature := range features {
		if length := len(feature); length > validation.MaxFeatureLength {
			group.Add(
				errs.New("feature %q exceeds maximum length, %d > %d", feature, length, validation.MaxFeatureLength))
		}

		for _, reserved := range validation.ReservedCharacters {
			pos := strings.IndexRune(feature, reserved)
			if pos < 0 {
				continue
			}
			group.Add(errs.New("feature %q contains reserved character '%c' at pos %d", feature, reserved, pos))
		}
	}

	return WalletFeatureValidationError.Wrap(group.Err())
}

View File

@ -0,0 +1,78 @@
// Copyright (C) 2021 Storj Labs, Inc.
// See LICENSE for copying information.
package nodeoperator_test
import (
"testing"
"github.com/stretchr/testify/require"
"storj.io/storj/private/nodeoperator"
)
// TestWalletFeaturesValidation drives wallet-features validation through a
// table of scenarios: an empty list, a list that is too long, a feature that
// is too long, and a feature containing a reserved character.
func TestWalletFeaturesValidation(t *testing.T) {
	cases := []struct {
		name       string
		features   []string
		validation nodeoperator.WalletFeaturesValidation
		wantErr    bool
	}{
		{
			// zero-value validation config, empty input: always valid.
			name:     "empty list",
			features: make([]string, 0),
		},
		{
			name: "exceeds list limit",
			features: []string{
				"feature1",
				"feature2",
				"feature3",
				"feature4",
				"feature5",
				"feature6",
			},
			validation: nodeoperator.WalletFeaturesValidation{
				MaxListLength:    5,
				MaxFeatureLength: 20,
			},
			wantErr: true,
		},
		{
			name: "exceeds feature length",
			features: []string{
				"feature1",
				"feature2",
				"feature3",
				"feature4",
				"feature5",
				"invalidFeature",
			},
			validation: nodeoperator.WalletFeaturesValidation{
				MaxListLength:    6,
				MaxFeatureLength: 10,
			},
			wantErr: true,
		},
		{
			name: "contains reserved characters",
			features: []string{
				"feature1",
				"feature2",
				"feature3",
				"feature4",
				"feature5",
				"feature|",
			},
			validation: nodeoperator.WalletFeaturesValidation{
				MaxListLength:      6,
				MaxFeatureLength:   10,
				ReservedCharacters: []rune{'|'},
			},
			wantErr: true,
		},
	}

	for _, tc := range cases {
		tc := tc
		t.Run(tc.name, func(t *testing.T) {
			err := tc.validation.Validate(tc.features)
			if tc.wantErr {
				require.Error(t, err)
			} else {
				require.NoError(t, err)
			}
		})
	}
}

View File

@ -7,6 +7,8 @@ import (
"context"
"database/sql"
"github.com/zeebo/errs"
"storj.io/private/traces"
)
@ -37,7 +39,7 @@ type sqlStmt struct {
}
// Close stops tracking the prepared statement and closes it.
// Both teardown steps are always attempted; their errors are combined so
// neither failure is lost.
//
// The merge left a stale `return s.stmt.Close()` ahead of the combined
// return, which made the tracker cleanup unreachable; only the combined
// form is kept.
func (s *sqlStmt) Close() error {
	return errs.Combine(s.tracker.close(), s.stmt.Close())
}
func (s *sqlStmt) Exec(ctx context.Context, args ...interface{}) (sql.Result, error) {

View File

@ -24,7 +24,6 @@ import (
"storj.io/common/identity/testidentity"
"storj.io/common/pb"
"storj.io/common/storj"
"storj.io/storj/pkg/server"
"storj.io/storj/private/dbutil/pgutil"
"storj.io/storj/satellite/overlay"
"storj.io/storj/satellite/satellitedb/satellitedbtest"
@ -84,8 +83,6 @@ type Planet struct {
StorageNodes []*StorageNode
Uplinks []*Uplink
ReferralManager *server.Server
identities *testidentity.Identities
whitelistPath string // TODO: in-memory
@ -159,11 +156,6 @@ func NewCustom(ctx context.Context, log *zap.Logger, config Config, satelliteDat
return nil, errs.Combine(err, planet.Shutdown())
}
planet.ReferralManager, err = planet.newReferralManager()
if err != nil {
return nil, errs.Combine(err, planet.Shutdown())
}
planet.Satellites, err = planet.newSatellites(ctx, config.SatelliteCount, satelliteDatabases)
if err != nil {
return nil, errs.Combine(err, planet.Shutdown())
@ -198,14 +190,6 @@ func (planet *Planet) Start(ctx context.Context) {
})
})
if planet.ReferralManager != nil {
pprof.Do(ctx, pprof.Labels("peer", "referral-manager"), func(ctx context.Context) {
planet.run.Go(func() error {
return planet.ReferralManager.Run(ctx)
})
})
}
for i := range planet.peers {
peer := &planet.peers[i]
peer.ctx, peer.cancel = context.WithCancel(ctx)
@ -332,10 +316,6 @@ func (planet *Planet) Shutdown() error {
errlist.Add(db.Close())
}
if planet.ReferralManager != nil {
errlist.Add(planet.ReferralManager.Close())
}
errlist.Add(planet.VersionControl.Close())
errlist.Add(os.RemoveAll(planet.directory))

View File

@ -10,7 +10,6 @@ import (
"storj.io/common/identity/testidentity"
"storj.io/common/memory"
"storj.io/common/pb"
"storj.io/common/storj"
"storj.io/storj/satellite"
"storj.io/storj/satellite/metainfo"
@ -25,8 +24,6 @@ type Reconfigure struct {
SatelliteMetabaseDB func(log *zap.Logger, index int, db metainfo.MetabaseDB) (metainfo.MetabaseDB, error)
Satellite func(log *zap.Logger, index int, config *satellite.Config)
ReferralManagerServer func(log *zap.Logger) pb.DRPCReferralManagerServer
StorageNodeDB func(index int, db storagenode.DB, log *zap.Logger) (storagenode.DB, error)
StorageNode func(index int, config *storagenode.Config)
UniqueIPCount int

View File

@ -1,97 +0,0 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information
package testplanet
import (
"context"
"os"
"path/filepath"
"storj.io/common/pb"
"storj.io/common/peertls/extensions"
"storj.io/common/peertls/tlsopts"
"storj.io/common/testrand"
"storj.io/storj/pkg/server"
)
// DefaultReferralManagerServer implements the default behavior of a mock referral manager.
type DefaultReferralManagerServer struct {
	tokenCount int // number of token secrets GetTokens hands back; set via SetTokenCount
}
// newReferralManager initializes a referral manager server.
// It returns (nil, nil) when the testplanet configuration does not supply a
// custom ReferralManagerServer endpoint, so callers must tolerate a nil server.
func (planet *Planet) newReferralManager() (*server.Server, error) {
	prefix := "referralmanager"
	log := planet.log.Named(prefix)
	referralmanagerDir := filepath.Join(planet.directory, prefix)

	// Working directory for the revocation database.
	if err := os.MkdirAll(referralmanagerDir, 0700); err != nil {
		return nil, err
	}

	identity, err := planet.NewIdentity()
	if err != nil {
		return nil, err
	}

	// Bind ephemeral localhost ports; tests discover the actual address
	// through referralmanager.Addr() afterwards.
	config := server.Config{
		Address:        "127.0.0.1:0",
		PrivateAddress: "127.0.0.1:0",
		Config: tlsopts.Config{
			RevocationDBURL:    "bolt://" + filepath.Join(referralmanagerDir, "revocation.db"),
			UsePeerCAWhitelist: true,
			PeerIDVersions:     "*",
			Extensions: extensions.Config{
				Revocation:          false,
				WhitelistSignedLeaf: false,
			},
		},
	}

	var endpoints pb.DRPCReferralManagerServer
	// only create a referral manager server if testplanet was reconfigured with a custom referral manager endpoint
	if planet.config.Reconfigure.ReferralManagerServer != nil {
		endpoints = planet.config.Reconfigure.ReferralManagerServer(log)
	} else {
		return nil, nil
	}

	tlsOptions, err := tlsopts.NewOptions(identity, config.Config, nil)
	if err != nil {
		return nil, err
	}

	referralmanager, err := server.New(log, tlsOptions, config)
	if err != nil {
		return nil, err
	}

	// Attach the configured endpoint implementation to the DRPC mux.
	if err := pb.DRPCRegisterReferralManager(referralmanager.DRPC(), endpoints); err != nil {
		return nil, err
	}

	log.Debug("id=" + identity.ID.String() + " addr=" + referralmanager.Addr().String())

	return referralmanager, nil
}
// GetTokens implements a mock GetTokens endpoint that returns a number of referral tokens. By default, it returns 0 tokens.
func (server *DefaultReferralManagerServer) GetTokens(ctx context.Context, req *pb.GetTokensRequest) (*pb.GetTokensResponse, error) {
	// Build tokenCount fresh random UUIDs as token secrets.
	secrets := make([][]byte, 0, server.tokenCount)
	for len(secrets) < server.tokenCount {
		id := testrand.UUID()
		secrets = append(secrets, id[:])
	}
	return &pb.GetTokensResponse{
		TokenSecrets: secrets,
	}, nil
}
// RedeemToken implements a mock RedeemToken endpoint.
// It accepts every request unconditionally and returns an empty response.
func (server *DefaultReferralManagerServer) RedeemToken(ctx context.Context, req *pb.RedeemTokenRequest) (*pb.RedeemTokenResponse, error) {
	return &pb.RedeemTokenResponse{}, nil
}
// SetTokenCount sets the number of tokens GetTokens endpoint should return.
// Intended for tests to tune the mock's behavior before issuing requests.
func (server *DefaultReferralManagerServer) SetTokenCount(tokenCount int) {
	server.tokenCount = tokenCount
}

View File

@ -47,7 +47,6 @@ import (
"storj.io/storj/satellite/gracefulexit"
"storj.io/storj/satellite/inspector"
"storj.io/storj/satellite/mailservice"
"storj.io/storj/satellite/marketingweb"
"storj.io/storj/satellite/metainfo"
"storj.io/storj/satellite/metainfo/expireddeletion"
"storj.io/storj/satellite/metainfo/piecedeletion"
@ -165,11 +164,6 @@ type Satellite struct {
Endpoint *consoleweb.Server
}
Marketing struct {
Listener net.Listener
Endpoint *marketingweb.Server
}
NodeStats struct {
Endpoint *nodestats.Endpoint
}
@ -210,7 +204,7 @@ func (system *Satellite) AddUser(ctx context.Context, newUser console.CreateUser
}
newUser.Password = newUser.FullName
user, err := system.API.Console.Service.CreateUser(ctx, newUser, regToken.Secret, "")
user, err := system.API.Console.Service.CreateUser(ctx, newUser, regToken.Secret)
if err != nil {
return nil, err
}
@ -603,10 +597,6 @@ func (planet *Planet) newSatellite(ctx context.Context, prefix string, index int
NumLimits: 10,
},
},
Marketing: marketingweb.Config{
Address: "127.0.0.1:0",
StaticDir: filepath.Join(developmentRoot, "web/marketing"),
},
Version: planet.NewVersionConfig(),
GracefulExit: gracefulexit.Config{
Enabled: true,
@ -627,12 +617,6 @@ func (planet *Planet) newSatellite(ctx context.Context, prefix string, index int
},
}
if planet.ReferralManager != nil {
config.Referrals.ReferralManagerURL = storj.NodeURL{
ID: planet.ReferralManager.Identity().ID,
Address: planet.ReferralManager.Addr().String(),
}
}
if planet.config.Reconfigure.Satellite != nil {
planet.config.Reconfigure.Satellite(log, index, &config)
}
@ -753,9 +737,6 @@ func createNewSystem(name string, log *zap.Logger, config satellite.Config, peer
system.ProjectLimits.Cache = api.ProjectLimits.Cache
system.Marketing.Listener = api.Marketing.Listener
system.Marketing.Endpoint = api.Marketing.Endpoint
system.GracefulExit.Chore = peer.GracefulExit.Chore
system.GracefulExit.Endpoint = api.GracefulExit.Endpoint

View File

@ -126,8 +126,9 @@ func (planet *Planet) newStorageNode(ctx context.Context, prefix string, index,
LocalTimeCheck: false,
},
Operator: storagenode.OperatorConfig{
Email: prefix + "@mail.test",
Wallet: "0x" + strings.Repeat("00", 20),
Email: prefix + "@mail.test",
Wallet: "0x" + strings.Repeat("00", 20),
WalletFeatures: nil,
},
Storage: piecestore.OldConfig{
Path: filepath.Join(storageDir, "pieces/"),

View File

@ -40,7 +40,6 @@ import (
"storj.io/storj/satellite/internalpb"
"storj.io/storj/satellite/mailservice"
"storj.io/storj/satellite/mailservice/simulate"
"storj.io/storj/satellite/marketingweb"
"storj.io/storj/satellite/metainfo"
"storj.io/storj/satellite/metainfo/piecedeletion"
"storj.io/storj/satellite/nodestats"
@ -48,7 +47,6 @@ import (
"storj.io/storj/satellite/overlay"
"storj.io/storj/satellite/payments"
"storj.io/storj/satellite/payments/stripecoinpayments"
"storj.io/storj/satellite/referrals"
"storj.io/storj/satellite/repair/irreparable"
"storj.io/storj/satellite/rewards"
"storj.io/storj/satellite/snopayouts"
@ -136,10 +134,6 @@ type API struct {
Stripe stripecoinpayments.StripeClient
}
Referrals struct {
Service *referrals.Service
}
Console struct {
Listener net.Listener
Service *console.Service
@ -148,9 +142,6 @@ type API struct {
Marketing struct {
PartnersService *rewards.PartnersService
Listener net.Listener
Endpoint *marketingweb.Server
}
NodeStats struct {
@ -360,7 +351,7 @@ func NewAPI(log *zap.Logger, full *identity.FullIdentity, db DB,
}
}
{ // setup marketing portal
{ // setup marketing partners service
peer.Marketing.PartnersService = rewards.NewPartnersService(
peer.Log.Named("partners"),
rewards.DefaultPartnersDB,
@ -370,28 +361,6 @@ func NewAPI(log *zap.Logger, full *identity.FullIdentity, db DB,
"https://europe-west-1.tardigrade.io/",
},
)
peer.Marketing.Listener, err = net.Listen("tcp", config.Marketing.Address)
if err != nil {
return nil, errs.Combine(err, peer.Close())
}
peer.Marketing.Endpoint, err = marketingweb.NewServer(
peer.Log.Named("marketing:endpoint"),
config.Marketing,
peer.DB.Rewards(),
peer.Marketing.PartnersService,
peer.Marketing.Listener,
)
if err != nil {
return nil, errs.Combine(err, peer.Close())
}
peer.Servers.Add(lifecycle.Item{
Name: "marketing:endpoint",
Run: peer.Marketing.Endpoint.Run,
Close: peer.Marketing.Endpoint.Close,
})
}
{ // setup metainfo
@ -597,15 +566,6 @@ func NewAPI(log *zap.Logger, full *identity.FullIdentity, db DB,
return nil, errs.New("Auth token secret required")
}
peer.Referrals.Service = referrals.NewService(
peer.Log.Named("referrals:service"),
signing.SignerFromFullIdentity(peer.Identity),
config.Referrals,
peer.Dialer,
peer.DB.Console().Users(),
consoleConfig.PasswordCost,
)
peer.Console.Service, err = console.NewService(
peer.Log.Named("console:service"),
&consoleauth.Hmac{Secret: []byte(consoleConfig.AuthTokenSecret)},
@ -613,7 +573,6 @@ func NewAPI(log *zap.Logger, full *identity.FullIdentity, db DB,
peer.DB.ProjectAccounting(),
peer.Accounting.ProjectUsage,
peer.DB.Buckets(),
peer.DB.Rewards(),
peer.Marketing.PartnersService,
peer.Payments.Accounts,
consoleConfig.Config,
@ -628,7 +587,6 @@ func NewAPI(log *zap.Logger, full *identity.FullIdentity, db DB,
consoleConfig,
peer.Console.Service,
peer.Mail.Service,
peer.Referrals.Service,
peer.Marketing.PartnersService,
peer.Console.Listener,
config.Payments.StripeCoinPayments.StripePublicKey,

View File

@ -13,35 +13,36 @@ import (
// Invoice holds the calculations for the amount required to pay to a node
// for a given pay period.
type Invoice struct {
Period Period `csv:"period"` // The payment period
NodeID NodeID `csv:"node-id"` // The node ID
NodeCreatedAt UTCDate `csv:"node-created-at"` // When the node was created
NodeDisqualified *UTCDate `csv:"node-disqualified"` // When and if the node was disqualified
NodeGracefulExit *UTCDate `csv:"node-gracefulexit"` // When and if the node finished a graceful exit
NodeWallet string `csv:"node-wallet"` // The node's wallet address
NodeAddress string `csv:"node-address"` // The node's TODO
NodeLastIP string `csv:"node-last-ip"` // The last known ip the node had
Codes Codes `csv:"codes"` // Any codes providing context to the invoice
UsageAtRest float64 `csv:"usage-at-rest"` // Byte-hours provided during the payment period
UsageGet int64 `csv:"usage-get"` // Number of bytes served in GET requests
UsagePut int64 `csv:"usage-put"` // Number of bytes served in PUT requests
UsageGetRepair int64 `csv:"usage-get-repair"` // Number of bytes served in GET_REPAIR requests
UsagePutRepair int64 `csv:"usage-put-repair"` // Number of bytes served in PUT_REPAIR requests
UsageGetAudit int64 `csv:"usage-get-audit"` // Number of bytes served in GET_AUDIT requests
CompAtRest currency.MicroUnit `csv:"comp-at-rest"` // Compensation for usage-at-rest
CompGet currency.MicroUnit `csv:"comp-get"` // Compensation for usage-get
CompPut currency.MicroUnit `csv:"comp-put"` // Compensation for usage-put
CompGetRepair currency.MicroUnit `csv:"comp-get-repair"` // Compensation for usage-get-repair
CompPutRepair currency.MicroUnit `csv:"comp-put-repair"` // Compensation for usage-put-repair
CompGetAudit currency.MicroUnit `csv:"comp-get-audit"` // Compensation for usage-get-audit
SurgePercent int64 `csv:"surge-percent"` // Surge percent used to calculate compensation, or 0 if no surge
Owed currency.MicroUnit `csv:"owed"` // Amount we intend to pay to the node (sum(comp-*) - held + disposed)
Held currency.MicroUnit `csv:"held"` // Amount held from sum(comp-*) for this period
Disposed currency.MicroUnit `csv:"disposed"` // Amount of owed that is due to graceful-exit or held period ending
TotalHeld currency.MicroUnit `csv:"total-held"` // Total amount ever held from the node
TotalDisposed currency.MicroUnit `csv:"total-disposed"` // Total amount ever disposed to the node
TotalPaid currency.MicroUnit `csv:"total-paid"` // Total amount ever paid to the node (but not necessarily dispensed)
TotalDistributed currency.MicroUnit `csv:"total-distributed"` // Total amount ever distributed to the node (always less than or equal to paid)
Period Period `csv:"period"` // The payment period
NodeID NodeID `csv:"node-id"` // The node ID
NodeCreatedAt UTCDate `csv:"node-created-at"` // When the node was created
NodeDisqualified *UTCDate `csv:"node-disqualified"` // When and if the node was disqualified
NodeGracefulExit *UTCDate `csv:"node-gracefulexit"` // When and if the node finished a graceful exit
NodeWallet string `csv:"node-wallet"` // The node's wallet address
NodeWalletFeatures WalletFeatures `csv:"node-wallet-features"` // The node's wallet features
NodeAddress string `csv:"node-address"` // The node's TODO
NodeLastIP string `csv:"node-last-ip"` // The last known ip the node had
Codes Codes `csv:"codes"` // Any codes providing context to the invoice
UsageAtRest float64 `csv:"usage-at-rest"` // Byte-hours provided during the payment period
UsageGet int64 `csv:"usage-get"` // Number of bytes served in GET requests
UsagePut int64 `csv:"usage-put"` // Number of bytes served in PUT requests
UsageGetRepair int64 `csv:"usage-get-repair"` // Number of bytes served in GET_REPAIR requests
UsagePutRepair int64 `csv:"usage-put-repair"` // Number of bytes served in PUT_REPAIR requests
UsageGetAudit int64 `csv:"usage-get-audit"` // Number of bytes served in GET_AUDIT requests
CompAtRest currency.MicroUnit `csv:"comp-at-rest"` // Compensation for usage-at-rest
CompGet currency.MicroUnit `csv:"comp-get"` // Compensation for usage-get
CompPut currency.MicroUnit `csv:"comp-put"` // Compensation for usage-put
CompGetRepair currency.MicroUnit `csv:"comp-get-repair"` // Compensation for usage-get-repair
CompPutRepair currency.MicroUnit `csv:"comp-put-repair"` // Compensation for usage-put-repair
CompGetAudit currency.MicroUnit `csv:"comp-get-audit"` // Compensation for usage-get-audit
SurgePercent int64 `csv:"surge-percent"` // Surge percent used to calculate compensation, or 0 if no surge
Owed currency.MicroUnit `csv:"owed"` // Amount we intend to pay to the node (sum(comp-*) - held + disposed)
Held currency.MicroUnit `csv:"held"` // Amount held from sum(comp-*) for this period
Disposed currency.MicroUnit `csv:"disposed"` // Amount of owed that is due to graceful-exit or held period ending
TotalHeld currency.MicroUnit `csv:"total-held"` // Total amount ever held from the node
TotalDisposed currency.MicroUnit `csv:"total-disposed"` // Total amount ever disposed to the node
TotalPaid currency.MicroUnit `csv:"total-paid"` // Total amount ever paid to the node (but not necessarily dispensed)
TotalDistributed currency.MicroUnit `csv:"total-distributed"` // Total amount ever distributed to the node (always less than or equal to paid)
}
// MergeNodeInfo updates the fields representing the node information into the invoice.

View File

@ -0,0 +1,37 @@
// Copyright (C) 2021 Storj Labs, Inc.
// See LICENSE for copying information.
package compensation
import "strings"
// WalletFeatures represents a storage node operator wallet's feature list.
// It is serialized as a single "|"-separated string in CSV invoices.
type WalletFeatures []string

// DecodeWalletFeatures decodes a wallet features list string separated by "|".
// An empty input decodes to a nil list. The error result is currently always
// nil but is kept in the signature for forward-compatible validation.
func DecodeWalletFeatures(s string) (WalletFeatures, error) {
	if s == "" {
		return nil, nil
	}
	return strings.Split(s, "|"), nil
}

// String returns the features joined by "|"; it is the inverse of
// DecodeWalletFeatures for non-empty lists.
func (features WalletFeatures) String() string {
	return strings.Join(features, "|")
}

// UnmarshalCSV reads the WalletFeatures in CSV form.
func (features *WalletFeatures) UnmarshalCSV(s string) error {
	v, err := DecodeWalletFeatures(s)
	if err != nil {
		return err
	}
	*features = v
	return nil
}

// MarshalCSV returns the CSV form of the WalletFeatures.
func (features WalletFeatures) MarshalCSV() (string, error) {
	return features.String(), nil
}

View File

@ -60,14 +60,14 @@ func SetPermission(key string, buckets []string, permission Permission) (*macaro
return nil, errs.New("invalid time range")
}
caveat := macaroon.Caveat{
caveat := macaroon.WithNonce(macaroon.Caveat{
DisallowReads: !permission.AllowDownload,
DisallowWrites: !permission.AllowUpload,
DisallowLists: !permission.AllowList,
DisallowDeletes: !permission.AllowDelete,
NotBefore: notBefore,
NotAfter: notAfter,
}
})
for _, b := range buckets {
caveat.AllowedPaths = append(caveat.AllowedPaths, &macaroon.Caveat_Path{

View File

@ -149,7 +149,6 @@ func (a *Auth) Register(w http.ResponseWriter, r *http.Request) {
Password: registerData.Password,
},
secret,
registerData.ReferrerUserID,
)
if err != nil {
a.serveJSONError(w, err)

View File

@ -1,149 +0,0 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.
package consoleapi
import (
"encoding/json"
"net/http"
"github.com/zeebo/errs"
"go.uber.org/zap"
"storj.io/storj/private/post"
"storj.io/storj/satellite/console"
"storj.io/storj/satellite/console/consoleweb/consoleql"
"storj.io/storj/satellite/mailservice"
"storj.io/storj/satellite/referrals"
)
// ErrReferralsAPI - console referrals api error type.
var ErrReferralsAPI = errs.Class("console referrals api error")

// Referrals is an api controller that exposes all referrals functionality.
//
// It wires together the referrals service (token lookup), the console
// service (user creation and activation tokens), and the mail service
// (activation e-mails).
type Referrals struct {
	log            *zap.Logger
	service        *referrals.Service
	consoleService *console.Service
	mailService    *mailservice.Service
	// ExternalAddress is the satellite's public URL; used below to build
	// activation links for newly registered users.
	ExternalAddress string
}

// NewReferrals is a constructor for api referrals controller.
func NewReferrals(log *zap.Logger, service *referrals.Service, consoleService *console.Service, mailService *mailservice.Service, externalAddress string) *Referrals {
	return &Referrals{
		log:             log,
		service:         service,
		consoleService:  consoleService,
		mailService:     mailService,
		ExternalAddress: externalAddress,
	}
}
// GetTokens returns referral tokens based on user ID.
func (controller *Referrals) GetTokens(w http.ResponseWriter, r *http.Request) {
	ctx := r.Context()
	var err error
	defer mon.Task()(&ctx)(&err)

	// Resolve the authenticated user from the request context.
	auth, err := console.GetAuth(ctx)
	if err != nil {
		controller.serveJSONError(w, err)
		return
	}

	tokens, err := controller.service.GetTokens(ctx, &auth.User.ID)
	if err != nil {
		controller.serveJSONError(w, err)
		return
	}

	w.Header().Set("Content-Type", "application/json")
	if err = json.NewEncoder(w).Encode(tokens); err != nil {
		controller.log.Error("token handler could not encode token response", zap.Error(ErrReferralsAPI.Wrap(err)))
	}
}
// Register creates new user, sends activation e-mail.
func (controller *Referrals) Register(w http.ResponseWriter, r *http.Request) {
	ctx := r.Context()
	var err error
	defer mon.Task()(&ctx)(&err)

	// Payload expected from the registration form; ReferralToken ties the
	// new account to a referrer.
	var registerData struct {
		FullName      string `json:"fullName"`
		ShortName     string `json:"shortName"`
		Email         string `json:"email"`
		Password      string `json:"password"`
		ReferralToken string `json:"referralToken"`
	}

	err = json.NewDecoder(r.Body).Decode(&registerData)
	if err != nil {
		controller.serveJSONError(w, err)
		return
	}

	user, err := controller.service.CreateUser(ctx, registerData)
	if err != nil {
		controller.serveJSONError(w, err)
		return
	}

	// The account starts inactive; the user must follow the e-mailed
	// activation link containing this token.
	token, err := controller.consoleService.GenerateActivationToken(ctx, user.ID, user.Email)
	if err != nil {
		controller.serveJSONError(w, err)
		return
	}

	link := controller.ExternalAddress + "activation/?token=" + token
	// Prefer the short name in the greeting; fall back to the full name.
	userName := user.ShortName
	if user.ShortName == "" {
		userName = user.FullName
	}

	// Sent asynchronously; no error is returned or checked here, so a mail
	// delivery failure does not fail the registration request.
	controller.mailService.SendRenderedAsync(
		ctx,
		[]post.Address{{Address: user.Email, Name: userName}},
		&consoleql.AccountActivationEmail{
			ActivationLink: link,
			Origin:         controller.ExternalAddress,
		},
	)

	// Respond with the new user's ID.
	w.Header().Set("Content-Type", "application/json")
	err = json.NewEncoder(w).Encode(user.ID)
	if err != nil {
		controller.log.Error("registration handler could not encode userID", zap.Error(ErrReferralsAPI.Wrap(err)))
		return
	}
}
// serveJSONError writes JSON error to response output stream.
func (controller *Referrals) serveJSONError(w http.ResponseWriter, err error) {
	// Map the error to a status code before writing any body bytes.
	w.WriteHeader(controller.getStatusCode(err))

	var response struct {
		Error string `json:"error"`
	}
	response.Error = err.Error()

	if encodeErr := json.NewEncoder(w).Encode(response); encodeErr != nil {
		controller.log.Error("failed to write json error response", zap.Error(ErrReferralsAPI.Wrap(encodeErr)))
	}
}
// getStatusCode maps a console error class to the HTTP status code to serve.
// Unrecognized errors default to 500 Internal Server Error.
func (controller *Referrals) getStatusCode(err error) int {
	if console.ErrValidation.Has(err) {
		return http.StatusBadRequest
	}
	if console.ErrUnauthorized.Has(err) {
		return http.StatusUnauthorized
	}
	return http.StatusInternalServerError
}

View File

@ -106,7 +106,6 @@ func TestGraphqlMutation(t *testing.T) {
db.ProjectAccounting(),
projectUsage,
db.Buckets(),
db.Rewards(),
partnersService,
paymentsService.Accounts(),
console.Config{PasswordCost: console.TestPasswordCost, DefaultProjectLimit: 5},
@ -136,12 +135,11 @@ func TestGraphqlMutation(t *testing.T) {
PartnerID: "120bf202-8252-437e-ac12-0e364bee852e",
Password: "123a123",
}
refUserID := ""
regToken, err := service.CreateRegToken(ctx, 1)
require.NoError(t, err)
rootUser, err := service.CreateUser(ctx, createUser, regToken.Secret, refUserID)
rootUser, err := service.CreateUser(ctx, createUser, regToken.Secret)
require.NoError(t, err)
require.Equal(t, createUser.PartnerID, rootUser.PartnerID.String())
@ -227,7 +225,7 @@ func TestGraphqlMutation(t *testing.T) {
FullName: "User1",
Email: "u1@mail.test",
Password: "123a123",
}, regTokenUser1.Secret, refUserID)
}, regTokenUser1.Secret)
require.NoError(t, err)
t.Run("Activation", func(t *testing.T) {
@ -251,7 +249,7 @@ func TestGraphqlMutation(t *testing.T) {
FullName: "User1",
Email: "u2@mail.test",
Password: "123a123",
}, regTokenUser2.Secret, refUserID)
}, regTokenUser2.Secret)
require.NoError(t, err)
t.Run("Activation", func(t *testing.T) {

View File

@ -9,7 +9,6 @@ import (
"storj.io/common/uuid"
"storj.io/storj/satellite/console"
"storj.io/storj/satellite/mailservice"
"storj.io/storj/satellite/rewards"
)
const (
@ -21,10 +20,6 @@ const (
OwnedProjectsQuery = "ownedProjects"
// MyProjectsQuery is a query name for projects related to account.
MyProjectsQuery = "myProjects"
// ActiveRewardQuery is a query name for current active reward offer.
ActiveRewardQuery = "activeReward"
// CreditUsageQuery is a query name for credit usage related to an user.
CreditUsageQuery = "creditUsage"
)
// rootQuery creates query for graphql populated by AccountsClient.
@ -79,35 +74,6 @@ func rootQuery(service *console.Service, mailService *mailservice.Service, types
return projects, nil
},
},
ActiveRewardQuery: &graphql.Field{
Type: types.reward,
Args: graphql.FieldConfigArgument{
FieldType: &graphql.ArgumentConfig{
Type: graphql.NewNonNull(graphql.Int),
},
},
Resolve: func(p graphql.ResolveParams) (interface{}, error) {
rewardType, _ := p.Args[FieldType].(int)
offer, err := service.GetCurrentRewardByType(p.Context, rewards.OfferType(rewardType))
if err != nil {
return nil, err
}
return offer, nil
},
},
CreditUsageQuery: &graphql.Field{
Type: types.creditUsage,
Resolve: func(p graphql.ResolveParams) (interface{}, error) {
usage, err := service.GetUserCreditUsage(p.Context)
if err != nil {
return nil, err
}
return usage, nil
},
},
},
})
}

View File

@ -90,7 +90,6 @@ func TestGraphqlQuery(t *testing.T) {
db.ProjectAccounting(),
projectUsage,
db.Buckets(),
db.Rewards(),
partnersService,
paymentsService.Accounts(),
console.Config{PasswordCost: console.TestPasswordCost, DefaultProjectLimit: 5},
@ -125,12 +124,11 @@ func TestGraphqlQuery(t *testing.T) {
Email: "mtest@mail.test",
Password: "123a123",
}
refUserID := ""
regToken, err := service.CreateRegToken(ctx, 2)
require.NoError(t, err)
rootUser, err := service.CreateUser(ctx, createUser, regToken.Secret, refUserID)
rootUser, err := service.CreateUser(ctx, createUser, regToken.Secret)
require.NoError(t, err)
err = paymentsService.Accounts().Setup(ctx, rootUser.ID, rootUser.Email)
@ -208,7 +206,7 @@ func TestGraphqlQuery(t *testing.T) {
ShortName: "Last",
Password: "123a123",
Email: "muu1@mail.test",
}, regTokenUser1.Secret, refUserID)
}, regTokenUser1.Secret)
require.NoError(t, err)
t.Run("Activation", func(t *testing.T) {
@ -232,7 +230,7 @@ func TestGraphqlQuery(t *testing.T) {
ShortName: "Name",
Email: "muu2@mail.test",
Password: "123a123",
}, regTokenUser2.Secret, refUserID)
}, regTokenUser2.Secret)
require.NoError(t, err)
t.Run("Activation", func(t *testing.T) {

View File

@ -18,7 +18,6 @@ type TypeCreator struct {
user *graphql.Object
reward *graphql.Object
creditUsage *graphql.Object
project *graphql.Object
projectUsage *graphql.Object
projectsPage *graphql.Object
@ -77,11 +76,6 @@ func (c *TypeCreator) Create(log *zap.Logger, service *console.Service, mailServ
return err
}
c.creditUsage = graphqlCreditUsage()
if err := c.creditUsage.Error(); err != nil {
return err
}
c.projectUsage = graphqlProjectUsage()
if err := c.projectUsage.Error(); err != nil {
return err

View File

@ -1,36 +0,0 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.
package consoleql
import (
"github.com/graphql-go/graphql"
)
const (
	// CreditUsageType is a graphql type for user credit.
	CreditUsageType = "creditUsage"
	// FieldAvailableCredit is a field name for available credit.
	FieldAvailableCredit = "availableCredit"
	// FieldUsedCredit is a field name for used credit.
	FieldUsedCredit = "usedCredit"
	// FieldReferred is a field name for total referred number.
	FieldReferred = "referred"
)

// graphqlCreditUsage builds the graphql object type describing a user's
// credit usage: available credit, used credit, and referral count.
func graphqlCreditUsage() *graphql.Object {
	fields := graphql.Fields{
		FieldAvailableCredit: &graphql.Field{Type: graphql.Int},
		FieldUsedCredit:      &graphql.Field{Type: graphql.Int},
		FieldReferred:        &graphql.Field{Type: graphql.Int},
	}
	return graphql.NewObject(graphql.ObjectConfig{
		Name:   CreditUsageType,
		Fields: fields,
	})
}

View File

@ -40,7 +40,6 @@ import (
"storj.io/storj/satellite/console/consoleweb/consoleql"
"storj.io/storj/satellite/console/consoleweb/consolewebauth"
"storj.io/storj/satellite/mailservice"
"storj.io/storj/satellite/referrals"
"storj.io/storj/satellite/rewards"
)
@ -77,7 +76,7 @@ type Config struct {
TermsAndConditionsURL string `help:"url link to terms and conditions page" default:"https://storj.io/storage-sla/"`
SegmentIOPublicKey string `help:"used to initialize segment.io at web satellite console" default:""`
AccountActivationRedirectURL string `help:"url link for account activation redirect" default:""`
VerificationPageURL string `help:"url link to sign up verification page" default:"https://tardigrade.io/verify"`
VerificationPageURL string `help:"url link to sign up verification page" devDefault:"" releaseDefault:"https://tardigrade.io/verify"`
PartneredSatelliteNames string `help:"names of partnered satellites" default:"US-Central-1,Europe-West-1,Asia-East-1"`
GoogleTagManagerID string `help:"id for google tag manager" default:""`
GeneralRequestURL string `help:"url link to general request page" default:"https://support.tardigrade.io/hc/en-us/requests/new?ticket_form_id=360000379291"`
@ -95,11 +94,10 @@ type Config struct {
type Server struct {
log *zap.Logger
config Config
service *console.Service
mailService *mailservice.Service
referralsService *referrals.Service
partners *rewards.PartnersService
config Config
service *console.Service
mailService *mailservice.Service
partners *rewards.PartnersService
listener net.Listener
server http.Server
@ -122,18 +120,17 @@ type Server struct {
}
// NewServer creates new instance of console server.
func NewServer(logger *zap.Logger, config Config, service *console.Service, mailService *mailservice.Service, referralsService *referrals.Service, partners *rewards.PartnersService, listener net.Listener, stripePublicKey string, nodeURL storj.NodeURL) *Server {
func NewServer(logger *zap.Logger, config Config, service *console.Service, mailService *mailservice.Service, partners *rewards.PartnersService, listener net.Listener, stripePublicKey string, nodeURL storj.NodeURL) *Server {
server := Server{
log: logger,
config: config,
listener: listener,
service: service,
mailService: mailService,
referralsService: referralsService,
partners: partners,
stripePublicKey: stripePublicKey,
rateLimiter: web.NewIPRateLimiter(config.RateLimit),
nodeURL: nodeURL,
log: logger,
config: config,
listener: listener,
service: service,
mailService: mailService,
partners: partners,
stripePublicKey: stripePublicKey,
rateLimiter: web.NewIPRateLimiter(config.RateLimit),
nodeURL: nodeURL,
}
logger.Debug("Starting Satellite UI.", zap.Stringer("Address", server.listener.Addr()))
@ -169,11 +166,6 @@ func NewServer(logger *zap.Logger, config Config, service *console.Service, mail
server.withAuth(http.HandlerFunc(server.projectUsageLimitsHandler)),
).Methods(http.MethodGet)
referralsController := consoleapi.NewReferrals(logger, referralsService, service, mailService, server.config.ExternalAddress)
referralsRouter := router.PathPrefix("/api/v0/referrals").Subrouter()
referralsRouter.Handle("/tokens", server.withAuth(http.HandlerFunc(referralsController.GetTokens))).Methods(http.MethodGet)
referralsRouter.HandleFunc("/register", referralsController.Register).Methods(http.MethodPost)
authController := consoleapi.NewAuth(logger, service, mailService, server.cookieAuth, partners, server.config.ExternalAddress, config.LetUsKnowURL, config.TermsAndConditionsURL, config.ContactInfoURL)
authRouter := router.PathPrefix("/api/v0/auth").Subrouter()
authRouter.Handle("/account", server.withAuth(http.HandlerFunc(authController.GetAccount))).Methods(http.MethodGet)
@ -284,6 +276,7 @@ func (server *Server) appHandler(w http.ResponseWriter, r *http.Request) {
header.Set("Referrer-Policy", "same-origin") // Only expose the referring url when navigating around the satellite itself.
var data struct {
ExternalAddress string
SatelliteName string
SatelliteNodeURL string
SegmentIOPublicKey string
@ -297,6 +290,7 @@ func (server *Server) appHandler(w http.ResponseWriter, r *http.Request) {
GatewayCredentialsRequestURL string
}
data.ExternalAddress = server.config.ExternalAddress
data.SatelliteName = server.config.SatelliteName
data.SatelliteNodeURL = server.nodeURL.String()
data.SegmentIOPublicKey = server.config.SegmentIOPublicKey

View File

@ -23,8 +23,6 @@ type DB interface {
RegistrationTokens() RegistrationTokens
// ResetPasswordTokens is a getter for ResetPasswordTokens repository.
ResetPasswordTokens() ResetPasswordTokens
// UserCredits is a getter for UserCredits repository.
UserCredits() UserCredits
// WithTx is a method for executing transactions with retrying as necessary.
WithTx(ctx context.Context, fn func(ctx context.Context, tx DBTx) error) error

View File

@ -87,7 +87,6 @@ type Service struct {
projectAccounting accounting.ProjectAccounting
projectUsage *accounting.Service
buckets Buckets
rewards rewards.DB
partners *rewards.PartnersService
accounts payments.Accounts
@ -109,7 +108,7 @@ type PaymentsService struct {
}
// NewService returns new instance of Service.
func NewService(log *zap.Logger, signer Signer, store DB, projectAccounting accounting.ProjectAccounting, projectUsage *accounting.Service, buckets Buckets, rewards rewards.DB, partners *rewards.PartnersService, accounts payments.Accounts, config Config, minCoinPayment int64) (*Service, error) {
func NewService(log *zap.Logger, signer Signer, store DB, projectAccounting accounting.ProjectAccounting, projectUsage *accounting.Service, buckets Buckets, partners *rewards.PartnersService, accounts payments.Accounts, config Config, minCoinPayment int64) (*Service, error) {
if signer == nil {
return nil, errs.New("signer can't be nil")
}
@ -131,7 +130,6 @@ func NewService(log *zap.Logger, signer Signer, store DB, projectAccounting acco
projectAccounting: projectAccounting,
projectUsage: projectUsage,
buckets: buckets,
rewards: rewards,
partners: partners,
accounts: accounts,
config: config,
@ -523,32 +521,12 @@ func (s *Service) checkRegistrationSecret(ctx context.Context, tokenSecret Regis
}
// CreateUser gets password hash value and creates new inactive User.
func (s *Service) CreateUser(ctx context.Context, user CreateUser, tokenSecret RegistrationSecret, refUserID string) (u *User, err error) {
func (s *Service) CreateUser(ctx context.Context, user CreateUser, tokenSecret RegistrationSecret) (u *User, err error) {
defer mon.Task()(&ctx)(&err)
if err := user.IsValid(); err != nil {
return nil, Error.Wrap(err)
}
offerType := rewards.FreeCredit
if user.PartnerID != "" {
offerType = rewards.Partner
} else if refUserID != "" {
offerType = rewards.Referral
}
// TODO: Create a current offer cache to replace database call
offers, err := s.rewards.GetActiveOffersByType(ctx, offerType)
if err != nil && !rewards.ErrOfferNotExist.Has(err) {
s.log.Error("internal error", zap.Error(err))
return nil, Error.Wrap(err)
}
currentReward, err := s.partners.GetActiveOffer(ctx, offers, offerType, user.PartnerID)
if err != nil && !rewards.ErrOfferNotExist.Has(err) {
s.log.Error("internal error", zap.Error(err))
return nil, Error.Wrap(err)
}
registrationToken, err := s.checkRegistrationSecret(ctx, tokenSecret)
if err != nil {
return nil, err
@ -607,26 +585,6 @@ func (s *Service) CreateUser(ctx context.Context, user CreateUser, tokenSecret R
}
}
if currentReward != nil {
_ = currentReward
// ToDo: NB: Uncomment this block when UserCredits().Create is cockroach compatible
// var refID *uuid.UUID
// if refUserID != "" {
// refID, err = uuid.FromString(refUserID)
// if err != nil {
// return Error.Wrap(err)
// }
// }
// newCredit, err := NewCredit(currentReward, Invitee, u.ID, refID)
// if err != nil {
// return err
// }
// err = tx.UserCredits().Create(ctx, *newCredit)
// if err != nil {
// return err
// }
}
return nil
})
@ -712,11 +670,6 @@ func (s *Service) ActivateAccount(ctx context.Context, activationToken string) (
}
s.auditLog(ctx, "activate account", &user.ID, user.Email)
err = s.store.UserCredits().UpdateEarnedCredits(ctx, user.ID)
if err != nil && !NoCreditForUpdateErr.Has(err) {
return Error.Wrap(err)
}
if s.accounts.PaywallEnabled(user.ID) {
return nil
}
@ -1004,40 +957,6 @@ func (s *Service) GetUsersOwnedProjectsPage(ctx context.Context, cursor Projects
return projects, nil
}
// GetCurrentRewardByType is a method for querying current active reward offer based on its type.
func (s *Service) GetCurrentRewardByType(ctx context.Context, offerType rewards.OfferType) (offer *rewards.Offer, err error) {
defer mon.Task()(&ctx)(&err)
offers, err := s.rewards.GetActiveOffersByType(ctx, offerType)
if err != nil {
s.log.Error("internal error", zap.Error(err))
return nil, Error.Wrap(err)
}
result, err := s.partners.GetActiveOffer(ctx, offers, offerType, "")
if err != nil {
return nil, Error.Wrap(err)
}
return result, nil
}
// GetUserCreditUsage is a method for querying users' credit information up until now.
func (s *Service) GetUserCreditUsage(ctx context.Context) (usage *UserCreditUsage, err error) {
defer mon.Task()(&ctx)(&err)
auth, err := s.getAuthAndAuditLog(ctx, "get credit card usage")
if err != nil {
return nil, Error.Wrap(err)
}
usage, err = s.store.UserCredits().GetCreditUsage(ctx, auth.User.ID, time.Now().UTC())
if err != nil {
return nil, Error.Wrap(err)
}
return usage, nil
}
// CreateProject is a method for creating new project.
func (s *Service) CreateProject(ctx context.Context, projectInfo ProjectInfo) (p *Project, err error) {
defer mon.Task()(&ctx)(&err)

View File

@ -1,95 +0,0 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.
package console
import (
"context"
"time"
"github.com/zeebo/errs"
"storj.io/common/uuid"
"storj.io/storj/private/currency"
"storj.io/storj/satellite/rewards"
)
// NoCreditForUpdateErr is a error message used when no credits are found for update when new users sign up.
var NoCreditForUpdateErr = errs.Class("no credit found to update")

// UserCredits holds information to interact with database.
//
// architecture: Database
type UserCredits interface {
	// GetCreditUsage returns the aggregate credit usage for a user up to the given expiration end date.
	GetCreditUsage(ctx context.Context, userID uuid.UUID, expirationEndDate time.Time) (*UserCreditUsage, error)
	// Create inserts a new user credit record.
	Create(ctx context.Context, userCredit CreateCredit) error
	// UpdateEarnedCredits updates the user's earned credits; called during account activation.
	UpdateEarnedCredits(ctx context.Context, userID uuid.UUID) error
	// UpdateAvailableCredits charges creditsToCharge against the user's available
	// credits and returns the portion that could not be covered.
	UpdateAvailableCredits(ctx context.Context, creditsToCharge int, id uuid.UUID, billingStartDate time.Time) (remainingCharge int, err error)
}
// CreditType indicates a type of a credit.
type CreditType string
const (
// Invitee is a type of credits earned by invitee.
Invitee CreditType = "invitee"
// Referrer is a type of credits earned by referrer.
Referrer CreditType = "referrer"
)
// UserCredit holds information about an user's credit.
type UserCredit struct {
ID int
UserID uuid.UUID
OfferID int
ReferredBy *uuid.UUID
Type CreditType
CreditsEarned currency.USD
CreditsUsed currency.USD
ExpiresAt time.Time
CreatedAt time.Time
}
// UserCreditUsage holds information about credit usage information.
type UserCreditUsage struct {
Referred int64
AvailableCredits currency.USD
UsedCredits currency.USD
}
// CreateCredit holds information that's needed when create a new record of user credit.
type CreateCredit struct {
OfferInfo rewards.RedeemOffer
UserID uuid.UUID
OfferID int
Type CreditType
ReferredBy *uuid.UUID
CreditsEarned currency.USD
ExpiresAt time.Time
}
// NewCredit returns a new credit data built from the given reward offer.
//
// creditType selects how the earned amount is derived:
//   - Invitee: 0 on creation; the credit is granted once the invitee
//     activates their account.
//   - Referrer: the offer's award credit immediately.
//
// Returns an error for a nil offer or an unsupported credit type.
func NewCredit(currentReward *rewards.Offer, creditType CreditType, userID uuid.UUID, referrerID *uuid.UUID) (*CreateCredit, error) {
	// Guard against a nil offer so misuse fails with an error rather than a
	// panic; callers historically checked currentReward != nil themselves.
	if currentReward == nil {
		return nil, errs.New("no current reward offer")
	}

	var creditEarned currency.USD
	switch creditType {
	case Invitee:
		// Invitee will only earn their credit once they have activated their account. Therefore, we set it to 0 on creation
		creditEarned = currency.Cents(0)
	case Referrer:
		creditEarned = currentReward.AwardCredit
	default:
		return nil, errs.New("unsupported credit type")
	}

	return &CreateCredit{
		OfferInfo: rewards.RedeemOffer{
			RedeemableCap: currentReward.RedeemableCap,
			Status:        currentReward.Status,
			Type:          currentReward.Type,
		},
		UserID:        userID,
		OfferID:       currentReward.ID,
		ReferredBy:    referrerID,
		CreditsEarned: creditEarned,
		// Invitee credits expire after the offer's invitee credit duration.
		ExpiresAt: time.Now().UTC().AddDate(0, 0, currentReward.InviteeCreditDurationDays),
	}, nil
}

View File

@ -1,342 +0,0 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.
package console_test
import (
"context"
"testing"
"time"
"github.com/stretchr/testify/require"
"storj.io/common/testcontext"
"storj.io/common/testrand"
"storj.io/storj/private/currency"
"storj.io/storj/satellite"
"storj.io/storj/satellite/console"
"storj.io/storj/satellite/rewards"
"storj.io/storj/satellite/satellitedb/satellitedbtest"
)
func TestUserCredits(t *testing.T) {
t.Skip("Skip until usercredits.Create method is cockroach compatible. https://github.com/cockroachdb/cockroach/issues/42881")
satellitedbtest.Run(t, func(ctx *testcontext.Context, t *testing.T, db satellite.DB) {
consoleDB := db.Console()
user, referrer, activeOffer, defaultOffer := setupData(ctx, t, db)
randomID := testrand.UUID()
invalidOffer := rewards.Offer{
ID: 10,
}
// test foreign key constraint for inserting a new user credit entry with randomID
var invalidUserCredits []console.CreateCredit
invalid1, err := console.NewCredit(activeOffer, console.Invitee, randomID, &referrer.ID)
require.NoError(t, err)
invalid2, err := console.NewCredit(&invalidOffer, console.Invitee, user.ID, &referrer.ID)
require.NoError(t, err)
invalid3, err := console.NewCredit(activeOffer, console.Invitee, randomID, &randomID)
require.NoError(t, err)
invalidUserCredits = append(invalidUserCredits, *invalid1, *invalid2, *invalid3)
for _, ivc := range invalidUserCredits {
err := consoleDB.UserCredits().Create(ctx, ivc)
require.Error(t, err)
}
type result struct {
remainingCharge int
usage console.UserCreditUsage
referred int64
hasUpdateErr bool
hasCreateErr bool
}
var validUserCredits = []struct {
userCredit console.CreateCredit
chargedCredits int
expected result
}{
{
userCredit: console.CreateCredit{
OfferInfo: rewards.RedeemOffer{
RedeemableCap: activeOffer.RedeemableCap,
Status: activeOffer.Status,
Type: activeOffer.Type,
},
UserID: user.ID,
OfferID: activeOffer.ID,
ReferredBy: &referrer.ID,
Type: console.Invitee,
CreditsEarned: currency.Cents(100),
ExpiresAt: time.Now().AddDate(0, 1, 0),
},
chargedCredits: 120,
expected: result{
remainingCharge: 20,
usage: console.UserCreditUsage{
AvailableCredits: currency.Cents(0),
UsedCredits: currency.Cents(100),
Referred: 0,
},
referred: 0,
},
},
{
// simulate a credit that's already expired
userCredit: console.CreateCredit{
OfferInfo: rewards.RedeemOffer{
RedeemableCap: activeOffer.RedeemableCap,
Status: activeOffer.Status,
Type: activeOffer.Type,
},
UserID: user.ID,
OfferID: activeOffer.ID,
ReferredBy: &referrer.ID,
Type: console.Invitee,
CreditsEarned: currency.Cents(100),
ExpiresAt: time.Now().AddDate(0, 0, -5),
},
chargedCredits: 60,
expected: result{
remainingCharge: 60,
usage: console.UserCreditUsage{
AvailableCredits: currency.Cents(0),
UsedCredits: currency.Cents(100),
Referred: 0,
},
referred: 0,
hasCreateErr: true,
hasUpdateErr: true,
},
},
{
// simulate a credit that's not expired
userCredit: console.CreateCredit{
OfferInfo: rewards.RedeemOffer{
RedeemableCap: activeOffer.RedeemableCap,
Status: activeOffer.Status,
Type: activeOffer.Type,
},
UserID: user.ID,
OfferID: activeOffer.ID,
ReferredBy: &referrer.ID,
Type: console.Invitee,
CreditsEarned: currency.Cents(100),
ExpiresAt: time.Now().AddDate(0, 0, 5),
},
chargedCredits: 80,
expected: result{
remainingCharge: 0,
usage: console.UserCreditUsage{
AvailableCredits: currency.Cents(20),
UsedCredits: currency.Cents(180),
Referred: 0,
},
referred: 0,
},
},
{
// simulate redeemable capacity has been reached for active offers
userCredit: console.CreateCredit{
OfferInfo: rewards.RedeemOffer{
RedeemableCap: 1,
Status: activeOffer.Status,
Type: activeOffer.Type,
},
UserID: user.ID,
OfferID: activeOffer.ID,
ReferredBy: &randomID,
Type: console.Invitee,
CreditsEarned: currency.Cents(100),
ExpiresAt: time.Now().AddDate(0, 1, 0),
},
expected: result{
usage: console.UserCreditUsage{
Referred: 0,
AvailableCredits: currency.Cents(20),
UsedCredits: currency.Cents(180),
},
referred: 0,
hasCreateErr: true,
},
},
{
// simulate redeemable capacity has been reached for default offers
userCredit: console.CreateCredit{
OfferInfo: rewards.RedeemOffer{
RedeemableCap: defaultOffer.RedeemableCap,
Status: defaultOffer.Status,
Type: defaultOffer.Type,
},
UserID: user.ID,
OfferID: defaultOffer.ID,
ReferredBy: nil,
Type: console.Invitee,
CreditsEarned: currency.Cents(100),
ExpiresAt: time.Now().AddDate(0, 1, 0),
},
expected: result{
usage: console.UserCreditUsage{
Referred: 0,
AvailableCredits: currency.Cents(120),
UsedCredits: currency.Cents(180),
},
referred: 0,
hasCreateErr: false,
},
},
{
// simulate credit on account creation
userCredit: console.CreateCredit{
OfferInfo: rewards.RedeemOffer{
RedeemableCap: defaultOffer.RedeemableCap,
Status: defaultOffer.Status,
Type: defaultOffer.Type,
},
UserID: user.ID,
OfferID: defaultOffer.ID,
ReferredBy: &referrer.ID,
Type: console.Invitee,
CreditsEarned: currency.Cents(0),
ExpiresAt: time.Now().AddDate(0, 1, 0),
},
expected: result{
usage: console.UserCreditUsage{
Referred: 0,
AvailableCredits: currency.Cents(220),
UsedCredits: currency.Cents(180),
},
referred: 0,
hasCreateErr: false,
},
},
{
// simulate credit redemption for referrer
userCredit: console.CreateCredit{
OfferInfo: rewards.RedeemOffer{
RedeemableCap: activeOffer.RedeemableCap,
Status: activeOffer.Status,
Type: activeOffer.Type,
},
UserID: referrer.ID,
OfferID: activeOffer.ID,
ReferredBy: nil,
Type: console.Referrer,
CreditsEarned: activeOffer.AwardCredit,
ExpiresAt: time.Now().AddDate(0, 0, activeOffer.AwardCreditDurationDays),
},
expected: result{
usage: console.UserCreditUsage{
Referred: 1,
AvailableCredits: activeOffer.AwardCredit,
UsedCredits: currency.Cents(0),
},
referred: 1,
hasCreateErr: false,
},
},
}
for _, vc := range validUserCredits {
err := consoleDB.UserCredits().Create(ctx, vc.userCredit)
if vc.expected.hasCreateErr {
require.Error(t, err)
} else {
require.NoError(t, err)
}
if vc.userCredit.CreditsEarned.Cents() == 0 {
err = consoleDB.UserCredits().UpdateEarnedCredits(ctx, vc.userCredit.UserID)
require.NoError(t, err)
}
{
remainingCharge, err := consoleDB.UserCredits().UpdateAvailableCredits(ctx, vc.chargedCredits, vc.userCredit.UserID, time.Now())
if vc.expected.hasUpdateErr {
require.Error(t, err)
} else {
require.NoError(t, err)
}
require.Equal(t, vc.expected.remainingCharge, remainingCharge)
}
{
usage, err := consoleDB.UserCredits().GetCreditUsage(ctx, vc.userCredit.UserID, time.Now())
require.NoError(t, err)
require.Equal(t, vc.expected.usage, *usage)
}
{
referred, err := consoleDB.UserCredits().GetCreditUsage(ctx, referrer.ID, time.Now())
require.NoError(t, err)
require.Equal(t, vc.expected.referred, referred.Referred)
}
}
})
}
// setupData seeds the database with the fixtures the user-credit tests need:
// an invitee user, a referrer user, an active referral offer, and a default
// free-credit offer.
func setupData(ctx context.Context, t *testing.T, db satellite.DB) (user *console.User, referrer *console.User, activeOffer *rewards.Offer, defaultOffer *rewards.Offer) {
	consoleDB := db.Console()
	offersDB := db.Rewards()

	// create user
	userPassHash := testrand.Bytes(8)
	referrerPassHash := testrand.Bytes(8)

	var err error

	// create an user
	user, err = consoleDB.Users().Insert(ctx, &console.User{
		ID:           testrand.UUID(),
		FullName:     "John Doe",
		Email:        "john@mail.test",
		PasswordHash: userPassHash,
		Status:       console.Active,
	})
	require.NoError(t, err)

	// create an user as referrer
	referrer, err = consoleDB.Users().Insert(ctx, &console.User{
		ID:           testrand.UUID(),
		FullName:     "referrer",
		Email:        "referrer@mail.test",
		PasswordHash: referrerPassHash,
		Status:       console.Active,
	})
	require.NoError(t, err)

	// create an active offer
	activeOffer, err = offersDB.Create(ctx, &rewards.NewOffer{
		Name:                      "active",
		Description:               "active offer",
		AwardCredit:               currency.Cents(100),
		InviteeCredit:             currency.Cents(50),
		AwardCreditDurationDays:   60,
		InviteeCreditDurationDays: 30,
		RedeemableCap:             50,
		ExpiresAt:                 time.Now().Add(time.Hour * 1),
		Status:                    rewards.Active,
		Type:                      rewards.Referral,
	})
	require.NoError(t, err)

	// create a default offer
	defaultOffer, err = offersDB.Create(ctx, &rewards.NewOffer{
		Name:                      "default",
		Description:               "default offer",
		AwardCredit:               currency.Cents(0),
		InviteeCredit:             currency.Cents(100),
		AwardCreditDurationDays:   0,
		InviteeCreditDurationDays: 14,
		RedeemableCap:             0,
		ExpiresAt:                 time.Now().Add(time.Hour * 1),
		Status:                    rewards.Default,
		Type:                      rewards.FreeCredit,
	})
	require.NoError(t, err)

	return user, referrer, activeOffer, defaultOffer
}

View File

@ -49,11 +49,16 @@ func (user *UserInfo) IsValid() error {
// CreateUser struct holds info for User creation.
type CreateUser struct {
FullName string `json:"fullName"`
ShortName string `json:"shortName"`
Email string `json:"email"`
PartnerID string `json:"partnerId"`
Password string `json:"password"`
FullName string `json:"fullName"`
ShortName string `json:"shortName"`
Email string `json:"email"`
PartnerID string `json:"partnerId"`
Password string `json:"password"`
IsProfessional bool `json:"isProfessional"`
Position string `json:"position"`
CompanyName string `json:"companyName"`
CompanySize int `json:"companySize"`
WorkingOn string `json:"workingOn"`
}
// IsValid checks CreateUser validity and returns error describing whats wrong.
@ -105,4 +110,10 @@ type User struct {
CreatedAt time.Time `json:"createdAt"`
ProjectLimit int `json:"projectLimit"`
IsProfessional bool `json:"isProfessional"`
Position string `json:"position"`
CompanyName string `json:"companyName"`
CompanySize int `json:"companySize"`
WorkingOn string `json:"workingOn"`
}

View File

@ -18,14 +18,19 @@ import (
)
const (
lastName = "lastName"
email = "email@mail.test"
passValid = "123456"
name = "name"
newName = "newName"
newLastName = "newLastName"
newEmail = "newEmail@mail.test"
newPass = "newPass1234567890123456789012345"
lastName = "lastName"
email = "email@mail.test"
passValid = "123456"
name = "name"
newName = "newName"
newLastName = "newLastName"
newEmail = "newEmail@mail.test"
newPass = "newPass1234567890123456789012345"
position = "position"
companyName = "companyName"
companySize = 123
workingOn = "workingOn"
isProfessional = true
)
func TestUserRepository(t *testing.T) {
@ -54,6 +59,22 @@ func TestUserRepository(t *testing.T) {
CreatedAt: time.Now(),
}
testUsers(ctx, t, repository, user)
// test professional user
user = &console.User{
ID: testrand.UUID(),
FullName: name,
ShortName: lastName,
Email: email,
PasswordHash: []byte(passValid),
CreatedAt: time.Now(),
IsProfessional: isProfessional,
Position: position,
CompanyName: companyName,
CompanySize: companySize,
WorkingOn: workingOn,
}
testUsers(ctx, t, repository, user)
})
}
@ -114,6 +135,17 @@ func testUsers(ctx context.Context, t *testing.T, repository console.Users, user
assert.Equal(t, name, userByEmail.FullName)
assert.Equal(t, lastName, userByEmail.ShortName)
assert.Equal(t, user.PartnerID, userByEmail.PartnerID)
if user.IsProfessional {
assert.Equal(t, workingOn, userByEmail.WorkingOn)
assert.Equal(t, position, userByEmail.Position)
assert.Equal(t, companyName, userByEmail.CompanyName)
assert.Equal(t, companySize, userByEmail.CompanySize)
} else {
assert.Equal(t, "", userByEmail.WorkingOn)
assert.Equal(t, "", userByEmail.Position)
assert.Equal(t, "", userByEmail.CompanyName)
assert.Equal(t, 0, userByEmail.CompanySize)
}
userByID, err := repository.Get(ctx, userByEmail.ID)
assert.NoError(t, err)
@ -121,6 +153,18 @@ func testUsers(ctx context.Context, t *testing.T, repository console.Users, user
assert.Equal(t, lastName, userByID.ShortName)
assert.Equal(t, user.PartnerID, userByID.PartnerID)
if user.IsProfessional {
assert.Equal(t, workingOn, userByID.WorkingOn)
assert.Equal(t, position, userByID.Position)
assert.Equal(t, companyName, userByID.CompanyName)
assert.Equal(t, companySize, userByID.CompanySize)
} else {
assert.Equal(t, "", userByID.WorkingOn)
assert.Equal(t, "", userByID.Position)
assert.Equal(t, "", userByID.CompanyName)
assert.Equal(t, 0, userByID.CompanySize)
}
assert.Equal(t, userByID.ID, userByEmail.ID)
assert.Equal(t, userByID.FullName, userByEmail.FullName)
assert.Equal(t, userByID.ShortName, userByEmail.ShortName)
@ -128,6 +172,11 @@ func testUsers(ctx context.Context, t *testing.T, repository console.Users, user
assert.Equal(t, userByID.PasswordHash, userByEmail.PasswordHash)
assert.Equal(t, userByID.PartnerID, userByEmail.PartnerID)
assert.Equal(t, userByID.CreatedAt, userByEmail.CreatedAt)
assert.Equal(t, userByID.IsProfessional, userByEmail.IsProfessional)
assert.Equal(t, userByID.WorkingOn, userByEmail.WorkingOn)
assert.Equal(t, userByID.Position, userByEmail.Position)
assert.Equal(t, userByID.CompanyName, userByEmail.CompanyName)
assert.Equal(t, userByID.CompanySize, userByEmail.CompanySize)
})
t.Run("Update user success", func(t *testing.T) {

View File

@ -14,6 +14,7 @@ import (
"storj.io/common/pb"
"storj.io/common/rpc/rpcstatus"
"storj.io/common/storj"
"storj.io/storj/private/nodeoperator"
"storj.io/storj/satellite/overlay"
)
@ -78,6 +79,19 @@ func (endpoint *Endpoint) CheckIn(ctx context.Context, req *pb.CheckInRequest) (
err = errCheckInNetwork.New("failed to ping node (ID: %s) at address: %s, err: %v", nodeID, req.Address, err)
return nil, rpcstatus.Error(rpcstatus.NotFound, err.Error())
}
// check wallet features
if req.Operator != nil {
if err := nodeoperator.DefaultWalletFeaturesValidation.Validate(req.Operator.WalletFeatures); err != nil {
endpoint.log.Debug("ignoring invalid wallet features",
zap.Stringer("Node ID", nodeID),
zap.Strings("Wallet Features", req.Operator.WalletFeatures))
// TODO: Update CheckInResponse to include wallet feature validation error
req.Operator.WalletFeatures = nil
}
}
nodeInfo := overlay.NodeCheckInInfo{
NodeID: peerID.ID,
Address: &pb.NodeAddress{
@ -91,6 +105,7 @@ func (endpoint *Endpoint) CheckIn(ctx context.Context, req *pb.CheckInRequest) (
Operator: req.Operator,
Version: req.Version,
}
err = endpoint.service.overlay.UpdateCheckIn(ctx, nodeInfo, time.Now().UTC())
if err != nil {
endpoint.log.Info("failed to update check in", zap.String("node address", req.Address), zap.Stringer("Node ID", nodeID), zap.Error(err))

View File

@ -21,7 +21,7 @@ var _ = time.Kitchen
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
type AuditHistory struct {
Windows []*AuditWindow `protobuf:"bytes,1,rep,name=windows,proto3" json:"windows,omitempty"`

View File

@ -21,7 +21,7 @@ var _ = time.Kitchen
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
// InjuredSegment is the queue item used for the data repair queue.
type InjuredSegment struct {

View File

@ -25,7 +25,7 @@ var _ = time.Kitchen
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
type RepairJobRequest struct {
// When not the first request, this will include the result of the last job

View File

@ -23,7 +23,7 @@ var _ = math.Inf
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
type CountNodesRequest struct {
XXX_NoUnkeyedLiteral struct{} `json:"-"`

View File

@ -23,7 +23,7 @@ var _ = time.Kitchen
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
type StreamID struct {
Bucket []byte `protobuf:"bytes,1,opt,name=bucket,proto3" json:"bucket,omitempty"`

View File

@ -19,7 +19,7 @@ var _ = math.Inf
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
// OrderLimitMetadata is used to transmit meta information about an order limit.
// This data will be encrypted.

View File

@ -1,58 +0,0 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.
package marketingweb
import (
"net/http"
"reflect"
"strconv"
"time"
"github.com/gorilla/schema"
"storj.io/storj/private/currency"
"storj.io/storj/satellite/rewards"
)
// parseOfferForm decodes POST form data into a new offer.
func parseOfferForm(w http.ResponseWriter, req *http.Request) (rewards.NewOffer, error) {
	var offer rewards.NewOffer
	if err := req.ParseForm(); err != nil {
		return offer, err
	}
	// Decode the posted form values into the offer using the package-level
	// gorilla/schema decoder (custom converters registered in init).
	err := decoder.Decode(&offer, req.PostForm)
	return offer, err
}
var (
decoder = schema.NewDecoder()
)
// init safely registers convertStringToTime for the decoder.
func init() {
	// Teach the gorilla/schema decoder how to decode our custom field
	// types from form strings.
	decoder.RegisterConverter(time.Time{}, convertStringToTime)
	decoder.RegisterConverter(currency.USD{}, convertStringToUSD)
}
// convertStringToUSD formats dollars strings as USD amount.
// Returning the zero reflect.Value signals an invalid value to the
// gorilla/schema decoder.
// NOTE(review): this converter is registered for currency.USD but builds
// the value via currency.Dollars — presumably Dollars returns a USD;
// confirm against the currency package.
func convertStringToUSD(s string) reflect.Value {
	value, err := strconv.Atoi(s)
	if err != nil {
		// invalid decoder value
		return reflect.Value{}
	}
	return reflect.ValueOf(currency.Dollars(value))
}
// convertStringToTime formats form time input as time.Time.
// A string that does not match the YYYY-MM-DD layout yields the zero
// reflect.Value, which the decoder treats as an invalid value.
func convertStringToTime(value string) reflect.Value {
	parsed, err := time.Parse("2006-01-02", value)
	if err == nil {
		return reflect.ValueOf(parsed)
	}
	// invalid decoder value
	return reflect.Value{}
}

View File

@ -1,126 +0,0 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information
package marketingweb
import (
"context"
"go.uber.org/zap"
"storj.io/storj/satellite/rewards"
)
// OrganizedOffers contains a list of offers organized by status.
type OrganizedOffers struct {
Active rewards.Offer
Default rewards.Offer
Done rewards.Offers
}
// OpenSourcePartner contains all data for an Open Source Partner.
type OpenSourcePartner struct {
rewards.PartnerInfo
PartnerOffers OrganizedOffers
}
// PartnerSet contains a list of Open Source Partners.
type PartnerSet []OpenSourcePartner
// OfferSet provides a separation of marketing offers by type.
type OfferSet struct {
ReferralOffers OrganizedOffers
FreeCredits OrganizedOffers
PartnerTables PartnerSet
}
// OrganizeOffersByStatus organizes offers by OfferStatus.
//
// The single Active and Default offers are stored directly and every Done
// offer is appended to the Done list. Encountering a second Active or
// Default offer is unexpected; it is logged and the later offer wins.
func (server *Server) OrganizeOffersByStatus(offers rewards.Offers) OrganizedOffers {
	var oo OrganizedOffers

	for _, offer := range offers {
		switch offer.Status {
		case rewards.Active:
			if !oo.Active.IsZero() {
				server.log.Error("duplicate active")
			}
			oo.Active = offer
		case rewards.Default:
			// Bug fix: previously this checked oo.Active instead of
			// oo.Default, so duplicate default offers were never reported.
			if !oo.Default.IsZero() {
				server.log.Error("duplicate default")
			}
			oo.Default = offer
		case rewards.Done:
			oo.Done = append(oo.Done, offer)
		}
	}
	return oo
}
// OrganizeOffersByType organizes offers by OfferType.
// Offers of each known type are bucketed, then each bucket is further
// organized by status; unknown types are dropped.
func (server *Server) OrganizeOffersByType(offers rewards.Offers) OfferSet {
	var freeCredits, referrals, partners rewards.Offers

	for _, offer := range offers {
		switch offer.Type {
		case rewards.FreeCredit:
			freeCredits = append(freeCredits, offer)
		case rewards.Referral:
			referrals = append(referrals, offer)
		case rewards.Partner:
			partners = append(partners, offer)
		}
	}

	return OfferSet{
		FreeCredits:    server.OrganizeOffersByStatus(freeCredits),
		ReferralOffers: server.OrganizeOffersByStatus(referrals),
		PartnerTables:  server.organizePartnerData(partners),
	}
}
// createPartnerSet generates a PartnerSet from the config file.
func (server *Server) createPartnerSet() PartnerSet {
	all, err := server.partners.All(context.TODO()) // TODO: don't ignore error
	if err != nil {
		// Best-effort: log and return an empty set rather than failing the page.
		server.log.Error("failed to load all partners", zap.Error(err))
		return nil
	}

	var ps PartnerSet
	for _, partner := range all {
		// Only PartnerInfo is filled here; PartnerOffers is attached
		// later by matchOffersToPartnerSet.
		ps = append(ps, OpenSourcePartner{
			PartnerInfo: partner,
		})
	}
	return ps
}
// matchOffersToPartnerSet assigns offers to the partner they belong to.
// An offer belongs to a partner when its Name equals the partner's name;
// each partner's matched offers are then organized by status in place.
func (server *Server) matchOffersToPartnerSet(offers rewards.Offers, partnerSet PartnerSet) PartnerSet {
	for i := range partnerSet {
		name := partnerSet[i].PartnerInfo.Name

		var matched rewards.Offers
		for _, offer := range offers {
			if offer.Name == name {
				matched = append(matched, offer)
			}
		}

		partnerSet[i].PartnerOffers = server.OrganizeOffersByStatus(matched)
	}
	return partnerSet
}
// organizePartnerData returns a list of Open Source Partners
// whose offers have been organized by status, type, and
// assigned to the correct partner.
func (server *Server) organizePartnerData(offers rewards.Offers) PartnerSet {
	return server.matchOffersToPartnerSet(offers, server.createPartnerSet())
}

View File

@ -1,288 +0,0 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.
package marketingweb
import (
"context"
"errors"
"html/template"
"net"
"net/http"
"path/filepath"
"strconv"
"github.com/gorilla/mux"
"github.com/zeebo/errs"
"go.uber.org/zap"
"golang.org/x/sync/errgroup"
"storj.io/common/errs2"
"storj.io/storj/satellite/rewards"
)
// Error is satellite marketing error type.
var Error = errs.Class("satellite marketing error")
// Config contains configuration for marketingweb server.
type Config struct {
BaseURL string `help:"base url for marketing Admin GUI" default:""`
Address string `help:"server address of the marketing Admin GUI" default:"127.0.0.1:8090"`
StaticDir string `help:"path to static resources" default:""`
}
// Server represents marketing offersweb server.
//
// architecture: Endpoint
type Server struct {
	log    *zap.Logger
	config Config

	listener net.Listener
	server   http.Server

	// rewards is the offers database; partners resolves partner metadata.
	rewards  rewards.DB
	partners *rewards.PartnersService

	// templateDir is config.StaticDir/pages; templates are parsed once in
	// NewServer via parseTemplates.
	templateDir string
	templates   struct {
		home          *template.Template
		pageNotFound  *template.Template
		internalError *template.Template
		badRequest    *template.Template
	}
}
// commonPages returns templates that are required for all routes.
func (s *Server) commonPages() []string {
	names := []string{"base.html", "index.html", "banner.html", "logo.html"}
	pages := make([]string, 0, len(names))
	for _, name := range names {
		pages = append(pages, filepath.Join(s.templateDir, name))
	}
	return pages
}
// NewServer creates new instance of offersweb server.
func NewServer(logger *zap.Logger, config Config, rewards rewards.DB, partners *rewards.PartnersService, listener net.Listener) (*Server, error) {
	s := &Server{
		log:      logger,
		config:   config,
		listener: listener,
		rewards:  rewards,
		partners: partners,
	}

	logger.Debug("Starting Marketing Admin UI.", zap.Stringer("Address", s.listener.Addr()))

	// Static assets are served from StaticDir under the /static/ prefix.
	fs := http.StripPrefix("/static/", http.FileServer(http.Dir(s.config.StaticDir)))
	mux := mux.NewRouter()
	// Routes are registered only when a static dir is configured; without
	// one the server still constructs but serves no routes.
	if s.config.StaticDir != "" {
		mux.HandleFunc("/", s.GetOffers)
		mux.PathPrefix("/static/").Handler(fs)
		mux.HandleFunc("/create/{offer_type}", s.CreateOffer)
		mux.HandleFunc("/stop/{offer_id}", s.StopOffer)
	}
	s.server.Handler = mux

	// Parse templates eagerly so template errors surface at startup
	// rather than on the first request.
	s.templateDir = filepath.Join(s.config.StaticDir, "pages")
	if err := s.parseTemplates(); err != nil {
		return nil, Error.Wrap(err)
	}

	return s, nil
}
// GetOffers renders the tables for free credits and referral offers to the UI.
func (s *Server) GetOffers(w http.ResponseWriter, req *http.Request) {
	// Only the exact root path is handled here; anything else is a 404.
	if req.URL.Path != "/" {
		s.serveNotFound(w, req)
		return
	}

	offers, err := s.rewards.ListAll(req.Context())
	if err != nil {
		s.log.Error("failed to retrieve all offers", zap.Error(err))
		s.serveInternalError(w, req, err)
		return
	}

	// A template failure at this point is only logged: the response may
	// already be partially written, so no error page can be served.
	if err := s.templates.home.ExecuteTemplate(w, "base", s.OrganizeOffersByType(offers)); err != nil {
		s.log.Error("failed to execute template", zap.Error(err))
	}
}
// parseTemplates parses and stores all templates in server.
//
// Each page set consists of the common pages (base, index, banner, logo)
// plus its page-specific files. Every set exposes the BaseURL helper to
// templates; the home page additionally exposes ReferralLink.
func (s *Server) parseTemplates() (err error) {
	homeFiles := append(s.commonPages(),
		filepath.Join(s.templateDir, "home.html"),
		filepath.Join(s.templateDir, "referral-offers.html"),
		filepath.Join(s.templateDir, "referral-offers-modal.html"),
		filepath.Join(s.templateDir, "free-offers.html"),
		filepath.Join(s.templateDir, "free-offers-modal.html"),
		filepath.Join(s.templateDir, "partner-offers.html"),
		filepath.Join(s.templateDir, "partner-offers-modal.html"),
		filepath.Join(s.templateDir, "stop-free-credit.html"),
		filepath.Join(s.templateDir, "stop-referral-offer.html"),
		// NOTE: partner-offers.html was previously listed a second time
		// here; the duplicate was removed. Re-parsing the same file only
		// redefines identical templates, so behavior is unchanged.
		filepath.Join(s.templateDir, "stop-partner-offer.html"),
	)

	pageNotFoundFiles := append(s.commonPages(),
		filepath.Join(s.templateDir, "page-not-found.html"),
	)

	internalErrorFiles := append(s.commonPages(),
		filepath.Join(s.templateDir, "internal-server-error.html"),
	)

	badRequestFiles := append(s.commonPages(),
		filepath.Join(s.templateDir, "err.html"),
	)

	s.templates.home, err = template.New("home-page").Funcs(template.FuncMap{
		"BaseURL":      s.GetBaseURL,
		"ReferralLink": s.generatePartnerLink,
	}).ParseFiles(homeFiles...)
	if err != nil {
		return Error.Wrap(err)
	}

	s.templates.pageNotFound, err = template.New("page-not-found").Funcs(template.FuncMap{
		"BaseURL": s.GetBaseURL,
	}).ParseFiles(pageNotFoundFiles...)
	if err != nil {
		return Error.Wrap(err)
	}

	s.templates.internalError, err = template.New("internal-server-error").Funcs(template.FuncMap{
		"BaseURL": s.GetBaseURL,
	}).ParseFiles(internalErrorFiles...)
	if err != nil {
		return Error.Wrap(err)
	}

	s.templates.badRequest, err = template.New("bad-request-error").Funcs(template.FuncMap{
		"BaseURL": s.GetBaseURL,
	}).ParseFiles(badRequestFiles...)
	if err != nil {
		return Error.Wrap(err)
	}

	return nil
}
// generatePartnerLink returns the referral link(s) for the named partner
// offer; it backs the ReferralLink template helper.
// NOTE(review): uses context.TODO(); consider plumbing the request context
// through instead.
func (s *Server) generatePartnerLink(offerName string) ([]string, error) {
	return s.partners.GeneratePartnerLink(context.TODO(), offerName)
}
// CreateOffer handles requests to create new offers.
// The offer type comes from the {offer_type} route variable; newly created
// offers always start in the Active status. On success the client is
// redirected back to the offers page.
func (s *Server) CreateOffer(w http.ResponseWriter, req *http.Request) {
	offer, err := parseOfferForm(w, req)
	if err != nil {
		s.log.Error("failed to convert form to struct", zap.Error(err))
		s.serveBadRequest(w, req, err)
		return
	}

	offer.Status = rewards.Active

	switch offerType := mux.Vars(req)["offer_type"]; offerType {
	case "referral":
		offer.Type = rewards.Referral
	case "free-credit":
		offer.Type = rewards.FreeCredit
	case "partner":
		offer.Type = rewards.Partner
	default:
		badType := errs.New("response status %d : invalid offer type", http.StatusBadRequest)
		s.serveBadRequest(w, req, badType)
		return
	}

	if _, err = s.rewards.Create(req.Context(), &offer); err != nil {
		s.log.Error("failed to insert new offer", zap.Error(err))
		s.serveBadRequest(w, req, err)
		return
	}

	http.Redirect(w, req, "/", http.StatusSeeOther)
}
// StopOffer expires the current offer and replaces it with the default offer.
func (s *Server) StopOffer(w http.ResponseWriter, req *http.Request) {
	rawID := mux.Vars(req)["offer_id"]
	offerID, err := strconv.Atoi(rawID)
	if err != nil {
		s.log.Error("failed to parse offer id", zap.Error(err))
		s.serveBadRequest(w, req, err)
		return
	}

	if err = s.rewards.Finish(req.Context(), offerID); err != nil {
		s.log.Error("failed to stop offer", zap.Error(err))
		s.serveInternalError(w, req, err)
		return
	}

	http.Redirect(w, req, "/", http.StatusSeeOther)
}
// GetBaseURL returns base url from config.
// It is exposed to all templates as the BaseURL helper (see parseTemplates).
func (s *Server) GetBaseURL() string {
	return s.config.BaseURL
}
// serveNotFound handles 404 errors and defaults to 500 if template parsing fails.
func (s *Server) serveNotFound(w http.ResponseWriter, req *http.Request) {
	w.WriteHeader(http.StatusNotFound)

	if err := s.templates.pageNotFound.ExecuteTemplate(w, "base", nil); err != nil {
		s.log.Error("failed to execute template", zap.Error(err))
	}
}
// serveInternalError handles 500 errors and renders err to the internal-server-error template.
func (s *Server) serveInternalError(w http.ResponseWriter, req *http.Request, errMsg error) {
	w.WriteHeader(http.StatusInternalServerError)

	err := s.templates.internalError.ExecuteTemplate(w, "base", errMsg)
	if err != nil {
		s.log.Error("failed to execute template", zap.Error(err))
	}
}
// serveBadRequest handles 400 errors and renders err to the bad-request template.
func (s *Server) serveBadRequest(w http.ResponseWriter, req *http.Request, errMsg error) {
	w.WriteHeader(http.StatusBadRequest)

	err := s.templates.badRequest.ExecuteTemplate(w, "base", errMsg)
	if err != nil {
		s.log.Error("failed to execute template", zap.Error(err))
	}
}
// Run starts the server that host admin web app and api endpoint.
func (s *Server) Run(ctx context.Context) error {
	ctx, cancel := context.WithCancel(ctx)

	var group errgroup.Group
	// Shut the HTTP server down gracefully once the context is canceled.
	group.Go(func() error {
		<-ctx.Done()
		return Error.Wrap(s.server.Shutdown(context.Background()))
	})
	// Serve until shutdown; canceling the context on return ensures the
	// shutdown goroutine above also exits so Wait does not block forever.
	group.Go(func() error {
		defer cancel()
		err := s.server.Serve(s.listener)
		if errs2.IsCanceled(err) || errors.Is(err, http.ErrServerClosed) {
			// Graceful shutdown is not an error.
			err = nil
		}
		return Error.Wrap(err)
	})
	return group.Wait()
}
// Close closes server and underlying listener.
// Unlike Run's graceful shutdown, Close terminates immediately.
func (s *Server) Close() error {
	return Error.Wrap(s.server.Close())
}

View File

@ -1,124 +0,0 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.
package marketingweb_test
import (
"io"
"io/ioutil"
"net/http"
"net/url"
"strconv"
"testing"
"github.com/stretchr/testify/require"
"golang.org/x/sync/errgroup"
"storj.io/common/testcontext"
"storj.io/storj/private/testplanet"
)
// CreateRequest describes a single offer-creation POST: the endpoint path
// and the form values to submit.
type CreateRequest struct {
	Path   string
	Values url.Values
}
// TestCreateAndStopOffers exercises the marketing admin UI end to end:
// for each offer type it POSTs a creation form, fetches the home page,
// and then stops the created offer, asserting HTTP 200 at every step.
func TestCreateAndStopOffers(t *testing.T) {
	t.Skip("this test will be removed/modified with rework of offer/rewards code")

	testplanet.Run(t, testplanet.Config{
		SatelliteCount: 1,
	}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
		requests := []CreateRequest{
			{
				Path: "/create/referral",
				Values: url.Values{
					"Name":                      {"Referral Credit"},
					"Description":               {"desc"},
					"ExpiresAt":                 {"2119-06-27"},
					"InviteeCredit":             {"50"},
					"InviteeCreditDurationDays": {"50"},
					"AwardCredit":               {"50"},
					"AwardCreditDurationDays":   {"50"},
					"RedeemableCap":             {"150"},
				},
			}, {
				Path: "/create/free-credit",
				Values: url.Values{
					"Name":                      {"Free Credit"},
					"Description":               {"desc"},
					"ExpiresAt":                 {"2119-06-27"},
					"InviteeCredit":             {"50"},
					"InviteeCreditDurationDays": {"50"},
					"RedeemableCap":             {"150"},
				},
			}, {
				Path: "/create/partner",
				Values: url.Values{
					"Name":                      {"FileZilla"},
					"Description":               {"desc"},
					"ExpiresAt":                 {"2119-06-27"},
					"InviteeCredit":             {"50"},
					"InviteeCreditDurationDays": {"50"},
					"RedeemableCap":             {"150"},
				},
			},
		}

		addr := planet.Satellites[0].Marketing.Listener.Addr()

		var group errgroup.Group
		for index, offer := range requests {
			// Capture loop variables for the goroutine below.
			o := offer
			// Offer IDs are assumed to be assigned sequentially from 1.
			id := strconv.Itoa(index + 1)

			group.Go(func() error {
				baseURL := "http://" + addr.String()

				// Create the offer.
				req, err := http.PostForm(baseURL+o.Path, o.Values)
				if err != nil {
					return err
				}
				require.Equal(t, http.StatusOK, req.StatusCode)

				// reading out the rest of the connection
				_, err = io.Copy(ioutil.Discard, req.Body)
				if err != nil {
					return err
				}
				if err := req.Body.Close(); err != nil {
					return err
				}

				// Fetch the home page listing the offers.
				req, err = http.Get(baseURL)
				if err != nil {
					return err
				}
				require.Equal(t, http.StatusOK, req.StatusCode)
				_, err = io.Copy(ioutil.Discard, req.Body)
				if err != nil {
					return err
				}
				if err := req.Body.Close(); err != nil {
					return err
				}

				// Stop the created offer.
				req, err = http.Post(baseURL+"/stop/"+id, "application/x-www-form-urlencoded", nil)
				if err != nil {
					return err
				}
				require.Equal(t, http.StatusOK, req.StatusCode)
				_, err = io.Copy(ioutil.Discard, req.Body)
				if err != nil {
					return err
				}
				if err := req.Body.Close(); err != nil {
					return err
				}
				return nil
			})
		}
		err := group.Wait()
		require.NoError(t, err)
	})
}

View File

@ -103,7 +103,7 @@ func removeUplinkUserAgent(entries []useragent.Entry) []useragent.Entry {
var xs []useragent.Entry
for i := 0; i < len(entries); i++ {
// If it's "uplink" then skip it.
if strings.EqualFold(entries[i].Product, "uplink") {
if strings.EqualFold(entries[i].Product, uplinkProduct) {
// also skip any associated comments
for i+1 < len(entries) && entries[i+1].Comment != "" {
i++

View File

@ -79,6 +79,7 @@ type Endpoint struct {
revocations revocation.DB
defaultRS *pb.RedundancyScheme
config Config
versionCollector *versionCollector
}
// NewEndpoint creates new metainfo endpoint instance.
@ -127,6 +128,7 @@ func NewEndpoint(log *zap.Logger, metainfo *Service, deletePieces *piecedeletion
revocations: revocations,
defaultRS: defaultRSScheme,
config: config,
versionCollector: newVersionCollector(),
}, nil
}
@ -202,6 +204,11 @@ func calculateSpaceUsed(segmentSize int64, numberOfPieces int, rs storj.Redundan
func (endpoint *Endpoint) ProjectInfo(ctx context.Context, req *pb.ProjectInfoRequest) (_ *pb.ProjectInfoResponse, err error) {
defer mon.Task()(&ctx)(&err)
err = endpoint.versionCollector.collect(req.Header.UserAgent, mon.Func().ShortName())
if err != nil {
endpoint.log.Warn("unable to collect uplink version", zap.Error(err))
}
keyInfo, err := endpoint.validateAuth(ctx, req.Header, macaroon.Action{
Op: macaroon.ActionProjectInfo,
Time: time.Now(),
@ -221,6 +228,11 @@ func (endpoint *Endpoint) ProjectInfo(ctx context.Context, req *pb.ProjectInfoRe
func (endpoint *Endpoint) GetBucket(ctx context.Context, req *pb.BucketGetRequest) (resp *pb.BucketGetResponse, err error) {
defer mon.Task()(&ctx)(&err)
err = endpoint.versionCollector.collect(req.Header.UserAgent, mon.Func().ShortName())
if err != nil {
endpoint.log.Warn("unable to collect uplink version", zap.Error(err))
}
keyInfo, err := endpoint.validateAuth(ctx, req.Header, macaroon.Action{
Op: macaroon.ActionRead,
Bucket: req.Name,
@ -253,6 +265,11 @@ func (endpoint *Endpoint) GetBucket(ctx context.Context, req *pb.BucketGetReques
func (endpoint *Endpoint) CreateBucket(ctx context.Context, req *pb.BucketCreateRequest) (resp *pb.BucketCreateResponse, err error) {
defer mon.Task()(&ctx)(&err)
err = endpoint.versionCollector.collect(req.Header.UserAgent, mon.Func().ShortName())
if err != nil {
endpoint.log.Warn("unable to collect uplink version", zap.Error(err))
}
keyInfo, err := endpoint.validateAuth(ctx, req.Header, macaroon.Action{
Op: macaroon.ActionWrite,
Bucket: req.Name,
@ -329,6 +346,11 @@ func (endpoint *Endpoint) CreateBucket(ctx context.Context, req *pb.BucketCreate
func (endpoint *Endpoint) DeleteBucket(ctx context.Context, req *pb.BucketDeleteRequest) (resp *pb.BucketDeleteResponse, err error) {
defer mon.Task()(&ctx)(&err)
err = endpoint.versionCollector.collect(req.Header.UserAgent, mon.Func().ShortName())
if err != nil {
endpoint.log.Warn("unable to collect uplink version", zap.Error(err))
}
now := time.Now()
keyInfo, err := endpoint.validateAuth(ctx, req.Header, macaroon.Action{
@ -448,6 +470,12 @@ func (endpoint *Endpoint) deleteBucketObjects(ctx context.Context, projectID uui
// ListBuckets returns buckets in a project where the bucket name matches the request cursor.
func (endpoint *Endpoint) ListBuckets(ctx context.Context, req *pb.BucketListRequest) (resp *pb.BucketListResponse, err error) {
defer mon.Task()(&ctx)(&err)
err = endpoint.versionCollector.collect(req.Header.UserAgent, mon.Func().ShortName())
if err != nil {
endpoint.log.Warn("unable to collect uplink version", zap.Error(err))
}
action := macaroon.Action{
// TODO: This has to be ActionList, but it seems to be set to
// ActionRead as a hacky workaround to make bucket listing possible.
@ -591,6 +619,11 @@ func convertBucketToProto(bucket storj.Bucket, rs *pb.RedundancyScheme) (pbBucke
func (endpoint *Endpoint) BeginObject(ctx context.Context, req *pb.ObjectBeginRequest) (resp *pb.ObjectBeginResponse, err error) {
defer mon.Task()(&ctx)(&err)
err = endpoint.versionCollector.collect(req.Header.UserAgent, mon.Func().ShortName())
if err != nil {
endpoint.log.Warn("unable to collect uplink version", zap.Error(err))
}
keyInfo, err := endpoint.validateAuth(ctx, req.Header, macaroon.Action{
Op: macaroon.ActionWrite,
Bucket: req.Bucket,
@ -722,6 +755,11 @@ func (endpoint *Endpoint) BeginObject(ctx context.Context, req *pb.ObjectBeginRe
func (endpoint *Endpoint) CommitObject(ctx context.Context, req *pb.ObjectCommitRequest) (resp *pb.ObjectCommitResponse, err error) {
defer mon.Task()(&ctx)(&err)
err = endpoint.versionCollector.collect(req.Header.UserAgent, mon.Func().ShortName())
if err != nil {
endpoint.log.Warn("unable to collect uplink version", zap.Error(err))
}
return endpoint.commitObject(ctx, req, nil)
}
@ -778,6 +816,11 @@ func (endpoint *Endpoint) commitObject(ctx context.Context, req *pb.ObjectCommit
func (endpoint *Endpoint) GetObject(ctx context.Context, req *pb.ObjectGetRequest) (resp *pb.ObjectGetResponse, err error) {
defer mon.Task()(&ctx)(&err)
err = endpoint.versionCollector.collect(req.Header.UserAgent, mon.Func().ShortName())
if err != nil {
endpoint.log.Warn("unable to collect uplink version", zap.Error(err))
}
keyInfo, err := endpoint.validateAuth(ctx, req.Header, macaroon.Action{
Op: macaroon.ActionRead,
Bucket: req.Bucket,
@ -939,6 +982,11 @@ func (endpoint *Endpoint) GetPendingObjects(ctx context.Context, req *pb.GetPend
func (endpoint *Endpoint) ListPendingObjectStreams(ctx context.Context, req *pb.ObjectListPendingStreamsRequest) (resp *pb.ObjectListPendingStreamsResponse, err error) {
defer mon.Task()(&ctx)(&err)
err = endpoint.versionCollector.collect(req.Header.UserAgent, mon.Func().ShortName())
if err != nil {
endpoint.log.Warn("unable to collect uplink version", zap.Error(err))
}
keyInfo, err := endpoint.validateAuth(ctx, req.Header, macaroon.Action{
Op: macaroon.ActionList,
Bucket: req.Bucket,
@ -1029,6 +1077,11 @@ func (endpoint *Endpoint) ListPendingObjectStreams(ctx context.Context, req *pb.
func (endpoint *Endpoint) BeginDeleteObject(ctx context.Context, req *pb.ObjectBeginDeleteRequest) (resp *pb.ObjectBeginDeleteResponse, err error) {
defer mon.Task()(&ctx)(&err)
err = endpoint.versionCollector.collect(req.Header.UserAgent, mon.Func().ShortName())
if err != nil {
endpoint.log.Warn("unable to collect uplink version", zap.Error(err))
}
now := time.Now()
keyInfo, err := endpoint.validateAuth(ctx, req.Header, macaroon.Action{
@ -1123,6 +1176,11 @@ func (endpoint *Endpoint) FinishDeleteObject(ctx context.Context, req *pb.Object
func (endpoint *Endpoint) GetObjectIPs(ctx context.Context, req *pb.ObjectGetIPsRequest) (resp *pb.ObjectGetIPsResponse, err error) {
defer mon.Task()(&ctx)(&err)
err = endpoint.versionCollector.collect(req.Header.UserAgent, mon.Func().ShortName())
if err != nil {
endpoint.log.Warn("unable to collect uplink version", zap.Error(err))
}
keyInfo, err := endpoint.validateAuth(ctx, req.Header, macaroon.Action{
Op: macaroon.ActionRead,
Bucket: req.Bucket,
@ -1212,6 +1270,11 @@ func (endpoint *Endpoint) GetObjectIPs(ctx context.Context, req *pb.ObjectGetIPs
func (endpoint *Endpoint) BeginSegment(ctx context.Context, req *pb.SegmentBeginRequest) (resp *pb.SegmentBeginResponse, err error) {
defer mon.Task()(&ctx)(&err)
err = endpoint.versionCollector.collect(req.Header.UserAgent, mon.Func().ShortName())
if err != nil {
endpoint.log.Warn("unable to collect uplink version", zap.Error(err))
}
streamID, err := endpoint.unmarshalSatStreamID(ctx, req.StreamId)
if err != nil {
return nil, rpcstatus.Error(rpcstatus.InvalidArgument, err.Error())
@ -1314,6 +1377,11 @@ func (endpoint *Endpoint) BeginSegment(ctx context.Context, req *pb.SegmentBegin
func (endpoint *Endpoint) CommitSegment(ctx context.Context, req *pb.SegmentCommitRequest) (resp *pb.SegmentCommitResponse, err error) {
defer mon.Task()(&ctx)(&err)
err = endpoint.versionCollector.collect(req.Header.UserAgent, mon.Func().ShortName())
if err != nil {
endpoint.log.Warn("unable to collect uplink version", zap.Error(err))
}
_, resp, err = endpoint.commitSegment(ctx, req, true)
return resp, err
}
@ -1459,6 +1527,11 @@ func (endpoint *Endpoint) commitSegment(ctx context.Context, req *pb.SegmentComm
func (endpoint *Endpoint) MakeInlineSegment(ctx context.Context, req *pb.SegmentMakeInlineRequest) (resp *pb.SegmentMakeInlineResponse, err error) {
defer mon.Task()(&ctx)(&err)
err = endpoint.versionCollector.collect(req.Header.UserAgent, mon.Func().ShortName())
if err != nil {
endpoint.log.Warn("unable to collect uplink version", zap.Error(err))
}
_, resp, err = endpoint.makeInlineSegment(ctx, req, true)
return resp, err
}
@ -1634,6 +1707,11 @@ func (endpoint *Endpoint) ListSegments(ctx context.Context, req *pb.SegmentListR
func (endpoint *Endpoint) DownloadSegment(ctx context.Context, req *pb.SegmentDownloadRequest) (resp *pb.SegmentDownloadResponse, err error) {
defer mon.Task()(&ctx)(&err)
err = endpoint.versionCollector.collect(req.Header.UserAgent, mon.Func().ShortName())
if err != nil {
endpoint.log.Warn("unable to collect uplink version", zap.Error(err))
}
streamID, err := endpoint.unmarshalSatStreamID(ctx, req.StreamId)
if err != nil {
return nil, rpcstatus.Error(rpcstatus.InvalidArgument, err.Error())
@ -2171,6 +2249,12 @@ func groupPiecesByNodeID(segments []metabase.DeletedSegmentInfo) map[storj.NodeI
// RevokeAPIKey handles requests to revoke an api key.
func (endpoint *Endpoint) RevokeAPIKey(ctx context.Context, req *pb.RevokeAPIKeyRequest) (resp *pb.RevokeAPIKeyResponse, err error) {
defer mon.Task()(&ctx)(&err)
err = endpoint.versionCollector.collect(req.Header.UserAgent, mon.Func().ShortName())
if err != nil {
endpoint.log.Warn("unable to collect uplink version", zap.Error(err))
}
macToRevoke, err := macaroon.ParseMacaroon(req.GetApiKey())
if err != nil {
return nil, rpcstatus.Error(rpcstatus.InvalidArgument, "API key to revoke is not a macaroon")

View File

@ -1379,10 +1379,10 @@ func TestDeleteBatchWithoutPermission(t *testing.T) {
err := planet.Uplinks[0].CreateBucket(ctx, planet.Satellites[0], "test-bucket")
require.NoError(t, err)
apiKey, err = apiKey.Restrict(macaroon.Caveat{
apiKey, err = apiKey.Restrict(macaroon.WithNonce(macaroon.Caveat{
DisallowLists: true,
DisallowReads: true,
})
}))
require.NoError(t, err)
metainfoClient, err := planet.Uplinks[0].DialMetainfo(ctx, planet.Satellites[0], apiKey)

View File

@ -0,0 +1,68 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.
package metainfo
import (
"strings"
"sync"
"github.com/spacemonkeygo/monkit/v3"
"github.com/zeebo/errs"
"storj.io/common/useragent"
)
const uplinkProduct = "uplink"
// versionOccurrence identifies a single (uplink version, endpoint method)
// pair whose request count is metered by versionCollector.
type versionOccurrence struct {
	Version string
	Method  string
}
// versionCollector meters how often each uplink version calls each endpoint
// method. The mutex guards the lazily populated versions map.
type versionCollector struct {
	mu       sync.Mutex
	versions map[versionOccurrence]*monkit.Meter
}
// newVersionCollector initializes an empty versionCollector ready for use.
func newVersionCollector() *versionCollector {
	vc := new(versionCollector)
	vc.versions = make(map[versionOccurrence]*monkit.Meter)
	return vc
}
// collect parses a raw User-Agent header, extracts the uplink product
// version (or "unknown" when the header is empty or names no uplink entry),
// and marks a per-(version, method) meter so uplink version adoption can be
// monitored.
//
// It returns an error only when the user agent cannot be parsed; metering
// itself never fails.
func (vc *versionCollector) collect(useragentRaw []byte, method string) error {
	version := "unknown"
	if len(useragentRaw) != 0 {
		entries, err := useragent.ParseEntries(useragentRaw)
		if err != nil {
			return errs.New("invalid user agent %q: %v", string(useragentRaw), err)
		}

		for _, entry := range entries {
			if strings.EqualFold(entry.Product, uplinkProduct) {
				version = entry.Version
				break
			}
		}
	}

	vo := versionOccurrence{
		Version: version,
		Method:  method,
	}

	vc.mu.Lock()
	// Lazily create and chain a meter the first time this
	// (version, method) pair is seen.
	meter, ok := vc.versions[vo]
	if !ok {
		meter = monkit.NewMeter(monkit.NewSeriesKey("uplink_versions").WithTag("version", version).WithTag("method", method))
		mon.Chain(meter)
		vc.versions[vo] = meter
	}
	vc.mu.Unlock()

	// Mark outside the lock, matching the original locking discipline:
	// only the map is guarded, not the meter itself.
	meter.Mark(1)

	return nil
}

View File

@ -507,8 +507,9 @@ func TestUpdateCheckIn(t *testing.T) {
FreeDisk: int64(5678),
},
Operator: &pb.NodeOperator{
Email: expectedEmail,
Wallet: "0x123",
Email: expectedEmail,
Wallet: "0x123",
WalletFeatures: []string{"example"},
},
Version: &pb.NodeVersion{
Version: "v0.0.0",
@ -529,8 +530,9 @@ func TestUpdateCheckIn(t *testing.T) {
},
Type: pb.NodeType_STORAGE,
Operator: pb.NodeOperator{
Email: info.Operator.GetEmail(),
Wallet: info.Operator.GetWallet(),
Email: info.Operator.GetEmail(),
Wallet: info.Operator.GetWallet(),
WalletFeatures: info.Operator.GetWalletFeatures(),
},
Capacity: pb.NodeCapacity{
FreeDisk: info.Capacity.GetFreeDisk(),

View File

@ -118,44 +118,62 @@ func testDatabase(ctx context.Context, t *testing.T, cache overlay.DB) {
update, err := cache.UpdateNodeInfo(ctx, nodeID, &overlay.InfoResponse{
Operator: &pb.NodeOperator{
Wallet: "0x1111111111111111111111111111111111111111",
Email: "abc123@mail.test",
Wallet: "0x1111111111111111111111111111111111111111",
Email: "abc123@mail.test",
WalletFeatures: []string{"wallet_features"},
},
})
require.NoError(t, err)
require.NotNil(t, update)
require.Equal(t, "0x1111111111111111111111111111111111111111", update.Operator.Wallet)
require.Equal(t, "abc123@mail.test", update.Operator.Email)
require.Equal(t, []string{"wallet_features"}, update.Operator.WalletFeatures)
found, err := cache.Get(ctx, nodeID)
require.NoError(t, err)
require.NotNil(t, found)
require.Equal(t, "0x1111111111111111111111111111111111111111", found.Operator.Wallet)
require.Equal(t, "abc123@mail.test", found.Operator.Email)
require.Equal(t, []string{"wallet_features"}, found.Operator.WalletFeatures)
updateEmail, err := cache.UpdateNodeInfo(ctx, nodeID, &overlay.InfoResponse{
Operator: &pb.NodeOperator{
Wallet: update.Operator.Wallet,
Email: "def456@mail.test",
Wallet: update.Operator.Wallet,
Email: "def456@mail.test",
WalletFeatures: update.Operator.WalletFeatures,
},
})
require.NoError(t, err)
assert.NotNil(t, updateEmail)
assert.Equal(t, "0x1111111111111111111111111111111111111111", updateEmail.Operator.Wallet)
assert.Equal(t, "def456@mail.test", updateEmail.Operator.Email)
assert.Equal(t, []string{"wallet_features"}, updateEmail.Operator.WalletFeatures)
updateWallet, err := cache.UpdateNodeInfo(ctx, nodeID, &overlay.InfoResponse{
Operator: &pb.NodeOperator{
Wallet: "0x2222222222222222222222222222222222222222",
Email: updateEmail.Operator.Email,
Wallet: "0x2222222222222222222222222222222222222222",
Email: updateEmail.Operator.Email,
WalletFeatures: update.Operator.WalletFeatures,
},
})
require.NoError(t, err)
assert.NotNil(t, updateWallet)
assert.Equal(t, "0x2222222222222222222222222222222222222222", updateWallet.Operator.Wallet)
assert.Equal(t, "def456@mail.test", updateWallet.Operator.Email)
assert.Equal(t, []string{"wallet_features"}, updateWallet.Operator.WalletFeatures)
updateWalletFeatures, err := cache.UpdateNodeInfo(ctx, nodeID, &overlay.InfoResponse{
Operator: &pb.NodeOperator{
Wallet: updateWallet.Operator.Wallet,
Email: updateEmail.Operator.Email,
WalletFeatures: []string{"wallet_features_updated"},
},
})
require.NoError(t, err)
assert.NotNil(t, updateWalletFeatures)
assert.Equal(t, "0x2222222222222222222222222222222222222222", updateWalletFeatures.Operator.Wallet)
assert.Equal(t, "def456@mail.test", updateWalletFeatures.Operator.Email)
assert.Equal(t, []string{"wallet_features_updated"}, updateWalletFeatures.Operator.WalletFeatures)
}
{ // TestUpdateExists

View File

@ -29,7 +29,6 @@ import (
"storj.io/storj/satellite/gc"
"storj.io/storj/satellite/gracefulexit"
"storj.io/storj/satellite/mailservice"
"storj.io/storj/satellite/marketingweb"
"storj.io/storj/satellite/metainfo"
"storj.io/storj/satellite/metainfo/expireddeletion"
"storj.io/storj/satellite/metrics"
@ -39,13 +38,11 @@ import (
"storj.io/storj/satellite/overlay/straynodes"
"storj.io/storj/satellite/payments/paymentsconfig"
"storj.io/storj/satellite/payments/stripecoinpayments"
"storj.io/storj/satellite/referrals"
"storj.io/storj/satellite/repair/checker"
"storj.io/storj/satellite/repair/irreparable"
"storj.io/storj/satellite/repair/queue"
"storj.io/storj/satellite/repair/repairer"
"storj.io/storj/satellite/revocation"
"storj.io/storj/satellite/rewards"
"storj.io/storj/satellite/snopayouts"
)
@ -85,8 +82,6 @@ type DB interface {
Irreparable() irreparable.DB
// Console returns database for satellite console
Console() console.DB
// Rewards returns database for marketing admin GUI
Rewards() rewards.DB
// Orders returns database for orders
Orders() orders.DB
// Containment returns database for containment
@ -140,12 +135,8 @@ type Config struct {
Payments paymentsconfig.Config
Referrals referrals.Config
Console consoleweb.Config
Marketing marketingweb.Config
Version version_checker.Config
GracefulExit gracefulexit.Config

View File

@ -1,187 +0,0 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.
package referrals
import (
"context"
"github.com/spacemonkeygo/monkit/v3"
"github.com/zeebo/errs"
"go.uber.org/zap"
"golang.org/x/crypto/bcrypt"
"storj.io/common/pb"
"storj.io/common/rpc"
"storj.io/common/signing"
"storj.io/common/storj"
"storj.io/common/uuid"
"storj.io/storj/satellite/console"
)
var mon = monkit.Package()
var (
// ErrUsedEmail is an error class for reporting already used emails.
ErrUsedEmail = errs.Class("email used error")
)
// Config contains configuration for the referrals service.
type Config struct {
	// ReferralManagerURL is the node URL of the referral manager service;
	// when zero, the service refuses to dial (see referralManagerConn).
	ReferralManagerURL storj.NodeURL `help:"the URL for referral manager"`
}
// Service allows communicating with the Referral Manager.
//
// architecture: Service
type Service struct {
	log          *zap.Logger
	signer       signing.Signer // identifies this satellite to the referral manager
	config       Config
	dialer       rpc.Dialer // used to dial the referral manager
	db           console.Users
	passwordCost int // bcrypt cost used when hashing new users' passwords
}
// NewService constructs a Service for handling referral information.
func NewService(log *zap.Logger, signer signing.Signer, config Config, dialer rpc.Dialer, db console.Users, passwordCost int) *Service {
	service := new(Service)
	service.log = log
	service.signer = signer
	service.config = config
	service.dialer = dialer
	service.db = db
	service.passwordCost = passwordCost
	return service
}
// GetTokens returns referral tokens owned by the given user, fetched from
// the referral manager.
//
// NOTE(review): a token that fails UUID conversion is logged and left as a
// zero UUID in the returned slice — confirm callers tolerate zero entries.
func (service *Service) GetTokens(ctx context.Context, userID *uuid.UUID) (tokens []uuid.UUID, err error) {
	defer mon.Task()(&ctx)(&err)

	if userID.IsZero() {
		return nil, errs.New("user ID is not defined")
	}

	conn, err := service.referralManagerConn(ctx)
	if err != nil {
		return nil, errs.Wrap(err)
	}
	defer func() {
		// Combine instead of assigning so a Close failure cannot mask an
		// earlier error (the bare assignment clobbered err).
		err = errs.Combine(err, conn.Close())
	}()

	client := pb.NewDRPCReferralManagerClient(conn)
	response, err := client.GetTokens(ctx, &pb.GetTokensRequest{
		OwnerUserId:      userID[:],
		OwnerSatelliteId: service.signer.ID(),
	})
	if err != nil {
		return nil, errs.Wrap(err)
	}

	tokensInBytes := response.GetTokenSecrets()
	// Treat a nil slice the same as an empty one; the previous
	// `tokensInBytes != nil && len(...) == 0` check let nil through.
	if len(tokensInBytes) == 0 {
		return nil, errs.New("no available tokens")
	}

	tokens = make([]uuid.UUID, len(tokensInBytes))
	for i := range tokensInBytes {
		token, err := uuid.FromBytes(tokensInBytes[i])
		if err != nil {
			service.log.Debug("failed to convert bytes to UUID", zap.Error(err))
			continue
		}
		tokens[i] = token
	}

	return tokens, nil
}
// CreateUser validates user's registration information and creates a new user.
func (service *Service) CreateUser(ctx context.Context, user CreateUser) (_ *console.User, err error) {
	defer mon.Task()(&ctx)(&err)

	if err := user.IsValid(); err != nil {
		return nil, ErrValidation.Wrap(err)
	}

	// Registration through this service is referral-only.
	if len(user.ReferralToken) == 0 {
		return nil, errs.New("referral token is not defined")
	}

	// A nil error means a user with this email already exists.
	// NOTE(review): any lookup failure (including transient DB errors) is
	// treated as "email free" — confirm this best-effort check is intended.
	_, err = service.db.GetByEmail(ctx, user.Email)
	if err == nil {
		return nil, ErrUsedEmail.New("")
	}

	userID, err := uuid.New()
	if err != nil {
		return nil, errs.Wrap(err)
	}

	// Redeem the token with the referral manager before persisting the
	// user; a redemption failure aborts registration.
	err = service.redeemToken(ctx, &userID, user.ReferralToken)
	if err != nil {
		return nil, errs.Wrap(err)
	}

	hash, err := bcrypt.GenerateFromPassword([]byte(user.Password), service.passwordCost)
	if err != nil {
		return nil, errs.Wrap(err)
	}

	newUser := &console.User{
		ID:           userID,
		Email:        user.Email,
		FullName:     user.FullName,
		ShortName:    user.ShortName,
		PasswordHash: hash,
	}

	u, err := service.db.Insert(ctx,
		newUser,
	)
	if err != nil {
		return nil, errs.Wrap(err)
	}

	return u, nil
}
// redeemToken redeems a referral token for the given user with the referral
// manager service.
func (service *Service) redeemToken(ctx context.Context, userID *uuid.UUID, token string) (err error) {
	// Validate arguments before paying for a dial to the referral manager
	// (previously the connection was established first).
	if userID.IsZero() || len(token) == 0 {
		return errs.New("invalid argument")
	}

	referralToken, err := uuid.FromString(token)
	if err != nil {
		return errs.Wrap(err)
	}

	conn, err := service.referralManagerConn(ctx)
	if err != nil {
		return errs.Wrap(err)
	}
	defer func() {
		// The result must be named for this assignment to reach the
		// caller; the original returned a plain error, so the Close
		// error was silently dropped.
		err = errs.Combine(err, conn.Close())
	}()

	client := pb.NewDRPCReferralManagerClient(conn)
	_, err = client.RedeemToken(ctx, &pb.RedeemTokenRequest{
		Token:             referralToken[:],
		RedeemUserId:      userID[:],
		RedeemSatelliteId: service.signer.ID(),
	})
	if err != nil {
		return errs.Wrap(err)
	}

	return nil
}
// referralManagerConn dials the configured referral manager and returns the
// connection; the caller is responsible for closing it.
func (service *Service) referralManagerConn(ctx context.Context) (*rpc.Conn, error) {
	url := service.config.ReferralManagerURL
	if url.IsZero() {
		return nil, errs.New("missing referral manager url configuration")
	}

	return service.dialer.DialNodeURL(ctx, url)
}

View File

@ -1,93 +0,0 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.
package referrals_test
import (
"context"
"testing"
"github.com/stretchr/testify/require"
"go.uber.org/zap"
"storj.io/common/pb"
"storj.io/common/rpc/rpcstatus"
"storj.io/common/testcontext"
"storj.io/common/testrand"
"storj.io/storj/private/testplanet"
"storj.io/storj/satellite/referrals"
)
// TestServiceSuccess runs a satellite against a happy-path referral manager
// mock and checks that GetTokens returns the configured number of tokens and
// that CreateUser succeeds with a referral token.
func TestServiceSuccess(t *testing.T) {
	tokenCount := 2
	testplanet.Run(t, testplanet.Config{
		SatelliteCount: 1,
		Reconfigure: testplanet.Reconfigure{
			ReferralManagerServer: func(logger *zap.Logger) pb.DRPCReferralManagerServer {
				endpoint := &endpointHappyPath{}
				endpoint.SetTokenCount(tokenCount)
				return endpoint
			},
		},
	}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
		satellite := planet.Satellites[0]

		userID := testrand.UUID()
		tokens, err := satellite.API.Referrals.Service.GetTokens(ctx, &userID)
		require.NoError(t, err)
		require.Len(t, tokens, tokenCount)

		user := referrals.CreateUser{
			FullName:      "test",
			ShortName:     "test",
			Email:         "test@mail.test",
			Password:      "123a123",
			ReferralToken: testrand.UUID().String(),
		}

		// The created user must echo back the registration fields.
		createdUser, err := satellite.API.Referrals.Service.CreateUser(ctx, user)
		require.NoError(t, err)
		require.Equal(t, user.Email, createdUser.Email)
		require.Equal(t, user.FullName, createdUser.FullName)
		require.Equal(t, user.ShortName, createdUser.ShortName)
	})
}
// TestServiceRedeemFailure runs a satellite against a referral manager mock
// whose RedeemToken always fails and checks that CreateUser surfaces the
// redemption error.
func TestServiceRedeemFailure(t *testing.T) {
	testplanet.Run(t, testplanet.Config{
		SatelliteCount: 1,
		Reconfigure: testplanet.Reconfigure{
			ReferralManagerServer: func(logger *zap.Logger) pb.DRPCReferralManagerServer {
				endpoint := &endpointFailedPath{}
				endpoint.SetTokenCount(2)
				return endpoint
			},
		},
	}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
		satellite := planet.Satellites[0]

		user := referrals.CreateUser{
			FullName:      "test",
			ShortName:     "test",
			Email:         "test@mail.test",
			Password:      "123a123",
			ReferralToken: testrand.UUID().String(),
		}

		_, err := satellite.API.Referrals.Service.CreateUser(ctx, user)
		require.Error(t, err)
	})
}
// endpointHappyPath is a referral manager mock that uses the default
// (successful) behavior for every call.
type endpointHappyPath struct {
	testplanet.DefaultReferralManagerServer
}
// endpointFailedPath is a referral manager mock whose RedeemToken call
// always fails (see its RedeemToken override).
type endpointFailedPath struct {
	testplanet.DefaultReferralManagerServer
}
// RedeemToken always fails with NotFound so tests can exercise the
// redemption error path.
func (endpoint *endpointFailedPath) RedeemToken(ctx context.Context, req *pb.RedeemTokenRequest) (*pb.RedeemTokenResponse, error) {
	return nil, rpcstatus.Error(rpcstatus.NotFound, "")
}

View File

@ -1,43 +0,0 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.
package referrals
import (
"net/mail"
"github.com/zeebo/errs"
"storj.io/common/uuid"
"storj.io/storj/satellite/console"
)
// ErrValidation validation related error class.
var ErrValidation = errs.Class("validation error")
// CreateUser contains information that's necessary for creating a new user through referral program.
type CreateUser struct {
	FullName  string `json:"fullName"`
	ShortName string `json:"shortName"`
	Email     string `json:"email"`
	Password  string `json:"password"`
	// ReferralToken is optional here, but when set it must be a valid UUID
	// string (enforced by IsValid).
	ReferralToken string `json:"referralToken"`
}
// IsValid checks CreateUser validity and returns an error describing everything that is wrong.
func (user *CreateUser) IsValid() error {
	var group errs.Group

	group.Add(console.ValidateFullName(user.FullName))
	group.Add(console.ValidatePassword(user.Password))

	// The email must parse as a valid address.
	if _, err := mail.ParseAddress(user.Email); err != nil {
		group.Add(err)
	}

	// A referral token is optional, but when present it must be a UUID.
	if token := user.ReferralToken; token != "" {
		if _, err := uuid.FromString(token); err != nil {
			group.Add(err)
		}
	}

	return group.Err()
}

View File

@ -50,12 +50,13 @@ func (ie *irreparableError) Error() string {
// SegmentRepairer for segments.
type SegmentRepairer struct {
log *zap.Logger
metabase metainfo.MetabaseDB
orders *orders.Service
overlay *overlay.Service
ec *ECRepairer
timeout time.Duration
log *zap.Logger
statsCollector *statsCollector
metabase metainfo.MetabaseDB
orders *orders.Service
overlay *overlay.Service
ec *ECRepairer
timeout time.Duration
// multiplierOptimalThreshold is the value that multiplied by the optimal
// threshold results in the maximum limit of number of nodes to upload
@ -87,6 +88,7 @@ func NewSegmentRepairer(
return &SegmentRepairer{
log: log,
statsCollector: newStatsCollector(),
metabase: metabase,
orders: orders,
overlay: overlay,
@ -130,14 +132,25 @@ func (repairer *SegmentRepairer) Repair(ctx context.Context, path storj.Path) (s
return true, invalidRepairError.New("cannot repair inline segment")
}
mon.Meter("repair_attempts").Mark(1) //mon:locked
mon.IntVal("repair_segment_size").Observe(int64(segment.EncryptedSize)) //mon:locked
redundancy, err := eestream.NewRedundancyStrategyFromStorj(segment.Redundancy)
if err != nil {
return true, invalidRepairError.New("invalid redundancy strategy: %w", err)
}
stats := repairer.getStatsByRS(&pb.RedundancyScheme{
Type: pb.RedundancyScheme_SchemeType(segment.Redundancy.Algorithm),
ErasureShareSize: segment.Redundancy.ShareSize,
MinReq: int32(segment.Redundancy.RequiredShares),
RepairThreshold: int32(segment.Redundancy.RepairShares),
SuccessThreshold: int32(segment.Redundancy.OptimalShares),
Total: int32(segment.Redundancy.TotalShares),
})
mon.Meter("repair_attempts").Mark(1) //mon:locked
stats.repairAttempts.Mark(1)
mon.IntVal("repair_segment_size").Observe(int64(segment.EncryptedSize)) //mon:locked
stats.repairSegmentSize.Observe(int64(segment.EncryptedSize))
var excludeNodeIDs storj.NodeIDList
pieces := segment.Pieces
missingPieces, err := repairer.overlay.GetMissingPieces(ctx, pieces)
@ -149,7 +162,9 @@ func (repairer *SegmentRepairer) Repair(ctx context.Context, path storj.Path) (s
// irreparable piece
if numHealthy < int(segment.Redundancy.RequiredShares) {
mon.Counter("repairer_segments_below_min_req").Inc(1) //mon:locked
mon.Meter("repair_nodes_unavailable").Mark(1) //mon:locked
stats.repairerSegmentsBelowMinReq.Inc(1)
mon.Meter("repair_nodes_unavailable").Mark(1) //mon:locked
stats.repairerNodesUnavailable.Mark(1)
return true, &irreparableError{
path: path,
piecesAvailable: int32(numHealthy),
@ -159,6 +174,7 @@ func (repairer *SegmentRepairer) Repair(ctx context.Context, path storj.Path) (s
// ensure we get values, even if only zero values, so that redash can have an alert based on this
mon.Counter("repairer_segments_below_min_req").Inc(0) //mon:locked
stats.repairerSegmentsBelowMinReq.Inc(0)
repairThreshold := int32(segment.Redundancy.RepairShares)
@ -176,6 +192,7 @@ func (repairer *SegmentRepairer) Repair(ctx context.Context, path storj.Path) (s
// repair not needed
if numHealthy > int(repairThreshold) {
mon.Meter("repair_unnecessary").Mark(1) //mon:locked
stats.repairUnnecessary.Mark(1)
repairer.log.Debug("segment above repair threshold", zap.Int("numHealthy", numHealthy), zap.Int32("repairThreshold", repairThreshold))
return true, nil
}
@ -185,6 +202,7 @@ func (repairer *SegmentRepairer) Repair(ctx context.Context, path storj.Path) (s
healthyRatioBeforeRepair = float64(numHealthy) / float64(segment.Redundancy.TotalShares)
}
mon.FloatVal("healthy_ratio_before_repair").Observe(healthyRatioBeforeRepair) //mon:locked
stats.healthyRatioBeforeRepair.Observe(healthyRatioBeforeRepair)
lostPiecesSet := sliceToSet(missingPieces)
@ -281,6 +299,8 @@ func (repairer *SegmentRepairer) Repair(ctx context.Context, path storj.Path) (s
// to wait for nodes to come back online.
if irreparableErr, ok := err.(*irreparableError); ok {
mon.Meter("repair_too_many_nodes_failed").Mark(1) //mon:locked
stats.repairTooManyNodesFailed.Mark(1)
// irreparableErr.segmentInfo = pointer
return true, irreparableErr
}
// The segment's redundancy strategy is invalid, or else there was an internal error.
@ -318,17 +338,22 @@ func (repairer *SegmentRepairer) Repair(ctx context.Context, path storj.Path) (s
// repair "succeeded" in that the segment is now healthier than it was, but it is
// not as healthy as we want it to be.
mon.Meter("repair_failed").Mark(1) //mon:locked
stats.repairFailed.Mark(1)
case healthyAfterRepair < int(segment.Redundancy.OptimalShares):
mon.Meter("repair_partial").Mark(1) //mon:locked
stats.repairPartial.Mark(1)
default:
mon.Meter("repair_success").Mark(1) //mon:locked
stats.repairSuccess.Mark(1)
}
healthyRatioAfterRepair := 0.0
if segment.Redundancy.TotalShares != 0 {
healthyRatioAfterRepair = float64(healthyAfterRepair) / float64(segment.Redundancy.TotalShares)
}
mon.FloatVal("healthy_ratio_after_repair").Observe(healthyRatioAfterRepair) //mon:locked
stats.healthyRatioAfterRepair.Observe(healthyRatioAfterRepair)
var toRemove metabase.Pieces
if healthyAfterRepair >= int(segment.Redundancy.OptimalShares) {
@ -376,8 +401,11 @@ func (repairer *SegmentRepairer) Repair(ctx context.Context, path storj.Path) (s
return false, metainfoPutError.Wrap(err)
}
// TODO all values bellow are zero
mon.IntVal("segment_time_until_repair").Observe(int64(segmentAge.Seconds())) //mon:locked
mon.IntVal("segment_repair_count").Observe(repairCount) //mon:locked
stats.segmentTimeUntilRepair.Observe((int64(segmentAge.Seconds())))
mon.IntVal("segment_repair_count").Observe(repairCount) //mon:locked
stats.segmentRepairCount.Observe(repairCount)
return true, nil
}
@ -421,6 +449,20 @@ func updatePieces(orignalPieces, toAddPieces, toRemovePieces metabase.Pieces) (m
return newPieces, nil
}
func (repairer *SegmentRepairer) getStatsByRS(redundancy *pb.RedundancyScheme) *stats {
rsString := getRSString(repairer.loadRedundancy(redundancy))
return repairer.statsCollector.getStatsByRS(rsString)
}
func (repairer *SegmentRepairer) loadRedundancy(redundancy *pb.RedundancyScheme) (int, int, int, int) {
repair := int(redundancy.RepairThreshold)
overrideValue := repairer.repairOverrides.GetOverrideValuePB(redundancy)
if overrideValue != 0 {
repair = int(overrideValue)
}
return int(redundancy.MinReq), repair, int(redundancy.SuccessThreshold), int(redundancy.Total)
}
func (repairer *SegmentRepairer) updateAuditFailStatus(ctx context.Context, failedAuditNodeIDs storj.NodeIDList) (failedNum int, err error) {
updateRequests := make([]*overlay.UpdateRequest, len(failedAuditNodeIDs))
for i, nodeID := range failedAuditNodeIDs {

View File

@ -0,0 +1,92 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.
package repairer
import (
"fmt"
"github.com/spacemonkeygo/monkit/v3"
)
// statsCollector holds a *stats for each redundancy scheme
// seen by the repairer. These are chained into the monkit scope for
// monitoring as they are initialized.
//
// NOTE(review): the map is accessed without synchronization; confirm the
// repairer only calls getStatsByRS from a single goroutine before reuse.
type statsCollector struct {
	// stats is keyed by the "min/repair/success/total" string produced by
	// getRSString.
	stats map[string]*stats
}
// newStatsCollector initializes an empty statsCollector.
func newStatsCollector() *statsCollector {
	collector := new(statsCollector)
	collector.stats = make(map[string]*stats)
	return collector
}
// getStatsByRS returns the stats for the given redundancy scheme string,
// lazily creating a new instance and chaining it into the monkit scope on
// first use.
func (collector *statsCollector) getStatsByRS(rs string) *stats {
	// Use a short local name: the original named this variable `stats`,
	// shadowing the package-level type of the same name.
	s, ok := collector.stats[rs]
	if !ok {
		s = newStats(rs)
		mon.Chain(s)
		collector.stats[rs] = s
	}
	return s
}
// stats is used for collecting and reporting repairer metrics.
//
// add any new metrics tagged with rs_scheme to this struct and set them
// in newStats.
//
// Each field mirrors an untagged repairer metric of the same name, with an
// rs_scheme tag identifying the redundancy scheme it was recorded for.
type stats struct {
	repairAttempts              *monkit.Meter
	repairSegmentSize           *monkit.IntVal
	repairerSegmentsBelowMinReq *monkit.Counter
	repairerNodesUnavailable    *monkit.Meter
	repairUnnecessary           *monkit.Meter
	healthyRatioBeforeRepair    *monkit.FloatVal
	repairTooManyNodesFailed    *monkit.Meter
	repairFailed                *monkit.Meter
	repairPartial               *monkit.Meter
	repairSuccess               *monkit.Meter
	healthyRatioAfterRepair     *monkit.FloatVal
	segmentTimeUntilRepair      *monkit.IntVal
	segmentRepairCount          *monkit.IntVal
}
// newStats creates a stats instance whose series are all tagged with the
// given redundancy scheme string.
func newStats(rs string) *stats {
	// key builds the shared series key so the name/rs_scheme tagging is
	// written once instead of per metric.
	key := func(name string) monkit.SeriesKey {
		return monkit.NewSeriesKey("tagged_repair_stats").WithTag("name", name).WithTag("rs_scheme", rs)
	}

	return &stats{
		repairAttempts:              monkit.NewMeter(key("repair_attempts")),
		repairSegmentSize:           monkit.NewIntVal(key("repair_segment_size")),
		repairerSegmentsBelowMinReq: monkit.NewCounter(key("repairer_segments_below_min_req")),
		repairerNodesUnavailable:    monkit.NewMeter(key("repairer_nodes_unavailable")),
		repairUnnecessary:           monkit.NewMeter(key("repair_unnecessary")),
		healthyRatioBeforeRepair:    monkit.NewFloatVal(key("healthy_ratio_before_repair")),
		repairTooManyNodesFailed:    monkit.NewMeter(key("repair_too_many_nodes_failed")),
		repairFailed:                monkit.NewMeter(key("repair_failed")),
		repairPartial:               monkit.NewMeter(key("repair_partial")),
		repairSuccess:               monkit.NewMeter(key("repair_success")),
		healthyRatioAfterRepair:     monkit.NewFloatVal(key("healthy_ratio_after_repair")),
		segmentTimeUntilRepair:      monkit.NewIntVal(key("segment_time_until_repair")),
		segmentRepairCount:          monkit.NewIntVal(key("segment_repair_count")),
	}
}
// Stats implements the monkit.StatSource interface by forwarding the
// callback to every tracked metric.
func (stats *stats) Stats(cb func(key monkit.SeriesKey, field string, val float64)) {
	sources := []monkit.StatSource{
		stats.repairAttempts,
		stats.repairSegmentSize,
		stats.repairerSegmentsBelowMinReq,
		stats.repairerNodesUnavailable,
		stats.repairUnnecessary,
		stats.healthyRatioBeforeRepair,
		stats.repairTooManyNodesFailed,
		stats.repairFailed,
		stats.repairPartial,
		stats.repairSuccess,
		stats.healthyRatioAfterRepair,
		stats.segmentTimeUntilRepair,
		stats.segmentRepairCount,
	}
	for _, source := range sources {
		source.Stats(cb)
	}
}
// getRSString formats a redundancy scheme as "min/repair/success/total".
func getRSString(min, repair, success, total int) string {
	// fmt.Sprint inserts no space between adjacent int and string operands,
	// so this yields the same "%d/%d/%d/%d" result as the Sprintf form.
	return fmt.Sprint(min, "/", repair, "/", success, "/", total)
}

View File

@ -5,8 +5,6 @@ package rewards
import (
"context"
"encoding/base32"
"path"
"github.com/zeebo/errs"
"go.uber.org/zap"
@ -54,54 +52,6 @@ func NewPartnersService(log *zap.Logger, db PartnersDB, domains []string) *Partn
}
}
// parnterIDEncoding is base32 without padding.
var parnterIDEncoding = base32.StdEncoding.WithPadding(base32.NoPadding)
// GeneratePartnerLink returns one referral link per configured domain for
// the named partner, of the form <domain>/ref/<base32-encoded partner ID>.
func (service *PartnersService) GeneratePartnerLink(ctx context.Context, offerName string) ([]string, error) {
	partner, err := service.db.ByName(ctx, offerName)
	if err != nil {
		return nil, ErrPartners.Wrap(err)
	}

	var links []string
	for _, domain := range service.domains {
		encoded := parnterIDEncoding.EncodeToString([]byte(partner.ID))
		// NOTE(review): path.Join collapses consecutive slashes, so a
		// domain that includes a scheme ("https://…") would be mangled to
		// "https:/…" — confirm domains are stored without a scheme.
		links = append(links, path.Join(domain, "ref", encoded))
	}

	return links, nil
}
// GetActiveOffer returns an offer that is active based on its type.
//
// For Partner offers it selects the offer whose name matches the partner
// identified by partnerID; for all other types there must be exactly one
// active offer.
func (service *PartnersService) GetActiveOffer(ctx context.Context, offers Offers, offerType OfferType, partnerID string) (offer *Offer, err error) {
	if len(offers) < 1 {
		return nil, ErrOfferNotExist.New("no active offers")
	}
	switch offerType {
	case Partner:
		if partnerID == "" {
			return nil, errs.New("partner ID is empty")
		}
		partnerInfo, err := service.db.ByID(ctx, partnerID)
		if err != nil {
			return nil, ErrPartnerNotExist.Wrap(err)
		}
		for i := range offers {
			if offers[i].Name == partnerInfo.Name {
				offer = &offers[i]
			}
		}
		// Previously a partner with no matching offer fell through and
		// returned (nil, nil), handing callers a nil offer without error.
		if offer == nil {
			return nil, ErrOfferNotExist.New("no active offer for partner")
		}
	default:
		if len(offers) > 1 {
			return nil, errs.New("multiple active offers found")
		}
		offer = &offers[0]
	}

	return offer, nil
}
// ByName looks up partner by name.
func (service *PartnersService) ByName(ctx context.Context, name string) (PartnerInfo, error) {
return service.db.ByName(ctx, name)

View File

@ -1,128 +0,0 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information
package rewards
import (
"context"
"time"
"github.com/zeebo/errs"
"storj.io/storj/private/currency"
)
var (
// ErrReachedMaxCapacity is the error class used when an offer has reached its redemption capacity.
ErrReachedMaxCapacity = errs.Class("offer redemption has reached its capacity")
// ErrOfferNotExist is the error class used when no current offer is set.
ErrOfferNotExist = errs.Class("no current offer")
)
// DB holds information about offers.
//
// architecture: Database
type DB interface {
	// ListAll returns all offers.
	ListAll(ctx context.Context) (Offers, error)
	// GetActiveOffersByType returns the active offers of the given type.
	GetActiveOffersByType(ctx context.Context, offerType OfferType) (Offers, error)
	// Create inserts a new offer and returns the stored offer.
	Create(ctx context.Context, offer *NewOffer) (*Offer, error)
	// Finish marks the offer with the given ID as Done.
	Finish(ctx context.Context, offerID int) error
}
// NewOffer holds information that's needed for creating a new offer.
type NewOffer struct {
	Name                      string
	Description               string
	AwardCredit               currency.USD
	InviteeCredit             currency.USD
	RedeemableCap             int
	AwardCreditDurationDays   int
	InviteeCreditDurationDays int
	ExpiresAt                 time.Time
	Status                    OfferStatus
	Type                      OfferType
}

// UpdateOffer holds fields needed for updating an offer.
type UpdateOffer struct {
	ID        int
	Status    OfferStatus
	ExpiresAt time.Time
}

// RedeemOffer holds fields needed for redeeming an offer.
type RedeemOffer struct {
	RedeemableCap int
	Status        OfferStatus
	Type          OfferType
}

// Offers contains a slice of offers.
type Offers []Offer

// OfferType indicates the type of an offer.
type OfferType int

const (
	// Invalid is a default value for offers that don't have correct type associated with it.
	Invalid = OfferType(0)
	// FreeCredit is a type of offers used for Free Credit Program.
	FreeCredit = OfferType(1)
	// Referral is a type of offers used for Referral Program.
	Referral = OfferType(2)
	// Partner is an OfferType used be the Open Source Partner Program.
	Partner = OfferType(3)
)

// OfferStatus represents the different stage an offer can have in its life-cycle.
type OfferStatus int

const (
	// Done is the status of an offer that is no longer in use.
	Done = OfferStatus(iota)
	// Default is the status of an offer when there is no active offer.
	Default
	// Active is the status of an offer that is currently in use.
	Active
)
// Offer contains info needed for giving users free credits through different offer programs.
type Offer struct {
	ID          int
	Name        string
	Description string

	// Credit amounts and how long each credit stays valid, in days.
	AwardCredit               currency.USD
	InviteeCredit             currency.USD
	AwardCreditDurationDays   int
	InviteeCreditDurationDays int

	// RedeemableCap caps how many times the offer can be redeemed
	// (see ErrReachedMaxCapacity).
	RedeemableCap int

	ExpiresAt time.Time
	CreatedAt time.Time

	Status OfferStatus
	Type   OfferType
}
// IsEmpty reports whether the offer carries no name, i.e. was never populated.
func (offer Offer) IsEmpty() bool {
	return len(offer.Name) == 0
}
// IsZero reports whether the offer equals the zero-value Offer struct.
func (offer Offer) IsZero() bool {
	var zero Offer
	return offer == zero
}
// IsDefault reports whether the offer status is Default (no active offer).
func (status OfferStatus) IsDefault() bool {
	return status == Default
}

View File

@ -1,127 +0,0 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information
package rewards_test
import (
"testing"
"time"
"github.com/stretchr/testify/require"
"storj.io/common/testcontext"
"storj.io/storj/private/currency"
"storj.io/storj/private/testplanet"
"storj.io/storj/satellite/rewards"
)
// TestOffer_Database exercises create/list/activate/finish round-trips of
// the rewards DB for referral, free-credit, and partner offers, and checks
// that expired or zero-cap offers are rejected on create.
func TestOffer_Database(t *testing.T) {
	t.Skip("this test will be removed/modified with rework of offer/rewards code")
	testplanet.Run(t, testplanet.Config{
		SatelliteCount: 1,
	}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
		// Happy path
		validOffers := []rewards.NewOffer{
			{
				Name:                      "test",
				Description:               "test offer 1",
				AwardCredit:               currency.Cents(100),
				InviteeCredit:             currency.Cents(50),
				AwardCreditDurationDays:   60,
				InviteeCreditDurationDays: 30,
				RedeemableCap:             50,
				ExpiresAt:                 time.Now().Add(time.Hour * 1).Truncate(time.Millisecond),
				Status:                    rewards.Active,
				Type:                      rewards.Referral,
			},
			{
				Name:                      "test",
				Description:               "test offer 2",
				AwardCredit:               currency.Cents(0),
				InviteeCredit:             currency.Cents(50),
				AwardCreditDurationDays:   0,
				InviteeCreditDurationDays: 30,
				RedeemableCap:             50,
				ExpiresAt:                 time.Now().Add(time.Hour * 1).Truncate(time.Millisecond),
				Status:                    rewards.Active,
				Type:                      rewards.FreeCredit,
			},
			{
				Name:                      "Zenko",
				Description:               "partner offer",
				AwardCredit:               currency.Cents(0),
				InviteeCredit:             currency.Cents(50),
				AwardCreditDurationDays:   0,
				InviteeCreditDurationDays: 30,
				RedeemableCap:             50,
				ExpiresAt:                 time.Now().Add(time.Hour * 1).Truncate(time.Millisecond),
				Status:                    rewards.Active,
				Type:                      rewards.Partner,
			},
		}

		for i := range validOffers {
			// Created offers must show up in ListAll.
			new, err := planet.Satellites[0].DB.Rewards().Create(ctx, &validOffers[i])
			require.NoError(t, err)

			new.ExpiresAt = new.ExpiresAt.Truncate(time.Microsecond)
			new.CreatedAt = new.CreatedAt.Truncate(time.Microsecond)

			all, err := planet.Satellites[0].DB.Rewards().ListAll(ctx)
			require.NoError(t, err)
			require.Contains(t, all, *new)

			offers, err := planet.Satellites[0].DB.Rewards().GetActiveOffersByType(ctx, new.Type)
			require.NoError(t, err)

			// Partner offers are resolved through the partner's ID.
			var pID string
			if new.Type == rewards.Partner {
				partner, err := planet.Satellites[0].API.Marketing.PartnersService.ByName(ctx, new.Name)
				require.NoError(t, err)
				pID = partner.ID
			}

			c, err := planet.Satellites[0].API.Marketing.PartnersService.GetActiveOffer(ctx, offers, new.Type, pID)
			require.NoError(t, err)
			require.Equal(t, new, c)

			// Finishing an offer must flip its status to Done.
			err = planet.Satellites[0].DB.Rewards().Finish(ctx, all[i].ID)
			require.NoError(t, err)

			updated, err := planet.Satellites[0].DB.Rewards().ListAll(ctx)
			require.NoError(t, err)
			require.Equal(t, rewards.Done, updated[i].Status)
		}

		// create with expired offer
		expiredOffers := []rewards.NewOffer{
			{
				Name:                      "test",
				Description:               "test offer",
				AwardCredit:               currency.Cents(0),
				InviteeCredit:             currency.Cents(50),
				AwardCreditDurationDays:   0,
				InviteeCreditDurationDays: 30,
				RedeemableCap:             50,
				ExpiresAt:                 time.Now().Add(time.Hour * -1),
				Status:                    rewards.Active,
				Type:                      rewards.FreeCredit,
			},
			{
				Name:                      "test",
				Description:               "test offer",
				AwardCredit:               currency.Cents(100),
				InviteeCredit:             currency.Cents(50),
				AwardCreditDurationDays:   60,
				InviteeCreditDurationDays: 30,
				RedeemableCap:             0,
				ExpiresAt:                 time.Now().Add(time.Hour * -1),
				Status:                    rewards.Default,
				Type:                      rewards.Referral,
			},
		}

		for i := range expiredOffers {
			// Expired or zero-cap offers must be rejected on create.
			output, err := planet.Satellites[0].DB.Rewards().Create(ctx, &expiredOffers[i])
			require.Error(t, err)
			require.Nil(t, output)
		}
	})
}

View File

@ -23,7 +23,7 @@ func (comp *compensationDB) QueryTotalAmounts(ctx context.Context, nodeID storj.
stmt := comp.db.Rebind(`
SELECT
coalesce(SUM(held), 0) AS total_held,
coalesce(SUM(disposed), 0) AS total_disposed
coalesce(SUM(disposed), 0) AS total_disposed,
coalesce(SUM(paid), 0) AS total_paid,
coalesce(SUM(distributed), 0) AS total_distributed
FROM

View File

@ -68,11 +68,6 @@ func (db *ConsoleDB) ResetPasswordTokens() console.ResetPasswordTokens {
return &resetPasswordTokens{db.methods}
}
// UserCredits is a getter for console.UserCredits repository.
func (db *ConsoleDB) UserCredits() console.UserCredits {
return &usercredits{db.db, db.tx}
}
// WithTx is a method for executing and retrying transaction.
func (db *ConsoleDB) WithTx(ctx context.Context, fn func(context.Context, console.DBTx) error) error {
if db.db == nil {

View File

@ -31,7 +31,6 @@ import (
"storj.io/storj/satellite/repair/irreparable"
"storj.io/storj/satellite/repair/queue"
"storj.io/storj/satellite/revocation"
"storj.io/storj/satellite/rewards"
"storj.io/storj/satellite/satellitedb/dbx"
"storj.io/storj/satellite/snopayouts"
)
@ -247,11 +246,6 @@ func (dbc *satelliteDBCollection) Console() console.DB {
return db.consoleDB
}
// Rewards returns database for storing offers.
func (dbc *satelliteDBCollection) Rewards() rewards.DB {
return &offersDB{db: dbc.getByName("rewards")}
}
// Orders returns database for storing orders.
func (dbc *satelliteDBCollection) Orders() orders.DB {
db := dbc.getByName("orders")

View File

@ -130,18 +130,19 @@ model node (
fields disqualified unknown_audit_suspended exit_finished_at last_contact_success
)
field id blob
field id blob
// address is how to contact the node, this can be a hostname or IP and it contains the port
field address text ( updatable, default "" ) // TODO: use compressed format
field address text ( updatable, default "" ) // TODO: use compressed format
// last_net is the /24 subnet of the IP
field last_net text ( updatable )
field last_ip_port text ( updatable, nullable )
field protocol int ( updatable, default 0 )
field type int ( updatable, default 0 )
field email text ( updatable )
field wallet text ( updatable ) // TODO: use compressed format
field free_disk int64 ( updatable, default -1 )
field piece_count int64 ( autoinsert, updatable, default 0 )
field last_net text ( updatable )
field last_ip_port text ( updatable, nullable )
field protocol int ( updatable, default 0 )
field type int ( updatable, default 0 )
field email text ( updatable )
field wallet text ( updatable ) // TODO: use compressed format
field wallet_features text ( updatable, default "" )
field free_disk int64 ( updatable, default -1 )
field piece_count int64 ( autoinsert, updatable, default 0 )
field major int64 ( updatable, default 0 )
field minor int64 ( updatable, default 0 )

View File

@ -453,6 +453,7 @@ CREATE TABLE nodes (
type integer NOT NULL DEFAULT 0,
email text NOT NULL,
wallet text NOT NULL,
wallet_features text NOT NULL DEFAULT '',
free_disk bigint NOT NULL DEFAULT -1,
piece_count bigint NOT NULL DEFAULT 0,
major bigint NOT NULL DEFAULT 0,
@ -993,6 +994,7 @@ CREATE TABLE nodes (
type integer NOT NULL DEFAULT 0,
email text NOT NULL,
wallet text NOT NULL,
wallet_features text NOT NULL DEFAULT '',
free_disk bigint NOT NULL DEFAULT -1,
piece_count bigint NOT NULL DEFAULT 0,
major bigint NOT NULL DEFAULT 0,
@ -3326,6 +3328,7 @@ type Node struct {
Type int
Email string
Wallet string
WalletFeatures string
FreeDisk int64
PieceCount int64
Major int64
@ -3370,6 +3373,7 @@ type Node_Create_Fields struct {
LastIpPort Node_LastIpPort_Field
Protocol Node_Protocol_Field
Type Node_Type_Field
WalletFeatures Node_WalletFeatures_Field
FreeDisk Node_FreeDisk_Field
Major Node_Major_Field
Minor Node_Minor_Field
@ -3412,6 +3416,7 @@ type Node_Update_Fields struct {
Type Node_Type_Field
Email Node_Email_Field
Wallet Node_Wallet_Field
WalletFeatures Node_WalletFeatures_Field
FreeDisk Node_FreeDisk_Field
PieceCount Node_PieceCount_Field
Major Node_Major_Field
@ -3612,6 +3617,25 @@ func (f Node_Wallet_Field) value() interface{} {
func (Node_Wallet_Field) _Column() string { return "wallet" }
type Node_WalletFeatures_Field struct {
_set bool
_null bool
_value string
}
func Node_WalletFeatures(v string) Node_WalletFeatures_Field {
return Node_WalletFeatures_Field{_set: true, _value: v}
}
func (f Node_WalletFeatures_Field) value() interface{} {
if !f._set || f._null {
return nil
}
return f._value
}
func (Node_WalletFeatures_Field) _Column() string { return "wallet_features" }
type Node_FreeDisk_Field struct {
_set bool
_null bool
@ -9180,6 +9204,12 @@ func (obj *pgxImpl) CreateNoReturn_Node(ctx context.Context,
__optional_placeholders.SQLs = append(__optional_placeholders.SQLs, __sqlbundle_Literal("?"))
}
if optional.WalletFeatures._set {
__values = append(__values, optional.WalletFeatures.value())
__optional_columns.SQLs = append(__optional_columns.SQLs, __sqlbundle_Literal("wallet_features"))
__optional_placeholders.SQLs = append(__optional_placeholders.SQLs, __sqlbundle_Literal("?"))
}
if optional.FreeDisk._set {
__values = append(__values, optional.FreeDisk.value())
__optional_columns.SQLs = append(__optional_columns.SQLs, __sqlbundle_Literal("free_disk"))
@ -10579,7 +10609,7 @@ func (obj *pgxImpl) Get_Node_By_Id(ctx context.Context,
node *Node, err error) {
defer mon.Task()(&ctx)(&err)
var __embed_stmt = __sqlbundle_Literal("SELECT nodes.id, nodes.address, nodes.last_net, nodes.last_ip_port, nodes.protocol, nodes.type, nodes.email, nodes.wallet, nodes.free_disk, nodes.piece_count, nodes.major, nodes.minor, nodes.patch, nodes.hash, nodes.timestamp, nodes.release, nodes.latency_90, nodes.audit_success_count, nodes.total_audit_count, nodes.vetted_at, nodes.uptime_success_count, nodes.total_uptime_count, nodes.created_at, nodes.updated_at, nodes.last_contact_success, nodes.last_contact_failure, nodes.contained, nodes.disqualified, nodes.suspended, nodes.unknown_audit_suspended, nodes.offline_suspended, nodes.under_review, nodes.online_score, nodes.audit_reputation_alpha, nodes.audit_reputation_beta, nodes.unknown_audit_reputation_alpha, nodes.unknown_audit_reputation_beta, nodes.uptime_reputation_alpha, nodes.uptime_reputation_beta, nodes.exit_initiated_at, nodes.exit_loop_completed_at, nodes.exit_finished_at, nodes.exit_success FROM nodes WHERE nodes.id = ?")
var __embed_stmt = __sqlbundle_Literal("SELECT nodes.id, nodes.address, nodes.last_net, nodes.last_ip_port, nodes.protocol, nodes.type, nodes.email, nodes.wallet, nodes.wallet_features, nodes.free_disk, nodes.piece_count, nodes.major, nodes.minor, nodes.patch, nodes.hash, nodes.timestamp, nodes.release, nodes.latency_90, nodes.audit_success_count, nodes.total_audit_count, nodes.vetted_at, nodes.uptime_success_count, nodes.total_uptime_count, nodes.created_at, nodes.updated_at, nodes.last_contact_success, nodes.last_contact_failure, nodes.contained, nodes.disqualified, nodes.suspended, nodes.unknown_audit_suspended, nodes.offline_suspended, nodes.under_review, nodes.online_score, nodes.audit_reputation_alpha, nodes.audit_reputation_beta, nodes.unknown_audit_reputation_alpha, nodes.unknown_audit_reputation_beta, nodes.uptime_reputation_alpha, nodes.uptime_reputation_beta, nodes.exit_initiated_at, nodes.exit_loop_completed_at, nodes.exit_finished_at, nodes.exit_success FROM nodes WHERE nodes.id = ?")
var __values []interface{}
__values = append(__values, node_id.value())
@ -10588,7 +10618,7 @@ func (obj *pgxImpl) Get_Node_By_Id(ctx context.Context,
obj.logStmt(__stmt, __values...)
node = &Node{}
err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&node.Id, &node.Address, &node.LastNet, &node.LastIpPort, &node.Protocol, &node.Type, &node.Email, &node.Wallet, &node.FreeDisk, &node.PieceCount, &node.Major, &node.Minor, &node.Patch, &node.Hash, &node.Timestamp, &node.Release, &node.Latency90, &node.AuditSuccessCount, &node.TotalAuditCount, &node.VettedAt, &node.UptimeSuccessCount, &node.TotalUptimeCount, &node.CreatedAt, &node.UpdatedAt, &node.LastContactSuccess, &node.LastContactFailure, &node.Contained, &node.Disqualified, &node.Suspended, &node.UnknownAuditSuspended, &node.OfflineSuspended, &node.UnderReview, &node.OnlineScore, &node.AuditReputationAlpha, &node.AuditReputationBeta, &node.UnknownAuditReputationAlpha, &node.UnknownAuditReputationBeta, &node.UptimeReputationAlpha, &node.UptimeReputationBeta, &node.ExitInitiatedAt, &node.ExitLoopCompletedAt, &node.ExitFinishedAt, &node.ExitSuccess)
err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&node.Id, &node.Address, &node.LastNet, &node.LastIpPort, &node.Protocol, &node.Type, &node.Email, &node.Wallet, &node.WalletFeatures, &node.FreeDisk, &node.PieceCount, &node.Major, &node.Minor, &node.Patch, &node.Hash, &node.Timestamp, &node.Release, &node.Latency90, &node.AuditSuccessCount, &node.TotalAuditCount, &node.VettedAt, &node.UptimeSuccessCount, &node.TotalUptimeCount, &node.CreatedAt, &node.UpdatedAt, &node.LastContactSuccess, &node.LastContactFailure, &node.Contained, &node.Disqualified, &node.Suspended, &node.UnknownAuditSuspended, &node.OfflineSuspended, &node.UnderReview, &node.OnlineScore, &node.AuditReputationAlpha, &node.AuditReputationBeta, &node.UnknownAuditReputationAlpha, &node.UnknownAuditReputationBeta, &node.UptimeReputationAlpha, &node.UptimeReputationBeta, &node.ExitInitiatedAt, &node.ExitLoopCompletedAt, &node.ExitFinishedAt, &node.ExitSuccess)
if err != nil {
return (*Node)(nil), obj.makeErr(err)
}
@ -10645,7 +10675,7 @@ func (obj *pgxImpl) Limited_Node_By_Id_GreaterOrEqual_OrderBy_Asc_Id(ctx context
rows []*Node, err error) {
defer mon.Task()(&ctx)(&err)
var __embed_stmt = __sqlbundle_Literal("SELECT nodes.id, nodes.address, nodes.last_net, nodes.last_ip_port, nodes.protocol, nodes.type, nodes.email, nodes.wallet, nodes.free_disk, nodes.piece_count, nodes.major, nodes.minor, nodes.patch, nodes.hash, nodes.timestamp, nodes.release, nodes.latency_90, nodes.audit_success_count, nodes.total_audit_count, nodes.vetted_at, nodes.uptime_success_count, nodes.total_uptime_count, nodes.created_at, nodes.updated_at, nodes.last_contact_success, nodes.last_contact_failure, nodes.contained, nodes.disqualified, nodes.suspended, nodes.unknown_audit_suspended, nodes.offline_suspended, nodes.under_review, nodes.online_score, nodes.audit_reputation_alpha, nodes.audit_reputation_beta, nodes.unknown_audit_reputation_alpha, nodes.unknown_audit_reputation_beta, nodes.uptime_reputation_alpha, nodes.uptime_reputation_beta, nodes.exit_initiated_at, nodes.exit_loop_completed_at, nodes.exit_finished_at, nodes.exit_success FROM nodes WHERE nodes.id >= ? ORDER BY nodes.id LIMIT ? OFFSET ?")
var __embed_stmt = __sqlbundle_Literal("SELECT nodes.id, nodes.address, nodes.last_net, nodes.last_ip_port, nodes.protocol, nodes.type, nodes.email, nodes.wallet, nodes.wallet_features, nodes.free_disk, nodes.piece_count, nodes.major, nodes.minor, nodes.patch, nodes.hash, nodes.timestamp, nodes.release, nodes.latency_90, nodes.audit_success_count, nodes.total_audit_count, nodes.vetted_at, nodes.uptime_success_count, nodes.total_uptime_count, nodes.created_at, nodes.updated_at, nodes.last_contact_success, nodes.last_contact_failure, nodes.contained, nodes.disqualified, nodes.suspended, nodes.unknown_audit_suspended, nodes.offline_suspended, nodes.under_review, nodes.online_score, nodes.audit_reputation_alpha, nodes.audit_reputation_beta, nodes.unknown_audit_reputation_alpha, nodes.unknown_audit_reputation_beta, nodes.uptime_reputation_alpha, nodes.uptime_reputation_beta, nodes.exit_initiated_at, nodes.exit_loop_completed_at, nodes.exit_finished_at, nodes.exit_success FROM nodes WHERE nodes.id >= ? ORDER BY nodes.id LIMIT ? OFFSET ?")
var __values []interface{}
__values = append(__values, node_id_greater_or_equal.value())
@ -10665,7 +10695,7 @@ func (obj *pgxImpl) Limited_Node_By_Id_GreaterOrEqual_OrderBy_Asc_Id(ctx context
for __rows.Next() {
node := &Node{}
err = __rows.Scan(&node.Id, &node.Address, &node.LastNet, &node.LastIpPort, &node.Protocol, &node.Type, &node.Email, &node.Wallet, &node.FreeDisk, &node.PieceCount, &node.Major, &node.Minor, &node.Patch, &node.Hash, &node.Timestamp, &node.Release, &node.Latency90, &node.AuditSuccessCount, &node.TotalAuditCount, &node.VettedAt, &node.UptimeSuccessCount, &node.TotalUptimeCount, &node.CreatedAt, &node.UpdatedAt, &node.LastContactSuccess, &node.LastContactFailure, &node.Contained, &node.Disqualified, &node.Suspended, &node.UnknownAuditSuspended, &node.OfflineSuspended, &node.UnderReview, &node.OnlineScore, &node.AuditReputationAlpha, &node.AuditReputationBeta, &node.UnknownAuditReputationAlpha, &node.UnknownAuditReputationBeta, &node.UptimeReputationAlpha, &node.UptimeReputationBeta, &node.ExitInitiatedAt, &node.ExitLoopCompletedAt, &node.ExitFinishedAt, &node.ExitSuccess)
err = __rows.Scan(&node.Id, &node.Address, &node.LastNet, &node.LastIpPort, &node.Protocol, &node.Type, &node.Email, &node.Wallet, &node.WalletFeatures, &node.FreeDisk, &node.PieceCount, &node.Major, &node.Minor, &node.Patch, &node.Hash, &node.Timestamp, &node.Release, &node.Latency90, &node.AuditSuccessCount, &node.TotalAuditCount, &node.VettedAt, &node.UptimeSuccessCount, &node.TotalUptimeCount, &node.CreatedAt, &node.UpdatedAt, &node.LastContactSuccess, &node.LastContactFailure, &node.Contained, &node.Disqualified, &node.Suspended, &node.UnknownAuditSuspended, &node.OfflineSuspended, &node.UnderReview, &node.OnlineScore, &node.AuditReputationAlpha, &node.AuditReputationBeta, &node.UnknownAuditReputationAlpha, &node.UnknownAuditReputationBeta, &node.UptimeReputationAlpha, &node.UptimeReputationBeta, &node.ExitInitiatedAt, &node.ExitLoopCompletedAt, &node.ExitFinishedAt, &node.ExitSuccess)
if err != nil {
return nil, err
}
@ -13565,7 +13595,7 @@ func (obj *pgxImpl) Update_Node_By_Id(ctx context.Context,
defer mon.Task()(&ctx)(&err)
var __sets = &__sqlbundle_Hole{}
var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("UPDATE nodes SET "), __sets, __sqlbundle_Literal(" WHERE nodes.id = ? RETURNING nodes.id, nodes.address, nodes.last_net, nodes.last_ip_port, nodes.protocol, nodes.type, nodes.email, nodes.wallet, nodes.free_disk, nodes.piece_count, nodes.major, nodes.minor, nodes.patch, nodes.hash, nodes.timestamp, nodes.release, nodes.latency_90, nodes.audit_success_count, nodes.total_audit_count, nodes.vetted_at, nodes.uptime_success_count, nodes.total_uptime_count, nodes.created_at, nodes.updated_at, nodes.last_contact_success, nodes.last_contact_failure, nodes.contained, nodes.disqualified, nodes.suspended, nodes.unknown_audit_suspended, nodes.offline_suspended, nodes.under_review, nodes.online_score, nodes.audit_reputation_alpha, nodes.audit_reputation_beta, nodes.unknown_audit_reputation_alpha, nodes.unknown_audit_reputation_beta, nodes.uptime_reputation_alpha, nodes.uptime_reputation_beta, nodes.exit_initiated_at, nodes.exit_loop_completed_at, nodes.exit_finished_at, nodes.exit_success")}}
var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("UPDATE nodes SET "), __sets, __sqlbundle_Literal(" WHERE nodes.id = ? RETURNING nodes.id, nodes.address, nodes.last_net, nodes.last_ip_port, nodes.protocol, nodes.type, nodes.email, nodes.wallet, nodes.wallet_features, nodes.free_disk, nodes.piece_count, nodes.major, nodes.minor, nodes.patch, nodes.hash, nodes.timestamp, nodes.release, nodes.latency_90, nodes.audit_success_count, nodes.total_audit_count, nodes.vetted_at, nodes.uptime_success_count, nodes.total_uptime_count, nodes.created_at, nodes.updated_at, nodes.last_contact_success, nodes.last_contact_failure, nodes.contained, nodes.disqualified, nodes.suspended, nodes.unknown_audit_suspended, nodes.offline_suspended, nodes.under_review, nodes.online_score, nodes.audit_reputation_alpha, nodes.audit_reputation_beta, nodes.unknown_audit_reputation_alpha, nodes.unknown_audit_reputation_beta, nodes.uptime_reputation_alpha, nodes.uptime_reputation_beta, nodes.exit_initiated_at, nodes.exit_loop_completed_at, nodes.exit_finished_at, nodes.exit_success")}}
__sets_sql := __sqlbundle_Literals{Join: ", "}
var __values []interface{}
@ -13606,6 +13636,11 @@ func (obj *pgxImpl) Update_Node_By_Id(ctx context.Context,
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("wallet = ?"))
}
if update.WalletFeatures._set {
__values = append(__values, update.WalletFeatures.value())
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("wallet_features = ?"))
}
if update.FreeDisk._set {
__values = append(__values, update.FreeDisk.value())
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("free_disk = ?"))
@ -13785,7 +13820,7 @@ func (obj *pgxImpl) Update_Node_By_Id(ctx context.Context,
obj.logStmt(__stmt, __values...)
node = &Node{}
err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&node.Id, &node.Address, &node.LastNet, &node.LastIpPort, &node.Protocol, &node.Type, &node.Email, &node.Wallet, &node.FreeDisk, &node.PieceCount, &node.Major, &node.Minor, &node.Patch, &node.Hash, &node.Timestamp, &node.Release, &node.Latency90, &node.AuditSuccessCount, &node.TotalAuditCount, &node.VettedAt, &node.UptimeSuccessCount, &node.TotalUptimeCount, &node.CreatedAt, &node.UpdatedAt, &node.LastContactSuccess, &node.LastContactFailure, &node.Contained, &node.Disqualified, &node.Suspended, &node.UnknownAuditSuspended, &node.OfflineSuspended, &node.UnderReview, &node.OnlineScore, &node.AuditReputationAlpha, &node.AuditReputationBeta, &node.UnknownAuditReputationAlpha, &node.UnknownAuditReputationBeta, &node.UptimeReputationAlpha, &node.UptimeReputationBeta, &node.ExitInitiatedAt, &node.ExitLoopCompletedAt, &node.ExitFinishedAt, &node.ExitSuccess)
err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&node.Id, &node.Address, &node.LastNet, &node.LastIpPort, &node.Protocol, &node.Type, &node.Email, &node.Wallet, &node.WalletFeatures, &node.FreeDisk, &node.PieceCount, &node.Major, &node.Minor, &node.Patch, &node.Hash, &node.Timestamp, &node.Release, &node.Latency90, &node.AuditSuccessCount, &node.TotalAuditCount, &node.VettedAt, &node.UptimeSuccessCount, &node.TotalUptimeCount, &node.CreatedAt, &node.UpdatedAt, &node.LastContactSuccess, &node.LastContactFailure, &node.Contained, &node.Disqualified, &node.Suspended, &node.UnknownAuditSuspended, &node.OfflineSuspended, &node.UnderReview, &node.OnlineScore, &node.AuditReputationAlpha, &node.AuditReputationBeta, &node.UnknownAuditReputationAlpha, &node.UnknownAuditReputationBeta, &node.UptimeReputationAlpha, &node.UptimeReputationBeta, &node.ExitInitiatedAt, &node.ExitLoopCompletedAt, &node.ExitFinishedAt, &node.ExitSuccess)
if err == sql.ErrNoRows {
return nil, nil
}
@ -13843,6 +13878,11 @@ func (obj *pgxImpl) UpdateNoReturn_Node_By_Id(ctx context.Context,
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("wallet = ?"))
}
if update.WalletFeatures._set {
__values = append(__values, update.WalletFeatures.value())
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("wallet_features = ?"))
}
if update.FreeDisk._set {
__values = append(__values, update.FreeDisk.value())
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("free_disk = ?"))
@ -15996,6 +16036,12 @@ func (obj *pgxcockroachImpl) CreateNoReturn_Node(ctx context.Context,
__optional_placeholders.SQLs = append(__optional_placeholders.SQLs, __sqlbundle_Literal("?"))
}
if optional.WalletFeatures._set {
__values = append(__values, optional.WalletFeatures.value())
__optional_columns.SQLs = append(__optional_columns.SQLs, __sqlbundle_Literal("wallet_features"))
__optional_placeholders.SQLs = append(__optional_placeholders.SQLs, __sqlbundle_Literal("?"))
}
if optional.FreeDisk._set {
__values = append(__values, optional.FreeDisk.value())
__optional_columns.SQLs = append(__optional_columns.SQLs, __sqlbundle_Literal("free_disk"))
@ -17395,7 +17441,7 @@ func (obj *pgxcockroachImpl) Get_Node_By_Id(ctx context.Context,
node *Node, err error) {
defer mon.Task()(&ctx)(&err)
var __embed_stmt = __sqlbundle_Literal("SELECT nodes.id, nodes.address, nodes.last_net, nodes.last_ip_port, nodes.protocol, nodes.type, nodes.email, nodes.wallet, nodes.free_disk, nodes.piece_count, nodes.major, nodes.minor, nodes.patch, nodes.hash, nodes.timestamp, nodes.release, nodes.latency_90, nodes.audit_success_count, nodes.total_audit_count, nodes.vetted_at, nodes.uptime_success_count, nodes.total_uptime_count, nodes.created_at, nodes.updated_at, nodes.last_contact_success, nodes.last_contact_failure, nodes.contained, nodes.disqualified, nodes.suspended, nodes.unknown_audit_suspended, nodes.offline_suspended, nodes.under_review, nodes.online_score, nodes.audit_reputation_alpha, nodes.audit_reputation_beta, nodes.unknown_audit_reputation_alpha, nodes.unknown_audit_reputation_beta, nodes.uptime_reputation_alpha, nodes.uptime_reputation_beta, nodes.exit_initiated_at, nodes.exit_loop_completed_at, nodes.exit_finished_at, nodes.exit_success FROM nodes WHERE nodes.id = ?")
var __embed_stmt = __sqlbundle_Literal("SELECT nodes.id, nodes.address, nodes.last_net, nodes.last_ip_port, nodes.protocol, nodes.type, nodes.email, nodes.wallet, nodes.wallet_features, nodes.free_disk, nodes.piece_count, nodes.major, nodes.minor, nodes.patch, nodes.hash, nodes.timestamp, nodes.release, nodes.latency_90, nodes.audit_success_count, nodes.total_audit_count, nodes.vetted_at, nodes.uptime_success_count, nodes.total_uptime_count, nodes.created_at, nodes.updated_at, nodes.last_contact_success, nodes.last_contact_failure, nodes.contained, nodes.disqualified, nodes.suspended, nodes.unknown_audit_suspended, nodes.offline_suspended, nodes.under_review, nodes.online_score, nodes.audit_reputation_alpha, nodes.audit_reputation_beta, nodes.unknown_audit_reputation_alpha, nodes.unknown_audit_reputation_beta, nodes.uptime_reputation_alpha, nodes.uptime_reputation_beta, nodes.exit_initiated_at, nodes.exit_loop_completed_at, nodes.exit_finished_at, nodes.exit_success FROM nodes WHERE nodes.id = ?")
var __values []interface{}
__values = append(__values, node_id.value())
@ -17404,7 +17450,7 @@ func (obj *pgxcockroachImpl) Get_Node_By_Id(ctx context.Context,
obj.logStmt(__stmt, __values...)
node = &Node{}
err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&node.Id, &node.Address, &node.LastNet, &node.LastIpPort, &node.Protocol, &node.Type, &node.Email, &node.Wallet, &node.FreeDisk, &node.PieceCount, &node.Major, &node.Minor, &node.Patch, &node.Hash, &node.Timestamp, &node.Release, &node.Latency90, &node.AuditSuccessCount, &node.TotalAuditCount, &node.VettedAt, &node.UptimeSuccessCount, &node.TotalUptimeCount, &node.CreatedAt, &node.UpdatedAt, &node.LastContactSuccess, &node.LastContactFailure, &node.Contained, &node.Disqualified, &node.Suspended, &node.UnknownAuditSuspended, &node.OfflineSuspended, &node.UnderReview, &node.OnlineScore, &node.AuditReputationAlpha, &node.AuditReputationBeta, &node.UnknownAuditReputationAlpha, &node.UnknownAuditReputationBeta, &node.UptimeReputationAlpha, &node.UptimeReputationBeta, &node.ExitInitiatedAt, &node.ExitLoopCompletedAt, &node.ExitFinishedAt, &node.ExitSuccess)
err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&node.Id, &node.Address, &node.LastNet, &node.LastIpPort, &node.Protocol, &node.Type, &node.Email, &node.Wallet, &node.WalletFeatures, &node.FreeDisk, &node.PieceCount, &node.Major, &node.Minor, &node.Patch, &node.Hash, &node.Timestamp, &node.Release, &node.Latency90, &node.AuditSuccessCount, &node.TotalAuditCount, &node.VettedAt, &node.UptimeSuccessCount, &node.TotalUptimeCount, &node.CreatedAt, &node.UpdatedAt, &node.LastContactSuccess, &node.LastContactFailure, &node.Contained, &node.Disqualified, &node.Suspended, &node.UnknownAuditSuspended, &node.OfflineSuspended, &node.UnderReview, &node.OnlineScore, &node.AuditReputationAlpha, &node.AuditReputationBeta, &node.UnknownAuditReputationAlpha, &node.UnknownAuditReputationBeta, &node.UptimeReputationAlpha, &node.UptimeReputationBeta, &node.ExitInitiatedAt, &node.ExitLoopCompletedAt, &node.ExitFinishedAt, &node.ExitSuccess)
if err != nil {
return (*Node)(nil), obj.makeErr(err)
}
@ -17461,7 +17507,7 @@ func (obj *pgxcockroachImpl) Limited_Node_By_Id_GreaterOrEqual_OrderBy_Asc_Id(ct
rows []*Node, err error) {
defer mon.Task()(&ctx)(&err)
var __embed_stmt = __sqlbundle_Literal("SELECT nodes.id, nodes.address, nodes.last_net, nodes.last_ip_port, nodes.protocol, nodes.type, nodes.email, nodes.wallet, nodes.free_disk, nodes.piece_count, nodes.major, nodes.minor, nodes.patch, nodes.hash, nodes.timestamp, nodes.release, nodes.latency_90, nodes.audit_success_count, nodes.total_audit_count, nodes.vetted_at, nodes.uptime_success_count, nodes.total_uptime_count, nodes.created_at, nodes.updated_at, nodes.last_contact_success, nodes.last_contact_failure, nodes.contained, nodes.disqualified, nodes.suspended, nodes.unknown_audit_suspended, nodes.offline_suspended, nodes.under_review, nodes.online_score, nodes.audit_reputation_alpha, nodes.audit_reputation_beta, nodes.unknown_audit_reputation_alpha, nodes.unknown_audit_reputation_beta, nodes.uptime_reputation_alpha, nodes.uptime_reputation_beta, nodes.exit_initiated_at, nodes.exit_loop_completed_at, nodes.exit_finished_at, nodes.exit_success FROM nodes WHERE nodes.id >= ? ORDER BY nodes.id LIMIT ? OFFSET ?")
var __embed_stmt = __sqlbundle_Literal("SELECT nodes.id, nodes.address, nodes.last_net, nodes.last_ip_port, nodes.protocol, nodes.type, nodes.email, nodes.wallet, nodes.wallet_features, nodes.free_disk, nodes.piece_count, nodes.major, nodes.minor, nodes.patch, nodes.hash, nodes.timestamp, nodes.release, nodes.latency_90, nodes.audit_success_count, nodes.total_audit_count, nodes.vetted_at, nodes.uptime_success_count, nodes.total_uptime_count, nodes.created_at, nodes.updated_at, nodes.last_contact_success, nodes.last_contact_failure, nodes.contained, nodes.disqualified, nodes.suspended, nodes.unknown_audit_suspended, nodes.offline_suspended, nodes.under_review, nodes.online_score, nodes.audit_reputation_alpha, nodes.audit_reputation_beta, nodes.unknown_audit_reputation_alpha, nodes.unknown_audit_reputation_beta, nodes.uptime_reputation_alpha, nodes.uptime_reputation_beta, nodes.exit_initiated_at, nodes.exit_loop_completed_at, nodes.exit_finished_at, nodes.exit_success FROM nodes WHERE nodes.id >= ? ORDER BY nodes.id LIMIT ? OFFSET ?")
var __values []interface{}
__values = append(__values, node_id_greater_or_equal.value())
@ -17481,7 +17527,7 @@ func (obj *pgxcockroachImpl) Limited_Node_By_Id_GreaterOrEqual_OrderBy_Asc_Id(ct
for __rows.Next() {
node := &Node{}
err = __rows.Scan(&node.Id, &node.Address, &node.LastNet, &node.LastIpPort, &node.Protocol, &node.Type, &node.Email, &node.Wallet, &node.FreeDisk, &node.PieceCount, &node.Major, &node.Minor, &node.Patch, &node.Hash, &node.Timestamp, &node.Release, &node.Latency90, &node.AuditSuccessCount, &node.TotalAuditCount, &node.VettedAt, &node.UptimeSuccessCount, &node.TotalUptimeCount, &node.CreatedAt, &node.UpdatedAt, &node.LastContactSuccess, &node.LastContactFailure, &node.Contained, &node.Disqualified, &node.Suspended, &node.UnknownAuditSuspended, &node.OfflineSuspended, &node.UnderReview, &node.OnlineScore, &node.AuditReputationAlpha, &node.AuditReputationBeta, &node.UnknownAuditReputationAlpha, &node.UnknownAuditReputationBeta, &node.UptimeReputationAlpha, &node.UptimeReputationBeta, &node.ExitInitiatedAt, &node.ExitLoopCompletedAt, &node.ExitFinishedAt, &node.ExitSuccess)
err = __rows.Scan(&node.Id, &node.Address, &node.LastNet, &node.LastIpPort, &node.Protocol, &node.Type, &node.Email, &node.Wallet, &node.WalletFeatures, &node.FreeDisk, &node.PieceCount, &node.Major, &node.Minor, &node.Patch, &node.Hash, &node.Timestamp, &node.Release, &node.Latency90, &node.AuditSuccessCount, &node.TotalAuditCount, &node.VettedAt, &node.UptimeSuccessCount, &node.TotalUptimeCount, &node.CreatedAt, &node.UpdatedAt, &node.LastContactSuccess, &node.LastContactFailure, &node.Contained, &node.Disqualified, &node.Suspended, &node.UnknownAuditSuspended, &node.OfflineSuspended, &node.UnderReview, &node.OnlineScore, &node.AuditReputationAlpha, &node.AuditReputationBeta, &node.UnknownAuditReputationAlpha, &node.UnknownAuditReputationBeta, &node.UptimeReputationAlpha, &node.UptimeReputationBeta, &node.ExitInitiatedAt, &node.ExitLoopCompletedAt, &node.ExitFinishedAt, &node.ExitSuccess)
if err != nil {
return nil, err
}
@ -20381,7 +20427,7 @@ func (obj *pgxcockroachImpl) Update_Node_By_Id(ctx context.Context,
defer mon.Task()(&ctx)(&err)
var __sets = &__sqlbundle_Hole{}
var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("UPDATE nodes SET "), __sets, __sqlbundle_Literal(" WHERE nodes.id = ? RETURNING nodes.id, nodes.address, nodes.last_net, nodes.last_ip_port, nodes.protocol, nodes.type, nodes.email, nodes.wallet, nodes.free_disk, nodes.piece_count, nodes.major, nodes.minor, nodes.patch, nodes.hash, nodes.timestamp, nodes.release, nodes.latency_90, nodes.audit_success_count, nodes.total_audit_count, nodes.vetted_at, nodes.uptime_success_count, nodes.total_uptime_count, nodes.created_at, nodes.updated_at, nodes.last_contact_success, nodes.last_contact_failure, nodes.contained, nodes.disqualified, nodes.suspended, nodes.unknown_audit_suspended, nodes.offline_suspended, nodes.under_review, nodes.online_score, nodes.audit_reputation_alpha, nodes.audit_reputation_beta, nodes.unknown_audit_reputation_alpha, nodes.unknown_audit_reputation_beta, nodes.uptime_reputation_alpha, nodes.uptime_reputation_beta, nodes.exit_initiated_at, nodes.exit_loop_completed_at, nodes.exit_finished_at, nodes.exit_success")}}
var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("UPDATE nodes SET "), __sets, __sqlbundle_Literal(" WHERE nodes.id = ? RETURNING nodes.id, nodes.address, nodes.last_net, nodes.last_ip_port, nodes.protocol, nodes.type, nodes.email, nodes.wallet, nodes.wallet_features, nodes.free_disk, nodes.piece_count, nodes.major, nodes.minor, nodes.patch, nodes.hash, nodes.timestamp, nodes.release, nodes.latency_90, nodes.audit_success_count, nodes.total_audit_count, nodes.vetted_at, nodes.uptime_success_count, nodes.total_uptime_count, nodes.created_at, nodes.updated_at, nodes.last_contact_success, nodes.last_contact_failure, nodes.contained, nodes.disqualified, nodes.suspended, nodes.unknown_audit_suspended, nodes.offline_suspended, nodes.under_review, nodes.online_score, nodes.audit_reputation_alpha, nodes.audit_reputation_beta, nodes.unknown_audit_reputation_alpha, nodes.unknown_audit_reputation_beta, nodes.uptime_reputation_alpha, nodes.uptime_reputation_beta, nodes.exit_initiated_at, nodes.exit_loop_completed_at, nodes.exit_finished_at, nodes.exit_success")}}
__sets_sql := __sqlbundle_Literals{Join: ", "}
var __values []interface{}
@ -20422,6 +20468,11 @@ func (obj *pgxcockroachImpl) Update_Node_By_Id(ctx context.Context,
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("wallet = ?"))
}
if update.WalletFeatures._set {
__values = append(__values, update.WalletFeatures.value())
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("wallet_features = ?"))
}
if update.FreeDisk._set {
__values = append(__values, update.FreeDisk.value())
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("free_disk = ?"))
@ -20601,7 +20652,7 @@ func (obj *pgxcockroachImpl) Update_Node_By_Id(ctx context.Context,
obj.logStmt(__stmt, __values...)
node = &Node{}
err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&node.Id, &node.Address, &node.LastNet, &node.LastIpPort, &node.Protocol, &node.Type, &node.Email, &node.Wallet, &node.FreeDisk, &node.PieceCount, &node.Major, &node.Minor, &node.Patch, &node.Hash, &node.Timestamp, &node.Release, &node.Latency90, &node.AuditSuccessCount, &node.TotalAuditCount, &node.VettedAt, &node.UptimeSuccessCount, &node.TotalUptimeCount, &node.CreatedAt, &node.UpdatedAt, &node.LastContactSuccess, &node.LastContactFailure, &node.Contained, &node.Disqualified, &node.Suspended, &node.UnknownAuditSuspended, &node.OfflineSuspended, &node.UnderReview, &node.OnlineScore, &node.AuditReputationAlpha, &node.AuditReputationBeta, &node.UnknownAuditReputationAlpha, &node.UnknownAuditReputationBeta, &node.UptimeReputationAlpha, &node.UptimeReputationBeta, &node.ExitInitiatedAt, &node.ExitLoopCompletedAt, &node.ExitFinishedAt, &node.ExitSuccess)
err = obj.queryRowContext(ctx, __stmt, __values...).Scan(&node.Id, &node.Address, &node.LastNet, &node.LastIpPort, &node.Protocol, &node.Type, &node.Email, &node.Wallet, &node.WalletFeatures, &node.FreeDisk, &node.PieceCount, &node.Major, &node.Minor, &node.Patch, &node.Hash, &node.Timestamp, &node.Release, &node.Latency90, &node.AuditSuccessCount, &node.TotalAuditCount, &node.VettedAt, &node.UptimeSuccessCount, &node.TotalUptimeCount, &node.CreatedAt, &node.UpdatedAt, &node.LastContactSuccess, &node.LastContactFailure, &node.Contained, &node.Disqualified, &node.Suspended, &node.UnknownAuditSuspended, &node.OfflineSuspended, &node.UnderReview, &node.OnlineScore, &node.AuditReputationAlpha, &node.AuditReputationBeta, &node.UnknownAuditReputationAlpha, &node.UnknownAuditReputationBeta, &node.UptimeReputationAlpha, &node.UptimeReputationBeta, &node.ExitInitiatedAt, &node.ExitLoopCompletedAt, &node.ExitFinishedAt, &node.ExitSuccess)
if err == sql.ErrNoRows {
return nil, nil
}
@ -20659,6 +20710,11 @@ func (obj *pgxcockroachImpl) UpdateNoReturn_Node_By_Id(ctx context.Context,
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("wallet = ?"))
}
if update.WalletFeatures._set {
__values = append(__values, update.WalletFeatures.value())
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("wallet_features = ?"))
}
if update.FreeDisk._set {
__values = append(__values, update.FreeDisk.value())
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("free_disk = ?"))

View File

@ -133,6 +133,7 @@ CREATE TABLE nodes (
type integer NOT NULL DEFAULT 0,
email text NOT NULL,
wallet text NOT NULL,
wallet_features text NOT NULL DEFAULT '',
free_disk bigint NOT NULL DEFAULT -1,
piece_count bigint NOT NULL DEFAULT 0,
major bigint NOT NULL DEFAULT 0,

View File

@ -133,6 +133,7 @@ CREATE TABLE nodes (
type integer NOT NULL DEFAULT 0,
email text NOT NULL,
wallet text NOT NULL,
wallet_features text NOT NULL DEFAULT '',
free_disk bigint NOT NULL DEFAULT -1,
piece_count bigint NOT NULL DEFAULT 0,
major bigint NOT NULL DEFAULT 0,

View File

@ -1302,6 +1302,14 @@ func (db *satelliteDB) PostgresMigration() *migrate.Migration {
`,
},
},
{
DB: &db.migrationDB,
Description: "nodes add wallet_features column",
Version: 146,
Action: migrate.SQL{
`ALTER TABLE nodes ADD COLUMN wallet_features text NOT NULL DEFAULT '';`,
},
},
},
}
}

View File

@ -1,231 +0,0 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information
package satellitedb
import (
"context"
"database/sql"
"time"
"github.com/zeebo/errs"
"storj.io/storj/private/currency"
"storj.io/storj/private/dbutil/txutil"
"storj.io/storj/private/tagsql"
"storj.io/storj/satellite/rewards"
"storj.io/storj/satellite/satellitedb/dbx"
)
var (
	// offerErr is the error class wrapping all errors returned by the offers database.
	offerErr = errs.Class("offers error")
)
// offersDB provides access to the offers table in the satellite database.
type offersDB struct {
	db *satelliteDB
}
// ListAll returns every offer stored in the database, ordered by id ascending.
func (db *offersDB) ListAll(ctx context.Context) (rewards.Offers, error) {
	rows, err := db.db.All_Offer_OrderBy_Asc_Id(ctx)
	if err != nil {
		return nil, offerErr.Wrap(err)
	}
	return offersFromDBX(rows)
}
// GetActiveOffersByType returns offers that have not expired, for the given
// offer type. Active offers are preferred; when no active offer of the
// requested type exists, default offers of that type are returned instead.
// Returns rewards.ErrOfferNotExist when neither is found.
func (db *offersDB) GetActiveOffersByType(ctx context.Context, offerType rewards.OfferType) (_ rewards.Offers, err error) {
	const columns = "id, name, description, award_credit_in_cents, invitee_credit_in_cents, award_credit_duration_days, invitee_credit_duration_days, redeemable_cap, expires_at, created_at, status, type"
	statement := `
		WITH o AS (
			SELECT ` + columns + ` FROM offers WHERE status=? AND type=? AND expires_at>?
		)
		SELECT ` + columns + ` FROM o
		UNION ALL
		SELECT ` + columns + ` FROM offers
		WHERE type=? AND status=?
		AND NOT EXISTS (
			SELECT id FROM o
		) order by created_at desc;`

	rows, err := db.db.DB.QueryContext(ctx, db.db.Rebind(statement), rewards.Active, offerType, time.Now().UTC(), offerType, rewards.Default)
	if err != nil {
		return nil, rewards.ErrOfferNotExist.Wrap(err)
	}
	// err is a NAMED result here on purpose: with an unnamed result this
	// deferred assignment would be silently dropped and the Close error lost.
	defer func() { err = errs.Combine(err, rows.Close()) }()

	var (
		awardCreditInCents        int
		inviteeCreditInCents      int
		awardCreditDurationDays   sql.NullInt64
		inviteeCreditDurationDays sql.NullInt64
		redeemableCap             sql.NullInt64
	)
	results := rewards.Offers{}
	for rows.Next() {
		o := rewards.Offer{}
		err := rows.Scan(&o.ID, &o.Name, &o.Description, &awardCreditInCents, &inviteeCreditInCents, &awardCreditDurationDays, &inviteeCreditDurationDays, &redeemableCap, &o.ExpiresAt, &o.CreatedAt, &o.Status, &o.Type)
		if err != nil {
			return results, Error.Wrap(err)
		}
		o.AwardCredit = currency.Cents(awardCreditInCents)
		o.InviteeCredit = currency.Cents(inviteeCreditInCents)
		// Nullable columns map to the zero value when NULL.
		if redeemableCap.Valid {
			o.RedeemableCap = int(redeemableCap.Int64)
		}
		if awardCreditDurationDays.Valid {
			o.AwardCreditDurationDays = int(awardCreditDurationDays.Int64)
		}
		if inviteeCreditDurationDays.Valid {
			o.InviteeCreditDurationDays = int(inviteeCreditDurationDays.Int64)
		}
		o.ExpiresAt = o.ExpiresAt.UTC()
		o.CreatedAt = o.CreatedAt.UTC()

		results = append(results, o)
	}
	// Check the iteration error before deciding the result set is empty,
	// so a broken cursor is not misreported as "offer does not exist".
	if err := rows.Err(); err != nil {
		return results, Error.Wrap(err)
	}
	if len(results) < 1 {
		return results, rewards.ErrOfferNotExist.New("offerType: %d", offerType)
	}

	return results, nil
}
// Create inserts a new offer into the db.
//
// Offers whose status is rewards.Default are given an effectively unlimited
// expiration (100 years from now) regardless of the requested ExpiresAt.
// Within the same transaction, any currently-active offer of the same type
// (and, for partner offers, the same name) is marked Done and expired.
// Returns nil (not a partially-populated offer) when the insert fails.
func (db *offersDB) Create(ctx context.Context, o *rewards.NewOffer) (*rewards.Offer, error) {
	currentTime := time.Now().UTC()
	if o.ExpiresAt.Before(currentTime) {
		return nil, offerErr.New("expiration time: %v can't be before: %v", o.ExpiresAt, currentTime)
	}

	if o.Status == rewards.Default {
		o.ExpiresAt = currentTime.AddDate(100, 0, 0)
	}

	var id int64
	err := txutil.WithTx(ctx, db.db.DB.DB, nil, func(ctx context.Context, tx tagsql.Tx) error {
		// If there's an existing current offer, update its status to Done and set its expires_at to be NOW()
		switch o.Type {
		case rewards.Partner:
			statement := `
				UPDATE offers SET status=?, expires_at=?
				WHERE status=? AND type=? AND expires_at>? AND name=?;`
			_, err := tx.ExecContext(ctx, db.db.Rebind(statement), rewards.Done, currentTime, o.Status, o.Type, currentTime, o.Name)
			if err != nil {
				return offerErr.Wrap(err)
			}
		default:
			statement := `
				UPDATE offers SET status=?, expires_at=?
				WHERE status=? AND type=? AND expires_at>?;`
			_, err := tx.ExecContext(ctx, db.db.Rebind(statement), rewards.Done, currentTime, o.Status, o.Type, currentTime)
			if err != nil {
				return offerErr.Wrap(err)
			}
		}

		statement := `
			INSERT INTO offers (name, description, award_credit_in_cents, invitee_credit_in_cents, award_credit_duration_days,
				invitee_credit_duration_days, redeemable_cap, expires_at, created_at, status, type)
			VALUES (?::TEXT, ?::TEXT, ?::INT, ?::INT, ?::INT, ?::INT, ?::INT, ?::timestamptz, ?::timestamptz, ?::INT, ?::INT)
			RETURNING id;
		`
		row := tx.QueryRowContext(ctx, db.db.Rebind(statement),
			o.Name,
			o.Description,
			o.AwardCredit.Cents(),
			o.InviteeCredit.Cents(),
			o.AwardCreditDurationDays,
			o.InviteeCreditDurationDays,
			o.RedeemableCap,
			o.ExpiresAt,
			currentTime,
			o.Status,
			o.Type,
		)
		return row.Scan(&id)
	})
	if err != nil {
		// Do not return a partially-populated offer together with an error;
		// the value is meaningless when err is non-nil.
		return nil, offerErr.Wrap(err)
	}

	return &rewards.Offer{
		ID:                        int(id),
		Name:                      o.Name,
		Description:               o.Description,
		AwardCredit:               o.AwardCredit,
		InviteeCredit:             o.InviteeCredit,
		AwardCreditDurationDays:   o.AwardCreditDurationDays,
		InviteeCreditDurationDays: o.InviteeCreditDurationDays,
		RedeemableCap:             o.RedeemableCap,
		ExpiresAt:                 o.ExpiresAt,
		CreatedAt:                 currentTime,
		Status:                    o.Status,
		Type:                      o.Type,
	}, nil
}
// Finish marks the offer with the given id as Done and expires it immediately.
func (db *offersDB) Finish(ctx context.Context, oID int) error {
	updateFields := dbx.Offer_Update_Fields{
		Status:    dbx.Offer_Status(int(rewards.Done)),
		ExpiresAt: dbx.Offer_ExpiresAt(time.Now().UTC()),
	}
	err := db.db.UpdateNoReturn_Offer_By_Id(ctx, dbx.Offer_Id(oID), updateFields)
	return offerErr.Wrap(err)
}
// offersFromDBX converts a list of dbx offer rows into rewards.Offers,
// collecting conversion errors instead of stopping at the first failure.
func offersFromDBX(offersDbx []*dbx.Offer) (rewards.Offers, error) {
	var (
		converted rewards.Offers
		group     errs.Group
	)
	for _, dbxOffer := range offersDbx {
		offer, err := convertDBOffer(dbxOffer)
		if err != nil {
			group.Add(err)
			continue
		}
		converted = append(converted, *offer)
	}
	return converted, group.Err()
}
// convertDBOffer translates a single dbx offer row into a rewards.Offer.
// Returns an error when the row is nil.
func convertDBOffer(offerDbx *dbx.Offer) (*rewards.Offer, error) {
	if offerDbx == nil {
		return nil, offerErr.New("offerDbx parameter is nil")
	}

	o := rewards.Offer{
		ID:            offerDbx.Id,
		Name:          offerDbx.Name,
		Description:   offerDbx.Description,
		AwardCredit:   currency.Cents(offerDbx.AwardCreditInCents),
		InviteeCredit: currency.Cents(offerDbx.InviteeCreditInCents),
		ExpiresAt:     offerDbx.ExpiresAt.UTC(),
		CreatedAt:     offerDbx.CreatedAt.UTC(),
		Status:        rewards.OfferStatus(offerDbx.Status),
		Type:          rewards.OfferType(offerDbx.Type),
	}
	// The duration and cap columns are nullable; leave the zero value in
	// place when the column is NULL.
	if offerDbx.RedeemableCap != nil {
		o.RedeemableCap = *offerDbx.RedeemableCap
	}
	if offerDbx.AwardCreditDurationDays != nil {
		o.AwardCreditDurationDays = *offerDbx.AwardCreditDurationDays
	}
	if offerDbx.InviteeCreditDurationDays != nil {
		o.InviteeCreditDurationDays = *offerDbx.InviteeCreditDurationDays
	}

	return &o, nil
}

View File

@ -10,6 +10,7 @@ import (
"errors"
"fmt"
"sort"
"strings"
"time"
"github.com/spacemonkeygo/monkit/v3"
@ -654,8 +655,14 @@ func (cache *overlaycache) UpdateNodeInfo(ctx context.Context, nodeID storj.Node
updateFields.Type = dbx.Node_Type(int(nodeInfo.Type))
}
if nodeInfo.Operator != nil {
walletFeatures, err := encodeWalletFeatures(nodeInfo.Operator.GetWalletFeatures())
if err != nil {
return nil, Error.Wrap(err)
}
updateFields.Wallet = dbx.Node_Wallet(nodeInfo.Operator.GetWallet())
updateFields.Email = dbx.Node_Email(nodeInfo.Operator.GetEmail())
updateFields.WalletFeatures = dbx.Node_WalletFeatures(walletFeatures)
}
if nodeInfo.Capacity != nil {
updateFields.FreeDisk = dbx.Node_FreeDisk(nodeInfo.Capacity.GetFreeDisk())
@ -1035,8 +1042,9 @@ func convertDBNode(ctx context.Context, info *dbx.Node) (_ *overlay.NodeDossier,
},
Type: pb.NodeType(info.Type),
Operator: pb.NodeOperator{
Email: info.Email,
Wallet: info.Wallet,
Email: info.Email,
Wallet: info.Wallet,
WalletFeatures: decodeWalletFeatures(info.WalletFeatures),
},
Capacity: pb.NodeCapacity{
FreeDisk: info.FreeDisk,
@ -1065,6 +1073,31 @@ func convertDBNode(ctx context.Context, info *dbx.Node) (_ *overlay.NodeDossier,
return node, nil
}
// encodeWalletFeatures encodes wallet features into a comma separated list
// string. It fails when any feature contains the separator itself, reporting
// every offending feature rather than only the first.
func encodeWalletFeatures(features []string) (string, error) {
	var group errs.Group
	for _, feature := range features {
		if strings.ContainsRune(feature, ',') {
			group.Add(errs.New("error encoding %s, can not contain separator \",\"", feature))
		}
	}
	if err := group.Err(); err != nil {
		return "", Error.Wrap(err)
	}

	return strings.Join(features, ","), nil
}
// decodeWalletFeatures decodes a comma separated wallet features list string.
// An empty string decodes to nil rather than a one-element slice.
func decodeWalletFeatures(encoded string) []string {
	var features []string
	if encoded != "" {
		features = strings.Split(encoded, ",")
	}
	return features
}
func getNodeStats(dbNode *dbx.Node) *overlay.NodeStats {
nodeStats := &overlay.NodeStats{
Latency90: dbNode.Latency90,
@ -1541,6 +1574,11 @@ func (cache *overlaycache) UpdateCheckIn(ctx context.Context, node overlay.NodeC
return Error.New("unable to convert version to semVer")
}
walletFeatures, err := encodeWalletFeatures(node.Operator.GetWalletFeatures())
if err != nil {
return Error.Wrap(err)
}
query := `
INSERT INTO nodes
(
@ -1551,7 +1589,8 @@ func (cache *overlaycache) UpdateCheckIn(ctx context.Context, node overlay.NodeC
audit_reputation_alpha, audit_reputation_beta,
unknown_audit_reputation_alpha, unknown_audit_reputation_beta,
major, minor, patch, hash, timestamp, release,
last_ip_port
last_ip_port,
wallet_features
)
VALUES (
$1, $2, $3, $4, $5,
@ -1565,7 +1604,8 @@ func (cache *overlaycache) UpdateCheckIn(ctx context.Context, node overlay.NodeC
$10, $11,
$10, $11,
$12, $13, $14, $15, $16, $17,
$19
$19,
$20
)
ON CONFLICT (id)
DO UPDATE
@ -1585,7 +1625,8 @@ func (cache *overlaycache) UpdateCheckIn(ctx context.Context, node overlay.NodeC
THEN $18::timestamptz
ELSE nodes.last_contact_failure
END,
last_ip_port=$19;
last_ip_port=$19,
wallet_features=$20;
`
_, err = cache.db.ExecContext(ctx, query,
// args $1 - $5
@ -1602,6 +1643,8 @@ func (cache *overlaycache) UpdateCheckIn(ctx context.Context, node overlay.NodeC
timestamp,
// args $19
node.LastIPPort,
// args $20,
walletFeatures,
)
if err != nil {
return Error.Wrap(err)

View File

@ -4,6 +4,7 @@
package satellitedb_test
import (
"fmt"
"testing"
"time"
@ -15,6 +16,7 @@ import (
"storj.io/storj/private/testplanet"
"storj.io/storj/satellite"
"storj.io/storj/satellite/overlay"
"storj.io/storj/storagenode"
)
func TestDQNodesLastSeenBefore(t *testing.T) {
@ -159,6 +161,32 @@ func TestBatchUpdateStats(t *testing.T) {
})
}
// TestOperatorConfig verifies that per-node operator settings (email, wallet,
// and wallet features) configured on storage nodes end up stored in the
// satellite's overlay cache.
func TestOperatorConfig(t *testing.T) {
	testplanet.Run(t, testplanet.Config{
		SatelliteCount: 1, StorageNodeCount: 2, Reconfigure: testplanet.Reconfigure{
			StorageNode: func(index int, config *storagenode.Config) {
				config.Operator.Wallet = fmt.Sprintf("0x%d123456789012345678901234567890123456789", index)
				config.Operator.WalletFeatures = []string{fmt.Sprintf("test_%d", index)}
			},
		},
	}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
		// Pause contact chores first so check-ins don't race the assertions.
		for _, node := range planet.StorageNodes {
			node.Contact.Chore.Pause(ctx)
		}

		cache := planet.Satellites[0].DB.OverlayCache()

		for _, node := range planet.StorageNodes {
			dossier, err := cache.Get(ctx, node.ID())
			require.NoError(t, err)

			require.Equal(t, node.Config.Operator.Email, dossier.Operator.Email)
			require.Equal(t, node.Config.Operator.Wallet, dossier.Operator.Wallet)
			require.Equal(t, []string(node.Config.Operator.WalletFeatures), dossier.Operator.WalletFeatures)
		}
	})
}
// returns an AuditHistoryConfig with sensible test values.
func testAuditHistoryConfig() overlay.AuditHistoryConfig {
return overlay.AuditHistoryConfig{

View File

@ -0,0 +1,559 @@
-- AUTOGENERATED BY storj.io/dbx
-- DO NOT EDIT
CREATE TABLE accounting_rollups (
node_id bytea NOT NULL,
start_time timestamp with time zone NOT NULL,
put_total bigint NOT NULL,
get_total bigint NOT NULL,
get_audit_total bigint NOT NULL,
get_repair_total bigint NOT NULL,
put_repair_total bigint NOT NULL,
at_rest_total double precision NOT NULL,
PRIMARY KEY ( node_id, start_time )
);
CREATE TABLE accounting_timestamps (
name text NOT NULL,
value timestamp with time zone NOT NULL,
PRIMARY KEY ( name )
);
CREATE TABLE audit_histories (
node_id bytea NOT NULL,
history bytea NOT NULL,
PRIMARY KEY ( node_id )
);
CREATE TABLE bucket_bandwidth_rollups (
bucket_name bytea NOT NULL,
project_id bytea NOT NULL,
interval_start timestamp with time zone NOT NULL,
interval_seconds integer NOT NULL,
action integer NOT NULL,
inline bigint NOT NULL,
allocated bigint NOT NULL,
settled bigint NOT NULL,
PRIMARY KEY ( bucket_name, project_id, interval_start, action )
);
CREATE TABLE bucket_bandwidth_rollup_archives (
bucket_name bytea NOT NULL,
project_id bytea NOT NULL,
interval_start timestamp with time zone NOT NULL,
interval_seconds integer NOT NULL,
action integer NOT NULL,
inline bigint NOT NULL,
allocated bigint NOT NULL,
settled bigint NOT NULL,
PRIMARY KEY ( bucket_name, project_id, interval_start, action )
);
CREATE TABLE bucket_storage_tallies (
bucket_name bytea NOT NULL,
project_id bytea NOT NULL,
interval_start timestamp with time zone NOT NULL,
inline bigint NOT NULL,
remote bigint NOT NULL,
remote_segments_count integer NOT NULL,
inline_segments_count integer NOT NULL,
object_count integer NOT NULL,
metadata_size bigint NOT NULL,
PRIMARY KEY ( bucket_name, project_id, interval_start )
);
CREATE TABLE coinpayments_transactions (
id text NOT NULL,
user_id bytea NOT NULL,
address text NOT NULL,
amount bytea NOT NULL,
received bytea NOT NULL,
status integer NOT NULL,
key text NOT NULL,
timeout integer NOT NULL,
created_at timestamp with time zone NOT NULL,
PRIMARY KEY ( id )
);
CREATE TABLE coupons (
id bytea NOT NULL,
user_id bytea NOT NULL,
amount bigint NOT NULL,
description text NOT NULL,
type integer NOT NULL,
status integer NOT NULL,
duration bigint NOT NULL,
created_at timestamp with time zone NOT NULL,
PRIMARY KEY ( id )
);
CREATE TABLE coupon_usages (
coupon_id bytea NOT NULL,
amount bigint NOT NULL,
status integer NOT NULL,
period timestamp with time zone NOT NULL,
PRIMARY KEY ( coupon_id, period )
);
CREATE TABLE graceful_exit_progress (
node_id bytea NOT NULL,
bytes_transferred bigint NOT NULL,
pieces_transferred bigint NOT NULL DEFAULT 0,
pieces_failed bigint NOT NULL DEFAULT 0,
updated_at timestamp with time zone NOT NULL,
PRIMARY KEY ( node_id )
);
CREATE TABLE graceful_exit_transfer_queue (
node_id bytea NOT NULL,
path bytea NOT NULL,
piece_num integer NOT NULL,
root_piece_id bytea,
durability_ratio double precision NOT NULL,
queued_at timestamp with time zone NOT NULL,
requested_at timestamp with time zone,
last_failed_at timestamp with time zone,
last_failed_code integer,
failed_count integer,
finished_at timestamp with time zone,
order_limit_send_count integer NOT NULL DEFAULT 0,
PRIMARY KEY ( node_id, path, piece_num )
);
CREATE TABLE injuredsegments (
path bytea NOT NULL,
data bytea NOT NULL,
attempted timestamp with time zone,
updated_at timestamp with time zone NOT NULL DEFAULT current_timestamp,
segment_health double precision NOT NULL DEFAULT 1,
PRIMARY KEY ( path )
);
CREATE TABLE irreparabledbs (
segmentpath bytea NOT NULL,
segmentdetail bytea NOT NULL,
pieces_lost_count bigint NOT NULL,
seg_damaged_unix_sec bigint NOT NULL,
repair_attempt_count bigint NOT NULL,
PRIMARY KEY ( segmentpath )
);
CREATE TABLE nodes (
id bytea NOT NULL,
address text NOT NULL DEFAULT '',
last_net text NOT NULL,
last_ip_port text,
protocol integer NOT NULL DEFAULT 0,
type integer NOT NULL DEFAULT 0,
email text NOT NULL,
wallet text NOT NULL,
wallet_features text NOT NULL DEFAULT '',
free_disk bigint NOT NULL DEFAULT -1,
piece_count bigint NOT NULL DEFAULT 0,
major bigint NOT NULL DEFAULT 0,
minor bigint NOT NULL DEFAULT 0,
patch bigint NOT NULL DEFAULT 0,
hash text NOT NULL DEFAULT '',
timestamp timestamp with time zone NOT NULL DEFAULT '0001-01-01 00:00:00+00',
release boolean NOT NULL DEFAULT false,
latency_90 bigint NOT NULL DEFAULT 0,
audit_success_count bigint NOT NULL DEFAULT 0,
total_audit_count bigint NOT NULL DEFAULT 0,
vetted_at timestamp with time zone,
uptime_success_count bigint NOT NULL DEFAULT 0,
total_uptime_count bigint NOT NULL DEFAULT 0,
created_at timestamp with time zone NOT NULL DEFAULT current_timestamp,
updated_at timestamp with time zone NOT NULL DEFAULT current_timestamp,
last_contact_success timestamp with time zone NOT NULL DEFAULT 'epoch',
last_contact_failure timestamp with time zone NOT NULL DEFAULT 'epoch',
contained boolean NOT NULL DEFAULT false,
disqualified timestamp with time zone,
suspended timestamp with time zone,
unknown_audit_suspended timestamp with time zone,
offline_suspended timestamp with time zone,
under_review timestamp with time zone,
online_score double precision NOT NULL DEFAULT 1,
audit_reputation_alpha double precision NOT NULL DEFAULT 1,
audit_reputation_beta double precision NOT NULL DEFAULT 0,
unknown_audit_reputation_alpha double precision NOT NULL DEFAULT 1,
unknown_audit_reputation_beta double precision NOT NULL DEFAULT 0,
uptime_reputation_alpha double precision NOT NULL DEFAULT 1,
uptime_reputation_beta double precision NOT NULL DEFAULT 0,
exit_initiated_at timestamp with time zone,
exit_loop_completed_at timestamp with time zone,
exit_finished_at timestamp with time zone,
exit_success boolean NOT NULL DEFAULT false,
PRIMARY KEY ( id )
);
CREATE TABLE node_api_versions (
id bytea NOT NULL,
api_version integer NOT NULL,
created_at timestamp with time zone NOT NULL,
updated_at timestamp with time zone NOT NULL,
PRIMARY KEY ( id )
);
CREATE TABLE offers (
id serial NOT NULL,
name text NOT NULL,
description text NOT NULL,
award_credit_in_cents integer NOT NULL DEFAULT 0,
invitee_credit_in_cents integer NOT NULL DEFAULT 0,
award_credit_duration_days integer,
invitee_credit_duration_days integer,
redeemable_cap integer,
expires_at timestamp with time zone NOT NULL,
created_at timestamp with time zone NOT NULL,
status integer NOT NULL,
type integer NOT NULL,
PRIMARY KEY ( id )
);
CREATE TABLE peer_identities (
node_id bytea NOT NULL,
leaf_serial_number bytea NOT NULL,
chain bytea NOT NULL,
updated_at timestamp with time zone NOT NULL,
PRIMARY KEY ( node_id )
);
CREATE TABLE pending_audits (
node_id bytea NOT NULL,
piece_id bytea NOT NULL,
stripe_index bigint NOT NULL,
share_size bigint NOT NULL,
expected_share_hash bytea NOT NULL,
reverify_count bigint NOT NULL,
path bytea NOT NULL,
PRIMARY KEY ( node_id )
);
CREATE TABLE projects (
id bytea NOT NULL,
name text NOT NULL,
description text NOT NULL,
usage_limit bigint,
bandwidth_limit bigint,
rate_limit integer,
max_buckets integer,
partner_id bytea,
owner_id bytea NOT NULL,
created_at timestamp with time zone NOT NULL,
PRIMARY KEY ( id )
);
CREATE TABLE project_bandwidth_rollups (
project_id bytea NOT NULL,
interval_month date NOT NULL,
egress_allocated bigint NOT NULL,
PRIMARY KEY ( project_id, interval_month )
);
CREATE TABLE registration_tokens (
secret bytea NOT NULL,
owner_id bytea,
project_limit integer NOT NULL,
created_at timestamp with time zone NOT NULL,
PRIMARY KEY ( secret ),
UNIQUE ( owner_id )
);
CREATE TABLE reset_password_tokens (
secret bytea NOT NULL,
owner_id bytea NOT NULL,
created_at timestamp with time zone NOT NULL,
PRIMARY KEY ( secret ),
UNIQUE ( owner_id )
);
CREATE TABLE revocations (
revoked bytea NOT NULL,
api_key_id bytea NOT NULL,
PRIMARY KEY ( revoked )
);
CREATE TABLE storagenode_bandwidth_rollups (
storagenode_id bytea NOT NULL,
interval_start timestamp with time zone NOT NULL,
interval_seconds integer NOT NULL,
action integer NOT NULL,
allocated bigint DEFAULT 0,
settled bigint NOT NULL,
PRIMARY KEY ( storagenode_id, interval_start, action )
);
CREATE TABLE storagenode_bandwidth_rollup_archives (
storagenode_id bytea NOT NULL,
interval_start timestamp with time zone NOT NULL,
interval_seconds integer NOT NULL,
action integer NOT NULL,
allocated bigint DEFAULT 0,
settled bigint NOT NULL,
PRIMARY KEY ( storagenode_id, interval_start, action )
);
CREATE TABLE storagenode_bandwidth_rollups_phase2 (
storagenode_id bytea NOT NULL,
interval_start timestamp with time zone NOT NULL,
interval_seconds integer NOT NULL,
action integer NOT NULL,
allocated bigint DEFAULT 0,
settled bigint NOT NULL,
PRIMARY KEY ( storagenode_id, interval_start, action )
);
CREATE TABLE storagenode_payments (
id bigserial NOT NULL,
created_at timestamp with time zone NOT NULL,
node_id bytea NOT NULL,
period text NOT NULL,
amount bigint NOT NULL,
receipt text,
notes text,
PRIMARY KEY ( id )
);
CREATE TABLE storagenode_paystubs (
period text NOT NULL,
node_id bytea NOT NULL,
created_at timestamp with time zone NOT NULL,
codes text NOT NULL,
usage_at_rest double precision NOT NULL,
usage_get bigint NOT NULL,
usage_put bigint NOT NULL,
usage_get_repair bigint NOT NULL,
usage_put_repair bigint NOT NULL,
usage_get_audit bigint NOT NULL,
comp_at_rest bigint NOT NULL,
comp_get bigint NOT NULL,
comp_put bigint NOT NULL,
comp_get_repair bigint NOT NULL,
comp_put_repair bigint NOT NULL,
comp_get_audit bigint NOT NULL,
surge_percent bigint NOT NULL,
held bigint NOT NULL,
owed bigint NOT NULL,
disposed bigint NOT NULL,
paid bigint NOT NULL,
distributed bigint NOT NULL,
PRIMARY KEY ( period, node_id )
);
CREATE TABLE storagenode_storage_tallies (
node_id bytea NOT NULL,
interval_end_time timestamp with time zone NOT NULL,
data_total double precision NOT NULL,
PRIMARY KEY ( interval_end_time, node_id )
);
CREATE TABLE stripe_customers (
user_id bytea NOT NULL,
customer_id text NOT NULL,
created_at timestamp with time zone NOT NULL,
PRIMARY KEY ( user_id ),
UNIQUE ( customer_id )
);
CREATE TABLE stripecoinpayments_invoice_project_records (
id bytea NOT NULL,
project_id bytea NOT NULL,
storage double precision NOT NULL,
egress bigint NOT NULL,
objects bigint NOT NULL,
period_start timestamp with time zone NOT NULL,
period_end timestamp with time zone NOT NULL,
state integer NOT NULL,
created_at timestamp with time zone NOT NULL,
PRIMARY KEY ( id ),
UNIQUE ( project_id, period_start, period_end )
);
CREATE TABLE stripecoinpayments_tx_conversion_rates (
tx_id text NOT NULL,
rate bytea NOT NULL,
created_at timestamp with time zone NOT NULL,
PRIMARY KEY ( tx_id )
);
CREATE TABLE users (
id bytea NOT NULL,
email text NOT NULL,
normalized_email text NOT NULL,
full_name text NOT NULL,
short_name text,
password_hash bytea NOT NULL,
status integer NOT NULL,
partner_id bytea,
created_at timestamp with time zone NOT NULL,
project_limit integer NOT NULL DEFAULT 0,
position text,
company_name text,
company_size integer,
working_on text,
is_professional boolean NOT NULL DEFAULT false,
PRIMARY KEY ( id )
);
CREATE TABLE value_attributions (
project_id bytea NOT NULL,
bucket_name bytea NOT NULL,
partner_id bytea NOT NULL,
last_updated timestamp with time zone NOT NULL,
PRIMARY KEY ( project_id, bucket_name )
);
CREATE TABLE api_keys (
id bytea NOT NULL,
project_id bytea NOT NULL REFERENCES projects( id ) ON DELETE CASCADE,
head bytea NOT NULL,
name text NOT NULL,
secret bytea NOT NULL,
partner_id bytea,
created_at timestamp with time zone NOT NULL,
PRIMARY KEY ( id ),
UNIQUE ( head ),
UNIQUE ( name, project_id )
);
CREATE TABLE bucket_metainfos (
id bytea NOT NULL,
project_id bytea NOT NULL REFERENCES projects( id ),
name bytea NOT NULL,
partner_id bytea,
path_cipher integer NOT NULL,
created_at timestamp with time zone NOT NULL,
default_segment_size integer NOT NULL,
default_encryption_cipher_suite integer NOT NULL,
default_encryption_block_size integer NOT NULL,
default_redundancy_algorithm integer NOT NULL,
default_redundancy_share_size integer NOT NULL,
default_redundancy_required_shares integer NOT NULL,
default_redundancy_repair_shares integer NOT NULL,
default_redundancy_optimal_shares integer NOT NULL,
default_redundancy_total_shares integer NOT NULL,
PRIMARY KEY ( id ),
UNIQUE ( project_id, name )
);
CREATE TABLE project_members (
member_id bytea NOT NULL REFERENCES users( id ) ON DELETE CASCADE,
project_id bytea NOT NULL REFERENCES projects( id ) ON DELETE CASCADE,
created_at timestamp with time zone NOT NULL,
PRIMARY KEY ( member_id, project_id )
);
CREATE TABLE stripecoinpayments_apply_balance_intents (
tx_id text NOT NULL REFERENCES coinpayments_transactions( id ) ON DELETE CASCADE,
state integer NOT NULL,
created_at timestamp with time zone NOT NULL,
PRIMARY KEY ( tx_id )
);
-- user_credits records credits (in cents) earned and used per user and offer,
-- e.g. through the referral program (referred_by).
CREATE TABLE user_credits (
id serial NOT NULL,
user_id bytea NOT NULL REFERENCES users( id ) ON DELETE CASCADE,
offer_id integer NOT NULL REFERENCES offers( id ),
-- referring user, if any; kept as NULL when that user is deleted
referred_by bytea REFERENCES users( id ) ON DELETE SET NULL,
type text NOT NULL,
credits_earned_in_cents integer NOT NULL,
credits_used_in_cents integer NOT NULL,
expires_at timestamp with time zone NOT NULL,
created_at timestamp with time zone NOT NULL,
PRIMARY KEY ( id ),
-- also enforced by the credits_earned_user_id_offer_id unique index
UNIQUE ( id, offer_id )
);
-- Secondary indexes supporting the satellite's common query patterns
-- (rollup aggregation, repair queue scans, node lookups, payout queries).
CREATE INDEX accounting_rollups_start_time_index ON accounting_rollups ( start_time );
CREATE INDEX bucket_bandwidth_rollups_project_id_action_interval_index ON bucket_bandwidth_rollups ( project_id, action, interval_start );
CREATE INDEX bucket_bandwidth_rollups_action_interval_project_id_index ON bucket_bandwidth_rollups ( action, interval_start, project_id );
CREATE INDEX bucket_bandwidth_rollups_archive_project_id_action_interval_index ON bucket_bandwidth_rollup_archives ( project_id, action, interval_start );
CREATE INDEX bucket_bandwidth_rollups_archive_action_interval_project_id_index ON bucket_bandwidth_rollup_archives ( action, interval_start, project_id );
CREATE INDEX bucket_storage_tallies_project_id_interval_start_index ON bucket_storage_tallies ( project_id, interval_start );
CREATE INDEX graceful_exit_transfer_queue_nid_dr_qa_fa_lfa_index ON graceful_exit_transfer_queue ( node_id, durability_ratio, queued_at, finished_at, last_failed_at );
CREATE INDEX injuredsegments_attempted_index ON injuredsegments ( attempted );
CREATE INDEX injuredsegments_segment_health_index ON injuredsegments ( segment_health );
CREATE INDEX injuredsegments_updated_at_index ON injuredsegments ( updated_at );
CREATE INDEX node_last_ip ON nodes ( last_net );
CREATE INDEX nodes_dis_unk_exit_fin_last_success_index ON nodes ( disqualified, unknown_audit_suspended, exit_finished_at, last_contact_success );
CREATE INDEX storagenode_bandwidth_rollups_interval_start_index ON storagenode_bandwidth_rollups ( interval_start );
CREATE INDEX storagenode_bandwidth_rollup_archives_interval_start_index ON storagenode_bandwidth_rollup_archives ( interval_start );
CREATE INDEX storagenode_payments_node_id_period_index ON storagenode_payments ( node_id, period );
CREATE INDEX storagenode_paystubs_node_id_index ON storagenode_paystubs ( node_id );
CREATE INDEX storagenode_storage_tallies_node_id_index ON storagenode_storage_tallies ( node_id );
-- backs the UNIQUE ( id, offer_id ) constraint on user_credits
CREATE UNIQUE INDEX credits_earned_user_id_offer_id ON user_credits ( id, offer_id );
-- Test fixture data below: exact values (including bytea escapes) are what
-- the migration test asserts against — do not reformat.
INSERT INTO "accounting_rollups"("node_id", "start_time", "put_total", "get_total", "get_audit_total", "get_repair_total", "put_repair_total", "at_rest_total") VALUES (E'\\367M\\177\\251]t/\\022\\256\\214\\265\\025\\224\\204:\\217\\212\\0102<\\321\\374\\020&\\271Qc\\325\\261\\354\\246\\233'::bytea, '2019-02-09 00:00:00+00', 3000, 6000, 9000, 12000, 0, 15000);
INSERT INTO "accounting_timestamps" VALUES ('LastAtRestTally', '0001-01-01 00:00:00+00');
INSERT INTO "accounting_timestamps" VALUES ('LastRollup', '0001-01-01 00:00:00+00');
INSERT INTO "accounting_timestamps" VALUES ('LastBandwidthTally', '0001-01-01 00:00:00+00');
-- Node fixtures covering different audit/uptime counts, reputations, a row
-- with last_ip_port set, a vetted node (vetted_at), and a row without the
-- uptime_success_count/total_uptime_count columns.
INSERT INTO "nodes"("id", "address", "last_net", "protocol", "type", "email", "wallet", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "uptime_success_count", "total_uptime_count", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "suspended", "audit_reputation_alpha", "audit_reputation_beta", "unknown_audit_reputation_alpha", "unknown_audit_reputation_beta", "uptime_reputation_alpha", "uptime_reputation_beta", "exit_success", "online_score") VALUES (E'\\153\\313\\233\\074\\327\\177\\136\\070\\346\\001', '127.0.0.1:55516', '', 0, 4, '', '', -1, 0, 0, 1, 0, '', 'epoch', false, 0, 0, 5, 0, 5, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, NULL, 50, 0, 1, 0, 100, 5, false, 1);
INSERT INTO "nodes"("id", "address", "last_net", "protocol", "type", "email", "wallet", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "uptime_success_count", "total_uptime_count", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "suspended", "audit_reputation_alpha", "audit_reputation_beta", "unknown_audit_reputation_alpha", "unknown_audit_reputation_beta", "uptime_reputation_alpha", "uptime_reputation_beta", "exit_success", "online_score") VALUES (E'\\006\\223\\250R\\221\\005\\365\\377v>0\\266\\365\\216\\255?\\347\\244\\371?2\\264\\262\\230\\007<\\001\\262\\263\\237\\247n', '127.0.0.1:55518', '', 0, 4, '', '', -1, 0, 0, 1, 0, '', 'epoch', false, 0, 0, 0, 3, 3, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, NULL, 50, 0, 1, 0, 100, 0, false, 1);
INSERT INTO "nodes"("id", "address", "last_net", "protocol", "type", "email", "wallet", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "uptime_success_count", "total_uptime_count", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "suspended", "audit_reputation_alpha", "audit_reputation_beta", "unknown_audit_reputation_alpha", "unknown_audit_reputation_beta", "uptime_reputation_alpha", "uptime_reputation_beta", "exit_success", "online_score") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014', '127.0.0.1:55517', '', 0, 4, '', '', -1, 0, 0, 1, 0, '', 'epoch', false, 0, 0, 0, 0, 0, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, NULL, 50, 0, 1, 0, 100, 0, false, 1);
INSERT INTO "nodes"("id", "address", "last_net", "protocol", "type", "email", "wallet", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "uptime_success_count", "total_uptime_count", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "suspended", "audit_reputation_alpha", "audit_reputation_beta", "unknown_audit_reputation_alpha", "unknown_audit_reputation_beta", "uptime_reputation_alpha", "uptime_reputation_beta", "exit_success", "online_score") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\015', '127.0.0.1:55519', '', 0, 4, '', '', -1, 0, 0, 1, 0, '', 'epoch', false, 0, 1, 2, 1, 2, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, NULL, 50, 0, 1, 0, 100, 1, false, 1);
INSERT INTO "nodes"("id", "address", "last_net", "protocol", "type", "email", "wallet", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "uptime_success_count", "total_uptime_count", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "suspended", "audit_reputation_alpha", "audit_reputation_beta", "unknown_audit_reputation_alpha", "unknown_audit_reputation_beta", "uptime_reputation_alpha", "uptime_reputation_beta", "exit_success", "vetted_at", "online_score") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\016', '127.0.0.1:55520', '', 0, 4, '', '', -1, 0, 0, 1, 0, '', 'epoch', false, 0, 300, 400, 300, 400, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, NULL, 300, 0, 1, 0, 300, 100, false, '2020-03-18 12:00:00.000000+00', 1);
INSERT INTO "nodes"("id", "address", "last_net", "protocol", "type", "email", "wallet", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "uptime_success_count", "total_uptime_count", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "suspended", "audit_reputation_alpha", "audit_reputation_beta", "unknown_audit_reputation_alpha", "unknown_audit_reputation_beta", "uptime_reputation_alpha", "uptime_reputation_beta", "exit_success", "online_score") VALUES (E'\\154\\313\\233\\074\\327\\177\\136\\070\\346\\001', '127.0.0.1:55516', '', 0, 4, '', '', -1, 0, 0, 1, 0, '', 'epoch', false, 0, 0, 5, 0, 5, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, NULL, 50, 0, 75, 25, 100, 5, false, 1);
INSERT INTO "nodes"("id", "address", "last_net", "last_ip_port", "protocol", "type", "email", "wallet", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "uptime_success_count", "total_uptime_count", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "suspended", "audit_reputation_alpha", "audit_reputation_beta", "unknown_audit_reputation_alpha", "unknown_audit_reputation_beta", "uptime_reputation_alpha", "uptime_reputation_beta", "exit_success", "online_score") VALUES (E'\\154\\313\\233\\074\\327\\177\\136\\070\\346\\002', '127.0.0.1:55516', '127.0.0.0', '127.0.0.1:55516', 0, 4, '', '', -1, 0, 0, 1, 0, '', 'epoch', false, 0, 0, 5, 0, 5, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, NULL, 50, 0, 75, 25, 100, 5, false, 1);
INSERT INTO "nodes"("id", "address", "last_net", "protocol", "type", "email", "wallet", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "suspended", "audit_reputation_alpha", "audit_reputation_beta", "unknown_audit_reputation_alpha", "unknown_audit_reputation_beta", "uptime_reputation_alpha", "uptime_reputation_beta", "exit_success", "online_score") VALUES (E'\\363\\341\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\016', '127.0.0.1:55516', '', 0, 4, '', '', -1, 0, 0, 1, 0, '', 'epoch', false, 0, 0, 5, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, NULL, 50, 0, 1, 0, 100, 5, false, 1);
-- User, project, and membership fixtures plus irreparable-segment and
-- registration-token rows referenced elsewhere in the fixture set.
INSERT INTO "users"("id", "full_name", "short_name", "email", "normalized_email", "password_hash", "status", "partner_id", "created_at", "is_professional") VALUES (E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 'Noahson', 'William', '1email1@mail.test', '1EMAIL1@MAIL.TEST', E'some_readable_hash'::bytea, 1, NULL, '2019-02-14 08:28:24.614594+00', false);
INSERT INTO "projects"("id", "name", "description", "usage_limit", "bandwidth_limit", "max_buckets", "partner_id", "owner_id", "created_at") VALUES (E'\\022\\217/\\014\\376!K\\023\\276\\031\\311}m\\236\\205\\300'::bytea, 'ProjectName', 'projects description', NULL, NULL, NULL, NULL, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, '2019-02-14 08:28:24.254934+00');
INSERT INTO "projects"("id", "name", "description", "usage_limit", "bandwidth_limit", "max_buckets", "partner_id", "owner_id", "created_at") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea, 'projName1', 'Test project 1', NULL, NULL, NULL, NULL, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, '2019-02-14 08:28:24.636949+00');
INSERT INTO "project_members"("member_id", "project_id", "created_at") VALUES (E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea, '2019-02-14 08:28:24.677953+00');
INSERT INTO "project_members"("member_id", "project_id", "created_at") VALUES (E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, E'\\022\\217/\\014\\376!K\\023\\276\\031\\311}m\\236\\205\\300'::bytea, '2019-02-13 08:28:24.677953+00');
INSERT INTO "irreparabledbs" ("segmentpath", "segmentdetail", "pieces_lost_count", "seg_damaged_unix_sec", "repair_attempt_count") VALUES ('\x49616d5365676d656e746b6579696e666f30', '\x49616d5365676d656e7464657461696c696e666f30', 10, 1550159554, 10);
INSERT INTO "registration_tokens" ("secret", "owner_id", "project_limit", "created_at") VALUES (E'\\070\\127\\144\\013\\332\\344\\102\\376\\306\\056\\303\\130\\106\\132\\321\\276\\321\\274\\170\\264\\054\\333\\221\\116\\154\\221\\335\\070\\220\\146\\344\\216'::bytea, null, 1, '2019-02-14 08:28:24.677953+00');
-- Bandwidth/storage rollup tallies, password-reset token, referral offers,
-- an API key, a value attribution, a user credit, and a bucket metainfo row.
INSERT INTO "storagenode_bandwidth_rollups" ("storagenode_id", "interval_start", "interval_seconds", "action", "allocated", "settled") VALUES (E'\\006\\223\\250R\\221\\005\\365\\377v>0\\266\\365\\216\\255?\\347\\244\\371?2\\264\\262\\230\\007<\\001\\262\\263\\237\\247n', '2019-03-06 08:00:00.000000' AT TIME ZONE current_setting('TIMEZONE'), 3600, 1, 1024, 2024);
INSERT INTO "storagenode_storage_tallies" VALUES (E'\\3510\\323\\225"~\\036<\\342\\330m\\0253Jhr\\246\\233K\\246#\\2303\\351\\256\\275j\\212UM\\362\\207', '2019-02-14 08:16:57.812849+00', 1000);
INSERT INTO "bucket_bandwidth_rollups" ("bucket_name", "project_id", "interval_start", "interval_seconds", "action", "inline", "allocated", "settled") VALUES (E'testbucket'::bytea, E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea,'2019-03-06 08:00:00.000000' AT TIME ZONE current_setting('TIMEZONE'), 3600, 1, 1024, 2024, 3024);
INSERT INTO "bucket_storage_tallies" ("bucket_name", "project_id", "interval_start", "inline", "remote", "remote_segments_count", "inline_segments_count", "object_count", "metadata_size") VALUES (E'testbucket'::bytea, E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea,'2019-03-06 08:00:00.000000' AT TIME ZONE current_setting('TIMEZONE'), 4024, 5024, 0, 0, 0, 0);
INSERT INTO "bucket_bandwidth_rollups" ("bucket_name", "project_id", "interval_start", "interval_seconds", "action", "inline", "allocated", "settled") VALUES (E'testbucket'::bytea, E'\\170\\160\\157\\370\\274\\366\\113\\364\\272\\235\\301\\243\\321\\102\\321\\136'::bytea,'2019-03-06 08:00:00.000000' AT TIME ZONE current_setting('TIMEZONE'), 3600, 1, 1024, 2024, 3024);
INSERT INTO "bucket_storage_tallies" ("bucket_name", "project_id", "interval_start", "inline", "remote", "remote_segments_count", "inline_segments_count", "object_count", "metadata_size") VALUES (E'testbucket'::bytea, E'\\170\\160\\157\\370\\274\\366\\113\\364\\272\\235\\301\\243\\321\\102\\321\\136'::bytea,'2019-03-06 08:00:00.000000' AT TIME ZONE current_setting('TIMEZONE'), 4024, 5024, 0, 0, 0, 0);
INSERT INTO "reset_password_tokens" ("secret", "owner_id", "created_at") VALUES (E'\\070\\127\\144\\013\\332\\344\\102\\376\\306\\056\\303\\130\\106\\132\\321\\276\\321\\274\\170\\264\\054\\333\\221\\116\\154\\221\\335\\070\\220\\146\\344\\216'::bytea, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, '2019-05-08 08:28:24.677953+00');
INSERT INTO "offers" ("id", "name", "description", "award_credit_in_cents", "invitee_credit_in_cents", "expires_at", "created_at", "status", "type", "award_credit_duration_days", "invitee_credit_duration_days") VALUES (1, 'Default referral offer', 'Is active when no other active referral offer', 300, 600, '2119-03-14 08:28:24.636949+00', '2019-07-14 08:28:24.636949+00', 1, 2, 365, 14);
INSERT INTO "offers" ("id", "name", "description", "award_credit_in_cents", "invitee_credit_in_cents", "expires_at", "created_at", "status", "type", "award_credit_duration_days", "invitee_credit_duration_days") VALUES (2, 'Default free credit offer', 'Is active when no active free credit offer', 0, 300, '2119-03-14 08:28:24.636949+00', '2019-07-14 08:28:24.636949+00', 1, 1, NULL, 14);
INSERT INTO "api_keys" ("id", "project_id", "head", "name", "secret", "partner_id", "created_at") VALUES (E'\\334/\\302;\\225\\355O\\323\\276f\\247\\354/6\\241\\033'::bytea, E'\\022\\217/\\014\\376!K\\023\\276\\031\\311}m\\236\\205\\300'::bytea, E'\\111\\142\\147\\304\\132\\375\\070\\163\\270\\160\\251\\370\\126\\063\\351\\037\\257\\071\\143\\375\\351\\320\\253\\232\\220\\260\\075\\173\\306\\307\\115\\136'::bytea, 'key 2', E'\\254\\011\\315\\333\\273\\365\\001\\071\\024\\154\\253\\332\\301\\216\\361\\074\\221\\367\\251\\231\\274\\333\\300\\367\\001\\272\\327\\111\\315\\123\\042\\016'::bytea, NULL, '2019-02-14 08:28:24.267934+00');
INSERT INTO "value_attributions" ("project_id", "bucket_name", "partner_id", "last_updated") VALUES (E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, E''::bytea, E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea,'2019-02-14 08:07:31.028103+00');
INSERT INTO "user_credits" ("id", "user_id", "offer_id", "referred_by", "credits_earned_in_cents", "credits_used_in_cents", "type", "expires_at", "created_at") VALUES (1, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 1, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 200, 0, 'invalid', '2019-10-01 08:28:24.267934+00', '2019-06-01 08:28:24.267934+00');
INSERT INTO "bucket_metainfos" ("id", "project_id", "name", "partner_id", "created_at", "path_cipher", "default_segment_size", "default_encryption_cipher_suite", "default_encryption_block_size", "default_redundancy_algorithm", "default_redundancy_share_size", "default_redundancy_required_shares", "default_redundancy_repair_shares", "default_redundancy_optimal_shares", "default_redundancy_total_shares") VALUES (E'\\334/\\302;\\225\\355O\\323\\276f\\247\\354/6\\241\\033'::bytea, E'\\022\\217/\\014\\376!K\\023\\276\\031\\311}m\\236\\205\\300'::bytea, E'testbucketuniquename'::bytea, NULL, '2019-06-14 08:28:24.677953+00', 1, 65536, 1, 8192, 1, 4096, 4, 6, 8, 10);
-- Pending audits, peer identities, graceful-exit progress/transfer-queue
-- rows, and Stripe/coinpayments billing fixtures.
INSERT INTO "pending_audits" ("node_id", "piece_id", "stripe_index", "share_size", "expected_share_hash", "reverify_count", "path") VALUES (E'\\153\\313\\233\\074\\327\\177\\136\\070\\346\\001'::bytea, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 5, 1024, E'\\070\\127\\144\\013\\332\\344\\102\\376\\306\\056\\303\\130\\106\\132\\321\\276\\321\\274\\170\\264\\054\\333\\221\\116\\154\\221\\335\\070\\220\\146\\344\\216'::bytea, 1, 'not null');
INSERT INTO "peer_identities" VALUES (E'\\334/\\302;\\225\\355O\\323\\276f\\247\\354/6\\241\\033'::bytea, E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, '2019-02-14 08:07:31.335028+00');
INSERT INTO "graceful_exit_progress" ("node_id", "bytes_transferred", "pieces_transferred", "pieces_failed", "updated_at") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\016', 1000000000000000, 0, 0, '2019-09-12 10:07:31.028103+00');
INSERT INTO "graceful_exit_transfer_queue" ("node_id", "path", "piece_num", "durability_ratio", "queued_at", "requested_at", "last_failed_at", "last_failed_code", "failed_count", "finished_at", "order_limit_send_count") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\016', E'f8419768-5baa-4901-b3ba-62808013ec45/s0/test3/\\240\\243\\223n \\334~b}\\2624)\\250m\\201\\202\\235\\276\\361\\3304\\323\\352\\311\\361\\353;\\326\\311', 8, 1.0, '2019-09-12 10:07:31.028103+00', '2019-09-12 10:07:32.028103+00', null, null, 0, '2019-09-12 10:07:33.028103+00', 0);
INSERT INTO "graceful_exit_transfer_queue" ("node_id", "path", "piece_num", "durability_ratio", "queued_at", "requested_at", "last_failed_at", "last_failed_code", "failed_count", "finished_at", "order_limit_send_count") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\016', E'f8419768-5baa-4901-b3ba-62808013ec45/s0/test3/\\240\\243\\223n \\334~b}\\2624)\\250m\\201\\202\\235\\276\\361\\3304\\323\\352\\311\\361\\353;\\326\\312', 8, 1.0, '2019-09-12 10:07:31.028103+00', '2019-09-12 10:07:32.028103+00', null, null, 0, '2019-09-12 10:07:33.028103+00', 0);
INSERT INTO "stripe_customers" ("user_id", "customer_id", "created_at") VALUES (E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 'stripe_id', '2019-06-01 08:28:24.267934+00');
INSERT INTO "graceful_exit_transfer_queue" ("node_id", "path", "piece_num", "durability_ratio", "queued_at", "requested_at", "last_failed_at", "last_failed_code", "failed_count", "finished_at", "order_limit_send_count") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\016', E'f8419768-5baa-4901-b3ba-62808013ec45/s0/test3/\\240\\243\\223n \\334~b}\\2624)\\250m\\201\\202\\235\\276\\361\\3304\\323\\352\\311\\361\\353;\\326\\311', 9, 1.0, '2019-09-12 10:07:31.028103+00', '2019-09-12 10:07:32.028103+00', null, null, 0, '2019-09-12 10:07:33.028103+00', 0);
INSERT INTO "graceful_exit_transfer_queue" ("node_id", "path", "piece_num", "durability_ratio", "queued_at", "requested_at", "last_failed_at", "last_failed_code", "failed_count", "finished_at", "order_limit_send_count") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\016', E'f8419768-5baa-4901-b3ba-62808013ec45/s0/test3/\\240\\243\\223n \\334~b}\\2624)\\250m\\201\\202\\235\\276\\361\\3304\\323\\352\\311\\361\\353;\\326\\312', 9, 1.0, '2019-09-12 10:07:31.028103+00', '2019-09-12 10:07:32.028103+00', null, null, 0, '2019-09-12 10:07:33.028103+00', 0);
INSERT INTO "stripecoinpayments_invoice_project_records"("id", "project_id", "storage", "egress", "objects", "period_start", "period_end", "state", "created_at") VALUES (E'\\022\\217/\\014\\376!K\\023\\276\\031\\311}m\\236\\205\\300'::bytea, E'\\021\\217/\\014\\376!K\\023\\276\\031\\311}m\\236\\205\\300'::bytea, 0, 0, 0, '2019-06-01 08:28:24.267934+00', '2019-06-01 08:28:24.267934+00', 0, '2019-06-01 08:28:24.267934+00');
INSERT INTO "graceful_exit_transfer_queue" ("node_id", "path", "piece_num", "root_piece_id", "durability_ratio", "queued_at", "requested_at", "last_failed_at", "last_failed_code", "failed_count", "finished_at", "order_limit_send_count") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\016', E'f8419768-5baa-4901-b3ba-62808013ec45/s0/test3/\\240\\243\\223n \\334~b}\\2624)\\250m\\201\\202\\235\\276\\361\\3304\\323\\352\\311\\361\\353;\\326\\311', 10, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 1.0, '2019-09-12 10:07:31.028103+00', '2019-09-12 10:07:32.028103+00', null, null, 0, '2019-09-12 10:07:33.028103+00', 0);
INSERT INTO "stripecoinpayments_tx_conversion_rates" ("tx_id", "rate", "created_at") VALUES ('tx_id', E'\\363\\311\\033w\\222\\303Ci,'::bytea, '2019-06-01 08:28:24.267934+00');
INSERT INTO "coinpayments_transactions" ("id", "user_id", "address", "amount", "received", "status", "key", "timeout", "created_at") VALUES ('tx_id', E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 'address', E'\\363\\311\\033w'::bytea, E'\\363\\311\\033w'::bytea, 1, 'key', 60, '2019-06-01 08:28:24.267934+00');
INSERT INTO "storagenode_bandwidth_rollups" ("storagenode_id", "interval_start", "interval_seconds", "action", "settled") VALUES (E'\\006\\223\\250R\\221\\005\\365\\377v>0\\266\\365\\216\\255?\\347\\244\\371?2\\264\\262\\230\\007<\\001\\262\\263\\237\\247n', '2020-01-11 08:00:00.000000' AT TIME ZONE current_setting('TIMEZONE'), 3600, 1, 2024);
INSERT INTO "coupons" ("id", "user_id", "amount", "description", "type", "status", "duration", "created_at") VALUES (E'\\362\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 50, 'description', 0, 0, 2, '2019-06-01 08:28:24.267934+00');
INSERT INTO "coupon_usages" ("coupon_id", "amount", "status", "period") VALUES (E'\\362\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea, 22, 0, '2019-06-01 09:28:24.267934+00');
INSERT INTO "stripecoinpayments_apply_balance_intents" ("tx_id", "state", "created_at") VALUES ('tx_id', 0, '2019-06-01 08:28:24.267934+00');
-- Projects with rate/usage limits, repair-queue segments, bandwidth rollup
-- archives, paystub/payment rows, node API versions, and the most recently
-- added fixtures (after the "-- NEW DATA --" marker).
INSERT INTO "projects"("id", "name", "description", "usage_limit", "bandwidth_limit", "max_buckets", "rate_limit", "partner_id", "owner_id", "created_at") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\347'::bytea, 'projName1', 'Test project 1', NULL, NULL, NULL, 2000000, NULL, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, '2020-01-15 08:28:24.636949+00');
INSERT INTO "injuredsegments" ("path", "data", "segment_health", "updated_at") VALUES ('0', '\x0a0130120100', 1.0, '2020-09-01 00:00:00.000000+00');
INSERT INTO "injuredsegments" ("path", "data", "segment_health", "updated_at") VALUES ('here''s/a/great/path', '\x0a136865726527732f612f67726561742f70617468120a0102030405060708090a', 1.0, '2020-09-01 00:00:00.000000+00');
INSERT INTO "injuredsegments" ("path", "data", "segment_health", "updated_at") VALUES ('yet/another/cool/path', '\x0a157965742f616e6f746865722f636f6f6c2f70617468120a0102030405060708090a', 1.0, '2020-09-01 00:00:00.000000+00');
INSERT INTO "injuredsegments" ("path", "data", "segment_health", "updated_at") VALUES ('/this/is/a/new/path', '\x0a23736f2f6d616e792f69636f6e69632f70617468732f746f2f63686f6f73652f66726f6d120a0102030405060708090a', 1.0, '2020-09-01 00:00:00.000000+00');
INSERT INTO "injuredsegments" ("path", "data", "segment_health", "updated_at") VALUES ('/some/path/1/23/4', '\x0a23736f2f6d618e792f69636f6e69632f70617468732f746f2f63686f6f73652f66726f6d120a0102030405060708090a', 0.2, '2020-09-01 00:00:00.000000+00');
INSERT INTO "project_bandwidth_rollups"("project_id", "interval_month", egress_allocated) VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\347'::bytea, '2020-04-01', 10000);
INSERT INTO "projects"("id", "name", "description", "usage_limit", "bandwidth_limit", "max_buckets","rate_limit", "partner_id", "owner_id", "created_at") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\345'::bytea, 'egress101', 'High Bandwidth Project', NULL, NULL, NULL, 2000000, NULL, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, '2020-05-15 08:46:24.000000+00');
INSERT INTO "storagenode_paystubs"("period", "node_id", "created_at", "codes", "usage_at_rest", "usage_get", "usage_put", "usage_get_repair", "usage_put_repair", "usage_get_audit", "comp_at_rest", "comp_get", "comp_put", "comp_get_repair", "comp_put_repair", "comp_get_audit", "surge_percent", "held", "owed", "disposed", "paid", "distributed") VALUES ('2020-01', '\xf2a3b4c4dfdf7221310382fd5db5aa73e1d227d6df09734ec4e5305000000000', '2020-04-07T20:14:21.479141Z', '', 1327959864508416, 294054066688, 159031363328, 226751, 0, 836608, 2861984, 5881081, 0, 226751, 0, 8, 300, 0, 26909472, 0, 26909472, 0);
INSERT INTO "nodes"("id", "address", "last_net", "protocol", "type", "email", "wallet", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "uptime_success_count", "total_uptime_count", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "suspended", "audit_reputation_alpha", "audit_reputation_beta", "unknown_audit_reputation_alpha", "unknown_audit_reputation_beta", "uptime_reputation_alpha", "uptime_reputation_beta", "exit_success", "unknown_audit_suspended", "offline_suspended", "under_review") VALUES (E'\\153\\313\\233\\074\\327\\255\\136\\070\\346\\001', '127.0.0.1:55516', '', 0, 4, '', '', -1, 0, 0, 1, 0, '', 'epoch', false, 0, 0, 5, 0, 5, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, NULL, 50, 0, 1, 0, 100, 5, false, '2019-02-14 08:07:31.108963+00', '2019-02-14 08:07:31.108963+00', '2019-02-14 08:07:31.108963+00');
INSERT INTO "audit_histories" ("node_id", "history") VALUES (E'\\153\\313\\233\\074\\327\\177\\136\\070\\346\\001', '\x0a23736f2f6d616e792f69636f6e69632f70617468732f746f2f63686f6f73652f66726f6d120a0102030405060708090a');
INSERT INTO "node_api_versions"("id", "api_version", "created_at", "updated_at") VALUES (E'\\153\\313\\233\\074\\327\\177\\136\\070\\346\\001', 1, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00');
INSERT INTO "node_api_versions"("id", "api_version", "created_at", "updated_at") VALUES (E'\\006\\223\\250R\\221\\005\\365\\377v>0\\266\\365\\216\\255?\\347\\244\\371?2\\264\\262\\230\\007<\\001\\262\\263\\237\\247n', 2, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00');
INSERT INTO "node_api_versions"("id", "api_version", "created_at", "updated_at") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014', 3, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00');
INSERT INTO "projects"("id", "name", "description", "usage_limit", "bandwidth_limit", "rate_limit", "partner_id", "owner_id", "created_at", "max_buckets") VALUES (E'300\\273|\\342N\\347\\347\\363\\342\\363\\371>+F\\256\\263'::bytea, 'egress102', 'High Bandwidth Project 2', NULL, NULL, 2000000, NULL, E'265\\343U\\303\\312\\312\\363\\311\\033w\\222\\303Ci",'::bytea, '2020-05-15 08:46:24.000000+00', 1000);
INSERT INTO "projects"("id", "name", "description", "usage_limit", "bandwidth_limit", "rate_limit", "partner_id", "owner_id", "created_at", "max_buckets") VALUES (E'300\\273|\\342N\\347\\347\\363\\342\\363\\371>+F\\255\\244'::bytea, 'egress103', 'High Bandwidth Project 3', NULL, NULL, 2000000, NULL, E'265\\343U\\303\\312\\312\\363\\311\\033w\\222\\303Ci",'::bytea, '2020-05-15 08:46:24.000000+00', 1000);
INSERT INTO "projects"("id", "name", "description", "usage_limit", "bandwidth_limit", "rate_limit", "partner_id", "owner_id", "created_at", "max_buckets") VALUES (E'300\\273|\\342N\\347\\347\\363\\342\\363\\371>+F\\253\\231'::bytea, 'Limit Test 1', 'This project is above the default', 50000000001, 50000000001, 2000000, NULL, E'265\\343U\\303\\312\\312\\363\\311\\033w\\222\\303Ci",'::bytea, '2020-10-14 10:10:10.000000+00', 101);
INSERT INTO "projects"("id", "name", "description", "usage_limit", "bandwidth_limit", "rate_limit", "partner_id", "owner_id", "created_at", "max_buckets") VALUES (E'300\\273|\\342N\\347\\347\\363\\342\\363\\371>+F\\252\\230'::bytea, 'Limit Test 2', 'This project is below the default', NULL, NULL, 2000000, NULL, E'265\\343U\\303\\312\\312\\363\\311\\033w\\222\\303Ci",'::bytea, '2020-10-14 10:10:11.000000+00', NULL);
INSERT INTO "storagenode_bandwidth_rollups_phase2" ("storagenode_id", "interval_start", "interval_seconds", "action", "allocated", "settled") VALUES (E'\\006\\223\\250R\\221\\005\\365\\377v>0\\266\\365\\216\\255?\\347\\244\\371?2\\264\\262\\230\\007<\\001\\262\\263\\237\\247n', '2019-03-06 08:00:00.000000' AT TIME ZONE current_setting('TIMEZONE'), 3600, 1, 1024, 2024);
INSERT INTO "users"("id", "full_name", "short_name", "email", "normalized_email", "password_hash", "status", "partner_id", "created_at", "position", "company_name", "working_on", "company_size", "is_professional") VALUES (E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\205\\311",'::bytea, 'Thierry', 'Berg', '2email2@mail.test', '2EMAIL2@MAIL.TEST', E'some_readable_hash'::bytea, 2, NULL, '2020-05-16 10:28:24.614594+00', 'engineer', 'storj', 'data storage', 55, true);
INSERT INTO "storagenode_bandwidth_rollup_archives" ("storagenode_id", "interval_start", "interval_seconds", "action", "allocated", "settled") VALUES (E'\\006\\223\\250R\\221\\005\\365\\377v>0\\266\\365\\216\\255?\\347\\244\\371?2\\264\\262\\230\\007<\\001\\262\\263\\237\\247n', '2019-03-06 08:00:00.000000' AT TIME ZONE current_setting('TIMEZONE'), 3600, 1, 1024, 2024);
INSERT INTO "bucket_bandwidth_rollup_archives" ("bucket_name", "project_id", "interval_start", "interval_seconds", "action", "inline", "allocated", "settled") VALUES (E'testbucket'::bytea, E'\\170\\160\\157\\370\\274\\366\\113\\364\\272\\235\\301\\243\\321\\102\\321\\136'::bytea,'2019-03-06 08:00:00.000000' AT TIME ZONE current_setting('TIMEZONE'), 3600, 1, 1024, 2024, 3024);
INSERT INTO "storagenode_paystubs"("period", "node_id", "created_at", "codes", "usage_at_rest", "usage_get", "usage_put", "usage_get_repair", "usage_put_repair", "usage_get_audit", "comp_at_rest", "comp_get", "comp_put", "comp_get_repair", "comp_put_repair", "comp_get_audit", "surge_percent", "held", "owed", "disposed", "paid", "distributed") VALUES ('2020-12', '\x1111111111111111111111111111111111111111111111111111111111111111', '2020-04-07T20:14:21.479141Z', '', 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 117);
INSERT INTO "storagenode_payments"("id", "created_at", "period", "node_id", "amount") VALUES (1, '2020-04-07T20:14:21.479141Z', '2020-12', '\x1111111111111111111111111111111111111111111111111111111111111111', 117);
-- NEW DATA --
INSERT INTO "nodes"("id", "address", "last_net", "protocol", "type", "email", "wallet", "wallet_features", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "suspended", "audit_reputation_alpha", "audit_reputation_beta", "unknown_audit_reputation_alpha", "unknown_audit_reputation_beta", "uptime_reputation_alpha", "uptime_reputation_beta", "exit_success", "online_score") VALUES (E'\\362\\341\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\016', '127.0.0.1:55516', '', 0, 4, '', '', '', -1, 0, 0, 1, 0, '', 'epoch', false, 0, 0, 5, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, NULL, 50, 0, 1, 0, 100, 5, false, 1);

View File

@ -1,225 +0,0 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.
package satellitedb
import (
"context"
"database/sql"
"time"
"github.com/zeebo/errs"
"storj.io/common/uuid"
"storj.io/storj/private/currency"
"storj.io/storj/private/dbutil/pgutil"
"storj.io/storj/satellite/console"
"storj.io/storj/satellite/rewards"
"storj.io/storj/satellite/satellitedb/dbx"
)
// ensures that usercredits implements console.UserCredits.
var _ console.UserCredits = (*usercredits)(nil)
type usercredits struct {
db *satelliteDB
tx *dbx.Tx
}
// GetCreditUsage returns the total amount of referral a user has made based on user id, total available credits, and total used credits based on user id.
//
// The error result is named so that the deferred rows.Close below can fold
// its error into the value actually returned to the caller.
func (c *usercredits) GetCreditUsage(ctx context.Context, userID uuid.UUID, expirationEndDate time.Time) (_ *console.UserCreditUsage, err error) {
	usageRows, err := c.db.DB.QueryContext(ctx, c.db.Rebind(`SELECT a.used_credit, b.available_credit, c.referred
FROM (SELECT SUM(credits_used_in_cents) AS used_credit FROM user_credits WHERE user_id = ?) AS a,
(SELECT SUM(credits_earned_in_cents - credits_used_in_cents) AS available_credit FROM user_credits WHERE expires_at > ? AND user_id = ?) AS b,
(SELECT count(id) AS referred FROM user_credits WHERE user_credits.user_id = ? AND user_credits.type = ?) AS c;`), userID[:], expirationEndDate, userID[:], userID[:], console.Referrer)
	if err != nil {
		return nil, errs.Wrap(err)
	}
	// BUG FIX: the function previously had unnamed results, so this deferred
	// assignment to err was discarded and Close errors were silently lost.
	defer func() { err = errs.Combine(err, usageRows.Close()) }()

	usage := console.UserCreditUsage{}

	for usageRows.Next() {
		var (
			usedCreditInCents      sql.NullInt64
			availableCreditInCents sql.NullInt64
			referred               sql.NullInt64
		)
		err = usageRows.Scan(&usedCreditInCents, &availableCreditInCents, &referred)
		if err != nil {
			return nil, errs.Wrap(err)
		}

		// SQL SUM over zero rows yields NULL; NullInt64 then scans as
		// invalid with Int64 == 0, so these additions contribute nothing.
		usage.Referred += referred.Int64
		usage.UsedCredits = usage.UsedCredits.Add(currency.Cents(int(usedCreditInCents.Int64)))
		usage.AvailableCredits = usage.AvailableCredits.Add(currency.Cents(int(availableCreditInCents.Int64)))
	}

	return &usage, usageRows.Err()
}
// Create insert a new record of user credit.
//
// The credit row is only inserted while the offer still has capacity: the
// INSERT's WHERE clause compares the current count of referred credits for
// the offer against its redeemable cap. A constraint error from the insert
// is interpreted as the offer being exhausted, in which case the offer is
// marked done and ErrReachedMaxCapacity is returned.
func (c *usercredits) Create(ctx context.Context, userCredit console.CreateCredit) (err error) {
// Reject credits that are already expired at creation time.
if userCredit.ExpiresAt.Before(time.Now().UTC()) {
return errs.New("user credit is already expired")
}
var referrerID []byte
if userCredit.ReferredBy != nil {
referrerID = userCredit.ReferredBy[:]
}
// Partner offers bypass the capacity check entirely (shouldCreate=false
// makes the COALESCE fallback below decide); other offers insert
// unconditionally only while the offer status is the default one.
var shouldCreate bool
switch userCredit.OfferInfo.Type {
case rewards.Partner:
shouldCreate = false
default:
shouldCreate = userCredit.OfferInfo.Status.IsDefault()
}
// Run against the surrounding transaction when one is attached to this
// usercredits instance, otherwise directly against the database.
var dbExec interface {
ExecContext(ctx context.Context, query string, args ...interface{}) (sql.Result, error)
}
if c.tx != nil {
dbExec = c.tx.Tx
} else {
dbExec = c.db.DB
}
var (
result sql.Result
statement string
)
// NULLIF(referrerID, empty bytea) stores NULL instead of an empty referrer;
// new([]byte) below supplies that empty comparison value.
// COALESCE(count < NULLIF(cap, 0), shouldCreate): when the cap is 0 the
// comparison is NULL and shouldCreate decides whether to insert.
statement = `
INSERT INTO user_credits (user_id, offer_id, credits_earned_in_cents, credits_used_in_cents, expires_at, referred_by, type, created_at)
SELECT * FROM (VALUES (?::bytea, ?::int, ?::int, 0, ?::timestamp, NULLIF(?::bytea, ?::bytea), ?::text, now())) AS v
WHERE COALESCE((SELECT COUNT(offer_id) FROM user_credits WHERE offer_id = ? AND referred_by IS NOT NULL ) < NULLIF(?, 0), ?);
`
result, err = dbExec.ExecContext(ctx, c.db.Rebind(statement),
userCredit.UserID[:],
userCredit.OfferID,
userCredit.CreditsEarned.Cents(),
userCredit.ExpiresAt, referrerID, new([]byte),
userCredit.Type,
userCredit.OfferID,
userCredit.OfferInfo.RedeemableCap, shouldCreate)
if err != nil {
// check to see if there's a constraint error
if pgutil.IsConstraintError(err) {
// NOTE(review): `SET status = ? AND expires_at = ?` likely parses as a
// single boolean assignment to status rather than two assignments;
// a comma was probably intended — verify against the offers schema.
_, err := dbExec.ExecContext(ctx, c.db.Rebind(`UPDATE offers SET status = ? AND expires_at = ? WHERE id = ?`), rewards.Done, time.Now().UTC(), userCredit.OfferID)
if err != nil {
return errs.Wrap(err)
}
return rewards.ErrReachedMaxCapacity.Wrap(err)
}
return errs.Wrap(err)
}
// The WHERE clause filtering out the insert is not an error at the SQL
// level, so capacity exhaustion surfaces here as zero affected rows.
rows, err := result.RowsAffected()
if err != nil {
return errs.Wrap(err)
}
if rows != 1 {
return rewards.ErrReachedMaxCapacity.New("failed to create new credit")
}
return nil
}
// UpdateEarnedCredits updates user credits after user activated their account.
//
// It copies the invitee credit amount from the originating offer into every
// credit row of the user that has not been granted yet, and requires exactly
// one row to have been updated.
func (c *usercredits) UpdateEarnedCredits(ctx context.Context, userID uuid.UUID) error {
	statement := `
UPDATE user_credits SET credits_earned_in_cents = offers.invitee_credit_in_cents
FROM offers
WHERE user_id = ? AND credits_earned_in_cents = 0 AND offer_id = offers.id
`
	res, err := c.db.DB.ExecContext(ctx, c.db.Rebind(statement), userID[:])
	if err != nil {
		return err
	}

	rowCount, err := res.RowsAffected()
	if err != nil {
		return err
	}
	if rowCount == 1 {
		return nil
	}
	return console.NoCreditForUpdateErr.New("row affected: %d", rowCount)
}
// UpdateAvailableCredits updates user's available credits based on their spending and the time of their spending.
//
// It walks the user's unexpired, not-fully-used credits in expiry order,
// consuming creditsToCharge across them, and returns whatever charge could
// not be covered by the available credits.
func (c *usercredits) UpdateAvailableCredits(ctx context.Context, creditsToCharge int, id uuid.UUID, expirationEndDate time.Time) (remainingCharge int, err error) {
	err = c.db.WithTx(ctx, func(ctx context.Context, tx *dbx.Tx) (err error) {
		availableCredits, err := tx.All_UserCredit_By_UserId_And_ExpiresAt_Greater_And_CreditsUsedInCents_Less_CreditsEarnedInCents_OrderBy_Asc_ExpiresAt(ctx,
			dbx.UserCredit_UserId(id[:]),
			dbx.UserCredit_ExpiresAt(expirationEndDate),
		)
		if err != nil {
			return err
		}
		if len(availableCredits) == 0 {
			return errs.New("No available credits")
		}

		// values holds (id, amount) pairs feeding the generated CASE
		// expression; rowIds feeds its trailing WHERE id IN (...) clause.
		values := make([]interface{}, len(availableCredits)*2)
		rowIds := make([]interface{}, len(availableCredits))

		remainingCharge = creditsToCharge
		for i, credit := range availableCredits {
			if remainingCharge == 0 {
				break
			}

			creditsForUpdateInCents := credit.CreditsEarnedInCents - credit.CreditsUsedInCents
			if remainingCharge < creditsForUpdateInCents {
				creditsForUpdateInCents = remainingCharge
			}

			// BUG FIX: each credit needs its own pair of slots. The previous
			// values[i%2] / values[(i%2 + 1)] indexing overwrote the first
			// pairs whenever more than two credits were charged, corrupting
			// the generated UPDATE's bind parameters.
			values[i*2] = credit.Id
			values[i*2+1] = creditsForUpdateInCents
			rowIds[i] = credit.Id

			remainingCharge -= creditsForUpdateInCents
		}

		values = append(values, rowIds...)

		statement := generateQuery(len(availableCredits), true)
		_, err = tx.Tx.ExecContext(ctx, c.db.Rebind(`UPDATE user_credits SET credits_used_in_cents = CASE `+statement), values...)
		return err
	})
	if err != nil {
		return creditsToCharge, errs.Wrap(err)
	}
	return remainingCharge, nil
}
// generateQuery builds the tail of a `UPDATE ... SET col = CASE` statement:
// one `WHEN id=? THEN ?` arm per row (cast to int when toInt is set),
// terminated by `END WHERE id IN (?, ..., ?);` with one placeholder per row.
// A non-positive totalRows yields the empty string.
func generateQuery(totalRows int, toInt bool) (query string) {
	if totalRows < 1 {
		return ``
	}

	condition := `WHEN id=? THEN ? `
	if toInt {
		condition = `WHEN id=? THEN ?::int `
	}

	// All rows except the last contribute both a CASE arm and an IN-list
	// placeholder; the final row closes the CASE and the IN list.
	placeholders := ``
	for i := 0; i < totalRows-1; i++ {
		query += condition
		placeholders += `?, `
	}
	query += condition + ` END ` + `WHERE id IN (` + placeholders + ` ?);`
	return query
}

View File

@ -54,7 +54,8 @@ func (users *users) Insert(ctx context.Context, user *console.User) (_ *console.
}
optional := dbx.User_Create_Fields{
ShortName: dbx.User_ShortName(user.ShortName),
ShortName: dbx.User_ShortName(user.ShortName),
IsProfessional: dbx.User_IsProfessional(user.IsProfessional),
}
if !user.PartnerID.IsZero() {
optional.PartnerId = dbx.User_PartnerId(user.PartnerID[:])
@ -62,6 +63,12 @@ func (users *users) Insert(ctx context.Context, user *console.User) (_ *console.
if user.ProjectLimit != 0 {
optional.ProjectLimit = dbx.User_ProjectLimit(user.ProjectLimit)
}
if user.IsProfessional {
optional.Position = dbx.User_Position(user.Position)
optional.CompanyName = dbx.User_CompanyName(user.CompanyName)
optional.CompanySize = dbx.User_CompanySize(user.CompanySize)
optional.WorkingOn = dbx.User_WorkingOn(user.WorkingOn)
}
createdUser, err := users.db.Create_User(ctx,
dbx.User_Id(user.ID[:]),
@ -143,13 +150,14 @@ func userFromDBX(ctx context.Context, user *dbx.User) (_ *console.User, err erro
}
result := console.User{
ID: id,
FullName: user.FullName,
Email: user.Email,
PasswordHash: user.PasswordHash,
Status: console.UserStatus(user.Status),
CreatedAt: user.CreatedAt,
ProjectLimit: user.ProjectLimit,
ID: id,
FullName: user.FullName,
Email: user.Email,
PasswordHash: user.PasswordHash,
Status: console.UserStatus(user.Status),
CreatedAt: user.CreatedAt,
ProjectLimit: user.ProjectLimit,
IsProfessional: user.IsProfessional,
}
if user.PartnerId != nil {
@ -163,6 +171,22 @@ func userFromDBX(ctx context.Context, user *dbx.User) (_ *console.User, err erro
result.ShortName = *user.ShortName
}
if user.Position != nil {
result.Position = *user.Position
}
if user.CompanyName != nil {
result.CompanyName = *user.CompanyName
}
if user.CompanySize != nil {
result.CompanySize = *user.CompanySize
}
if user.WorkingOn != nil {
result.WorkingOn = *user.WorkingOn
}
return &result, nil
}

View File

@ -301,15 +301,6 @@ identity.key-path: /root/.local/share/storj/identity/satellite/identity.key
# uri which is used when retrieving new access token
# mail.token-uri: ""
# server address of the marketing Admin GUI
# marketing.address: 127.0.0.1:8090
# base url for marketing Admin GUI
# marketing.base-url: ""
# path to static resources
# marketing.static-dir: ""
# the database connection string to use
# metainfo.database-url: postgres://
@ -568,9 +559,6 @@ identity.key-path: /root/.local/share/storj/identity/satellite/identity.key
# how long to cache the project limits.
# project-limit.cache-expiration: 10m0s
# the URL for referral manager
# referrals.referral-manager-url: ""
# time limit for downloading pieces from a node for repair
# repairer.download-timeout: 5m0s

View File

@ -270,7 +270,12 @@ setup_stage "${test_dir}" "${stage2_sat_version}" "${stage2_storagenode_versions
echo -e "\nRunning stage 2."
# Starting old satellite api in the background
old_api_cmd="${test_dir}/local-network/satellite/0/old_satellite run api --config-dir ${test_dir}/local-network/satellite/0/ --debug.addr 127.0.0.1:30009 --server.address 127.0.0.1:30000 --server.private-address 127.0.0.1:30001 --console.address 127.0.0.1:30002 --marketing.address 127.0.0.1:30003"
has_marketing_server=$(echo $stage1_sat_version | awk 'BEGIN{FS="[v.]"} ($2 == 1 && $3 <= 22) || $2 == 0 {print $0}')
if [ "$has_marketing_server" != "" ]; then
old_api_cmd="${test_dir}/local-network/satellite/0/old_satellite run api --config-dir ${test_dir}/local-network/satellite/0/ --debug.addr 127.0.0.1:30009 --server.address 127.0.0.1:30000 --server.private-address 127.0.0.1:30001 --console.address 127.0.0.1:30002 --marketing.address 127.0.0.1:30003 --marketing.static-dir $(version_dir ${stage1_sat_version})/web/marketing/"
else
old_api_cmd="${test_dir}/local-network/satellite/0/old_satellite run api --config-dir ${test_dir}/local-network/satellite/0/ --debug.addr 127.0.0.1:30009 --server.address 127.0.0.1:30000 --server.private-address 127.0.0.1:30001 --console.address 127.0.0.1:30002"
fi
nohup $old_api_cmd &
# Storing the background process' PID.
old_api_pid=$!
@ -291,8 +296,7 @@ for ul_version in ${stage2_uplink_versions}; do
ln -f ${src_ul_version_dir}/bin/uplink $test_dir/bin/uplink
PATH=$test_dir/bin:$PATH storj-sim -x --host "${STORJ_NETWORK_HOST4}" --config-dir "${test_dir}/local-network" network test bash "${scriptdir}/test-rolling-upgrade.sh" "${test_dir}/local-network" "${stage1_uplink_version}" "$update_access_script_path"
if [[ $ul_version == $current_commit ]]
then
if [[ $ul_version == $current_commit ]];then
echo "Running final upload/download test on $current_commit"
PATH=$test_dir/bin:$PATH storj-sim -x --host "${STORJ_NETWORK_HOST4}" --config-dir "${test_dir}/local-network" network test bash "${scriptdir}/test-rolling-upgrade-final-upload.sh" "${test_dir}/local-network"
fi

View File

@ -24,7 +24,7 @@ var _ = time.Kitchen
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
type GetNonExitingSatellitesRequest struct {
XXX_NoUnkeyedLiteral struct{} `json:"-"`

View File

@ -24,7 +24,7 @@ var _ = time.Kitchen
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
type StatsRequest struct {
XXX_NoUnkeyedLiteral struct{} `json:"-"`

View File

@ -51,3 +51,32 @@ func (payout *PayoutEndpoint) Earned(ctx context.Context, req *multinodepb.Earne
Total: earned,
}, nil
}
// EarnedPerSatellite returns total earned amount per satellite.
//
// It authenticates the request, fetches every satellite that has ever paid
// this node, and reports the accumulated earnings for each of them.
func (payout *PayoutEndpoint) EarnedPerSatellite(ctx context.Context, req *multinodepb.EarnedPerSatelliteRequest) (_ *multinodepb.EarnedPerSatelliteResponse, err error) {
	defer mon.Task()(&ctx)(&err)

	if err = authenticate(ctx, payout.apiKeys, req.GetHeader()); err != nil {
		return nil, rpcstatus.Wrap(rpcstatus.Unauthenticated, err)
	}

	satelliteIDs, err := payout.db.GetPayingSatellitesIDs(ctx)
	if err != nil {
		return nil, rpcstatus.Wrap(rpcstatus.Internal, err)
	}

	var resp multinodepb.EarnedPerSatelliteResponse
	for _, satelliteID := range satelliteIDs {
		earned, err := payout.db.GetEarnedAtSatellite(ctx, satelliteID)
		if err != nil {
			return nil, rpcstatus.Wrap(rpcstatus.Internal, err)
		}

		resp.EarnedSatellite = append(resp.EarnedSatellite, &multinodepb.EarnedSatellite{
			Total:       earned,
			SatelliteId: satelliteID,
		})
	}

	return &resp, nil
}

View File

@ -0,0 +1,48 @@
// Copyright (C) 2021 Storj Labs, Inc.
// See LICENSE for copying information.
package multinode_test
import (
"testing"
"github.com/stretchr/testify/require"
"go.uber.org/zap/zaptest"
"storj.io/common/testcontext"
"storj.io/common/testrand"
"storj.io/storj/private/multinodepb"
"storj.io/storj/private/testplanet"
"storj.io/storj/storagenode/apikeys"
"storj.io/storj/storagenode/multinode"
"storj.io/storj/storagenode/payouts"
)
// TestEarnedPerSatellite stores a single paystub for a random satellite and
// verifies that the multinode payout endpoint reports its CompAtRest amount
// as that satellite's total, using a freshly issued API key for auth.
func TestEarnedPerSatellite(t *testing.T) {
testplanet.Run(t, testplanet.Config{
StorageNodeCount: 1,
}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
log := zaptest.NewLogger(t)
service := apikeys.NewService(planet.StorageNodes[0].DB.APIKeys())
endpoint := multinode.NewPayoutEndpoint(log, service, planet.StorageNodes[0].DB.Payout())
// Seed one paystub; only CompAtRest is set, so the expected total is amount.
var amount int64 = 200
err := planet.StorageNodes[0].DB.Payout().StorePayStub(ctx, payouts.PayStub{
SatelliteID: testrand.NodeID(),
CompAtRest: amount,
})
require.NoError(t, err)
// The endpoint authenticates via the request header, so issue a real key.
key, err := service.Issue(ctx)
require.NoError(t, err)
response, err := endpoint.EarnedPerSatellite(ctx, &multinodepb.EarnedPerSatelliteRequest{
Header: &multinodepb.RequestHeader{
ApiKey: key.Secret[:],
},
})
require.NoError(t, err)
require.Equal(t, response.EarnedSatellite[0].Total, amount)
})
}

View File

@ -7,7 +7,7 @@ import (
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"storj.io/common/identity/testidentity"
"storj.io/common/storj"
@ -51,43 +51,43 @@ func TestNotificationsDB(t *testing.T) {
}
notificationFromDB0, err := notificationsdb.Insert(ctx, expectedNotification0)
assert.NoError(t, err)
assert.Equal(t, expectedNotification0.SenderID, notificationFromDB0.SenderID)
assert.Equal(t, expectedNotification0.Type, notificationFromDB0.Type)
assert.Equal(t, expectedNotification0.Title, notificationFromDB0.Title)
assert.Equal(t, expectedNotification0.Message, notificationFromDB0.Message)
require.NoError(t, err)
require.Equal(t, expectedNotification0.SenderID, notificationFromDB0.SenderID)
require.Equal(t, expectedNotification0.Type, notificationFromDB0.Type)
require.Equal(t, expectedNotification0.Title, notificationFromDB0.Title)
require.Equal(t, expectedNotification0.Message, notificationFromDB0.Message)
// Ensure that every insert gets a different "created at" time.
waitForTimeToChange()
notificationFromDB1, err := notificationsdb.Insert(ctx, expectedNotification1)
assert.NoError(t, err)
assert.Equal(t, expectedNotification1.SenderID, notificationFromDB1.SenderID)
assert.Equal(t, expectedNotification1.Type, notificationFromDB1.Type)
assert.Equal(t, expectedNotification1.Title, notificationFromDB1.Title)
assert.Equal(t, expectedNotification1.Message, notificationFromDB1.Message)
require.NoError(t, err)
require.Equal(t, expectedNotification1.SenderID, notificationFromDB1.SenderID)
require.Equal(t, expectedNotification1.Type, notificationFromDB1.Type)
require.Equal(t, expectedNotification1.Title, notificationFromDB1.Title)
require.Equal(t, expectedNotification1.Message, notificationFromDB1.Message)
waitForTimeToChange()
notificationFromDB2, err := notificationsdb.Insert(ctx, expectedNotification2)
assert.NoError(t, err)
assert.Equal(t, expectedNotification2.SenderID, notificationFromDB2.SenderID)
assert.Equal(t, expectedNotification2.Type, notificationFromDB2.Type)
assert.Equal(t, expectedNotification2.Title, notificationFromDB2.Title)
assert.Equal(t, expectedNotification2.Message, notificationFromDB2.Message)
require.NoError(t, err)
require.Equal(t, expectedNotification2.SenderID, notificationFromDB2.SenderID)
require.Equal(t, expectedNotification2.Type, notificationFromDB2.Type)
require.Equal(t, expectedNotification2.Title, notificationFromDB2.Title)
require.Equal(t, expectedNotification2.Message, notificationFromDB2.Message)
page := notifications.Page{}
// test List method to return right form of page depending on cursor.
t.Run("test paged list", func(t *testing.T) {
page, err = notificationsdb.List(ctx, notificationCursor)
assert.NoError(t, err)
assert.Equal(t, 2, len(page.Notifications))
assert.Equal(t, notificationFromDB1, page.Notifications[1])
assert.Equal(t, notificationFromDB2, page.Notifications[0])
assert.Equal(t, notificationCursor.Limit, page.Limit)
assert.Equal(t, uint64(0), page.Offset)
assert.Equal(t, uint(2), page.PageCount)
assert.Equal(t, uint64(3), page.TotalCount)
assert.Equal(t, uint(1), page.CurrentPage)
require.NoError(t, err)
require.Equal(t, 2, len(page.Notifications))
require.Equal(t, notificationFromDB1, page.Notifications[1])
require.Equal(t, notificationFromDB2, page.Notifications[0])
require.Equal(t, notificationCursor.Limit, page.Limit)
require.Equal(t, uint64(0), page.Offset)
require.Equal(t, uint(2), page.PageCount)
require.Equal(t, uint64(3), page.TotalCount)
require.Equal(t, uint(1), page.CurrentPage)
})
notificationCursor = notifications.Cursor{
@ -98,32 +98,32 @@ func TestNotificationsDB(t *testing.T) {
// test Read method to make specific notification's status as read.
t.Run("test notification read", func(t *testing.T) {
err = notificationsdb.Read(ctx, notificationFromDB0.ID)
assert.NoError(t, err)
require.NoError(t, err)
page, err = notificationsdb.List(ctx, notificationCursor)
assert.NoError(t, err)
assert.NotEqual(t, page.Notifications[2].ReadAt, (*time.Time)(nil))
require.NoError(t, err)
require.NotEqual(t, page.Notifications[2].ReadAt, (*time.Time)(nil))
err = notificationsdb.Read(ctx, notificationFromDB1.ID)
assert.NoError(t, err)
require.NoError(t, err)
page, err = notificationsdb.List(ctx, notificationCursor)
assert.NoError(t, err)
assert.NotEqual(t, page.Notifications[1].ReadAt, (*time.Time)(nil))
require.NoError(t, err)
require.NotEqual(t, page.Notifications[1].ReadAt, (*time.Time)(nil))
assert.Equal(t, page.Notifications[0].ReadAt, (*time.Time)(nil))
require.Equal(t, page.Notifications[0].ReadAt, (*time.Time)(nil))
})
// test ReadAll method to make all notifications' status as read.
t.Run("test notification read all", func(t *testing.T) {
err = notificationsdb.ReadAll(ctx)
assert.NoError(t, err)
require.NoError(t, err)
page, err = notificationsdb.List(ctx, notificationCursor)
assert.NoError(t, err)
assert.NotEqual(t, page.Notifications[2].ReadAt, (*time.Time)(nil))
assert.NotEqual(t, page.Notifications[1].ReadAt, (*time.Time)(nil))
assert.NotEqual(t, page.Notifications[0].ReadAt, (*time.Time)(nil))
require.NoError(t, err)
require.NotEqual(t, page.Notifications[2].ReadAt, (*time.Time)(nil))
require.NotEqual(t, page.Notifications[1].ReadAt, (*time.Time)(nil))
require.NotEqual(t, page.Notifications[0].ReadAt, (*time.Time)(nil))
})
})
}
@ -140,25 +140,25 @@ func TestEmptyNotificationsDB(t *testing.T) {
// test List method to return right form of page depending on cursor with empty database.
t.Run("test empty paged list", func(t *testing.T) {
page, err := notificationsdb.List(ctx, notificationCursor)
assert.NoError(t, err)
assert.Equal(t, len(page.Notifications), 0)
assert.Equal(t, page.Limit, notificationCursor.Limit)
assert.Equal(t, page.Offset, uint64(0))
assert.Equal(t, page.PageCount, uint(0))
assert.Equal(t, page.TotalCount, uint64(0))
assert.Equal(t, page.CurrentPage, uint(0))
require.NoError(t, err)
require.Equal(t, len(page.Notifications), 0)
require.Equal(t, page.Limit, notificationCursor.Limit)
require.Equal(t, page.Offset, uint64(0))
require.Equal(t, page.PageCount, uint(0))
require.Equal(t, page.TotalCount, uint64(0))
require.Equal(t, page.CurrentPage, uint(0))
})
// test notification read with not existing id.
t.Run("test notification read with not existing id", func(t *testing.T) {
err := notificationsdb.Read(ctx, testrand.UUID())
assert.Error(t, err, "no rows affected")
require.Error(t, err, "no rows affected")
})
// test read for all notifications if they don't exist.
t.Run("test notification readAll on empty page", func(t *testing.T) {
err := notificationsdb.ReadAll(ctx)
assert.NoError(t, err)
require.NoError(t, err)
})
})
}

View File

@ -6,14 +6,19 @@ package storagenode
import (
"fmt"
"regexp"
"strings"
"github.com/spf13/pflag"
"go.uber.org/zap"
"storj.io/storj/private/nodeoperator"
)
// OperatorConfig defines properties related to storage node operator metadata.
type OperatorConfig struct {
Email string `user:"true" help:"operator email address" default:""`
Wallet string `user:"true" help:"operator wallet address" default:""`
Email string `user:"true" help:"operator email address" default:""`
Wallet string `user:"true" help:"operator wallet address" default:""`
WalletFeatures WalletFeatures `user:"true" help:"operator wallet features" default:""`
}
// Verify verifies whether operator config is valid.
@ -24,6 +29,9 @@ func (c OperatorConfig) Verify(log *zap.Logger) error {
if err := isOperatorWalletValid(log, c.Wallet); err != nil {
return err
}
if err := isOperatorWalletFeaturesValid(log, c.WalletFeatures); err != nil {
return err
}
return nil
}
@ -48,3 +56,32 @@ func isOperatorWalletValid(log *zap.Logger, wallet string) error {
log.Info("Operator wallet", zap.String("Address", wallet))
return nil
}
// isOperatorWalletFeaturesValid checks if wallet features list does not exceed length limits.
// It delegates entirely to the default node-operator wallet-features validation.
// NOTE(review): the log parameter is currently unused here, presumably kept for
// signature symmetry with the other isOperator* validators — confirm before removing.
func isOperatorWalletFeaturesValid(log *zap.Logger, features WalletFeatures) error {
return nodeoperator.DefaultWalletFeaturesValidation.Validate(features)
}
// ensure WalletFeatures implements pflag.Value.
var _ pflag.Value = (*WalletFeatures)(nil)
// WalletFeatures payout opt-in wallet features list.
type WalletFeatures []string

// String returns the comma separated list of wallet features.
func (features WalletFeatures) String() string {
	return strings.Join(features, ",")
}

// Set implements pflag.Value by parsing a comma separated list of wallet features.
// An empty value leaves the list untouched.
func (features *WalletFeatures) Set(value string) error {
	if value == "" {
		return nil
	}
	*features = strings.Split(value, ",")
	return nil
}

// Type returns the type of the pflag.Value.
func (features WalletFeatures) Type() string {
	return "wallet-features"
}

View File

@ -314,3 +314,97 @@ func TestAllPayStubPeriodCached(t *testing.T) {
require.Equal(t, 0, len(payStubs))
})
}
// TestPayouts exercises the payout database helpers behind the multinode API:
// GetPayingSatellitesIDs (the distinct satellites with stored paystubs) and
// GetEarnedAtSatellite (accumulated earnings for one satellite).
//
// Fix over the previous version: require.NoError now runs BEFORE the value
// assertions on the same call, so a query failure reports the real error
// instead of a confusing value mismatch; testify's (expected, actual)
// argument order is also respected.
func TestPayouts(t *testing.T) {
	storagenodedbtest.Run(t, func(ctx *testcontext.Context, t *testing.T, db storagenode.DB) {
		payout := db.Payout()

		t.Run("Test SatelliteIDs", func(t *testing.T) {
			id1 := storj.NodeID{1, 2, 3}
			id2 := storj.NodeID{2, 3, 4}
			id3 := storj.NodeID{3, 3, 3}

			// Store paystubs with duplicate satellite IDs to verify the
			// listing is de-duplicated down to the three distinct IDs.
			for _, id := range []storj.NodeID{id1, id1, id2, id3, id3, id2} {
				err := payout.StorePayStub(ctx, payouts.PayStub{
					SatelliteID: id,
				})
				require.NoError(t, err)
			}

			listIDs, err := payout.GetPayingSatellitesIDs(ctx)
			require.NoError(t, err)
			require.Len(t, listIDs, 3)
		})

		t.Run("Test GetSatelliteEarned", func(t *testing.T) {
			id1 := storj.NodeID{1, 2, 3}
			id2 := storj.NodeID{2, 3, 4}
			id3 := storj.NodeID{3, 3, 3}

			// Per satellite, earned = sum of CompGet + CompAtRest over all
			// periods: id1 -> 11+11+22+22 = 66, id2 -> 33+33+66+66 = 198,
			// id3 -> 44+44+55+55 = 198.
			paystubs := []payouts.PayStub{
				{Period: "2020-11", SatelliteID: id1, CompGet: 11, CompAtRest: 11},
				{Period: "2020-12", SatelliteID: id1, CompGet: 22, CompAtRest: 22},
				{Period: "2020-11", SatelliteID: id2, CompGet: 33, CompAtRest: 33},
				{Period: "2020-10", SatelliteID: id3, CompGet: 44, CompAtRest: 44},
				{Period: "2020-11", SatelliteID: id3, CompGet: 55, CompAtRest: 55},
				{Period: "2020-10", SatelliteID: id2, CompGet: 66, CompAtRest: 66},
			}
			for _, stub := range paystubs {
				require.NoError(t, payout.StorePayStub(ctx, stub))
			}

			satellite1Earned, err := payout.GetEarnedAtSatellite(ctx, id1)
			require.NoError(t, err)
			require.EqualValues(t, 66, satellite1Earned)

			satellite2Earned, err := payout.GetEarnedAtSatellite(ctx, id2)
			require.NoError(t, err)
			require.EqualValues(t, 198, satellite2Earned)

			satellite3Earned, err := payout.GetEarnedAtSatellite(ctx, id3)
			require.NoError(t, err)
			require.EqualValues(t, 198, satellite3Earned)
		})
	})
}

View File

@ -36,6 +36,10 @@ type DB interface {
GetReceipt(ctx context.Context, satelliteID storj.NodeID, period string) (string, error)
// GetTotalEarned returns total earned amount of node from all paystubs.
GetTotalEarned(ctx context.Context) (_ int64, err error)
// GetEarnedAtSatellite returns total earned value for node from specific satellite.
GetEarnedAtSatellite(ctx context.Context, id storj.NodeID) (int64, error)
// GetPayingSatellitesIDs returns list of satellite ID's that ever paid to storagenode.
GetPayingSatellitesIDs(ctx context.Context) ([]storj.NodeID, error)
}
// ErrNoPayStubForPeriod represents errors from the payouts database.

View File

@ -403,8 +403,9 @@ func New(log *zap.Logger, full *identity.FullIdentity, db DB, revocationDB exten
ID: peer.ID(),
Address: c.ExternalAddress,
Operator: pb.NodeOperator{
Email: config.Operator.Email,
Wallet: config.Operator.Wallet,
Email: config.Operator.Email,
Wallet: config.Operator.Wallet,
WalletFeatures: config.Operator.WalletFeatures,
},
Version: *pbVersion,
}
@ -422,7 +423,6 @@ func New(log *zap.Logger, full *identity.FullIdentity, db DB, revocationDB exten
if err := pb.DRPCRegisterContact(peer.Server.DRPC(), peer.Contact.Endpoint); err != nil {
return nil, errs.Combine(err, peer.Close())
}
}
{ // setup storage

View File

@ -7,13 +7,15 @@ import (
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"go.uber.org/zap/zaptest"
"storj.io/common/pb"
"storj.io/common/storj"
"storj.io/common/testcontext"
"storj.io/common/testrand"
"storj.io/storj/storagenode"
"storj.io/storj/storagenode/notifications"
"storj.io/storj/storagenode/reputation"
"storj.io/storj/storagenode/storagenodedb/storagenodedbtest"
)
@ -46,22 +48,22 @@ func TestReputationDBGetInsert(t *testing.T) {
t.Run("insert", func(t *testing.T) {
err := reputationDB.Store(ctx, stats)
assert.NoError(t, err)
require.NoError(t, err)
})
t.Run("get", func(t *testing.T) {
res, err := reputationDB.Get(ctx, stats.SatelliteID)
assert.NoError(t, err)
require.NoError(t, err)
assert.Equal(t, res.SatelliteID, stats.SatelliteID)
assert.True(t, res.DisqualifiedAt.Equal(*stats.DisqualifiedAt))
assert.True(t, res.SuspendedAt.Equal(*stats.SuspendedAt))
assert.True(t, res.UpdatedAt.Equal(stats.UpdatedAt))
assert.True(t, res.JoinedAt.Equal(stats.JoinedAt))
assert.True(t, res.OfflineSuspendedAt.Equal(*stats.OfflineSuspendedAt))
assert.True(t, res.OfflineUnderReviewAt.Equal(*stats.OfflineUnderReviewAt))
assert.Equal(t, res.OnlineScore, stats.OnlineScore)
assert.Nil(t, res.AuditHistory)
require.Equal(t, res.SatelliteID, stats.SatelliteID)
require.True(t, res.DisqualifiedAt.Equal(*stats.DisqualifiedAt))
require.True(t, res.SuspendedAt.Equal(*stats.SuspendedAt))
require.True(t, res.UpdatedAt.Equal(stats.UpdatedAt))
require.True(t, res.JoinedAt.Equal(stats.JoinedAt))
require.True(t, res.OfflineSuspendedAt.Equal(*stats.OfflineSuspendedAt))
require.True(t, res.OfflineUnderReviewAt.Equal(*stats.OfflineUnderReviewAt))
require.Equal(t, res.OnlineScore, stats.OnlineScore)
require.Nil(t, res.AuditHistory)
compareReputationMetric(t, &res.Audit, &stats.Audit)
})
@ -105,22 +107,22 @@ func TestReputationDBGetAll(t *testing.T) {
}
res, err := reputationDB.All(ctx)
assert.NoError(t, err)
assert.NotNil(t, res)
assert.Equal(t, len(stats), len(res))
require.NoError(t, err)
require.NotNil(t, res)
require.Equal(t, len(stats), len(res))
for _, rep := range res {
assert.Contains(t, stats, rep)
require.Contains(t, stats, rep)
if rep.SatelliteID == stats[0].SatelliteID {
assert.Equal(t, rep.DisqualifiedAt, stats[0].DisqualifiedAt)
assert.Equal(t, rep.SuspendedAt, stats[0].SuspendedAt)
assert.Equal(t, rep.UpdatedAt, stats[0].UpdatedAt)
assert.Equal(t, rep.JoinedAt, stats[0].JoinedAt)
assert.Equal(t, rep.OfflineSuspendedAt, stats[0].OfflineSuspendedAt)
assert.Equal(t, rep.OfflineUnderReviewAt, stats[0].OfflineUnderReviewAt)
assert.Equal(t, rep.OnlineScore, stats[0].OnlineScore)
assert.Nil(t, rep.AuditHistory)
require.Equal(t, rep.DisqualifiedAt, stats[0].DisqualifiedAt)
require.Equal(t, rep.SuspendedAt, stats[0].SuspendedAt)
require.Equal(t, rep.UpdatedAt, stats[0].UpdatedAt)
require.Equal(t, rep.JoinedAt, stats[0].JoinedAt)
require.Equal(t, rep.OfflineSuspendedAt, stats[0].OfflineSuspendedAt)
require.Equal(t, rep.OfflineUnderReviewAt, stats[0].OfflineUnderReviewAt)
require.Equal(t, rep.OnlineScore, stats[0].OnlineScore)
require.Nil(t, rep.AuditHistory)
compareReputationMetric(t, &rep.Audit, &stats[0].Audit)
}
@ -130,11 +132,11 @@ func TestReputationDBGetAll(t *testing.T) {
// compareReputationMetric compares two reputation metrics and asserts that they are equal.
func compareReputationMetric(t *testing.T, a, b *reputation.Metric) {
assert.Equal(t, a.SuccessCount, b.SuccessCount)
assert.Equal(t, a.TotalCount, b.TotalCount)
assert.Equal(t, a.Alpha, b.Alpha)
assert.Equal(t, a.Beta, b.Beta)
assert.Equal(t, a.Score, b.Score)
require.Equal(t, a.SuccessCount, b.SuccessCount)
require.Equal(t, a.TotalCount, b.TotalCount)
require.Equal(t, a.Alpha, b.Alpha)
require.Equal(t, a.Beta, b.Beta)
require.Equal(t, a.Score, b.Score)
}
func TestReputationDBGetInsertAuditHistory(t *testing.T) {
@ -159,20 +161,136 @@ func TestReputationDBGetInsertAuditHistory(t *testing.T) {
t.Run("insert", func(t *testing.T) {
err := reputationDB.Store(ctx, stats)
assert.NoError(t, err)
require.NoError(t, err)
})
t.Run("get", func(t *testing.T) {
res, err := reputationDB.Get(ctx, stats.SatelliteID)
assert.NoError(t, err)
require.NoError(t, err)
assert.Equal(t, res.AuditHistory.Score, stats.AuditHistory.Score)
assert.Equal(t, len(res.AuditHistory.Windows), len(stats.AuditHistory.Windows))
require.Equal(t, res.AuditHistory.Score, stats.AuditHistory.Score)
require.Equal(t, len(res.AuditHistory.Windows), len(stats.AuditHistory.Windows))
resWindow := res.AuditHistory.Windows[0]
statsWindow := stats.AuditHistory.Windows[0]
assert.True(t, resWindow.WindowStart.Equal(statsWindow.WindowStart))
assert.Equal(t, resWindow.TotalCount, statsWindow.TotalCount)
assert.Equal(t, resWindow.OnlineCount, statsWindow.OnlineCount)
require.True(t, resWindow.WindowStart.Equal(statsWindow.WindowStart))
require.Equal(t, resWindow.TotalCount, statsWindow.TotalCount)
require.Equal(t, resWindow.OnlineCount, statsWindow.OnlineCount)
})
})
}
// TestServiceStore verifies that reputation.Service.Store persists stats and
// adds an unread notification exactly when a new offline suspension appears:
// none while the node is disqualified, none when the suspension timestamp is
// unchanged, and a new one when the timestamp changes or another satellite
// suspends the node.
func TestServiceStore(t *testing.T) {
	storagenodedbtest.Run(t, func(ctx *testcontext.Context, t *testing.T, db storagenode.DB) {
		reputationDB := db.Reputation()
		notificationsDB := db.Notifications()
		log := zaptest.NewLogger(t)

		notificationService := notifications.NewService(log, notificationsDB)

		reputationService := reputation.NewService(log, reputationDB, storj.NodeID{}, notificationService)

		id := testrand.NodeID()

		now := time.Now().AddDate(0, 0, -2)
		later := time.Now().AddDate(0, 0, -1)

		// Seed stats with no suspension; storing directly to the DB must not notify.
		stats := reputation.Stats{
			SatelliteID: id,
		}

		err := reputationDB.Store(ctx, stats)
		require.NoError(t, err)

		// First suspension timestamp -> one notification.
		// NOTE: require.Equal takes (expected, actual); the constant goes first.
		statsNew := reputation.Stats{
			SatelliteID:        id,
			OfflineSuspendedAt: &now,
		}

		err = reputationService.Store(ctx, statsNew, id)
		require.NoError(t, err)

		amount, err := notificationsDB.UnreadAmount(ctx)
		require.NoError(t, err)
		require.Equal(t, 1, amount)

		// Changed suspension timestamp -> another notification.
		statsNew = reputation.Stats{
			SatelliteID:        id,
			OfflineSuspendedAt: &later,
		}

		err = reputationService.Store(ctx, statsNew, id)
		require.NoError(t, err)

		amount, err = notificationsDB.UnreadAmount(ctx)
		require.NoError(t, err)
		require.Equal(t, 2, amount)

		// Disqualified nodes are not notified, even with an unchanged timestamp.
		statsNew = reputation.Stats{
			SatelliteID:        id,
			OfflineSuspendedAt: &later,
			DisqualifiedAt:     &later,
		}

		err = reputationService.Store(ctx, statsNew, id)
		require.NoError(t, err)

		amount, err = notificationsDB.UnreadAmount(ctx)
		require.NoError(t, err)
		require.Equal(t, 2, amount)

		// Still disqualified: a changed suspension timestamp must not notify either.
		statsNew = reputation.Stats{
			SatelliteID:        id,
			OfflineSuspendedAt: &now,
			DisqualifiedAt:     &later,
		}

		err = reputationService.Store(ctx, statsNew, id)
		require.NoError(t, err)

		amount, err = notificationsDB.UnreadAmount(ctx)
		require.NoError(t, err)
		require.Equal(t, 2, amount)

		// Disqualification lifted and the timestamp differs from the stored one -> notify.
		statsNew = reputation.Stats{
			SatelliteID:        id,
			OfflineSuspendedAt: &later,
			DisqualifiedAt:     nil,
		}

		err = reputationService.Store(ctx, statsNew, id)
		require.NoError(t, err)

		amount, err = notificationsDB.UnreadAmount(ctx)
		require.NoError(t, err)
		require.Equal(t, 3, amount)

		// A fresh suspension timestamp a month later -> notify again.
		later = later.AddDate(0, 1, 0)
		statsNew = reputation.Stats{
			SatelliteID:        id,
			OfflineSuspendedAt: &later,
		}

		err = reputationService.Store(ctx, statsNew, id)
		require.NoError(t, err)

		amount, err = notificationsDB.UnreadAmount(ctx)
		require.NoError(t, err)
		require.Equal(t, 4, amount)

		// Storing the same timestamp twice must not create a duplicate notification.
		statsNew = reputation.Stats{
			SatelliteID:        id,
			OfflineSuspendedAt: &later,
		}

		err = reputationService.Store(ctx, statsNew, id)
		require.NoError(t, err)

		amount, err = notificationsDB.UnreadAmount(ctx)
		require.NoError(t, err)
		require.Equal(t, 4, amount)

		// A suspension reported by a different satellite counts as new.
		id2 := testrand.NodeID()
		statsNew = reputation.Stats{
			SatelliteID:        id2,
			OfflineSuspendedAt: &later,
		}

		err = reputationService.Store(ctx, statsNew, id2)
		require.NoError(t, err)

		amount, err = notificationsDB.UnreadAmount(ctx)
		require.NoError(t, err)
		require.Equal(t, 5, amount)
	})
}

View File

@ -5,6 +5,7 @@ package reputation
import (
"context"
"time"
"go.uber.org/zap"
@ -35,33 +36,51 @@ func NewService(log *zap.Logger, db DB, nodeID storj.NodeID, notifications *noti
// Store stores reputation stats into db, and notify's in case of offline suspension.
func (s *Service) Store(ctx context.Context, stats Stats, satelliteID storj.NodeID) error {
if err := s.db.Store(ctx, stats); err != nil {
rep, err := s.db.Get(ctx, satelliteID)
if err != nil {
return err
}
if stats.DisqualifiedAt == nil && stats.OfflineSuspendedAt != nil {
s.notifyOfflineSuspension(ctx, satelliteID)
err = s.db.Store(ctx, stats)
if err != nil {
return err
}
if stats.DisqualifiedAt == nil && isSuspended(stats, *rep) {
notification := newSuspensionNotification(satelliteID, s.nodeID, *stats.OfflineSuspendedAt)
_, err = s.notifications.Receive(ctx, notification)
if err != nil {
s.log.Sugar().Errorf("Failed to receive notification", err.Error())
}
}
return nil
}
// NotifyOfflineSuspension notifies storagenode about offline suspension.
func (s *Service) notifyOfflineSuspension(ctx context.Context, satelliteID storj.NodeID) {
notification := NewSuspensionNotification(satelliteID, s.nodeID)
_, err := s.notifications.Receive(ctx, notification)
if err != nil {
s.log.Sugar().Errorf("Failed to receive notification", err.Error())
// isSuspended reports whether the fresh stats introduce a new offline
// suspension compared to the previously stored ones: the node either was not
// suspended before, or the suspension timestamp has changed.
func isSuspended(fresh, stored Stats) bool {
	// No suspension in the incoming stats: nothing new to report.
	if fresh.OfflineSuspendedAt == nil {
		return false
	}

	// Suspension appeared for the first time.
	if stored.OfflineSuspendedAt == nil {
		return true
	}

	// A different timestamp means a new suspension event.
	return !stored.OfflineSuspendedAt.Equal(*fresh.OfflineSuspendedAt)
}
// NewSuspensionNotification - returns offline suspension notification.
func NewSuspensionNotification(satelliteID storj.NodeID, senderID storj.NodeID) (_ notifications.NewNotification) {
// newSuspensionNotification builds the notification shown to the node operator
// when the node gets offline-suspended on the given satellite at suspendedAt.
func newSuspensionNotification(satelliteID storj.NodeID, senderID storj.NodeID, suspendedAt time.Time) (_ notifications.NewNotification) {
	return notifications.NewNotification{
		SenderID: senderID,
		Type:     notifications.TypeSuspension,
		Title:    "Your Node was suspended " + suspendedAt.String(),
		// Space added before "Satellite" so the satellite ID and the word do
		// not run together in the rendered message.
		Message: "This is a reminder that your StorageNode on " + satelliteID.String() + " Satellite is suspended",
	}
}

View File

@ -1901,6 +1901,76 @@ func (db *DB) Migration(ctx context.Context) *migrate.Migration {
`ALTER TABLE paystubs ADD COLUMN distributed bigint`,
},
},
{
DB: &db.payoutDB.DB,
Description: "Make distributed field in paystubs table not null",
Version: 50,
Action: migrate.Func(func(ctx context.Context, _ *zap.Logger, rdb tagsql.DB, rtx tagsql.Tx) (err error) {
_, err = rtx.Exec(ctx, `UPDATE paystubs SET distributed = ? WHERE distributed ISNULL`, 0)
if err != nil {
return errs.Wrap(err)
}
_, err = rtx.Exec(ctx, `
CREATE TABLE paystubs_new (
period text NOT NULL,
satellite_id bytea NOT NULL,
created_at timestamp NOT NULL,
codes text NOT NULL,
usage_at_rest double precision NOT NULL,
usage_get bigint NOT NULL,
usage_put bigint NOT NULL,
usage_get_repair bigint NOT NULL,
usage_put_repair bigint NOT NULL,
usage_get_audit bigint NOT NULL,
comp_at_rest bigint NOT NULL,
comp_get bigint NOT NULL,
comp_put bigint NOT NULL,
comp_get_repair bigint NOT NULL,
comp_put_repair bigint NOT NULL,
comp_get_audit bigint NOT NULL,
surge_percent bigint NOT NULL,
held bigint NOT NULL,
owed bigint NOT NULL,
disposed bigint NOT NULL,
paid bigint NOT NULL,
distributed bigint NOT NULL,
PRIMARY KEY ( period, satellite_id )
);
INSERT INTO paystubs_new SELECT
period,
satellite_id,
created_at,
codes,
usage_at_rest,
usage_get,
usage_put,
usage_get_repair,
usage_put_repair,
usage_get_audit,
comp_at_rest,
comp_get,
comp_put,
comp_get_repair,
comp_put_repair,
comp_get_audit,
surge_percent,
held,
owed,
disposed,
paid,
distributed
FROM paystubs;
DROP TABLE paystubs;
ALTER TABLE paystubs_new RENAME TO paystubs;
`)
if err != nil {
return errs.Wrap(err)
}
return nil
}),
},
},
}
}

View File

@ -430,3 +430,51 @@ func (db *payoutDB) GetTotalEarned(ctx context.Context) (_ int64, err error) {
return totalEarned, nil
}
// GetEarnedAtSatellite returns the total earned value for the node from the
// given satellite, summing the compensation fields that count as earnings
// (at-rest, egress, repair egress and audit egress) across all paystubs.
func (db *payoutDB) GetEarnedAtSatellite(ctx context.Context, id storj.NodeID) (_ int64, err error) {
	defer mon.Task()(&ctx)(&err)

	query := `SELECT comp_at_rest, comp_get, comp_get_repair, comp_get_audit FROM paystubs WHERE satellite_id = ?`

	rows, err := db.QueryContext(ctx, query, id)
	if err != nil {
		// Wrap with ErrPayout for consistency with the scan/iteration errors below.
		return 0, ErrPayout.Wrap(err)
	}

	defer func() { err = errs.Combine(err, rows.Close()) }()

	var totalEarned int64
	for rows.Next() {
		var compAtRest, compGet, compGetRepair, compGetAudit int64
		if err := rows.Scan(&compAtRest, &compGet, &compGetRepair, &compGetAudit); err != nil {
			return 0, ErrPayout.Wrap(err)
		}

		totalEarned += compAtRest + compGet + compGetRepair + compGetAudit
	}
	if err = rows.Err(); err != nil {
		return 0, ErrPayout.Wrap(err)
	}

	return totalEarned, nil
}
// GetPayingSatellitesIDs returns the list of distinct satellite IDs that have
// ever issued a paystub to this storagenode.
func (db *payoutDB) GetPayingSatellitesIDs(ctx context.Context) (_ []storj.NodeID, err error) {
	defer mon.Task()(&ctx)(&err)

	query := `SELECT DISTINCT (satellite_id) FROM paystubs`

	rows, err := db.QueryContext(ctx, query)
	if err != nil {
		// Wrap with ErrPayout for consistency with the scan/iteration errors below.
		return nil, ErrPayout.Wrap(err)
	}

	defer func() { err = errs.Combine(err, rows.Close()) }()

	var satelliteIDs []storj.NodeID
	for rows.Next() {
		var satelliteID storj.NodeID
		if err := rows.Scan(&satelliteID); err != nil {
			return nil, ErrPayout.Wrap(err)
		}

		satelliteIDs = append(satelliteIDs, satelliteID)
	}
	if err = rows.Err(); err != nil {
		return nil, ErrPayout.Wrap(err)
	}

	return satelliteIDs, nil
}

View File

@ -162,7 +162,7 @@ func Schema() map[string]*dbschema.Schema {
&dbschema.Column{
Name: "distributed",
Type: "bigint",
IsNullable: true,
IsNullable: false,
},
&dbschema.Column{
Name: "held",

View File

@ -64,6 +64,7 @@ var States = MultiDBStates{
&v47,
&v48,
&v49,
&v50,
},
}

View File

@ -0,0 +1,63 @@
// Copyright (C) 2021 Storj Labs, Inc.
// See LICENSE for copying information.
package testdata
import "storj.io/storj/storagenode/storagenodedb"
// v50 is the expected multi-database schema state after storagenode migration
// version 50. The migration recreates the paystubs table so that the
// `distributed` column is NOT NULL; every other database carries its previous
// state forward (reputation reuses v48, the last version that changed it).
var v50 = MultiDBState{
Version: 50,
DBStates: DBStates{
storagenodedb.UsedSerialsDBName: v47.DBStates[storagenodedb.UsedSerialsDBName],
storagenodedb.StorageUsageDBName: v47.DBStates[storagenodedb.StorageUsageDBName],
storagenodedb.ReputationDBName: v48.DBStates[storagenodedb.ReputationDBName],
storagenodedb.PieceSpaceUsedDBName: v47.DBStates[storagenodedb.PieceSpaceUsedDBName],
storagenodedb.PieceInfoDBName: v47.DBStates[storagenodedb.PieceInfoDBName],
storagenodedb.PieceExpirationDBName: v47.DBStates[storagenodedb.PieceExpirationDBName],
storagenodedb.OrdersDBName: v47.DBStates[storagenodedb.OrdersDBName],
storagenodedb.BandwidthDBName: v47.DBStates[storagenodedb.BandwidthDBName],
storagenodedb.SatellitesDBName: v47.DBStates[storagenodedb.SatellitesDBName],
storagenodedb.DeprecatedInfoDBName: v47.DBStates[storagenodedb.DeprecatedInfoDBName],
storagenodedb.NotificationsDBName: v47.DBStates[storagenodedb.NotificationsDBName],
// Only the heldamount database changes in v50: paystubs is rebuilt with
// `distributed bigint NOT NULL` instead of the previously nullable column.
storagenodedb.HeldAmountDBName: &DBState{
SQL: `
-- tables to hold payments and paystub data
CREATE TABLE paystubs (
period text NOT NULL,
satellite_id bytea NOT NULL,
created_at timestamp NOT NULL,
codes text NOT NULL,
usage_at_rest double precision NOT NULL,
usage_get bigint NOT NULL,
usage_put bigint NOT NULL,
usage_get_repair bigint NOT NULL,
usage_put_repair bigint NOT NULL,
usage_get_audit bigint NOT NULL,
comp_at_rest bigint NOT NULL,
comp_get bigint NOT NULL,
comp_put bigint NOT NULL,
comp_get_repair bigint NOT NULL,
comp_put_repair bigint NOT NULL,
comp_get_audit bigint NOT NULL,
surge_percent bigint NOT NULL,
held bigint NOT NULL,
owed bigint NOT NULL,
disposed bigint NOT NULL,
paid bigint NOT NULL,
distributed bigint NOT NULL,
PRIMARY KEY ( period, satellite_id )
);
CREATE TABLE payments (
id bigserial NOT NULL,
created_at timestamp NOT NULL,
satellite_id bytea NOT NULL,
period text,
amount bigint NOT NULL,
receipt text,
notes text,
PRIMARY KEY ( id )
);`,
},
storagenodedb.PricingDBName: v47.DBStates[storagenodedb.PricingDBName],
storagenodedb.APIKeysDBName: v47.DBStates[storagenodedb.APIKeysDBName]},
}

View File

@ -1,195 +0,0 @@
/*Copyright (C) 2019 Storj Labs, Inc.*/
/*See LICENSE for copying information.*/
html{
height:100vh;
width:100vw;
}
body{
height:100%;
width:100%;
background-color:#E5E5E5;
}
.home-link:hover,
.home-link p:hover{
text-decoration: none;
}
.btn{
height:48px;
}
hr{
margin-top:-5px;
margin-left: -15px;
margin-bottom: 0px;
width: 1102px;
}
.offers-table{
width : 1106px;
margin-left: -15px;
background: #FFFFFF;
border: 1.5px solid #DFDFDF;
border-radius: 6px;
overflow-x: auto;
overflow-y: hidden;
}
.offer-heading{
font-weight: 500;
font-size: 11px;
line-height: 49px;
letter-spacing: -0.100741px;
margin-top:-5px;
width: 120%;
color: #656565;
background: #F2F3F5;
border: 1px solid #DADDE5;
height:42px;
}
.offer-heading p{
margin-top:-5px;
}
.offer-heading,
.data-row,
.col-heading {
font-family: 'Roboto';
font-style: normal;
white-space: nowrap;
}
.data-row,
.data-row .col a{
font-weight: normal;
font-size: 14px;
line-height: 49px;
letter-spacing: -0.100741px;
color: #656565;
}
.data-row:hover{
background-color: #F9FAFB;
}
.col-heading{
font-weight: 500;
font-size: 14px;
line-height: 49px;
align-items: center;
letter-spacing: -0.100741px;
color: #96989E;
}
.referral-admin,h3{
font-family: 'Poppins';
font-style: normal;
}
.referral-admin{
font-weight: 500;
font-size: 18px;
line-height: 19px;
color: #96989E;
margin-top:30px;
}
h3{
font-weight: bold;
font-size: 24px;
line-height: 49px;
letter-spacing: -0.100741px;
color: #494949;
}
.banner{
height: 80px;
overflow: hidden;
}
.banner-txt,
.offer-label{
font-family: 'Inter';
}
.banner-txt{
font-size: 18px;
color:white;
}
.offer-label{
font-size: 11px;
line-height: 49px;
letter-spacing: -0.100741px;
color: #656565;
}
.toggler{
margin-top: -15px;
}
.toggler:hover{
cursor:pointer;
}
#free-offers-modal-lbl,
#ref-offers-modal-lbl{
font-family: 'Poppins';
font-style: normal;
font-weight: 600;
font-size: 24px;
line-height: 49px;
letter-spacing: -0.100741px;
color: #494949;
}
label,
input{
font-family: 'Roboto';
font-style: normal;
font-weight: normal;
}
label{
font-size: 15px;
line-height: 19px;
color: #848484;
}
input{
font-size: 16px;
line-height: 131.56%;
letter-spacing: 0.01em;
text-indent: 15px;
color: #D0D0D0;
}
.offer-type{
margin-left:35px;
text-transform: uppercase;
white-space: nowrap;
}
.edit-offer{
color:white;
}
.edit-offer:hover{
text-decoration: underline;
color: #656565;
}
.stop-offer{
cursor:pointer;
}
.modal-btn{
margin-right:-30px;
}
.header-row .col-heading{
margin-left:-110px;
}

Some files were not shown because too many files have changed in this diff Show More