Merge remote-tracking branch 'origin/main' into multipart-upload

Change-Id: Ib5ce5965b77b81c254d08c27ab30c7eccefbd4c6
Michał Niewrzał 2021-03-17 15:37:17 +01:00
commit fa083a7f05
26 changed files with 456 additions and 279 deletions


@ -317,8 +317,15 @@ push-images: ## Push Docker images to Docker Hub (jenkins)
.PHONY: binaries-upload
binaries-upload: ## Upload binaries to Google Storage (jenkins)
cd "release/${TAG}"; for f in *; do \
zipname=$$(echo $${f} | sed 's/.exe//g') && \
zip -r "$${zipname}.zip" "$${f}" \
zipname=$$(echo $${f} | sed 's/.exe//g') \
&& filename=$$(echo $${f} | sed 's/_.*\.exe/.exe/g' | sed 's/_.*//g') \
&& if [ "$${f}" != "$${filename}" ]; then \
ln $${f} $${filename} \
&& zip -r "$${zipname}.zip" "$${filename}" \
&& rm $${filename} \
; else \
zip -r "$${zipname}.zip" "$${filename}" \
; fi \
; done
cd "release/${TAG}"; gsutil -m cp -r *.zip "gs://storj-v3-alpha-builds/${TAG}/"

go.mod

@ -46,9 +46,9 @@ require (
golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e
google.golang.org/api v0.20.0 // indirect
google.golang.org/protobuf v1.25.0 // indirect
storj.io/common v0.0.0-20210311141746-133f4d716d1d
storj.io/common v0.0.0-20210315162710-05d54340fb1e
storj.io/drpc v0.0.16
storj.io/monkit-jaeger v0.0.0-20210205021559-85f08034688c
storj.io/monkit-jaeger v0.0.0-20210225162224-66fb37637bf6
storj.io/private v0.0.0-20210203200143-9d2ec06f0d3c
storj.io/uplink v1.4.6-0.20210212112107-f7f8a3c8321a
)

go.sum

@ -921,15 +921,15 @@ sourcegraph.com/sqs/pbtypes v0.0.0-20180604144634-d3ebe8f20ae4/go.mod h1:ketZ/q3
storj.io/common v0.0.0-20200424175742-65ac59022f4f/go.mod h1:pZyXiIE7bGETIRXtfs0nICqMwp7PM8HqnDuyUeldNA0=
storj.io/common v0.0.0-20201026135900-1aaeec90670b/go.mod h1:GqdmNf3fLm2UZX/7Zr0BLFCJ4gFjgm6eHrk/fnmr5jQ=
storj.io/common v0.0.0-20210208122718-577b1f8a0a0f/go.mod h1:b8XP/TdW8OyTZ/J2BDFOIE9KojSUNZgImBFZI99zS04=
storj.io/common v0.0.0-20210311141746-133f4d716d1d h1:cq11lWaPt91EppJBnR7vrVyfGpb0Za68Dm1lTENLwF0=
storj.io/common v0.0.0-20210311141746-133f4d716d1d/go.mod h1:OAPn3OXJBq4omkIlWSrTsLa6hm4FnaLs12Odn/ksQL4=
storj.io/common v0.0.0-20210315162710-05d54340fb1e h1:F3hJhxnIqwWUURYsF/rTZiy4DGxpNdjOmPD6bdxWdGM=
storj.io/common v0.0.0-20210315162710-05d54340fb1e/go.mod h1:OAPn3OXJBq4omkIlWSrTsLa6hm4FnaLs12Odn/ksQL4=
storj.io/drpc v0.0.11/go.mod h1:TiFc2obNjL9/3isMW1Rpxjy8V9uE0B2HMeMFGiiI7Iw=
storj.io/drpc v0.0.14/go.mod h1:82nfl+6YwRwF6UG31cEWWUqv/FaKvP5SGqUvoqTxCMA=
storj.io/drpc v0.0.16 h1:9sxypc5lKi/0D69cR21BR0S21+IvXfON8L5nXMVNTwQ=
storj.io/drpc v0.0.16/go.mod h1:zdmQ93nx4Z35u11pQ+GAnBy4DGOK3HJCSOfeh2RryTo=
storj.io/monkit-jaeger v0.0.0-20200518165323-80778fc3f91b/go.mod h1:gj4vuCeyCRjRmH8LIrgoyU9Dc9uR6H+/GcDUXmTbf80=
storj.io/monkit-jaeger v0.0.0-20210205021559-85f08034688c h1:6B1nHL8pGEjxzAHoADZBNpYAqLfpqEtmji1YgU4ByDA=
storj.io/monkit-jaeger v0.0.0-20210205021559-85f08034688c/go.mod h1:gj4vuCeyCRjRmH8LIrgoyU9Dc9uR6H+/GcDUXmTbf80=
storj.io/monkit-jaeger v0.0.0-20210225162224-66fb37637bf6 h1:LTDmeZDrFWD9byqNOf/Bc1VmMNKvja/9Cs52d1V5aTk=
storj.io/monkit-jaeger v0.0.0-20210225162224-66fb37637bf6/go.mod h1:gj4vuCeyCRjRmH8LIrgoyU9Dc9uR6H+/GcDUXmTbf80=
storj.io/private v0.0.0-20210203200143-9d2ec06f0d3c h1:9sLvfSIZgUhw98J8/3FBOVVJ+huhgYedhYpbrLbE+uk=
storj.io/private v0.0.0-20210203200143-9d2ec06f0d3c/go.mod h1:VHaDkpBka3Pp5rXqFSDHbEmzMaFFW4BYrXJfGIN1Udo=
storj.io/uplink v1.4.6-0.20210212112107-f7f8a3c8321a h1:s4adJCY5sKrc96LLk/oHBemuq4V2yM9a2uju1c6P3xo=


@ -157,6 +157,16 @@ func (schema *Schema) DropTable(tableName string) {
schema.Indexes = schema.Indexes[:j:j]
}
// FindTable returns the specified table.
func (schema *Schema) FindTable(tableName string) (*Table, bool) {
for _, table := range schema.Tables {
if table.Name == tableName {
return table, true
}
}
return nil, false
}
// FindIndex finds index in the schema.
func (schema *Schema) FindIndex(name string) (*Index, bool) {
for _, idx := range schema.Indexes {
@ -183,6 +193,16 @@ func (table *Table) AddColumn(column *Column) {
table.Columns = append(table.Columns, column)
}
// RemoveColumn removes the column from the table.
func (table *Table) RemoveColumn(columnName string) {
for i, column := range table.Columns {
if column.Name == columnName {
table.Columns = append(table.Columns[:i], table.Columns[i+1:]...)
return
}
}
}
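The migration test later in this commit uses these two helpers to strip columns before comparing schemas. A minimal usage sketch in the same spirit — the import path is an assumption about where the dbschema package lives, and a real Schema would be read from a live database rather than constructed empty:

	package main

	import (
		"fmt"

		"storj.io/storj/private/dbutil/dbschema" // assumed import path
	)

	func main() {
		schema := &dbschema.Schema{} // normally populated by inspecting a database
		if nodes, ok := schema.FindTable("nodes"); ok {
			// Drop legacy columns before comparing against the dbx-defined schema.
			nodes.RemoveColumn("uptime_reputation_alpha")
			nodes.RemoveColumn("uptime_reputation_beta")
		}
		fmt.Println("tables in schema:", len(schema.Tables))
	}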
// FindColumn finds a column in the table.
func (table *Table) FindColumn(columnName string) (*Column, bool) {
for _, column := range table.Columns {


@ -83,6 +83,8 @@ type Config struct {
ProjectLimitsIncreaseRequestURL string `help:"url link to project limit increase request page" default:"https://support.tardigrade.io/hc/en-us/requests/new?ticket_form_id=360000683212"`
GatewayCredentialsRequestURL string `help:"url link for gateway credentials requests" default:"https://auth.tardigradeshare.io"`
IsBetaSatellite bool `help:"indicates if satellite is in beta" default:"false"`
BetaSatelliteFeedbackURL string `help:"url link for beta satellite feedback" default:""`
BetaSatelliteSupportURL string `help:"url link for beta satellite support" default:""`
RateLimit web.IPRateLimiterConfig
@ -290,6 +292,8 @@ func (server *Server) appHandler(w http.ResponseWriter, r *http.Request) {
ProjectLimitsIncreaseRequestURL string
GatewayCredentialsRequestURL string
IsBetaSatellite bool
BetaSatelliteFeedbackURL string
BetaSatelliteSupportURL string
}
data.ExternalAddress = server.config.ExternalAddress
@ -305,6 +309,8 @@ func (server *Server) appHandler(w http.ResponseWriter, r *http.Request) {
data.ProjectLimitsIncreaseRequestURL = server.config.ProjectLimitsIncreaseRequestURL
data.GatewayCredentialsRequestURL = server.config.GatewayCredentialsRequestURL
data.IsBetaSatellite = server.config.IsBetaSatellite
data.BetaSatelliteFeedbackURL = server.config.BetaSatelliteFeedbackURL
data.BetaSatelliteSupportURL = server.config.BetaSatelliteSupportURL
if server.templates.index == nil {
server.log.Error("index template is not set")


@ -1717,6 +1717,12 @@ func (endpoint *Endpoint) ListSegments(ctx context.Context, req *pb.SegmentListR
}, nil
}
// DownloadObject returns all the information necessary to begin downloading an object in a single request.
func (endpoint *Endpoint) DownloadObject(ctx context.Context, req *pb.ObjectDownloadRequest) (resp *pb.ObjectDownloadResponse, err error) {
defer mon.Task()(&ctx)(&err)
return nil, rpcstatus.Error(rpcstatus.Unimplemented, "Not Implemented")
}
// DownloadSegment returns data necessary to download segment.
func (endpoint *Endpoint) DownloadSegment(ctx context.Context, req *pb.SegmentDownloadRequest) (resp *pb.SegmentDownloadResponse, err error) {
defer mon.Task()(&ctx)(&err)


@ -89,7 +89,7 @@ read scalar (
)
model accounting_rollup (
key node_id start_time
key node_id start_time
index ( fields start_time )
field node_id blob
@ -102,8 +102,6 @@ model accounting_rollup (
field at_rest_total float64
)
create accounting_rollup ( noreturn, replace )
//--- overlay cache ---//
model node (
@ -261,7 +259,6 @@ model user (
field working_on text ( updatable, nullable )
field is_professional bool ( updatable, default false )
field employee_count text ( updatable, nullable )
)
create user ( )


@ -9061,42 +9061,6 @@ func (obj *pgxImpl) CreateNoReturn_AccountingTimestamps(ctx context.Context,
}
func (obj *pgxImpl) ReplaceNoReturn_AccountingRollup(ctx context.Context,
accounting_rollup_node_id AccountingRollup_NodeId_Field,
accounting_rollup_start_time AccountingRollup_StartTime_Field,
accounting_rollup_put_total AccountingRollup_PutTotal_Field,
accounting_rollup_get_total AccountingRollup_GetTotal_Field,
accounting_rollup_get_audit_total AccountingRollup_GetAuditTotal_Field,
accounting_rollup_get_repair_total AccountingRollup_GetRepairTotal_Field,
accounting_rollup_put_repair_total AccountingRollup_PutRepairTotal_Field,
accounting_rollup_at_rest_total AccountingRollup_AtRestTotal_Field) (
err error) {
defer mon.Task()(&ctx)(&err)
__node_id_val := accounting_rollup_node_id.value()
__start_time_val := accounting_rollup_start_time.value()
__put_total_val := accounting_rollup_put_total.value()
__get_total_val := accounting_rollup_get_total.value()
__get_audit_total_val := accounting_rollup_get_audit_total.value()
__get_repair_total_val := accounting_rollup_get_repair_total.value()
__put_repair_total_val := accounting_rollup_put_repair_total.value()
__at_rest_total_val := accounting_rollup_at_rest_total.value()
var __embed_stmt = __sqlbundle_Literal("INSERT INTO accounting_rollups ( node_id, start_time, put_total, get_total, get_audit_total, get_repair_total, put_repair_total, at_rest_total ) VALUES ( ?, ?, ?, ?, ?, ?, ?, ? ) ON CONFLICT ( node_id, start_time ) DO UPDATE SET node_id = EXCLUDED.node_id, start_time = EXCLUDED.start_time, put_total = EXCLUDED.put_total, get_total = EXCLUDED.get_total, get_audit_total = EXCLUDED.get_audit_total, get_repair_total = EXCLUDED.get_repair_total, put_repair_total = EXCLUDED.put_repair_total, at_rest_total = EXCLUDED.at_rest_total")
var __values []interface{}
__values = append(__values, __node_id_val, __start_time_val, __put_total_val, __get_total_val, __get_audit_total_val, __get_repair_total_val, __put_repair_total_val, __at_rest_total_val)
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
obj.logStmt(__stmt, __values...)
_, err = obj.driver.ExecContext(ctx, __stmt, __values...)
if err != nil {
return obj.makeErr(err)
}
return nil
}
func (obj *pgxImpl) Create_AuditHistory(ctx context.Context,
audit_history_node_id AuditHistory_NodeId_Field,
audit_history_history AuditHistory_History_Field) (
@ -14434,42 +14398,6 @@ func (obj *pgxcockroachImpl) CreateNoReturn_AccountingTimestamps(ctx context.Con
}
func (obj *pgxcockroachImpl) ReplaceNoReturn_AccountingRollup(ctx context.Context,
accounting_rollup_node_id AccountingRollup_NodeId_Field,
accounting_rollup_start_time AccountingRollup_StartTime_Field,
accounting_rollup_put_total AccountingRollup_PutTotal_Field,
accounting_rollup_get_total AccountingRollup_GetTotal_Field,
accounting_rollup_get_audit_total AccountingRollup_GetAuditTotal_Field,
accounting_rollup_get_repair_total AccountingRollup_GetRepairTotal_Field,
accounting_rollup_put_repair_total AccountingRollup_PutRepairTotal_Field,
accounting_rollup_at_rest_total AccountingRollup_AtRestTotal_Field) (
err error) {
defer mon.Task()(&ctx)(&err)
__node_id_val := accounting_rollup_node_id.value()
__start_time_val := accounting_rollup_start_time.value()
__put_total_val := accounting_rollup_put_total.value()
__get_total_val := accounting_rollup_get_total.value()
__get_audit_total_val := accounting_rollup_get_audit_total.value()
__get_repair_total_val := accounting_rollup_get_repair_total.value()
__put_repair_total_val := accounting_rollup_put_repair_total.value()
__at_rest_total_val := accounting_rollup_at_rest_total.value()
var __embed_stmt = __sqlbundle_Literal("UPSERT INTO accounting_rollups ( node_id, start_time, put_total, get_total, get_audit_total, get_repair_total, put_repair_total, at_rest_total ) VALUES ( ?, ?, ?, ?, ?, ?, ?, ? )")
var __values []interface{}
__values = append(__values, __node_id_val, __start_time_val, __put_total_val, __get_total_val, __get_audit_total_val, __get_repair_total_val, __put_repair_total_val, __at_rest_total_val)
var __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
obj.logStmt(__stmt, __values...)
_, err = obj.driver.ExecContext(ctx, __stmt, __values...)
if err != nil {
return obj.makeErr(err)
}
return nil
}
func (obj *pgxcockroachImpl) Create_AuditHistory(ctx context.Context,
audit_history_node_id AuditHistory_NodeId_Field,
audit_history_history AuditHistory_History_Field) (
@ -20941,24 +20869,6 @@ func (rx *Rx) Paged_StoragenodeBandwidthRollup_By_StoragenodeId_And_IntervalStar
return tx.Paged_StoragenodeBandwidthRollup_By_StoragenodeId_And_IntervalStart_GreaterOrEqual(ctx, storagenode_bandwidth_rollup_storagenode_id, storagenode_bandwidth_rollup_interval_start_greater_or_equal, limit, start)
}
func (rx *Rx) ReplaceNoReturn_AccountingRollup(ctx context.Context,
accounting_rollup_node_id AccountingRollup_NodeId_Field,
accounting_rollup_start_time AccountingRollup_StartTime_Field,
accounting_rollup_put_total AccountingRollup_PutTotal_Field,
accounting_rollup_get_total AccountingRollup_GetTotal_Field,
accounting_rollup_get_audit_total AccountingRollup_GetAuditTotal_Field,
accounting_rollup_get_repair_total AccountingRollup_GetRepairTotal_Field,
accounting_rollup_put_repair_total AccountingRollup_PutRepairTotal_Field,
accounting_rollup_at_rest_total AccountingRollup_AtRestTotal_Field) (
err error) {
var tx *Tx
if tx, err = rx.getTx(ctx); err != nil {
return
}
return tx.ReplaceNoReturn_AccountingRollup(ctx, accounting_rollup_node_id, accounting_rollup_start_time, accounting_rollup_put_total, accounting_rollup_get_total, accounting_rollup_get_audit_total, accounting_rollup_get_repair_total, accounting_rollup_put_repair_total, accounting_rollup_at_rest_total)
}
func (rx *Rx) ReplaceNoReturn_NodeApiVersion(ctx context.Context,
node_api_version_id NodeApiVersion_Id_Field,
node_api_version_api_version NodeApiVersion_ApiVersion_Field) (
@ -21728,17 +21638,6 @@ type Methods interface {
limit int, start *Paged_StoragenodeBandwidthRollup_By_StoragenodeId_And_IntervalStart_GreaterOrEqual_Continuation) (
rows []*StoragenodeBandwidthRollup, next *Paged_StoragenodeBandwidthRollup_By_StoragenodeId_And_IntervalStart_GreaterOrEqual_Continuation, err error)
ReplaceNoReturn_AccountingRollup(ctx context.Context,
accounting_rollup_node_id AccountingRollup_NodeId_Field,
accounting_rollup_start_time AccountingRollup_StartTime_Field,
accounting_rollup_put_total AccountingRollup_PutTotal_Field,
accounting_rollup_get_total AccountingRollup_GetTotal_Field,
accounting_rollup_get_audit_total AccountingRollup_GetAuditTotal_Field,
accounting_rollup_get_repair_total AccountingRollup_GetRepairTotal_Field,
accounting_rollup_put_repair_total AccountingRollup_PutRepairTotal_Field,
accounting_rollup_at_rest_total AccountingRollup_AtRestTotal_Field) (
err error)
ReplaceNoReturn_NodeApiVersion(ctx context.Context,
node_api_version_id NodeApiVersion_Id_Field,
node_api_version_api_version NodeApiVersion_ApiVersion_Field) (


@ -1267,12 +1267,9 @@ func (db *satelliteDB) PostgresMigration() *migrate.Migration {
},
{
DB: &db.migrationDB,
Description: "drop unused columns uptime_reputation_alpha and uptime_reputation_beta from nodes table",
Description: "empty migration to fix backwards compat test discrepancy with release tag",
Version: 148,
Action: migrate.SQL{
`ALTER TABLE nodes DROP COLUMN uptime_reputation_alpha;`,
`ALTER TABLE nodes DROP COLUMN uptime_reputation_beta;`,
},
Action: migrate.SQL{},
},
// NB: after updating testdata in `testdata`, run
// `go generate` to update `migratez.go`.


@ -232,6 +232,13 @@ func migrateTest(t *testing.T, connStr string) {
finalSchema = currentSchema
}
// TODO(cam): remove this check with the migration step to drop the columns
nodes, ok := finalSchema.FindTable("nodes")
if ok {
nodes.RemoveColumn("uptime_reputation_alpha")
nodes.RemoveColumn("uptime_reputation_beta")
}
// verify that we also match the dbx version
require.Equal(t, dbxschema, finalSchema, "result of all migration scripts did not match dbx schema")
}
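The TODO above refers to a future migration step that would actually drop these columns; it would presumably mirror the step this commit reverts to an empty migration. A sketch only, with a placeholder version number, following the migrate.SQL form shown in the earlier hunk:

	// Hypothetical follow-up migration step; the version number is a placeholder.
	{
		DB:          &db.migrationDB,
		Description: "drop unused columns uptime_reputation_alpha and uptime_reputation_beta from nodes table",
		Version:     149,
		Action: migrate.SQL{
			`ALTER TABLE nodes DROP COLUMN uptime_reputation_alpha;`,
			`ALTER TABLE nodes DROP COLUMN uptime_reputation_beta;`,
		},
	},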


@ -141,49 +141,51 @@ CREATE TABLE irreparabledbs (
PRIMARY KEY ( segmentpath )
);
CREATE TABLE nodes (
id bytea NOT NULL,
address text NOT NULL DEFAULT '',
last_net text NOT NULL,
last_ip_port text,
protocol integer NOT NULL DEFAULT 0,
type integer NOT NULL DEFAULT 0,
email text NOT NULL,
wallet text NOT NULL,
wallet_features text NOT NULL DEFAULT '',
free_disk bigint NOT NULL DEFAULT -1,
piece_count bigint NOT NULL DEFAULT 0,
major bigint NOT NULL DEFAULT 0,
minor bigint NOT NULL DEFAULT 0,
patch bigint NOT NULL DEFAULT 0,
hash text NOT NULL DEFAULT '',
timestamp timestamp with time zone NOT NULL DEFAULT '0001-01-01 00:00:00+00',
release boolean NOT NULL DEFAULT false,
latency_90 bigint NOT NULL DEFAULT 0,
audit_success_count bigint NOT NULL DEFAULT 0,
total_audit_count bigint NOT NULL DEFAULT 0,
vetted_at timestamp with time zone,
uptime_success_count bigint NOT NULL DEFAULT 0,
total_uptime_count bigint NOT NULL DEFAULT 0,
created_at timestamp with time zone NOT NULL DEFAULT current_timestamp,
updated_at timestamp with time zone NOT NULL DEFAULT current_timestamp,
last_contact_success timestamp with time zone NOT NULL DEFAULT 'epoch',
last_contact_failure timestamp with time zone NOT NULL DEFAULT 'epoch',
contained boolean NOT NULL DEFAULT false,
disqualified timestamp with time zone,
suspended timestamp with time zone,
unknown_audit_suspended timestamp with time zone,
offline_suspended timestamp with time zone,
under_review timestamp with time zone,
online_score double precision NOT NULL DEFAULT 1,
audit_reputation_alpha double precision NOT NULL DEFAULT 1,
audit_reputation_beta double precision NOT NULL DEFAULT 0,
unknown_audit_reputation_alpha double precision NOT NULL DEFAULT 1,
unknown_audit_reputation_beta double precision NOT NULL DEFAULT 0,
exit_initiated_at timestamp with time zone,
exit_loop_completed_at timestamp with time zone,
exit_finished_at timestamp with time zone,
exit_success boolean NOT NULL DEFAULT false,
PRIMARY KEY ( id )
id bytea NOT NULL,
address text NOT NULL DEFAULT '',
last_net text NOT NULL,
last_ip_port text,
protocol integer NOT NULL DEFAULT 0,
type integer NOT NULL DEFAULT 0,
email text NOT NULL,
wallet text NOT NULL,
wallet_features text NOT NULL DEFAULT '',
free_disk bigint NOT NULL DEFAULT -1,
piece_count bigint NOT NULL DEFAULT 0,
major bigint NOT NULL DEFAULT 0,
minor bigint NOT NULL DEFAULT 0,
patch bigint NOT NULL DEFAULT 0,
hash text NOT NULL DEFAULT '',
timestamp timestamp with time zone NOT NULL DEFAULT '0001-01-01 00:00:00+00',
release boolean NOT NULL DEFAULT false,
latency_90 bigint NOT NULL DEFAULT 0,
audit_success_count bigint NOT NULL DEFAULT 0,
total_audit_count bigint NOT NULL DEFAULT 0,
vetted_at timestamp with time zone,
uptime_success_count bigint NOT NULL DEFAULT 0,
total_uptime_count bigint NOT NULL DEFAULT 0,
created_at timestamp with time zone NOT NULL DEFAULT current_timestamp,
updated_at timestamp with time zone NOT NULL DEFAULT current_timestamp,
last_contact_success timestamp with time zone NOT NULL DEFAULT 'epoch',
last_contact_failure timestamp with time zone NOT NULL DEFAULT 'epoch',
contained boolean NOT NULL DEFAULT false,
disqualified timestamp with time zone,
suspended timestamp with time zone,
unknown_audit_suspended timestamp with time zone,
offline_suspended timestamp with time zone,
under_review timestamp with time zone,
online_score double precision NOT NULL DEFAULT 1,
audit_reputation_alpha double precision NOT NULL DEFAULT 1,
audit_reputation_beta double precision NOT NULL DEFAULT 0,
unknown_audit_reputation_alpha double precision NOT NULL DEFAULT 1,
unknown_audit_reputation_beta double precision NOT NULL DEFAULT 0,
uptime_reputation_alpha double precision NOT NULL DEFAULT 1,
uptime_reputation_beta double precision NOT NULL DEFAULT 0,
exit_initiated_at timestamp with time zone,
exit_loop_completed_at timestamp with time zone,
exit_finished_at timestamp with time zone,
exit_success boolean NOT NULL DEFAULT false,
PRIMARY KEY ( id )
);
CREATE TABLE node_api_versions (
id bytea NOT NULL,
@ -370,10 +372,10 @@ CREATE TABLE users (
project_limit integer NOT NULL DEFAULT 0,
position text,
company_name text,
company_size integer,
company_size int,
working_on text,
is_professional boolean NOT NULL DEFAULT false,
employee_count text,
employee_count text,
PRIMARY KEY ( id )
);
CREATE TABLE value_attributions (
@ -458,7 +460,6 @@ CREATE INDEX storagenode_paystubs_node_id_index ON storagenode_paystubs ( node_i
CREATE INDEX storagenode_storage_tallies_node_id_index ON storagenode_storage_tallies ( node_id );
CREATE UNIQUE INDEX credits_earned_user_id_offer_id ON user_credits ( id, offer_id );
INSERT INTO "offers" ("id", "name", "description", "award_credit_in_cents", "invitee_credit_in_cents", "expires_at", "created_at", "status", "type", "award_credit_duration_days", "invitee_credit_duration_days") VALUES (1, 'Default referral offer', 'Is active when no other active referral offer', 300, 600, '2119-03-14 08:28:24.636949+00', '2019-07-14 08:28:24.636949+00', 1, 2, 365, 14);
INSERT INTO "offers" ("id", "name", "description", "award_credit_in_cents", "invitee_credit_in_cents", "expires_at", "created_at", "status", "type", "award_credit_duration_days", "invitee_credit_duration_days") VALUES (2, 'Default free credit offer', 'Is active when no active free credit offer', 0, 300, '2119-03-14 08:28:24.636949+00', '2019-07-14 08:28:24.636949+00', 1, 1, NULL, 14);


@ -256,46 +256,84 @@ func (db *StoragenodeAccounting) SaveRollup(ctx context.Context, latestRollup ti
rollups = append(rollups, ar)
}
}
finished := false
for !finished {
err = db.db.WithTx(ctx, func(ctx context.Context, tx *dbx.Tx) error {
for i := 0; i < batchSize && len(rollups) > 0; i++ {
ar := rollups[0]
rollups = rollups[1:]
insertBatch := func(ctx context.Context, db *dbx.DB, batch []*accounting.Rollup) (err error) {
defer mon.Task()(&ctx)(&err)
n := len(batch)
nID := dbx.AccountingRollup_NodeId(ar.NodeID.Bytes())
start := dbx.AccountingRollup_StartTime(ar.StartTime)
put := dbx.AccountingRollup_PutTotal(ar.PutTotal)
get := dbx.AccountingRollup_GetTotal(ar.GetTotal)
audit := dbx.AccountingRollup_GetAuditTotal(ar.GetAuditTotal)
getRepair := dbx.AccountingRollup_GetRepairTotal(ar.GetRepairTotal)
putRepair := dbx.AccountingRollup_PutRepairTotal(ar.PutRepairTotal)
atRest := dbx.AccountingRollup_AtRestTotal(ar.AtRestTotal)
nodeID := make([]storj.NodeID, n)
startTime := make([]time.Time, n)
putTotal := make([]int64, n)
getTotal := make([]int64, n)
getAuditTotal := make([]int64, n)
getRepairTotal := make([]int64, n)
putRepairTotal := make([]int64, n)
atRestTotal := make([]float64, n)
err := tx.ReplaceNoReturn_AccountingRollup(ctx, nID, start, put, get, audit, getRepair, putRepair, atRest)
if err != nil {
return err
}
}
for i, ar := range batch {
nodeID[i] = ar.NodeID
startTime[i] = ar.StartTime
putTotal[i] = ar.PutTotal
getTotal[i] = ar.GetTotal
getAuditTotal[i] = ar.GetAuditTotal
getRepairTotal[i] = ar.GetRepairTotal
putRepairTotal[i] = ar.PutRepairTotal
atRestTotal[i] = ar.AtRestTotal
}
if len(rollups) == 0 {
finished = true
return tx.UpdateNoReturn_AccountingTimestamps_By_Name(ctx,
dbx.AccountingTimestamps_Name(accounting.LastRollup),
dbx.AccountingTimestamps_Update_Fields{
Value: dbx.AccountingTimestamps_Value(latestRollup),
},
)
}
_, err = db.ExecContext(ctx, `
INSERT INTO accounting_rollups (
node_id, start_time,
put_total, get_total,
get_audit_total, get_repair_total, put_repair_total,
at_rest_total
)
SELECT * FROM unnest(
$1::bytea[], $2::timestamptz[],
$3::int8[], $4::int8[],
$5::int8[], $6::int8[], $7::int8[],
$8::float8[]
)
ON CONFLICT ( node_id, start_time )
DO UPDATE SET
put_total = EXCLUDED.put_total,
get_total = EXCLUDED.get_total,
get_audit_total = EXCLUDED.get_audit_total,
get_repair_total = EXCLUDED.get_repair_total,
put_repair_total = EXCLUDED.put_repair_total,
at_rest_total = EXCLUDED.at_rest_total
`, pgutil.NodeIDArray(nodeID), pgutil.TimestampTZArray(startTime),
pgutil.Int8Array(putTotal), pgutil.Int8Array(getTotal),
pgutil.Int8Array(getAuditTotal), pgutil.Int8Array(getRepairTotal), pgutil.Int8Array(putRepairTotal),
pgutil.Float8Array(atRestTotal))
return nil
})
if err != nil {
return Error.Wrap(err)
}
// Note: we do not need a transaction here. If we fail before updating
// accounting.LastRollup we may end up with partial data in the database,
// but the next runs will re-insert those rows and the upsert will fix them.
for len(rollups) > 0 {
batch := rollups
if len(batch) > batchSize {
batch = batch[:batchSize]
}
rollups = rollups[len(batch):]
if err := insertBatch(ctx, db.db.DB, batch); err != nil {
return Error.Wrap(err)
}
}
return nil
err = db.db.UpdateNoReturn_AccountingTimestamps_By_Name(ctx,
dbx.AccountingTimestamps_Name(accounting.LastRollup),
dbx.AccountingTimestamps_Update_Fields{
Value: dbx.AccountingTimestamps_Value(latestRollup),
},
)
return Error.Wrap(err)
}
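The rewritten SaveRollup sends each batch as a single INSERT ... SELECT FROM unnest(...) ON CONFLICT DO UPDATE statement instead of one dbx upsert per row. The surrounding chunking pattern, isolated into a small runnable sketch with made-up data:

	package main

	import "fmt"

	func main() {
		rollups := []string{"r1", "r2", "r3", "r4", "r5"}
		const batchSize = 2

		for len(rollups) > 0 {
			batch := rollups
			if len(batch) > batchSize {
				batch = batch[:batchSize]
			}
			rollups = rollups[len(batch):]

			// The real code calls insertBatch(ctx, db.db.DB, batch) here.
			fmt.Println("inserting batch:", batch)
		}
	}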
// LastTimestamp records the greatest last tallied time.


@ -125,49 +125,51 @@ CREATE TABLE irreparabledbs (
PRIMARY KEY ( segmentpath )
);
CREATE TABLE nodes (
id bytea NOT NULL,
address text NOT NULL DEFAULT '',
last_net text NOT NULL,
last_ip_port text,
protocol integer NOT NULL DEFAULT 0,
type integer NOT NULL DEFAULT 0,
email text NOT NULL,
wallet text NOT NULL,
wallet_features text NOT NULL DEFAULT '',
free_disk bigint NOT NULL DEFAULT -1,
piece_count bigint NOT NULL DEFAULT 0,
major bigint NOT NULL DEFAULT 0,
minor bigint NOT NULL DEFAULT 0,
patch bigint NOT NULL DEFAULT 0,
hash text NOT NULL DEFAULT '',
timestamp timestamp with time zone NOT NULL DEFAULT '0001-01-01 00:00:00+00',
release boolean NOT NULL DEFAULT false,
latency_90 bigint NOT NULL DEFAULT 0,
audit_success_count bigint NOT NULL DEFAULT 0,
total_audit_count bigint NOT NULL DEFAULT 0,
vetted_at timestamp with time zone,
uptime_success_count bigint NOT NULL DEFAULT 0,
total_uptime_count bigint NOT NULL DEFAULT 0,
created_at timestamp with time zone NOT NULL DEFAULT current_timestamp,
updated_at timestamp with time zone NOT NULL DEFAULT current_timestamp,
last_contact_success timestamp with time zone NOT NULL DEFAULT 'epoch',
last_contact_failure timestamp with time zone NOT NULL DEFAULT 'epoch',
contained boolean NOT NULL DEFAULT false,
disqualified timestamp with time zone,
suspended timestamp with time zone,
unknown_audit_suspended timestamp with time zone,
offline_suspended timestamp with time zone,
under_review timestamp with time zone,
online_score double precision NOT NULL DEFAULT 1,
audit_reputation_alpha double precision NOT NULL DEFAULT 1,
audit_reputation_beta double precision NOT NULL DEFAULT 0,
unknown_audit_reputation_alpha double precision NOT NULL DEFAULT 1,
unknown_audit_reputation_beta double precision NOT NULL DEFAULT 0,
exit_initiated_at timestamp with time zone,
exit_loop_completed_at timestamp with time zone,
exit_finished_at timestamp with time zone,
exit_success boolean NOT NULL DEFAULT false,
PRIMARY KEY ( id )
id bytea NOT NULL,
address text NOT NULL DEFAULT '',
last_net text NOT NULL,
last_ip_port text,
protocol integer NOT NULL DEFAULT 0,
type integer NOT NULL DEFAULT 0,
email text NOT NULL,
wallet text NOT NULL,
wallet_features text NOT NULL DEFAULT '',
free_disk bigint NOT NULL DEFAULT -1,
piece_count bigint NOT NULL DEFAULT 0,
major bigint NOT NULL DEFAULT 0,
minor bigint NOT NULL DEFAULT 0,
patch bigint NOT NULL DEFAULT 0,
hash text NOT NULL DEFAULT '',
timestamp timestamp with time zone NOT NULL DEFAULT '0001-01-01 00:00:00+00',
release boolean NOT NULL DEFAULT false,
latency_90 bigint NOT NULL DEFAULT 0,
audit_success_count bigint NOT NULL DEFAULT 0,
total_audit_count bigint NOT NULL DEFAULT 0,
vetted_at timestamp with time zone,
uptime_success_count bigint NOT NULL DEFAULT 0,
total_uptime_count bigint NOT NULL DEFAULT 0,
created_at timestamp with time zone NOT NULL DEFAULT current_timestamp,
updated_at timestamp with time zone NOT NULL DEFAULT current_timestamp,
last_contact_success timestamp with time zone NOT NULL DEFAULT 'epoch',
last_contact_failure timestamp with time zone NOT NULL DEFAULT 'epoch',
contained boolean NOT NULL DEFAULT false,
disqualified timestamp with time zone,
suspended timestamp with time zone,
unknown_audit_suspended timestamp with time zone,
offline_suspended timestamp with time zone,
under_review timestamp with time zone,
online_score double precision NOT NULL DEFAULT 1,
audit_reputation_alpha double precision NOT NULL DEFAULT 1,
audit_reputation_beta double precision NOT NULL DEFAULT 0,
unknown_audit_reputation_alpha double precision NOT NULL DEFAULT 1,
unknown_audit_reputation_beta double precision NOT NULL DEFAULT 0,
uptime_reputation_alpha double precision NOT NULL DEFAULT 1,
uptime_reputation_beta double precision NOT NULL DEFAULT 0,
exit_initiated_at timestamp with time zone,
exit_loop_completed_at timestamp with time zone,
exit_finished_at timestamp with time zone,
exit_success boolean NOT NULL DEFAULT false,
PRIMARY KEY ( id )
);
CREATE TABLE node_api_versions (
id bytea NOT NULL,
@ -354,10 +356,10 @@ CREATE TABLE users (
project_limit integer NOT NULL DEFAULT 0,
position text,
company_name text,
company_size integer,
company_size int,
working_on text,
is_professional boolean NOT NULL DEFAULT false,
employee_count text,
employee_count text,
PRIMARY KEY ( id )
);
CREATE TABLE value_attributions (
@ -442,7 +444,6 @@ CREATE INDEX storagenode_paystubs_node_id_index ON storagenode_paystubs ( node_i
CREATE INDEX storagenode_storage_tallies_node_id_index ON storagenode_storage_tallies ( node_id );
CREATE UNIQUE INDEX credits_earned_user_id_offer_id ON user_credits ( id, offer_id );
INSERT INTO "offers" ("id", "name", "description", "award_credit_in_cents", "invitee_credit_in_cents", "expires_at", "created_at", "status", "type", "award_credit_duration_days", "invitee_credit_duration_days") VALUES (1, 'Default referral offer', 'Is active when no other active referral offer', 300, 600, '2119-03-14 08:28:24.636949+00', '2019-07-14 08:28:24.636949+00', 1, 2, 365, 14);
INSERT INTO "offers" ("id", "name", "description", "award_credit_in_cents", "invitee_credit_in_cents", "expires_at", "created_at", "status", "type", "award_credit_duration_days", "invitee_credit_duration_days") VALUES (2, 'Default free credit offer', 'Is active when no active free credit offer', 0, 300, '2119-03-14 08:28:24.636949+00', '2019-07-14 08:28:24.636949+00', 1, 1, NULL, 14);
@ -454,15 +455,15 @@ INSERT INTO "accounting_timestamps" VALUES ('LastAtRestTally', '0001-01-01 00:00
INSERT INTO "accounting_timestamps" VALUES ('LastRollup', '0001-01-01 00:00:00+00');
INSERT INTO "accounting_timestamps" VALUES ('LastBandwidthTally', '0001-01-01 00:00:00+00');
INSERT INTO "nodes"("id", "address", "last_net", "protocol", "type", "email", "wallet", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "uptime_success_count", "total_uptime_count", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "suspended", "audit_reputation_alpha", "audit_reputation_beta", "unknown_audit_reputation_alpha", "unknown_audit_reputation_beta", "exit_success", "online_score") VALUES (E'\\153\\313\\233\\074\\327\\177\\136\\070\\346\\001', '127.0.0.1:55516', '', 0, 4, '', '', -1, 0, 0, 1, 0, '', 'epoch', false, 0, 0, 5, 0, 5, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, NULL, 50, 0, 1, 0, false, 1);
INSERT INTO "nodes"("id", "address", "last_net", "protocol", "type", "email", "wallet", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "uptime_success_count", "total_uptime_count", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "suspended", "audit_reputation_alpha", "audit_reputation_beta", "unknown_audit_reputation_alpha", "unknown_audit_reputation_beta", "exit_success", "online_score") VALUES (E'\\006\\223\\250R\\221\\005\\365\\377v>0\\266\\365\\216\\255?\\347\\244\\371?2\\264\\262\\230\\007<\\001\\262\\263\\237\\247n', '127.0.0.1:55518', '', 0, 4, '', '', -1, 0, 0, 1, 0, '', 'epoch', false, 0, 0, 0, 3, 3, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, NULL, 50, 0, 1, 0, false, 1);
INSERT INTO "nodes"("id", "address", "last_net", "protocol", "type", "email", "wallet", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "uptime_success_count", "total_uptime_count", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "suspended", "audit_reputation_alpha", "audit_reputation_beta", "unknown_audit_reputation_alpha", "unknown_audit_reputation_beta", "exit_success", "online_score") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014', '127.0.0.1:55517', '', 0, 4, '', '', -1, 0, 0, 1, 0, '', 'epoch', false, 0, 0, 0, 0, 0, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, NULL, 50, 0, 1, 0, false, 1);
INSERT INTO "nodes"("id", "address", "last_net", "protocol", "type", "email", "wallet", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "uptime_success_count", "total_uptime_count", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "suspended", "audit_reputation_alpha", "audit_reputation_beta", "unknown_audit_reputation_alpha", "unknown_audit_reputation_beta", "exit_success", "online_score") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\015', '127.0.0.1:55519', '', 0, 4, '', '', -1, 0, 0, 1, 0, '', 'epoch', false, 0, 1, 2, 1, 2, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, NULL, 50, 0, 1, 0, false, 1);
INSERT INTO "nodes"("id", "address", "last_net", "protocol", "type", "email", "wallet", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "uptime_success_count", "total_uptime_count", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "suspended", "audit_reputation_alpha", "audit_reputation_beta", "unknown_audit_reputation_alpha", "unknown_audit_reputation_beta", "exit_success", "vetted_at", "online_score") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\016', '127.0.0.1:55520', '', 0, 4, '', '', -1, 0, 0, 1, 0, '', 'epoch', false, 0, 300, 400, 300, 400, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, NULL, 300, 0, 1, 0, false, '2020-03-18 12:00:00.000000+00', 1);
INSERT INTO "nodes"("id", "address", "last_net", "protocol", "type", "email", "wallet", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "uptime_success_count", "total_uptime_count", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "suspended", "audit_reputation_alpha", "audit_reputation_beta", "unknown_audit_reputation_alpha", "unknown_audit_reputation_beta", "exit_success", "online_score") VALUES (E'\\154\\313\\233\\074\\327\\177\\136\\070\\346\\001', '127.0.0.1:55516', '', 0, 4, '', '', -1, 0, 0, 1, 0, '', 'epoch', false, 0, 0, 5, 0, 5, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, NULL, 50, 0, 75, 25, false, 1);
INSERT INTO "nodes"("id", "address", "last_net", "last_ip_port", "protocol", "type", "email", "wallet", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "uptime_success_count", "total_uptime_count", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "suspended", "audit_reputation_alpha", "audit_reputation_beta", "unknown_audit_reputation_alpha", "unknown_audit_reputation_beta", "exit_success", "online_score") VALUES (E'\\154\\313\\233\\074\\327\\177\\136\\070\\346\\002', '127.0.0.1:55516', '127.0.0.0', '127.0.0.1:55516', 0, 4, '', '', -1, 0, 0, 1, 0, '', 'epoch', false, 0, 0, 5, 0, 5, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, NULL, 50, 0, 75, 25, false, 1);
INSERT INTO "nodes"("id", "address", "last_net", "protocol", "type", "email", "wallet", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "suspended", "audit_reputation_alpha", "audit_reputation_beta", "unknown_audit_reputation_alpha", "unknown_audit_reputation_beta", "exit_success", "online_score") VALUES (E'\\363\\341\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\016', '127.0.0.1:55516', '', 0, 4, '', '', -1, 0, 0, 1, 0, '', 'epoch', false, 0, 0, 5, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, NULL, 50, 0, 1, 0, false, 1);
INSERT INTO "nodes"("id", "address", "last_net", "protocol", "type", "email", "wallet", "wallet_features", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "suspended", "audit_reputation_alpha", "audit_reputation_beta", "unknown_audit_reputation_alpha", "unknown_audit_reputation_beta", "exit_success", "online_score") VALUES (E'\\362\\341\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\016', '127.0.0.1:55516', '', 0, 4, '', '', '', -1, 0, 0, 1, 0, '', 'epoch', false, 0, 0, 5, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, NULL, 50, 0, 1, 0, false, 1);
INSERT INTO "nodes"("id", "address", "last_net", "protocol", "type", "email", "wallet", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "uptime_success_count", "total_uptime_count", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "suspended", "audit_reputation_alpha", "audit_reputation_beta", "unknown_audit_reputation_alpha", "unknown_audit_reputation_beta", "uptime_reputation_alpha", "uptime_reputation_beta", "exit_success", "online_score") VALUES (E'\\153\\313\\233\\074\\327\\177\\136\\070\\346\\001', '127.0.0.1:55516', '', 0, 4, '', '', -1, 0, 0, 1, 0, '', 'epoch', false, 0, 0, 5, 0, 5, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, NULL, 50, 0, 1, 0, 100, 5, false, 1);
INSERT INTO "nodes"("id", "address", "last_net", "protocol", "type", "email", "wallet", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "uptime_success_count", "total_uptime_count", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "suspended", "audit_reputation_alpha", "audit_reputation_beta", "unknown_audit_reputation_alpha", "unknown_audit_reputation_beta", "uptime_reputation_alpha", "uptime_reputation_beta", "exit_success", "online_score") VALUES (E'\\006\\223\\250R\\221\\005\\365\\377v>0\\266\\365\\216\\255?\\347\\244\\371?2\\264\\262\\230\\007<\\001\\262\\263\\237\\247n', '127.0.0.1:55518', '', 0, 4, '', '', -1, 0, 0, 1, 0, '', 'epoch', false, 0, 0, 0, 3, 3, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, NULL, 50, 0, 1, 0, 100, 0, false, 1);
INSERT INTO "nodes"("id", "address", "last_net", "protocol", "type", "email", "wallet", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "uptime_success_count", "total_uptime_count", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "suspended", "audit_reputation_alpha", "audit_reputation_beta", "unknown_audit_reputation_alpha", "unknown_audit_reputation_beta", "uptime_reputation_alpha", "uptime_reputation_beta", "exit_success", "online_score") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014', '127.0.0.1:55517', '', 0, 4, '', '', -1, 0, 0, 1, 0, '', 'epoch', false, 0, 0, 0, 0, 0, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, NULL, 50, 0, 1, 0, 100, 0, false, 1);
INSERT INTO "nodes"("id", "address", "last_net", "protocol", "type", "email", "wallet", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "uptime_success_count", "total_uptime_count", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "suspended", "audit_reputation_alpha", "audit_reputation_beta", "unknown_audit_reputation_alpha", "unknown_audit_reputation_beta", "uptime_reputation_alpha", "uptime_reputation_beta", "exit_success", "online_score") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\015', '127.0.0.1:55519', '', 0, 4, '', '', -1, 0, 0, 1, 0, '', 'epoch', false, 0, 1, 2, 1, 2, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, NULL, 50, 0, 1, 0, 100, 1, false, 1);
INSERT INTO "nodes"("id", "address", "last_net", "protocol", "type", "email", "wallet", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "uptime_success_count", "total_uptime_count", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "suspended", "audit_reputation_alpha", "audit_reputation_beta", "unknown_audit_reputation_alpha", "unknown_audit_reputation_beta", "uptime_reputation_alpha", "uptime_reputation_beta", "exit_success", "vetted_at", "online_score") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\016', '127.0.0.1:55520', '', 0, 4, '', '', -1, 0, 0, 1, 0, '', 'epoch', false, 0, 300, 400, 300, 400, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, NULL, 300, 0, 1, 0, 300, 100, false, '2020-03-18 12:00:00.000000+00', 1);
INSERT INTO "nodes"("id", "address", "last_net", "protocol", "type", "email", "wallet", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "uptime_success_count", "total_uptime_count", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "suspended", "audit_reputation_alpha", "audit_reputation_beta", "unknown_audit_reputation_alpha", "unknown_audit_reputation_beta", "uptime_reputation_alpha", "uptime_reputation_beta", "exit_success", "online_score") VALUES (E'\\154\\313\\233\\074\\327\\177\\136\\070\\346\\001', '127.0.0.1:55516', '', 0, 4, '', '', -1, 0, 0, 1, 0, '', 'epoch', false, 0, 0, 5, 0, 5, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, NULL, 50, 0, 75, 25, 100, 5, false, 1);
INSERT INTO "nodes"("id", "address", "last_net", "last_ip_port", "protocol", "type", "email", "wallet", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "uptime_success_count", "total_uptime_count", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "suspended", "audit_reputation_alpha", "audit_reputation_beta", "unknown_audit_reputation_alpha", "unknown_audit_reputation_beta", "uptime_reputation_alpha", "uptime_reputation_beta", "exit_success", "online_score") VALUES (E'\\154\\313\\233\\074\\327\\177\\136\\070\\346\\002', '127.0.0.1:55516', '127.0.0.0', '127.0.0.1:55516', 0, 4, '', '', -1, 0, 0, 1, 0, '', 'epoch', false, 0, 0, 5, 0, 5, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, NULL, 50, 0, 75, 25, 100, 5, false, 1);
INSERT INTO "nodes"("id", "address", "last_net", "protocol", "type", "email", "wallet", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "suspended", "audit_reputation_alpha", "audit_reputation_beta", "unknown_audit_reputation_alpha", "unknown_audit_reputation_beta", "uptime_reputation_alpha", "uptime_reputation_beta", "exit_success", "online_score") VALUES (E'\\363\\341\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\016', '127.0.0.1:55516', '', 0, 4, '', '', -1, 0, 0, 1, 0, '', 'epoch', false, 0, 0, 5, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, NULL, 50, 0, 1, 0, 100, 5, false, 1);
INSERT INTO "nodes"("id", "address", "last_net", "protocol", "type", "email", "wallet", "wallet_features", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "suspended", "audit_reputation_alpha", "audit_reputation_beta", "unknown_audit_reputation_alpha", "unknown_audit_reputation_beta", "uptime_reputation_alpha", "uptime_reputation_beta", "exit_success", "online_score") VALUES (E'\\362\\341\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\016', '127.0.0.1:55516', '', 0, 4, '', '', '', -1, 0, 0, 1, 0, '', 'epoch', false, 0, 0, 5, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, NULL, 50, 0, 1, 0, 100, 5, false, 1);
INSERT INTO "users"("id", "full_name", "short_name", "email", "normalized_email", "password_hash", "status", "partner_id", "created_at", "is_professional") VALUES (E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 'Noahson', 'William', '1email1@mail.test', '1EMAIL1@MAIL.TEST', E'some_readable_hash'::bytea, 1, NULL, '2019-02-14 08:28:24.614594+00', false);
INSERT INTO "projects"("id", "name", "description", "usage_limit", "bandwidth_limit", "max_buckets", "partner_id", "owner_id", "created_at") VALUES (E'\\022\\217/\\014\\376!K\\023\\276\\031\\311}m\\236\\205\\300'::bytea, 'ProjectName', 'projects description', NULL, NULL, NULL, NULL, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, '2019-02-14 08:28:24.254934+00');
@ -534,7 +535,7 @@ INSERT INTO "project_bandwidth_rollups"("project_id", "interval_month", egress_a
INSERT INTO "projects"("id", "name", "description", "usage_limit", "bandwidth_limit", "max_buckets","rate_limit", "partner_id", "owner_id", "created_at") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\345'::bytea, 'egress101', 'High Bandwidth Project', NULL, NULL, NULL, 2000000, NULL, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, '2020-05-15 08:46:24.000000+00');
INSERT INTO "storagenode_paystubs"("period", "node_id", "created_at", "codes", "usage_at_rest", "usage_get", "usage_put", "usage_get_repair", "usage_put_repair", "usage_get_audit", "comp_at_rest", "comp_get", "comp_put", "comp_get_repair", "comp_put_repair", "comp_get_audit", "surge_percent", "held", "owed", "disposed", "paid", "distributed") VALUES ('2020-01', '\xf2a3b4c4dfdf7221310382fd5db5aa73e1d227d6df09734ec4e5305000000000', '2020-04-07T20:14:21.479141Z', '', 1327959864508416, 294054066688, 159031363328, 226751, 0, 836608, 2861984, 5881081, 0, 226751, 0, 8, 300, 0, 26909472, 0, 26909472, 0);
INSERT INTO "nodes"("id", "address", "last_net", "protocol", "type", "email", "wallet", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "uptime_success_count", "total_uptime_count", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "suspended", "audit_reputation_alpha", "audit_reputation_beta", "unknown_audit_reputation_alpha", "unknown_audit_reputation_beta", "exit_success", "unknown_audit_suspended", "offline_suspended", "under_review") VALUES (E'\\153\\313\\233\\074\\327\\255\\136\\070\\346\\001', '127.0.0.1:55516', '', 0, 4, '', '', -1, 0, 0, 1, 0, '', 'epoch', false, 0, 0, 5, 0, 5, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, NULL, 50, 0, 1, 0, false, '2019-02-14 08:07:31.108963+00', '2019-02-14 08:07:31.108963+00', '2019-02-14 08:07:31.108963+00');
INSERT INTO "nodes"("id", "address", "last_net", "protocol", "type", "email", "wallet", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "uptime_success_count", "total_uptime_count", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "suspended", "audit_reputation_alpha", "audit_reputation_beta", "unknown_audit_reputation_alpha", "unknown_audit_reputation_beta", "uptime_reputation_alpha", "uptime_reputation_beta", "exit_success", "unknown_audit_suspended", "offline_suspended", "under_review") VALUES (E'\\153\\313\\233\\074\\327\\255\\136\\070\\346\\001', '127.0.0.1:55516', '', 0, 4, '', '', -1, 0, 0, 1, 0, '', 'epoch', false, 0, 0, 5, 0, 5, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, NULL, 50, 0, 1, 0, 100, 5, false, '2019-02-14 08:07:31.108963+00', '2019-02-14 08:07:31.108963+00', '2019-02-14 08:07:31.108963+00');
INSERT INTO "audit_histories" ("node_id", "history") VALUES (E'\\153\\313\\233\\074\\327\\177\\136\\070\\346\\001', '\x0a23736f2f6d616e792f69636f6e69632f70617468732f746f2f63686f6f73652f66726f6d120a0102030405060708090a');


@ -76,6 +76,12 @@ compensation.withheld-percents: 75,75,75,50,50,50,25,25,25,0,0,0,0,0,0
# secret used to sign auth tokens
# console.auth-token-secret: ""
# url link for beta satellite feedback
# console.beta-satellite-feedback-url: ""
# url link for beta satellite support
# console.beta-satellite-support-url: ""
# url link to contacts page
# console.contact-info-url: https://forum.storj.io


@ -18,7 +18,6 @@ import (
"storj.io/common/pb"
"storj.io/common/storj"
"storj.io/common/testcontext"
"storj.io/storj/private/date"
"storj.io/storj/private/testplanet"
"storj.io/storj/satellite"
"storj.io/storj/storagenode/payouts/estimatedpayouts"
@ -96,16 +95,14 @@ func TestStorageNodeApi(t *testing.T) {
})
require.NoError(t, err)
now2 := time.Now().UTC()
daysPerMonth := date.UTCEndOfMonth(now2).Day()
err = reputationdb.Store(ctx, reputation.Stats{
SatelliteID: satellite.ID(),
JoinedAt: now.AddDate(0, 0, -daysPerMonth+3),
JoinedAt: now.AddDate(0, -2, 0),
})
require.NoError(t, err)
t.Run("test EstimatedPayout", func(t *testing.T) {
t.Skip("disabled until flakiness fixed")
// should return estimated payout for both satellites in current month and empty for previous
url := fmt.Sprintf("%s/estimated-payout", baseURL)
@ -134,8 +131,8 @@ func TestStorageNodeApi(t *testing.T) {
require.NoError(t, err)
// round CurrentMonthExpectations to 2 decimal places to resolve precision issues
bodyPayout.CurrentMonthExpectations = math.Floor(bodyPayout.CurrentMonthExpectations*1000) / 1000
expectedPayout.CurrentMonthExpectations = math.Floor(expectedPayout.CurrentMonthExpectations*1000) / 1000
bodyPayout.CurrentMonthExpectations = math.Round(bodyPayout.CurrentMonthExpectations*100) / 100
expectedPayout.CurrentMonthExpectations = math.Round(expectedPayout.CurrentMonthExpectations*100) / 100
require.EqualValues(t, expectedPayout, bodyPayout)
})


@ -97,7 +97,7 @@ func (estimatedPayout *EstimatedPayout) Set(current, previous PayoutMonthly, now
return
}
estimatedPayout.CurrentMonthExpectations += estimatedPayout.CurrentMonth.Payout / daysSinceJoined * daysPerMonth
estimatedPayout.CurrentMonthExpectations += estimatedPayout.CurrentMonth.Payout / math.Round(daysSinceJoined) * daysPerMonth
}
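The change divides by math.Round(daysSinceJoined), so the extrapolation works in whole days. A self-contained illustration with made-up numbers:

	package main

	import (
		"fmt"
		"math"
	)

	func main() {
		payoutSoFar := 1.50     // payout accumulated so far this month, in dollars
		daysSinceJoined := 10.4 // fractional days since the node joined
		daysPerMonth := 30.0

		// Mirrors: CurrentMonthExpectations += CurrentMonth.Payout / math.Round(daysSinceJoined) * daysPerMonth
		expected := payoutSoFar / math.Round(daysSinceJoined) * daysPerMonth
		fmt.Printf("projected payout for the full month: $%.2f\n", expected) // $4.50
	}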
// Add adds estimate into the receiver.


@ -128,7 +128,7 @@ export class NodesClient {
*/
public async updateName(id: string, name: string): Promise<void> {
const path = `${this.ROOT_PATH}/${id}`;
const response = await this.http.patch(path, JSON.stringify({name: name}));
const response = await this.http.patch(path, JSON.stringify({ name }));
if (!response.ok) {
await this.handleError(response);


@ -2,9 +2,9 @@
// See LICENSE for copying information.
<template>
<div id="app">
<router-view/>
</div>
<div id="app">
<router-view/>
</div>
</template>
<script lang="ts">


@ -109,16 +109,5 @@ export default class AddNewNode extends Vue {
align-items: center;
justify-content: space-between;
}
&__item {
box-sizing: border-box;
padding: 16px;
cursor: pointer;
text-align: left;
&:hover {
background: var(--c-background);
}
}
}
</style>


@ -0,0 +1,145 @@
// Copyright (C) 2021 Storj Labs, Inc.
// See LICENSE for copying information.
<template>
<div class="update-name">
<div @click="openModal" class="update-name__button">Update Name</div>
<v-modal v-if="isModalShown" @close="closeModal">
<h2 slot="header">Set name for node</h2>
<div class="update-name__body" slot="body">
<div class="update-name__body__node-id-container">
<span>{{ nodeId }}</span>
</div>
<headered-input
class="update-name__body__input"
label="Displayed name"
placeholder="Name"
:error="nameError"
@setData="setNodeName"
/>
</div>
<div class="delete-node__footer" slot="footer">
<v-button label="Cancel" :is-white="true" width="205px" :on-press="closeModal" />
<v-button label="Set Name" width="205px" :on-press="onSetName"/>
</div>
</v-modal>
</div>
</template>
<script lang="ts">
import { Component, Prop, Vue } from 'vue-property-decorator';
import HeaderedInput from '@/app/components/common/HeaderedInput.vue';
import VButton from '@/app/components/common/VButton.vue';
import VModal from '@/app/components/common/VModal.vue';
import { UpdateNodeModel } from '@/nodes';
@Component({
components: {
VButton,
HeaderedInput,
VModal,
},
})
export default class AddNewNode extends Vue {
@Prop({default: ''})
public nodeId: string;
public nodeName: string = '';
private nameError: string = '';
public isModalShown: boolean = false;
private isLoading: boolean = false;
/**
* Sets node name field from value string.
*/
public setNodeName(value: string): void {
this.nodeName = value.trim();
this.nameError = '';
}
public openModal(): void {
this.isModalShown = true;
}
public closeModal(): void {
this.isLoading = false;
this.isModalShown = false;
}
public async onSetName(): Promise<void> {
if (this.isLoading) return;
if (!this.nodeName) {
this.nameError = 'This field is required. Please enter a valid node name';
return;
}
this.isLoading = true;
try {
await this.$store.dispatch('nodes/updateName', new UpdateNodeModel(this.nodeId, this.nodeName));
this.closeModal();
} catch (error) {
console.error(error.message);
this.isLoading = false;
}
}
}
</script>
<style lang="scss">
.update-name {
h2 {
margin: 0;
font-size: 32px;
}
&__button {
width: 100%;
box-sizing: border-box;
padding: 16px;
cursor: pointer;
text-align: left;
font-family: 'font_medium', sans-serif;
font-size: 14px;
color: var(--c-title);
&:hover {
background: var(--c-background);
}
}
&__body {
width: 441px;
&__node-id-container {
width: 100%;
box-sizing: border-box;
padding: 10px 12px;
font-family: 'font_regular', sans-serif;
font-size: 14px;
color: var(--c-title);
background: var(--c-background);
border-radius: 32px;
text-align: center;
}
&__input {
margin-top: 42px !important;
}
}
&__footer {
width: 460px;
display: flex;
align-items: center;
justify-content: space-between;
}
}
</style>


@ -24,6 +24,7 @@
<div class="node-item__options" v-if="areOptionsShown" v-click-outside="closeOptions">
<div @click.stop="() => onCopy(node.id)" class="node-item__options__item">Copy Node ID</div>
<delete-node :node-id="node.id" />
<update-name :node-id="node.id" />
</div>
</th>
</tr>
@ -33,6 +34,7 @@
import { Component, Prop, Vue } from 'vue-property-decorator';
import DeleteNode from '@/app/components/modals/DeleteNode.vue';
import UpdateName from '@/app/components/modals/UpdateName.vue';
import MoreIcon from '@/../static/images/icons/more.svg';
@ -40,6 +42,7 @@ import { Node } from '@/nodes';
@Component({
components: {
UpdateName,
DeleteNode,
MoreIcon,
},


@ -23,7 +23,7 @@
</tr>
</thead>
<tbody>
<NodeItem v-for="node in nodes" :key="node.id" :node="node" />
<node-item v-for="node in nodes" :key="node.id" :node="node" />
</tbody>
</table>
</template>


@ -4,7 +4,7 @@
import { ActionContext, ActionTree, GetterTree, Module, MutationTree } from 'vuex';
import { RootState } from '@/app/store/index';
import { CreateNodeFields, Node, NodeURL } from '@/nodes';
import { CreateNodeFields, Node, NodeURL, UpdateNodeModel } from '@/nodes';
import { Nodes } from '@/nodes/service';
/**
@ -44,6 +44,7 @@ export class NodesModule implements Module<NodesState, RootState> {
delete: this.delete.bind(this),
trustedSatellites: this.trustedSatellites.bind(this),
selectSatellite: this.selectSatellite.bind(this),
updateName: this.updateName.bind(this),
};
}
@ -103,6 +104,16 @@ export class NodesModule implements Module<NodesState, RootState> {
await this.fetch(ctx);
}
/**
* Updates the name of a node from the multinode list.
* @param ctx - context of the Vuex action.
* @param node - updated node info.
*/
public async updateName(ctx: ActionContext<NodesState, RootState>, node: UpdateNodeModel): Promise<void> {
await this.nodes.updateName(node.id, node.name);
await this.fetch(ctx);
}
/**
* retrieves list of trusted satellites node urls for a node.
* @param ctx - context of the Vuex action.


@ -60,3 +60,13 @@ export class NodeURL {
public address: string,
) {}
}
/**
* UpdateNodeModel defines a structure for updating node name.
*/
export class UpdateNodeModel {
public constructor(
public id: string,
public name: string,
) {}
}


@ -16,6 +16,8 @@
<meta name="project-limits-increase-request-url" content="{{ .ProjectLimitsIncreaseRequestURL }}">
<meta name="gateway-credentials-request-url" content="{{ .GatewayCredentialsRequestURL }}">
<meta name="is-beta-satellite" content="{{ .IsBetaSatellite }}">
<meta name="beta-satellite-feedback-url" content="{{ .BetaSatelliteFeedbackURL }}">
<meta name="beta-satellite-support-url" content="{{ .BetaSatelliteSupportURL }}">
<title>{{ .SatelliteName }}</title>
<link rel="shortcut icon" href="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAACAAAAAgCAYAAABzenr0AAAACXBIWXMAABYlAAAWJQFJUiTwAAAAAXNSR0IArs4c6QAAAARnQU1BAACxjwv8YQUAAAL8SURBVHgBxVcxTFNRFL01JvylrRsmNWETWWSybg5I3NTQBCdRFyfrDuoKMgMyOQgdTcDAJmrUrWWCgQIbhJKy0TJQpsc73P/S9/tfP+8/GnqSn1fo77/3nnPufe8nhAR1ETeoy7hJjpj93/ycu0+UuUVuEA5Y2xaifirEVpX/nvknnBFbgpMGUfmIKOkRLRT578oxXy6IJcFCialH0EyaaPoZBy7tEQ3NEY1IKd4/iidHwqYLijLA559cuY6dT0RjBU5AAYm9fiivLFnBKMGBTyeqQ4BXhXDwdqjUiKZkskOzREsbzeeBNRMCEiDgr12uYl1WNbnW/oc2iUys8jrQyyxhHRkM3hdgAMFBHQyGG/GDqyDlsSeS/npQC99jlEBpOnyX2XCF8sGhZLbeMLMZkCDbJ1nYYTfDeMP9fMH5y5vmIKYE8RxUjBXPedDH1Zu6I9QFSzLQxErz4Xn5oNwg+2NSmuv3Lkvz4QlTi8rupDlBmA6tqQLrnYNCvoxSNAOtUEaakwzMv+ALidTP2OlKKiSK75Cs6hy9NYFkjzmG1SBCIuUq0Za8pgydge8R9E+e10qNrGE1ikH5435mo11bQgr4B9LEgVUC0Npm1o+vcuvBxB1NYFsaaeC2XUuW/Xs7msC9Xqa+MMa9jQr1KtXAQoKYHakeskbIhDrVasdTbbVY4s8ZYld/9PWuyeTSHksFBjBFcZ+aH/j/yZk5gcAcgImgIX6MNsKKhKBta1sB2A3HV5pD6iJQIzw/MICwoohc1F6ALBH03XemFYPl+VdzcBNUh6j5gZZEcP341opAAnX/AXl/A0FlrrshgMRR+YUvPPN8CHgAxlqWVYuEdH7V/ZilA6cosFDa53EcmUDKC+7X+IwxHEVhO0DK6aeXH88uHcWQA8xE7Yg69M6xgdWZUEFtNNDyx1s2KnyDIxu22zdZTjgWhANm/vL6clGIsnw3+Fbk94RreS8AMGrBxvwoT0lMPnSNC2JJoAPdgnMBJLjKq5lzAp1C19+OzwFiYzAU5f7eeQAAAABJRU5ErkJggg==" type="image/x-icon">
<link rel="dns-prefetch" href="https://js.stripe.com">


@ -8,7 +8,10 @@
</div>
<div v-if="isBetaSatellite" class="dashboard__beta-banner">
<p class="dashboard__beta-banner__message">
Please be aware that this is a beta satellite. Data uploaded may be deleted at any point in time.
Thanks for testing the {{satelliteName}} Beta satellite | Data may be deleted during this beta | Submit testing feedback
<a class="dashboard__beta-banner__message__link" :href="betaFeedbackURL" target="_blank" rel="noopener noreferrer">here</a>
| Request support
<a class="dashboard__beta-banner__message__link" :href="betaSupportURL" target="_blank" rel="noopener noreferrer">here</a>
</p>
</div>
<NoPaywallInfoBar v-if="isNoPaywallInfoBarShown && !isLoading && !isBetaSatellite"/>
@ -228,6 +231,27 @@ export default class DashboardArea extends Vue {
await this.$store.dispatch(APP_STATE_ACTIONS.CHANGE_STATE, AppState.LOADED);
}
/**
* Returns satellite name from store (config).
*/
public get satelliteName(): string {
return MetaUtils.getMetaContent('satellite-name');
}
/**
* Returns feedback URL from config for beta satellites.
*/
public get betaFeedbackURL(): string {
return MetaUtils.getMetaContent('beta-satellite-feedback-url');
}
/**
* Returns support URL from config for beta satellites.
*/
public get betaSupportURL(): string {
return MetaUtils.getMetaContent('beta-satellite-support-url');
}
/**
* Indicates if no paywall info bar is shown.
*/
@ -405,8 +429,19 @@ export default class DashboardArea extends Vue {
&__message {
font-weight: normal;
font-size: 14px;
line-height: 12px;
line-height: 16px;
color: #fff;
&__link {
font-size: 14px;
line-height: 16px;
color: #fff;
text-decoration: underline;
&:hover {
text-decoration: none;
}
}
}
}