all: reformat comments as required by gofmt 1.19

I don't know why the go people thought this was a good idea, because
this automatic reformatting is bound to do the wrong thing sometimes,
which is very annoying. But I don't see a way to turn it off, so best to
get this change out of the way.

Change-Id: Ib5dbbca6a6f6fc944d76c9b511b8c904f796e4f3
This commit is contained in:
paul cannon 2022-08-10 10:35:58 -05:00 committed by Storj Robot
parent 0550731598
commit 37a4edbaff
16 changed files with 134 additions and 138 deletions

View File

@ -18,8 +18,9 @@ var initialized = false
const padding = 2 const padding = 2
// Point is a 2D coordinate in console. // Point is a 2D coordinate in console.
// X is the column //
// Y is the row // X is the column
// Y is the row
type Point struct{ X, Y int } type Point struct{ X, Y int }
// Rect is a 2D rectangle in console, excluding Max edge. // Rect is a 2D rectangle in console, excluding Max edge.

View File

@ -5,13 +5,13 @@
// //
// It allows to set an environment variable to get a trace per test. // It allows to set an environment variable to get a trace per test.
// //
// STORJ_TEST_MONKIT=svg // STORJ_TEST_MONKIT=svg
// STORJ_TEST_MONKIT=json // STORJ_TEST_MONKIT=json
// //
// By default, it saves the output in the same folder as the test. However, if you wish // By default, it saves the output in the same folder as the test. However, if you wish
// to specify a separate folder, you can specify an absolute directory: // to specify a separate folder, you can specify an absolute directory:
// //
// STORJ_TEST_MONKIT=json,svg,dir=/home/user/debug/trace // STORJ_TEST_MONKIT=json,svg,dir=/home/user/debug/trace
// //
// Note, due to how go tests work, it's not possible to specify a relative directory. // Note, due to how go tests work, it's not possible to specify a relative directory.
package testmonkit package testmonkit

View File

@ -6,38 +6,35 @@
// testplanet provides access to most of the internals of satellites, // testplanet provides access to most of the internals of satellites,
// storagenodes and uplinks. // storagenodes and uplinks.
// //
// // # Database
// Database
// //
// It does require setting two variables for the databases: // It does require setting two variables for the databases:
// //
// STORJ_TEST_POSTGRES=postgres://storj:storj-pass@test-postgres/teststorj?sslmode=disable // STORJ_TEST_POSTGRES=postgres://storj:storj-pass@test-postgres/teststorj?sslmode=disable
// STORJ_TEST_COCKROACH=cockroach://root@localhost:26257/master?sslmode=disable // STORJ_TEST_COCKROACH=cockroach://root@localhost:26257/master?sslmode=disable
// //
// When you wish to entirely omit either of them from the test output, it's possible to use: // When you wish to entirely omit either of them from the test output, it's possible to use:
// //
// STORJ_TEST_POSTGRES=omit // STORJ_TEST_POSTGRES=omit
// STORJ_TEST_COCKROACH=omit // STORJ_TEST_COCKROACH=omit
// //
// // # Host
// Host
// //
// It's possible to change the listing host with: // It's possible to change the listing host with:
// //
// STORJ_TEST_HOST=127.0.0.2;127.0.0.3 // STORJ_TEST_HOST=127.0.0.2;127.0.0.3
// //
// // # Debugging
// Debugging
// //
// For debugging, it's possible to set STORJ_TEST_MONKIT to get a trace per test. // For debugging, it's possible to set STORJ_TEST_MONKIT to get a trace per test.
// //
// STORJ_TEST_MONKIT=svg // STORJ_TEST_MONKIT=svg
// STORJ_TEST_MONKIT=json // STORJ_TEST_MONKIT=json
// //
// By default, it saves the output in the same folder as the test. However, if you wish // By default, it saves the output in the same folder as the test. However, if you wish
// to specify a separate folder, you can specify an absolute directory: // to specify a separate folder, you can specify an absolute directory:
// //
// STORJ_TEST_MONKIT=svg,dir=/home/user/debug/trace // STORJ_TEST_MONKIT=svg,dir=/home/user/debug/trace
// //
// Note, due to how go tests work, it's not possible to specify a relative directory. // Note, due to how go tests work, it's not possible to specify a relative directory.
package testplanet package testplanet

View File

@ -26,9 +26,9 @@ import (
// - create a audit observer and call metaloop.Join(auditObs) // - create a audit observer and call metaloop.Join(auditObs)
// //
// Then for every node in testplanet: // Then for every node in testplanet:
// - expect that there is a reservoir for that node on the audit observer // - expect that there is a reservoir for that node on the audit observer
// - that the reservoir size is <= 2 (the maxReservoirSize) // - that the reservoir size is <= 2 (the maxReservoirSize)
// - that every item in the reservoir is unique // - that every item in the reservoir is unique
func TestAuditCollector(t *testing.T) { func TestAuditCollector(t *testing.T) {
testplanet.Run(t, testplanet.Config{ testplanet.Run(t, testplanet.Config{
SatelliteCount: 1, StorageNodeCount: 5, UplinkCount: 1, SatelliteCount: 1, StorageNodeCount: 5, UplinkCount: 1,

View File

@ -27,7 +27,7 @@ import (
// TestDisqualificationTooManyFailedAudits does the following: // TestDisqualificationTooManyFailedAudits does the following:
// - Create a failed audit report for a storagenode // - Create a failed audit report for a storagenode
// - Record the audit report several times and check that the node isn't // - Record the audit report several times and check that the node isn't
// disqualified until the audit reputation reaches the cut-off value. // disqualified until the audit reputation reaches the cut-off value.
func TestDisqualificationTooManyFailedAudits(t *testing.T) { func TestDisqualificationTooManyFailedAudits(t *testing.T) {
var ( var (
auditDQCutOff = 0.4 auditDQCutOff = 0.4

View File

@ -13,9 +13,10 @@ import (
// TotalAmounts holds the amounts held and disposed. // TotalAmounts holds the amounts held and disposed.
// //
// Invariants: // Invariants:
// TotalHeld >= TotalDisposed //
// TotalPaid >= TotalDisposed // TotalHeld >= TotalDisposed
// TotalPaid >= TotalDistributed (we may distribute less due to minimum payout threshold) // TotalPaid >= TotalDisposed
// TotalPaid >= TotalDistributed (we may distribute less due to minimum payout threshold)
type TotalAmounts struct { type TotalAmounts struct {
TotalHeld currency.MicroUnit // portion from owed that was held back TotalHeld currency.MicroUnit // portion from owed that was held back
TotalDisposed currency.MicroUnit // portion from held back that went into paid TotalDisposed currency.MicroUnit // portion from held back that went into paid

View File

@ -25,8 +25,8 @@ type Object RawObject
// IsMigrated returns whether the object comes from PointerDB. // IsMigrated returns whether the object comes from PointerDB.
// Pointer objects are special that they are missing some information. // Pointer objects are special that they are missing some information.
// //
// * TotalPlainSize = 0 and FixedSegmentSize = 0. // - TotalPlainSize = 0 and FixedSegmentSize = 0.
// * Segment.PlainOffset = 0, Segment.PlainSize = 0 // - Segment.PlainOffset = 0, Segment.PlainSize = 0
func (obj *Object) IsMigrated() bool { func (obj *Object) IsMigrated() bool {
return obj.TotalPlainSize <= 0 return obj.TotalPlainSize <= 0
} }

View File

@ -88,12 +88,12 @@ func (a *MacaroonAccessGenerate) apiKeyForProject(ctx context.Context, data *oau
// Token issues access and refresh tokens that are backed by storj's Macaroons. This expects several scopes to be set on // Token issues access and refresh tokens that are backed by storj's Macaroons. This expects several scopes to be set on
// the request. The following describes the available scopes supported by the macaroon style of access token. // the request. The following describes the available scopes supported by the macaroon style of access token.
// //
// project:<projectId> - required, scopes operations to a single project (one) // project:<projectId> - required, scopes operations to a single project (one)
// bucket:<name> - optional, scopes operations to one or many buckets (repeatable) // bucket:<name> - optional, scopes operations to one or many buckets (repeatable)
// object:list - optional, allows listing object data // object:list - optional, allows listing object data
// object:read - optional, allows reading object data // object:read - optional, allows reading object data
// object:write - optional, allows writing object data // object:write - optional, allows writing object data
// object:delete - optional, allows deleting object data // object:delete - optional, allows deleting object data
// //
// In OAuth2.0, access_tokens are short-lived tokens that authorize operations to be performed on behalf of an end user. // In OAuth2.0, access_tokens are short-lived tokens that authorize operations to be performed on behalf of an end user.
// refresh_tokens are longer lived tokens that allow you to obtain new authorization tokens. // refresh_tokens are longer lived tokens that allow you to obtain new authorization tokens.

View File

@ -130,7 +130,7 @@ func AmountFromBaseUnits(units int64, currency *Currency) Amount {
// //
// Example: // Example:
// //
// AmountFromDecimal(decimal.NewFromFloat(3.50), USDollars) == Amount{baseUnits: 350, currency: USDollars} // AmountFromDecimal(decimal.NewFromFloat(3.50), USDollars) == Amount{baseUnits: 350, currency: USDollars}
func AmountFromDecimal(d decimal.Decimal, currency *Currency) Amount { func AmountFromDecimal(d decimal.Decimal, currency *Currency) Amount {
return AmountFromBaseUnits(d.Shift(currency.decimalPlaces).Round(0).IntPart(), currency) return AmountFromBaseUnits(d.Shift(currency.decimalPlaces).Round(0).IntPart(), currency)
} }

View File

@ -18,9 +18,9 @@ import "math"
// First, we calculate the expected number of iterations for a segment to // First, we calculate the expected number of iterations for a segment to
// survive if we were to lose exactly one node every iteration: // survive if we were to lose exactly one node every iteration:
// //
// r = numHealthy - minPieces + 1 // r = numHealthy - minPieces + 1
// p = (totalNodes - numHealthy) / totalNodes // p = (totalNodes - numHealthy) / totalNodes
// X ~ NB(r, p) // X ~ NB(r, p)
// //
// Then we take the mean of that distribution to use as our expected value, // Then we take the mean of that distribution to use as our expected value,
// which is pr/(1-p). // which is pr/(1-p).

View File

@ -42,13 +42,13 @@ import (
) )
// TestDataRepair does the following: // TestDataRepair does the following:
// - Uploads test data // - Uploads test data
// - Kills some nodes and disqualifies 1 // - Kills some nodes and disqualifies 1
// - Triggers data repair, which repairs the data from the remaining nodes to // - Triggers data repair, which repairs the data from the remaining nodes to
// the numbers of nodes determined by the upload repair max threshold // the numbers of nodes determined by the upload repair max threshold
// - Shuts down several nodes, but keeping up a number equal to the minimum // - Shuts down several nodes, but keeping up a number equal to the minimum
// threshold // threshold
// - Downloads the data from those left nodes and checks that it's the same as the uploaded one. // - Downloads the data from those left nodes and checks that it's the same as the uploaded one.
func TestDataRepairInMemory(t *testing.T) { func TestDataRepairInMemory(t *testing.T) {
testDataRepair(t, true) testDataRepair(t, true)
} }
@ -204,14 +204,14 @@ func testDataRepair(t *testing.T, inMemoryRepair bool) {
} }
// TestDataRepairPendingObject does the following: // TestDataRepairPendingObject does the following:
// - Starts new multipart upload with one part of test data. Does not complete the multipart upload. // - Starts new multipart upload with one part of test data. Does not complete the multipart upload.
// - Kills some nodes and disqualifies 1 // - Kills some nodes and disqualifies 1
// - Triggers data repair, which repairs the data from the remaining nodes to // - Triggers data repair, which repairs the data from the remaining nodes to
// the numbers of nodes determined by the upload repair max threshold // the numbers of nodes determined by the upload repair max threshold
// - Shuts down several nodes, but keeping up a number equal to the minimum // - Shuts down several nodes, but keeping up a number equal to the minimum
// threshold // threshold
// - Completes the multipart upload. // - Completes the multipart upload.
// - Downloads the data from those left nodes and checks that it's the same as the uploaded one. // - Downloads the data from those left nodes and checks that it's the same as the uploaded one.
func TestDataRepairPendingObject(t *testing.T) { func TestDataRepairPendingObject(t *testing.T) {
const ( const (
RepairMaxExcessRateOptimalThreshold = 0.05 RepairMaxExcessRateOptimalThreshold = 0.05
@ -351,12 +351,12 @@ func TestDataRepairPendingObject(t *testing.T) {
} }
// TestMinRequiredDataRepair does the following: // TestMinRequiredDataRepair does the following:
// - Uploads test data // - Uploads test data
// - Kills all but the minimum number of nodes carrying the uploaded segment // - Kills all but the minimum number of nodes carrying the uploaded segment
// - Triggers data repair, which attempts to repair the data from the remaining nodes to // - Triggers data repair, which attempts to repair the data from the remaining nodes to
// the numbers of nodes determined by the upload repair max threshold // the numbers of nodes determined by the upload repair max threshold
// - Expects that the repair succeeds. // - Expects that the repair succeeds.
// Reputation info to be updated for all remaining nodes. // Reputation info to be updated for all remaining nodes.
func TestMinRequiredDataRepair(t *testing.T) { func TestMinRequiredDataRepair(t *testing.T) {
const RepairMaxExcessRateOptimalThreshold = 0.05 const RepairMaxExcessRateOptimalThreshold = 0.05
@ -453,14 +453,14 @@ func TestMinRequiredDataRepair(t *testing.T) {
} }
// TestFailedDataRepair does the following: // TestFailedDataRepair does the following:
// - Uploads test data // - Uploads test data
// - Kills some nodes carrying the uploaded segment but keep it above minimum requirement // - Kills some nodes carrying the uploaded segment but keep it above minimum requirement
// - On one of the remaining nodes, return unknown error during downloading of the piece // - On one of the remaining nodes, return unknown error during downloading of the piece
// - Stop one of the remaining nodes, for it to be offline during repair // - Stop one of the remaining nodes, for it to be offline during repair
// - Triggers data repair, which attempts to repair the data from the remaining nodes to // - Triggers data repair, which attempts to repair the data from the remaining nodes to
// the numbers of nodes determined by the upload repair max threshold // the numbers of nodes determined by the upload repair max threshold
// - Expects that the repair failed and the pointer was not updated. // - Expects that the repair failed and the pointer was not updated.
// Reputation info to be updated for all remaining nodes. // Reputation info to be updated for all remaining nodes.
func TestFailedDataRepair(t *testing.T) { func TestFailedDataRepair(t *testing.T) {
const RepairMaxExcessRateOptimalThreshold = 0.05 const RepairMaxExcessRateOptimalThreshold = 0.05
@ -574,13 +574,13 @@ func TestFailedDataRepair(t *testing.T) {
} }
// TestOfflineNodeDataRepair does the following: // TestOfflineNodeDataRepair does the following:
// - Uploads test data // - Uploads test data
// - Kills some nodes carrying the uploaded segment but keep it above minimum requirement // - Kills some nodes carrying the uploaded segment but keep it above minimum requirement
// - Stop one of the remaining nodes, for it to be offline during repair // - Stop one of the remaining nodes, for it to be offline during repair
// - Triggers data repair, which attempts to repair the data from the remaining nodes to // - Triggers data repair, which attempts to repair the data from the remaining nodes to
// the numbers of nodes determined by the upload repair max threshold // the numbers of nodes determined by the upload repair max threshold
// - Expects that the repair succeeds and the pointer should contain the offline piece. // - Expects that the repair succeeds and the pointer should contain the offline piece.
// Reputation info to be updated for all remaining nodes. // Reputation info to be updated for all remaining nodes.
func TestOfflineNodeDataRepair(t *testing.T) { func TestOfflineNodeDataRepair(t *testing.T) {
const RepairMaxExcessRateOptimalThreshold = 0.05 const RepairMaxExcessRateOptimalThreshold = 0.05
@ -691,13 +691,13 @@ func TestOfflineNodeDataRepair(t *testing.T) {
} }
// TestUnknownErrorDataRepair does the following: // TestUnknownErrorDataRepair does the following:
// - Uploads test data // - Uploads test data
// - Kills some nodes carrying the uploaded segment but keep it above minimum requirement // - Kills some nodes carrying the uploaded segment but keep it above minimum requirement
// - On one of the remaining nodes, return unknown error during downloading of the piece // - On one of the remaining nodes, return unknown error during downloading of the piece
// - Triggers data repair, which attempts to repair the data from the remaining nodes to // - Triggers data repair, which attempts to repair the data from the remaining nodes to
// the numbers of nodes determined by the upload repair max threshold // the numbers of nodes determined by the upload repair max threshold
// - Expects that the repair succeeds and the pointer should contain the unknown piece. // - Expects that the repair succeeds and the pointer should contain the unknown piece.
// Reputation info to be updated for all remaining nodes. // Reputation info to be updated for all remaining nodes.
func TestUnknownErrorDataRepair(t *testing.T) { func TestUnknownErrorDataRepair(t *testing.T) {
const RepairMaxExcessRateOptimalThreshold = 0.05 const RepairMaxExcessRateOptimalThreshold = 0.05
@ -813,13 +813,13 @@ func TestUnknownErrorDataRepair(t *testing.T) {
} }
// TestMissingPieceDataRepair_Succeed does the following: // TestMissingPieceDataRepair_Succeed does the following:
// - Uploads test data // - Uploads test data
// - Kills some nodes carrying the uploaded segment but keep it above minimum requirement // - Kills some nodes carrying the uploaded segment but keep it above minimum requirement
// - On one of the remaining nodes, delete the piece data being stored by that node // - On one of the remaining nodes, delete the piece data being stored by that node
// - Triggers data repair, which attempts to repair the data from the remaining nodes to // - Triggers data repair, which attempts to repair the data from the remaining nodes to
// the numbers of nodes determined by the upload repair max threshold // the numbers of nodes determined by the upload repair max threshold
// - Expects that the repair succeeds and the pointer should not contain the missing piece. // - Expects that the repair succeeds and the pointer should not contain the missing piece.
// Reputation info to be updated for all remaining nodes. // Reputation info to be updated for all remaining nodes.
func TestMissingPieceDataRepair_Succeed(t *testing.T) { func TestMissingPieceDataRepair_Succeed(t *testing.T) {
const RepairMaxExcessRateOptimalThreshold = 0.05 const RepairMaxExcessRateOptimalThreshold = 0.05
@ -928,13 +928,13 @@ func TestMissingPieceDataRepair_Succeed(t *testing.T) {
} }
// TestMissingPieceDataRepair_Failed does the following: // TestMissingPieceDataRepair_Failed does the following:
// - Uploads test data // - Uploads test data
// - Kills all but the minimum number of nodes carrying the uploaded segment // - Kills all but the minimum number of nodes carrying the uploaded segment
// - On one of the remaining nodes, delete the piece data being stored by that node // - On one of the remaining nodes, delete the piece data being stored by that node
// - Triggers data repair, which attempts to repair the data from the remaining nodes to // - Triggers data repair, which attempts to repair the data from the remaining nodes to
// the numbers of nodes determined by the upload repair max threshold // the numbers of nodes determined by the upload repair max threshold
// - Expects that the repair failed and the pointer was not updated. // - Expects that the repair failed and the pointer was not updated.
// Reputation info to be updated for node missing the piece. // Reputation info to be updated for node missing the piece.
func TestMissingPieceDataRepair(t *testing.T) { func TestMissingPieceDataRepair(t *testing.T) {
const RepairMaxExcessRateOptimalThreshold = 0.05 const RepairMaxExcessRateOptimalThreshold = 0.05
@ -1043,13 +1043,13 @@ func TestMissingPieceDataRepair(t *testing.T) {
} }
// TestCorruptDataRepair_Succeed does the following: // TestCorruptDataRepair_Succeed does the following:
// - Uploads test data // - Uploads test data
// - Kills some nodes carrying the uploaded segment but keep it above minimum requirement // - Kills some nodes carrying the uploaded segment but keep it above minimum requirement
// - On one of the remaining nodes, corrupt the piece data being stored by that node // - On one of the remaining nodes, corrupt the piece data being stored by that node
// - Triggers data repair, which attempts to repair the data from the remaining nodes to // - Triggers data repair, which attempts to repair the data from the remaining nodes to
// the numbers of nodes determined by the upload repair max threshold // the numbers of nodes determined by the upload repair max threshold
// - Expects that the repair succeeds and the pointer should not contain the corrupted piece. // - Expects that the repair succeeds and the pointer should not contain the corrupted piece.
// Reputation info to be updated for all remaining nodes. // Reputation info to be updated for all remaining nodes.
func TestCorruptDataRepair_Succeed(t *testing.T) { func TestCorruptDataRepair_Succeed(t *testing.T) {
const RepairMaxExcessRateOptimalThreshold = 0.05 const RepairMaxExcessRateOptimalThreshold = 0.05
@ -1157,13 +1157,13 @@ func TestCorruptDataRepair_Succeed(t *testing.T) {
} }
// TestCorruptDataRepair_Failed does the following: // TestCorruptDataRepair_Failed does the following:
// - Uploads test data // - Uploads test data
// - Kills all but the minimum number of nodes carrying the uploaded segment // - Kills all but the minimum number of nodes carrying the uploaded segment
// - On one of the remaining nodes, corrupt the piece data being stored by that node // - On one of the remaining nodes, corrupt the piece data being stored by that node
// - Triggers data repair, which attempts to repair the data from the remaining nodes to // - Triggers data repair, which attempts to repair the data from the remaining nodes to
// the numbers of nodes determined by the upload repair max threshold // the numbers of nodes determined by the upload repair max threshold
// - Expects that the repair failed and the pointer was not updated. // - Expects that the repair failed and the pointer was not updated.
// Reputation info to be updated for corrupted node. // Reputation info to be updated for corrupted node.
func TestCorruptDataRepair_Failed(t *testing.T) { func TestCorruptDataRepair_Failed(t *testing.T) {
const RepairMaxExcessRateOptimalThreshold = 0.05 const RepairMaxExcessRateOptimalThreshold = 0.05
@ -1908,7 +1908,7 @@ func TestRepairMultipleDisqualifiedAndSuspended(t *testing.T) {
// - Uploads test data // - Uploads test data
// - Kills nodes to fall to the Repair Override Value of the checker but stays above the original Repair Threshold // - Kills nodes to fall to the Repair Override Value of the checker but stays above the original Repair Threshold
// - Triggers data repair, which attempts to repair the data from the remaining nodes to // - Triggers data repair, which attempts to repair the data from the remaining nodes to
// the numbers of nodes determined by the upload repair max threshold // the numbers of nodes determined by the upload repair max threshold
func TestDataRepairOverride_HigherLimit(t *testing.T) { func TestDataRepairOverride_HigherLimit(t *testing.T) {
const repairOverride = 6 const repairOverride = 6
@ -1995,7 +1995,7 @@ func TestDataRepairOverride_HigherLimit(t *testing.T) {
// - Starts Checker and Repairer and ensures this is the case. // - Starts Checker and Repairer and ensures this is the case.
// - Kills more nodes to fall to the Override Value to trigger repair // - Kills more nodes to fall to the Override Value to trigger repair
// - Triggers data repair, which attempts to repair the data from the remaining nodes to // - Triggers data repair, which attempts to repair the data from the remaining nodes to
// the numbers of nodes determined by the upload repair max threshold // the numbers of nodes determined by the upload repair max threshold
func TestDataRepairOverride_LowerLimit(t *testing.T) { func TestDataRepairOverride_LowerLimit(t *testing.T) {
const repairOverride = 4 const repairOverride = 4
@ -3076,9 +3076,9 @@ func TestSegmentInExcludedCountriesRepair(t *testing.T) {
// - run the checker and check the segment is in the repair queue // - run the checker and check the segment is in the repair queue
// - run the repairer // - run the repairer
// - check the segment has been repaired and that: // - check the segment has been repaired and that:
// - piece in excluded is still there // - piece in excluded is still there
// - piece held by offline node is not // - piece held by offline node is not
// - there are no duplicates // - there are no duplicates
func TestSegmentInExcludedCountriesRepairIrreparable(t *testing.T) { func TestSegmentInExcludedCountriesRepairIrreparable(t *testing.T) {
testplanet.Run(t, testplanet.Config{ testplanet.Run(t, testplanet.Config{
SatelliteCount: 1, SatelliteCount: 1,

View File

@ -25,13 +25,12 @@ func UpdateReputation(isSuccess bool, alpha, beta, lambda, w float64) (newAlpha,
// With the arguments as named, applies 'count' successful audits. To apply negative // With the arguments as named, applies 'count' successful audits. To apply negative
// audits, swap the alpha and beta parameters and return values. // audits, swap the alpha and beta parameters and return values.
// //
//
// WARNING: GREEK LETTER MATH AHEAD // WARNING: GREEK LETTER MATH AHEAD
// //
// Applying n successful audit results to an initial alpha value of α₀ gives a // Applying n successful audit results to an initial alpha value of α₀ gives a
// new α₁ value of: // new α₁ value of:
// //
// α₁ = λⁿα₀ + λⁿ⁻¹w + λⁿ⁻²w + ... + λ²w + λw + w // α₁ = λⁿα₀ + λⁿ⁻¹w + λⁿ⁻²w + ... + λ²w + λw + w
// //
// The terms with w are the first n terms of a geometric series with coefficient // The terms with w are the first n terms of a geometric series with coefficient
// w and common ratio λ. The closed form formula for the sum of those first n // w and common ratio λ. The closed form formula for the sum of those first n
@ -39,20 +38,19 @@ func UpdateReputation(isSuccess bool, alpha, beta, lambda, w float64) (newAlpha,
// (https://en.wikipedia.org/wiki/Geometric_series#Closed-form_formula). // (https://en.wikipedia.org/wiki/Geometric_series#Closed-form_formula).
// Adding the initial λⁿα₀ term, we get // Adding the initial λⁿα₀ term, we get
// //
// α₁ = λⁿα₀ + w(1-λⁿ) / (1-λ) // α₁ = λⁿα₀ + w(1-λⁿ) / (1-λ)
// //
// The formula has the same structure for beta for n _failures_. // The formula has the same structure for beta for n _failures_.
// //
// β₁ = λⁿβ₀ + w(1-λⁿ) / (1-λ) // β₁ = λⁿβ₀ + w(1-λⁿ) / (1-λ)
// //
// For n _failures_, // For n _failures_,
// //
// α₁ = λⁿα₀ // α₁ = λⁿα₀
// //
// For n _successes_, // For n _successes_,
// //
// β₁ = λⁿβ₀ // β₁ = λⁿβ₀
//
func UpdateReputationMultiple(count int, alpha, beta, lambda, w float64) (newAlpha, newBeta float64) { func UpdateReputationMultiple(count int, alpha, beta, lambda, w float64) (newAlpha, newBeta float64) {
if lambda == 1 { if lambda == 1 {
// special case: when the coefficient is 1, the closed-form formula is invalid // special case: when the coefficient is 1, the closed-form formula is invalid

View File

@ -224,14 +224,13 @@ func nodeSelectionCondition(ctx context.Context, criteria *overlay.NodeCriteria,
// partialQuery corresponds to a query. // partialQuery corresponds to a query.
// //
// distinct=false // distinct=false
// //
// $selection WHERE $condition ORDER BY $orderBy, RANDOM() LIMIT $limit // $selection WHERE $condition ORDER BY $orderBy, RANDOM() LIMIT $limit
// //
// distinct=true // distinct=true
//
// SELECT * FROM ($selection WHERE $condition ORDER BY $orderBy, RANDOM()) filtered ORDER BY RANDOM() LIMIT $limit
// //
// SELECT * FROM ($selection WHERE $condition ORDER BY $orderBy, RANDOM()) filtered ORDER BY RANDOM() LIMIT $limit
type partialQuery struct { type partialQuery struct {
selection string selection string
condition condition condition condition

View File

@ -728,11 +728,11 @@ func (db *ProjectAccounting) getSingleBucketRollup(ctx context.Context, projectI
// bytes), returns false for ok. // bytes), returns false for ok.
// //
// examples: prefixIncrement([]byte("abc")) -> ([]byte("abd", true) // examples: prefixIncrement([]byte("abc")) -> ([]byte("abd", true)
// prefixIncrement([]byte("ab\xff\xff")) -> ([]byte("ac", true)
// prefixIncrement([]byte("")) -> (nil, false)
// prefixIncrement([]byte("\x00")) -> ([]byte("\x01", true)
// prefixIncrement([]byte("\xff\xff\xff")) -> (nil, false)
// //
// prefixIncrement([]byte("ab\xff\xff")) -> ([]byte("ac", true)
// prefixIncrement([]byte("")) -> (nil, false)
// prefixIncrement([]byte("\x00")) -> ([]byte("\x01", true)
// prefixIncrement([]byte("\xff\xff\xff")) -> (nil, false)
func prefixIncrement(origPrefix []byte) (incremented []byte, ok bool) { func prefixIncrement(origPrefix []byte) (incremented []byte, ok bool) {
incremented = make([]byte, len(origPrefix)) incremented = make([]byte, len(origPrefix))
copy(incremented, origPrefix) copy(incremented, origPrefix)

View File

@ -44,10 +44,10 @@ func (reputations *reputations) Update(ctx context.Context, updateReq reputation
// The update is done in a loop to handle concurrent update calls and to avoid // The update is done in a loop to handle concurrent update calls and to avoid
// the need for an explicit transaction. // the need for an explicit transaction.
// There are three main steps that go into the update process: // There are three main steps that go into the update process:
// 1. Get existing row for the node // 1. Get existing row for the node
// a. if no row found, insert a new row. // (if no row found, insert a new row).
// 2. Evaluate what the new values for the row fields should be. // 2. Evaluate what the new values for the row fields should be.
// 3. Update row using compare-and-swap. // 3. Update row using compare-and-swap.
// //
// If the node (as represented in the returned info) becomes newly vetted, // If the node (as represented in the returned info) becomes newly vetted,
// disqualified, or suspended as a result of these updates, the caller is // disqualified, or suspended as a result of these updates, the caller is

View File

@ -71,10 +71,10 @@ func isReserved(s string) (schema string, ok bool) {
// reProbablySatelliteURL matches config strings that are (intended, but // reProbablySatelliteURL matches config strings that are (intended, but
// possibly misconfigured) satellite URLs, like the following: // possibly misconfigured) satellite URLs, like the following:
// //
// - @ // - @
// - id@ // - id@
// - host:9999 // - host:9999
// - id@host:9999 // - id@host:9999
var reProbablySatelliteURL = regexp.MustCompile(`@|(^[^/\\]{2,}:\d+$)`) var reProbablySatelliteURL = regexp.MustCompile(`@|(^[^/\\]{2,}:\d+$)`)
func isProbablySatelliteURL(s string) bool { func isProbablySatelliteURL(s string) bool {