all: golangci-lint v1.33.0 fixes (#3985)

Stefan Benten 2020-12-05 17:01:42 +01:00 committed by GitHub
parent 746315672f
commit 494bd5db81
67 changed files with 132 additions and 113 deletions
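Two patterns account for most of the diff below: doc comments gain a terminating period (the kind of issue the godot linter reports) and long function signatures are wrapped across several lines (as a line-length check such as lll requires). A handful of comments are also reworded where the sweep exposed stale or incomplete text.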

View File

@@ -17,7 +17,7 @@ var initialized = false
const padding = 2

-// Point is a 2D coordinate in console
+// Point is a 2D coordinate in console.
// X is the column
// Y is the row
type Point struct{ X, Y int }

View File

@@ -22,7 +22,7 @@ type Config struct {
	CheckInterval time.Duration `help:"Interval to check the version" default:"0h15m0s"`
}

-// Service contains the information and variables to ensure the Software is up to date
+// Service contains the information and variables to ensure the Software is up to date.
//
// architecture: Service
type Service struct {

View File

@@ -138,7 +138,7 @@ type BucketUsageRollup struct {
	Before time.Time
}

-// StoragenodeAccounting stores information about bandwidth and storage usage for storage nodes
+// StoragenodeAccounting stores information about bandwidth and storage usage for storage nodes.
//
// architecture: Database
type StoragenodeAccounting interface {
@@ -164,7 +164,7 @@ type StoragenodeAccounting interface {
	DeleteTalliesBefore(ctx context.Context, latestRollup time.Time) error
}

-// ProjectAccounting stores information about bandwidth and storage usage for projects
+// ProjectAccounting stores information about bandwidth and storage usage for projects.
//
// architecture: Database
type ProjectAccounting interface {

View File

@@ -21,7 +21,7 @@ type Config struct {
	DeleteTallies bool `help:"option for deleting tallies after they are rolled up" default:"true"`
}

-// Service is the rollup service for totalling data on storage nodes on daily intervals
+// Service is the rollup service for totalling data on storage nodes on daily intervals.
//
// architecture: Chore
type Service struct {

View File

@@ -32,7 +32,7 @@ type Config struct {
	ReadRollupBatchSize int `help:"how large of batches GetBandwidthSince should process at a time" default:"10000"`
}

-// Service is the tally service for data stored on each storage node
+// Service is the tally service for data stored on each storage node.
//
// architecture: Chore
type Service struct {

View File

@@ -24,7 +24,7 @@ import (
	"storj.io/storj/satellite/payments/stripecoinpayments"
)

-// Admin is the satellite core process that runs chores
+// Admin is the satellite core process that runs chores.
//
// architecture: Peer
type Admin struct {

View File

@@ -54,7 +54,7 @@ import (
	"storj.io/storj/satellite/snopayout"
)

-// API is the satellite API process
+// API is the satellite API process.
//
// architecture: Peer
type API struct {

View File

@@ -34,7 +34,7 @@ type CSVRow struct {
	EgressData int64
}

-// DB implements the database for value attribution table
+// DB implements the database for value attribution table.
//
// architecture: Database
type DB interface {

View File

@@ -34,7 +34,7 @@ type PendingAudit struct {
	Path storj.Path
}

-// Containment holds information about pending audits for contained nodes
+// Containment holds information about pending audits for contained nodes.
//
// architecture: Database
type Containment interface {

View File

@@ -25,9 +25,9 @@ import (
)

// TestDisqualificationTooManyFailedAudits does the following:
-// * Create a failed audit report for a storagenode
-// * Record the audit report several times and check that the node isn't
+// - Create a failed audit report for a storagenode
+// - Record the audit report several times and check that the node isn't
//   disqualified until the audit reputation reaches the cut-off value.
func TestDisqualificationTooManyFailedAudits(t *testing.T) {
	var (
		auditDQCutOff = 0.4

View File

@@ -13,7 +13,7 @@ import (
var _ metainfo.Observer = (*PathCollector)(nil)

-// PathCollector uses the metainfo loop to add paths to node reservoirs
+// PathCollector uses the metainfo loop to add paths to node reservoirs.
//
// architecture: Observer
type PathCollector struct {

View File

@@ -13,7 +13,7 @@ import (
	"storj.io/storj/satellite/overlay"
)

-// Reporter records audit reports in overlay and implements the reporter interface
+// Reporter records audit reports in overlay and implements the reporter interface.
//
// architecture: Service
type Reporter struct {

View File

@@ -50,7 +50,7 @@ type Share struct {
	Data []byte
}

-// Verifier helps verify the correctness of a given stripe
+// Verifier helps verify the correctness of a given stripe.
//
// architecture: Worker
type Verifier struct {

View File

@@ -10,7 +10,7 @@ import (
	"storj.io/common/uuid"
)

-// APIKeys is interface for working with api keys store
+// APIKeys is interface for working with api keys store.
//
// architecture: Database
type APIKeys interface {

View File

@@ -87,7 +87,7 @@ type Config struct {
	console.Config
}

-// Server represents console web server
+// Server represents console web server.
//
// architecture: Endpoint
type Server struct {

View File

@@ -15,7 +15,7 @@ import (
	"storj.io/common/uuid"
)

-// RegistrationTokens is interface for working with registration tokens
+// RegistrationTokens is interface for working with registration tokens.
//
// architecture: Database
type RegistrationTokens interface {

View File

@@ -14,7 +14,7 @@ import (
	"storj.io/common/uuid"
)

-// ResetPasswordTokens is interface for working with reset password tokens
+// ResetPasswordTokens is interface for working with reset password tokens.
//
// architecture: Database
type ResetPasswordTokens interface {

View File

@@ -76,7 +76,7 @@ var (
	ErrEmailUsed = errs.Class("email used")
)

-// Service is handling accounts related logic
+// Service is handling accounts related logic.
//
// architecture: Service
type Service struct {

View File

@@ -17,7 +17,7 @@ import (
// NoCreditForUpdateErr is a error message used when no credits are found for update when new users sign up.
var NoCreditForUpdateErr = errs.Class("no credit found to update")

-// UserCredits holds information to interact with database
+// UserCredits holds information to interact with database.
//
// architecture: Database
type UserCredits interface {

View File

@@ -44,7 +44,7 @@ import (
	"storj.io/storj/satellite/repair/checker"
)

-// Core is the satellite core process that runs chores
+// Core is the satellite core process that runs chores.
//
// architecture: Peer
type Core struct {

View File

@@ -28,7 +28,7 @@ import (
	"storj.io/storj/satellite/overlay"
)

-// GarbageCollection is the satellite garbage collection process
+// GarbageCollection is the satellite garbage collection process.
//
// architecture: Peer
type GarbageCollection struct {

View File

@@ -17,7 +17,7 @@ import (
var _ metainfo.Observer = (*PieceTracker)(nil)

-// PieceTracker implements the metainfo loop observer interface for garbage collection
+// PieceTracker implements the metainfo loop observer interface for garbage collection.
//
// architecture: Observer
type PieceTracker struct {

View File

@@ -33,6 +33,7 @@ type Config struct {
	Enabled bool `help:"set if garbage collection is enabled or not" releaseDefault:"true" devDefault:"true"`
	SkipFirst bool `help:"if true, skip the first run of GC" releaseDefault:"true" devDefault:"false"`
	RunInCore bool `help:"if true, run garbage collection as part of the core" releaseDefault:"false" devDefault:"false"`
	// value for InitialPieces currently based on average pieces per node
	InitialPieces int `help:"the initial number of pieces expected for a storage node to have, used for creating a filter" releaseDefault:"400000" devDefault:"10"`
	FalsePositiveRate float64 `help:"the false positive rate used for creating a garbage collection bloom filter" releaseDefault:"0.1" devDefault:"0.1"`
@@ -40,7 +41,7 @@ type Config struct {
	RetainSendTimeout time.Duration `help:"the amount of time to allow a node to handle a retain request" default:"1m"`
}

-// Service implements the garbage collection service
+// Service implements the garbage collection service.
//
// architecture: Chore
type Service struct {
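The InitialPieces and FalsePositiveRate settings above parameterize the garbage-collection bloom filter. To see what the release defaults imply, here is a minimal, self-contained sketch (bloomParams is illustrative, not the repository's implementation) using the standard sizing formulas m = -n·ln(p)/(ln 2)² and k = (m/n)·ln 2:

package main

import (
	"fmt"
	"math"
)

// bloomParams returns the bit count m and hash count k for a Bloom filter
// expected to hold n elements with target false positive rate p.
func bloomParams(n int, p float64) (m, k int) {
	bits := -float64(n) * math.Log(p) / (math.Ln2 * math.Ln2)
	m = int(math.Ceil(bits))
	k = int(math.Round(bits / float64(n) * math.Ln2))
	if k < 1 {
		k = 1
	}
	return m, k
}

func main() {
	// Release defaults above: 400000 expected pieces at a 0.1 false positive rate.
	m, k := bloomParams(400000, 0.1)
	fmt.Printf("filter size: %d bits (~%d KiB), hash functions: %d\n", m, m/8/1024, k)
}

With those defaults the filter comes out to roughly 234 KiB and 3 hash functions per node.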

View File

@@ -36,7 +36,7 @@ type TransferQueueItem struct {
	OrderLimitSendCount int
}

-// DB implements CRUD operations for graceful exit service
+// DB implements CRUD operations for graceful exit service.
//
// architecture: Database
type DB interface {

View File

@@ -17,7 +17,7 @@ import (
var _ metainfo.Observer = (*PathCollector)(nil)

-// PathCollector uses the metainfo loop to add paths to node reservoirs
+// PathCollector uses the metainfo loop to add paths to node reservoirs.
//
// architecture: Observer
type PathCollector struct {

View File

@@ -27,7 +27,7 @@ var (
const lastSegmentIndex = int64(-1)

-// Endpoint for checking object and segment health
+// Endpoint for checking object and segment health.
//
// architecture: Endpoint
type Endpoint struct {

View File

@@ -34,7 +34,7 @@ var (
	mon = monkit.Package()
)

-// Sender sends emails
+// Sender sends emails.
//
// architecture: Service
type Sender interface {
@@ -48,7 +48,7 @@ type Message interface {
	Subject() string
}

-// Service sends template-backed email messages through SMTP
+// Service sends template-backed email messages through SMTP.
//
// architecture: Service
type Service struct {

View File

@@ -20,8 +20,7 @@ var mon = monkit.Package()
var _ mailservice.Sender = (*LinkClicker)(nil)

-// LinkClicker is mailservice.Sender that click all links
-// from html msg parts
+// LinkClicker is mailservice.Sender that click all links from html msg parts.
//
// architecture: Service
type LinkClicker struct{}

View File

@@ -31,7 +31,7 @@ type Config struct {
	StaticDir string `help:"path to static resources" default:""`
}

-// Server represents marketing offersweb server
+// Server represents marketing offersweb server.
//
// architecture: Endpoint
type Server struct {

View File

@@ -12,7 +12,7 @@ import (
	"storj.io/storj/satellite/metainfo/metabase"
)

-// BucketsDB is the interface for the database to interact with buckets
+// BucketsDB is the interface for the database to interact with buckets.
//
// architecture: Database
type BucketsDB interface {

View File

@@ -27,7 +27,7 @@ type Config struct {
	Enabled bool `help:"set if expired segment cleanup is enabled or not" releaseDefault:"true" devDefault:"true"`
}

-// Chore implements the expired segment cleanup chore
+// Chore implements the expired segment cleanup chore.
//
// architecture: Chore
type Chore struct {

View File

@@ -17,7 +17,7 @@ import (
var _ metainfo.Observer = (*expiredDeleter)(nil)

-// expiredDeleter implements the metainfo loop observer interface for expired segment cleanup
+// expiredDeleter implements the metainfo loop observer interface for expired segment cleanup.
//
// architecture: Observer
type expiredDeleter struct {

View File

@@ -33,7 +33,7 @@ import (
// * upload 2 inline files
// * connect two observers to the metainfo loop
// * run the metainfo loop
-// * expect that each observer has seen
+// * expect that each observer has seen:
//   - 5 remote files
//   - 5 remote segments
//   - 2 inline files/segments

View File

@@ -54,7 +54,7 @@ type PointerDB interface {
	UnsynchronizedGetDel(ctx context.Context, keys []metabase.SegmentKey) (deletedKeys []metabase.SegmentKey, _ []*pb.Pointer, _ error)
}

-// Service implements the object deletion service
+// Service implements the object deletion service.
//
// architecture: Service
type Service struct {

View File

@@ -24,7 +24,7 @@ var (
	ErrBucketNotEmpty = errs.Class("bucket not empty")
)

-// Service structure
+// Service provides the metainfo service dependencies.
//
// architecture: Service
type Service struct {

View File

@@ -21,7 +21,7 @@ var (
	mon = monkit.Package()
)

-// Endpoint for querying node stats for the SNO
+// Endpoint for querying node stats for the SNO.
//
// architecture: Endpoint
type Endpoint struct {

View File

@@ -26,7 +26,7 @@ import (
	"storj.io/storj/satellite/nodeapiversion"
)

-// DB implements saving order after receiving from storage node
+// DB implements saving order after receiving from storage node.
//
// architecture: Database
type DB interface {
@@ -200,7 +200,7 @@ type ProcessOrderResponse struct {
	Status pb.SettlementResponse_Status
}

-// Endpoint for orders receiving
+// Endpoint for orders receiving.
//
// architecture: Endpoint
type Endpoint struct {
@@ -218,7 +218,10 @@ type Endpoint struct {
//
// ordersSemaphoreSize controls the number of concurrent clients allowed to submit orders at once.
// A value of zero means unlimited.
-func NewEndpoint(log *zap.Logger, satelliteSignee signing.Signee, db DB, nodeAPIVersionDB nodeapiversion.DB, settlementBatchSize int, windowEndpointRolloutPhase WindowEndpointRolloutPhase, ordersSemaphoreSize int, ordersService *Service) *Endpoint {
+func NewEndpoint(log *zap.Logger, satelliteSignee signing.Signee, db DB, nodeAPIVersionDB nodeapiversion.DB,
+	settlementBatchSize int, windowEndpointRolloutPhase WindowEndpointRolloutPhase,
+	ordersSemaphoreSize int, ordersService *Service) *Endpoint {
	var ordersSemaphore chan struct{}
	if ordersSemaphoreSize > 0 {
		ordersSemaphore = make(chan struct{}, ordersSemaphoreSize)
@@ -737,7 +740,9 @@ func (endpoint *Endpoint) SettlementWithWindowFinal(stream pb.DRPCOrders_Settlem
	})
}

-func (endpoint *Endpoint) isValid(ctx context.Context, log *zap.Logger, order *pb.Order, orderLimit *pb.OrderLimit, peerID storj.NodeID, window int64) bool {
+func (endpoint *Endpoint) isValid(ctx context.Context, log *zap.Logger, order *pb.Order,
+	orderLimit *pb.OrderLimit, peerID storj.NodeID, window int64) bool {
	if orderLimit.StorageNodeId != peerID {
		log.Debug("storage node id mismatch")
		mon.Event("order_not_valid_storagenodeid")
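The ordersSemaphore wiring in NewEndpoint above is the common Go idiom of a buffered channel used as a counting semaphore, with a nil channel standing in for "unlimited". A standalone sketch of that pattern (the acquire/release helpers are illustrative and not taken from the satellite code):

package main

import "fmt"

// acquire and release implement a counting semaphore over a buffered channel.
// A nil channel means unlimited: both helpers become no-ops.
func acquire(sem chan struct{}) {
	if sem != nil {
		sem <- struct{}{} // blocks while the buffer is full
	}
}

func release(sem chan struct{}) {
	if sem != nil {
		<-sem
	}
}

func main() {
	var sem chan struct{}
	if size := 2; size > 0 { // mirrors the ordersSemaphoreSize > 0 check above
		sem = make(chan struct{}, size)
	}

	done := make(chan struct{})
	for i := 0; i < 5; i++ {
		go func(i int) {
			acquire(sem)
			defer release(sem)
			fmt.Println("processing request", i) // at most 2 goroutines run here at once
			done <- struct{}{}
		}(i)
	}
	for i := 0; i < 5; i++ {
		<-done
	}
}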

View File

@@ -11,7 +11,7 @@ import (
	"storj.io/storj/satellite/internalpb"
)

-// Inspector is a RPC service for inspecting overlay internals
+// Inspector is a RPC service for inspecting overlay internals.
//
// architecture: Endpoint
type Inspector struct {

View File

@@ -15,7 +15,7 @@ import (
	"storj.io/storj/satellite/nodeselection"
)

-// CacheDB implements the database for overlay node selection cache
+// CacheDB implements the database for overlay node selection cache.
//
// architecture: Database
type CacheDB interface {

View File

@@ -10,7 +10,7 @@ import (
	"storj.io/common/storj"
)

-// PeerIdentities stores storagenode peer identities
+// PeerIdentities stores storagenode peer identities.
//
// architecture: Database
type PeerIdentities interface {

View File

@@ -36,7 +36,7 @@ var ErrNodeFinishedGE = errs.Class("node finished graceful exit")
// ErrNotEnoughNodes is when selecting nodes failed with the given parameters.
var ErrNotEnoughNodes = errs.Class("not enough nodes")

-// DB implements the database for overlay.Service
+// DB implements the database for overlay.Service.
//
// architecture: Database
type DB interface {
@@ -265,7 +265,7 @@ func (node *SelectedNode) Clone() *SelectedNode {
	}
}

-// Service is used to store and handle node information
+// Service is used to store and handle node information.
//
// architecture: Service
type Service struct {

View File

@@ -55,7 +55,7 @@ func init() {
	hw.Register(monkit.Default)
}

-// DB is the master database for the satellite
+// DB is the master database for the satellite.
//
// architecture: Master Database
type DB interface {

View File

@@ -31,7 +31,7 @@ type Config struct {
	ReferralManagerURL storj.NodeURL `help:"the URL for referral manager"`
}

-// Service allows communicating with the Referral Manager
+// Service allows communicating with the Referral Manager.
//
// architecture: Service
type Service struct {

View File

@@ -43,7 +43,7 @@ type durabilityStats struct {
	remoteSegmentsOverThreshold [5]int64
}

-// Checker contains the information needed to do checks for missing pieces
+// Checker contains the information needed to do checks for missing pieces.
//
// architecture: Chore
type Checker struct {
@@ -243,7 +243,7 @@ func (checker *Checker) updateIrreparableSegmentStatus(ctx context.Context, poin
var _ metainfo.Observer = (*checkerObserver)(nil)

-// checkerObserver implements the metainfo loop Observer interface
+// checkerObserver implements the metainfo loop Observer interface.
//
// architecture: Observer
type checkerObserver struct {

View File

@@ -15,7 +15,7 @@ var (
	mon = monkit.Package()
)

-// Inspector is a RPC service for inspecting irreparable internals
+// Inspector is a RPC service for inspecting irreparable internals.
//
// architecture: Endpoint
type Inspector struct {

View File

@@ -33,8 +33,7 @@ import (
// the numbers of nodes determined by the upload repair max threshold
// - Shuts down several nodes, but keeping up a number equal to the minim
// threshold
-// - Downloads the data from those left nodes and check that it's the same than
-// the uploaded one
+// - Downloads the data from those left nodes and check that it's the same than the uploaded one.
func TestDataRepairInMemory(t *testing.T) {
	testDataRepair(t, true)
}
@@ -894,10 +893,10 @@ func testRepairMultipleDisqualifiedAndSuspended(t *testing.T, inMemoryRepair boo
}

// TestDataRepairOverride_HigherLimit does the following:
// - Uploads test data
// - Kills nodes to fall to the Repair Override Value of the checker but stays above the original Repair Threshold
// - Triggers data repair, which attempts to repair the data from the remaining nodes to
//   the numbers of nodes determined by the upload repair max threshold
func TestDataRepairOverride_HigherLimitInMemory(t *testing.T) {
	testDataRepairOverrideHigherLimit(t, true)
}
@@ -988,12 +987,12 @@ func testDataRepairOverrideHigherLimit(t *testing.T, inMemoryRepair bool) {
}

// TestDataRepairOverride_LowerLimit does the following:
// - Uploads test data
// - Kills nodes to fall to the Repair Threshold of the checker that should not trigger repair any longer
// - Starts Checker and Repairer and ensures this is the case.
// - Kills more nodes to fall to the Override Value to trigger repair
// - Triggers data repair, which attempts to repair the data from the remaining nodes to
//   the numbers of nodes determined by the upload repair max threshold
func TestDataRepairOverride_LowerLimitInMemory(t *testing.T) {
	testDataRepairOverrideLowerLimit(t, true)
}
@@ -1112,12 +1111,12 @@ func testDataRepairOverrideLowerLimit(t *testing.T, inMemoryRepair bool) {
}

// TestDataRepairUploadLimits does the following:
// - Uploads test data to nodes
// - Get one segment of that data to check in which nodes its pieces are stored
// - Kills as many nodes as needed which store such segment pieces
// - Triggers data repair
// - Verify that the number of pieces which repaired has uploaded don't overpass
//   the established limit (success threshold + % of excess)
func TestDataRepairUploadLimitInMemory(t *testing.T) {
	testDataRepairUploadLimit(t, true)
}

View File

@@ -38,7 +38,7 @@ type Config struct {
	InMemoryRepair bool `help:"whether to download pieces for repair in memory (true) or download to disk (false)" default:"false"`
}

-// Service contains the information needed to run the repair service
+// Service contains the information needed to run the repair service.
//
// architecture: Worker
type Service struct {

View File

@@ -19,7 +19,7 @@ var (
	ErrOfferNotExist = errs.Class("no current offer")
)

-// DB holds information about offer
+// DB holds information about offers.
//
// architecture: Database
type DB interface {

View File

@@ -90,7 +90,10 @@ func (cache *overlaycache) SelectStorageNodes(ctx context.Context, totalNeededNo
	return nodes, nil
}

-func (cache *overlaycache) selectStorageNodesOnce(ctx context.Context, reputableNodeCount, newNodeCount int, criteria *overlay.NodeCriteria, excludedIDs []storj.NodeID, excludedNetworks []string) (reputableNodes, newNodes []*overlay.SelectedNode, err error) {
+func (cache *overlaycache) selectStorageNodesOnce(ctx context.Context, reputableNodeCount, newNodeCount int,
+	criteria *overlay.NodeCriteria, excludedIDs []storj.NodeID,
+	excludedNetworks []string) (reputableNodes, newNodes []*overlay.SelectedNode, err error) {
	defer mon.Task()(&ctx)(&err)

	newNodesCondition, err := nodeSelectionCondition(ctx, criteria, excludedIDs, excludedNetworks, true)
@@ -171,7 +174,9 @@ func (cache *overlaycache) selectStorageNodesOnce(ctx context.Context, reputable
}

// nodeSelectionCondition creates a condition with arguments that corresponds to the arguments.
-func nodeSelectionCondition(ctx context.Context, criteria *overlay.NodeCriteria, excludedIDs []storj.NodeID, excludedNetworks []string, isNewNodeQuery bool) (condition, error) {
+func nodeSelectionCondition(ctx context.Context, criteria *overlay.NodeCriteria, excludedIDs []storj.NodeID,
+	excludedNetworks []string, isNewNodeQuery bool) (condition, error) {
	var conds conditions
	conds.add(`disqualified IS NULL`)
	conds.add(`unknown_audit_suspended IS NULL`)
@@ -220,15 +225,15 @@ func nodeSelectionCondition(ctx context.Context, criteria *overlay.NodeCriteria,
	return conds.combine(), nil
}

-// partialQuery corresponds to a query
+// partialQuery corresponds to a query.
//
// distinct=false
//
// $selection WHERE $condition ORDER BY $orderBy, RANDOM() LIMIT $limit
//
// distinct=true
//
// SELECT * FROM ($selection WHERE $condition ORDER BY $orderBy, RANDOM()) filtered ORDER BY RANDOM() LIMIT $limit
//
type partialQuery struct {
	selection string
View File

@@ -21,7 +21,7 @@ var (
	mon = monkit.Package()
)

-// Endpoint for querying node stats for the SNO
+// Endpoint for querying node stats for the SNO.
//
// architecture: Endpoint
type Endpoint struct {

View File

@@ -73,7 +73,7 @@ type StoragenodePayment struct {
	Notes string `json:"notes"`
}

-// Service is used to store and handle node paystub information
+// Service is used to store and handle node paystub information.
//
// architecture: Service
type Service struct {

View File

@@ -21,7 +21,7 @@ type Config struct {
	Interval time.Duration `help:"how frequently bandwidth usage rollups are calculated" default:"1h0m0s"`
}

-// Service implements
+// Service implements the bandwidth usage rollup service.
//
// architecture: Chore
type Service struct {

View File

@@ -15,7 +15,7 @@ import (
	"storj.io/common/sync2"
)

-// Chore is the contact chore for nodes announcing themselves to their trusted satellites
+// Chore is the contact chore for nodes announcing themselves to their trusted satellites.
//
// architecture: Chore
type Chore struct {

View File

@@ -16,7 +16,7 @@ import (
	"storj.io/common/rpc/rpcstatus"
)

-// Endpoint implements the contact service Endpoints
+// Endpoint implements the contact service Endpoints.
//
// architecture: Endpoint
type Endpoint struct {

View File

@@ -27,7 +27,7 @@ var (
	Error = errs.Class("piecestore inspector")
)

-// Endpoint does inspectory things
+// Endpoint implements the inspector endpoints.
//
// architecture: Endpoint
type Endpoint struct {

View File

@@ -37,7 +37,7 @@ type Config struct {
	NotifyLowDiskCooldown time.Duration `help:"minimum length of time between capacity reports" default:"10m" hidden:"true"`
}

-// Service which monitors disk usage
+// Service which monitors disk usage.
//
// architecture: Service
type Service struct {

View File

@@ -39,8 +39,7 @@ type CacheStorage struct {
	Satellites satellites.DB
}

-// Cache runs cache loop and stores reputation stats
-// and storage usage into db
+// Cache runs cache loop and stores reputation stats and storage usage into db.
//
// architecture: Chore
type Cache struct {
@@ -58,7 +57,9 @@ type Cache struct {
}

// NewCache creates new caching service instance.
-func NewCache(log *zap.Logger, config Config, db CacheStorage, service *Service, payoutEndpoint *payout.Endpoint, reputationService *reputation.Service, trust *trust.Pool) *Cache {
+func NewCache(log *zap.Logger, config Config, db CacheStorage, service *Service,
+	payoutEndpoint *payout.Endpoint, reputationService *reputation.Service, trust *trust.Pool) *Cache {
	return &Cache{
		log: log,
		db: db,
@@ -187,7 +188,8 @@ func (cache *Cache) CacheSpaceUsage(ctx context.Context) (err error) {
	})
}

-// CacheHeldAmount queries held amount stats and payments from all the satellites known to the storagenode and stores info into db.
+// CacheHeldAmount queries held amount stats and payments from
+// all the satellites known to the storagenode and stores info into db.
func (cache *Cache) CacheHeldAmount(ctx context.Context) (err error) {
	defer mon.Task()(&ctx)(&err)

View File

@@ -27,7 +27,7 @@ var (
	mon = monkit.Package()
)

-// Client encapsulates NodeStatsClient with underlying connection
+// Client encapsulates NodeStatsClient with underlying connection.
//
// architecture: Client
type Client struct {
@@ -40,7 +40,7 @@ func (c *Client) Close() error {
	return c.conn.Close()
}

-// Service retrieves info from satellites using an rpc client
+// Service retrieves info from satellites using an rpc client.
//
// architecture: Service
type Service struct {

View File

@@ -12,7 +12,7 @@ import (
	"storj.io/common/storj"
)

-// DB works with payout database
+// DB works with payout database.
//
// architecture: Database
type DB interface {

View File

@@ -33,7 +33,7 @@ var (
	mon = monkit.Package()
)

-// Service retrieves info from satellites using an rpc client
+// Service retrieves info from satellites using an rpc client.
//
// architecture: Service
type Service struct {

View File

@@ -66,7 +66,7 @@ var (
	mon = monkit.Package()
)

-// DB is the master database for Storage Node
+// DB is the master database for Storage Node.
//
// architecture: Master Database
type DB interface {

View File

@@ -17,7 +17,7 @@ import (
	"storj.io/storj/storage"
)

-// CacheService updates the space used cache
+// CacheService updates the space used cache.
//
// architecture: Chore
type CacheService struct {
@@ -146,7 +146,8 @@ func (service *CacheService) Close() (err error) {
// - piecesTotal: the total space used by pieces, including headers
// - piecesContentSize: the space used by piece content, not including headers
// - trashTotal: the total space used in the trash, including headers
-// - pieceTotal and pieceContentSize are the corollary for a single file
+//
+// pieceTotal and pieceContentSize are the corollary for a single file.
//
// architecture: Database
type BlobsUsageCache struct {

View File

@@ -103,7 +103,7 @@ type V0PieceInfoDBForTest interface {
	Add(context.Context, *Info) error
}

-// PieceSpaceUsedDB stores the most recent totals from the space used cache
+// PieceSpaceUsedDB stores the most recent totals from the space used cache.
//
// architecture: Database
type PieceSpaceUsedDB interface {
@@ -182,7 +182,9 @@ type StoreForTest struct {
}

// NewStore creates a new piece store.
-func NewStore(log *zap.Logger, blobs storage.Blobs, v0PieceInfo V0PieceInfoDB, expirationInfo PieceExpirationDB, pieceSpaceUsedDB PieceSpaceUsedDB, config Config) *Store {
+func NewStore(log *zap.Logger, blobs storage.Blobs, v0PieceInfo V0PieceInfoDB,
+	expirationInfo PieceExpirationDB, pieceSpaceUsedDB PieceSpaceUsedDB, config Config) *Store {
	return &Store{
		log: log,
		config: config,
@@ -222,7 +224,9 @@ func (store *Store) Writer(ctx context.Context, satellite storj.NodeID, pieceID
// WriterForFormatVersion allows opening a piece writer with a specified storage format version.
// This is meant to be used externally only in test situations (thus the StoreForTest receiver
// type).
-func (store StoreForTest) WriterForFormatVersion(ctx context.Context, satellite storj.NodeID, pieceID storj.PieceID, formatVersion storage.FormatVersion) (_ *Writer, err error) {
+func (store StoreForTest) WriterForFormatVersion(ctx context.Context, satellite storj.NodeID,
+	pieceID storj.PieceID, formatVersion storage.FormatVersion) (_ *Writer, err error) {
	defer mon.Task()(&ctx)(&err)

	blobRef := storage.BlobRef{
@@ -271,7 +275,9 @@ func (store *Store) Reader(ctx context.Context, satellite storj.NodeID, pieceID
// ReaderWithStorageFormat returns a new piece reader for a located piece, which avoids the
// potential need to check multiple storage formats to find the right blob.
-func (store *Store) ReaderWithStorageFormat(ctx context.Context, satellite storj.NodeID, pieceID storj.PieceID, formatVersion storage.FormatVersion) (_ *Reader, err error) {
+func (store *Store) ReaderWithStorageFormat(ctx context.Context, satellite storj.NodeID,
+	pieceID storj.PieceID, formatVersion storage.FormatVersion) (_ *Reader, err error) {
	defer mon.Task()(&ctx)(&err)

	ref := storage.BlobRef{Namespace: satellite.Bytes(), Key: pieceID.Bytes()}
	blob, err := store.blobs.OpenWithStorageFormat(ctx, ref, formatVersion)
@@ -306,7 +312,8 @@ func (store *Store) Delete(ctx context.Context, satellite storj.NodeID, pieceID
		err = errs.Combine(err, store.v0PieceInfo.Delete(ctx, satellite, pieceID))
	}

-	store.log.Debug("deleted piece", zap.String("Satellite ID", satellite.String()), zap.String("Piece ID", pieceID.String()))
+	store.log.Debug("deleted piece", zap.String("Satellite ID", satellite.String()),
+		zap.String("Piece ID", pieceID.String()))
	return Error.Wrap(err)
}
@@ -386,10 +393,10 @@ func (store *Store) RestoreTrash(ctx context.Context, satelliteID storj.NodeID)
// MigrateV0ToV1 will migrate a piece stored with storage format v0 to storage
// format v1. If the piece is not stored as a v0 piece it will return an error.
// The follow failures are possible:
// - Fail to open or read v0 piece. In this case no artifacts remain.
// - Fail to Write or Commit v1 piece. In this case no artifacts remain.
-// - Fail to Delete v0 piece. In this case v0 piece may remain, but v1 piece
-//   will exist and be preferred in future calls.
+// - Fail to Delete v0 piece. In this case v0 piece may remain,
+//   but v1 piece will exist and be preferred in future calls.
func (store *Store) MigrateV0ToV1(ctx context.Context, satelliteID storj.NodeID, pieceID storj.PieceID) (err error) {
	defer mon.Task()(&ctx)(&err)
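The failure ordering documented on MigrateV0ToV1 above falls out of doing the steps in sequence: read v0, write and commit v1, then delete v0. A schematic sketch of that ordering (the function signatures here are invented for illustration, not taken from the pieces package):

package main

import (
	"errors"
	"fmt"
)

// migrate outlines the ordering documented above: read the v0 piece, write
// and commit the v1 piece, then delete v0. A failure at any step leaves
// either no artifacts or a preferred v1 copy.
func migrate(readV0 func() ([]byte, error), writeV1 func([]byte) error, deleteV0 func() error) error {
	data, err := readV0()
	if err != nil {
		return err // failed to open or read v0: no artifacts remain
	}
	if err := writeV1(data); err != nil {
		return err // failed to write or commit v1: no artifacts remain
	}
	// v1 exists and will be preferred; a failed delete only strands the v0 copy.
	return deleteV0()
}

func main() {
	err := migrate(
		func() ([]byte, error) { return []byte("piece"), nil },
		func(b []byte) error { return nil },
		func() error { return errors.New("delete failed") },
	)
	fmt.Println(err) // v1 survives even though the v0 delete failed
}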

View File

@@ -9,7 +9,7 @@ import (
	"storj.io/common/storj"
)

-// DB works with pricing database
+// DB works with pricing database.
//
// architecture: Database
type DB interface {

View File

@@ -10,7 +10,7 @@ import (
	"storj.io/common/storj"
)

-// DB works with reputation database
+// DB works with reputation database.
//
// architecture: Database
type DB interface {

View File

@@ -44,7 +44,7 @@ type Satellite struct {
	Status int32
}

-// DB works with satellite database
+// DB works with satellite database.
//
// architecture: Database
type DB interface {

View File

@@ -10,7 +10,7 @@ import (
	"storj.io/common/storj"
)

-// DB works with storage usage database
+// DB works with storage usage database.
//
// architecture: Database
type DB interface {