all: fix linter complaints

Change-Id: Ia01404dbb6bdd19a146fa10ff7302e08f87a8c95
This commit is contained in:
Egon Elbre 2020-10-13 15:47:55 +03:00
parent 0bdb952269
commit 2268cc1df3
51 changed files with 100 additions and 100 deletions

View File

@ -104,7 +104,7 @@ func TestCertificateSigner_Sign_E2E(t *testing.T) {
assert.Equal(t, clientIdent.CA.RawTBSCertificate, signedChain[0].RawTBSCertificate)
assert.Equal(t, signer.Cert.Raw, signedChainBytes[1])
// TODO: test scenario with rest chain
//assert.Equal(t, signingCA.RawRestChain(), signedChainBytes[1:])
// assert.Equal(t, signingCA.RawRestChain(), signedChainBytes[1:])
err = signedChain[0].CheckSignatureFrom(signer.Cert)
require.NoError(t, err)
@ -187,7 +187,7 @@ func TestCertificateSigner_Sign(t *testing.T) {
assert.Equal(t, ident.CA.RawTBSCertificate, signedChain[0].RawTBSCertificate)
assert.Equal(t, ca.Cert.Raw, signedChain[1].Raw)
// TODO: test scenario with rest chain
//assert.Equal(t, signingCA.RawRestChain(), res.Chain[1:])
// assert.Equal(t, signingCA.RawRestChain(), res.Chain[1:])
err = signedChain[0].CheckSignatureFrom(ca.Cert)
require.NoError(t, err)

View File

@ -265,7 +265,7 @@ func cmdConfig(cmd *cobra.Command, args []string) (err error) {
if err != nil {
return err
}
//run setup if we can't access the config file
// run setup if we can't access the config file
conf := filepath.Join(setupDir, "config.yaml")
if _, err := os.Stat(conf); err != nil {
return cmdSetup(cmd, args)

View File

@ -31,7 +31,6 @@ var (
runCmd = &cobra.Command{
Use: "run",
Short: "Run the storj-admin",
//RunE: cmdRun,
}
confDir string

View File

@ -100,7 +100,8 @@ func (writer *prefixWriter) Write(data []byte) (int, error) {
// buffer everything that hasn't been written yet
if len(writer.buffer) > 0 {
buffer = append(writer.buffer, data...) // nolint gocritic
buffer = writer.buffer
buffer = append(buffer, data...)
defer func() {
writer.buffer = buffer
}()

View File

@ -61,7 +61,7 @@ func deleteBucket(cmd *cobra.Command, args []string) (err error) {
}()
if *rbForceFlag {
//TODO: Do we need to have retry here?
// TODO: Do we need to have retry here?
if _, err := project.DeleteBucketWithObjects(ctx, dst.Bucket()); err != nil {
return convertError(err, dst)
}

View File

@ -43,7 +43,7 @@ func MonthsBetweenDates(from time.Time, to time.Time) int {
y2, M2, _ := to.UTC().Date()
months := ((y2 - y1) * 12) + int(M2) - int(M1)
//note that according to the tests, we ignore days of the month
// note that according to the tests, we ignore days of the month
return months
}

View File

@ -59,7 +59,7 @@ func TestBasicMigration(t *testing.T) {
}
func basicMigration(ctx *testcontext.Context, t *testing.T, db tagsql.DB, testDB tagsql.DB) {
dbName := strings.ToLower(`versions_` + strings.Replace(t.Name(), "/", "_", -1))
dbName := strings.ToLower(`versions_` + strings.ReplaceAll(t.Name(), "/", "_"))
defer func() { assert.NoError(t, dropTables(ctx, db, dbName, "users")) }()
/* #nosec G306 */ // This is a test besides the file contains just test data.

View File

@ -123,7 +123,7 @@ func (msg *Message) Bytes() (data []byte, err error) {
}
func tocrlf(data []byte) []byte {
lf := bytes.Replace(data, []byte("\r\n"), []byte("\n"), -1)
crlf := bytes.Replace(lf, []byte("\n"), []byte("\r\n"), -1)
lf := bytes.ReplaceAll(data, []byte("\r\n"), []byte("\n"))
crlf := bytes.ReplaceAll(lf, []byte("\n"), []byte("\r\n"))
return crlf
}

View File

@ -14,27 +14,27 @@ import (
"golang.org/x/time/rate"
)
//IPRateLimiterConfig configures an IPRateLimiter.
// IPRateLimiterConfig configures an IPRateLimiter.
type IPRateLimiterConfig struct {
Duration time.Duration `help:"the rate at which request are allowed" default:"5m"`
Burst int `help:"number of events before the limit kicks in" default:"5"`
NumLimits int `help:"number of IPs whose rate limits we store" default:"1000"`
}
//IPRateLimiter imposes a rate limit per HTTP user IP.
// IPRateLimiter imposes a rate limit per HTTP user IP.
type IPRateLimiter struct {
config IPRateLimiterConfig
mu sync.Mutex
ipLimits map[string]*userLimit
}
//userLimit is the per-IP limiter.
// userLimit is the per-IP limiter.
type userLimit struct {
limiter *rate.Limiter
lastSeen time.Time
}
//NewIPRateLimiter constructs an IPRateLimiter.
// NewIPRateLimiter constructs an IPRateLimiter.
func NewIPRateLimiter(config IPRateLimiterConfig) *IPRateLimiter {
return &IPRateLimiter{
config: config,
@ -67,7 +67,7 @@ func (rl *IPRateLimiter) cleanupLimiters() {
}
}
//Limit applies a per IP rate limiting as an HTTP Handler.
// Limit applies a per IP rate limiting as an HTTP Handler.
func (rl *IPRateLimiter) Limit(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
ip, err := getRequestIP(r)
@ -84,7 +84,7 @@ func (rl *IPRateLimiter) Limit(next http.Handler) http.Handler {
})
}
//getRequestIP gets the original IP address of the request by handling the request headers.
// getRequestIP gets the original IP address of the request by handling the request headers.
func getRequestIP(r *http.Request) (ip string, err error) {
realIP := r.Header.Get("X-REAL-IP")
if realIP != "" {
@ -104,7 +104,7 @@ func getRequestIP(r *http.Request) (ip string, err error) {
return ip, err
}
//getUserLimit returns a rate limiter for an IP.
// getUserLimit returns a rate limiter for an IP.
func (rl *IPRateLimiter) getUserLimit(ip string) *rate.Limiter {
rl.mu.Lock()
defer rl.mu.Unlock()
@ -128,7 +128,7 @@ func (rl *IPRateLimiter) getUserLimit(ip string) *rate.Limiter {
oldestKey = ip
}
}
//only delete the oldest non-expired if there's still an issue
// only delete the oldest non-expired if there's still an issue
if oldestKey != "" && len(rl.ipLimits) >= rl.config.NumLimits {
delete(rl.ipLimits, oldestKey)
}
@ -141,12 +141,12 @@ func (rl *IPRateLimiter) getUserLimit(ip string) *rate.Limiter {
return v.limiter
}
//Burst returns the number of events that happen before the rate limit.
// Burst returns the number of events that happen before the rate limit.
func (rl *IPRateLimiter) Burst() int {
return rl.config.Burst
}
//Duration returns the amount of time required between events.
// Duration returns the amount of time required between events.
func (rl *IPRateLimiter) Duration() time.Duration {
return rl.config.Duration
}

View File

@ -19,12 +19,13 @@ import (
)
func TestNewIPRateLimiter(t *testing.T) {
//create a rate limiter with defaults except NumLimits = 2
// create a rate limiter with defaults except NumLimits = 2
config := web.IPRateLimiterConfig{}
cfgstruct.Bind(&pflag.FlagSet{}, &config, cfgstruct.UseDevDefaults())
config.NumLimits = 2
rateLimiter := web.NewIPRateLimiter(config)
//run ratelimiter cleanup until end of test
// run ratelimiter cleanup until end of test
ctx := testcontext.New(t)
defer ctx.Cleanup()
ctx2, cancel := context.WithCancel(ctx)
@ -33,32 +34,36 @@ func TestNewIPRateLimiter(t *testing.T) {
rateLimiter.Run(ctx2)
return nil
})
//make the default HTTP handler return StatusOK
// make the default HTTP handler return StatusOK
handler := rateLimiter.Limit(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusOK)
}))
//expect burst number of successes
// expect burst number of successes
testWithAddress(t, "192.168.1.1:5000", rateLimiter.Burst(), handler)
//expect similar results for a different IP
// expect similar results for a different IP
testWithAddress(t, "127.0.0.1:5000", rateLimiter.Burst(), handler)
//expect similar results for a different IP
// expect similar results for a different IP
testWithAddress(t, "127.0.0.100:5000", rateLimiter.Burst(), handler)
//expect original IP to work again because numLimits == 2
// expect original IP to work again because numLimits == 2
testWithAddress(t, "192.168.1.1:5000", rateLimiter.Burst(), handler)
}
func testWithAddress(t *testing.T, remoteAddress string, burst int, handler http.Handler) {
//create HTTP request
// create HTTP request
req, err := http.NewRequest("GET", "", nil)
require.NoError(t, err)
req.RemoteAddr = remoteAddress
//expect burst number of successes
// expect burst number of successes
for x := 0; x < burst; x++ {
rr := httptest.NewRecorder()
handler.ServeHTTP(rr, req)
assert.Equal(t, rr.Code, http.StatusOK, remoteAddress)
}
//then expect failure
// then expect failure
rr := httptest.NewRecorder()
handler.ServeHTTP(rr, req)
assert.Equal(t, rr.Code, http.StatusTooManyRequests, remoteAddress)

View File

@ -78,7 +78,7 @@ func (r *Service) Rollup(ctx context.Context) (err error) {
return Error.Wrap(err)
}
//remove the latest day (which we cannot know is complete), then push to DB
// remove the latest day (which we cannot know is complete), then push to DB
latestTally = time.Date(latestTally.Year(), latestTally.Month(), latestTally.Day(), 0, 0, 0, 0, latestTally.Location())
delete(rollupStats, latestTally)
if len(rollupStats) == 0 {
@ -113,7 +113,7 @@ func (r *Service) RollupStorage(ctx context.Context, lastRollup time.Time, rollu
r.logger.Info("Rollup found no new tallies")
return lastRollup, nil
}
//loop through tallies and build Rollup
// loop through tallies and build Rollup
for _, tallyRow := range tallies {
node := tallyRow.NodeID
// tallyEndTime is the time the at rest tally was saved
@ -121,7 +121,7 @@ func (r *Service) RollupStorage(ctx context.Context, lastRollup time.Time, rollu
if tallyEndTime.After(latestTally) {
latestTally = tallyEndTime
}
//create or get AccoutingRollup day entry
// create or get AccountingRollup day entry
iDay := time.Date(tallyEndTime.Year(), tallyEndTime.Month(), tallyEndTime.Day(), 0, 0, 0, 0, tallyEndTime.Location())
if rollupStats[iDay] == nil {
rollupStats[iDay] = make(map[storj.NodeID]*accounting.Rollup)
@ -129,7 +129,7 @@ func (r *Service) RollupStorage(ctx context.Context, lastRollup time.Time, rollu
if rollupStats[iDay][node] == nil {
rollupStats[iDay][node] = &accounting.Rollup{NodeID: node, StartTime: iDay}
}
//increment data at rest sum
// increment data at rest sum
rollupStats[iDay][node].AtRestTotal += tallyRow.DataTotal
}

View File

@ -144,7 +144,7 @@ func NewAdmin(log *zap.Logger, full *identity.FullIdentity, db DB,
peer.Payments.Stripe = stripeClient
peer.Payments.Accounts = peer.Payments.Service.Accounts()
}
{ //setup admin endpoint
{ // setup admin endpoint
var err error
peer.Admin.Listener, err = net.Listen("tcp", config.Admin.Address)
if err != nil {

View File

@ -491,7 +491,7 @@ func (server *Server) checkUsage(ctx context.Context, w http.ResponseWriter, pro
}
if lastMonthUsage.Storage > 0 || lastMonthUsage.Egress > 0 || lastMonthUsage.ObjectCount > 0 {
//time passed into the check function need to be the UTC midnight dates of the first and last day of the month
// time passed into the check function need to be the UTC midnight dates of the first and last day of the month
err := server.db.StripeCoinPayments().ProjectRecords().Check(ctx, projectID, firstOfMonth.AddDate(0, -1, 0), firstOfMonth.Add(-time.Hour*24))
switch err {
case stripecoinpayments.ErrProjectRecordExists:

View File

@ -538,7 +538,7 @@ func TestDeleteProjectWithUsagePreviousMonth(t *testing.T) {
err = planet.Satellites[0].DB.Console().APIKeys().Delete(ctx, apiKeys.APIKeys[0].ID)
require.NoError(t, err)
//ToDo: Improve updating of DB entries
// TODO: Improve updating of DB entries
now := time.Now().UTC()
// set fixed day to avoid failures at the end of the month
accTime := time.Date(now.Year(), now.Month()-1, 15, now.Hour(), now.Minute(), now.Second(), now.Nanosecond(), time.UTC)

View File

@ -108,7 +108,7 @@ func (server *Server) addUser(w http.ResponseWriter, r *http.Request) {
return
}
//Set User Status to be activated, as we manually created it
// Set User Status to be activated, as we manually created it
newuser.Status = console.Active
newuser.PasswordHash = nil
err = server.db.Console().Users().Update(ctx, newuser)

View File

@ -48,7 +48,7 @@ func TestConfigLock(t *testing.T) {
assert.NoErrorf(t, err, "Error reading file for move")
err = ioutil.WriteFile(lockPath, input, 0644)
assert.NoErrorf(t, err, "Error writing file for move")
} else { //compare to satellite-config.yaml.lock
} else { // compare to satellite-config.yaml.lock
configs1 := readLines(t, lockPath)
configs2 := readLines(t, cleanedupConfig)
if diff := cmp.Diff(configs1, configs2); diff != "" {
@ -84,11 +84,11 @@ func normalizeConfig(t *testing.T, configIn, configOut, tempDir string) {
appDir := fpath.ApplicationDir()
for scanner.Scan() {
line := scanner.Text()
//fix metrics.app and tracing.app
// fix metrics.app and tracing.app
line = strings.Replace(line, ".exe", "", 1)
//fix server.revocation-dburl
// fix server.revocation-dburl
line = strings.Replace(line, tempDir, "testdata", 1)
//fix identity.cert-path and identity.key-path
// fix identity.cert-path and identity.key-path
if strings.Contains(line, appDir) {
line = strings.Replace(line, appDir, "/root/.local/share", 1)
line = strings.ToLower(strings.ReplaceAll(line, "\\", "/"))

View File

@ -12,7 +12,7 @@ import (
"storj.io/storj/satellite/console/consoleauth"
)
//TODO: change to JWT or Macaroon based auth
// TODO: change to JWT or Macaroon based auth
// Signer creates signature for provided data.
type Signer interface {

View File

@ -11,7 +11,7 @@ import (
"storj.io/common/uuid"
)
//TODO: change to JWT or Macaroon based auth
// TODO: change to JWT or Macaroon based auth
// Claims represents data signed by server and used for authentication.
type Claims struct {

View File

@ -8,7 +8,7 @@ import (
"crypto/sha256"
)
//TODO: change to JWT or Macaroon based auth
// TODO: change to JWT or Macaroon based auth
// Hmac is hmac256 based Signer.
type Hmac struct {

View File

@ -12,7 +12,7 @@ import (
"github.com/zeebo/errs"
)
//TODO: change to JWT or Macaroon based auth
// TODO: change to JWT or Macaroon based auth
// Token represents authentication data structure.
type Token struct {

View File

@ -21,7 +21,6 @@ import (
)
func TestProjectsRepository(t *testing.T) {
//testing constants
const (
// for user
shortName = "lastName"
@ -195,7 +194,7 @@ func TestProjectsList(t *testing.T) {
projectsDB := db.Console().Projects()
//create projects
// Create projects
var projects []console.Project
for i := 0; i < length; i++ {
proj, err := projectsDB.Insert(ctx,

View File

@ -16,9 +16,7 @@ import (
)
func TestNewRegistrationSecret(t *testing.T) {
// testing constants
const (
// for user
shortName = "lastName"
email = "email@mail.test"
pass = "123456"

View File

@ -227,7 +227,7 @@ func (paymentService PaymentsService) AddCreditCard(ctx context.Context, creditC
return nil
}
//ToDo: check if this is the right place
// TODO: check if this is the right place
err = paymentService.AddPromotionalCoupon(ctx, auth.User.ID)
if err != nil {
paymentService.service.log.Warn(fmt.Sprintf("could not add promotional coupon for user %s", auth.User.ID.String()), zap.Error(err))
@ -534,7 +534,7 @@ func (s *Service) CreateUser(ctx context.Context, user CreateUser, tokenSecret R
offerType = rewards.Referral
}
//TODO: Create a current offer cache to replace database call
// TODO: Create a current offer cache to replace database call
offers, err := s.rewards.GetActiveOffersByType(ctx, offerType)
if err != nil && !rewards.ErrOfferNotExist.Has(err) {
s.log.Error("internal error", zap.Error(err))
@ -641,7 +641,7 @@ func (s *Service) CreateUser(ctx context.Context, user CreateUser, tokenSecret R
func (s *Service) GenerateActivationToken(ctx context.Context, id uuid.UUID, email string) (token string, err error) {
defer mon.Task()(&ctx)(&err)
//TODO: activation token should differ from auth token
// TODO: activation token should differ from auth token
claims := &consoleauth.Claims{
ID: id,
Email: email,
@ -719,7 +719,7 @@ func (s *Service) ActivateAccount(ctx context.Context, activationToken string) (
return nil
}
//ToDo: check if this is the right place
// TODO: check if this is the right place
err = s.accounts.Coupons().AddPromotionalCoupon(ctx, user.ID)
if err != nil {
s.log.Debug(fmt.Sprintf("could not add promotional coupon for user %s", user.ID.String()), zap.Error(Error.Wrap(err)))

View File

@ -298,7 +298,7 @@ func setupData(ctx context.Context, t *testing.T, db satellite.DB) (user *consol
})
require.NoError(t, err)
//create an user as referrer
// create an user as referrer
referrer, err = consoleDB.Users().Insert(ctx, &console.User{
ID: testrand.UUID(),
FullName: "referrer",

View File

@ -17,7 +17,6 @@ import (
"storj.io/storj/satellite/satellitedb/satellitedbtest"
)
//testing constants.
const (
lastName = "lastName"
email = "email@mail.test"

View File

@ -57,7 +57,7 @@ type Service struct {
html *htmltemplate.Template
// TODO(yar): prepare plain text version
//text *texttemplate.Template
// text *texttemplate.Template
sending sync.WaitGroup
}
@ -68,10 +68,10 @@ func New(log *zap.Logger, sender Sender, templatePath string) (*Service, error)
service := &Service{log: log, sender: sender}
// TODO(yar): prepare plain text version
//service.text, err = texttemplate.ParseGlob(filepath.Join(templatePath, "*.txt"))
//if err != nil {
// service.text, err = texttemplate.ParseGlob(filepath.Join(templatePath, "*.txt"))
// if err != nil {
// return nil, err
//}
// }
service.html, err = htmltemplate.ParseGlob(filepath.Join(templatePath, "*.html"))
if err != nil {
@ -126,9 +126,9 @@ func (service *Service) SendRendered(ctx context.Context, to []post.Address, msg
var textBuffer bytes.Buffer
// TODO(yar): prepare plain text version
//if err = service.text.ExecuteTemplate(&textBuffer, msg.Template() + ".txt", msg); err != nil {
// if err = service.text.ExecuteTemplate(&textBuffer, msg.Template() + ".txt", msg); err != nil {
// return
//}
// }
if err = service.html.ExecuteTemplate(&htmlBuffer, msg.Template()+".html", msg); err != nil {
return

View File

@ -80,7 +80,7 @@ func TestCreateAndStopOffers(t *testing.T) {
return err
}
require.Equal(t, http.StatusOK, req.StatusCode)
//reading out the rest of the connection
// reading out the rest of the connection
_, err = io.Copy(ioutil.Discard, req.Body)
if err != nil {
return err

View File

@ -72,7 +72,7 @@ func (endpoint *Endpoint) Batch(ctx context.Context, req *pb.BatchRequest) (resp
},
})
//OBJECT
// OBJECT
case *pb.BatchRequestItem_ObjectBegin:
singleRequest.ObjectBegin.Header = req.Header
response, err := endpoint.BeginObject(ctx, singleRequest.ObjectBegin)

View File

@ -68,7 +68,7 @@ func TestBasicBucketOperations(t *testing.T) {
require.Equal(t, expectedBucket.DefaultRedundancyScheme, bucket.DefaultRedundancyScheme)
require.Equal(t, expectedBucket.DefaultEncryptionParameters, bucket.DefaultEncryptionParameters)
//CountBuckets
// CountBuckets
count, err = bucketsDB.CountBuckets(ctx, project.ID)
require.NoError(t, err)
require.Equal(t, 1, count)

View File

@ -1361,7 +1361,7 @@ func (endpoint *Endpoint) commitSegment(ctx context.Context, req *pb.SegmentComm
// TODO: Replace with hash & signature validation
// Ensure neither uplink or storage nodes are cheating on us
if pointer.Type == pb.Pointer_REMOTE {
//We cannot have more redundancy than total/min
// We cannot have more redundancy than total/min
if float64(totalStored) > (float64(pointer.SegmentSize)/float64(pointer.Remote.Redundancy.MinReq))*float64(pointer.Remote.Redundancy.Total) {
endpoint.log.Debug("data size mismatch",
zap.Int64("segment", pointer.SegmentSize),

View File

@ -134,7 +134,7 @@ func TestOffline(t *testing.T) {
result, err = service.KnownUnreliableOrOffline(ctx, []storj.NodeID{
planet.StorageNodes[0].ID(),
{1, 2, 3, 4}, //note that this succeeds by design
{1, 2, 3, 4}, // note that this succeeds by design
planet.StorageNodes[2].ID(),
})
require.NoError(t, err)

View File

@ -531,12 +531,12 @@ func ResolveIPAndNetwork(ctx context.Context, target string) (ipPort, network st
// If addr can be converted to 4byte notation, it is an IPv4 address, else its an IPv6 address
if ipv4 := ipAddr.IP.To4(); ipv4 != nil {
//Filter all IPv4 Addresses into /24 Subnet's
// Filter all IPv4 Addresses into /24 Subnet's
mask := net.CIDRMask(24, 32)
return net.JoinHostPort(ipAddr.String(), port), ipv4.Mask(mask).String(), nil
}
if ipv6 := ipAddr.IP.To16(); ipv6 != nil {
//Filter all IPv6 Addresses into /64 Subnet's
// Filter all IPv6 Addresses into /64 Subnet's
mask := net.CIDRMask(64, 128)
return net.JoinHostPort(ipAddr.String(), port), ipv6.Mask(mask).String(), nil
}

View File

@ -13,7 +13,7 @@ import (
)
func TestListInfos(t *testing.T) {
//This test is deliberately skipped as it requires credentials to coinpayments.net
// This test is deliberately skipped as it requires credentials to coinpayments.net
t.SkipNow()
ctx := testcontext.New(t)
defer ctx.Cleanup()
@ -23,12 +23,12 @@ func TestListInfos(t *testing.T) {
PrivateKey: "ask-littleskunk-on-keybase",
}).Transactions()
//verify that bad ids fail
// verify that bad ids fail
infos, err := payments.ListInfos(ctx, TransactionIDList{"an_unlikely_id"})
assert.Error(t, err)
assert.Len(t, infos, 0)
//verify that ListInfos can handle more than 25 good ids
// verify that ListInfos can handle more than 25 good ids
ids := TransactionIDList{}
for x := 0; x < 27; x++ {
tx, err := payments.Create(ctx,

View File

@ -152,7 +152,7 @@ func (accounts *accounts) CheckProjectInvoicingStatus(ctx context.Context, proje
}
if lastMonthUsage.Storage > 0 || lastMonthUsage.Egress > 0 || lastMonthUsage.ObjectCount > 0 {
//time passed into the check function need to be the UTC midnight dates of the first and last day of the month
// time passed into the check function need to be the UTC midnight dates of the first and last day of the month
err = accounts.service.db.ProjectRecords().Check(ctx, projectID, firstOfMonth.AddDate(0, -1, 0), firstOfMonth.Add(-time.Hour*24))
switch err {
case ErrProjectRecordExists:
@ -236,7 +236,7 @@ func (accounts *accounts) PaywallEnabled(userID uuid.UUID) bool {
return BytesAreWithinProportion(userID, accounts.service.PaywallProportion)
}
//BytesAreWithinProportion returns true if first byte is less than the normalized proportion [0..1].
// BytesAreWithinProportion returns true if first byte is less than the normalized proportion [0..1].
func BytesAreWithinProportion(uuidBytes [16]byte, proportion float64) bool {
return int(uuidBytes[0]) < int(proportion*256)
}

View File

@ -74,7 +74,7 @@ type Service struct {
// Minimum CoinPayment to create a coupon
MinCoinPayment int64
//Stripe Extended Features
// Stripe Extended Features
AutoAdvance bool
mu sync.Mutex
@ -204,7 +204,7 @@ func (service *Service) updateTransactions(ctx context.Context, ids TransactionA
// moment of CoinPayments receives funds, not when STORJ does
// this was a business decision to not wait until StatusCompleted
if info.Status >= coinpayments.StatusReceived {
//monkit currently does not have a DurationVal
// monkit currently does not have a DurationVal
mon.IntVal("coinpayment_duration").Observe(int64(time.Since(creationTimes[id])))
applies = append(applies, id)
}

View File

@ -154,7 +154,7 @@ func TestIdentifyIrreparableSegments(t *testing.T) {
_, err = repairQueue.Select(ctx)
require.True(t, storage.ErrEmptyQueue.Has(err))
//check if the expected segments were added to the irreparable DB
// check if the expected segments were added to the irreparable DB
irreparable := planet.Satellites[0].DB.Irreparable()
remoteSegmentInfo, err := irreparable.Get(ctx, pointerKey)
require.NoError(t, err)

View File

@ -93,7 +93,7 @@ func TestIrreparable(t *testing.T) {
require.Empty(t, cmp.Diff(segments[0], dbxInfo, cmp.Comparer(pb.Equal)))
}
{ //Delete existing entry
{ // Delete existing entry
err := irrdb.Delete(ctx, segments[0].Path)
require.NoError(t, err)

View File

@ -61,7 +61,7 @@ type SegmentRepairer struct {
// repaired pieces
multiplierOptimalThreshold float64
//repairOverride is the value handed over from the checker to override the Repair Threshold
// repairOverride is the value handed over from the checker to override the Repair Threshold
repairOverride int
}

View File

@ -28,7 +28,7 @@ type apikeys struct {
func (keys *apikeys) GetPagedByProjectID(ctx context.Context, projectID uuid.UUID, cursor console.APIKeyCursor) (akp *console.APIKeyPage, err error) {
defer mon.Task()(&ctx)(&err)
search := "%" + strings.Replace(cursor.Search, " ", "%", -1) + "%"
search := "%" + strings.ReplaceAll(cursor.Search, " ", "%") + "%"
if cursor.Limit > 50 {
cursor.Limit = 50

View File

@ -38,7 +38,7 @@ func (pm *projectMembers) GetByMemberID(ctx context.Context, memberID uuid.UUID)
func (pm *projectMembers) GetPagedByProjectID(ctx context.Context, projectID uuid.UUID, cursor console.ProjectMembersCursor) (_ *console.ProjectMembersPage, err error) {
defer mon.Task()(&ctx)(&err)
search := "%" + strings.Replace(cursor.Search, " ", "%", -1) + "%"
search := "%" + strings.ReplaceAll(cursor.Search, " ", "%") + "%"
if cursor.Limit > 50 {
cursor.Limit = 50

View File

@ -120,7 +120,7 @@ func (r *repairQueue) SelectN(ctx context.Context, limit int) (segs []pb.Injured
if limit <= 0 || limit > RepairQueueSelectLimit {
limit = RepairQueueSelectLimit
}
//todo: strictly enforce order-by or change tests
// TODO: strictly enforce order-by or change tests
rows, err := r.db.QueryContext(ctx, r.db.Rebind(`SELECT data FROM injuredsegments LIMIT ?`), limit)
if err != nil {
return nil, Error.Wrap(err)

View File

@ -16,7 +16,7 @@ var mon = monkit.Package()
// Delimiter separates nested paths in storage.
const Delimiter = '/'
//ErrKeyNotFound used when something doesn't exist.
// ErrKeyNotFound used when something doesn't exist.
var ErrKeyNotFound = errs.Class("key not found")
// ErrEmptyKey is returned when an empty key is used in Put or in CompareAndSwap.

View File

@ -17,7 +17,7 @@ import (
func testCRUD(t *testing.T, ctx *testcontext.Context, store storage.KeyValueStore) {
items := storage.Items{
// newItem("0", "", false), //TODO: broken
// newItem("0", "", false), // TODO: broken
newItem("\x00", "\x00", false),
newItem("a/b", "\x01\x00", false),
newItem("a\\b", "\xFF", false),

View File

@ -31,7 +31,7 @@ func TestStoragenodeContactEndpoint(t *testing.T) {
firstPing := pingStats.WhenLastPinged()
time.Sleep(time.Second) //HACKFIX: windows has large time granularity
time.Sleep(time.Second) // HACKFIX: windows has large time granularity
resp, err = pb.NewDRPCContactClient(conn).PingNode(ctx, &pb.ContactPingRequest{})
require.NotNil(t, resp)

View File

@ -456,7 +456,7 @@ func New(log *zap.Logger, full *identity.FullIdentity, db DB, revocationDB exten
peer.Contact.Service,
peer.DB.Bandwidth(),
config.Storage.AllocatedDiskSpace.Int64(),
//TODO use config.Storage.Monitor.Interval, but for some reason is not set
// TODO: use config.Storage.Monitor.Interval, but for some reason is not set
config.Storage.KBucketRefreshInterval,
peer.Contact.Chore.Trigger,
config.Storage2.Monitor,

View File

@ -199,8 +199,7 @@ func (blobs *BlobsUsageCache) SpaceUsedBySatellite(ctx context.Context, satellit
return values.Total, values.ContentSize, nil
}
// SpaceUsedForPieces returns the current total used space for
//// all pieces.
// SpaceUsedForPieces returns the current total used space for all pieces.
func (blobs *BlobsUsageCache) SpaceUsedForPieces(ctx context.Context) (int64, int64, error) {
blobs.mu.Lock()
defer blobs.mu.Unlock()

View File

@ -122,7 +122,7 @@ var monLiveRequests = mon.TaskNamed("live-request")
// Delete handles deleting a piece on piece store requested by uplink.
//
// DEPRECATED in favor of DeletePieces.
// Deprecated: use DeletePieces instead.
func (endpoint *Endpoint) Delete(ctx context.Context, delete *pb.PieceDeleteRequest) (_ *pb.PieceDeleteResponse, err error) {
defer monLiveRequests(&ctx)(&err)
defer mon.Task()(&ctx)(&err)

View File

@ -259,8 +259,8 @@ func TestOrderLimitGetValidation(t *testing.T) {
closeErr := downloader.Close()
err = errs.Combine(readErr, closeErr)
if tt.err != "" {
assert.Equal(t, 0, len(buffer)) //errors 10240
require.Error(t, err) //nil
assert.Equal(t, 0, len(buffer))
require.Error(t, err)
require.Contains(t, err.Error(), tt.err)
} else {
require.NoError(t, err)

View File

@ -14,15 +14,15 @@ import (
type Status = int
const (
//Unexpected status should not be used for sanity checking.
// Unexpected status should not be used for sanity checking.
Unexpected Status = 0
//Normal status reflects a lack of graceful exit.
// Normal status reflects a lack of graceful exit.
Normal = 1
//Exiting reflects an active graceful exit.
// Exiting reflects an active graceful exit.
Exiting = 2
//ExitSucceeded reflects a graceful exit that succeeded.
// ExitSucceeded reflects a graceful exit that succeeded.
ExitSucceeded = 3
//ExitFailed reflects a graceful exit that failed.
// ExitFailed reflects a graceful exit that failed.
ExitFailed = 4
)

View File

@ -128,7 +128,6 @@ func verifyOrders(t *testing.T, ctx *testcontext.Context, db *storagenodedb.DB,
for _, order := range orders {
for _, dbOrder := range dbOrders {
if order.Order.SerialNumber == dbOrder.Order.SerialNumber {
//fmt.Printf("Found %v\n", order.Order.SerialNumber)
found++
}
}

View File

@ -39,7 +39,8 @@ type Config struct {
}
// OldVersionConfig provides a list of allowed Versions per process.
// NB: this will be deprecated in favor of `ProcessesConfig`.
//
// NB: use `ProcessesConfig` for newer code instead.
type OldVersionConfig struct {
Satellite string `user:"true" help:"Allowed Satellite Versions" default:"v0.0.1"`
Storagenode string `user:"true" help:"Allowed Storagenode Versions" default:"v0.0.1"`