all: enable staticcheck (#2849)
* by having megacheck in disable it also disabled staticcheck
* fix closing body
* keep interfacer disabled
* hide bodies
* don't use deprecated func
* fix dead code
* fix potential overrun
* keep stylecheck disabled
* don't pass nil as context
* fix infinite recursion
* remove extraneous return
* fix data race
* use correct func
* ignore unused var
* remove unused consts
parent 7c6fdd09d2
commit 00b2e1a7d7
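Two of the recurring staticcheck-driven fixes in this commit are passing a real context to http.Server.Shutdown instead of nil and closing HTTP response bodies. A minimal sketch of both patterns is below; the function names, server wiring, and URL are illustrative placeholders, not code from this diff.

package main

import (
    "context"
    "log"
    "net/http"
)

// shutdownWhenDone blocks until ctx is cancelled, then shuts the server down.
// staticcheck flags a nil context here; context.Background() is the usual replacement.
func shutdownWhenDone(ctx context.Context, srv *http.Server) error {
    <-ctx.Done()
    return srv.Shutdown(context.Background())
}

// ping issues a GET and always closes the response body, which is the pattern
// the bodyclose linter enforces.
func ping(url string) error {
    resp, err := http.Get(url)
    if err != nil {
        return err
    }
    return resp.Body.Close()
}

func main() {
    if err := ping("https://example.com"); err != nil {
        log.Println("ping:", err)
    }
    _ = shutdownWhenDone // wiring up a real server and context is omitted from this sketch
}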
@@ -18,27 +18,28 @@ linters:
- varcheck # find unused global variables and constants
- structcheck # check for unused struct parameters
- deadcode # find code that is not used
#TODO#- unparam # check for unused parameters
- bodyclose # find unclosed http response bodies
- nakedret # check for naked returns
#- goimports # fix import order, disabled because it's slow
- gofmt
- gofmt # sanity check formatting
- misspell # check spelling
- unconvert # remove unnecessary conversions
- scopelint # checks for unpinned variables
- gocritic # checks for style, performance issues, and common programming errors
#TODO#- unparam # check for unused parameters
#TODO#- maligned # check for better memory usage
#TODO#- prealloc # easy optimizations
#TODO#- gosec
disable:
- goimports
- goimports # disabled, because it's slow, using scripts/check-imports.go instead.
- goconst # check for things that could be replaced by constants
- gocyclo # needs tweaking
- depguard # unused
- gosec # needs tweaking
- stylecheck # has false positives
- dupl # slow
- interfacer # not that useful
- gosimple # part of staticcheck
- unused # part of staticcheck
- megacheck # part of staticcheck
- lll
fast: false

@@ -120,7 +120,7 @@ func (s *Server) Run(ctx context.Context) error {
var group errgroup.Group
group.Go(func() error {
<-ctx.Done()
return s.server.Shutdown(nil)
return s.server.Shutdown(context.Background())
})
group.Go(func() error {
defer cancel()

@@ -163,25 +163,20 @@ func cmdRevokeCA(cmd *cobra.Command, args []string) (err error) {
func cmdRevokePeerCA(cmd *cobra.Command, args []string) (err error) {
ctx := process.Ctx(cmd)
argLen := len(args)
switch {
case argLen > 0:
if len(args) > 0 {
revokePeerCACfg.CA = identity.FullCAConfig{
CertPath: filepath.Join(identityDir, args[0], "ca.cert"),
KeyPath: filepath.Join(identityDir, args[0], "ca.key"),
}
revokePeerCACfg.RevocationDBURL = "bolt://" + filepath.Join(configDir, args[0], "revocations.db")
fallthrough
case argLen > 1:
}
if len(args) > 1 {
revokePeerCACfg.PeerCA = identity.PeerCAConfig{
CertPath: args[1],
}
}
if len(args) > 0 {
}
ca, err := revokePeerCACfg.CA.Load()
if err != nil {
return err
go.sum
@@ -454,6 +454,7 @@ golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5h
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190730183949-1393eb018365 h1:SaXEMXhWzMJThc05vu6uh61Q245r4KaWMrsTedk0FDc=
golang.org/x/sys v0.0.0-20190730183949-1393eb018365/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a h1:aYOabOQFp6Vj6W1F80affTUvO9UxmJRx8K0gsfABByQ=
golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=

@@ -97,8 +97,7 @@ func (planet *Planet) newBootstrap() (peer *bootstrap.Peer, err error) {
planet.config.Reconfigure.Bootstrap(0, &config)
}
var verInfo version.Info
verInfo = planet.NewVersionInfo()
versionInfo := planet.NewVersionInfo()
revocationDB, err := revocation.NewDBFromCfg(config.Server.Config)
if err != nil {

@@ -108,7 +107,7 @@ func (planet *Planet) newBootstrap() (peer *bootstrap.Peer, err error) {
err = errs.Combine(err, revocationDB.Close())
}()
peer, err = bootstrap.New(log, identity, db, revocationDB, config, verInfo)
peer, err = bootstrap.New(log, identity, db, revocationDB, config, versionInfo)
if err != nil {
return nil, err
}

@@ -207,7 +207,7 @@ func (planet *Planet) newSatellites(count int) ([]*satellite.Peer, error) {
planet.config.Reconfigure.Satellite(log, i, &config)
}
verInfo := planet.NewVersionInfo()
versionInfo := planet.NewVersionInfo()
revocationDB, err := revocation.NewDBFromCfg(config.Server.Config)
if err != nil {

@@ -215,7 +215,7 @@ func (planet *Planet) newSatellites(count int) ([]*satellite.Peer, error) {
}
planet.databases = append(planet.databases, revocationDB)
peer, err := satellite.New(log, identity, db, revocationDB, &config, verInfo)
peer, err := satellite.New(log, identity, db, revocationDB, &config, versionInfo)
if err != nil {
return xs, err
}

@@ -139,7 +139,7 @@ func (planet *Planet) newStorageNodes(count int, whitelistedSatellites storj.Nod
}
}
verInfo := planet.NewVersionInfo()
verisonInfo := planet.NewVersionInfo()
storageConfig := storagenodedb.Config{
Storage: config.Storage.Path,

@@ -168,7 +168,7 @@ func (planet *Planet) newStorageNodes(count int, whitelistedSatellites storj.Nod
}
planet.databases = append(planet.databases, revocationDB)
peer, err := storagenode.New(log, identity, db, revocationDB, config, verInfo)
peer, err := storagenode.New(log, identity, db, revocationDB, config, verisonInfo)
if err != nil {
return xs, err
}

@@ -56,7 +56,7 @@ func (a APIKey) Serialize() string {
// IsZero returns if the api key is an uninitialized value
func (a *APIKey) IsZero() bool {
return a.IsZero()
return a.lib.IsZero()
}
// ParseAPIKey parses an API key

@@ -79,10 +79,10 @@ type helloServer struct{}
func (s *helloServer) SayHello(ctx context.Context, in *pb.HelloRequest) (*pb.HelloReply, error) {
key, ok := auth.GetAPIKey(ctx)
if !ok {
return nil, grpc.Errorf(codes.Unauthenticated, "Invalid API credentials")
return nil, status.Errorf(codes.Unauthenticated, "Invalid API credentials")
}
if string(key) != "good key" {
return nil, grpc.Errorf(codes.Unauthenticated, "Invalid API credentials")
return nil, status.Errorf(codes.Unauthenticated, "Invalid API credentials")
}
return &pb.HelloReply{Message: "Hello " + in.Name}, nil

@@ -141,7 +141,6 @@ func TestEncodingDecodingStress(t *testing.T) {
combination(i+1, j+1)
}
}
return
}
combination(0, 0)
}
@@ -286,9 +286,7 @@ func NewFullIdentity(ctx context.Context, opts NewCAOptions) (*FullIdentity, err
// ToChains takes a number of certificate chains and returns them as a 2d slice of chains of certificates.
func ToChains(chains ...[]*x509.Certificate) [][]*x509.Certificate {
combinedChains := make([][]*x509.Certificate, len(chains))
for i, chain := range chains {
combinedChains[i] = chain
}
copy(combinedChains, chains)
return combinedChains
}

@@ -282,7 +282,6 @@ func TestSlowDialerHasTimeout(t *testing.T) {
if !transport.Error.Has(err) || errs.Unwrap(err) != context.DeadlineExceeded {
return errs.New("invalid error: %v (peer:%s target:%s)", err, peer.ID(), target.ID())
}
return nil
}
return nil
})

@@ -40,10 +40,7 @@ func bitAtDepth(id storj.NodeID, bitDepth int) bool {
power := uint(7 - bitOffset)
bitMask := byte(1 << power)
b := id[byteDepth]
if b&bitMask > 0 {
return true
}
return false
return b&bitMask > 0
}
func extendPrefix(prefix string, bit bool) string {

@@ -128,7 +128,7 @@ func ParseMacaroon(data []byte) (_ *Macaroon, err error) {
//cav.VerificationId = section[0].data
mac.caveats = append(mac.caveats, cav)
}
data, sig, err := parsePacket(data)
_, sig, err := parsePacket(data)
if err != nil {
return nil, err
}

@@ -30,7 +30,7 @@ func Equal(msg1, msg2 proto.Message) bool {
if err != nil {
return false
}
return bytes.Compare(msg1Bytes, msg2Bytes) == 0
return bytes.Equal(msg1Bytes, msg2Bytes)
}
// NodesToIDs extracts Node-s into a list of ids

@@ -67,7 +67,9 @@ func TestServeContentParseRange(t *testing.T) {
ServeContent(context.Background(), writer, req, "", time.Now().UTC(), ranger)
assert.Equal(t, http.StatusOK, writer.Code)
assert.Equal(t, "23", writer.Result().Header.Get("Content-Length"))
result := writer.Result()
assert.NoError(t, result.Body.Close())
assert.Equal(t, "23", result.Header.Get("Content-Length"))
}
func Test_isZeroTime(t *testing.T) {

@@ -97,7 +99,9 @@ func Test_setLastModified(t *testing.T) {
req := httptest.NewRecorder()
setLastModified(req, tt.modtime)
assert.Equal(t, tt.expected, req.Result().Header.Get("Last-Modified"), tt.name)
result := req.Result()
assert.NoError(t, result.Body.Close())
assert.Equal(t, tt.expected, result.Header.Get("Last-Modified"), tt.name)
}
}

@@ -106,7 +110,9 @@ func Test_setLastModifiedNilWriter(t *testing.T) {
setLastModified(nil, time.Now().UTC())
assert.Equal(t, "", req.Result().Header.Get("Last-Modified"))
result := req.Result()
assert.NoError(t, result.Body.Close())
assert.Equal(t, "", result.Header.Get("Last-Modified"))
}
func Test_checkPreconditions(t *testing.T) {
@@ -40,7 +40,7 @@ func ParseNodeURL(s string) (NodeURL, error) {
return NodeURL{}, nil
}
if !strings.HasPrefix(s, "storj://") {
if strings.Index(s, "://") < 0 {
if !strings.Contains(s, "://") {
s = "storj://" + s
}
}

@@ -193,7 +193,7 @@ func (t *Service) CalculateAtRestData(ctx context.Context) (latestTally time.Tim
totalTallies.Report("total")
//store byte hours, not just bytes
numHours := time.Now().Sub(latestTally).Hours()
numHours := time.Since(latestTally).Hours()
if latestTally.IsZero() {
numHours = 1.0 //todo: something more considered?
}

@@ -86,7 +86,7 @@ func TestDisqualificationTooManyFailedAudits(t *testing.T) {
iterations, auditDQCutOff, prevReputation, reputation,
)
require.True(t, time.Now().Sub(*dossier.Disqualified) >= 0,
require.True(t, time.Since(*dossier.Disqualified) >= 0,
"Disqualified should be in the past",
)

@@ -144,7 +144,7 @@ func (server *Server) Run(ctx context.Context) (err error) {
var group errgroup.Group
group.Go(func() error {
<-ctx.Done()
return server.server.Shutdown(nil)
return server.server.Shutdown(context.Background())
})
group.Go(func() error {
defer cancel()

@@ -48,8 +48,8 @@ func (clicker *LinkClicker) SendEmail(ctx context.Context, msg *post.Message) (e
// click all links
var sendError error
for _, link := range links {
_, err := http.Get(link)
sendError = errs.Combine(sendError, err)
response, err := http.Get(link)
sendError = errs.Combine(sendError, err, response.Body.Close())
}
return sendError

@@ -241,7 +241,7 @@ func (s *Server) Run(ctx context.Context) error {
var group errgroup.Group
group.Go(func() error {
<-ctx.Done()
return Error.Wrap(s.server.Shutdown(ctx))
return Error.Wrap(s.server.Shutdown(context.Background()))
})
group.Go(func() error {
defer cancel()

@@ -72,20 +72,29 @@ func TestCreateAndStopOffers(t *testing.T) {
group.Go(func() error {
baseURL := "http://" + addr.String()
_, err := http.PostForm(baseURL+o.Path, o.Values)
req, err := http.PostForm(baseURL+o.Path, o.Values)
if err != nil {
return err
}
_, err = http.Get(baseURL)
if err != nil {
if err := req.Body.Close(); err != nil {
return err
}
_, err = http.Post(baseURL+"/stop/"+id, "application/x-www-form-urlencoded", nil)
req, err = http.Get(baseURL)
if err != nil {
return err
}
if err := req.Body.Close(); err != nil {
return err
}
req, err = http.Post(baseURL+"/stop/"+id, "application/x-www-form-urlencoded", nil)
if err != nil {
return err
}
if err := req.Body.Close(); err != nil {
return err
}
return nil
})
@@ -27,22 +27,10 @@ import (
"storj.io/storj/pkg/signing"
"storj.io/storj/pkg/storj"
"storj.io/storj/satellite"
"storj.io/storj/satellite/console"
"storj.io/storj/uplink/eestream"
"storj.io/storj/uplink/metainfo"
)
// mockAPIKeys is mock for api keys store of pointerdb
type mockAPIKeys struct {
info console.APIKeyInfo
err error
}
// GetByKey return api key info for given key
func (keys *mockAPIKeys) GetByKey(ctx context.Context, key macaroon.APIKey) (*console.APIKeyInfo, error) {
return &keys.info, keys.err
}
func TestInvalidAPIKey(t *testing.T) {
ctx := testcontext.New(t)
defer ctx.Cleanup()

@@ -235,11 +223,6 @@ func TestServiceList(t *testing.T) {
assert.NoError(t, err)
}
type Test struct {
Request storj.ListOptions
Expected storj.ObjectList // objects are partial
}
config := planet.Uplinks[0].GetConfig(planet.Satellites[0])
project, bucket, err := planet.Uplinks[0].GetProjectAndBucket(ctx, planet.Satellites[0], "testbucket", config)
require.NoError(t, err)

@@ -311,7 +311,7 @@ func (endpoint *Endpoint) validatePointer(ctx context.Context, pointer *pb.Point
if limit.PieceId.IsZero() || limit.PieceId != derivedPieceID {
return Error.New("invalid order limit piece id")
}
if bytes.Compare(piece.NodeId.Bytes(), limit.StorageNodeId.Bytes()) != 0 {
if piece.NodeId != limit.StorageNodeId {
return Error.New("piece NodeID != order limit NodeID")
}
}

@@ -323,7 +323,7 @@ func (endpoint *Endpoint) validatePointer(ctx context.Context, pointer *pb.Point
func (endpoint *Endpoint) validateRedundancy(ctx context.Context, redundancy *pb.RedundancyScheme) (err error) {
defer mon.Task()(&ctx)(&err)
if endpoint.requiredRSConfig.Validate == true {
if endpoint.requiredRSConfig.Validate {
if endpoint.requiredRSConfig.ErasureShareSize.Int32() != redundancy.ErasureShareSize ||
endpoint.requiredRSConfig.MaxThreshold != int(redundancy.Total) ||
endpoint.requiredRSConfig.MinThreshold != int(redundancy.MinReq) ||

@@ -185,7 +185,7 @@ func (service *Service) Get(ctx context.Context, nodeID storj.NodeID) (_ *NodeDo
// IsOnline checks if a node is 'online' based on the collected statistics.
func (service *Service) IsOnline(node *NodeDossier) bool {
return time.Now().Sub(node.Reputation.LastContactSuccess) < service.config.Node.OnlineWindow ||
return time.Since(node.Reputation.LastContactSuccess) < service.config.Node.OnlineWindow ||
node.Reputation.LastContactSuccess.After(node.Reputation.LastContactFailure)
}

@@ -281,7 +281,7 @@ func (obs *checkerObserver) RemoteSegment(ctx context.Context, path storj.Path,
if len(pathElements) >= 4 {
project, bucketName, segmentpath := pathElements[0], pathElements[2], pathElements[3]
lostSegInfo := storj.JoinPaths(project, bucketName, segmentpath)
if contains(obs.monStats.remoteSegmentInfo, lostSegInfo) == false {
if !contains(obs.monStats.remoteSegmentInfo, lostSegInfo) {
obs.monStats.remoteSegmentInfo = append(obs.monStats.remoteSegmentInfo, lostSegInfo)
}
}

@@ -157,7 +157,7 @@ func TestIdentifyIrreparableSegments(t *testing.T) {
err = checker.IdentifyInjuredSegments(ctx)
require.NoError(t, err)
remoteSegmentInfo, err = irreparable.Get(ctx, []byte("fake-piece-id"))
_, err = irreparable.Get(ctx, []byte("fake-piece-id"))
require.Error(t, err)
})
}

@@ -304,7 +304,6 @@ func TestRepairMultipleDisqualified(t *testing.T) {
require.True(t, (numStorageNodes-toDisqualify) >= numPieces)
// disqualify nodes and track lost pieces
var lostPieces []int32
nodesToDisqualify := make(map[storj.NodeID]bool)
nodesToKeepAlive := make(map[storj.NodeID]bool)

@@ -314,7 +313,6 @@ func TestRepairMultipleDisqualified(t *testing.T) {
continue
}
nodesToDisqualify[piece.NodeId] = true
lostPieces = append(lostPieces, piece.GetPieceNum())
}
for _, node := range planet.StorageNodes {

@@ -172,7 +172,7 @@ func (keys *attributionDB) QueryAttribution(ctx context.Context, partnerID uuid.
}
defer func() { err = errs.Combine(err, rows.Close()) }()
results := make([]*attribution.CSVRow, 0, 0)
results := []*attribution.CSVRow{}
for rows.Next() {
r := &attribution.CSVRow{}
err := rows.Scan(&r.PartnerID, &r.ProjectID, &r.BucketName, &r.RemoteBytesPerHour, &r.InlineBytesPerHour, &r.EgressData)

@@ -64,7 +64,7 @@ func (db *offersDB) GetActiveOffersByType(ctx context.Context, offerType rewards
)
defer func() { err = errs.Combine(err, rows.Close()) }()
results := make(rewards.Offers, 0, 0)
results := rewards.Offers{}
for rows.Next() {
o := rewards.Offer{}
err := rows.Scan(&o.ID, &o.Name, &o.Description, &awardCreditInCents, &inviteeCreditInCents, &awardCreditDurationDays, &inviteeCreditDurationDays, &redeemableCap, &o.ExpiresAt, &o.CreatedAt, &o.Status, &o.Type)
@@ -867,11 +867,11 @@ func (cache *overlaycache) UpdateUptime(ctx context.Context, nodeID storj.NodeID
mon.Meter("uptime_update_successes").Mark(1)
// we have seen this node in the past 24 hours
if time.Now().Sub(lastContactFailure) > time.Hour*24 {
if time.Since(lastContactFailure) > time.Hour*24 {
mon.Meter("uptime_seen_24h").Mark(1)
}
// we have seen this node in the past week
if time.Now().Sub(lastContactFailure) > time.Hour*24*7 {
if time.Since(lastContactFailure) > time.Hour*24*7 {
mon.Meter("uptime_seen_week").Mark(1)
}
} else {

@@ -879,11 +879,11 @@ func (cache *overlaycache) UpdateUptime(ctx context.Context, nodeID storj.NodeID
mon.Meter("uptime_update_failures").Mark(1)
// it's been over 24 hours since we've seen this node
if time.Now().Sub(lastContactSuccess) > time.Hour*24 {
if time.Since(lastContactSuccess) > time.Hour*24 {
mon.Meter("uptime_not_seen_24h").Mark(1)
}
// it's been over a week since we've seen this node
if time.Now().Sub(lastContactSuccess) > time.Hour*24*7 {
if time.Since(lastContactSuccess) > time.Hour*24*7 {
mon.Meter("uptime_not_seen_week").Mark(1)
}
}

@@ -171,7 +171,7 @@ func (db *StoragenodeAccounting) QueryPaymentInfo(ctx context.Context, start tim
return nil, Error.Wrap(err)
}
defer func() { err = errs.Combine(err, rows.Close()) }()
csv := make([]*accounting.CSVRow, 0, 0)
csv := []*accounting.CSVRow{}
for rows.Next() {
var nodeID []byte
r := &accounting.CSVRow{}

@@ -13,5 +13,5 @@ import (
)
var _ = imports.Process
var _ = packages.LoadImports
var _ = packages.NeedName
var _ = astutil.PathEnclosingInterval

@@ -9,10 +9,10 @@ import (
"io/ioutil"
"os"
"path/filepath"
"sync"
"testing"
"github.com/zeebo/errs"
"golang.org/x/sync/errgroup"
"storj.io/storj/internal/testcontext"
"storj.io/storj/storage"
@@ -168,21 +168,18 @@ func BenchmarkClientWrite(b *testing.B) {
// benchmark test: execute 1000 Put operations where each call to `PutAndCommit` does the following:
// 1) create a BoltDB transaction (tx), 2) execute the db operation, 3) commit the tx which writes it to disk.
for n := 0; n < b.N; n++ {
var wg sync.WaitGroup
var group errgroup.Group
for i := 0; i < 1000; i++ {
key := storage.Key(fmt.Sprintf("testkey%d", i))
value := storage.Value("testvalue")
wg.Add(1)
go func() {
defer wg.Done()
err := kdb.PutAndCommit(ctx, key, value)
if err != nil {
b.Fatal("Put err:", err)
group.Go(func() error {
return kdb.PutAndCommit(ctx, key, value)
})
}
}()
if err := group.Wait(); err != nil {
b.Fatalf("PutAndCommit: %v", err)
}
wg.Wait()
}
}

@@ -210,21 +207,18 @@ func BenchmarkClientNoSyncWrite(b *testing.B) {
// 2) executes the db operation, and 3) commits the tx which does NOT write it to disk.
kdb.db.NoSync = true
for n := 0; n < b.N; n++ {
var wg sync.WaitGroup
var group errgroup.Group
for i := 0; i < 1000; i++ {
key := storage.Key(fmt.Sprintf("testkey%d", i))
value := storage.Value("testvalue")
wg.Add(1)
go func() {
defer wg.Done()
err := kdb.PutAndCommit(ctx, key, value)
if err != nil {
b.Fatal("PutAndCommit Nosync err:", err)
group.Go(func() error {
return kdb.PutAndCommit(ctx, key, value)
})
}
}()
if err := group.Wait(); err != nil {
b.Fatalf("PutAndCommit: %v", err)
}
wg.Wait()
}
err = kdb.db.Sync()
if err != nil {

@@ -258,26 +252,18 @@ func BenchmarkClientBatchWrite(b *testing.B) {
// transaction for all operations currently in the batch, executes the operations,
// commits, and writes them to disk
for n := 0; n < b.N; n++ {
var wg sync.WaitGroup
var group errgroup.Group
for i := 0; i < 1000; i++ {
key := storage.Key(fmt.Sprintf("testkey%d", i))
value := storage.Value("testvalue")
wg.Add(1)
go func() {
defer wg.Done()
err := kdb.Put(ctx, key, value)
if err != nil {
b.Fatalf("boltDB put: %v\n", err)
group.Go(func() error {
return kdb.Put(ctx, key, value)
})
}
}()
if err != nil {
b.Fatalf("boltDB put: %v\n", err)
if err := group.Wait(); err != nil {
b.Fatalf("Put: %v", err)
}
}
wg.Wait()
}
}
func BenchmarkClientBatchNoSyncWrite(b *testing.B) {

@@ -306,25 +292,19 @@ func BenchmarkClientBatchNoSyncWrite(b *testing.B) {
// commits, but does NOT write them to disk
kdb.db.NoSync = true
for n := 0; n < b.N; n++ {
var wg sync.WaitGroup
var group errgroup.Group
for i := 0; i < 1000; i++ {
key := storage.Key(fmt.Sprintf("testkey%d", i))
value := storage.Value("testvalue")
group.Go(func() error {
return kdb.Put(ctx, key, value)
})
}
wg.Add(1)
go func() {
defer wg.Done()
err := kdb.Put(ctx, key, value)
if err != nil {
b.Fatalf("boltDB put: %v\n", err)
if err := group.Wait(); err != nil {
b.Fatalf("Put: %v", err)
}
}()
if err != nil {
b.Fatalf("boltDB put: %v\n", err)
}
}
wg.Wait()
err := kdb.db.Sync()
if err != nil {
b.Fatalf("boltDB sync err: %v\n", err)
@@ -109,9 +109,6 @@ func tryFixLongPath(path string) string {
// rename implements atomic file rename on windows
func rename(oldpath, newpath string) error {
const replace_existing = 0x1
const write_through = 0x8
oldpathp, err := windows.UTF16PtrFromString(tryFixLongPath(oldpath))
if err != nil {
return &os.LinkError{Op: "replace", Old: oldpath, New: newpath, Err: err}

@@ -162,7 +162,6 @@ func TestStoreLoad(t *testing.T) {
func TestDeleteWhileReading(t *testing.T) {
const blobSize = 8 << 10
const repeatCount = 16
ctx := testcontext.New(t)
defer ctx.Cleanup()

@@ -6,9 +6,10 @@ package testsuite
import (
"path"
"strconv"
"sync"
"testing"
"golang.org/x/sync/errgroup"
"storj.io/storj/storage"
)
@@ -43,21 +44,19 @@ func RunBenchmarks(b *testing.B, store storage.KeyValueStore) {
b.Run("Put", func(b *testing.B) {
b.SetBytes(int64(len(items)))
for k := 0; k < b.N; k++ {
var wg sync.WaitGroup
var group errgroup.Group
for _, item := range items {
key := item.Key
value := item.Value
wg.Add(1)
go func() {
defer wg.Done()
err := store.Put(ctx, key, value)
if err != nil {
b.Fatal("store.Put err", err)
group.Go(func() error {
return store.Put(ctx, key, value)
})
}
}()
if err := group.Wait(); err != nil {
b.Fatalf("Put: %v", err)
}
wg.Wait()
}
})

@@ -8,7 +8,6 @@ import (
"encoding/gob"
"fmt"
"strconv"
"sync"
"testing"
"github.com/stretchr/testify/assert"

@@ -43,20 +42,17 @@ func testConstraints(t *testing.T, store storage.KeyValueStore) {
})
}
var wg sync.WaitGroup
var group errgroup.Group
for _, item := range items {
key := item.Key
value := item.Value
wg.Add(1)
go func() {
defer wg.Done()
err := store.Put(ctx, key, value)
if err != nil {
t.Fatal("store.Put err:", err)
group.Go(func() error {
return store.Put(ctx, key, value)
})
}
}()
if err := group.Wait(); err != nil {
t.Fatalf("Put failed: %v", err)
}
wg.Wait()
defer cleanupItems(store, items)
t.Run("Put Empty", func(t *testing.T) {
@@ -89,7 +89,7 @@ func (server *Server) Run(ctx context.Context) (err error) {
var group errgroup.Group
group.Go(func() error {
<-ctx.Done()
return server.server.Shutdown(nil)
return server.server.Shutdown(context.Background())
})
group.Go(func() error {
defer cancel()

@@ -157,7 +157,7 @@ func (server *Server) satelliteHandler(w http.ResponseWriter, r *http.Request) {
return
}
satelliteID, err := storj.NodeIDFromString(strings.TrimLeft(r.URL.Path, "/api/satellite/"))
satelliteID, err := storj.NodeIDFromString(strings.TrimPrefix(r.URL.Path, "/api/satellite/"))
if err != nil {
server.writeError(w, http.StatusBadRequest, Error.Wrap(err))
return

@@ -78,7 +78,7 @@ func (db *consoleDB) getDailyBandwidthUsed(ctx context.Context, cond string, arg
}()
var dates []time.Time
dailyBandwidth := make(map[time.Time]*console.BandwidthUsed, 0)
dailyBandwidth := make(map[time.Time]*console.BandwidthUsed)
for rows.Next() {
var action int32