Enable more linters (#272)

* enable more linters

* Run gofmt -s

* run goimports

* run unconvert

* fix naked return

* fix misspellings

* fix ineffectual assignments

* fix missing declaration

* don't use deprecated grpc.Errorf

* check errors in tests

* run gofmt -w -r "assert.Nil(err) -> assert.NoError(err)"

* fix directory permissions

* don't use nil Context

* simplify boolean expressions

* use bytes.Equal instead of bytes.Compare

* merge variable declarations, remove redundant returns

* fix some golint errors

* run goimports

* handle more errors

* delete empty TestMain

* delete empty TestMain

* ignore examples for now

* fix lint errors

* remove unused values

* more fixes

* run gofmt -w -s .

* add more comments

* fix naming

* more lint fixes

* try switching travis to go1.11

* fix unnecessary conversions

* fix deprecated methods

* use go1.10 and disable gofmt/goimports for now

* switch to 1.10

* don't re-enable gofmt and goimports

* switch covermode to atomic because of -race

* gofmt
This commit is contained in:
Egon Elbre 2018-08-27 20:28:16 +03:00 committed by JT Olio
parent 7e52c81ebc
commit 0f5a2f4ef5
68 changed files with 341 additions and 317 deletions

View File

@ -3,7 +3,6 @@
GO_VERSION ?= 1.10
COMPOSE_PROJECT_NAME := ${TAG}-$(shell git rev-parse --abbrev-ref HEAD)
GO_DIRS := $(shell go list ./... | grep -v storj.io/storj/examples)
BRANCH := $(shell git rev-parse --abbrev-ref HEAD)
ifeq (${BRANCH},master)
TAG := $(shell git rev-parse --short HEAD)-go${GO_VERSION}
@ -12,22 +11,50 @@ TAG := $(shell git rev-parse --short HEAD)-${BRANCH}-go${GO_VERSION}
endif
# currently disabled linters:
# gofmt # enable after switch to go1.11
# goimports # enable after switch to go1.11
# unparam # enable later
# gosec # enable later
# vetshadow # enable later
# gochecknoinits # enable later
# gochecknoglobals # enable later
# dupl # needs tuning
# gocyclo # needs tuning
# lll # long lines, not relevant
# gotype, gotypex # already done by compiling
# safesql # no sql
# interfacer # not that useful
lint: check-copyrights
@echo "Running ${@}"
@gometalinter \
--deadline=170s \
--disable-all \
--enable=golint \
--enable=errcheck \
--enable=goimports \
--enable=vet \
--enable=deadcode \
--enable=goconst \
--deadline=10m \
--enable-all \
--enable=golint \
--enable=errcheck \
--enable=unconvert \
--enable=structcheck \
--enable=misspell \
--disable=goimports \
--enable=ineffassign \
--disable=gofmt \
--enable=nakedret \
--enable=megacheck \
--disable=unparam \
--disable=gosec \
--disable=vetshadow \
--disable=gochecknoinits \
--disable=gochecknoglobals \
--disable=dupl \
--disable=gocyclo \
--disable=lll \
--disable=gotype --disable=gotypex \
--disable=safesql \
--disable=interfacer \
--skip=examples \
--exclude=".*\.pb\.go" \
--exclude=".*\.dbx\.go" \
--exclude=".*_test.go" \
--exclude="examples/*" \
${GO_DIRS}
./...
check-copyrights:
@echo "Running ${@}"
@ -51,7 +78,7 @@ build-dev-deps:
test: lint
go install -v ./...
go test -v -covermode=count -coverprofile=coverage.out ./...
go test -race -v -covermode=atomic -coverprofile=coverage.out ./...
gover
@echo done

View File

@ -23,6 +23,7 @@ const (
storagenodeCount = 50
)
// HeavyClient is for configuring client
type HeavyClient struct {
Identity provider.IdentityConfig
Kademlia kademlia.Config
@ -31,6 +32,7 @@ type HeavyClient struct {
MockOverlay bool `default:"true" help:"if false, use real overlay"`
}
// StorageNode is for configuring storage nodes
type StorageNode struct {
Identity provider.IdentityConfig
Kademlia kademlia.Config

View File

@ -18,8 +18,6 @@ type cacheConfig struct {
}
func (c cacheConfig) open() (*overlay.Cache, error) {
overlay.NewBoltOverlayCache(cacheCfg.DatabaseURL, nil)
dburl, err := url.Parse(c.DatabaseURL)
if err != nil {
return nil, Error.Wrap(err)
@ -38,7 +36,7 @@ func (c cacheConfig) open() (*overlay.Cache, error) {
if err != nil {
return nil, Error.New("invalid db: %s", err)
}
cache, err = overlay.NewRedisOverlayCache(dburl.Host, overlay.UrlPwd(dburl), db, nil)
cache, err = overlay.NewRedisOverlayCache(dburl.Host, overlay.GetUserPassword(dburl), db, nil)
if err != nil {
return nil, err
}

View File

@ -6,6 +6,7 @@ package main
import (
"encoding/json"
"io/ioutil"
"github.com/spf13/cobra"
"github.com/zeebo/errs"
"go.uber.org/zap"
@ -16,6 +17,7 @@ import (
)
var (
// Error is the error class for overlays
Error = errs.Class("overlay error")
rootCmd = &cobra.Command{
Use: "overlay",
@ -95,9 +97,9 @@ func cmdAdd(cmd *cobra.Command, args []string) (err error) {
Address: a,
},
Restrictions: &proto.NodeRestrictions{
FreeBandwidth: 2000000000,
FreeDisk: 2000000000,
},
FreeBandwidth: 2000000000,
FreeDisk: 2000000000,
},
Type: 1,
})
if err != nil {

View File

@ -55,6 +55,9 @@ func copy(cmd *cobra.Command, args []string) (err error) {
if u.Scheme == "" {
f, err := os.Open(args[0])
if err != nil {
return err
}
fi, err := f.Stat()
if err != nil {
@ -66,7 +69,7 @@ func copy(cmd *cobra.Command, args []string) (err error) {
return err
}
defer f.Close()
defer func() { _ = f.Close() }()
u, err = url.Parse(args[1])
if err != nil {
@ -94,7 +97,7 @@ func copy(cmd *cobra.Command, args []string) (err error) {
return err
}
defer f.Close()
defer func() { _ = f.Close() }()
err = so.GetObject(ctx, oi.Bucket, oi.Name, 0, oi.Size, f, oi.ETag)
if err != nil {

View File

@ -14,6 +14,7 @@ import (
const defaultConfDir = "$HOME/.storj/cli"
// Config is miniogw.Config configuration
type Config struct {
miniogw.Config
}

View File

@ -34,10 +34,10 @@ var (
runCfg miniogw.Config
setupCfg struct {
CA provider.CASetupConfig
Identity provider.IdentitySetupConfig
BasePath string `default:"$CONFDIR" help:"base path for setup"`
Concurrency uint `default:"4" help:"number of concurrent workers for certificate authority generation"`
CA provider.CASetupConfig
Identity provider.IdentitySetupConfig
BasePath string `default:"$CONFDIR" help:"base path for setup"`
// Concurrency uint `default:"4" help:"number of concurrent workers for certificate authority generation"`
Overwrite bool `default:"false" help:"whether to overwrite pre-existing configuration files"`
SatelliteAddr string `default:"localhost:7778" help:"the address to use for the satellite"`
APIKey string `default:"" help:"the api key to use for the satellite"`

View File

@ -14,7 +14,7 @@ import (
"github.com/urfave/cli"
"github.com/zeebo/errs"
"storj.io/storj/pkg/piecestore"
pstore "storj.io/storj/pkg/piecestore"
"storj.io/storj/pkg/process"
)
@ -101,7 +101,7 @@ func run(_ *cobra.Command, args []string) error {
return err
}
if fileInfo.IsDir() != true {
if !fileInfo.IsDir() {
return argError.New(fmt.Sprintf("Path (%s) is a file, not a directory", c.Args().Get(1)))
}

View File

@ -23,7 +23,7 @@ import (
var (
pointerdbClientPort string
ctx = context.Background()
ctx = context.Background()
)
func initializeFlags() {

View File

@ -188,16 +188,17 @@ func EnsureRedis(t *testing.T) (_ RedisDone) {
index, _ := randomHex(5)
redisRefs[index] = true
if testRedis.started != true {
if !testRedis.started {
conn, err := net.Dial("tcp", "127.0.0.1:6379")
if err != nil {
testRedis.start(t)
} else {
testRedis.started = true
n, err := conn.Write([]byte("*1\r\n$8\r\nflushall\r\n"))
_, err := conn.Write([]byte("*1\r\n$8\r\nflushall\r\n"))
if err != nil {
log.Fatalf("Failed to request flush of existing redis keys: error %s\n", err)
}
var n int
b := make([]byte, 5)
n, err = conn.Read(b)
if err != nil {
@ -206,7 +207,10 @@ func EnsureRedis(t *testing.T) (_ RedisDone) {
if n != len(b) || !bytes.Equal(b, []byte("+OK\r\n")) {
log.Fatalf("Failed to flush existing redis keys: Unexpected response %s\n", b)
}
conn.Close()
err = conn.Close()
if err != nil {
log.Fatalf("Failed to close conn: %s\n", err)
}
}
}
@ -256,6 +260,7 @@ func (r *RedisServer) start(t *testing.T) {
if err := cmd.Run(); err != nil {
// TODO(bryanchriswhite) error checking
t.Logf("unable to run redis: %v", err)
}
}()

View File

@ -6,11 +6,12 @@ package mock_dht
import (
context "context"
gomock "github.com/golang/mock/gomock"
reflect "reflect"
time "time"
gomock "github.com/golang/mock/gomock"
dht "storj.io/storj/pkg/dht"
overlay "storj.io/storj/protos/overlay"
time "time"
)
// MockDHT is a mock of DHT interface

View File

@ -55,7 +55,7 @@ func DecodeReaders(ctx context.Context, rs map[int]io.ReadCloser,
// Kick off a goroutine to watch for context cancelation.
go func() {
<-dr.ctx.Done()
dr.Close()
_ = dr.Close()
}()
return dr
}

View File

@ -330,7 +330,7 @@ type EncodedRanger struct {
}
// NewEncodedRanger from the given Ranger and RedundancyStrategy. See the
// comments for EncodeReader about the minumum and optimum thresholds, and the
// comments for EncodeReader about the minimum and optimum thresholds, and the
// max buffer memory.
func NewEncodedRanger(rr ranger.Ranger, rs RedundancyStrategy, mbm int) (*EncodedRanger, error) {
if rr.Size()%int64(rs.DecodedBlockSize()) != 0 {

View File

@ -241,9 +241,10 @@ func (b *PieceBuffer) HasShare(num int64) bool {
bufShares := int64(b.buffered() / b.shareSize)
if num-b.currentShare > 0 {
if bufShares > num-b.currentShare {
b.discardUntil(num)
// TODO: should this error be ignored?
_ = b.discardUntil(num)
} else {
b.discardUntil(b.currentShare + bufShares)
_ = b.discardUntil(b.currentShare + bufShares)
}
bufShares = int64(b.buffered() / b.shareSize)
}

View File

@ -113,8 +113,10 @@ func TestRSRanger(t *testing.T) {
for i, piece := range pieces {
rrs[i] = ranger.ByteRangeCloser(piece)
}
decrypter, err := NewAESGCMDecrypter(
&encKey, &firstNonce, rs.DecodedBlockSize())
decrypter, err := NewAESGCMDecrypter(&encKey, &firstNonce, rs.DecodedBlockSize())
if err != nil {
t.Fatal(err)
}
rc, err := Decode(rrs, rs, 0)
if err != nil {
t.Fatal(err)
@ -460,7 +462,7 @@ func testRSProblematic(t *testing.T, tt testCase, i int, fn problematicReadClose
defer decoder.Close()
data2, err := ioutil.ReadAll(decoder)
if tt.fail {
if err == nil && bytes.Compare(data, data2) == 0 {
if err == nil && bytes.Equal(data, data2) {
assert.Fail(t, "expected to fail, but didn't", errTag)
}
} else if assert.NoError(t, err, errTag) {

View File

@ -40,6 +40,7 @@ func bootstrapTestNetwork(t *testing.T, ip, port string) ([]dht.DHT, overlay.Nod
dhts = append(dhts, boot)
rt, err := boot.GetRoutingTable(context.Background())
assert.NoError(t, err)
bootNode := rt.Local()
err = boot.ListenAndServe()
@ -133,7 +134,6 @@ func TestGetNodes(t *testing.T) {
cases := []struct {
k *Kademlia
start string
limit int
expectedErr error
restrictions []overlay.Restriction
@ -179,8 +179,6 @@ func TestFindNode(t *testing.T) {
cases := []struct {
k *Kademlia
start string
input NodeID
expectedErr error
}{
{

View File

@ -8,8 +8,7 @@ import (
"storj.io/storj/storage"
)
func (rt *RoutingTable) addToReplacementCache(kadBucketID storage.Key, node *proto.Node) {
func (rt *RoutingTable) addToReplacementCache(kadBucketID storage.Key, node *proto.Node) {
bucketID := string(kadBucketID)
nodes := rt.replacementCache[bucketID]
nodes = append(nodes, node)

View File

@ -38,7 +38,7 @@ type RoutingTable struct {
type RoutingOptions struct {
kpath string
npath string
idLength int //TODO (JJ): add checks for > 0
idLength int //TODO (JJ): add checks for > 0
bucketSize int
rcBucketSize int
}
@ -67,7 +67,7 @@ func NewRoutingTable(localNode *proto.Node, options *RoutingOptions) (*RoutingTa
rcBucketSize: options.rcBucketSize,
}
ok, err := rt.addNode(localNode)
if ok == false || err != nil {
if !ok || err != nil {
return nil, RoutingErr.New("could not add localNode to routing table: %s", err)
}
return rt, nil
@ -161,7 +161,7 @@ func (rt *RoutingTable) FindNear(id dht.NodeID, limit int) ([]*proto.Node, error
return unmarshaledNodes, nil
}
// ConnectionSuccess updates or adds a node to the routing table when
// ConnectionSuccess updates or adds a node to the routing table when
// a successful connection is made to the node on the network
func (rt *RoutingTable) ConnectionSuccess(node *proto.Node) error {
v, err := rt.nodeBucketDB.Get(storage.Key(node.Id))

View File

@ -263,10 +263,7 @@ func (rt *RoutingTable) kadBucketContainsLocalNode(bucketID storage.Key) (bool,
if err != nil {
return false, err
}
if bytes.Compare(bucket, bucketID) == 0 {
return true, nil
}
return false, nil
return bytes.Equal(bucket, bucketID), nil
}
// kadBucketHasRoom: helper, returns true if it has fewer than k nodes
@ -358,7 +355,7 @@ func (rt *RoutingTable) getUnmarshaledNodesFromBucket(bucketID storage.Key) ([]*
// getKBucketRange: helper, returns the left and right endpoints of the range of node ids contained within the bucket
func (rt *RoutingTable) getKBucketRange(bucketID storage.Key) (storage.Keys, error) {
key := storage.Key(bucketID)
key := bucketID
kadIDs, err := rt.kadBucketDB.ReverseList(key, 2)
if err != nil {
return nil, RoutingErr.New("could not reverse list k bucket ids %s", err)

View File

@ -34,10 +34,10 @@ func createRT(localNodeID []byte) *RoutingTable {
}
localNode := &proto.Node{Id: string(localNodeID)}
options := &RoutingOptions{
kpath: tempfile("Kadbucket"),
npath: tempfile("Nodebucket"),
idLength: 16,
bucketSize: 6,
kpath: tempfile("Kadbucket"),
npath: tempfile("Nodebucket"),
idLength: 16,
bucketSize: 6,
rcBucketSize: 2,
}
rt, _ := NewRoutingTable(localNode, options)
@ -169,7 +169,7 @@ func TestAddNode(t *testing.T) {
assert.Equal(t, 6, len(a))
//should drop
node13 := mockNode("8O")
node13 := mockNode("8O")
ok, err = rt.addNode(node13)
assert.False(t, ok)
assert.NoError(t, err)
@ -347,6 +347,7 @@ func TestDetermineFurthestIDWithinK(t *testing.T) {
nodes, err := rt.nodeBucketDB.List(nil, 0)
assert.NoError(t, err)
furthest, err := rt.determineFurthestIDWithinK(nodes)
assert.NoError(t, err)
assert.Equal(t, expectedFurthest, furthest)
node2 := []byte{143, 255} //xor 240
@ -355,6 +356,7 @@ func TestDetermineFurthestIDWithinK(t *testing.T) {
nodes, err = rt.nodeBucketDB.List(nil, 0)
assert.NoError(t, err)
furthest, err = rt.determineFurthestIDWithinK(nodes)
assert.NoError(t, err)
assert.Equal(t, expectedFurthest, furthest)
node3 := []byte{255, 255} //xor 128
@ -363,6 +365,7 @@ func TestDetermineFurthestIDWithinK(t *testing.T) {
nodes, err = rt.nodeBucketDB.List(nil, 0)
assert.NoError(t, err)
furthest, err = rt.determineFurthestIDWithinK(nodes)
assert.NoError(t, err)
assert.Equal(t, expectedFurthest, furthest)
node4 := []byte{191, 255} //xor 192
@ -371,6 +374,7 @@ func TestDetermineFurthestIDWithinK(t *testing.T) {
nodes, err = rt.nodeBucketDB.List(nil, 0)
assert.NoError(t, err)
furthest, err = rt.determineFurthestIDWithinK(nodes)
assert.NoError(t, err)
assert.Equal(t, expectedFurthest, furthest)
node5 := []byte{133, 255} //xor 250
@ -379,6 +383,7 @@ func TestDetermineFurthestIDWithinK(t *testing.T) {
nodes, err = rt.nodeBucketDB.List(nil, 0)
assert.NoError(t, err)
furthest, err = rt.determineFurthestIDWithinK(nodes)
assert.NoError(t, err)
assert.Equal(t, expectedFurthest, furthest)
}

View File

@ -4,7 +4,6 @@
package kademlia
import (
"fmt"
"testing"
"time"
@ -61,11 +60,11 @@ func TestGetBucket(t *testing.T) {
b, e := rt.GetBucket(node2.Id)
for j, w := range v.expected.nodes {
if !assert.True(t, pb.Equal(w, b.Nodes()[j])) {
fmt.Printf("case %v failed expected: ", i)
t.Logf("case %v failed expected: ", i)
}
}
if !assert.Equal(t, v.ok, e) {
fmt.Printf("case %v failed ok: ", i)
t.Logf("case %v failed ok: ", i)
}
}
}

View File

@ -112,6 +112,7 @@ func (c Config) action(ctx context.Context, cliCtx *cli.Context,
return Error.New("unexpected minio exit")
}
// NewGateway creates a new minio Gateway
func (c Config) NewGateway(ctx context.Context,
identity *provider.FullIdentity) (gw minio.Gateway, err error) {
defer mon.Task()(&ctx)(&err)

View File

@ -262,20 +262,20 @@ func TestListObjects(t *testing.T) {
maxKeys := 123
items := []objects.ListItem{
objects.ListItem{
{
Path: paths.New(prefix, "test-file-1.txt"),
},
objects.ListItem{
{
Path: paths.New(prefix, "test-file-2.txt"),
},
}
objInfos := []minio.ObjectInfo{
minio.ObjectInfo{
{
Bucket: bucket,
Name: path.Join(prefix, "test-file-1.txt"),
},
minio.ObjectInfo{
{
Bucket: bucket,
Name: path.Join(prefix, "test-file-2.txt"),
},
@ -330,9 +330,7 @@ func TestDeleteBucket(t *testing.T) {
itemsInBucket := make([]objects.ListItem, 1)
itemsInBucket[0] = objects.ListItem{Path: paths.New("path1"), Meta: objects.Meta{}}
var exp time.Time
exp = time.Unix(0, 0).UTC()
exp := time.Unix(0, 0).UTC()
var noItemsInBucket []objects.ListItem
for i, example := range []struct {
@ -375,8 +373,7 @@ func TestGetBucketInfo(t *testing.T) {
storjObj := storjObjects{storj: &b}
var exp time.Time
exp = time.Unix(0, 0).UTC()
exp := time.Unix(0, 0).UTC()
for i, example := range []struct {
bucket string
@ -405,8 +402,7 @@ func TestMakeBucketWithLocation(t *testing.T) {
storjObj := storjObjects{storj: &b}
var exp time.Time
exp = time.Unix(0, 0).UTC()
exp := time.Unix(0, 0).UTC()
for i, example := range []struct {
bucket string
@ -442,8 +438,7 @@ func TestListBuckets(t *testing.T) {
storjObj := storjObjects{storj: &b}
var exp time.Time
exp = time.Unix(0, 0).UTC()
exp := time.Unix(0, 0).UTC()
for i, example := range []struct {
bucket string

View File

@ -38,10 +38,7 @@ func (m *MockErrorLogger) EXPECT() *MockErrorLoggerMockRecorder {
// Errorf mocks base method
func (m *MockErrorLogger) Errorf(arg0 string, arg1 ...interface{}) {
varargs := []interface{}{arg0}
for _, a := range arg1 {
varargs = append(varargs, a)
}
varargs := append([]interface{}{arg0}, arg1...)
m.ctrl.Call(m, "Errorf", varargs...)
}

View File

@ -60,7 +60,7 @@ var (
partList = minio.ListPartsInfo{Parts: []minio.PartInfo{partInfo}}
healItem = madmin.HealResultItem{Bucket: bucket, Object: object}
healList = []madmin.HealResultItem{healItem}
lockList = []minio.VolumeLockInfo{minio.VolumeLockInfo{Bucket: bucket, Object: object}}
lockList = []minio.VolumeLockInfo{{Bucket: bucket, Object: object}}
plcy = &policy.Policy{ID: n}
)
@ -441,7 +441,7 @@ func TestListMultipartUploads(t *testing.T) {
uidMarker := "test-upload-id-marker"
listMultiParts := minio.ListMultipartsInfo{
Uploads: []minio.MultipartInfo{minio.MultipartInfo{Object: object}}}
Uploads: []minio.MultipartInfo{{Object: object}}}
// No error returned
mol.EXPECT().ListMultipartUploads(ctx, bucket, prefix, marker, uidMarker,
@ -616,7 +616,7 @@ func TestCompleteMultipartUpload(t *testing.T) {
logger, mol, ol := initMocks(mockCtrl)
parts := []minio.CompletePart{minio.CompletePart{PartNumber: partID}}
parts := []minio.CompletePart{{PartNumber: partID}}
// No error returned
mol.EXPECT().CompleteMultipartUpload(ctx, bucket, object, uploadID, parts).

View File

@ -21,11 +21,10 @@ var ctx = context.Background()
func TestLookup(t *testing.T) {
cases := []struct {
self proto.Node
to proto.Node
find proto.Node
expectedErr error
expectedNumNodes int
self proto.Node
to proto.Node
find proto.Node
expectedErr error
}{
{
self: proto.Node{Id: test.NewNodeID(t), Address: &proto.NodeAddress{Address: ":7070"}},

View File

@ -32,7 +32,7 @@ type Cache struct {
DHT dht.DHT
}
// NewRedisOverlayCache returns a pointer to a new Cache instance with an initalized connection to Redis.
// NewRedisOverlayCache returns a pointer to a new Cache instance with an initialized connection to Redis.
func NewRedisOverlayCache(address, password string, db int, DHT dht.DHT) (*Cache, error) {
rc, err := redis.NewClient(address, password, db)
if err != nil {
@ -45,7 +45,7 @@ func NewRedisOverlayCache(address, password string, db int, DHT dht.DHT) (*Cache
}, nil
}
// NewBoltOverlayCache returns a pointer to a new Cache instance with an initalized connection to a Bolt db.
// NewBoltOverlayCache returns a pointer to a new Cache instance with an initialized connection to a Bolt db.
func NewBoltOverlayCache(dbPath string, DHT dht.DHT) (*Cache, error) {
bc, err := boltdb.NewClient(zap.L(), dbPath, boltdb.OverlayBucket)
if err != nil {
@ -84,7 +84,7 @@ func (o *Cache) Put(nodeID string, value overlay.Node) error {
return err
}
return o.DB.Put(kademlia.StringToNodeID(nodeID).Bytes(), []byte(data))
return o.DB.Put(kademlia.StringToNodeID(nodeID).Bytes(), data)
}
// Bootstrap walks the initialized network and populates the cache

View File

@ -46,7 +46,7 @@ const (
func newTestKademlia(t *testing.T, ip, port string, d dht.DHT, b overlay.Node) *kademlia.Kademlia {
i, err := kademlia.NewID()
assert.NoError(t, err)
id := kademlia.NodeID(*i)
id := *i
n := []overlay.Node{b}
kad, err := kademlia.NewKademlia(&id, n, ip, port)
assert.NoError(t, err)
@ -58,7 +58,7 @@ func bootstrapTestNetwork(t *testing.T, ip, port string) ([]dht.DHT, overlay.Nod
bid, err := kademlia.NewID()
assert.NoError(t, err)
bnid := kademlia.NodeID(*bid)
bnid := *bid
dhts := []dht.DHT{}
p, err := strconv.Atoi(port)
@ -71,6 +71,7 @@ func bootstrapTestNetwork(t *testing.T, ip, port string) ([]dht.DHT, overlay.Nod
assert.NoError(t, err)
rt, err := boot.GetRoutingTable(context.Background())
assert.NoError(t, err)
bootNode := rt.Local()
err = boot.ListenAndServe()
@ -84,7 +85,7 @@ func bootstrapTestNetwork(t *testing.T, ip, port string) ([]dht.DHT, overlay.Nod
nid, err := kademlia.NewID()
assert.NoError(t, err)
id := kademlia.NodeID(*nid)
id := *nid
dht, err := kademlia.NewKademlia(&id, []overlay.Node{bootNode}, ip, gg)
assert.NoError(t, err)

View File

@ -69,7 +69,7 @@ func TestChoose(t *testing.T) {
lis, err := net.Listen("tcp", fmt.Sprintf(":%d", 0))
assert.NoError(t, err)
srv, mock, err := NewTestServer(ctx)
srv, mock, err := newTestServer(ctx)
assert.NoError(t, err)
go srv.Serve(lis)
defer srv.Stop()
@ -106,7 +106,7 @@ func TestLookup(t *testing.T) {
lis, err := net.Listen("tcp", fmt.Sprintf(":%d", 0))
assert.NoError(t, err)
srv, mock, err := NewTestServer(ctx)
srv, mock, err := newTestServer(ctx)
assert.NoError(t, err)
go srv.Serve(lis)
defer srv.Stop()
@ -129,7 +129,7 @@ func TestLookup(t *testing.T) {
}
func NewTestServer(ctx context.Context) (*grpc.Server, *mockOverlayServer, error) {
func newTestServer(ctx context.Context) (*grpc.Server, *mockOverlayServer, error) {
ca, err := provider.NewCA(ctx, 12, 4)
if err != nil {
return nil, nil, err

View File

@ -61,7 +61,7 @@ func (c Config) Run(ctx context.Context, server *provider.Provider) (
if err != nil {
return Error.New("invalid db: %s", err)
}
cache, err = NewRedisOverlayCache(dburl.Host, UrlPwd(dburl), db, kad)
cache, err = NewRedisOverlayCache(dburl.Host, GetUserPassword(dburl), db, kad)
if err != nil {
return err
}
@ -75,7 +75,7 @@ func (c Config) Run(ctx context.Context, server *provider.Provider) (
return err
}
ticker := time.NewTicker(time.Duration(c.RefreshInterval))
ticker := time.NewTicker(c.RefreshInterval)
defer ticker.Stop()
ctx, cancel := context.WithCancel(ctx)
@ -107,7 +107,8 @@ func (c Config) Run(ctx context.Context, server *provider.Provider) (
return server.Run(ctx)
}
func UrlPwd(u *url.URL) string {
// GetUserPassword extracts password from scheme://user:password@hostname
func GetUserPassword(u *url.URL) string {
if u == nil || u.User == nil {
return ""
}

View File

@ -74,7 +74,7 @@ func TestRun(t *testing.T) {
}
func TestUrlPwd(t *testing.T) {
res := UrlPwd(nil)
res := GetUserPassword(nil)
assert.Equal(t, res, "")
@ -82,7 +82,7 @@ func TestUrlPwd(t *testing.T) {
uri := url.URL{User: uinfo}
res = UrlPwd(&uri)
res = GetUserPassword(&uri)
assert.Equal(t, res, "testPassword")
}

View File

@ -13,10 +13,12 @@ import (
proto "storj.io/storj/protos/overlay"
)
// MockOverlay is a mocked overlay implementation
type MockOverlay struct {
nodes map[string]*proto.Node
}
// NewMockOverlay creates a new overlay mock
func NewMockOverlay(nodes []*proto.Node) *MockOverlay {
rv := &MockOverlay{nodes: map[string]*proto.Node{}}
for _, node := range nodes {
@ -25,6 +27,7 @@ func NewMockOverlay(nodes []*proto.Node) *MockOverlay {
return rv
}
// FindStorageNodes finds storage nodes based on the request
func (mo *MockOverlay) FindStorageNodes(ctx context.Context,
req *proto.FindStorageNodesRequest) (resp *proto.FindStorageNodesResponse,
err error) {
@ -39,15 +42,18 @@ func (mo *MockOverlay) FindStorageNodes(ctx context.Context,
return &proto.FindStorageNodesResponse{Nodes: nodes}, nil
}
// Lookup finds a single storage node based on the request
func (mo *MockOverlay) Lookup(ctx context.Context, req *proto.LookupRequest) (
*proto.LookupResponse, error) {
return &proto.LookupResponse{Node: mo.nodes[req.NodeID]}, nil
}
// MockConfig specifies static nodes for mock overlay
type MockConfig struct {
Nodes string `help:"a comma-separated list of <node-id>:<ip>:<port>" default:""`
}
// Run runs server with mock overlay
func (c MockConfig) Run(ctx context.Context, server *provider.Provider) error {
var nodes []*proto.Node
for _, nodestr := range strings.Split(c.Nodes, ",") {

View File

@ -28,9 +28,9 @@ const (
var (
// ErrNotExist is used when a file or directory doesn't exist
ErrNotExist = errs.Class("file or directory not found error")
// ErrGenerate is used when an error occured during cert/key generation
// ErrGenerate is used when an error occurred during cert/key generation
ErrGenerate = errs.Class("tls generation error")
// ErrTLSOptions is used inconsistently and should probably just be removed
// ErrUnsupportedKey is used when key type is not supported
ErrUnsupportedKey = errs.Class("unsupported key type")
// ErrTLSTemplate is used when an error occurs during tls template generation
ErrTLSTemplate = errs.Class("tls template error")
@ -44,6 +44,7 @@ var (
// `VerifyPeerCertificate` function.
type PeerCertVerificationFunc func([][]byte, [][]*x509.Certificate) error
// NewKey returns a new PrivateKey
func NewKey() (crypto.PrivateKey, error) {
k, err := ecdsa.GenerateKey(authECCurve, rand.Reader)
if err != nil {
@ -103,6 +104,7 @@ func VerifyPeerFunc(next ...PeerCertVerificationFunc) PeerCertVerificationFunc {
}
}
// VerifyPeerCertChains verifies chains
func VerifyPeerCertChains(_ [][]byte, parsedChains [][]*x509.Certificate) error {
return verifyChainSignatures(parsedChains[0])
}
@ -119,6 +121,7 @@ func NewCertBlock(b []byte) *pem.Block {
return &pem.Block{Type: BlockTypeCertificate, Bytes: b}
}
// TLSCert creates a tls.Certificate from chains, key and leaf
func TLSCert(chain [][]byte, leaf *x509.Certificate, key crypto.PrivateKey) (*tls.Certificate, error) {
var err error
if leaf == nil {
@ -149,7 +152,7 @@ func WriteChain(w io.Writer, chain ...*x509.Certificate) error {
return nil
}
// WriteChain writes the private key to the writer, PEM-encoded.
// WriteKey writes the private key to the writer, PEM-encoded.
func WriteKey(w io.Writer, key crypto.PrivateKey) error {
var (
kb []byte

View File

@ -80,9 +80,9 @@ func TestVerifyPeerFunc(t *testing.T) {
testFunc := func(chain [][]byte, parsedChains [][]*x509.Certificate) error {
switch {
case bytes.Compare(chain[1], c.Raw) != 0:
case !bytes.Equal(chain[1], c.Raw):
return errs.New("CA cert doesn't match")
case bytes.Compare(chain[0], l.Raw) != 0:
case !bytes.Equal(chain[0], l.Raw):
return errs.New("leaf's CA cert doesn't match")
case l.PublicKey.(*ecdsa.PublicKey).Curve != parsedChains[0][0].PublicKey.(*ecdsa.PublicKey).Curve:
return errs.New("leaf public key doesn't match")
@ -90,9 +90,9 @@ func TestVerifyPeerFunc(t *testing.T) {
return errs.New("leaf public key doesn't match")
case l.PublicKey.(*ecdsa.PublicKey).Y.Cmp(parsedChains[0][0].PublicKey.(*ecdsa.PublicKey).Y) != 0:
return errs.New("leaf public key doesn't match")
case bytes.Compare(parsedChains[0][1].Raw, c.Raw) != 0:
case !bytes.Equal(parsedChains[0][1].Raw, c.Raw):
return errs.New("parsed CA cert doesn't match")
case bytes.Compare(parsedChains[0][0].Raw, l.Raw) != 0:
case !bytes.Equal(parsedChains[0][0].Raw, l.Raw):
return errs.New("parsed leaf cert doesn't match")
}
return nil

View File

@ -7,6 +7,7 @@ import (
"crypto/x509"
)
// CATemplate returns x509.Certificate template for certificate authority
func CATemplate() (*x509.Certificate, error) {
serialNumber, err := newSerialNumber()
if err != nil {
@ -18,12 +19,13 @@ func CATemplate() (*x509.Certificate, error) {
KeyUsage: x509.KeyUsageCertSign,
ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
BasicConstraintsValid: true,
IsCA: true,
IsCA: true,
}
return template, nil
}
// LeafTemplate returns x509.Certificate template for signing and encrypting
func LeafTemplate() (*x509.Certificate, error) {
serialNumber, err := newSerialNumber()
if err != nil {
@ -35,7 +37,7 @@ func LeafTemplate() (*x509.Certificate, error) {
KeyUsage: x509.KeyUsageDigitalSignature | x509.KeyUsageKeyEncipherment,
ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth, x509.ExtKeyUsageClientAuth},
BasicConstraintsValid: true,
IsCA: false,
IsCA: false,
}
return template, nil

View File

@ -33,9 +33,9 @@ func PathByID(id, dir string) (string, error) {
return "", ArgError.New("No path provided")
}
folder1 := string(id[0:2])
folder2 := string(id[2:4])
fileName := string(id[4:])
folder1 := id[0:2]
folder2 := id[2:4]
fileName := id[4:]
return path.Join(dir, folder1, folder2, fileName), nil
}

View File

@ -53,13 +53,13 @@ func TestStore(t *testing.T) {
// Write chunk received to disk
_, err = storeFile.Write(tt.content)
assert.Nil(err)
assert.NoError(err)
storeFile.Close()
folder1 := string(tt.id[0:2])
folder2 := string(tt.id[2:4])
fileName := string(tt.id[4:])
folder1 := tt.id[0:2]
folder2 := tt.id[2:4]
fileName := tt.id[4:]
createdFilePath := path.Join(os.TempDir(), folder1, folder2, fileName)
@ -144,9 +144,9 @@ func TestRetrieve(t *testing.T) {
t.Run(tt.it, func(t *testing.T) {
assert := assert.New(t)
folder1 := string(tt.id[0:2])
folder2 := string(tt.id[2:4])
fileName := string(tt.id[4:])
folder1 := tt.id[0:2]
folder2 := tt.id[2:4]
fileName := tt.id[4:]
createdFilePath := path.Join(os.TempDir(), folder1, folder2, fileName)
@ -225,9 +225,9 @@ func TestDelete(t *testing.T) {
t.Run(tt.it, func(t *testing.T) {
assert := assert.New(t)
folder1 := string(tt.id[0:2])
folder2 := string(tt.id[2:4])
fileName := string(tt.id[4:])
folder1 := tt.id[0:2]
folder2 := tt.id[2:4]
fileName := tt.id[4:]
createdFilePath := path.Join(os.TempDir(), folder1, folder2, fileName)
@ -259,11 +259,6 @@ func TestDelete(t *testing.T) {
t.Errorf("Error deleting file")
return
}
return
})
}
}
func TestMain(m *testing.M) {
m.Run()
}

View File

@ -45,7 +45,6 @@ type PSClient interface {
type Client struct {
route pb.PieceStoreRoutesClient
conn *grpc.ClientConn
pkey []byte
bandwidthMsgSize int
}
@ -120,7 +119,7 @@ func (client *Client) Put(ctx context.Context, id PieceID, data io.Reader, ttl t
_, err = io.Copy(bufw, data)
if err == io.ErrUnexpectedEOF {
writer.Close()
_ = writer.Close()
zap.S().Infof("Node cut from upload due to slow connection. Deleting piece %s...", id)
return client.Delete(ctx, id)
}

View File

@ -28,7 +28,7 @@ type pieceRanger struct {
// PieceRanger PieceRanger returns a RangeCloser from a PieceID.
func PieceRanger(ctx context.Context, c *Client, stream pb.PieceStoreRoutes_RetrieveClient, id PieceID, pba *pb.PayerBandwidthAllocation) (ranger.RangeCloser, error) {
piece, err := c.Meta(ctx, PieceID(id))
piece, err := c.Meta(ctx, id)
if err != nil {
return nil, err
}

View File

@ -52,7 +52,7 @@ func OpenPSDB(ctx context.Context, DataPath, DBPath string) (psdb *PSDB, err err
defer func() {
if err != nil {
db.Close()
_ = db.Close()
}
}()
@ -133,11 +133,7 @@ func (psdb *PSDB) DeleteExpired(ctx context.Context) (err error) {
}
_, err = psdb.DB.Exec(fmt.Sprintf("DELETE FROM ttl WHERE expires < %d AND expires > 0", now))
if err != nil {
return err
}
return nil
return err
}
// DeleteExpiredLoop will periodically run DeleteExpired
@ -182,7 +178,7 @@ func (psdb *PSDB) WriteBandwidthAllocToDB(ba *pb.RenterBandwidthAllocation) erro
return err
}
defer stmt.Close()
defer func() { _ = stmt.Close() }()
_, err = tx.Stmt(stmt).Exec(data, ba.GetSignature())
if err != nil {
@ -209,7 +205,7 @@ func (psdb *PSDB) AddTTLToDB(id string, expiration int64) error {
return err
}
defer stmt.Close()
defer func() { _ = stmt.Close() }()
_, err = tx.Stmt(stmt).Exec(id, time.Now().Unix(), expiration)
if err != nil {
@ -248,7 +244,7 @@ func (psdb *PSDB) DeleteTTLByID(id string) error {
if err != nil {
return err
}
defer stmt.Close()
defer func() { _ = stmt.Close() }()
_, err = tx.Stmt(stmt).Exec(id)
if err != nil {

View File

@ -51,7 +51,7 @@ func TestOpenPSDB(t *testing.T) {
assert.Equal(tt.err, err.Error())
return
}
assert.Nil(err)
assert.NoError(err)
assert.NotNil(DB)
assert.NotNil(DB.DB)
})
@ -98,18 +98,18 @@ func TestAddTTLToDB(t *testing.T) {
assert.Equal(tt.err, err.Error())
return
}
assert.Nil(err)
assert.NoError(err)
db.mtx.Lock()
rows, err := db.DB.Query(fmt.Sprintf(`SELECT * FROM ttl WHERE id="%s"`, tt.id))
assert.Nil(err)
assert.NoError(err)
rows.Next()
var expiration int64
var id string
var time int64
err = rows.Scan(&id, &time, &expiration)
assert.Nil(err)
assert.NoError(err)
rows.Close()
db.mtx.Unlock()
@ -155,7 +155,7 @@ func TestDeleteTTLByID(t *testing.T) {
t.Run(tt.it, func(t *testing.T) {
assert := assert.New(t)
err := db.AddTTLToDB(tt.id, 0)
assert.Nil(err)
assert.NoError(err)
err = db.DeleteTTLByID(tt.id)
if tt.err != "" {
@ -163,7 +163,7 @@ func TestDeleteTTLByID(t *testing.T) {
assert.Equal(tt.err, err.Error())
return
}
assert.Nil(err)
assert.NoError(err)
})
}
@ -205,7 +205,7 @@ func TestGetTTLByID(t *testing.T) {
t.Run(tt.it, func(t *testing.T) {
assert := assert.New(t)
err := db.AddTTLToDB(tt.id, tt.expiration)
assert.Nil(err)
assert.NoError(err)
expiration, err := db.GetTTLByID(tt.id)
if tt.err != "" {
@ -213,7 +213,7 @@ func TestGetTTLByID(t *testing.T) {
assert.Equal(tt.err, err.Error())
return
}
assert.Nil(err)
assert.NoError(err)
assert.Equal(tt.expiration, expiration)
})
}
@ -231,7 +231,6 @@ func TestGetTTLByID(t *testing.T) {
func TestWriteBandwidthAllocToDB(t *testing.T) {
tests := []struct {
it string
id string
payerAllocation *pb.PayerBandwidthAllocation
total int64
err string
@ -275,11 +274,11 @@ func TestWriteBandwidthAllocToDB(t *testing.T) {
assert.Equal(tt.err, err.Error())
return
}
assert.Nil(err)
assert.NoError(err)
// check db to make sure agreement and signature were stored correctly
db.mtx.Lock()
rows, err := db.DB.Query(`SELECT * FROM bandwidth_agreements Limit 1`)
assert.Nil(err)
assert.NoError(err)
for rows.Next() {
var (
@ -288,11 +287,11 @@ func TestWriteBandwidthAllocToDB(t *testing.T) {
)
err = rows.Scan(&agreement, &signature)
assert.Nil(err)
assert.NoError(err)
decodedRow := &pb.RenterBandwidthAllocation_Data{}
err = proto.Unmarshal(agreement, decodedRow)
assert.Nil(err)
assert.NoError(err)
assert.Equal(ba.GetSignature(), signature)
assert.Equal(tt.payerAllocation, decodedRow.GetPayerAllocation())
@ -302,7 +301,7 @@ func TestWriteBandwidthAllocToDB(t *testing.T) {
rows.Close()
db.mtx.Unlock()
err = rows.Err()
assert.Nil(err)
assert.NoError(err)
})
}
}
@ -313,7 +312,3 @@ func serializeData(ba *pb.RenterBandwidthAllocation_Data) []byte {
return data
}
func TestMain(m *testing.M) {
m.Run()
}

View File

@ -26,7 +26,7 @@ import (
"google.golang.org/grpc"
"storj.io/storj/pkg/piecestore"
pstore "storj.io/storj/pkg/piecestore"
"storj.io/storj/pkg/piecestore/rpc/server/psdb"
pb "storj.io/storj/protos/piecestore"
)
@ -91,7 +91,7 @@ func TestPiece(t *testing.T) {
// simulate piece TTL entry
_, err := TS.s.DB.DB.Exec(fmt.Sprintf(`INSERT INTO ttl (id, created, expires) VALUES ("%s", "%d", "%d")`, tt.id, 1234567890, tt.expiration))
assert.Nil(err)
assert.NoError(err)
defer TS.s.DB.DB.Exec(fmt.Sprintf(`DELETE FROM ttl WHERE id="%s"`, tt.id))
@ -108,7 +108,7 @@ func TestPiece(t *testing.T) {
return
}
assert.Nil(err)
assert.NoError(err)
assert.Equal(tt.id, resp.GetId())
assert.Equal(tt.size, resp.GetSize())
@ -217,10 +217,11 @@ func TestRetrieve(t *testing.T) {
t.Run("should return expected PieceRetrievalStream values", func(t *testing.T) {
assert := assert.New(t)
stream, err := TS.c.Retrieve(ctx)
assert.NoError(err)
// send piece database
err = stream.Send(&pb.PieceRetrieval{PieceData: &pb.PieceRetrieval_PieceData{Id: tt.id, Size: tt.reqSize, Offset: tt.offset}})
assert.Nil(err)
assert.NoError(err)
totalAllocated := int64(0)
var data string
@ -240,7 +241,7 @@ func TestRetrieve(t *testing.T) {
},
},
)
assert.Nil(err)
assert.NoError(err)
resp, err = stream.Recv()
if tt.err != "" {
@ -257,7 +258,7 @@ func TestRetrieve(t *testing.T) {
totalRetrieved += resp.Size
}
assert.Nil(err)
assert.NoError(err)
assert.NotNil(resp)
if resp != nil {
assert.Equal(tt.respSize, totalRetrieved)
@ -311,11 +312,11 @@ func TestStore(t *testing.T) {
t.Run("should return expected PieceStoreSummary values", func(t *testing.T) {
assert := assert.New(t)
stream, err := TS.c.Store(ctx)
assert.Nil(err)
assert.NoError(err)
// Write the buffer to the stream we opened earlier
err = stream.Send(&pb.PieceStore{Piecedata: &pb.PieceStore_PieceData{Id: tt.id, ExpirationUnixSec: tt.ttl}})
assert.Nil(err)
assert.NoError(err)
// Send Bandwidth Allocation Data
msg := &pb.PieceStore{
@ -331,7 +332,7 @@ func TestStore(t *testing.T) {
// Write the buffer to the stream we opened earlier
err = stream.Send(msg)
assert.Nil(err)
assert.NoError(err)
resp, err := stream.CloseAndRecv()
if tt.err != "" {
@ -340,13 +341,13 @@ func TestStore(t *testing.T) {
return
}
assert.Nil(err)
assert.NoError(err)
defer db.Exec(fmt.Sprintf(`DELETE FROM ttl WHERE id="%s"`, tt.id))
// check db to make sure agreement and signature were stored correctly
rows, err := db.Query(`SELECT * FROM bandwidth_agreements`)
assert.Nil(err)
assert.NoError(err)
defer rows.Close()
for rows.Next() {
@ -356,19 +357,19 @@ func TestStore(t *testing.T) {
)
err = rows.Scan(&agreement, &signature)
assert.Nil(err)
assert.NoError(err)
decoded := &pb.RenterBandwidthAllocation_Data{}
err = proto.Unmarshal(agreement, decoded)
assert.NoError(err)
assert.Equal(msg.Bandwidthallocation.GetSignature(), signature)
assert.Equal(&pb.PayerBandwidthAllocation{}, decoded.GetPayerAllocation())
assert.Equal(int64(len(tt.content)), decoded.GetTotal())
}
err = rows.Err()
assert.Nil(err)
assert.NoError(err)
assert.Equal(tt.message, resp.Message)
assert.Equal(tt.totalReceived, resp.TotalReceived)
@ -417,7 +418,7 @@ func TestDelete(t *testing.T) {
// simulate piece TTL entry
_, err := db.Exec(fmt.Sprintf(`INSERT INTO ttl (id, created, expires) VALUES ("%s", "%d", "%d")`, tt.id, 1234567890, 1234567890))
assert.Nil(err)
assert.NoError(err)
defer db.Exec(fmt.Sprintf(`DELETE FROM ttl WHERE id="%s"`, tt.id))
@ -431,12 +432,13 @@ func TestDelete(t *testing.T) {
return
}
assert.Nil(err)
assert.NoError(err)
assert.Equal(tt.message, resp.GetMessage())
// if test passes, check if file was indeed deleted
filePath, err := pstore.PathByID(tt.id, TS.s.DataDir)
if _, err = os.Stat(filePath); os.IsNotExist(err) != true {
assert.NoError(err)
if _, err = os.Stat(filePath); os.IsExist(err) {
t.Errorf("File not deleted")
return
}

View File

@ -56,7 +56,7 @@ func (s *Server) Store(reqStream pb.PieceStoreRoutes_StoreServer) (err error) {
log.Printf("Successfully stored %s.", pd.GetId())
return reqStream.SendAndClose(&pb.PieceStoreSummary{Message: OK, TotalReceived: int64(total)})
return reqStream.SendAndClose(&pb.PieceStoreSummary{Message: OK, TotalReceived: total})
}
func (s *Server) storeData(ctx context.Context, stream pb.PieceStoreRoutes_StoreServer, id string) (total int64, err error) {

View File

@ -17,9 +17,9 @@ import (
// Config is a configuration struct that is everything you need to start a
// PointerDB responsibility
type Config struct {
DatabaseURL string `help:"the database connection string to use" default:"bolt://$CONFDIR/pointerdb.db"`
MinInlineSegmentSize int64 `default:"1240" help:"minimum inline segment size"`
MaxInlineSegmentSize int `default:"8000" help:"maximum inline segment size"`
DatabaseURL string `help:"the database connection string to use" default:"bolt://$CONFDIR/pointerdb.db"`
MinInlineSegmentSize int64 `default:"1240" help:"minimum inline segment size"`
MaxInlineSegmentSize int `default:"8000" help:"maximum inline segment size"`
}
// Run implements the provider.Responsibility interface

View File

@ -24,14 +24,12 @@ import (
const (
unauthenticated = "failed API creds"
noPathGiven = "file path not given"
noLimitGiven = "limit not given"
)
var (
ctx = context.Background()
ErrUnauthenticated = errors.New(unauthenticated)
ErrNoFileGiven = errors.New(noPathGiven)
ErrNoLimitGiven = errors.New(noLimitGiven)
)
func TestNewPointerDBClient(t *testing.T) {
@ -136,7 +134,7 @@ func TestGet(t *testing.T) {
log.Fatal("marshaling error: ", err)
}
byteData := []byte(data)
byteData := data
getResponse := pb.GetResponse{Pointer: byteData}
@ -179,17 +177,17 @@ func TestList(t *testing.T) {
{"", "", "", false, 0, meta.None, "",
[]*pb.ListResponse_Item{}, false, nil, ""},
{"", "", "", false, 0, meta.None, "",
[]*pb.ListResponse_Item{&pb.ListResponse_Item{}}, false, nil, ""},
[]*pb.ListResponse_Item{{}}, false, nil, ""},
{"", "", "", false, -1, meta.None, "",
[]*pb.ListResponse_Item{}, false, ErrUnauthenticated, unauthenticated},
{"prefix", "after", "before", false, 1, meta.None, "some key",
[]*pb.ListResponse_Item{
&pb.ListResponse_Item{Path: "a/b/c"},
{Path: "a/b/c"},
},
true, nil, ""},
{"prefix", "after", "before", false, 1, meta.All, "some key",
[]*pb.ListResponse_Item{
&pb.ListResponse_Item{Path: "a/b/c", Pointer: &pb.Pointer{
{Path: "a/b/c", Pointer: &pb.Pointer{
Size: 1234,
CreationDate: ptypes.TimestampNow(),
ExpirationDate: ptypes.TimestampNow(),
@ -198,8 +196,8 @@ func TestList(t *testing.T) {
true, nil, ""},
{"some/prefix", "start/after", "end/before", true, 123, meta.Size, "some key",
[]*pb.ListResponse_Item{
&pb.ListResponse_Item{Path: "a/b/c", Pointer: &pb.Pointer{Size: 1234}},
&pb.ListResponse_Item{Path: "x/y", Pointer: &pb.Pointer{Size: 789}},
{Path: "a/b/c", Pointer: &pb.Pointer{Size: 1234}},
{Path: "x/y", Pointer: &pb.Pointer{Size: 789}},
},
true, nil, ""},
} {

View File

@ -7,11 +7,10 @@ import (
"context"
"reflect"
"github.com/zeebo/errs"
"github.com/golang/protobuf/proto"
"github.com/golang/protobuf/ptypes"
"github.com/zeebo/errs"
"go.uber.org/zap"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
monkit "gopkg.in/spacemonkeygo/monkit.v2"
@ -24,7 +23,7 @@ import (
)
var (
mon = monkit.Package()
mon = monkit.Package()
segmentError = errs.Class("segment error")
)
@ -33,7 +32,6 @@ var (
// TODO(kaloyan): make it configurable
const ListPageLimit = 1000
// Server implements the network state RPC service
type Server struct {
DB storage.KeyValueStore
@ -52,8 +50,8 @@ func NewServer(db storage.KeyValueStore, logger *zap.Logger, c Config) *Server {
func (s *Server) validateAuth(APIKey []byte) error {
if !auth.ValidateAPIKey(string(APIKey)) {
s.logger.Error("unauthorized request: ", zap.Error(grpc.Errorf(codes.Unauthenticated, "Invalid API credential")))
return grpc.Errorf(codes.Unauthenticated, "Invalid API credential")
s.logger.Error("unauthorized request: ", zap.Error(status.Errorf(codes.Unauthenticated, "Invalid API credential")))
return status.Errorf(codes.Unauthenticated, "Invalid API credential")
}
return nil
}
@ -84,7 +82,7 @@ func (s *Server) Put(ctx context.Context, req *pb.PutRequest) (resp *pb.PutRespo
if err != nil {
return nil, status.Errorf(codes.InvalidArgument, err.Error())
}
if err = s.validateAuth(req.GetAPIKey()); err != nil {
return nil, err
}
@ -105,7 +103,7 @@ func (s *Server) Put(ctx context.Context, req *pb.PutRequest) (resp *pb.PutRespo
s.logger.Error("err putting pointer", zap.Error(err))
return nil, status.Errorf(codes.Internal, err.Error())
}
s.logger.Debug("put to the db: " + string(req.GetPath()))
s.logger.Debug("put to the db: " + req.GetPath())
return &pb.PutResponse{}, nil
}
@ -323,6 +321,6 @@ func (s *Server) Delete(ctx context.Context, req *pb.DeleteRequest) (resp *pb.De
s.logger.Error("err deleting path and pointer", zap.Error(err))
return nil, status.Errorf(codes.Internal, err.Error())
}
s.logger.Debug("deleted pointer at path: " + string(req.GetPath()))
s.logger.Debug("deleted pointer at path: " + req.GetPath())
return &pb.DeleteResponse{}, nil
}

View File

@ -10,7 +10,6 @@ import (
"testing"
"go.uber.org/zap"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
@ -38,7 +37,7 @@ func TestServicePut(t *testing.T) {
errString string
}{
{nil, nil, ""},
{[]byte("wrong key"), nil, grpc.Errorf(codes.Unauthenticated, "Invalid API credential").Error()},
{[]byte("wrong key"), nil, status.Errorf(codes.Unauthenticated, "Invalid API credential").Error()},
{nil, errors.New("put error"), status.Errorf(codes.Internal, "put error").Error()},
} {
errTag := fmt.Sprintf("Test case #%d", i)
@ -74,7 +73,7 @@ func TestServiceGet(t *testing.T) {
errString string
}{
{nil, nil, ""},
{[]byte("wrong key"), nil, grpc.Errorf(codes.Unauthenticated, "Invalid API credential").Error()},
{[]byte("wrong key"), nil, status.Errorf(codes.Unauthenticated, "Invalid API credential").Error()},
{nil, errors.New("get error"), status.Errorf(codes.Internal, "get error").Error()},
} {
errTag := fmt.Sprintf("Test case #%d", i)
@ -116,7 +115,7 @@ func TestServiceDelete(t *testing.T) {
errString string
}{
{nil, nil, ""},
{[]byte("wrong key"), nil, grpc.Errorf(codes.Unauthenticated, "Invalid API credential").Error()},
{[]byte("wrong key"), nil, status.Errorf(codes.Unauthenticated, "Invalid API credential").Error()},
{nil, errors.New("delete error"), status.Errorf(codes.Internal, "delete error").Error()},
} {
errTag := fmt.Sprintf("Test case #%d", i)
@ -173,7 +172,7 @@ func TestServiceList(t *testing.T) {
{"", "", "", true, 0, meta.None, nil, keys, keys, false, nil, ""},
{"", "", "", true, 0, meta.All, nil, keys, keys, false, nil, ""},
{"", "", "", true, 0, meta.None, []byte("wrong key"), keys, keys, false,
nil, grpc.Errorf(codes.Unauthenticated, "Invalid API credential").Error()},
nil, status.Errorf(codes.Unauthenticated, "Invalid API credential").Error()},
{"", "", "", true, 0, meta.None, nil, keys, keys, false,
errors.New("list error"), status.Errorf(codes.Internal, "list error").Error()},
{"", "", "", true, 2, meta.None, nil, keys, keys[:2], true, nil, ""},

View File

@ -70,7 +70,6 @@ func TestRemove(t *testing.T) {
cases := []struct {
pool ConnectionPool
key string
value TestFoo
expected interface{}
expectedError error
}{

View File

@ -44,8 +44,7 @@ var (
// SaveConfig will save all flags with default values to outfilewith specific
// values specified in 'overrides' overridden.
func SaveConfig(flagset *pflag.FlagSet, outfile string,
overrides map[string]interface{}) error {
func SaveConfig(flagset *pflag.FlagSet, outfile string, overrides map[string]interface{}) error {
vip := viper.New()
err := vip.BindPFlags(pflag.CommandLine)
@ -66,10 +65,8 @@ func SaveConfig(flagset *pflag.FlagSet, outfile string,
return err
}
if overrides != nil {
for key, val := range overrides {
vip.Set(key, val)
}
for key, val := range overrides {
vip.Set(key, val)
}
return vip.WriteConfigAs(os.ExpandEnv(outfile))

View File

@ -22,29 +22,30 @@ func TestNewCA(t *testing.T) {
}
func BenchmarkNewCA_Difficulty8_Concurrency1(b *testing.B) {
context.Background()
for i := 0; i < b.N; i++ {
expectedDifficulty := uint16(8)
NewCA(nil, expectedDifficulty, 1)
NewCA(context.Background(), expectedDifficulty, 1)
}
}
func BenchmarkNewCA_Difficulty8_Concurrency2(b *testing.B) {
for i := 0; i < b.N; i++ {
expectedDifficulty := uint16(8)
NewCA(nil, expectedDifficulty, 2)
NewCA(context.Background(), expectedDifficulty, 2)
}
}
func BenchmarkNewCA_Difficulty8_Concurrency5(b *testing.B) {
for i := 0; i < b.N; i++ {
expectedDifficulty := uint16(8)
NewCA(nil, expectedDifficulty, 5)
NewCA(context.Background(), expectedDifficulty, 5)
}
}
func BenchmarkNewCA_Difficulty8_Concurrency10(b *testing.B) {
for i := 0; i < b.N; i++ {
expectedDifficulty := uint16(8)
NewCA(nil, expectedDifficulty, 10)
NewCA(context.Background(), expectedDifficulty, 10)
}
}

View File

@ -35,6 +35,7 @@ type FullCertificateAuthority struct {
Key crypto.PrivateKey
}
// CASetupConfig is for creating a CA
type CASetupConfig struct {
CertPath string `help:"path to the certificate chain for this identity" default:"$CONFDIR/ca.cert"`
KeyPath string `help:"path to the private key for this identity" default:"$CONFDIR/ca.key"`
@ -44,13 +45,14 @@ type CASetupConfig struct {
Concurrency uint `help:"number of concurrent workers for certificate authority generation" default:"4"`
}
// CAConfig is for locating the CA keys
type CAConfig struct {
CertPath string `help:"path to the certificate chain for this identity" default:"$CONFDIR/ca.cert"`
KeyPath string `help:"path to the private key for this identity" default:"$CONFDIR/ca.key"`
}
// Stat returns the status of the CA cert/key files for the config
func (caS CASetupConfig) Stat() TlsFilesStat {
// Status returns the status of the CA cert/key files for the config
func (caS CASetupConfig) Status() TLSFilesStatus {
return statTLSFiles(caS.CertPath, caS.KeyPath)
}
@ -156,7 +158,7 @@ func (caC CAConfig) Save(ca *FullCertificateAuthority) error {
return nil
}
// Generate Identity generates a new `FullIdentity` based on the CA. The CA
// NewIdentity generates a new `FullIdentity` based on the CA. The CA
// cert is included in the identity's cert chain and the identity's leaf cert
// is signed by the CA.
func (ca FullCertificateAuthority) NewIdentity() (*FullIdentity, error) {

View File

@ -26,13 +26,10 @@ import (
)
const (
// IdentityLength is the number of bytes required to represent node id
IdentityLength = uint16(256 / 8) // 256 bits
)
var (
ErrDifficulty = errs.Class("difficulty error")
)
// PeerIdentity represents another peer on the network.
type PeerIdentity struct {
// CA represents the peer's self-signed CA
@ -58,7 +55,7 @@ type FullIdentity struct {
Key crypto.PrivateKey
}
// IdentityConfig allows you to run a set of Responsibilities with the given
// IdentitySetupConfig allows you to run a set of Responsibilities with the given
// identity. You can also just load an Identity from disk.
type IdentitySetupConfig struct {
CertPath string `help:"path to the certificate chain for this identity" default:"$CONFDIR/identity.cert"`
@ -139,8 +136,8 @@ func PeerIdentityFromCerts(leaf, ca *x509.Certificate) (*PeerIdentity, error) {
}, nil
}
// Stat returns the status of the identity cert/key files for the config
func (is IdentitySetupConfig) Stat() TlsFilesStat {
// Status returns the status of the identity cert/key files for the config
func (is IdentitySetupConfig) Status() TLSFilesStatus {
return statTLSFiles(is.CertPath, is.KeyPath)
}

View File

@ -181,6 +181,7 @@ AwEHoUQDQgAEoLy/0hs5deTXZunRumsMkiHpF0g8wAc58aXANmr7Mxx9tzoIYFnx
-----END EC PRIVATE KEY-----`
ic, cleanup, err := tempIdentityConfig()
assert.NoError(t, err)
fi, err := FullIdentityFromPEM([]byte(chain), []byte(key))
assert.NoError(t, err)

View File

@ -17,8 +17,8 @@ import (
"storj.io/storj/storage"
)
//ErrSetup is setup error
var (
// ErrSetup is returned when there's an error with setup
ErrSetup = errs.Class("setup error")
)
@ -62,34 +62,30 @@ func NewProvider(identity *FullIdentity, lis net.Listener,
// SetupIdentity ensures a CA and identity exist and returns a config overrides map
func SetupIdentity(ctx context.Context, c CASetupConfig, i IdentitySetupConfig) error {
if s := c.Stat(); s == NoCertNoKey || c.Overwrite {
t, err := time.ParseDuration(c.Timeout)
if err != nil {
return errs.Wrap(err)
}
ctx, cancel := context.WithTimeout(ctx, t)
defer cancel()
// Load or create a certificate authority
ca, err := c.Create(ctx, 4)
if err != nil {
return err
}
if s := i.Stat(); s == NoCertNoKey || i.Overwrite {
// Create identity from new CA
_, err = i.Create(ca)
if err != nil {
return err
}
return nil
} else {
return ErrSetup.New("identity file(s) exist: %s", s)
}
} else {
if s := c.Status(); s != NoCertNoKey && !c.Overwrite {
return ErrSetup.New("certificate authority file(s) exist: %s", s)
}
t, err := time.ParseDuration(c.Timeout)
if err != nil {
return errs.Wrap(err)
}
ctx, cancel := context.WithTimeout(ctx, t)
defer cancel()
// Load or create a certificate authority
ca, err := c.Create(ctx, 4)
if err != nil {
return err
}
if s := c.Status(); s != NoCertNoKey && !c.Overwrite {
return ErrSetup.New("identity file(s) exist: %s", s)
}
// Create identity from new CA
_, err = i.Create(ca)
return err
}
// Identity returns the provider's identity

View File

@ -19,16 +19,19 @@ import (
"storj.io/storj/pkg/peertls"
)
type TlsFilesStat int
// TLSFilesStatus is the status of keys
type TLSFilesStatus int
// Four possible outcomes for four files
const (
NoCertNoKey = iota
NoCertNoKey = TLSFilesStatus(iota)
CertNoKey
NoCertKey
CertKey
)
var (
// ErrZeroBytes is returned for zero slice
ErrZeroBytes = errs.New("byte slice was unexpectedly empty")
)
@ -65,6 +68,10 @@ func newCAWorker(ctx context.Context, difficulty uint16, caC chan FullCertificat
return
default:
k, err = peertls.NewKey()
if err != nil {
eC <- err
return
}
switch kE := k.(type) {
case *ecdsa.PrivateKey:
i, err = idFromKey(&kE.PublicKey)
@ -106,7 +113,6 @@ func newCAWorker(ctx context.Context, difficulty uint16, caC chan FullCertificat
ID: i,
}
caC <- ca
return
}
func idFromKey(k crypto.PublicKey) (nodeID, error) {
@ -120,7 +126,7 @@ func idFromKey(k crypto.PublicKey) (nodeID, error) {
}
func openCert(path string, flag int) (*os.File, error) {
if err := os.MkdirAll(filepath.Dir(path), 744); err != nil {
if err := os.MkdirAll(filepath.Dir(path), 0744); err != nil {
return nil, errs.Wrap(err)
}
@ -132,7 +138,7 @@ func openCert(path string, flag int) (*os.File, error) {
}
func openKey(path string, flag int) (*os.File, error) {
if err := os.MkdirAll(filepath.Dir(path), 700); err != nil {
if err := os.MkdirAll(filepath.Dir(path), 0700); err != nil {
return nil, errs.Wrap(err)
}
@ -143,20 +149,25 @@ func openKey(path string, flag int) (*os.File, error) {
return k, nil
}
func statTLSFiles(certPath, keyPath string) TlsFilesStat {
s := 0
func statTLSFiles(certPath, keyPath string) TLSFilesStatus {
_, err := os.Stat(certPath)
if err == nil {
s += 1
}
hasCert := os.IsExist(err)
_, err = os.Stat(keyPath)
if err == nil {
s += 2
hasKey := os.IsExist(err)
if hasCert && hasKey {
return CertKey
} else if hasCert {
return CertNoKey
} else if hasKey {
return NoCertKey
}
return TlsFilesStat(s)
return NoCertNoKey
}
func (t TlsFilesStat) String() string {
func (t TLSFilesStatus) String() string {
switch t {
case CertKey:
return "certificate and key"
@ -164,7 +175,6 @@ func (t TlsFilesStat) String() string {
return "certificate"
case NoCertKey:
return "key"
default:
return ""
}
return ""
}

View File

@ -26,8 +26,7 @@ import (
// ServeContent is the Go standard library's http.ServeContent but modified to
// work with Rangers.
func ServeContent(ctx context.Context, w http.ResponseWriter, r *http.Request,
name string, modtime time.Time, content Ranger) {
func ServeContent(ctx context.Context, w http.ResponseWriter, r *http.Request, name string, modtime time.Time, content Ranger) {
setLastModified(w, modtime)
done, rangeReq := checkPreconditions(w, r, modtime)
if done {
@ -221,8 +220,7 @@ func setLastModified(w http.ResponseWriter, modtime time.Time) {
// checkPreconditions evaluates request preconditions and reports whether a
// precondition resulted in sending StatusNotModified or
// StatusPreconditionFailed.
func checkPreconditions(w http.ResponseWriter, r *http.Request,
modtime time.Time) (done bool, rangeHeader string) {
func checkPreconditions(w http.ResponseWriter, r *http.Request, modtime time.Time) (done bool, rangeHeader string) {
// This function carefully follows RFC 7232 section 6.
ch := checkIfMatch(w, r)
if ch == condNone {
@ -360,8 +358,7 @@ func checkIfModifiedSince(r *http.Request, modtime time.Time) condResult {
return condTrue
}
func checkIfRange(w http.ResponseWriter, r *http.Request, modtime time.Time) (
rv condResult) {
func checkIfRange(w http.ResponseWriter, r *http.Request, modtime time.Time) (rv condResult) {
if r.Method != http.MethodGet && r.Method != http.MethodHead {
return condNone
}
@ -458,8 +455,7 @@ func (r httpRange) contentRange(size int64) string {
return fmt.Sprintf("bytes %d-%d/%d", r.start, r.start+r.length-1, size)
}
func (r httpRange) mimeHeader(contentType string, size int64) (
rv textproto.MIMEHeader) {
func (r httpRange) mimeHeader(contentType string, size int64) (rv textproto.MIMEHeader) {
return textproto.MIMEHeader{
"Content-Range": {r.contentRange(size)},
"Content-Type": {contentType},
@ -547,8 +543,7 @@ func (w *countingWriter) Write(p []byte) (n int, err error) {
// rangesMIMESize returns the number of bytes it takes to encode the
// provided ranges as a multipart response.
func rangesMIMESize(ranges []httpRange, contentType string, contentSize int64) (
encSize int64) {
func rangesMIMESize(ranges []httpRange, contentType string, contentSize int64) (encSize int64) {
var w countingWriter
mw := multipart.NewWriter(&w)
for _, ra := range ranges {
@ -563,14 +558,14 @@ func rangesMIMESize(ranges []httpRange, contentType string, contentSize int64) (
}
encSize += int64(w)
return
return encSize
}
func sumRangesSize(ranges []httpRange) (size int64) {
for _, ra := range ranges {
size += ra.length
}
return
return size
}
// errNoOverlap is returned by serveContent's parseRange if first-byte-pos of

View File

@ -15,9 +15,6 @@ import (
)
func TestServeContent(t *testing.T) {
type RangerArgs struct {
a string
}
for _, tt := range []struct {
testName string
requestMethod string
@ -73,7 +70,7 @@ func TestServeContentParseRange(t *testing.T) {
ServeContent(context.Background(), writer, req, "", time.Now().UTC(), ranger)
assert.Equal(t, http.StatusOK, writer.Code)
assert.Equal(t, "23", writer.HeaderMap.Get("Content-Length"))
assert.Equal(t, "23", writer.Result().Header.Get("Content-Length"))
}
func Test_isZeroTime(t *testing.T) {
@ -124,7 +121,7 @@ func Test_setLastModified(t *testing.T) {
setLastModified(req, tt.modtime)
assert.Equal(t, tt.expected, req.HeaderMap.Get("Last-Modified"))
assert.Equal(t, tt.expected, req.Result().Header.Get("Last-Modified"))
})
}
}

View File

@ -62,6 +62,9 @@ func TestFileRanger(t *testing.T) {
t.Fatalf("unexpected err: %v", err)
}
data, err := ioutil.ReadAll(r)
if err != nil {
t.Fatal(err)
}
if !bytes.Equal(data, []byte(example.substr)) {
t.Fatalf("invalid subrange: %#v != %#v", string(data), example.substr)
}

View File

@ -55,9 +55,8 @@ type Service struct {
}
// SetLogger for process
func (s *Service) SetLogger(l *zap.Logger) error {
func (s *Service) SetLogger(l *zap.Logger) {
s.logger = l
return nil
}
func setEnv() error {
@ -67,9 +66,8 @@ func setEnv() error {
}
// SetMetricHandler for process
func (s *Service) SetMetricHandler(m *monkit.Registry) error {
func (s *Service) SetMetricHandler(m *monkit.Registry) {
s.metrics = m
return nil
}
// InstanceID assigns a new instance ID to the process

View File

@ -8,7 +8,6 @@ import (
"strings"
"go.uber.org/zap"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
@ -43,8 +42,8 @@ func NewServer(driver, source string, logger *zap.Logger) (*Server, error) {
func (s *Server) validateAuth(APIKeyBytes []byte) error {
if !auth.ValidateAPIKey(string(APIKeyBytes)) {
s.logger.Error("unauthorized request: ", zap.Error(grpc.Errorf(codes.Unauthenticated, "Invalid API credential")))
return grpc.Errorf(codes.Unauthenticated, "Invalid API credential")
s.logger.Error("unauthorized request: ", zap.Error(status.Errorf(codes.Unauthenticated, "Invalid API credential")))
return status.Errorf(codes.Unauthenticated, "Invalid API credential")
}
return nil
}
@ -53,7 +52,7 @@ func (s *Server) validateAuth(APIKeyBytes []byte) error {
func (s *Server) Create(ctx context.Context, createReq *pb.CreateRequest) (resp *pb.CreateResponse, err error) {
s.logger.Debug("entering statdb Create")
APIKeyBytes := []byte(createReq.APIKey)
APIKeyBytes := createReq.APIKey
if err := s.validateAuth(APIKeyBytes); err != nil {
return nil, err
}
@ -92,7 +91,7 @@ func (s *Server) Create(ctx context.Context, createReq *pb.CreateRequest) (resp
func (s *Server) Get(ctx context.Context, getReq *pb.GetRequest) (resp *pb.GetResponse, err error) {
s.logger.Debug("entering statdb Get")
APIKeyBytes := []byte(getReq.APIKey)
APIKeyBytes := getReq.APIKey
err = s.validateAuth(APIKeyBytes)
if err != nil {
return nil, err
@ -117,7 +116,7 @@ func (s *Server) Get(ctx context.Context, getReq *pb.GetRequest) (resp *pb.GetRe
func (s *Server) Update(ctx context.Context, updateReq *pb.UpdateRequest) (resp *pb.UpdateResponse, err error) {
s.logger.Debug("entering statdb Update")
APIKeyBytes := []byte(updateReq.APIKey)
APIKeyBytes := updateReq.APIKey
err = s.validateAuth(APIKeyBytes)
if err != nil {
return nil, err
@ -132,10 +131,10 @@ func (s *Server) Update(ctx context.Context, updateReq *pb.UpdateRequest) (resp
auditSuccessCount := dbNode.AuditSuccessCount
totalAuditCount := dbNode.TotalAuditCount
auditSuccessRatio := dbNode.AuditSuccessRatio
uptimeSuccessCount := dbNode.UptimeSuccessCount
var auditSuccessRatio float64
var uptimeSuccessCount int64
totalUptimeCount := dbNode.TotalUptimeCount
uptimeRatio := dbNode.UptimeRatio
var uptimeRatio float64
updateFields := dbx.Node_Update_Fields{}
@ -181,7 +180,7 @@ func (s *Server) Update(ctx context.Context, updateReq *pb.UpdateRequest) (resp
func (s *Server) UpdateBatch(ctx context.Context, updateBatchReq *pb.UpdateBatchRequest) (resp *pb.UpdateBatchResponse, err error) {
s.logger.Debug("entering statdb UpdateBatch")
APIKeyBytes := []byte(updateBatchReq.APIKey)
APIKeyBytes := updateBatchReq.APIKey
nodeStatsList := make([]*pb.NodeStats, len(updateBatchReq.NodeList))
for i, node := range updateBatchReq.NodeList {
updateReq := &pb.UpdateRequest{

View File

@ -6,8 +6,9 @@ package buckets
import (
context "context"
gomock "github.com/golang/mock/gomock"
reflect "reflect"
gomock "github.com/golang/mock/gomock"
buckets "storj.io/storj/pkg/storage/buckets"
objects "storj.io/storj/pkg/storage/objects"
)

View File

@ -73,9 +73,10 @@ func (o *objStore) Get(ctx context.Context, path paths.Path) (
func (o *objStore) Put(ctx context.Context, path paths.Path, data io.Reader,
metadata SerializableMeta, expiration time.Time) (meta Meta, err error) {
defer mon.Task()(&ctx)(&err)
if metadata.GetContentType() == "" {
// TODO(kaloyan): autodetect content type
}
// TODO(kaloyan): autodetect content type
// if metadata.GetContentType() == "" {}
// TODO(kaloyan): encrypt metadata.UserDefined before serializing
b, err := proto.Marshal(&metadata)
if err != nil {

View File

@ -116,7 +116,7 @@ func TestSegmentStorePutRemote(t *testing.T) {
mockOC.EXPECT().Choose(
gomock.Any(), gomock.Any(), gomock.Any(),
).Return([]*opb.Node{
&opb.Node{Id: "im-a-node"},
{Id: "im-a-node"},
}, nil),
mockEC.EXPECT().Put(
gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(),
@ -185,8 +185,7 @@ func TestSegmentStoreGetInline(t *testing.T) {
ctrl := gomock.NewController(t)
defer ctrl.Finish()
var ti time.Time
ti = time.Unix(0, 0).UTC()
ti := time.Unix(0, 0).UTC()
someTime, err := ptypes.TimestampProto(ti)
assert.NoError(t, err)
@ -236,8 +235,7 @@ func TestSegmentStoreGetRemote(t *testing.T) {
ctrl := gomock.NewController(t)
defer ctrl.Finish()
var ti time.Time
ti = time.Unix(0, 0).UTC()
ti := time.Unix(0, 0).UTC()
someTime, err := ptypes.TimestampProto(ti)
assert.NoError(t, err)
@ -299,8 +297,7 @@ func TestSegmentStoreDeleteInline(t *testing.T) {
ctrl := gomock.NewController(t)
defer ctrl.Finish()
var ti time.Time
ti = time.Unix(0, 0).UTC()
ti := time.Unix(0, 0).UTC()
someTime, err := ptypes.TimestampProto(ti)
assert.NoError(t, err)
@ -353,8 +350,7 @@ func TestSegmentStoreDeleteRemote(t *testing.T) {
ctrl := gomock.NewController(t)
defer ctrl.Finish()
var ti time.Time
ti = time.Unix(0, 0).UTC()
ti := time.Unix(0, 0).UTC()
someTime, err := ptypes.TimestampProto(ti)
assert.NoError(t, err)
@ -444,8 +440,7 @@ func TestSegmentStoreList(t *testing.T) {
startAfter := paths.New(tt.startAfterInput)
listedPath := paths.New(tt.itemPath)
var ti time.Time
ti = time.Unix(0, 0).UTC()
ti := time.Unix(0, 0).UTC()
someTime, err := ptypes.TimestampProto(ti)
assert.NoError(t, err)
@ -454,7 +449,7 @@ func TestSegmentStoreList(t *testing.T) {
gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(),
gomock.Any(), gomock.Any(), gomock.Any(),
).Return([]pdb.ListItem{
pdb.ListItem{
{
Path: listedPath,
Pointer: &ppb.Pointer{
Type: ppb.Pointer_INLINE,

View File

@ -13,7 +13,7 @@ import (
// MockClient is an autogenerated mock type for the client type
type MockClient struct {
mock.Mock
client Client
// client Client
}
// Report provides a mock function with given fields: ctx

View File

@ -4,6 +4,7 @@
package telemetry
import (
"context"
"testing"
"github.com/stretchr/testify/assert"
@ -32,13 +33,13 @@ func TestServe_ReturnErrorOnConnFail(t *testing.T) {
server.conn.Close()
server.conn = nil
errServe := server.Serve(nil, nil)
errServe := server.Serve(context.Background(), nil)
assert.EqualError(t, errServe, "telemetry error: invalid conn: <nil>")
}
func TestListenAndServe_ReturnErrorOnListenFails(t *testing.T) {
err := ListenAndServe(nil, "1", nil)
err := ListenAndServe(context.Background(), "1", nil)
assert.Error(t, err)
}

View File

@ -21,6 +21,10 @@ func GetBytes(key interface{}) ([]byte, error) {
return buf.Bytes(), nil
}
// ParseURL extracts database parameters from a string as a URL
// bolt://storj.db
// bolt://C:\storj.db
// redis://hostname
func ParseURL(s string) (*url.URL, error) {
if strings.HasPrefix(s, "bolt://") {
return &url.URL{

View File

@ -51,7 +51,7 @@ func NewClient(logger *zap.Logger, path, bucket string) (*Client, error) {
return err
})
if err != nil {
db.Close()
_ = db.Close()
return nil, err
}
@ -122,7 +122,7 @@ func (c *Client) listHelper(reverseList bool, startingKey storage.Key, limit sto
}
for ; k != nil; k, _ = iterate() {
paths = append(paths, k)
if limit > 0 && int(limit) == int(len(paths)) {
if limit > 0 && int(limit) == len(paths) {
break
}
}

View File

@ -28,7 +28,7 @@ type Client struct {
TTL time.Duration
}
// NewClient returns a configured Client instance, verifying a sucessful connection to redis
// NewClient returns a configured Client instance, verifying a successful connection to redis
func NewClient(address, password string, db int) (*Client, error) {
c := &Client{
db: redis.NewClient(&redis.Options{