Stylistic Go Cleanup (#2524)
Parent: 9204c57c06
Commit: 53d96be44a
@@ -39,9 +39,9 @@ func New(config Config) (*DB, error) {
}, nil
}

// NewInMemory creates new inmemory master database for storage node
// NewInMemory creates new in-memory master database for storage node
// TODO: still stores data on disk
func NewInMemory(storageDir string) (*DB, error) {
func NewInMemory() (*DB, error) {
return &DB{
kdb: teststore.New(),
ndb: teststore.New(),
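For readers skimming the diff, this is the shape of the change: the constructor loses an argument it never needed. A minimal, hypothetical sketch of the pattern — the Store type and its fields below are illustrative stand-ins, not the real bootstrapdb types:

```go
package sketch

import "errors"

// Store stands in for an in-memory master database.
type Store struct {
	kdb map[string][]byte
	ndb map[string][]byte
}

// NewInMemory takes no storage directory; everything lives in memory,
// so there is nothing for a directory argument to configure.
func NewInMemory() (*Store, error) {
	return &Store{
		kdb: map[string][]byte{},
		ndb: map[string][]byte{},
	}, nil
}

// Put writes a key into the key database.
func (s *Store) Put(key string, value []byte) error {
	if s == nil {
		return errors.New("store is nil")
	}
	s.kdb[key] = append([]byte(nil), value...)
	return nil
}
```

Callers simply drop the argument, as the testplanet hunk further below shows.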
@@ -21,7 +21,7 @@ const (
)

// rootQuery creates query for graphql
func rootQuery(service *bootstrapweb.Service, types Types) *graphql.Object {
func rootQuery(service *bootstrapweb.Service) *graphql.Object {
return graphql.NewObject(graphql.ObjectConfig{
Name: Query,
Fields: graphql.Fields{

@@ -22,7 +22,7 @@ type TypeCreator struct {
// Create create types and check for error
func (c *TypeCreator) Create(service *bootstrapweb.Service) error {
// root objects
c.query = rootQuery(service, c)
c.query = rootQuery(service)

err := c.query.Error()
if err != nil {

@@ -224,7 +224,6 @@ func (peer *Peer) Run(ctx context.Context) error {
// Close closes all the resources.
func (peer *Peer) Close() error {
var errlist errs.Group

// TODO: ensure that Close can be called on nil-s that way this code won't need the checks.

// close servers, to avoid new connections to closing subsystems
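A minimal sketch of the Close pattern in the hunk above, assuming the usual github.com/zeebo/errs API: every subsystem's Close error is collected into an errs.Group and returned as one error. The Peer fields here are hypothetical placeholders.

```go
package sketch

import (
	"io"

	"github.com/zeebo/errs"
)

// Peer owns a couple of closable subsystems.
type Peer struct {
	Server io.Closer
	DB     io.Closer
}

// Close closes all the resources, servers first so that no new
// connections reach subsystems that are already shutting down.
func (peer *Peer) Close() error {
	var errlist errs.Group

	if peer.Server != nil {
		errlist.Add(peer.Server.Close())
	}
	if peer.DB != nil {
		errlist.Add(peer.DB.Close())
	}
	return errlist.Err()
}
```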
@@ -44,7 +44,7 @@ func (planet *Planet) newBootstrap() (peer *bootstrap.Peer, err error) {
if planet.config.Reconfigure.NewBootstrapDB != nil {
db, err = planet.config.Reconfigure.NewBootstrapDB(0)
} else {
db, err = bootstrapdb.NewInMemory(dbDir)
db, err = bootstrapdb.NewInMemory()
}

err = db.CreateTables()

@@ -36,7 +36,7 @@ func TestAuditSegment(t *testing.T) {
// change limit in library to 5 in
// list api call, default is 0 == 1000 listing
//populate metainfo with 10 non-expired pointers of test data
tests, cursor, metainfo := populateTestData(t, planet, time.Now().Add(3*time.Second))
tests, cursor, metainfoSvc := populateTestData(t, planet, time.Now().Add(3*time.Second))

t.Run("NextStripe", func(t *testing.T) {
for _, tt := range tests {

@@ -54,7 +54,7 @@ func TestAuditSegment(t *testing.T) {

// test to see how random paths are
t.Run("probabilisticTest", func(t *testing.T) {
list, _, err := metainfo.List(ctx, "", "", "", true, 10, meta.None)
list, _, err := metainfoSvc.List(ctx, "", "", "", true, 10, meta.None)
require.NoError(t, err)
require.Len(t, list, 10)

@@ -112,9 +112,9 @@ func TestDeleteExpired(t *testing.T) {
SatelliteCount: 1, StorageNodeCount: 4, UplinkCount: 1,
}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
//populate metainfo with 10 expired pointers of test data
_, cursor, metainfo := populateTestData(t, planet, time.Now().Add(-time.Second*1))
_, cursor, metainfoSvc := populateTestData(t, planet, time.Now().Add(-time.Second*1))
//make sure it they're in there
list, _, err := metainfo.List(ctx, "", "", "", true, 10, meta.None)
list, _, err := metainfoSvc.List(ctx, "", "", "", true, 10, meta.None)
require.NoError(t, err)
require.Len(t, list, 10)
// make sure an error and no pointer is returned

@@ -124,7 +124,7 @@ func TestDeleteExpired(t *testing.T) {
require.Nil(t, stripe)
})
//make sure it they're not in there anymore
list, _, err = metainfo.List(ctx, "", "", "", true, 10, meta.None)
list, _, err = metainfoSvc.List(ctx, "", "", "", true, 10, meta.None)
require.NoError(t, err)
require.Len(t, list, 0)
})

@@ -149,23 +149,23 @@ func populateTestData(t *testing.T, planet *testplanet.Planet, expiration time.T
{bm: "success-9", path: "Pictures/Animals/Dogs/dogs.png"},
{bm: "success-10", path: "Nada/ビデオ/😶"},
}
metainfo := planet.Satellites[0].Metainfo.Service
cursor := audit.NewCursor(metainfo)
metainfoSvc := planet.Satellites[0].Metainfo.Service
cursor := audit.NewCursor(metainfoSvc)

// put 10 pointers in db with expirations
// put 10 pointers in db with expiration
t.Run("putToDB", func(t *testing.T) {
for _, tt := range tests {
test := tt
t.Run(test.bm, func(t *testing.T) {
pointer := makePointer(test.path, expiration)
require.NoError(t, metainfo.Put(ctx, test.path, pointer))
pointer := makePointer(expiration)
require.NoError(t, metainfoSvc.Put(ctx, test.path, pointer))
})
}
})
return tests, cursor, metainfo
return tests, cursor, metainfoSvc
}

func makePointer(path storj.Path, expiration time.Time) *pb.Pointer {
func makePointer(expiration time.Time) *pb.Pointer {
var rps []*pb.RemotePiece
rps = append(rps, &pb.RemotePiece{
PieceNum: 1,
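The `test := tt` line in populateTestData is the standard range-variable capture for table-driven subtests. A self-contained sketch of the same pattern, with illustrative names:

```go
package sketch

import "testing"

func TestTable(t *testing.T) {
	tests := []struct {
		name string
		in   int
		want int
	}{
		{name: "zero", in: 0, want: 0},
		{name: "double", in: 2, want: 4},
	}
	for _, tt := range tests {
		test := tt // copy the row so the closure below doesn't see a reused loop variable
		t.Run(test.name, func(t *testing.T) {
			if got := test.in * 2; got != test.want {
				t.Fatalf("got %d, want %d", got, test.want)
			}
		})
	}
}
```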
@@ -49,14 +49,14 @@ func TestDisqualificationTooManyFailedAudits(t *testing.T) {
require.NoError(t, err)

var (
sat = planet.Satellites[0]
nodeID = planet.StorageNodes[0].ID()
report = &audit.Report{
satellitePeer = planet.Satellites[0]
nodeID = planet.StorageNodes[0].ID()
report = &audit.Report{
Fails: storj.NodeIDList{nodeID},
}
)

dossier, err := sat.Overlay.Service.Get(ctx, nodeID)
dossier, err := satellitePeer.Overlay.Service.Get(ctx, nodeID)
require.NoError(t, err)

require.Equal(t, alpha0, dossier.Reputation.AuditReputationAlpha)

@@ -68,10 +68,10 @@ func TestDisqualificationTooManyFailedAudits(t *testing.T) {
// failed audits
iterations := 1
for ; ; iterations++ {
_, err := sat.Audit.Service.Reporter.RecordAudits(ctx, report)
_, err := satellitePeer.Audit.Service.Reporter.RecordAudits(ctx, report)
require.NoError(t, err)

dossier, err := sat.Overlay.Service.Get(ctx, nodeID)
dossier, err := satellitePeer.Overlay.Service.Get(ctx, nodeID)
require.NoError(t, err)

reputation := calcReputation(dossier)
@@ -118,42 +118,42 @@ func TestDisqualifiedNodesGetNoDownload(t *testing.T) {
testplanet.Run(t, testplanet.Config{
SatelliteCount: 1, StorageNodeCount: 4, UplinkCount: 1,
}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
satellite := planet.Satellites[0]
upl := planet.Uplinks[0]
satellitePeer := planet.Satellites[0]
uplinkPeer := planet.Uplinks[0]

err := satellite.Audit.Service.Close()
err := satellitePeer.Audit.Service.Close()
require.NoError(t, err)

testData := testrand.Bytes(8 * memory.KiB)

err = upl.Upload(ctx, planet.Satellites[0], "testbucket", "test/path", testData)
err = uplinkPeer.Upload(ctx, planet.Satellites[0], "testbucket", "test/path", testData)
require.NoError(t, err)

projects, err := satellite.DB.Console().Projects().GetAll(ctx)
projects, err := satellitePeer.DB.Console().Projects().GetAll(ctx)
require.NoError(t, err)
require.Len(t, projects, 1)

bucketID := []byte(storj.JoinPaths(projects[0].ID.String(), "testbucket"))

encParameters := upl.GetConfig(satellite).GetEncryptionParameters()
encParameters := uplinkPeer.GetConfig(satellitePeer).GetEncryptionParameters()
cipherSuite := encParameters.CipherSuite
store := encryption.NewStore()
store.SetDefaultKey(new(storj.Key))
encryptedPath, err := encryption.EncryptPath("testbucket", paths.NewUnencrypted("test/path"), cipherSuite, store)
require.NoError(t, err)
lastSegPath := storj.JoinPaths(projects[0].ID.String(), "l", "testbucket", encryptedPath.Raw())
pointer, err := satellite.Metainfo.Service.Get(ctx, lastSegPath)
pointer, err := satellitePeer.Metainfo.Service.Get(ctx, lastSegPath)
require.NoError(t, err)

disqualifiedNode := pointer.GetRemote().GetRemotePieces()[0].NodeId
disqualifyNode(t, ctx, satellite, disqualifiedNode)
disqualifyNode(t, ctx, satellitePeer, disqualifiedNode)

limits, _, err := satellite.Orders.Service.CreateGetOrderLimits(ctx, bucketID, pointer)
limits, _, err := satellitePeer.Orders.Service.CreateGetOrderLimits(ctx, bucketID, pointer)
require.NoError(t, err)
assert.Len(t, limits, len(pointer.GetRemote().GetRemotePieces())-1)

for _, orderLimit := range limits {
assert.False(t, isDisqualified(t, ctx, satellite, orderLimit.Limit.StorageNodeId))
assert.False(t, isDisqualified(t, ctx, satellitePeer, orderLimit.Limit.StorageNodeId))
assert.NotEqual(t, orderLimit.Limit.StorageNodeId, disqualifiedNode)
}
})

@@ -167,13 +167,13 @@ func TestDisqualifiedNodesGetNoUpload(t *testing.T) {
testplanet.Run(t, testplanet.Config{
SatelliteCount: 1, StorageNodeCount: 4, UplinkCount: 1,
}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
satellite := planet.Satellites[0]
satellitePeer := planet.Satellites[0]
disqualifiedNode := planet.StorageNodes[0]

err := satellite.Audit.Service.Close()
err := satellitePeer.Audit.Service.Close()
require.NoError(t, err)

disqualifyNode(t, ctx, satellite, disqualifiedNode.ID())
disqualifyNode(t, ctx, satellitePeer, disqualifiedNode.ID())

request := overlay.FindStorageNodesRequest{
MinimumRequiredNodes: 4,

@@ -183,12 +183,12 @@ func TestDisqualifiedNodesGetNoUpload(t *testing.T) {
ExcludedNodes: nil,
MinimumVersion: "", // semver or empty
}
nodes, err := satellite.Overlay.Service.FindStorageNodes(ctx, request)
nodes, err := satellitePeer.Overlay.Service.FindStorageNodes(ctx, request)
assert.True(t, overlay.ErrNotEnoughNodes.Has(err))

assert.Len(t, nodes, 3)
for _, node := range nodes {
assert.False(t, isDisqualified(t, ctx, satellite, node.Id))
assert.False(t, isDisqualified(t, ctx, satellitePeer, node.Id))
assert.NotEqual(t, node.Id, disqualifiedNode)
}

@@ -204,20 +204,20 @@ func TestDisqualifiedNodeRemainsDisqualified(t *testing.T) {
testplanet.Run(t, testplanet.Config{
SatelliteCount: 1, StorageNodeCount: 4, UplinkCount: 1,
}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
satellite := planet.Satellites[0]
satellitePeer := planet.Satellites[0]

err := satellite.Audit.Service.Close()
err := satellitePeer.Audit.Service.Close()
require.NoError(t, err)

disqualifiedNode := planet.StorageNodes[0]
disqualifyNode(t, ctx, satellite, disqualifiedNode.ID())
disqualifyNode(t, ctx, satellitePeer, disqualifiedNode.ID())

_, err = satellite.DB.OverlayCache().UpdateUptime(ctx, disqualifiedNode.ID(), true, 0, 1, 0)
_, err = satellitePeer.DB.OverlayCache().UpdateUptime(ctx, disqualifiedNode.ID(), true, 0, 1, 0)
require.NoError(t, err)

assert.True(t, isDisqualified(t, ctx, satellite, disqualifiedNode.ID()))
assert.True(t, isDisqualified(t, ctx, satellitePeer, disqualifiedNode.ID()))

_, err = satellite.DB.OverlayCache().UpdateStats(ctx, &overlay.UpdateRequest{
_, err = satellitePeer.DB.OverlayCache().UpdateStats(ctx, &overlay.UpdateRequest{
NodeID: disqualifiedNode.ID(),
IsUp: true,
AuditSuccess: true,

@@ -230,7 +230,7 @@ func TestDisqualifiedNodeRemainsDisqualified(t *testing.T) {
})
require.NoError(t, err)

assert.True(t, isDisqualified(t, ctx, satellite, disqualifiedNode.ID()))
assert.True(t, isDisqualified(t, ctx, satellitePeer, disqualifiedNode.ID()))
})
}
@@ -46,23 +46,23 @@ func TestDataRepair(t *testing.T) {
},
}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
// first, upload some remote data
ul := planet.Uplinks[0]
satellite := planet.Satellites[0]
uplinkPeer := planet.Uplinks[0]
satellitePeer := planet.Satellites[0]
// stop discovery service so that we do not get a race condition when we delete nodes from overlay cache
satellite.Discovery.Service.Discovery.Stop()
satellite.Discovery.Service.Refresh.Stop()
satellitePeer.Discovery.Service.Discovery.Stop()
satellitePeer.Discovery.Service.Refresh.Stop()
// stop audit to prevent possible interactions i.e. repair timeout problems
satellite.Audit.Service.Loop.Stop()
satellitePeer.Audit.Service.Loop.Stop()

satellite.Repair.Checker.Loop.Pause()
satellite.Repair.Repairer.Loop.Pause()
satellitePeer.Repair.Checker.Loop.Pause()
satellitePeer.Repair.Repairer.Loop.Pause()

var (
testData = testrand.Bytes(8 * memory.KiB)
minThreshold = 3
successThreshold = 7
)
err := ul.UploadWithConfig(ctx, satellite, &uplink.RSConfig{
err := uplinkPeer.UploadWithConfig(ctx, satellitePeer, &uplink.RSConfig{
MinThreshold: minThreshold,
RepairThreshold: 5,
SuccessThreshold: successThreshold,

@@ -70,7 +70,7 @@ func TestDataRepair(t *testing.T) {
}, "testbucket", "test/path", testData)
require.NoError(t, err)

pointer, path := getRemoteSegment(t, ctx, satellite)
pointer, path := getRemoteSegment(t, ctx, satellitePeer)

// calculate how many storagenodes to kill
redundancy := pointer.GetRemote().GetRedundancy()

@@ -114,28 +114,28 @@ func TestDataRepair(t *testing.T) {

for _, node := range planet.StorageNodes {
if nodesToDisqualify[node.ID()] {
disqualifyNode(t, ctx, satellite, node.ID())
disqualifyNode(t, ctx, satellitePeer, node.ID())
continue
}
if nodesToKill[node.ID()] {
err = planet.StopPeer(node)
require.NoError(t, err)
_, err = satellite.Overlay.Service.UpdateUptime(ctx, node.ID(), false)
_, err = satellitePeer.Overlay.Service.UpdateUptime(ctx, node.ID(), false)
require.NoError(t, err)
}
}

satellite.Repair.Checker.Loop.Restart()
satellite.Repair.Checker.Loop.TriggerWait()
satellite.Repair.Checker.Loop.Pause()
satellite.Repair.Repairer.Loop.Restart()
satellite.Repair.Repairer.Loop.TriggerWait()
satellite.Repair.Repairer.Loop.Pause()
satellite.Repair.Repairer.Limiter.Wait()
satellitePeer.Repair.Checker.Loop.Restart()
satellitePeer.Repair.Checker.Loop.TriggerWait()
satellitePeer.Repair.Checker.Loop.Pause()
satellitePeer.Repair.Repairer.Loop.Restart()
satellitePeer.Repair.Repairer.Loop.TriggerWait()
satellitePeer.Repair.Repairer.Loop.Pause()
satellitePeer.Repair.Repairer.Limiter.Wait()

// repaired segment should not contain any piece in the killed and DQ nodes
metainfo := satellite.Metainfo.Service
pointer, err = metainfo.Get(ctx, path)
metainfoService := satellitePeer.Metainfo.Service
pointer, err = metainfoService.Get(ctx, path)
require.NoError(t, err)

nodesToKillForMinThreshold := len(remotePieces) - minThreshold

@@ -153,7 +153,7 @@ func TestDataRepair(t *testing.T) {
}

// we should be able to download data without any of the original nodes
newData, err := ul.Download(ctx, satellite, "testbucket", "test/path")
newData, err := uplinkPeer.Download(ctx, satellitePeer, "testbucket", "test/path")
require.NoError(t, err)
require.Equal(t, newData, testData)
})

@@ -173,18 +173,18 @@ func TestRepairMultipleDisqualified(t *testing.T) {
UplinkCount: 1,
}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
// first, upload some remote data
ul := planet.Uplinks[0]
satellite := planet.Satellites[0]
uplinkPeer := planet.Uplinks[0]
satellitePeer := planet.Satellites[0]
// stop discovery service so that we do not get a race condition when we delete nodes from overlay cache
satellite.Discovery.Service.Discovery.Stop()
satellite.Discovery.Service.Refresh.Stop()
satellitePeer.Discovery.Service.Discovery.Stop()
satellitePeer.Discovery.Service.Refresh.Stop()

satellite.Repair.Checker.Loop.Pause()
satellite.Repair.Repairer.Loop.Pause()
satellitePeer.Repair.Checker.Loop.Pause()
satellitePeer.Repair.Repairer.Loop.Pause()

testData := testrand.Bytes(8 * memory.KiB)

err := ul.UploadWithConfig(ctx, satellite, &uplink.RSConfig{
err := uplinkPeer.UploadWithConfig(ctx, satellitePeer, &uplink.RSConfig{
MinThreshold: 3,
RepairThreshold: 5,
SuccessThreshold: 7,

@@ -193,7 +193,7 @@ func TestRepairMultipleDisqualified(t *testing.T) {
require.NoError(t, err)

// get a remote segment from metainfo
metainfo := satellite.Metainfo.Service
metainfo := satellitePeer.Metainfo.Service
listResponse, _, err := metainfo.List(ctx, "", "", "", true, 0, 0)
require.NoError(t, err)

@@ -234,16 +234,16 @@ func TestRepairMultipleDisqualified(t *testing.T) {

for _, node := range planet.StorageNodes {
if nodesToDisqualify[node.ID()] {
disqualifyNode(t, ctx, satellite, node.ID())
disqualifyNode(t, ctx, satellitePeer, node.ID())
}
}

err = satellite.Repair.Checker.RefreshReliabilityCache(ctx)
err = satellitePeer.Repair.Checker.RefreshReliabilityCache(ctx)
require.NoError(t, err)

satellite.Repair.Checker.Loop.TriggerWait()
satellite.Repair.Repairer.Loop.TriggerWait()
satellite.Repair.Repairer.Limiter.Wait()
satellitePeer.Repair.Checker.Loop.TriggerWait()
satellitePeer.Repair.Repairer.Loop.TriggerWait()
satellitePeer.Repair.Repairer.Limiter.Wait()

// kill nodes kept alive to ensure repair worked
for _, node := range planet.StorageNodes {

@@ -251,13 +251,13 @@ func TestRepairMultipleDisqualified(t *testing.T) {
err = planet.StopPeer(node)
require.NoError(t, err)

_, err = satellite.Overlay.Service.UpdateUptime(ctx, node.ID(), false)
_, err = satellitePeer.Overlay.Service.UpdateUptime(ctx, node.ID(), false)
require.NoError(t, err)
}
}

// we should be able to download data without any of the original nodes
newData, err := ul.Download(ctx, satellite, "testbucket", "test/path")
newData, err := uplinkPeer.Download(ctx, satellitePeer, "testbucket", "test/path")
require.NoError(t, err)
require.Equal(t, newData, testData)
@@ -82,13 +82,13 @@ func isOperatorEmailValid(log *zap.Logger, email string) error {

func isOperatorWalletValid(log *zap.Logger, wallet string) error {
if wallet == "" {
return fmt.Errorf("Operator wallet address isn't specified")
return fmt.Errorf("operator wallet address isn't specified")
}
r := regexp.MustCompile("^0x[a-fA-F0-9]{40}$")
if match := r.MatchString(wallet); !match {
return fmt.Errorf("Operator wallet address isn't valid")
return fmt.Errorf("operator wallet address isn't valid")
}

log.Sugar().Info("Operator wallet: ", wallet)
log.Sugar().Info("operator wallet: ", wallet)
return nil
}
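The hunk above lower-cases the error strings, following the Go convention that error messages are not capitalized because they usually get wrapped into longer messages. A compact, standalone sketch of the same validation without the zap logger; the example address is arbitrary:

```go
package main

import (
	"errors"
	"fmt"
	"regexp"
)

var walletRE = regexp.MustCompile("^0x[a-fA-F0-9]{40}$")

func isOperatorWalletValid(wallet string) error {
	if wallet == "" {
		return errors.New("operator wallet address isn't specified")
	}
	if !walletRE.MatchString(wallet) {
		return errors.New("operator wallet address isn't valid")
	}
	return nil
}

func main() {
	fmt.Println(isOperatorWalletValid("0x52908400098527886E0F7030069857D2E4169EE7")) // <nil>
	fmt.Println(isOperatorWalletValid("not-a-wallet"))                               // operator wallet address isn't valid
}
```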
@@ -174,7 +174,7 @@ func (k *Kademlia) Bootstrap(ctx context.Context) (err error) {

ident, err := k.dialer.FetchPeerIdentityUnverified(ctx, node.Address.Address)
if err != nil {
errGroup.Add(err)
errGroup.Add(BootstrapErr.Wrap(err))
continue
}

@@ -197,7 +197,7 @@ func (k *Kademlia) Bootstrap(ctx context.Context) (err error) {
}

if !foundOnlineBootstrap {
errGroup.Add(Error.New("no bootstrap node found online"))
errGroup.Add(BootstrapErr.New("no bootstrap node found online"))
continue
}

@@ -207,7 +207,7 @@ func (k *Kademlia) Bootstrap(ctx context.Context) (err error) {
k.routingTable.mutex.Unlock()
_, err := k.lookup(ctx, id)
if err != nil {
errGroup.Add(err)
errGroup.Add(BootstrapErr.Wrap(err))
continue
}
return nil

@@ -219,7 +219,7 @@ func (k *Kademlia) Bootstrap(ctx context.Context) (err error) {
// ```
}

errGroup.Add(Error.New("unable to start bootstrap after final wait time of %s", waitInterval))
errGroup.Add(BootstrapErr.New("unable to start bootstrap after final wait time of %s", waitInterval))
return errGroup.Err()
}
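The hunks above tag every collected bootstrap failure with a dedicated error class instead of adding it bare. A sketch of that wrap-then-collect pattern, assuming the github.com/zeebo/errs API; the class name and helper below are illustrative, not the package's real ones:

```go
package sketch

import "github.com/zeebo/errs"

// BootstrapError tags everything that goes wrong while bootstrapping.
var BootstrapError = errs.Class("kademlia bootstrap")

// bootstrap tries each step until one succeeds, collecting classified errors.
func bootstrap(steps []func() error) error {
	var group errs.Group
	for _, step := range steps {
		if err := step(); err != nil {
			// Wrap before collecting so the class survives aggregation.
			group.Add(BootstrapError.Wrap(err))
			continue
		}
		return nil
	}
	group.Add(BootstrapError.New("no bootstrap step succeeded"))
	return group.Err()
}
```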
@@ -7,7 +7,6 @@ import (
"bytes"
"context"
"net"
"strconv"
"sync/atomic"
"testing"
"time"

@@ -65,8 +64,8 @@ func TestNewKademlia(t *testing.T) {
},
}

for i, v := range cases {
kad, err := newKademlia(ctx, zaptest.NewLogger(t), pb.NodeType_STORAGE, v.bn, v.addr, pb.NodeOperator{}, v.id, ctx.Dir(strconv.Itoa(i)), defaultAlpha)
for _, v := range cases {
kad, err := newKademlia(zaptest.NewLogger(t), pb.NodeType_STORAGE, v.bn, v.addr, pb.NodeOperator{}, v.id, defaultAlpha)
require.NoError(t, err)
assert.Equal(t, v.expectedErr, err)
assert.Equal(t, kad.bootstrapNodes, v.bn)

@@ -93,7 +92,7 @@ func TestPeerDiscovery(t *testing.T) {
operator := pb.NodeOperator{
Wallet: "OperatorWallet",
}
k, err := newKademlia(ctx, zaptest.NewLogger(t), pb.NodeType_STORAGE, bootstrapNodes, testAddress, operator, testID, ctx.Dir("test"), defaultAlpha)
k, err := newKademlia(zaptest.NewLogger(t), pb.NodeType_STORAGE, bootstrapNodes, testAddress, operator, testID, defaultAlpha)
require.NoError(t, err)
rt := k.routingTable
assert.Equal(t, rt.Local().Operator.Wallet, "OperatorWallet")

@@ -153,7 +152,7 @@ func TestBootstrap(t *testing.T) {
func testNode(ctx *testcontext.Context, name string, t *testing.T, bn []pb.Node) (*Kademlia, *grpc.Server, func()) {
// new address
lis, err := net.Listen("tcp", "127.0.0.1:0")
require.NoError(t, err)
require.NoErrorf(t, err, "node: %s", name)
// new config
// new identity
fid, err := testidentity.NewTestIdentity(ctx)

@@ -161,7 +160,7 @@ func testNode(ctx *testcontext.Context, name string, t *testing.T, bn []pb.Node)
// new kademlia

logger := zaptest.NewLogger(t)
k, err := newKademlia(ctx, logger, pb.NodeType_STORAGE, bn, lis.Addr().String(), pb.NodeOperator{}, fid, ctx.Dir(name), defaultAlpha)
k, err := newKademlia(logger, pb.NodeType_STORAGE, bn, lis.Addr().String(), pb.NodeOperator{}, fid, defaultAlpha)
require.NoError(t, err)

s := NewEndpoint(logger, k, k.routingTable)

@@ -231,7 +230,7 @@ func TestFindNear(t *testing.T) {
//start kademlia
lis, err := net.Listen("tcp", "127.0.0.1:0")
require.NoError(t, err)
srv, _ := newTestServer(ctx, []*pb.Node{{Id: teststorj.NodeIDFromString("foo")}})
srv, _ := newTestServer(ctx)

defer srv.Stop()
ctx.Go(func() error {

@@ -243,13 +242,13 @@ func TestFindNear(t *testing.T) {
})

bootstrap := []pb.Node{{Id: fid2.ID, Address: &pb.NodeAddress{Address: lis.Addr().String()}}}
k, err := newKademlia(ctx, zaptest.NewLogger(t), pb.NodeType_STORAGE, bootstrap,
lis.Addr().String(), pb.NodeOperator{}, fid, ctx.Dir("kademlia"), defaultAlpha)
k, err := newKademlia(zaptest.NewLogger(t), pb.NodeType_STORAGE, bootstrap,
lis.Addr().String(), pb.NodeOperator{}, fid, defaultAlpha)
require.NoError(t, err)
defer ctx.Check(k.Close)

// add nodes
nodes := []*pb.Node{}
var nodes []*pb.Node
newNode := func(id string, bw, disk int64) pb.Node {
nodeID := teststorj.NodeIDFromString(id)
n := &pb.Node{Id: nodeID}
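The `var nodes []*pb.Node` form above is the usual nil-slice declaration. A tiny standalone illustration of why it is interchangeable with `[]T{}` when the slice is only ever appended to:

```go
package main

import "fmt"

func main() {
	var nodes []string // nil slice: len 0, cap 0, no allocation yet
	nodes = append(nodes, "alpha", "beta")

	literal := []string{} // non-nil empty slice; behaves the same for append/len/range
	literal = append(literal, "alpha", "beta")

	fmt.Println(nodes, literal, len(nodes) == len(literal)) // [alpha beta] [alpha beta] true
}
```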
@@ -303,12 +302,12 @@ func startTestNodeServer(ctx *testcontext.Context) (*grpc.Server, *mockNodesServ
if err != nil {
return nil, nil, nil, ""
}
identity, err := ca.NewIdentity()
fullIdentity, err := ca.NewIdentity()
if err != nil {
return nil, nil, nil, ""
}

serverOptions, err := tlsopts.NewOptions(identity, tlsopts.Config{})
serverOptions, err := tlsopts.NewOptions(fullIdentity, tlsopts.Config{})
if err != nil {
return nil, nil, nil, ""
}

@@ -326,19 +325,19 @@ func startTestNodeServer(ctx *testcontext.Context) (*grpc.Server, *mockNodesServ
return err
})

return grpcServer, mn, identity, lis.Addr().String()
return grpcServer, mn, fullIdentity, lis.Addr().String()
}

func newTestServer(ctx *testcontext.Context, nn []*pb.Node) (*grpc.Server, *mockNodesServer) {
func newTestServer(ctx *testcontext.Context) (*grpc.Server, *mockNodesServer) {
ca, err := testidentity.NewTestCA(ctx)
if err != nil {
return nil, nil
}
identity, err := ca.NewIdentity()
fullIdentity, err := ca.NewIdentity()
if err != nil {
return nil, nil
}
serverOptions, err := tlsopts.NewOptions(identity, tlsopts.Config{})
serverOptions, err := tlsopts.NewOptions(fullIdentity, tlsopts.Config{})
if err != nil {
return nil, nil
}

@@ -408,7 +407,7 @@ func (mn *mockNodesServer) RequestInfo(ctx context.Context, req *pb.InfoRequest)
}

// newKademlia returns a newly configured Kademlia instance
func newKademlia(ctx context.Context, log *zap.Logger, nodeType pb.NodeType, bootstrapNodes []pb.Node, address string, operator pb.NodeOperator, identity *identity.FullIdentity, path string, alpha int) (*Kademlia, error) {
func newKademlia(log *zap.Logger, nodeType pb.NodeType, bootstrapNodes []pb.Node, address string, operator pb.NodeOperator, identity *identity.FullIdentity, alpha int) (*Kademlia, error) {
self := &overlay.NodeDossier{
Node: pb.Node{
Id: identity.ID,
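Dropping ctx (and the on-disk path) from newKademlia follows the usual Go guidance: a constructor takes configuration only, while a context is passed to each operation so cancellation and deadlines apply per call. A generic sketch of that split, with hypothetical names:

```go
package sketch

import "context"

// Client is built from configuration only; nothing here blocks or does I/O.
type Client struct {
	addr string
}

func NewClient(addr string) *Client {
	return &Client{addr: addr}
}

// Lookup receives the caller's context so deadlines and cancellation
// are scoped to this call, not to the client's lifetime.
func (c *Client) Lookup(ctx context.Context, id string) (string, error) {
	select {
	case <-ctx.Done():
		return "", ctx.Err()
	default:
		return c.addr + "/" + id, nil
	}
}
```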
@@ -8,7 +8,6 @@ import (
"sort"
"sync"

"github.com/zeebo/errs"
"go.uber.org/zap"

"storj.io/storj/pkg/kademlia/kademliaclient"

@@ -29,9 +28,6 @@ type peerDiscovery struct {
queue discoveryQueue
}

// ErrMaxRetries is used when a lookup has been retried the max number of times
var ErrMaxRetries = errs.Class("max retries exceeded for id:")

func newPeerDiscovery(log *zap.Logger, dialer *kademliaclient.Dialer, target storj.NodeID, startingNodes []*pb.Node, k, alpha int, self *pb.Node) *peerDiscovery {
discovery := &peerDiscovery{
log: log,
@@ -26,8 +26,9 @@ func TestSortByXOR(t *testing.T) {
}

func BenchmarkSortByXOR(b *testing.B) {
nodes := []storj.NodeID{}
for k := 0; k < 1000; k++ {
l := 1000
nodes := make([]storj.NodeID, l)
for k := 0; k < l; k++ {
nodes = append(nodes, testrand.NodeID())
}
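If the intent of the benchmark change is to preallocate before filling the slice, these are the two usual idioms. This is generic guidance rather than the exact shape the commit settled on; note that `make` with a non-zero length combined with `append` would leave zero-valued placeholder elements at the front.

```go
package main

import "fmt"

func main() {
	const n = 1000

	// Idiom 1: allocate the full length and assign by index.
	byIndex := make([]int, n)
	for i := range byIndex {
		byIndex[i] = i
	}

	// Idiom 2: allocate capacity only and append; len starts at zero.
	byAppend := make([]int, 0, n)
	for i := 0; i < n; i++ {
		byAppend = append(byAppend, i)
	}

	fmt.Println(len(byIndex), len(byAppend)) // 1000 1000
}
```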
@@ -216,14 +216,14 @@ func runTest(t *testing.T, test func(*testing.T, context.Context, *testplanet.Pl
testplanet.Run(t, testplanet.Config{
SatelliteCount: 1, StorageNodeCount: 4, UplinkCount: 1,
}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
db, streams, err := newMetainfoParts(t, planet)
db, streams, err := newMetainfoParts(planet)
require.NoError(t, err)

test(t, ctx, planet, db, streams)
})
}

func newMetainfoParts(t *testing.T, planet *testplanet.Planet) (*kvmetainfo.DB, streams.Store, error) {
func newMetainfoParts(planet *testplanet.Planet) (*kvmetainfo.DB, streams.Store, error) {
// TODO(kaloyan): We should have a better way for configuring the Satellite's API Key
// add project to satisfy constraint
project, err := planet.Satellites[0].DB.Console().Projects().Insert(context.Background(), &console.Project{

@@ -136,9 +136,9 @@ func TestGetObjectStream(t *testing.T) {
_, err = db.GetObjectStream(ctx, bucket.Name, "non-existing-file")
assert.True(t, storj.ErrObjectNotFound.Has(err))

assertStream(ctx, t, db, streams, bucket, "empty-file", 0, []byte{})
assertStream(ctx, t, db, streams, bucket, "small-file", 4, []byte("test"))
assertStream(ctx, t, db, streams, bucket, "large-file", 32*memory.KiB.Int64(), data)
assertStream(ctx, t, db, streams, bucket, "empty-file", []byte{})
assertStream(ctx, t, db, streams, bucket, "small-file", []byte("test"))
assertStream(ctx, t, db, streams, bucket, "large-file", data)

/* TODO: Disable stopping due to flakiness.
// Stop randomly half of the storage nodes and remove them from satellite's overlay cache

@@ -174,7 +174,7 @@ func upload(ctx context.Context, t *testing.T, db *kvmetainfo.DB, streams stream
require.NoError(t, err)
}

func assertStream(ctx context.Context, t *testing.T, db *kvmetainfo.DB, streams streams.Store, bucket storj.Bucket, path storj.Path, size int64, content []byte) {
func assertStream(ctx context.Context, t *testing.T, db *kvmetainfo.DB, streams streams.Store, bucket storj.Bucket, path storj.Path, content []byte) {
readOnly, err := db.GetObjectStream(ctx, bucket.Name, path)
require.NoError(t, err)
@@ -301,12 +301,12 @@ func TestIsVetted(t *testing.T) {
},
}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
var err error
satellite := planet.Satellites[0]
satellite.Audit.Service.Loop.Pause()
satellite.Repair.Checker.Loop.Pause()
service := satellite.Overlay.Service
satellitePeer := planet.Satellites[0]
satellitePeer.Audit.Service.Loop.Pause()
satellitePeer.Repair.Checker.Loop.Pause()
service := satellitePeer.Overlay.Service

_, err = satellite.DB.OverlayCache().UpdateStats(ctx, &overlay.UpdateRequest{
_, err = satellitePeer.DB.OverlayCache().UpdateStats(ctx, &overlay.UpdateRequest{
NodeID: planet.StorageNodes[0].ID(),
IsUp: true,
AuditSuccess: true,

@@ -319,7 +319,7 @@ func TestIsVetted(t *testing.T) {
})
require.NoError(t, err)

_, err = satellite.DB.OverlayCache().UpdateStats(ctx, &overlay.UpdateRequest{
_, err = satellitePeer.DB.OverlayCache().UpdateStats(ctx, &overlay.UpdateRequest{
NodeID: planet.StorageNodes[1].ID(),
IsUp: true,
AuditSuccess: true,

@@ -344,8 +344,8 @@ func TestIsVetted(t *testing.T) {
require.NoError(t, err)
require.False(t, reputable)

// test dqing for bad uptime
_, err = satellite.DB.OverlayCache().UpdateStats(ctx, &overlay.UpdateRequest{
// test dq-ing for bad uptime
_, err = satellitePeer.DB.OverlayCache().UpdateStats(ctx, &overlay.UpdateRequest{
NodeID: planet.StorageNodes[0].ID(),
IsUp: false,
AuditSuccess: true,

@@ -358,8 +358,8 @@ func TestIsVetted(t *testing.T) {
})
require.NoError(t, err)

// test dqing for bad audit
_, err = satellite.DB.OverlayCache().UpdateStats(ctx, &overlay.UpdateRequest{
// test dq-ing for bad audit
_, err = satellitePeer.DB.OverlayCache().UpdateStats(ctx, &overlay.UpdateRequest{
NodeID: planet.StorageNodes[1].ID(),
IsUp: true,
AuditSuccess: false,
@@ -65,7 +65,7 @@ func ParseNodeURL(s string) (NodeURL, error) {
return node, nil
}

// IsZero returns whether tthe url is empty.
// IsZero returns whether the url is empty.
func (url NodeURL) IsZero() bool {
return url == NodeURL{}
}

@@ -102,8 +102,8 @@ func ParseNodeURLs(s string) (NodeURLs, error) {
return nil, nil
}

for _, url := range strings.Split(s, ",") {
u, err := ParseNodeURL(url)
for _, s := range strings.Split(s, ",") {
u, err := ParseNodeURL(s)
if err != nil {
return nil, ErrNodeURL.Wrap(err)
}

@@ -116,8 +116,8 @@ func ParseNodeURLs(s string) (NodeURLs, error) {
// String converts NodeURLs to a string
func (urls NodeURLs) String() string {
var xs []string
for _, url := range urls {
xs = append(xs, url.String())
for _, u := range urls {
xs = append(xs, u.String())
}
return strings.Join(xs, ",")
}
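The loops above just rename the iteration variables; the underlying split-and-parse pattern looks like this generic sketch, using the standard library's net/url in place of the package's own NodeURL type:

```go
package main

import (
	"fmt"
	"net/url"
	"strings"
)

// parseAll splits a comma-separated list and parses each element.
func parseAll(s string) ([]*url.URL, error) {
	var out []*url.URL
	for _, part := range strings.Split(s, ",") {
		u, err := url.Parse(strings.TrimSpace(part))
		if err != nil {
			return nil, fmt.Errorf("parse %q: %w", part, err)
		}
		out = append(out, u)
	}
	return out, nil
}

func main() {
	urls, err := parseAll("https://a.example:7777,https://b.example:7777")
	fmt.Println(len(urls), err) // 2 <nil>
}
```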
@@ -99,7 +99,7 @@ func (containment *containment) Delete(ctx context.Context, id pb.NodeID) (_ boo

isDeleted, err := tx.Delete_PendingAudits_By_NodeId(ctx, dbx.PendingAudits_NodeId(id.Bytes()))
if err != nil {
return isDeleted, audit.ContainError.Wrap(errs.Combine(err, tx.Rollback()))
return isDeleted, audit.ContainError.Wrap(errs.Combine(audit.ErrContainDelete.Wrap(err), tx.Rollback()))
}

updateContained := dbx.Node_Update_Fields{
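The change above classifies the delete failure before combining it with the rollback error, so callers can still recognize it after aggregation. A sketch of that pattern, assuming the github.com/zeebo/errs API; the class, interface and function names are illustrative:

```go
package sketch

import "github.com/zeebo/errs"

// ErrDelete tags failures of the delete step.
var ErrDelete = errs.Class("pending audit delete")

// tx is a minimal stand-in for a database transaction.
type tx interface {
	Delete(id []byte) (bool, error)
	Rollback() error
}

func deletePending(t tx, id []byte) (bool, error) {
	deleted, err := t.Delete(id)
	if err != nil {
		// Wrap with the class first, then combine with any rollback error.
		return deleted, errs.Combine(ErrDelete.Wrap(err), t.Rollback())
	}
	return deleted, nil
}
```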
@@ -32,30 +32,3 @@ func ListKeys(ctx context.Context, store KeyValueStore, first Key, limit int) (_

return keys, err
}

// ReverseListKeys returns keys starting from first and upto limit in reverse order
// limit is capped to LookupLimit
func ReverseListKeys(ctx context.Context, store KeyValueStore, first Key, limit int) (_ Keys, err error) {
defer mon.Task()(&ctx)(&err)
if limit <= 0 || limit > LookupLimit {
limit = LookupLimit
}

keys := make(Keys, 0, limit)
err = store.Iterate(ctx, IterateOptions{
First: first,
Recurse: true,
Reverse: true,
}, func(ctx context.Context, it Iterator) error {
var item ListItem
for ; limit > 0 && it.Next(ctx, &item); limit-- {
if item.Key == nil {
panic("nil key")
}
keys = append(keys, CloneKey(item.Key))
}
return nil
})

return keys, err
}