satellite/overlay: remove overlay.IsVetted (#3203)
parent 7485a1d0f5
commit 0ea0d8c3da

@@ -53,8 +53,6 @@ type DB interface {
 	Paginate(ctx context.Context, offset int64, limit int) ([]*NodeDossier, bool, error)
 	// PaginateQualified will page through the qualified nodes
 	PaginateQualified(ctx context.Context, offset int64, limit int) ([]*pb.Node, bool, error)
-	// IsVetted returns whether or not the node reaches reputable thresholds
-	IsVetted(ctx context.Context, id storj.NodeID, criteria *NodeCriteria) (bool, error)
 	// Update updates node address
 	UpdateAddress(ctx context.Context, value *pb.Node, defaults NodeSelectionConfig) error
 	// BatchUpdateStats updates multiple storagenode's stats in one transaction
@@ -349,20 +347,6 @@ func (service *Service) Put(ctx context.Context, nodeID storj.NodeID, value pb.N
 	return service.db.UpdateAddress(ctx, &value, service.config.Node)
 }
 
-// IsVetted returns whether or not the node reaches reputable thresholds
-func (service *Service) IsVetted(ctx context.Context, nodeID storj.NodeID) (reputable bool, err error) {
-	defer mon.Task()(&ctx)(&err)
-	criteria := &NodeCriteria{
-		AuditCount:  service.config.Node.AuditCount,
-		UptimeCount: service.config.Node.UptimeCount,
-	}
-	reputable, err = service.db.IsVetted(ctx, nodeID, criteria)
-	if err != nil {
-		return false, err
-	}
-	return reputable, nil
-}
-
 // BatchUpdateStats updates multiple storagenode's stats in one transaction
 func (service *Service) BatchUpdateStats(ctx context.Context, requests []*UpdateRequest) (failed storj.NodeIDList, err error) {
 	defer mon.Task()(&ctx)(&err)
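
For context, the check that the deleted Service.IsVetted delegated to the database reduces to two threshold comparisons plus a disqualification test. The helper below is only a sketch of that rule, not code from this change: the function name is hypothetical, and it assumes NodeCriteria exposes integer AuditCount and UptimeCount fields, as the removed code above suggests.

package overlay

// isVettedByCriteria is a hypothetical restatement of the rule the removed
// code enforced: a node counts as vetted once it is not disqualified and has
// reached the configured audit and uptime counts.
func isVettedByCriteria(totalAuditCount, totalUptimeCount int64, disqualified bool, criteria *NodeCriteria) bool {
	if disqualified {
		return false
	}
	return totalAuditCount >= criteria.AuditCount &&
		totalUptimeCount >= criteria.UptimeCount
}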
@@ -10,7 +10,6 @@ import (
 
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
-	"go.uber.org/zap"
 	"go.uber.org/zap/zaptest"
 
 	"storj.io/storj/internal/testcontext"
@@ -292,99 +291,6 @@ func TestRandomizedSelection(t *testing.T) {
 	})
 }
 
-func TestIsVetted(t *testing.T) {
-	testplanet.Run(t, testplanet.Config{
-		SatelliteCount: 1, StorageNodeCount: 3, UplinkCount: 0,
-		Reconfigure: testplanet.Reconfigure{
-			Satellite: func(log *zap.Logger, index int, config *satellite.Config) {
-				config.Overlay.Node.AuditCount = 1
-				config.Overlay.Node.UptimeCount = 1
-			},
-		},
-	}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
-		var err error
-		satellitePeer := planet.Satellites[0]
-		satellitePeer.Audit.Chore.Loop.Pause()
-		satellitePeer.Audit.Worker.Loop.Pause()
-		satellitePeer.Repair.Checker.Loop.Pause()
-		service := satellitePeer.Overlay.Service
-
-		_, err = satellitePeer.DB.OverlayCache().UpdateStats(ctx, &overlay.UpdateRequest{
-			NodeID:       planet.StorageNodes[0].ID(),
-			IsUp:         true,
-			AuditSuccess: true,
-			AuditLambda:  1,
-			AuditWeight:  1,
-			AuditDQ:      0.5,
-			UptimeLambda: 1,
-			UptimeWeight: 1,
-			UptimeDQ:     0.5,
-		})
-		require.NoError(t, err)
-
-		_, err = satellitePeer.DB.OverlayCache().UpdateStats(ctx, &overlay.UpdateRequest{
-			NodeID:       planet.StorageNodes[1].ID(),
-			IsUp:         true,
-			AuditSuccess: true,
-			AuditLambda:  1,
-			AuditWeight:  1,
-			AuditDQ:      0.5,
-			UptimeLambda: 1,
-			UptimeWeight: 1,
-			UptimeDQ:     0.5,
-		})
-		require.NoError(t, err)
-
-		reputable, err := service.IsVetted(ctx, planet.StorageNodes[0].ID())
-		require.NoError(t, err)
-		require.True(t, reputable)
-
-		reputable, err = service.IsVetted(ctx, planet.StorageNodes[1].ID())
-		require.NoError(t, err)
-		require.True(t, reputable)
-
-		reputable, err = service.IsVetted(ctx, planet.StorageNodes[2].ID())
-		require.NoError(t, err)
-		require.False(t, reputable)
-
-		// test dq-ing for bad uptime
-		_, err = satellitePeer.DB.OverlayCache().UpdateStats(ctx, &overlay.UpdateRequest{
-			NodeID:       planet.StorageNodes[0].ID(),
-			IsUp:         false,
-			AuditSuccess: true,
-			AuditLambda:  1,
-			AuditWeight:  1,
-			AuditDQ:      0.5,
-			UptimeLambda: 0,
-			UptimeWeight: 1,
-			UptimeDQ:     0.5,
-		})
-		require.NoError(t, err)
-
-		// test dq-ing for bad audit
-		_, err = satellitePeer.DB.OverlayCache().UpdateStats(ctx, &overlay.UpdateRequest{
-			NodeID:       planet.StorageNodes[1].ID(),
-			IsUp:         true,
-			AuditSuccess: false,
-			AuditLambda:  0,
-			AuditWeight:  1,
-			AuditDQ:      0.5,
-			UptimeLambda: 1,
-			UptimeWeight: 1,
-			UptimeDQ:     0.5,
-		})
-		require.NoError(t, err)
-
-		reputable, err = service.IsVetted(ctx, planet.StorageNodes[0].ID())
-		require.NoError(t, err)
-		require.False(t, reputable)
-
-		reputable, err = service.IsVetted(ctx, planet.StorageNodes[1].ID())
-		require.NoError(t, err)
-		require.False(t, reputable)
-	})
-}
-
 func TestNodeInfo(t *testing.T) {
 	testplanet.Run(t, testplanet.Config{
 		SatelliteCount: 1, StorageNodeCount: 1, UplinkCount: 0,
@@ -837,7 +837,7 @@ func (m *lockedOverlayCache) Get(ctx context.Context, nodeID storj.NodeID) (*ove
 	return m.db.Get(ctx, nodeID)
 }
 
-// GetExitingNodes returns nodes who have initiated a graceful exit.
+// GetExitingNodes returns nodes who have initiated a graceful exit, but have not completed it.
 func (m *lockedOverlayCache) GetExitingNodes(ctx context.Context) (exitingNodes storj.NodeIDList, err error) {
 	m.Lock()
 	defer m.Unlock()
@@ -851,13 +851,6 @@ func (m *lockedOverlayCache) GetExitingNodesLoopIncomplete(ctx context.Context)
 	return m.db.GetExitingNodesLoopIncomplete(ctx)
 }
 
-// IsVetted returns whether or not the node reaches reputable thresholds
-func (m *lockedOverlayCache) IsVetted(ctx context.Context, id storj.NodeID, criteria *overlay.NodeCriteria) (bool, error) {
-	m.Lock()
-	defer m.Unlock()
-	return m.db.IsVetted(ctx, id, criteria)
-}
-
 // KnownOffline filters a set of nodes to offline nodes
 func (m *lockedOverlayCache) KnownOffline(ctx context.Context, a1 *overlay.NodeCriteria, a2 storj.NodeIDList) (storj.NodeIDList, error) {
 	m.Lock()
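
The deleted wrapper above follows the file's single pattern: every lockedOverlayCache method takes one shared mutex and forwards to the wrapped overlay.DB, just like the KnownOffline and GetExitingNodes wrappers that remain. A stripped-down sketch of that pattern, using a made-up two-line interface rather than the real overlay.DB, looks like this:

package lockedexample

import (
	"context"
	"sync"
)

// nodeDB stands in for the real overlay.DB interface; it is illustrative only.
type nodeDB interface {
	KnownOffline(ctx context.Context, ids []string) ([]string, error)
}

// lockedNodeDB serializes every call to the wrapped implementation behind one
// mutex, mirroring how lockedOverlayCache wraps the overlay cache.
type lockedNodeDB struct {
	sync.Mutex
	db nodeDB
}

func (m *lockedNodeDB) KnownOffline(ctx context.Context, ids []string) ([]string, error) {
	m.Lock()
	defer m.Unlock()
	return m.db.KnownOffline(ctx, ids)
}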
@@ -363,29 +363,6 @@ func (cache *overlaycache) Get(ctx context.Context, id storj.NodeID) (_ *overlay
 	return convertDBNode(ctx, node)
 }
 
-// IsVetted returns whether or not the node reaches reputable thresholds
-func (cache *overlaycache) IsVetted(ctx context.Context, id storj.NodeID, criteria *overlay.NodeCriteria) (_ bool, err error) {
-	defer mon.Task()(&ctx)(&err)
-
-	row := cache.db.QueryRow(cache.db.Rebind(`SELECT id
-		FROM nodes
-		WHERE id = ?
-			AND disqualified IS NULL
-			AND type = ?
-			AND total_audit_count >= ?
-			AND total_uptime_count >= ?
-	`), id, pb.NodeType_STORAGE, criteria.AuditCount, criteria.UptimeCount)
-	var bytes *[]byte
-	err = row.Scan(&bytes)
-	if err != nil {
-		if err == sql.ErrNoRows {
-			return false, nil
-		}
-		return false, err
-	}
-	return true, nil
-}
-
 // KnownOffline filters a set of nodes to offline nodes
 func (cache *overlaycache) KnownOffline(ctx context.Context, criteria *overlay.NodeCriteria, nodeIds storj.NodeIDList) (offlineNodes storj.NodeIDList, err error) {
 	defer mon.Task()(&ctx)(&err)
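
The SQL removed above can still be handy for checking a node's vetting status by hand. The sketch below is a standalone version under stated assumptions, not part of the commit: it presumes a plain *sql.DB with '?' placeholders (the original rebinds them per backend via cache.db.Rebind), a hypothetical function name, and import paths matching the repository layout at this revision.

package satellitedbexample

import (
	"context"
	"database/sql"

	"storj.io/storj/pkg/pb"
	"storj.io/storj/pkg/storj"
)

// checkVetted runs the same query the deleted IsVetted used: it reports true
// when a non-disqualified storage node has reached the given audit and uptime
// counts, false when no row matches, and an error otherwise.
func checkVetted(ctx context.Context, db *sql.DB, id storj.NodeID, auditCount, uptimeCount int64) (bool, error) {
	row := db.QueryRowContext(ctx, `SELECT id
		FROM nodes
		WHERE id = ?
			AND disqualified IS NULL
			AND type = ?
			AND total_audit_count >= ?
			AND total_uptime_count >= ?
	`, id, pb.NodeType_STORAGE, auditCount, uptimeCount)

	var scanned []byte
	if err := row.Scan(&scanned); err != nil {
		if err == sql.ErrNoRows {
			return false, nil // no matching row: the node is not vetted
		}
		return false, err
	}
	return true, nil
}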