private/testplanet: move Metabase outside Metainfo for satellite
At some point we moved the metabase package outside of Metainfo, but we didn't do the same for the satellite structure. This change refactors only the tests. Once uplink is adjusted, we can remove the old entries in the Metainfo struct. Change-Id: I2b66ed29f539b0ec0f490cad42c72840e0351bcb
This commit is contained in:
parent
9eaddee530
commit
c258f4bbac
@ -96,9 +96,16 @@ type Satellite struct {
|
||||
}
|
||||
|
||||
Metainfo struct {
|
||||
Metabase *metabase.DB
|
||||
Service *metainfo.Service
|
||||
Endpoint *metainfo.Endpoint
|
||||
// TODO remove when uplink will be adjusted to use Metabase.DB
|
||||
Metabase *metabase.DB
|
||||
Service *metainfo.Service
|
||||
Endpoint *metainfo.Endpoint
|
||||
// TODO remove when uplink will be adjusted to use Metabase.SegmentLoop
|
||||
SegmentLoop *segmentloop.Service
|
||||
}
|
||||
|
||||
Metabase struct {
|
||||
DB *metabase.DB
|
||||
SegmentLoop *segmentloop.Service
|
||||
}
|
||||
|
||||
@ -562,10 +569,13 @@ func createNewSystem(name string, log *zap.Logger, config satellite.Config, peer
|
||||
|
||||
system.Reputation.Service = peer.Reputation.Service
|
||||
|
||||
system.Metainfo.Metabase = api.Metainfo.Metabase
|
||||
// system.Metainfo.Metabase = api.Metainfo.Metabase
|
||||
system.Metainfo.Service = peer.Metainfo.Service
|
||||
system.Metainfo.Endpoint = api.Metainfo.Endpoint
|
||||
system.Metainfo.SegmentLoop = peer.Metainfo.SegmentLoop
|
||||
// system.Metainfo.SegmentLoop = peer.Metainfo.SegmentLoop
|
||||
|
||||
system.Metabase.DB = api.Metainfo.Metabase
|
||||
system.Metabase.SegmentLoop = peer.Metainfo.SegmentLoop
|
||||
|
||||
system.Inspector.Endpoint = api.Inspector.Endpoint
|
||||
|
||||
|
@ -88,7 +88,7 @@ func TestDownloadWithSomeNodesOffline(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
|
||||
// get a remote segment
|
||||
segments, err := satellite.Metainfo.Metabase.TestingAllSegments(ctx)
|
||||
segments, err := satellite.Metabase.DB.TestingAllSegments(ctx)
|
||||
require.NoError(t, err)
|
||||
|
||||
// calculate how many storagenodes to kill
|
||||
@ -161,7 +161,7 @@ func TestDownloadFromUnresponsiveNode(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
|
||||
// get a remote segment from metabase
|
||||
segments, err := planet.Satellites[0].Metainfo.Metabase.TestingAllSegments(ctx)
|
||||
segments, err := planet.Satellites[0].Metabase.DB.TestingAllSegments(ctx)
|
||||
require.NoError(t, err)
|
||||
require.Len(t, segments, 1)
|
||||
require.NotEmpty(t, segments[0].Pieces)
|
||||
|
@ -241,10 +241,10 @@ func TestBilling_AuditRepairTraffic(t *testing.T) {
|
||||
require.NotZero(t, projectTotal.Egress)
|
||||
|
||||
// get the only metainfo record (our upload)
|
||||
objectsBefore, err := planet.Satellites[0].Metainfo.Metabase.TestingAllObjects(ctx)
|
||||
objectsBefore, err := planet.Satellites[0].Metabase.DB.TestingAllObjects(ctx)
|
||||
require.NoError(t, err)
|
||||
|
||||
segmentsBefore, err := planet.Satellites[0].Metainfo.Metabase.TestingAllSegments(ctx)
|
||||
segmentsBefore, err := planet.Satellites[0].Metabase.DB.TestingAllSegments(ctx)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Cause repair traffic
|
||||
@ -269,7 +269,7 @@ func TestBilling_AuditRepairTraffic(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
|
||||
// get the only metainfo record (our upload)
|
||||
segments, err := planet.Satellites[0].Metainfo.Metabase.TestingAllSegments(ctx)
|
||||
segments, err := planet.Satellites[0].Metabase.DB.TestingAllSegments(ctx)
|
||||
require.NoError(t, err)
|
||||
|
||||
require.NotEqual(t, segmentsBefore[0], segments[0])
|
||||
|
@ -44,7 +44,7 @@ func TestCalculateNodeAtRestData(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
|
||||
obs := nodetally.NewObserver(planet.Satellites[0].Log.Named("observer"), time.Now())
|
||||
err = planet.Satellites[0].Metainfo.SegmentLoop.Join(ctx, obs)
|
||||
err = planet.Satellites[0].Metabase.SegmentLoop.Join(ctx, obs)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Confirm the correct number of shares were stored
|
||||
|
@ -590,7 +590,7 @@ func TestProjectUsage_FreeUsedStorageSpace(t *testing.T) {
|
||||
err = planet.Uplinks[0].Upload(ctx, planet.Satellites[0], "bucket", "1", data)
|
||||
require.NoError(t, err)
|
||||
|
||||
segments, err := planet.Satellites[0].Metainfo.Metabase.TestingAllSegments(ctx)
|
||||
segments, err := planet.Satellites[0].Metabase.DB.TestingAllSegments(ctx)
|
||||
require.NoError(t, err)
|
||||
|
||||
usage, err := accounting.ProjectUsage.GetProjectStorageTotals(ctx, project.ID)
|
||||
|
@ -96,7 +96,7 @@ func TestOnlyInline(t *testing.T) {
|
||||
|
||||
// run multiple times to ensure we add tallies
|
||||
for i := 0; i < 2; i++ {
|
||||
collector := tally.NewBucketTallyCollector(planet.Satellites[0].Log.Named("bucket tally"), time.Now(), planet.Satellites[0].Metainfo.Metabase, planet.Satellites[0].Config.Tally)
|
||||
collector := tally.NewBucketTallyCollector(planet.Satellites[0].Log.Named("bucket tally"), time.Now(), planet.Satellites[0].Metabase.DB, planet.Satellites[0].Config.Tally)
|
||||
err := collector.Run(ctx)
|
||||
require.NoError(t, err)
|
||||
|
||||
@ -139,10 +139,10 @@ func TestCalculateBucketAtRestData(t *testing.T) {
|
||||
err = planet.Uplinks[1].Upload(ctx, satellite, "alpha", "remote", make([]byte, 30*memory.KiB))
|
||||
require.NoError(t, err)
|
||||
|
||||
objects, err := satellite.Metainfo.Metabase.TestingAllObjects(ctx)
|
||||
objects, err := satellite.Metabase.DB.TestingAllObjects(ctx)
|
||||
require.NoError(t, err)
|
||||
|
||||
segments, err := satellite.Metainfo.Metabase.TestingAllSegments(ctx)
|
||||
segments, err := satellite.Metabase.DB.TestingAllSegments(ctx)
|
||||
require.NoError(t, err)
|
||||
|
||||
expectedTotal := map[metabase.BucketLocation]*accounting.BucketTally{}
|
||||
@ -171,7 +171,7 @@ func TestCalculateBucketAtRestData(t *testing.T) {
|
||||
}
|
||||
require.Len(t, expectedTotal, 3)
|
||||
|
||||
collector := tally.NewBucketTallyCollector(satellite.Log.Named("bucket tally"), time.Now(), satellite.Metainfo.Metabase, planet.Satellites[0].Config.Tally)
|
||||
collector := tally.NewBucketTallyCollector(satellite.Log.Named("bucket tally"), time.Now(), satellite.Metabase.DB, planet.Satellites[0].Config.Tally)
|
||||
err = collector.Run(ctx)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, expectedTotal, collector.Bucket)
|
||||
@ -188,7 +188,7 @@ func TestTallyIgnoresExpiredPointers(t *testing.T) {
|
||||
err := planet.Uplinks[0].UploadWithExpiration(ctx, planet.Satellites[0], "bucket", "path", []byte{1}, now.Add(12*time.Hour))
|
||||
require.NoError(t, err)
|
||||
|
||||
collector := tally.NewBucketTallyCollector(satellite.Log.Named("bucket tally"), now.Add(24*time.Hour), satellite.Metainfo.Metabase, planet.Satellites[0].Config.Tally)
|
||||
collector := tally.NewBucketTallyCollector(satellite.Log.Named("bucket tally"), now.Add(24*time.Hour), satellite.Metabase.DB, planet.Satellites[0].Config.Tally)
|
||||
err = collector.Run(ctx)
|
||||
require.NoError(t, err)
|
||||
|
||||
@ -210,7 +210,7 @@ func TestTallyLiveAccounting(t *testing.T) {
|
||||
err := planet.Uplinks[0].Upload(ctx, planet.Satellites[0], "testbucket", "test/path", expectedData)
|
||||
require.NoError(t, err)
|
||||
|
||||
segments, err := planet.Satellites[0].Metainfo.Metabase.TestingAllSegments(ctx)
|
||||
segments, err := planet.Satellites[0].Metabase.DB.TestingAllSegments(ctx)
|
||||
require.NoError(t, err)
|
||||
require.Len(t, segments, 1)
|
||||
|
||||
|
@ -51,7 +51,7 @@ func TestAuditCollector(t *testing.T) {
|
||||
|
||||
r := rand.New(rand.NewSource(time.Now().Unix()))
|
||||
observer := audit.NewCollector(4, r)
|
||||
err := satellite.Metainfo.SegmentLoop.Join(ctx, observer)
|
||||
err := satellite.Metabase.SegmentLoop.Join(ctx, observer)
|
||||
require.NoError(t, err)
|
||||
|
||||
for _, node := range planet.StorageNodes {
|
||||
|
@ -130,7 +130,7 @@ func TestDisqualifiedNodesGetNoDownload(t *testing.T) {
|
||||
|
||||
bucket := metabase.BucketLocation{ProjectID: uplinkPeer.Projects[0].ID, BucketName: "testbucket"}
|
||||
|
||||
segments, err := satellitePeer.Metainfo.Metabase.TestingAllSegments(ctx)
|
||||
segments, err := satellitePeer.Metabase.DB.TestingAllSegments(ctx)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 1, len(segments))
|
||||
|
||||
|
@ -58,7 +58,7 @@ func reformVerifierWithMockConnector(t testing.TB, sat *testplanet.Satellite, mo
|
||||
|
||||
verifier := audit.NewVerifier(
|
||||
zaptest.NewLogger(t).Named("a-special-verifier"),
|
||||
sat.Metainfo.Metabase,
|
||||
sat.Metabase.DB,
|
||||
newDialer,
|
||||
sat.Overlay.Service,
|
||||
sat.DB.Containment(),
|
||||
@ -92,7 +92,7 @@ func TestGetShareDoesNameLookupIfNecessary(t *testing.T) {
|
||||
queueSegment, err := queue.Next()
|
||||
require.NoError(t, err)
|
||||
|
||||
segment, err := testSatellite.Metainfo.Metabase.GetSegmentByPosition(ctx, metabase.GetSegmentByPosition{
|
||||
segment, err := testSatellite.Metabase.DB.GetSegmentByPosition(ctx, metabase.GetSegmentByPosition{
|
||||
StreamID: queueSegment.StreamID,
|
||||
Position: queueSegment.Position,
|
||||
})
|
||||
@ -146,7 +146,7 @@ func TestGetSharePrefers(t *testing.T) {
|
||||
queueSegment, err := queue.Next()
|
||||
require.NoError(t, err)
|
||||
|
||||
segment, err := testSatellite.Metainfo.Metabase.GetSegmentByPosition(ctx, metabase.GetSegmentByPosition{
|
||||
segment, err := testSatellite.Metabase.DB.GetSegmentByPosition(ctx, metabase.GetSegmentByPosition{
|
||||
StreamID: queueSegment.StreamID,
|
||||
Position: queueSegment.Position,
|
||||
})
|
||||
|
@ -56,7 +56,7 @@ func TestReverifySuccess(t *testing.T) {
|
||||
queueSegment, err := queue.Next()
|
||||
require.NoError(t, err)
|
||||
|
||||
segment, err := satellite.Metainfo.Metabase.GetSegmentByPosition(ctx, metabase.GetSegmentByPosition{
|
||||
segment, err := satellite.Metabase.DB.GetSegmentByPosition(ctx, metabase.GetSegmentByPosition{
|
||||
StreamID: queueSegment.StreamID,
|
||||
Position: queueSegment.Position,
|
||||
})
|
||||
@ -135,7 +135,7 @@ func TestReverifyFailMissingShare(t *testing.T) {
|
||||
queueSegment, err := queue.Next()
|
||||
require.NoError(t, err)
|
||||
|
||||
segment, err := satellite.Metainfo.Metabase.GetSegmentByPosition(ctx, metabase.GetSegmentByPosition{
|
||||
segment, err := satellite.Metabase.DB.GetSegmentByPosition(ctx, metabase.GetSegmentByPosition{
|
||||
StreamID: queueSegment.StreamID,
|
||||
Position: queueSegment.Position,
|
||||
})
|
||||
@ -221,7 +221,7 @@ func TestReverifyFailBadData(t *testing.T) {
|
||||
queueSegment, err := queue.Next()
|
||||
require.NoError(t, err)
|
||||
|
||||
segment, err := satellite.Metainfo.Metabase.GetSegmentByPosition(ctx, metabase.GetSegmentByPosition{
|
||||
segment, err := satellite.Metabase.DB.GetSegmentByPosition(ctx, metabase.GetSegmentByPosition{
|
||||
StreamID: queueSegment.StreamID,
|
||||
Position: queueSegment.Position,
|
||||
})
|
||||
@ -293,7 +293,7 @@ func TestReverifyOffline(t *testing.T) {
|
||||
queueSegment, err := queue.Next()
|
||||
require.NoError(t, err)
|
||||
|
||||
segment, err := satellite.Metainfo.Metabase.GetSegmentByPosition(ctx, metabase.GetSegmentByPosition{
|
||||
segment, err := satellite.Metabase.DB.GetSegmentByPosition(ctx, metabase.GetSegmentByPosition{
|
||||
StreamID: queueSegment.StreamID,
|
||||
Position: queueSegment.Position,
|
||||
})
|
||||
@ -367,7 +367,7 @@ func TestReverifyOfflineDialTimeout(t *testing.T) {
|
||||
queueSegment, err := queue.Next()
|
||||
require.NoError(t, err)
|
||||
|
||||
segment, err := satellite.Metainfo.Metabase.GetSegmentByPosition(ctx, metabase.GetSegmentByPosition{
|
||||
segment, err := satellite.Metabase.DB.GetSegmentByPosition(ctx, metabase.GetSegmentByPosition{
|
||||
StreamID: queueSegment.StreamID,
|
||||
Position: queueSegment.Position,
|
||||
})
|
||||
@ -394,7 +394,7 @@ func TestReverifyOfflineDialTimeout(t *testing.T) {
|
||||
|
||||
verifier := audit.NewVerifier(
|
||||
satellite.Log.Named("verifier"),
|
||||
satellite.Metainfo.Metabase,
|
||||
satellite.Metabase.DB,
|
||||
dialer,
|
||||
satellite.Overlay.Service,
|
||||
satellite.DB.Containment(),
|
||||
@ -469,7 +469,7 @@ func TestReverifyDeletedSegment(t *testing.T) {
|
||||
queueSegment, err := queue.Next()
|
||||
require.NoError(t, err)
|
||||
|
||||
segment, err := satellite.Metainfo.Metabase.GetSegmentByPosition(ctx, metabase.GetSegmentByPosition{
|
||||
segment, err := satellite.Metabase.DB.GetSegmentByPosition(ctx, metabase.GetSegmentByPosition{
|
||||
StreamID: queueSegment.StreamID,
|
||||
Position: queueSegment.Position,
|
||||
})
|
||||
@ -559,7 +559,7 @@ func TestReverifyModifiedSegment(t *testing.T) {
|
||||
queueSegment, err := queue.Next()
|
||||
require.NoError(t, err)
|
||||
|
||||
segment, err := satellite.Metainfo.Metabase.GetSegmentByPosition(ctx, metabase.GetSegmentByPosition{
|
||||
segment, err := satellite.Metabase.DB.GetSegmentByPosition(ctx, metabase.GetSegmentByPosition{
|
||||
StreamID: queueSegment.StreamID,
|
||||
Position: queueSegment.Position,
|
||||
})
|
||||
@ -587,7 +587,7 @@ func TestReverifyModifiedSegment(t *testing.T) {
|
||||
|
||||
// remove a piece from the file (a piece that the contained node isn't holding)
|
||||
audits.Verifier.OnTestingCheckSegmentAlteredHook = func() {
|
||||
err = satellite.Metainfo.Metabase.UpdateSegmentPieces(ctx, metabase.UpdateSegmentPieces{
|
||||
err = satellite.Metabase.DB.UpdateSegmentPieces(ctx, metabase.UpdateSegmentPieces{
|
||||
StreamID: queueSegment.StreamID,
|
||||
Position: queueSegment.Position,
|
||||
OldPieces: segment.Pieces,
|
||||
@ -656,7 +656,7 @@ func TestReverifyReplacedSegment(t *testing.T) {
|
||||
queueSegment, err := queue.Next()
|
||||
require.NoError(t, err)
|
||||
|
||||
segment, err := satellite.Metainfo.Metabase.GetSegmentByPosition(ctx, metabase.GetSegmentByPosition{
|
||||
segment, err := satellite.Metabase.DB.GetSegmentByPosition(ctx, metabase.GetSegmentByPosition{
|
||||
StreamID: queueSegment.StreamID,
|
||||
Position: queueSegment.Position,
|
||||
})
|
||||
@ -757,13 +757,13 @@ func TestReverifyDifferentShare(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
require.NotEqual(t, queueSegment1, queueSegment2)
|
||||
|
||||
segment1, err := satellite.Metainfo.Metabase.GetSegmentByPosition(ctx, metabase.GetSegmentByPosition{
|
||||
segment1, err := satellite.Metabase.DB.GetSegmentByPosition(ctx, metabase.GetSegmentByPosition{
|
||||
StreamID: queueSegment1.StreamID,
|
||||
Position: queueSegment1.Position,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
segment2, err := satellite.Metainfo.Metabase.GetSegmentByPosition(ctx, metabase.GetSegmentByPosition{
|
||||
segment2, err := satellite.Metabase.DB.GetSegmentByPosition(ctx, metabase.GetSegmentByPosition{
|
||||
StreamID: queueSegment2.StreamID,
|
||||
Position: queueSegment2.Position,
|
||||
})
|
||||
@ -916,13 +916,13 @@ func TestReverifyExpired2(t *testing.T) {
|
||||
queueSegment1, queueSegment2 = queueSegment2, queueSegment1
|
||||
}
|
||||
|
||||
segment1, err := satellite.Metainfo.Metabase.GetSegmentByPosition(ctx, metabase.GetSegmentByPosition{
|
||||
segment1, err := satellite.Metabase.DB.GetSegmentByPosition(ctx, metabase.GetSegmentByPosition{
|
||||
StreamID: queueSegment1.StreamID,
|
||||
Position: queueSegment1.Position,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
segment2, err := satellite.Metainfo.Metabase.GetSegmentByPosition(ctx, metabase.GetSegmentByPosition{
|
||||
segment2, err := satellite.Metabase.DB.GetSegmentByPosition(ctx, metabase.GetSegmentByPosition{
|
||||
StreamID: queueSegment2.StreamID,
|
||||
Position: queueSegment2.Position,
|
||||
})
|
||||
@ -1033,7 +1033,7 @@ func TestReverifySlowDownload(t *testing.T) {
|
||||
queueSegment, err := queue.Next()
|
||||
require.NoError(t, err)
|
||||
|
||||
segment, err := satellite.Metainfo.Metabase.GetSegmentByPosition(ctx, metabase.GetSegmentByPosition{
|
||||
segment, err := satellite.Metabase.DB.GetSegmentByPosition(ctx, metabase.GetSegmentByPosition{
|
||||
StreamID: queueSegment.StreamID,
|
||||
Position: queueSegment.Position,
|
||||
})
|
||||
@ -1122,7 +1122,7 @@ func TestReverifyUnknownError(t *testing.T) {
|
||||
queueSegment, err := queue.Next()
|
||||
require.NoError(t, err)
|
||||
|
||||
segment, err := satellite.Metainfo.Metabase.GetSegmentByPosition(ctx, metabase.GetSegmentByPosition{
|
||||
segment, err := satellite.Metabase.DB.GetSegmentByPosition(ctx, metabase.GetSegmentByPosition{
|
||||
StreamID: queueSegment.StreamID,
|
||||
Position: queueSegment.Position,
|
||||
})
|
||||
@ -1214,7 +1214,7 @@ func TestMaxReverifyCount(t *testing.T) {
|
||||
queueSegment, err := queue.Next()
|
||||
require.NoError(t, err)
|
||||
|
||||
segment, err := satellite.Metainfo.Metabase.GetSegmentByPosition(ctx, metabase.GetSegmentByPosition{
|
||||
segment, err := satellite.Metabase.DB.GetSegmentByPosition(ctx, metabase.GetSegmentByPosition{
|
||||
StreamID: queueSegment.StreamID,
|
||||
Position: queueSegment.Position,
|
||||
})
|
||||
|
@ -53,7 +53,7 @@ func TestDownloadSharesHappyPath(t *testing.T) {
|
||||
queueSegment, err := queue.Next()
|
||||
require.NoError(t, err)
|
||||
|
||||
segment, err := satellite.Metainfo.Metabase.GetSegmentByPosition(ctx, metabase.GetSegmentByPosition{
|
||||
segment, err := satellite.Metabase.DB.GetSegmentByPosition(ctx, metabase.GetSegmentByPosition{
|
||||
StreamID: queueSegment.StreamID,
|
||||
Position: queueSegment.Position,
|
||||
})
|
||||
@ -106,7 +106,7 @@ func TestDownloadSharesOfflineNode(t *testing.T) {
|
||||
queueSegment, err := queue.Next()
|
||||
require.NoError(t, err)
|
||||
|
||||
segment, err := satellite.Metainfo.Metabase.GetSegmentByPosition(ctx, metabase.GetSegmentByPosition{
|
||||
segment, err := satellite.Metabase.DB.GetSegmentByPosition(ctx, metabase.GetSegmentByPosition{
|
||||
StreamID: queueSegment.StreamID,
|
||||
Position: queueSegment.Position,
|
||||
})
|
||||
@ -167,7 +167,7 @@ func TestDownloadSharesMissingPiece(t *testing.T) {
|
||||
queueSegment, err := queue.Next()
|
||||
require.NoError(t, err)
|
||||
|
||||
segment, err := satellite.Metainfo.Metabase.GetSegmentByPosition(ctx, metabase.GetSegmentByPosition{
|
||||
segment, err := satellite.Metabase.DB.GetSegmentByPosition(ctx, metabase.GetSegmentByPosition{
|
||||
StreamID: queueSegment.StreamID,
|
||||
Position: queueSegment.Position,
|
||||
})
|
||||
@ -224,7 +224,7 @@ func TestDownloadSharesDialTimeout(t *testing.T) {
|
||||
queueSegment, err := queue.Next()
|
||||
require.NoError(t, err)
|
||||
|
||||
segment, err := satellite.Metainfo.Metabase.GetSegmentByPosition(ctx, metabase.GetSegmentByPosition{
|
||||
segment, err := satellite.Metabase.DB.GetSegmentByPosition(ctx, metabase.GetSegmentByPosition{
|
||||
StreamID: queueSegment.StreamID,
|
||||
Position: queueSegment.Position,
|
||||
})
|
||||
@ -250,7 +250,7 @@ func TestDownloadSharesDialTimeout(t *testing.T) {
|
||||
|
||||
verifier := audit.NewVerifier(
|
||||
satellite.Log.Named("verifier"),
|
||||
satellite.Metainfo.Metabase,
|
||||
satellite.Metabase.DB,
|
||||
dialer,
|
||||
satellite.Overlay.Service,
|
||||
satellite.DB.Containment(),
|
||||
@ -310,7 +310,7 @@ func TestDownloadSharesDownloadTimeout(t *testing.T) {
|
||||
queueSegment, err := queue.Next()
|
||||
require.NoError(t, err)
|
||||
|
||||
segment, err := satellite.Metainfo.Metabase.GetSegmentByPosition(ctx, metabase.GetSegmentByPosition{
|
||||
segment, err := satellite.Metabase.DB.GetSegmentByPosition(ctx, metabase.GetSegmentByPosition{
|
||||
StreamID: queueSegment.StreamID,
|
||||
Position: queueSegment.Position,
|
||||
})
|
||||
@ -325,7 +325,7 @@ func TestDownloadSharesDownloadTimeout(t *testing.T) {
|
||||
|
||||
verifier := audit.NewVerifier(
|
||||
satellite.Log.Named("verifier"),
|
||||
satellite.Metainfo.Metabase,
|
||||
satellite.Metabase.DB,
|
||||
satellite.Dialer,
|
||||
satellite.Overlay.Service,
|
||||
satellite.DB.Containment(),
|
||||
@ -374,7 +374,7 @@ func TestVerifierHappyPath(t *testing.T) {
|
||||
queueSegment, err := queue.Next()
|
||||
require.NoError(t, err)
|
||||
|
||||
segment, err := satellite.Metainfo.Metabase.GetSegmentByPosition(ctx, metabase.GetSegmentByPosition{
|
||||
segment, err := satellite.Metabase.DB.GetSegmentByPosition(ctx, metabase.GetSegmentByPosition{
|
||||
StreamID: queueSegment.StreamID,
|
||||
Position: queueSegment.Position,
|
||||
})
|
||||
@ -449,7 +449,7 @@ func TestVerifierOfflineNode(t *testing.T) {
|
||||
queueSegment, err := queue.Next()
|
||||
require.NoError(t, err)
|
||||
|
||||
segment, err := satellite.Metainfo.Metabase.GetSegmentByPosition(ctx, metabase.GetSegmentByPosition{
|
||||
segment, err := satellite.Metabase.DB.GetSegmentByPosition(ctx, metabase.GetSegmentByPosition{
|
||||
StreamID: queueSegment.StreamID,
|
||||
Position: queueSegment.Position,
|
||||
})
|
||||
@ -491,7 +491,7 @@ func TestVerifierMissingPiece(t *testing.T) {
|
||||
queueSegment, err := queue.Next()
|
||||
require.NoError(t, err)
|
||||
|
||||
segment, err := satellite.Metainfo.Metabase.GetSegmentByPosition(ctx, metabase.GetSegmentByPosition{
|
||||
segment, err := satellite.Metabase.DB.GetSegmentByPosition(ctx, metabase.GetSegmentByPosition{
|
||||
StreamID: queueSegment.StreamID,
|
||||
Position: queueSegment.Position,
|
||||
})
|
||||
@ -542,7 +542,7 @@ func TestVerifierNotEnoughPieces(t *testing.T) {
|
||||
queueSegment, err := queue.Next()
|
||||
require.NoError(t, err)
|
||||
|
||||
segment, err := satellite.Metainfo.Metabase.GetSegmentByPosition(ctx, metabase.GetSegmentByPosition{
|
||||
segment, err := satellite.Metabase.DB.GetSegmentByPosition(ctx, metabase.GetSegmentByPosition{
|
||||
StreamID: queueSegment.StreamID,
|
||||
Position: queueSegment.Position,
|
||||
})
|
||||
@ -603,7 +603,7 @@ func TestVerifierDialTimeout(t *testing.T) {
|
||||
queueSegment, err := queue.Next()
|
||||
require.NoError(t, err)
|
||||
|
||||
segment, err := satellite.Metainfo.Metabase.GetSegmentByPosition(ctx, metabase.GetSegmentByPosition{
|
||||
segment, err := satellite.Metabase.DB.GetSegmentByPosition(ctx, metabase.GetSegmentByPosition{
|
||||
StreamID: queueSegment.StreamID,
|
||||
Position: queueSegment.Position,
|
||||
})
|
||||
@ -626,7 +626,7 @@ func TestVerifierDialTimeout(t *testing.T) {
|
||||
|
||||
verifier := audit.NewVerifier(
|
||||
satellite.Log.Named("verifier"),
|
||||
satellite.Metainfo.Metabase,
|
||||
satellite.Metabase.DB,
|
||||
dialer,
|
||||
satellite.Overlay.Service,
|
||||
satellite.DB.Containment(),
|
||||
@ -707,13 +707,13 @@ func TestVerifierModifiedSegment(t *testing.T) {
|
||||
var segment metabase.Segment
|
||||
audits.Verifier.OnTestingCheckSegmentAlteredHook = func() {
|
||||
// remove one piece from the segment so that checkIfSegmentAltered fails
|
||||
segment, err = satellite.Metainfo.Metabase.GetSegmentByPosition(ctx, metabase.GetSegmentByPosition{
|
||||
segment, err = satellite.Metabase.DB.GetSegmentByPosition(ctx, metabase.GetSegmentByPosition{
|
||||
StreamID: queueSegment.StreamID,
|
||||
Position: queueSegment.Position,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
err = satellite.Metainfo.Metabase.UpdateSegmentPieces(ctx, metabase.UpdateSegmentPieces{
|
||||
err = satellite.Metabase.DB.UpdateSegmentPieces(ctx, metabase.UpdateSegmentPieces{
|
||||
StreamID: queueSegment.StreamID,
|
||||
Position: queueSegment.Position,
|
||||
OldPieces: segment.Pieces,
|
||||
@ -757,7 +757,7 @@ func TestVerifierReplacedSegment(t *testing.T) {
|
||||
segment, err := queue.Next()
|
||||
require.NoError(t, err)
|
||||
|
||||
segmentInfo, err := satellite.Metainfo.Metabase.GetSegmentByPosition(ctx, metabase.GetSegmentByPosition{
|
||||
segmentInfo, err := satellite.Metabase.DB.GetSegmentByPosition(ctx, metabase.GetSegmentByPosition{
|
||||
StreamID: segment.StreamID,
|
||||
Position: segment.Position,
|
||||
})
|
||||
@ -803,7 +803,7 @@ func TestVerifierModifiedSegmentFailsOnce(t *testing.T) {
|
||||
queueSegment, err := queue.Next()
|
||||
require.NoError(t, err)
|
||||
|
||||
segment, err := satellite.Metainfo.Metabase.GetSegmentByPosition(ctx, metabase.GetSegmentByPosition{
|
||||
segment, err := satellite.Metabase.DB.GetSegmentByPosition(ctx, metabase.GetSegmentByPosition{
|
||||
StreamID: queueSegment.StreamID,
|
||||
Position: queueSegment.Position,
|
||||
})
|
||||
@ -866,7 +866,7 @@ func TestVerifierSlowDownload(t *testing.T) {
|
||||
queueSegment, err := queue.Next()
|
||||
require.NoError(t, err)
|
||||
|
||||
segment, err := satellite.Metainfo.Metabase.GetSegmentByPosition(ctx, metabase.GetSegmentByPosition{
|
||||
segment, err := satellite.Metabase.DB.GetSegmentByPosition(ctx, metabase.GetSegmentByPosition{
|
||||
StreamID: queueSegment.StreamID,
|
||||
Position: queueSegment.Position,
|
||||
})
|
||||
@ -921,7 +921,7 @@ func TestVerifierUnknownError(t *testing.T) {
|
||||
queueSegment, err := queue.Next()
|
||||
require.NoError(t, err)
|
||||
|
||||
segment, err := satellite.Metainfo.Metabase.GetSegmentByPosition(ctx, metabase.GetSegmentByPosition{
|
||||
segment, err := satellite.Metabase.DB.GetSegmentByPosition(ctx, metabase.GetSegmentByPosition{
|
||||
StreamID: queueSegment.StreamID,
|
||||
Position: queueSegment.Position,
|
||||
})
|
||||
|
@ -86,7 +86,7 @@ func TestGarbageCollection(t *testing.T) {
|
||||
require.NotZero(t, keptPieceID)
|
||||
|
||||
// Delete one object from metainfo service on satellite
|
||||
_, err = satellite.Metainfo.Metabase.DeleteObjectsAllVersions(ctx, metabase.DeleteObjectsAllVersions{
|
||||
_, err = satellite.Metabase.DB.DeleteObjectsAllVersions(ctx, metabase.DeleteObjectsAllVersions{
|
||||
Locations: []metabase.ObjectLocation{objectLocationToDelete},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
@ -150,7 +150,7 @@ func getSegment(ctx *testcontext.Context, t *testing.T, satellite *testplanet.Sa
|
||||
ObjectKey: metabase.ObjectKey(encryptedPath.Raw()),
|
||||
}
|
||||
|
||||
lastSegment, err := satellite.Metainfo.Metabase.GetLatestObjectLastSegment(ctx, metabase.GetLatestObjectLastSegment{
|
||||
lastSegment, err := satellite.Metabase.DB.GetLatestObjectLastSegment(ctx, metabase.GetLatestObjectLastSegment{
|
||||
ObjectLocation: objectLocation,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
@ -203,7 +203,7 @@ func TestGarbageCollection_PendingObject(t *testing.T) {
|
||||
testData := testrand.Bytes(15 * memory.KiB)
|
||||
pendingStreamID := startMultipartUpload(ctx, t, upl, satellite, "testbucket", "multi", testData)
|
||||
|
||||
segments, err := satellite.Metainfo.Metabase.TestingAllSegments(ctx)
|
||||
segments, err := satellite.Metabase.DB.TestingAllSegments(ctx)
|
||||
require.NoError(t, err)
|
||||
require.Len(t, segments, 1)
|
||||
require.Len(t, segments[0].Pieces, 1)
|
||||
@ -220,7 +220,7 @@ func TestGarbageCollection_PendingObject(t *testing.T) {
|
||||
InitialPieces: 10,
|
||||
}, lastPieceCounts)
|
||||
|
||||
err = satellite.Metainfo.SegmentLoop.Join(ctx, pieceTracker)
|
||||
err = satellite.Metabase.SegmentLoop.Join(ctx, pieceTracker)
|
||||
require.NoError(t, err)
|
||||
|
||||
require.NotEmpty(t, pieceTracker.RetainInfos)
|
||||
|
@ -196,7 +196,7 @@ func TestDurabilityRatio(t *testing.T) {
|
||||
require.Len(t, nodeIDs, 1)
|
||||
|
||||
// retrieve remote segment
|
||||
segments, err := satellite.Metainfo.Metabase.TestingAllSegments(ctx)
|
||||
segments, err := satellite.Metabase.DB.TestingAllSegments(ctx)
|
||||
require.NoError(t, err)
|
||||
require.Len(t, segments, 2)
|
||||
|
||||
@ -210,7 +210,7 @@ func TestDurabilityRatio(t *testing.T) {
|
||||
idx++
|
||||
}
|
||||
}
|
||||
err = satellite.Metainfo.Metabase.UpdateSegmentPieces(ctx, metabase.UpdateSegmentPieces{
|
||||
err = satellite.Metabase.DB.UpdateSegmentPieces(ctx, metabase.UpdateSegmentPieces{
|
||||
StreamID: segment.StreamID,
|
||||
Position: segment.Position,
|
||||
|
||||
|
@ -813,7 +813,7 @@ func testSuccessSegmentUpdate(t *testing.T, ctx *testcontext.Context, nodeFullID
|
||||
// even though we failed 1, it eventually succeeded, so the count should be 0
|
||||
require.EqualValues(t, 0, progress.PiecesFailed)
|
||||
|
||||
segments, err := satellite.Metainfo.Metabase.TestingAllSegments(ctx)
|
||||
segments, err := satellite.Metabase.DB.TestingAllSegments(ctx)
|
||||
require.NoError(t, err)
|
||||
require.Len(t, segments, 1)
|
||||
found := 0
|
||||
@ -887,7 +887,7 @@ func testUpdateSegmentFailureDuplicatedNodeID(t *testing.T, ctx *testcontext.Con
|
||||
}
|
||||
|
||||
// update segment to include the new receiving node before responding to satellite
|
||||
segments, err := satellite.Metainfo.Metabase.TestingAllSegments(ctx)
|
||||
segments, err := satellite.Metabase.DB.TestingAllSegments(ctx)
|
||||
require.NoError(t, err)
|
||||
require.Len(t, segments, 1)
|
||||
require.True(t, len(segments[0].Pieces) > 0)
|
||||
@ -930,7 +930,7 @@ func testUpdateSegmentFailureDuplicatedNodeID(t *testing.T, ctx *testcontext.Con
|
||||
}
|
||||
|
||||
// check exiting node is still in the segment
|
||||
segments, err := satellite.Metainfo.Metabase.TestingAllSegments(ctx)
|
||||
segments, err := satellite.Metabase.DB.TestingAllSegments(ctx)
|
||||
require.NoError(t, err)
|
||||
require.Len(t, segments, 1)
|
||||
|
||||
@ -1232,7 +1232,7 @@ func TestFailureNotFound(t *testing.T) {
|
||||
|
||||
// check that node is no longer in the segment
|
||||
|
||||
segments, err := satellite.Metainfo.Metabase.TestingAllSegments(ctx)
|
||||
segments, err := satellite.Metabase.DB.TestingAllSegments(ctx)
|
||||
require.NoError(t, err)
|
||||
require.Len(t, segments, 1)
|
||||
|
||||
@ -1545,7 +1545,7 @@ func findNodeToExit(ctx context.Context, planet *testplanet.Planet, objects int)
|
||||
pieceCountMap[node.ID()] = 0
|
||||
}
|
||||
|
||||
segments, err := satellite.Metainfo.Metabase.TestingAllSegments(ctx)
|
||||
segments, err := satellite.Metabase.DB.TestingAllSegments(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -1586,7 +1586,7 @@ func TestUpdatePiecesCheckDuplicates(t *testing.T) {
|
||||
err := uplinkPeer.Upload(ctx, satellite, "test1", path, testrand.Bytes(5*memory.KiB))
|
||||
require.NoError(t, err)
|
||||
|
||||
segments, err := satellite.Metainfo.Metabase.TestingAllSegments(ctx)
|
||||
segments, err := satellite.Metabase.DB.TestingAllSegments(ctx)
|
||||
require.NoError(t, err)
|
||||
require.Len(t, segments, 1)
|
||||
|
||||
|
@ -57,7 +57,7 @@ func TestInspectorStats(t *testing.T) {
|
||||
ObjectKey: metabase.ObjectKey(encryptedPath.Raw()),
|
||||
}
|
||||
|
||||
segment, err := satellite.Metainfo.Metabase.GetLatestObjectLastSegment(ctx, metabase.GetLatestObjectLastSegment{
|
||||
segment, err := satellite.Metabase.DB.GetLatestObjectLastSegment(ctx, metabase.GetLatestObjectLastSegment{
|
||||
ObjectLocation: objectLocation,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
@ -51,7 +51,7 @@ func TestSegmentsLoop(t *testing.T) {
|
||||
}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
|
||||
ul := planet.Uplinks[0]
|
||||
satellite := planet.Satellites[0]
|
||||
segmentLoop := satellite.Metainfo.SegmentLoop
|
||||
segmentLoop := satellite.Metabase.SegmentLoop
|
||||
|
||||
// upload 5 remote objects with 1 segment
|
||||
for i := 0; i < 5; i++ {
|
||||
@ -124,7 +124,7 @@ func TestSegmentsLoop_AllData(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
loop := planet.Satellites[0].Metainfo.SegmentLoop
|
||||
loop := planet.Satellites[0].Metabase.SegmentLoop
|
||||
|
||||
obs := newTestObserver(nil)
|
||||
err := loop.Join(ctx, obs)
|
||||
@ -158,7 +158,7 @@ func TestSegmentsLoopObserverCancel(t *testing.T) {
|
||||
}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
|
||||
ul := planet.Uplinks[0]
|
||||
satellite := planet.Satellites[0]
|
||||
loop := satellite.Metainfo.SegmentLoop
|
||||
loop := satellite.Metabase.SegmentLoop
|
||||
|
||||
// upload 3 remote files with 1 segment
|
||||
for i := 0; i < 3; i++ {
|
||||
@ -254,7 +254,7 @@ func TestSegmentsLoopCancel(t *testing.T) {
|
||||
loop := segmentloop.New(zaptest.NewLogger(t), segmentloop.Config{
|
||||
CoalesceDuration: 1 * time.Second,
|
||||
ListLimit: 10000,
|
||||
}, satellite.Metainfo.Metabase)
|
||||
}, satellite.Metabase.DB)
|
||||
|
||||
// create a cancelable context to pass into metaLoop.Run
|
||||
loopCtx, cancel := context.WithCancel(ctx)
|
||||
@ -326,7 +326,7 @@ func TestSegmentsLoop_MonitorCancel(t *testing.T) {
|
||||
loop := segmentloop.New(zaptest.NewLogger(t), segmentloop.Config{
|
||||
CoalesceDuration: time.Nanosecond,
|
||||
ListLimit: 10000,
|
||||
}, satellite.Metainfo.Metabase)
|
||||
}, satellite.Metabase.DB)
|
||||
|
||||
obs1 := newTestObserver(func(ctx context.Context) error {
|
||||
return errors.New("test error")
|
||||
|
@ -67,7 +67,7 @@ func TestZombieDeletion(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
|
||||
// Verify that all objects are in the metabase
|
||||
objects, err := planet.Satellites[0].Metainfo.Metabase.TestingAllObjects(ctx)
|
||||
objects, err := planet.Satellites[0].Metabase.DB.TestingAllObjects(ctx)
|
||||
require.NoError(t, err)
|
||||
require.Len(t, objects, 3)
|
||||
|
||||
@ -79,7 +79,7 @@ func TestZombieDeletion(t *testing.T) {
|
||||
zombieChore.Loop.TriggerWait()
|
||||
|
||||
// Verify that only one object remain in the metabase
|
||||
objects, err = planet.Satellites[0].Metainfo.Metabase.TestingAllObjects(ctx)
|
||||
objects, err = planet.Satellites[0].Metabase.DB.TestingAllObjects(ctx)
|
||||
require.NoError(t, err)
|
||||
require.Len(t, objects, 1)
|
||||
require.Equal(t, metabase.Committed, objects[0].Status)
|
||||
@ -125,17 +125,17 @@ func TestZombieDeletion_LastSegmentActive(t *testing.T) {
|
||||
err = partUpload.Commit()
|
||||
require.NoError(t, err)
|
||||
|
||||
objects, err := planet.Satellites[0].Metainfo.Metabase.TestingAllObjects(ctx)
|
||||
objects, err := planet.Satellites[0].Metabase.DB.TestingAllObjects(ctx)
|
||||
require.NoError(t, err)
|
||||
require.Len(t, objects, 1)
|
||||
require.Equal(t, metabase.Pending, objects[0].Status)
|
||||
|
||||
segments, err := planet.Satellites[0].Metainfo.Metabase.TestingAllSegments(ctx)
|
||||
segments, err := planet.Satellites[0].Metabase.DB.TestingAllSegments(ctx)
|
||||
require.NoError(t, err)
|
||||
require.Len(t, segments, 3)
|
||||
|
||||
// now we need to change creation dates for all segments
|
||||
db := planet.Satellites[0].Metainfo.Metabase.UnderlyingTagSQL()
|
||||
db := planet.Satellites[0].Metabase.DB.UnderlyingTagSQL()
|
||||
|
||||
// change object zombie_deletion_deadline to trigger full segments verification before deletion
|
||||
zombieDeletionDeadline := now.Add(-12 * time.Hour)
|
||||
@ -163,7 +163,7 @@ func TestZombieDeletion_LastSegmentActive(t *testing.T) {
|
||||
zombieChore.Loop.TriggerWait()
|
||||
|
||||
// no changes in DB, no segment or object was deleted as last segment was uploaded less then 24h ago
|
||||
afterObjects, err := planet.Satellites[0].Metainfo.Metabase.TestingAllObjects(ctx)
|
||||
afterObjects, err := planet.Satellites[0].Metabase.DB.TestingAllObjects(ctx)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Diff is used because DB manipulation changes value time zone and require.Equal
|
||||
@ -172,7 +172,7 @@ func TestZombieDeletion_LastSegmentActive(t *testing.T) {
|
||||
cmpopts.EquateApproxTime(1*time.Second))
|
||||
require.Zero(t, diff)
|
||||
|
||||
afterSegments, err := planet.Satellites[0].Metainfo.Metabase.TestingAllSegments(ctx)
|
||||
afterSegments, err := planet.Satellites[0].Metabase.DB.TestingAllSegments(ctx)
|
||||
require.NoError(t, err)
|
||||
diff = cmp.Diff(segments, afterSegments,
|
||||
cmpopts.EquateApproxTime(1*time.Second))
|
||||
|
@ -33,14 +33,14 @@ func TestEndpoint_DeleteCommittedObject(t *testing.T) {
|
||||
}
|
||||
deleteObject := func(ctx context.Context, t *testing.T, planet *testplanet.Planet) {
|
||||
projectID := planet.Uplinks[0].Projects[0].ID
|
||||
items, err := planet.Satellites[0].Metainfo.Metabase.TestingAllCommittedObjects(ctx, projectID, bucketName)
|
||||
items, err := planet.Satellites[0].Metabase.DB.TestingAllCommittedObjects(ctx, projectID, bucketName)
|
||||
require.NoError(t, err)
|
||||
require.Len(t, items, 1)
|
||||
|
||||
_, err = planet.Satellites[0].Metainfo.Endpoint.DeleteCommittedObject(ctx, projectID, bucketName, items[0].ObjectKey)
|
||||
require.NoError(t, err)
|
||||
|
||||
items, err = planet.Satellites[0].Metainfo.Metabase.TestingAllCommittedObjects(ctx, projectID, bucketName)
|
||||
items, err = planet.Satellites[0].Metabase.DB.TestingAllCommittedObjects(ctx, projectID, bucketName)
|
||||
require.NoError(t, err)
|
||||
require.Len(t, items, 0)
|
||||
}
|
||||
@ -68,7 +68,7 @@ func TestEndpoint_DeletePendingObject(t *testing.T) {
|
||||
}
|
||||
deleteObject := func(ctx context.Context, t *testing.T, planet *testplanet.Planet) {
|
||||
projectID := planet.Uplinks[0].Projects[0].ID
|
||||
items, err := planet.Satellites[0].Metainfo.Metabase.TestingAllPendingObjects(ctx, projectID, bucketName)
|
||||
items, err := planet.Satellites[0].Metabase.DB.TestingAllPendingObjects(ctx, projectID, bucketName)
|
||||
require.NoError(t, err)
|
||||
require.Len(t, items, 1)
|
||||
|
||||
@ -83,7 +83,7 @@ func TestEndpoint_DeletePendingObject(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
require.Len(t, deletedObjects, 1)
|
||||
|
||||
items, err = planet.Satellites[0].Metainfo.Metabase.TestingAllPendingObjects(ctx, projectID, bucketName)
|
||||
items, err = planet.Satellites[0].Metabase.DB.TestingAllPendingObjects(ctx, projectID, bucketName)
|
||||
require.NoError(t, err)
|
||||
require.Len(t, items, 0)
|
||||
}
|
||||
@ -98,7 +98,7 @@ func TestEndpoint_DeleteObjectAnyStatus(t *testing.T) {
|
||||
}
|
||||
deleteCommittedObject := func(ctx context.Context, t *testing.T, planet *testplanet.Planet) {
|
||||
projectID := planet.Uplinks[0].Projects[0].ID
|
||||
items, err := planet.Satellites[0].Metainfo.Metabase.TestingAllCommittedObjects(ctx, projectID, bucketName)
|
||||
items, err := planet.Satellites[0].Metabase.DB.TestingAllCommittedObjects(ctx, projectID, bucketName)
|
||||
require.NoError(t, err)
|
||||
require.Len(t, items, 1)
|
||||
|
||||
@ -110,7 +110,7 @@ func TestEndpoint_DeleteObjectAnyStatus(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
require.Len(t, deletedObjects, 1)
|
||||
|
||||
items, err = planet.Satellites[0].Metainfo.Metabase.TestingAllPendingObjects(ctx, projectID, bucketName)
|
||||
items, err = planet.Satellites[0].Metabase.DB.TestingAllPendingObjects(ctx, projectID, bucketName)
|
||||
require.NoError(t, err)
|
||||
require.Len(t, items, 0)
|
||||
}
|
||||
@ -136,7 +136,7 @@ func TestEndpoint_DeleteObjectAnyStatus(t *testing.T) {
|
||||
|
||||
deletePendingObject := func(ctx context.Context, t *testing.T, planet *testplanet.Planet) {
|
||||
projectID := planet.Uplinks[0].Projects[0].ID
|
||||
items, err := planet.Satellites[0].Metainfo.Metabase.TestingAllPendingObjects(ctx, projectID, bucketName)
|
||||
items, err := planet.Satellites[0].Metabase.DB.TestingAllPendingObjects(ctx, projectID, bucketName)
|
||||
require.NoError(t, err)
|
||||
require.Len(t, items, 1)
|
||||
|
||||
@ -148,7 +148,7 @@ func TestEndpoint_DeleteObjectAnyStatus(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
require.Len(t, deletedObjects, 1)
|
||||
|
||||
items, err = planet.Satellites[0].Metainfo.Metabase.TestingAllPendingObjects(ctx, projectID, bucketName)
|
||||
items, err = planet.Satellites[0].Metabase.DB.TestingAllPendingObjects(ctx, projectID, bucketName)
|
||||
require.NoError(t, err)
|
||||
require.Len(t, items, 0)
|
||||
}
|
||||
|
@ -47,7 +47,7 @@ func TestExpiredDeletion(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
|
||||
// Verify that all four objects are in the metabase
|
||||
objects, err := satellite.Metainfo.Metabase.TestingAllObjects(ctx)
|
||||
objects, err := satellite.Metabase.DB.TestingAllObjects(ctx)
|
||||
require.NoError(t, err)
|
||||
require.Len(t, objects, 4)
|
||||
|
||||
@ -59,7 +59,7 @@ func TestExpiredDeletion(t *testing.T) {
|
||||
expiredChore.Loop.TriggerWait()
|
||||
|
||||
// Verify that only two objects remain in the metabase
|
||||
objects, err = satellite.Metainfo.Metabase.TestingAllObjects(ctx)
|
||||
objects, err = satellite.Metabase.DB.TestingAllObjects(ctx)
|
||||
require.NoError(t, err)
|
||||
require.Len(t, objects, 2)
|
||||
})
|
||||
|
@ -729,7 +729,7 @@ func TestBeginCommit(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
|
||||
project := planet.Uplinks[0].Projects[0]
|
||||
allObjects, err := planet.Satellites[0].Metainfo.Metabase.TestingAllCommittedObjects(ctx, project.ID, object.Bucket)
|
||||
allObjects, err := planet.Satellites[0].Metabase.DB.TestingAllCommittedObjects(ctx, project.ID, object.Bucket)
|
||||
require.NoError(t, err)
|
||||
require.Len(t, allObjects, 1)
|
||||
})
|
||||
@ -1454,17 +1454,17 @@ func TestInlineSegmentThreshold(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
|
||||
// we don't know encrypted path
|
||||
segments, err := planet.Satellites[0].Metainfo.Metabase.TestingAllSegments(ctx)
|
||||
segments, err := planet.Satellites[0].Metabase.DB.TestingAllSegments(ctx)
|
||||
require.NoError(t, err)
|
||||
require.Len(t, segments, 1)
|
||||
require.Zero(t, segments[0].Redundancy)
|
||||
require.NotEmpty(t, segments[0].InlineData)
|
||||
|
||||
// clean up - delete the uploaded object
|
||||
objects, err := planet.Satellites[0].Metainfo.Metabase.TestingAllObjects(ctx)
|
||||
objects, err := planet.Satellites[0].Metabase.DB.TestingAllObjects(ctx)
|
||||
require.NoError(t, err)
|
||||
require.Len(t, objects, 1)
|
||||
_, err = planet.Satellites[0].Metainfo.Metabase.DeleteObjectLatestVersion(ctx, metabase.DeleteObjectLatestVersion{
|
||||
_, err = planet.Satellites[0].Metabase.DB.DeleteObjectLatestVersion(ctx, metabase.DeleteObjectLatestVersion{
|
||||
ObjectLocation: objects[0].Location(),
|
||||
})
|
||||
require.NoError(t, err)
|
||||
@ -1475,17 +1475,17 @@ func TestInlineSegmentThreshold(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
|
||||
// we don't know encrypted path
|
||||
segments, err := planet.Satellites[0].Metainfo.Metabase.TestingAllSegments(ctx)
|
||||
segments, err := planet.Satellites[0].Metabase.DB.TestingAllSegments(ctx)
|
||||
require.NoError(t, err)
|
||||
require.Len(t, segments, 1)
|
||||
require.NotZero(t, segments[0].Redundancy)
|
||||
require.Empty(t, segments[0].InlineData)
|
||||
|
||||
// clean up - delete the uploaded object
|
||||
objects, err := planet.Satellites[0].Metainfo.Metabase.TestingAllObjects(ctx)
|
||||
objects, err := planet.Satellites[0].Metabase.DB.TestingAllObjects(ctx)
|
||||
require.NoError(t, err)
|
||||
require.Len(t, objects, 1)
|
||||
_, err = planet.Satellites[0].Metainfo.Metabase.DeleteObjectLatestVersion(ctx, metabase.DeleteObjectLatestVersion{
|
||||
_, err = planet.Satellites[0].Metabase.DB.DeleteObjectLatestVersion(ctx, metabase.DeleteObjectLatestVersion{
|
||||
ObjectLocation: objects[0].Location(),
|
||||
})
|
||||
require.NoError(t, err)
|
||||
@ -1761,7 +1761,7 @@ func TestMultipartObjectDownloadRejection(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
defer ctx.Check(metainfoClient.Close)
|
||||
|
||||
objects, err := planet.Satellites[0].Metainfo.Metabase.TestingAllCommittedObjects(ctx, planet.Uplinks[0].Projects[0].ID, "pip-first")
|
||||
objects, err := planet.Satellites[0].Metabase.DB.TestingAllCommittedObjects(ctx, planet.Uplinks[0].Projects[0].ID, "pip-first")
|
||||
require.NoError(t, err)
|
||||
require.Len(t, objects, 1)
|
||||
|
||||
@ -1779,7 +1779,7 @@ func TestMultipartObjectDownloadRejection(t *testing.T) {
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
objects, err = planet.Satellites[0].Metainfo.Metabase.TestingAllCommittedObjects(ctx, planet.Uplinks[0].Projects[0].ID, "pip-second")
|
||||
objects, err = planet.Satellites[0].Metabase.DB.TestingAllCommittedObjects(ctx, planet.Uplinks[0].Projects[0].ID, "pip-second")
|
||||
require.NoError(t, err)
|
||||
require.Len(t, objects, 1)
|
||||
|
||||
@ -1798,7 +1798,7 @@ func TestMultipartObjectDownloadRejection(t *testing.T) {
|
||||
require.Error(t, err)
|
||||
require.Contains(t, err.Error(), "Used uplink version cannot download multipart objects.")
|
||||
|
||||
objects, err = planet.Satellites[0].Metainfo.Metabase.TestingAllCommittedObjects(ctx, planet.Uplinks[0].Projects[0].ID, "pip-third")
|
||||
objects, err = planet.Satellites[0].Metabase.DB.TestingAllCommittedObjects(ctx, planet.Uplinks[0].Projects[0].ID, "pip-third")
|
||||
require.NoError(t, err)
|
||||
require.Len(t, objects, 1)
|
||||
|
||||
@ -1949,7 +1949,7 @@ func TestObjectSegmentExpiresAt(t *testing.T) {
|
||||
err = planet.Uplinks[0].UploadWithExpiration(ctx, planet.Satellites[0], "hohoho", "remote_object", remoteData, remoteExpiration)
|
||||
require.NoError(t, err)
|
||||
|
||||
segments, err := planet.Satellites[0].Metainfo.Metabase.TestingAllSegments(ctx)
|
||||
segments, err := planet.Satellites[0].Metabase.DB.TestingAllSegments(ctx)
|
||||
require.NoError(t, err)
|
||||
require.Len(t, segments, 2)
|
||||
|
||||
|
@ -34,7 +34,7 @@ func TestOrderLimitsEncryptedMetadata(t *testing.T) {
|
||||
|
||||
bucket := metabase.BucketLocation{ProjectID: projectID, BucketName: bucketName}
|
||||
|
||||
segments, err := satellitePeer.Metainfo.Metabase.TestingAllSegments(ctx)
|
||||
segments, err := satellitePeer.Metabase.DB.TestingAllSegments(ctx)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 1, len(segments))
|
||||
|
||||
|
@ -154,7 +154,7 @@ func TestIdentifyIrreparableSegments(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
|
||||
expectedLocation.ObjectKey = "piece"
|
||||
_, err = planet.Satellites[0].Metainfo.Metabase.DeleteObjectLatestVersion(ctx, metabase.DeleteObjectLatestVersion{
|
||||
_, err = planet.Satellites[0].Metabase.DB.DeleteObjectLatestVersion(ctx, metabase.DeleteObjectLatestVersion{
|
||||
ObjectLocation: expectedLocation.Object(),
|
||||
})
|
||||
require.NoError(t, err)
|
||||
@ -284,7 +284,7 @@ func createLostPieces(planet *testplanet.Planet, rs storj.RedundancyScheme) meta
|
||||
}
|
||||
|
||||
func insertSegment(ctx context.Context, t *testing.T, planet *testplanet.Planet, rs storj.RedundancyScheme, location metabase.SegmentLocation, pieces metabase.Pieces, expiresAt *time.Time) uuid.UUID {
|
||||
metabaseDB := planet.Satellites[0].Metainfo.Metabase
|
||||
metabaseDB := planet.Satellites[0].Metabase.DB
|
||||
|
||||
obj := metabase.ObjectStream{
|
||||
ProjectID: location.ProjectID,
|
||||
|
@ -960,7 +960,7 @@ func testRepairMultipleDisqualifiedAndSuspended(t *testing.T, inMemoryRepair boo
|
||||
require.NoError(t, err)
|
||||
|
||||
// get a remote segment from metainfo
|
||||
segments, err := satellite.Metainfo.Metabase.TestingAllSegments(ctx)
|
||||
segments, err := satellite.Metabase.DB.TestingAllSegments(ctx)
|
||||
require.NoError(t, err)
|
||||
require.Len(t, segments, 1)
|
||||
require.False(t, segments[0].Inline())
|
||||
@ -1016,7 +1016,7 @@ func testRepairMultipleDisqualifiedAndSuspended(t *testing.T, inMemoryRepair boo
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, newData, testData)
|
||||
|
||||
segments, err = satellite.Metainfo.Metabase.TestingAllSegments(ctx)
|
||||
segments, err = satellite.Metabase.DB.TestingAllSegments(ctx)
|
||||
require.NoError(t, err)
|
||||
require.Len(t, segments, 1)
|
||||
|
||||
@ -1479,11 +1479,11 @@ func getRemoteSegment(
|
||||
) (_ metabase.Segment, key metabase.SegmentKey) {
|
||||
t.Helper()
|
||||
|
||||
objects, err := satellite.Metainfo.Metabase.TestingAllObjects(ctx)
|
||||
objects, err := satellite.Metabase.DB.TestingAllObjects(ctx)
|
||||
require.NoError(t, err)
|
||||
require.Len(t, objects, 1)
|
||||
|
||||
segments, err := satellite.Metainfo.Metabase.TestingAllSegments(ctx)
|
||||
segments, err := satellite.Metabase.DB.TestingAllSegments(ctx)
|
||||
require.NoError(t, err)
|
||||
require.Len(t, segments, 1)
|
||||
require.False(t, segments[0].Inline())
|
||||
@ -1600,7 +1600,7 @@ func TestECRepairerGetDoesNameLookupIfNecessary(t *testing.T) {
|
||||
queueSegment, err := queue.Next()
|
||||
require.NoError(t, err)
|
||||
|
||||
segment, err := testSatellite.Metainfo.Metabase.GetSegmentByPosition(ctx, metabase.GetSegmentByPosition{
|
||||
segment, err := testSatellite.Metabase.DB.GetSegmentByPosition(ctx, metabase.GetSegmentByPosition{
|
||||
StreamID: queueSegment.StreamID,
|
||||
Position: queueSegment.Position,
|
||||
})
|
||||
@ -1671,7 +1671,7 @@ func TestECRepairerGetPrefersCachedIPPort(t *testing.T) {
|
||||
queueSegment, err := queue.Next()
|
||||
require.NoError(t, err)
|
||||
|
||||
segment, err := testSatellite.Metainfo.Metabase.GetSegmentByPosition(ctx, metabase.GetSegmentByPosition{
|
||||
segment, err := testSatellite.Metabase.DB.GetSegmentByPosition(ctx, metabase.GetSegmentByPosition{
|
||||
StreamID: queueSegment.StreamID,
|
||||
Position: queueSegment.Position,
|
||||
})
|
||||
|
@ -161,7 +161,7 @@ func getNodePieceCounts(ctx context.Context, planet *testplanet.Planet) (_ map[s
|
||||
func findNodeToExit(ctx context.Context, planet *testplanet.Planet) (*testplanet.StorageNode, error) {
|
||||
satellite := planet.Satellites[0]
|
||||
|
||||
objects, err := satellite.Metainfo.Metabase.TestingAllSegments(ctx)
|
||||
objects, err := satellite.Metabase.DB.TestingAllSegments(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
Loading…
Reference in New Issue
Block a user