satellite/satellitedb: fix issue with shared memory on range for bucket rollups

A uuid.UUID is an array of bytes, and slicing it yields a slice that
refers to the array's underlying memory, much like taking its address.
Because range in Go reuses the same loop variable for every iteration,
slices taken from that variable all alias the same memory, so later
iterations would overwrite the project ids stored earlier. We fix that
by copying the value into a fresh variable before slicing it on every
loop iteration.

Change-Id: Iae3f11138d11a176ce360bd5af2244307c74fdad
Jeff Wendling 2020-01-23 21:57:00 -07:00
parent 2f77ce48f0
commit 665ed3b6b1
2 changed files with 60 additions and 4 deletions
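
As context for the fix below, here is a minimal, runnable sketch of the
aliasing the message describes. The names are hypothetical and a local
UUID type stands in for uuid.UUID; because Go 1.22 later changed range
semantics to give each iteration a fresh variable, the sketch reuses one
variable explicitly to model the shared loop variable this bug relied on.

package main

import "fmt"

// UUID stands in for uuid.UUID: a value type backed by a byte array.
type UUID [16]byte

func main() {
	rollups := []UUID{{1}, {2}, {3}}

	// Buggy pattern: every iteration writes into the same variable, so
	// rollup[:] always aliases one backing array and all stored slices
	// end up showing the bytes of the last element.
	var stored [][]byte
	var rollup UUID
	for i := range rollups {
		rollup = rollups[i]
		stored = append(stored, rollup[:])
	}
	fmt.Println(stored[0][0], stored[1][0], stored[2][0]) // 3 3 3

	// Fixed pattern, mirroring the commit: copy the value into a fresh
	// variable first, so each slice refers to its own array.
	var copied [][]byte
	for i := range rollups {
		project := rollups[i]
		copied = append(copied, project[:])
	}
	fmt.Println(copied[0][0], copied[1][0], copied[2][0]) // 1 2 3
}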

@@ -183,6 +183,62 @@ func TestUploadDownloadBandwidth(t *testing.T) {
 	})
 }
 
+func TestMultiProjectUploadDownloadBandwidth(t *testing.T) {
+	testplanet.Run(t, testplanet.Config{
+		SatelliteCount: 1, StorageNodeCount: 6, UplinkCount: 2,
+		Reconfigure: testplanet.Reconfigure{
+			Satellite: testplanet.ReconfigureRS(2, 3, 4, 4),
+		},
+	}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
+		wayInTheFuture := time.Now().UTC().Add(1000 * time.Hour)
+		hourBeforeTheFuture := wayInTheFuture.Add(-time.Hour)
+		planet.Satellites[0].Audit.Worker.Loop.Pause()
+		for _, storageNode := range planet.StorageNodes {
+			storageNode.Storage2.Orders.Sender.Pause()
+		}
+
+		// Upload some data to two different projects in different buckets.
+		firstExpectedData := testrand.Bytes(50 * memory.KiB)
+		err := planet.Uplinks[0].Upload(ctx, planet.Satellites[0], "testbucket0", "test/path", firstExpectedData)
+		require.NoError(t, err)
+		data, err := planet.Uplinks[0].Download(ctx, planet.Satellites[0], "testbucket0", "test/path")
+		require.NoError(t, err)
+		require.Equal(t, firstExpectedData, data)
+
+		secondExpectedData := testrand.Bytes(100 * memory.KiB)
+		err = planet.Uplinks[1].Upload(ctx, planet.Satellites[0], "testbucket1", "test/path", secondExpectedData)
+		require.NoError(t, err)
+		data, err = planet.Uplinks[1].Download(ctx, planet.Satellites[0], "testbucket1", "test/path")
+		require.NoError(t, err)
+		require.Equal(t, secondExpectedData, data)
+
+		//HACKFIX: We need enough time to pass after the download ends for storagenodes to save orders
+		time.Sleep(200 * time.Millisecond)
+
+		// Have the nodes send up the orders.
+		for _, storageNode := range planet.StorageNodes {
+			storageNode.Storage2.Orders.Sender.TriggerWait()
+		}
+
+		// Run the chore as if we were far in the future so that the orders are expired.
+		reportedRollupChore := planet.Satellites[0].Core.Accounting.ReportedRollupChore
+		require.NoError(t, reportedRollupChore.RunOnce(ctx, wayInTheFuture))
+
+		// Query and ensure that there's no data recorded for the bucket from the other project
+		ordersDB := planet.Satellites[0].DB.Orders()
+		uplink0Project := planet.Uplinks[0].ProjectID[planet.Satellites[0].ID()]
+		uplink1Project := planet.Uplinks[1].ProjectID[planet.Satellites[0].ID()]
+
+		wrongBucketBandwidth, err := ordersDB.GetBucketBandwidth(ctx, uplink0Project, []byte("testbucket1"), hourBeforeTheFuture, wayInTheFuture)
+		require.NoError(t, err)
+		require.Equal(t, int64(0), wrongBucketBandwidth)
+
+		wrongBucketBandwidth, err = ordersDB.GetBucketBandwidth(ctx, uplink1Project, []byte("testbucket0"), hourBeforeTheFuture, wayInTheFuture)
+		require.NoError(t, err)
+		require.Equal(t, int64(0), wrongBucketBandwidth)
+	})
+}
+
 func TestSplitBucketIDInvalid(t *testing.T) {
 	var testCases = []struct {
 		name string

@@ -478,10 +478,10 @@ func (tx *ordersDBTx) UpdateBucketBandwidthBatch(ctx context.Context, intervalSt
 	}
 	if lastProjectID != rollup.ProjectID {
 		lastProjectID = rollup.ProjectID
-		// take the slice over rollup.ProjectID, because it is going to stay
-		// the same up to the ExecContext call, whereas lastProjectID is likely
-		// to be overwritten
-		args = append(args, rollup.ProjectID[:])
+		// Take the slice over a copy of the value so that we don't mutate
+		// the underlying value for different range iterations. :grrcox:
+		project := rollup.ProjectID
+		args = append(args, project[:])
 		projectIDArgNum = len(args)
 	}
 	if lastBucketName != rollup.BucketName {
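
The copy is safe because project is declared inside the loop body: each
iteration gets its own array, which escapes to its own allocation when
sliced into args. A hypothetical equivalent that skips the intermediate
variable would clone the bytes instead, at the cost of the same single
allocation:

// Sketch of an alternative defensive copy: clone the ID's bytes so the
// stored slice owns its own backing array, independent of the loop variable.
args = append(args, append([]byte(nil), rollup.ProjectID[:]...))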