storj/satellite/accounting/tally/tally_test.go

// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.

package tally_test

import (
	"fmt"
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"

	"storj.io/common/memory"
	"storj.io/common/storj"
	"storj.io/common/testcontext"
	"storj.io/common/testrand"
	"storj.io/common/uuid"
	"storj.io/storj/private/testplanet"
	"storj.io/storj/private/teststorj"
	"storj.io/storj/satellite/accounting"
	"storj.io/storj/satellite/accounting/tally"
	"storj.io/storj/satellite/metabase"
)
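
// TestDeleteTalliesBefore verifies that DeleteTalliesBefore removes only the
// storage node tallies recorded before the given cutoff time.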
func TestDeleteTalliesBefore(t *testing.T) {
	tests := []struct {
		eraseBefore  time.Time
		expectedRaws int
	}{
		{
			eraseBefore:  time.Now(),
			expectedRaws: 1,
		},
		{
			eraseBefore:  time.Now().Add(24 * time.Hour),
			expectedRaws: 0,
		},
	}

	for _, tt := range tests {
		test := tt
		testplanet.Run(t, testplanet.Config{
			SatelliteCount: 1, StorageNodeCount: 0, UplinkCount: 0,
		}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
			id := teststorj.NodeIDFromBytes([]byte{})
			nodeData := make(map[storj.NodeID]float64)
			nodeData[id] = float64(1000)

			err := planet.Satellites[0].DB.StoragenodeAccounting().SaveTallies(ctx, time.Now(), nodeData)
			require.NoError(t, err)

			err = planet.Satellites[0].DB.StoragenodeAccounting().DeleteTalliesBefore(ctx, test.eraseBefore)
			require.NoError(t, err)

			raws, err := planet.Satellites[0].DB.StoragenodeAccounting().GetTallies(ctx)
			require.NoError(t, err)
			assert.Len(t, raws, test.expectedRaws)
		})
	}
}
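
// TestOnlyInline verifies that a small upload stored inline is tallied as a
// single inline segment with the expected inline byte count.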
func TestOnlyInline(t *testing.T) {
	testplanet.Run(t, testplanet.Config{
		SatelliteCount: 1, StorageNodeCount: 6, UplinkCount: 1,
	}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
		planet.Satellites[0].Accounting.Tally.Loop.Pause()
		uplink := planet.Uplinks[0]
		// Setup: create data for the uplink to upload
		expectedData := testrand.Bytes(1 * memory.KiB)

		// Setup: calculate the expected size of the data that will be stored in the pointer.
		// Since the data is small enough to be stored inline, encryption only adds
		// 16 bytes of authentication overhead. No encryption block padding is added
		// because data stored inline is not chunked.
		const encryptionAuthOverhead = 16 // bytes
		expectedTotalBytes := len(expectedData) + encryptionAuthOverhead

		// Setup: the data in this tally should match the pointer created by uplink.Upload
		expectedBucketName := "testbucket"
		expectedTally := &accounting.BucketTally{
			BucketLocation: metabase.BucketLocation{
				ProjectID:  uplink.Projects[0].ID,
				BucketName: expectedBucketName,
			},
			ObjectCount:    1,
			InlineSegments: 1,
			InlineBytes:    int64(expectedTotalBytes),
			MetadataSize:   0,
		}

		// Execute test: upload a file, then calculate at-rest data
		err := uplink.Upload(ctx, planet.Satellites[0], expectedBucketName, "test/path", expectedData)
		assert.NoError(t, err)

		// run multiple times to ensure we add tallies
		for i := 0; i < 2; i++ {
			obs := tally.NewObserver(planet.Satellites[0].Log.Named("observer"), time.Now())
			err := planet.Satellites[0].Metainfo.Loop.Join(ctx, obs)
			require.NoError(t, err)

			now := time.Now().Add(time.Duration(i) * time.Second)
			err = planet.Satellites[0].DB.ProjectAccounting().SaveTallies(ctx, now, obs.Bucket)
			require.NoError(t, err)

			assert.Equal(t, 1, len(obs.Bucket))
			for _, actualTally := range obs.Bucket {
				// checking the exact metadata size is brittle; instead, verify that it's not zero
				assert.NotZero(t, actualTally.MetadataSize)
				actualTally.MetadataSize = expectedTally.MetadataSize
				assert.Equal(t, expectedTally, actualTally)
			}
		}
	})
}
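
// TestCalculateBucketAtRestData verifies that the tally observer computes per-bucket
// totals (object count, metadata size, inline and remote segments and bytes) that
// match the objects and segments recorded in the metabase.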
func TestCalculateBucketAtRestData(t *testing.T) {
	testplanet.Run(t, testplanet.Config{
		SatelliteCount: 1, StorageNodeCount: 4, UplinkCount: 2,
		Reconfigure: testplanet.Reconfigure{
			Satellite: testplanet.Combine(
				testplanet.ReconfigureRS(2, 3, 4, 4),
				testplanet.MaxSegmentSize(20*memory.KiB),
			),
		},
	}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
		satellite := planet.Satellites[0]

		err := planet.Uplinks[0].Upload(ctx, satellite, "alpha", "inline", make([]byte, 10*memory.KiB))
		require.NoError(t, err)

		err = planet.Uplinks[0].Upload(ctx, satellite, "alpha", "remote", make([]byte, 30*memory.KiB))
		require.NoError(t, err)

		err = planet.Uplinks[0].Upload(ctx, satellite, "beta", "remote", make([]byte, 30*memory.KiB))
		require.NoError(t, err)

		err = planet.Uplinks[1].Upload(ctx, satellite, "alpha", "remote", make([]byte, 30*memory.KiB))
		require.NoError(t, err)

		objects, err := satellite.Metainfo.Metabase.TestingAllObjects(ctx)
		require.NoError(t, err)

		segments, err := satellite.Metainfo.Metabase.TestingAllSegments(ctx)
		require.NoError(t, err)

		expectedTotal := map[metabase.BucketLocation]*accounting.BucketTally{}
		ensure := func(loc metabase.BucketLocation) *accounting.BucketTally {
			if t, ok := expectedTotal[loc]; ok {
				return t
			}
			t := &accounting.BucketTally{BucketLocation: loc}
			expectedTotal[loc] = t
			return t
		}

		streamLocation := map[uuid.UUID]metabase.BucketLocation{}
		for _, object := range objects {
			loc := object.Location().Bucket()
			streamLocation[object.StreamID] = loc

			t := ensure(loc)
			t.ObjectCount++
			t.MetadataSize += int64(len(object.EncryptedMetadata))
		}

		for _, segment := range segments {
			loc := streamLocation[segment.StreamID]

			t := ensure(loc)
			if len(segment.Pieces) > 0 {
				t.RemoteSegments++
				t.RemoteBytes += int64(segment.EncryptedSize)
			} else {
				t.InlineSegments++
				t.InlineBytes += int64(segment.EncryptedSize)
			}
		}

		require.Len(t, expectedTotal, 3)

		obs := tally.NewObserver(satellite.Log.Named("observer"), time.Now())
		err = satellite.Metainfo.Loop.Join(ctx, obs)
		require.NoError(t, err)
		require.Equal(t, expectedTotal, obs.Bucket)
	})
}
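
// TestTallyIgnoresExpiredPointers verifies that objects whose expiration time has
// already passed are not included in the tally.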
func TestTallyIgnoresExpiredPointers(t *testing.T) {
	testplanet.Run(t, testplanet.Config{
		SatelliteCount: 1, StorageNodeCount: 6, UplinkCount: 1,
	}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
		satellite := planet.Satellites[0]

		now := time.Now()
		err := planet.Uplinks[0].UploadWithExpiration(ctx, planet.Satellites[0], "bucket", "path", []byte{1}, now.Add(12*time.Hour))
		require.NoError(t, err)

		obs := tally.NewObserver(satellite.Log.Named("observer"), now.Add(24*time.Hour))
		err = satellite.Metainfo.Loop.Join(ctx, obs)
		require.NoError(t, err)

		// there should be no observed buckets because all of the pointers are expired
		require.Equal(t, obs.Bucket, map[metabase.BucketLocation]*accounting.BucketTally{})
	})
}
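
// TestTallyLiveAccounting verifies that running the tally loop updates the live
// accounting totals for a project: after each upload and tally run, the project's
// storage total reported by ProjectUsage should grow by the size of the newly
// uploaded segment.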
func TestTallyLiveAccounting(t *testing.T) {
	testplanet.Run(t, testplanet.Config{
		SatelliteCount: 1, StorageNodeCount: 6, UplinkCount: 1,
	}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
		tally := planet.Satellites[0].Accounting.Tally
		projectID := planet.Uplinks[0].Projects[0].ID
		tally.Loop.Pause()

		expectedData := testrand.Bytes(5 * memory.MB)

		err := planet.Uplinks[0].Upload(ctx, planet.Satellites[0], "testbucket", "test/path", expectedData)
		require.NoError(t, err)

		segments, err := planet.Satellites[0].Metainfo.Metabase.TestingAllSegments(ctx)
		require.NoError(t, err)
		require.Len(t, segments, 1)

		segmentSize := int64(segments[0].EncryptedSize)
		tally.Loop.TriggerWait()

		expectedSize := segmentSize

		total, err := planet.Satellites[0].Accounting.ProjectUsage.GetProjectStorageTotals(ctx, projectID)
		require.NoError(t, err)
		require.Equal(t, expectedSize, total)

		for i := 0; i < 5; i++ {
			err := planet.Uplinks[0].Upload(ctx, planet.Satellites[0], "testbucket", fmt.Sprintf("test/path/%d", i), expectedData)
			require.NoError(t, err)

			tally.Loop.TriggerWait()

			expectedSize += segmentSize

			total, err := planet.Satellites[0].Accounting.ProjectUsage.GetProjectStorageTotals(ctx, projectID)
			require.NoError(t, err)
			require.Equal(t, expectedSize, total)
		}
	})
}
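
// TestTallyEmptyProjectUpdatesLiveAccounting verifies that tally updates the live
// accounting total for a project even after all of its data has been deleted, so
// that the project's storage total drops back to zero.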
func TestTallyEmptyProjectUpdatesLiveAccounting(t *testing.T) {
	testplanet.Run(t, testplanet.Config{
		SatelliteCount: 1, StorageNodeCount: 6, UplinkCount: 2,
	}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
		planet.Satellites[0].Accounting.Tally.Loop.Pause()

		project1 := planet.Uplinks[1].Projects[0].ID

		data := testrand.Bytes(1 * memory.MB)

		// we need an extra bucket with data for this test. If no buckets are found at all,
		// the update block is skipped in tally
		err := planet.Uplinks[0].Upload(ctx, planet.Satellites[0], "bucket", "test", data)
		require.NoError(t, err)

		err = planet.Uplinks[1].Upload(ctx, planet.Satellites[0], "bucket", "test", data)
		require.NoError(t, err)

		planet.Satellites[0].Accounting.Tally.Loop.TriggerWait()
		planet.Satellites[0].Accounting.Tally.Loop.Pause()

		total, err := planet.Satellites[0].Accounting.ProjectUsage.GetProjectStorageTotals(ctx, project1)
		require.NoError(t, err)
		require.True(t, total >= int64(len(data)))

		err = planet.Uplinks[1].DeleteObject(ctx, planet.Satellites[0], "bucket", "test")
		require.NoError(t, err)

		planet.Satellites[0].Accounting.Tally.Loop.TriggerWait()

		p1Total, err := planet.Satellites[0].Accounting.ProjectUsage.GetProjectStorageTotals(ctx, project1)
		require.NoError(t, err)
		require.Zero(t, p1Total)
	})
}