satellite/accounting: fix how we put segments usage into cache
We had two problems here. First, the way we handled errors from GetProjectSegmentUsage: we always returned the error, even ErrKeyNotFound, which should instead trigger a refresh of the cached value rather than be propagated out of the method. Second, we used the wrong value to update the cache: we took the current value from the cache, which is obviously not what we intend; the cache should be seeded with the value read from the database.

Fixes https://github.com/storj/storj/issues/4389

Change-Id: I4d7ca7a1a0a119587db6a5041b44319102ef64f8
parent bfad42f29a
commit 4be84c3a6c
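For orientation before the diff, here is a minimal, self-contained sketch of the refresh-on-miss pattern the fix applies; the Cache, DB, and error names below are hypothetical stand-ins, not the satellite's actual types. On a cache miss the authoritative total is read from the database, that database value (not the stale cache value) is written back into the cache, and the miss is not surfaced as an error.

// Hypothetical, simplified sketch of the pattern used by the fix: on a cache
// miss (ErrKeyNotFound), read the authoritative total from the database,
// write that database value into the cache, and return it without treating
// the miss as an error. Cache, DB, and ErrKeyNotFound are illustrative
// stand-ins, not the storj satellite types.
package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

var ErrKeyNotFound = errors.New("key not found")

type Cache struct{ values map[string]int64 }

func (c *Cache) GetSegmentUsage(ctx context.Context, projectID string) (int64, error) {
	v, ok := c.values[projectID]
	if !ok {
		return 0, ErrKeyNotFound
	}
	return v, nil
}

func (c *Cache) UpdateSegmentUsage(ctx context.Context, projectID string, value int64, ttl time.Duration) error {
	c.values[projectID] = value // TTL ignored in this sketch
	return nil
}

type DB struct{ totals map[string]int64 }

func (db *DB) GetProjectSegments(ctx context.Context, projectID string) (int64, error) {
	return db.totals[projectID], nil
}

// segmentUsage mirrors the fixed control flow: a cache miss is refreshed
// from the database value (not from the stale/zero cache value) and is not
// propagated as an error.
func segmentUsage(ctx context.Context, cache *Cache, db *DB, projectID string, ttl time.Duration) (int64, error) {
	usage, err := cache.GetSegmentUsage(ctx, projectID)
	if err != nil && errors.Is(err, ErrKeyNotFound) {
		total, err := db.GetProjectSegments(ctx, projectID)
		if err != nil {
			return 0, err
		}
		// Seed the cache with the database value.
		if err := cache.UpdateSegmentUsage(ctx, projectID, total, ttl); err != nil {
			return 0, err
		}
		return total, nil
	}
	return usage, err
}

func main() {
	ctx := context.Background()
	cache := &Cache{values: map[string]int64{}}
	db := &DB{totals: map[string]int64{"project-1": 42}}

	got, err := segmentUsage(ctx, cache, db, "project-1", time.Hour)
	fmt.Println(got, err) // 42 <nil>: the miss was refreshed from the database
}

In the real ExceedsUploadLimits this logic runs inside an errgroup goroutine, but the shape is the same: after seeding the cache, return nil so the miss never reaches the caller.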
@@ -133,22 +133,20 @@ func (usage *Service) ExceedsUploadLimits(ctx context.Context, projectID uuid.UU
 	group.Go(func() error {
 		var err error
 		segmentUsage, err = usage.GetProjectSegmentUsage(ctx, projectID)
-		if err != nil {
-			// Verify If the cache key was not found
-			if ErrKeyNotFound.Has(err) {
-				segmentGetTotal, err := usage.GetProjectSegments(ctx, projectID)
-				if err != nil {
-					return err
-				}
-
-				// Create cache key with database value.
-				err = usage.liveAccounting.UpdateProjectSegmentUsage(ctx, projectID, segmentUsage, usage.bandwidthCacheTTL)
-				if err != nil {
-					return err
-				}
-
-				segmentUsage = segmentGetTotal
-			}
+		// Verify If the cache key was not found
+		if err != nil && ErrKeyNotFound.Has(err) {
+			segmentGetTotal, err := usage.GetProjectSegments(ctx, projectID)
+			if err != nil {
+				return err
+			}
+
+			// Create cache key with database value.
+			if err := usage.liveAccounting.UpdateProjectSegmentUsage(ctx, projectID, segmentGetTotal, usage.bandwidthCacheTTL); err != nil {
+				return err
+			}
+
+			segmentUsage = segmentGetTotal
+			return nil
 		}
 		return err
 	})
@@ -196,7 +196,6 @@ func TestProjectSegmentLimit(t *testing.T) {
 		// successful upload
 		err = planet.Uplinks[0].Upload(ctx, planet.Satellites[0], "testbucket", "test/path/0", data)
 		require.NoError(t, err)
-		planet.Satellites[0].Accounting.Tally.Loop.TriggerWait()
 
 		// upload fails due to segment limit
 		err = planet.Uplinks[0].Upload(ctx, planet.Satellites[0], "testbucket", "test/path/1", data)
@@ -234,6 +233,34 @@ func TestProjectSegmentLimitInline(t *testing.T) {
 	})
 }
 
+func TestProjectSegmentLimitWithoutCache(t *testing.T) {
+	testplanet.Run(t, testplanet.Config{
+		SatelliteCount: 1, UplinkCount: 1,
+		Reconfigure: testplanet.Reconfigure{
+			Satellite: func(log *zap.Logger, index int, config *satellite.Config) {
+				config.Metainfo.ProjectLimits.ValidateSegmentLimit = true
+				config.Console.UsageLimits.Segment.Free = 5
+				config.Console.UsageLimits.Segment.Paid = 5
+				// this effectively disables the live accounting cache
+				config.LiveAccounting.BandwidthCacheTTL = -1
+			},
+		},
+	}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
+		data := testrand.Bytes(1 * memory.KiB)
+
+		for i := 0; i < 5; i++ {
+			// successful upload
+			err := planet.Uplinks[0].Upload(ctx, planet.Satellites[0], "testbucket", "test/path/"+strconv.Itoa(i), data)
+			require.NoError(t, err)
+		}
+
+		// upload fails due to segment limit
+		err := planet.Uplinks[0].Upload(ctx, planet.Satellites[0], "testbucket", "test/path/5", data)
+		require.Error(t, err)
+		require.Contains(t, err.Error(), "Exceeded Segments Limit")
+	})
+}
+
 func TestProjectSegmentLimitMultipartUpload(t *testing.T) {
 	testplanet.Run(t, testplanet.Config{
 		SatelliteCount: 1, UplinkCount: 1,