2019-01-24 20:15:10 +00:00
// Copyright (C) 2019 Storj Labs, Inc.
2018-11-08 16:18:28 +00:00
// See LICENSE for copying information.
2019-01-23 19:58:44 +00:00
package tally_test
2018-11-08 16:18:28 +00:00
import (
2019-04-09 14:48:35 +01:00
"crypto/rand"
2018-11-08 16:18:28 +00:00
"testing"
"time"
2019-02-07 19:22:49 +00:00
"github.com/stretchr/testify/assert"
2019-02-01 18:50:12 +00:00
"github.com/stretchr/testify/require"
2018-12-07 09:59:31 +00:00
2019-04-09 14:48:35 +01:00
"storj.io/storj/internal/memory"
2018-12-07 11:55:25 +00:00
"storj.io/storj/internal/testcontext"
2019-02-01 18:50:12 +00:00
"storj.io/storj/internal/testplanet"
2019-04-03 04:55:24 +01:00
"storj.io/storj/internal/teststorj"
2019-04-09 14:48:35 +01:00
"storj.io/storj/pkg/accounting"
"storj.io/storj/pkg/encryption"
2019-04-03 04:55:24 +01:00
"storj.io/storj/pkg/storj"
2018-11-08 16:18:28 +00:00
)
2019-05-10 20:05:42 +01:00
func TestDeleteTalliesBefore ( t * testing . T ) {
2019-04-03 04:55:24 +01:00
tests := [ ] struct {
eraseBefore time . Time
expectedRaws int
} {
{
eraseBefore : time . Now ( ) ,
expectedRaws : 1 ,
} ,
{
eraseBefore : time . Now ( ) . Add ( 24 * time . Hour ) ,
expectedRaws : 0 ,
} ,
}
for _ , tt := range tests {
2019-05-29 14:30:16 +01:00
test := tt
2019-04-03 04:55:24 +01:00
testplanet . Run ( t , testplanet . Config {
SatelliteCount : 1 , StorageNodeCount : 0 , UplinkCount : 0 ,
} , func ( t * testing . T , ctx * testcontext . Context , planet * testplanet . Planet ) {
id := teststorj . NodeIDFromBytes ( [ ] byte { } )
nodeData := make ( map [ storj . NodeID ] float64 )
nodeData [ id ] = float64 ( 1000 )
2019-05-10 20:05:42 +01:00
err := planet . Satellites [ 0 ] . DB . StoragenodeAccounting ( ) . SaveTallies ( ctx , time . Now ( ) , nodeData )
2019-04-03 04:55:24 +01:00
require . NoError ( t , err )
2019-05-29 14:30:16 +01:00
err = planet . Satellites [ 0 ] . DB . StoragenodeAccounting ( ) . DeleteTalliesBefore ( ctx , test . eraseBefore )
2019-04-03 04:55:24 +01:00
require . NoError ( t , err )
2019-05-10 20:05:42 +01:00
raws , err := planet . Satellites [ 0 ] . DB . StoragenodeAccounting ( ) . GetTallies ( ctx )
2019-04-03 04:55:24 +01:00
require . NoError ( t , err )
2019-05-29 14:30:16 +01:00
assert . Len ( t , raws , test . expectedRaws )
2019-04-03 04:55:24 +01:00
} )
}
}
2019-04-09 14:48:35 +01:00
2019-04-29 18:46:38 +01:00
func TestOnlyInline ( t * testing . T ) {
testplanet . Run ( t , testplanet . Config {
SatelliteCount : 1 , StorageNodeCount : 6 , UplinkCount : 1 ,
} , func ( t * testing . T , ctx * testcontext . Context , planet * testplanet . Planet ) {
tallySvc := planet . Satellites [ 0 ] . Accounting . Tally
uplink := planet . Uplinks [ 0 ]
2019-06-13 17:58:40 +01:00
ps , err1 := planet . Satellites [ 0 ] . DB . Console ( ) . Projects ( ) . GetAll ( ctx )
if err1 != nil {
assert . NoError ( t , err1 )
}
project := ps [ 0 ]
projectID := [ ] byte ( project . ID . String ( ) )
2019-04-29 18:46:38 +01:00
// Setup: create data for the uplink to upload
expectedData := make ( [ ] byte , 1 * memory . KiB )
_ , err := rand . Read ( expectedData )
require . NoError ( t , err )
// Setup: get the expected size of the data that will be stored in pointer
2019-06-19 09:11:27 +01:00
// Since the data is small enough to be stored inline, when it is encrypted, we only
// add 16 bytes of encryption authentication overhead. No encryption block
// padding will be added since we are not chunking data that we store inline.
const encryptionAuthOverhead = 16 // bytes
expectedTotalBytes := len ( expectedData ) + encryptionAuthOverhead
2019-04-29 18:46:38 +01:00
// Setup: The data in this tally should match the pointer that the uplink.upload created
2019-06-13 17:58:40 +01:00
expectedBucketName := "testbucket"
2019-04-29 18:46:38 +01:00
expectedTally := accounting . BucketTally {
2019-06-13 17:58:40 +01:00
BucketName : [ ] byte ( expectedBucketName ) ,
ProjectID : projectID ,
2019-04-29 18:46:38 +01:00
Segments : 1 ,
InlineSegments : 1 ,
Files : 1 ,
InlineFiles : 1 ,
2019-06-19 09:11:27 +01:00
Bytes : int64 ( expectedTotalBytes ) ,
InlineBytes : int64 ( expectedTotalBytes ) ,
2019-04-29 18:46:38 +01:00
MetadataSize : 111 , // brittle, this is hardcoded since its too difficult to get this value progamatically
}
// Execute test: upload a file, then calculate at rest data
err = uplink . Upload ( ctx , planet . Satellites [ 0 ] , expectedBucketName , "test/path" , expectedData )
assert . NoError ( t , err )
// Run calculate twice to test unique constraint issue
for i := 0 ; i < 2 ; i ++ {
latestTally , actualNodeData , actualBucketData , err := tallySvc . CalculateAtRestData ( ctx )
require . NoError ( t , err )
assert . Len ( t , actualNodeData , 0 )
2019-05-10 20:05:42 +01:00
_ , err = planet . Satellites [ 0 ] . DB . ProjectAccounting ( ) . SaveTallies ( ctx , latestTally , actualBucketData )
2019-04-29 18:46:38 +01:00
require . NoError ( t , err )
// Confirm the correct bucket storage tally was created
assert . Equal ( t , len ( actualBucketData ) , 1 )
for bucketID , actualTally := range actualBucketData {
assert . Contains ( t , bucketID , expectedBucketName )
assert . Equal ( t , expectedTally , * actualTally )
}
}
} )
}
2019-06-13 17:58:40 +01:00
func TestCalculateNodeAtRestData ( t * testing . T ) {
2019-04-09 14:48:35 +01:00
testplanet . Run ( t , testplanet . Config {
SatelliteCount : 1 , StorageNodeCount : 6 , UplinkCount : 1 ,
} , func ( t * testing . T , ctx * testcontext . Context , planet * testplanet . Planet ) {
tallySvc := planet . Satellites [ 0 ] . Accounting . Tally
uplink := planet . Uplinks [ 0 ]
// Setup: create 50KiB of data for the uplink to upload
expectedData := make ( [ ] byte , 50 * memory . KiB )
_ , err := rand . Read ( expectedData )
require . NoError ( t , err )
// Setup: get the expected size of the data that will be stored in pointer
uplinkConfig := uplink . GetConfig ( planet . Satellites [ 0 ] )
expectedTotalBytes , err := encryption . CalcEncryptedSize ( int64 ( len ( expectedData ) ) , uplinkConfig . GetEncryptionScheme ( ) )
require . NoError ( t , err )
// Execute test: upload a file, then calculate at rest data
expectedBucketName := "testbucket"
err = uplink . Upload ( ctx , planet . Satellites [ 0 ] , expectedBucketName , "test/path" , expectedData )
2019-06-13 17:58:40 +01:00
2019-04-09 14:48:35 +01:00
assert . NoError ( t , err )
2019-06-13 17:58:40 +01:00
_ , actualNodeData , _ , err := tallySvc . CalculateAtRestData ( ctx )
2019-04-09 14:48:35 +01:00
require . NoError ( t , err )
// Confirm the correct number of shares were stored
uplinkRS := uplinkConfig . GetRedundancyScheme ( )
if ! correctRedundencyScheme ( len ( actualNodeData ) , uplinkRS ) {
t . Fatalf ( "expected between: %d and %d, actual: %d" , uplinkRS . RepairShares , uplinkRS . TotalShares , len ( actualNodeData ) )
}
// Confirm the correct number of bytes were stored on each node
for _ , actualTotalBytes := range actualNodeData {
assert . Equal ( t , int64 ( actualTotalBytes ) , expectedTotalBytes )
}
2019-06-13 17:58:40 +01:00
} )
}
func TestCalculateBucketAtRestData ( t * testing . T ) {
2019-06-18 03:20:40 +01:00
t . Skip ( "TODO: this test is flaky" )
2019-06-13 17:58:40 +01:00
testplanet . Run ( t , testplanet . Config {
SatelliteCount : 1 , StorageNodeCount : 6 , UplinkCount : 1 ,
} , func ( t * testing . T , ctx * testcontext . Context , planet * testplanet . Planet ) {
tallySvc := planet . Satellites [ 0 ] . Accounting . Tally
uplink := planet . Uplinks [ 0 ]
ps , err1 := planet . Satellites [ 0 ] . DB . Console ( ) . Projects ( ) . GetAll ( ctx )
if err1 != nil {
assert . NoError ( t , err1 )
}
project := ps [ 0 ]
projectID := [ ] byte ( project . ID . String ( ) )
// Setup: create 50KiB of data for the uplink to upload
expectedData := make ( [ ] byte , 50 * memory . KiB )
_ , err := rand . Read ( expectedData )
require . NoError ( t , err )
2019-06-18 03:20:40 +01:00
expectedData2 := make ( [ ] byte , 100 * memory . KiB )
_ , err = rand . Read ( expectedData )
require . NoError ( t , err )
2019-06-13 17:58:40 +01:00
// Setup: get the expected size of the data that will be stored in pointer
uplinkConfig := uplink . GetConfig ( planet . Satellites [ 0 ] )
expectedTotalBytes , err := encryption . CalcEncryptedSize ( int64 ( len ( expectedData ) ) , uplinkConfig . GetEncryptionScheme ( ) )
require . NoError ( t , err )
2019-06-18 03:20:40 +01:00
expectedTotalBytes2 , err := encryption . CalcEncryptedSize ( int64 ( len ( expectedData2 ) ) , uplinkConfig . GetEncryptionScheme ( ) )
require . NoError ( t , err )
2019-06-13 17:58:40 +01:00
// Setup: The data in this tally should match the pointer that the uplink.upload created
expectedBucketName1 := "testbucket1"
expectedTally1 := accounting . BucketTally {
BucketName : [ ] byte ( expectedBucketName1 ) ,
ProjectID : projectID ,
Segments : 1 ,
RemoteSegments : 1 ,
Files : 1 ,
RemoteFiles : 1 ,
Bytes : expectedTotalBytes ,
RemoteBytes : expectedTotalBytes ,
MetadataSize : 112 , // brittle, this is hardcoded since its too difficult to get this value progamatically
}
expectedBucketName2 := "testbucket2"
expectedTally2 := accounting . BucketTally {
BucketName : [ ] byte ( expectedBucketName2 ) ,
ProjectID : projectID ,
2019-06-18 03:20:40 +01:00
Segments : 1 ,
RemoteSegments : 1 ,
Files : 1 ,
RemoteFiles : 1 ,
Bytes : expectedTotalBytes2 ,
RemoteBytes : expectedTotalBytes2 ,
MetadataSize : 112 , // brittle, this is hardcoded since its too difficult to get this value progamatically
}
expectedBucketName3 := "testbucket3"
expectedTally3 := accounting . BucketTally {
BucketName : [ ] byte ( expectedBucketName3 ) ,
ProjectID : projectID ,
Segments : 0 ,
RemoteSegments : 0 ,
Files : 0 ,
RemoteFiles : 0 ,
Bytes : 0 ,
RemoteBytes : 0 ,
MetadataSize : 0 ,
}
expectedBucketName4 := "testbucket4"
expectedTally4 := accounting . BucketTally {
BucketName : [ ] byte ( expectedBucketName4 ) ,
ProjectID : projectID ,
2019-06-13 17:58:40 +01:00
Segments : 2 ,
RemoteSegments : 2 ,
Files : 2 ,
RemoteFiles : 2 ,
2019-06-18 03:20:40 +01:00
Bytes : expectedTotalBytes + expectedTotalBytes2 ,
RemoteBytes : expectedTotalBytes + expectedTotalBytes2 ,
MetadataSize : 224 ,
2019-06-13 17:58:40 +01:00
}
// Execute test: upload a file, then calculate at rest data
2019-06-18 03:20:40 +01:00
err = uplink . Upload ( ctx , planet . Satellites [ 0 ] , expectedBucketName2 , "test/path2" , expectedData2 )
assert . NoError ( t , err )
2019-06-13 17:58:40 +01:00
err = uplink . Upload ( ctx , planet . Satellites [ 0 ] , expectedBucketName1 , "test/path1" , expectedData )
assert . NoError ( t , err )
2019-06-18 03:20:40 +01:00
err = uplink . Upload ( ctx , planet . Satellites [ 0 ] , expectedBucketName4 , "test/path2" , expectedData2 )
2019-06-13 17:58:40 +01:00
assert . NoError ( t , err )
2019-06-18 03:20:40 +01:00
err = uplink . Upload ( ctx , planet . Satellites [ 0 ] , expectedBucketName4 , "test/path1" , expectedData )
2019-06-13 17:58:40 +01:00
assert . NoError ( t , err )
_ , _ , actualBucketData , err := tallySvc . CalculateAtRestData ( ctx )
require . NoError ( t , err )
2019-04-09 14:48:35 +01:00
// Confirm the correct bucket storage tally was created
2019-06-18 03:20:40 +01:00
assert . Equal ( t , len ( actualBucketData ) , 3 )
2019-04-09 14:48:35 +01:00
for bucketID , actualTally := range actualBucketData {
2019-06-13 17:58:40 +01:00
var bucketName = string ( actualTally . BucketName )
2019-06-18 03:20:40 +01:00
assert . True ( t , bucketName == expectedBucketName1 || bucketName == expectedBucketName2 || bucketName == expectedBucketName3 || bucketName == expectedBucketName4 , "Test bucket names do not exist in results" )
switch bucketName {
case expectedBucketName1 :
2019-06-13 17:58:40 +01:00
assert . Contains ( t , bucketID , expectedBucketName1 )
assert . Equal ( t , expectedTally1 , * actualTally )
2019-06-18 03:20:40 +01:00
case expectedBucketName2 :
2019-06-13 17:58:40 +01:00
assert . Contains ( t , bucketID , expectedBucketName2 )
assert . Equal ( t , expectedTally2 , * actualTally )
2019-06-18 03:20:40 +01:00
case expectedBucketName3 :
assert . Contains ( t , bucketID , expectedBucketName3 )
assert . Equal ( t , expectedTally3 , * actualTally )
case expectedBucketName4 :
assert . Contains ( t , bucketID , expectedBucketName4 )
assert . Equal ( t , expectedTally4 , * actualTally )
2019-06-13 17:58:40 +01:00
}
2019-04-09 14:48:35 +01:00
}
} )
}
// correctRedundencyScheme reports whether shareCount falls within the
// acceptable range for the given redundancy scheme: at least RepairShares
// (the threshold below which repair is triggered) and at most TotalShares
// (the number of shares encoded).
func correctRedundencyScheme(shareCount int, uplinkRS storj.RedundancyScheme) bool {
	// Idiom: return the condition directly instead of if-true/false.
	return int(uplinkRS.RepairShares) <= shareCount && shareCount <= int(uplinkRS.TotalShares)
}