// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.
package tally_test
2018-11-08 16:18:28 +00:00
import (
2019-06-20 20:15:13 +01:00
"fmt"
2018-11-08 16:18:28 +00:00
"testing"
"time"
2019-02-07 19:22:49 +00:00
"github.com/stretchr/testify/assert"
2019-02-01 18:50:12 +00:00
"github.com/stretchr/testify/require"
2018-12-07 09:59:31 +00:00
2019-12-27 11:48:47 +00:00
"storj.io/common/encryption"
"storj.io/common/memory"
"storj.io/common/pb"
"storj.io/common/storj"
"storj.io/common/testcontext"
"storj.io/common/testrand"
2020-03-30 10:08:50 +01:00
"storj.io/common/uuid"
2019-11-14 19:46:15 +00:00
"storj.io/storj/private/testplanet"
"storj.io/storj/private/teststorj"
2019-07-28 06:55:36 +01:00
"storj.io/storj/satellite/accounting"
2019-10-07 21:55:20 +01:00
"storj.io/storj/satellite/accounting/tally"
2020-08-31 11:14:20 +01:00
"storj.io/storj/satellite/metainfo/metabase"
2018-11-08 16:18:28 +00:00
)
2019-05-10 20:05:42 +01:00
func TestDeleteTalliesBefore ( t * testing . T ) {
2019-04-03 04:55:24 +01:00
tests := [ ] struct {
eraseBefore time . Time
expectedRaws int
} {
{
eraseBefore : time . Now ( ) ,
expectedRaws : 1 ,
} ,
{
eraseBefore : time . Now ( ) . Add ( 24 * time . Hour ) ,
expectedRaws : 0 ,
} ,
}
for _ , tt := range tests {
2019-05-29 14:30:16 +01:00
test := tt
2019-04-03 04:55:24 +01:00
testplanet . Run ( t , testplanet . Config {
SatelliteCount : 1 , StorageNodeCount : 0 , UplinkCount : 0 ,
} , func ( t * testing . T , ctx * testcontext . Context , planet * testplanet . Planet ) {
id := teststorj . NodeIDFromBytes ( [ ] byte { } )
nodeData := make ( map [ storj . NodeID ] float64 )
nodeData [ id ] = float64 ( 1000 )
2019-05-10 20:05:42 +01:00
err := planet . Satellites [ 0 ] . DB . StoragenodeAccounting ( ) . SaveTallies ( ctx , time . Now ( ) , nodeData )
2019-04-03 04:55:24 +01:00
require . NoError ( t , err )
2019-05-29 14:30:16 +01:00
err = planet . Satellites [ 0 ] . DB . StoragenodeAccounting ( ) . DeleteTalliesBefore ( ctx , test . eraseBefore )
2019-04-03 04:55:24 +01:00
require . NoError ( t , err )
2019-05-10 20:05:42 +01:00
raws , err := planet . Satellites [ 0 ] . DB . StoragenodeAccounting ( ) . GetTallies ( ctx )
2019-04-03 04:55:24 +01:00
require . NoError ( t , err )
2019-05-29 14:30:16 +01:00
assert . Len ( t , raws , test . expectedRaws )
2019-04-03 04:55:24 +01:00
} )
}
}
2019-04-09 14:48:35 +01:00
2019-04-29 18:46:38 +01:00
func TestOnlyInline ( t * testing . T ) {
testplanet . Run ( t , testplanet . Config {
SatelliteCount : 1 , StorageNodeCount : 6 , UplinkCount : 1 ,
} , func ( t * testing . T , ctx * testcontext . Context , planet * testplanet . Planet ) {
2019-10-07 21:55:20 +01:00
planet . Satellites [ 0 ] . Accounting . Tally . Loop . Pause ( )
2019-04-29 18:46:38 +01:00
uplink := planet . Uplinks [ 0 ]
2019-06-13 17:58:40 +01:00
2019-04-29 18:46:38 +01:00
// Setup: create data for the uplink to upload
2019-06-26 11:38:51 +01:00
expectedData := testrand . Bytes ( 1 * memory . KiB )
2019-04-29 18:46:38 +01:00
// Setup: get the expected size of the data that will be stored in pointer
2019-06-19 09:11:27 +01:00
// Since the data is small enough to be stored inline, when it is encrypted, we only
// add 16 bytes of encryption authentication overhead. No encryption block
// padding will be added since we are not chunking data that we store inline.
const encryptionAuthOverhead = 16 // bytes
expectedTotalBytes := len ( expectedData ) + encryptionAuthOverhead
2019-04-29 18:46:38 +01:00
// Setup: The data in this tally should match the pointer that the uplink.upload created
2019-06-13 17:58:40 +01:00
expectedBucketName := "testbucket"
2019-10-07 21:55:20 +01:00
expectedTally := & accounting . BucketTally {
2020-08-31 11:14:20 +01:00
BucketLocation : metabase . BucketLocation {
ProjectID : uplink . Projects [ 0 ] . ID ,
BucketName : expectedBucketName ,
} ,
2019-09-13 14:51:41 +01:00
ObjectCount : 1 ,
2019-04-29 18:46:38 +01:00
InlineSegments : 1 ,
2019-06-19 09:11:27 +01:00
InlineBytes : int64 ( expectedTotalBytes ) ,
2019-08-22 22:15:58 +01:00
MetadataSize : 113 , // brittle, this is hardcoded since its too difficult to get this value progamatically
2019-04-29 18:46:38 +01:00
}
// Execute test: upload a file, then calculate at rest data
2019-06-26 11:38:51 +01:00
err := uplink . Upload ( ctx , planet . Satellites [ 0 ] , expectedBucketName , "test/path" , expectedData )
2019-04-29 18:46:38 +01:00
assert . NoError ( t , err )
2019-10-07 21:55:20 +01:00
// run multiple times to ensure we add tallies
2019-04-29 18:46:38 +01:00
for i := 0 ; i < 2 ; i ++ {
2020-04-10 18:35:58 +01:00
obs := tally . NewObserver ( planet . Satellites [ 0 ] . Log . Named ( "observer" ) , time . Now ( ) )
2019-10-07 21:55:20 +01:00
err := planet . Satellites [ 0 ] . Metainfo . Loop . Join ( ctx , obs )
2019-04-29 18:46:38 +01:00
require . NoError ( t , err )
2019-10-07 21:55:20 +01:00
now := time . Now ( ) . Add ( time . Duration ( i ) * time . Second )
err = planet . Satellites [ 0 ] . DB . ProjectAccounting ( ) . SaveTallies ( ctx , now , obs . Bucket )
2019-04-29 18:46:38 +01:00
require . NoError ( t , err )
2019-10-07 21:55:20 +01:00
assert . Equal ( t , 1 , len ( obs . Bucket ) )
for _ , actualTally := range obs . Bucket {
assert . Equal ( t , expectedTally , actualTally )
2019-04-29 18:46:38 +01:00
}
}
} )
}
2019-06-13 17:58:40 +01:00
func TestCalculateNodeAtRestData ( t * testing . T ) {
2019-04-09 14:48:35 +01:00
testplanet . Run ( t , testplanet . Config {
SatelliteCount : 1 , StorageNodeCount : 6 , UplinkCount : 1 ,
} , func ( t * testing . T , ctx * testcontext . Context , planet * testplanet . Planet ) {
tallySvc := planet . Satellites [ 0 ] . Accounting . Tally
2019-10-04 20:09:52 +01:00
tallySvc . Loop . Pause ( )
2019-04-09 14:48:35 +01:00
uplink := planet . Uplinks [ 0 ]
// Setup: create 50KiB of data for the uplink to upload
2019-06-26 11:38:51 +01:00
expectedData := testrand . Bytes ( 50 * memory . KiB )
2019-04-09 14:48:35 +01:00
2020-03-30 10:08:02 +01:00
// TODO uplink currently hardcode block size so we need to use the same value in test
encryptionParameters := storj . EncryptionParameters {
CipherSuite : storj . EncAESGCM ,
BlockSize : 29 * 256 * memory . B . Int32 ( ) ,
}
expectedTotalBytes , err := encryption . CalcEncryptedSize ( int64 ( len ( expectedData ) ) , encryptionParameters )
2019-04-09 14:48:35 +01:00
require . NoError ( t , err )
// Execute test: upload a file, then calculate at rest data
expectedBucketName := "testbucket"
err = uplink . Upload ( ctx , planet . Satellites [ 0 ] , expectedBucketName , "test/path" , expectedData )
2019-10-07 21:55:20 +01:00
require . NoError ( t , err )
2019-06-13 17:58:40 +01:00
2020-04-10 18:35:58 +01:00
obs := tally . NewObserver ( planet . Satellites [ 0 ] . Log . Named ( "observer" ) , time . Now ( ) )
2019-10-07 21:55:20 +01:00
err = planet . Satellites [ 0 ] . Metainfo . Loop . Join ( ctx , obs )
2019-04-09 14:48:35 +01:00
require . NoError ( t , err )
// Confirm the correct number of shares were stored
2020-10-27 17:34:59 +00:00
rs := satelliteRS ( t , planet . Satellites [ 0 ] )
2020-05-26 09:05:43 +01:00
if ! correctRedundencyScheme ( len ( obs . Node ) , rs ) {
t . Fatalf ( "expected between: %d and %d, actual: %d" , rs . RepairShares , rs . TotalShares , len ( obs . Node ) )
2019-04-09 14:48:35 +01:00
}
// Confirm the correct number of bytes were stored on each node
2019-10-07 21:55:20 +01:00
for _ , actualTotalBytes := range obs . Node {
assert . Equal ( t , expectedTotalBytes , int64 ( actualTotalBytes ) )
2019-04-09 14:48:35 +01:00
}
2019-06-13 17:58:40 +01:00
} )
}
// TestCalculateBucketAtRestData inserts pointers one at a time while building
// the matching expected per-bucket tallies, and checks after every insert that
// the tally observer's bucket map equals the accumulated expectation.
//
// NOTE(review): the subtests share expectedBucketTallies and the satellite's
// pointerDB state, so they are order-dependent and must not run in parallel.
func TestCalculateBucketAtRestData(t *testing.T) {
	var testCases = []struct {
		name         string
		project      string
		segmentIndex int64
		bucketName   string
		objectName   string
		inline       bool // store the pointer inline instead of remote
		last         bool // pointer is the object's last segment (counts as an object)
	}{
		{"inline, same project, same bucket", "9656af6e-2d9c-42fa-91f2-bfd516a722d7", metabase.LastSegmentIndex, "mockBucketName", "mockObjectName", true, true},
		{"remote, same project, same bucket", "9656af6e-2d9c-42fa-91f2-bfd516a722d7", 0, "mockBucketName", "mockObjectName1", false, false},
		{"last segment, same project, different bucket", "9656af6e-2d9c-42fa-91f2-bfd516a722d7", metabase.LastSegmentIndex, "mockBucketName1", "mockObjectName2", false, true},
		{"different project", "9656af6e-2d9c-42fa-91f2-bfd516a722d1", 0, "mockBucketName", "mockObjectName", false, false},
	}

	testplanet.Run(t, testplanet.Config{
		SatelliteCount: 1, StorageNodeCount: 6, UplinkCount: 1,
	}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
		satellitePeer := planet.Satellites[0]
		redundancyScheme := satelliteRS(t, satellitePeer)
		// Accumulates the expectation across subtests; keyed by bucket location.
		expectedBucketTallies := make(map[metabase.BucketLocation]*accounting.BucketTally)
		for _, tt := range testCases {
			tt := tt // avoid scopelint error
			t.Run(tt.name, func(t *testing.T) {
				projectID, err := uuid.FromString(tt.project)
				require.NoError(t, err)

				// setup: create a pointer and save it to pointerDB
				pointer, err := makePointer(planet.StorageNodes, redundancyScheme, int64(20), tt.inline)
				require.NoError(t, err)

				metainfo := satellitePeer.Metainfo.Service
				location := metabase.SegmentLocation{
					ProjectID:  projectID,
					BucketName: tt.bucketName,
					Index:      tt.segmentIndex,
					ObjectKey:  metabase.ObjectKey(tt.objectName),
				}
				err = metainfo.Put(ctx, location.Encode(), pointer)
				require.NoError(t, err)

				// Fold this pointer's contribution into the running
				// expectation for its bucket.
				bucketLocation := metabase.BucketLocation{
					ProjectID:  projectID,
					BucketName: tt.bucketName,
				}
				newTally := addBucketTally(expectedBucketTallies[bucketLocation], tt.inline, tt.last)
				newTally.BucketName = tt.bucketName
				newTally.ProjectID = projectID
				expectedBucketTallies[bucketLocation] = newTally

				// A full metainfo loop pass should observe every pointer
				// stored so far, not just the one added in this subtest.
				obs := tally.NewObserver(satellitePeer.Log.Named("observer"), time.Now())
				err = satellitePeer.Metainfo.Loop.Join(ctx, obs)
				require.NoError(t, err)
				require.Equal(t, expectedBucketTallies, obs.Bucket)
			})
		}
	})
}
2019-06-13 17:58:40 +01:00
2020-04-10 18:35:58 +01:00
func TestTallyIgnoresExpiredPointers ( t * testing . T ) {
testplanet . Run ( t , testplanet . Config {
SatelliteCount : 1 , StorageNodeCount : 6 , UplinkCount : 1 ,
} , func ( t * testing . T , ctx * testcontext . Context , planet * testplanet . Planet ) {
satellitePeer := planet . Satellites [ 0 ]
2020-10-27 17:34:59 +00:00
redundancyScheme := satelliteRS ( t , satellitePeer )
2020-04-10 18:35:58 +01:00
2020-09-03 14:54:56 +01:00
projectID , err := uuid . FromString ( "9656af6e-2d9c-42fa-91f2-bfd516a722d7" )
require . NoError ( t , err )
2020-04-10 18:35:58 +01:00
bucket := "bucket"
// setup: create an expired pointer and save it to pointerDB
2020-10-27 06:59:14 +00:00
pointer , err := makePointer ( planet . StorageNodes , redundancyScheme , int64 ( 2 ) , false )
require . NoError ( t , err )
2020-04-10 18:35:58 +01:00
pointer . ExpirationDate = time . Now ( ) . Add ( - 24 * time . Hour )
metainfo := satellitePeer . Metainfo . Service
2020-09-03 14:54:56 +01:00
location := metabase . SegmentLocation {
ProjectID : projectID ,
BucketName : bucket ,
Index : metabase . LastSegmentIndex ,
ObjectKey : metabase . ObjectKey ( "object/name" ) ,
}
err = metainfo . Put ( ctx , location . Encode ( ) , pointer )
2020-04-10 18:35:58 +01:00
require . NoError ( t , err )
obs := tally . NewObserver ( satellitePeer . Log . Named ( "observer" ) , time . Now ( ) )
err = satellitePeer . Metainfo . Loop . Join ( ctx , obs )
require . NoError ( t , err )
// there should be no observed buckets because all of the pointers are expired
2020-08-31 11:14:20 +01:00
require . Equal ( t , obs . Bucket , map [ metabase . BucketLocation ] * accounting . BucketTally { } )
2020-04-10 18:35:58 +01:00
} )
}
2019-10-31 17:27:38 +00:00
func TestTallyLiveAccounting ( t * testing . T ) {
testplanet . Run ( t , testplanet . Config {
SatelliteCount : 1 , StorageNodeCount : 6 , UplinkCount : 1 ,
} , func ( t * testing . T , ctx * testcontext . Context , planet * testplanet . Planet ) {
tally := planet . Satellites [ 0 ] . Accounting . Tally
2020-04-16 13:12:46 +01:00
projectID := planet . Uplinks [ 0 ] . Projects [ 0 ] . ID
2019-10-31 17:27:38 +00:00
tally . Loop . Pause ( )
expectedData := testrand . Bytes ( 5 * memory . MB )
err := planet . Uplinks [ 0 ] . Upload ( ctx , planet . Satellites [ 0 ] , "testbucket" , "test/path" , expectedData )
require . NoError ( t , err )
key , err := planet . Satellites [ 0 ] . Metainfo . Database . List ( ctx , nil , 10 )
require . NoError ( t , err )
require . Len ( t , key , 1 )
2020-09-03 14:54:56 +01:00
ptr , err := planet . Satellites [ 0 ] . Metainfo . Service . Get ( ctx , metabase . SegmentKey ( key [ 0 ] ) )
2019-10-31 17:27:38 +00:00
require . NoError ( t , err )
require . NotNil ( t , ptr )
segmentSize := ptr . GetSegmentSize ( )
tally . Loop . TriggerWait ( )
expectedSize := segmentSize
total , err := planet . Satellites [ 0 ] . Accounting . ProjectUsage . GetProjectStorageTotals ( ctx , projectID )
require . NoError ( t , err )
require . Equal ( t , expectedSize , total )
for i := 0 ; i < 5 ; i ++ {
err := planet . Uplinks [ 0 ] . Upload ( ctx , planet . Satellites [ 0 ] , "testbucket" , fmt . Sprintf ( "test/path/%d" , i ) , expectedData )
require . NoError ( t , err )
tally . Loop . TriggerWait ( )
expectedSize += segmentSize
total , err := planet . Satellites [ 0 ] . Accounting . ProjectUsage . GetProjectStorageTotals ( ctx , projectID )
require . NoError ( t , err )
require . Equal ( t , expectedSize , total )
}
} )
}
2020-01-29 14:34:12 +00:00
func TestTallyEmptyProjectUpdatesLiveAccounting ( t * testing . T ) {
testplanet . Run ( t , testplanet . Config {
SatelliteCount : 1 , StorageNodeCount : 6 , UplinkCount : 2 ,
} , func ( t * testing . T , ctx * testcontext . Context , planet * testplanet . Planet ) {
planet . Satellites [ 0 ] . Accounting . Tally . Loop . Pause ( )
2020-04-16 13:12:46 +01:00
project1 := planet . Uplinks [ 1 ] . Projects [ 0 ] . ID
2020-01-29 14:34:12 +00:00
data := testrand . Bytes ( 1 * memory . MB )
// we need an extra bucket with data for this test. If no buckets are found at all,
// the update block is skipped in tally
err := planet . Uplinks [ 0 ] . Upload ( ctx , planet . Satellites [ 0 ] , "bucket" , "test" , data )
require . NoError ( t , err )
err = planet . Uplinks [ 1 ] . Upload ( ctx , planet . Satellites [ 0 ] , "bucket" , "test" , data )
require . NoError ( t , err )
planet . Satellites [ 0 ] . Accounting . Tally . Loop . TriggerWait ( )
planet . Satellites [ 0 ] . Accounting . Tally . Loop . Pause ( )
total , err := planet . Satellites [ 0 ] . Accounting . ProjectUsage . GetProjectStorageTotals ( ctx , project1 )
require . NoError ( t , err )
require . True ( t , total >= int64 ( len ( data ) ) )
err = planet . Uplinks [ 1 ] . DeleteObject ( ctx , planet . Satellites [ 0 ] , "bucket" , "test" )
require . NoError ( t , err )
planet . Satellites [ 0 ] . Accounting . Tally . Loop . TriggerWait ( )
p1Total , err := planet . Satellites [ 0 ] . Accounting . ProjectUsage . GetProjectStorageTotals ( ctx , project1 )
require . NoError ( t , err )
require . Zero ( t , p1Total )
} )
}
2019-06-20 20:15:13 +01:00
// addBucketTally creates a new expected bucket tally based on the
2020-07-16 15:18:02 +01:00
// pointer that was just created for the test case.
2019-06-20 20:15:13 +01:00
func addBucketTally ( existingTally * accounting . BucketTally , inline , last bool ) * accounting . BucketTally {
// if there is already an existing tally for this project and bucket, then
// add the new pointer data to the existing tally
if existingTally != nil {
2020-10-27 06:59:14 +00:00
existingTally . MetadataSize += int64 ( 2 )
2019-06-20 20:15:13 +01:00
existingTally . RemoteSegments ++
2020-10-27 06:59:14 +00:00
existingTally . RemoteBytes += int64 ( 20 )
2019-06-20 20:15:13 +01:00
return existingTally
}
2019-06-13 17:58:40 +01:00
2019-06-20 20:15:13 +01:00
// if the pointer was inline, create a tally with inline info
if inline {
2019-10-04 20:09:52 +01:00
return & accounting . BucketTally {
2019-09-13 14:51:41 +01:00
ObjectCount : int64 ( 1 ) ,
2019-06-20 20:15:13 +01:00
InlineSegments : int64 ( 1 ) ,
2020-10-27 06:59:14 +00:00
InlineBytes : int64 ( 20 ) ,
MetadataSize : int64 ( 2 ) ,
2019-06-13 17:58:40 +01:00
}
2019-06-20 20:15:13 +01:00
}
2019-06-13 17:58:40 +01:00
2019-06-20 20:15:13 +01:00
// if the pointer was remote, create a tally with remote info
2019-10-04 20:09:52 +01:00
newRemoteTally := & accounting . BucketTally {
2019-06-20 20:15:13 +01:00
RemoteSegments : int64 ( 1 ) ,
2020-10-27 06:59:14 +00:00
RemoteBytes : int64 ( 20 ) ,
MetadataSize : int64 ( 2 ) ,
2019-06-20 20:15:13 +01:00
}
2019-06-18 03:20:40 +01:00
2019-06-20 20:15:13 +01:00
if last {
2019-09-13 14:51:41 +01:00
newRemoteTally . ObjectCount ++
2019-06-20 20:15:13 +01:00
}
2019-06-18 03:20:40 +01:00
2019-10-04 20:09:52 +01:00
return newRemoteTally
2019-06-20 20:15:13 +01:00
}
2019-06-13 17:58:40 +01:00
2020-07-16 15:18:02 +01:00
// makePointer creates a pointer.
2020-10-27 06:59:14 +00:00
func makePointer ( storageNodes [ ] * testplanet . StorageNode , rs storj . RedundancyScheme , segmentSize int64 , inline bool ) ( * pb . Pointer , error ) {
metadata , err := pb . Marshal ( & pb . StreamMeta { NumberOfSegments : 1 } )
if err != nil {
return nil , err
}
2019-06-20 20:15:13 +01:00
if inline {
inlinePointer := & pb . Pointer {
2019-07-08 23:16:50 +01:00
CreationDate : time . Now ( ) ,
2019-06-20 20:15:13 +01:00
Type : pb . Pointer_INLINE ,
InlineSegment : make ( [ ] byte , segmentSize ) ,
SegmentSize : segmentSize ,
2020-10-27 06:59:14 +00:00
Metadata : metadata ,
2019-06-20 20:15:13 +01:00
}
2020-10-27 06:59:14 +00:00
return inlinePointer , nil
2019-06-20 20:15:13 +01:00
}
2019-06-13 17:58:40 +01:00
2019-10-04 11:05:25 +01:00
pieces := make ( [ ] * pb . RemotePiece , rs . TotalShares )
for i := range pieces {
pieces [ i ] = & pb . RemotePiece {
2019-06-20 20:15:13 +01:00
PieceNum : int32 ( i ) ,
2019-10-04 11:05:25 +01:00
NodeId : storageNodes [ i ] . ID ( ) ,
}
2019-06-20 20:15:13 +01:00
}
2019-04-09 14:48:35 +01:00
2019-10-04 20:09:52 +01:00
return & pb . Pointer {
2019-07-08 23:16:50 +01:00
CreationDate : time . Now ( ) ,
Type : pb . Pointer_REMOTE ,
2019-06-20 20:15:13 +01:00
Remote : & pb . RemoteSegment {
2019-09-25 16:51:12 +01:00
RootPieceId : storj . PieceID { 0xFF } ,
2019-06-20 20:15:13 +01:00
Redundancy : & pb . RedundancyScheme {
Type : pb . RedundancyScheme_RS ,
MinReq : int32 ( rs . RequiredShares ) ,
Total : int32 ( rs . TotalShares ) ,
RepairThreshold : int32 ( rs . RepairShares ) ,
SuccessThreshold : int32 ( rs . OptimalShares ) ,
ErasureShareSize : rs . ShareSize ,
} ,
RemotePieces : pieces ,
} ,
SegmentSize : segmentSize ,
2020-10-27 06:59:14 +00:00
Metadata : metadata ,
} , nil
2019-04-09 14:48:35 +01:00
}
func correctRedundencyScheme ( shareCount int , uplinkRS storj . RedundancyScheme ) bool {
// The shareCount should be a value between RequiredShares and TotalShares where
// RequiredShares is the min number of shares required to recover a segment and
// TotalShares is the number of shares to encode
2019-10-04 20:09:52 +01:00
return int ( uplinkRS . RepairShares ) <= shareCount && shareCount <= int ( uplinkRS . TotalShares )
2019-04-09 14:48:35 +01:00
}
2020-05-26 09:05:43 +01:00
2020-10-27 17:34:59 +00:00
func satelliteRS ( t * testing . T , satellite * testplanet . Satellite ) storj . RedundancyScheme {
rs := satellite . Config . Metainfo . RS
2020-05-26 09:05:43 +01:00
return storj . RedundancyScheme {
2020-10-27 17:34:59 +00:00
RequiredShares : int16 ( rs . Min ) ,
RepairShares : int16 ( rs . Repair ) ,
OptimalShares : int16 ( rs . Success ) ,
TotalShares : int16 ( rs . Total ) ,
ShareSize : rs . ErasureShareSize . Int32 ( ) ,
2020-05-26 09:05:43 +01:00
}
}