Always encrypt inline segments without padding (#2183)

This commit is contained in:
Jess G 2019-06-19 01:11:27 -07:00 committed by Kaloyan Raev
parent ad8cad4909
commit f0f59a5577
8 changed files with 41 additions and 29 deletions

View File

@ -184,7 +184,7 @@ func (p *Project) OpenBucket(ctx context.Context, bucketName string, access *Enc
}
segmentStore := segments.NewSegmentStore(p.metainfo, ec, rs, p.maxInlineSize.Int(), maxEncryptedSegmentSize)
streamStore, err := streams.NewStreamStore(segmentStore, cfg.Volatile.SegmentsSize.Int64(), &access.Key, int(encryptionScheme.BlockSize), encryptionScheme.Cipher)
streamStore, err := streams.NewStreamStore(segmentStore, cfg.Volatile.SegmentsSize.Int64(), &access.Key, int(encryptionScheme.BlockSize), encryptionScheme.Cipher, p.maxInlineSize.Int())
if err != nil {
return nil, err
}

View File

@ -167,8 +167,9 @@ func (u *Uplink) OpenProject(ctx context.Context, satelliteAddr string, apiKey A
// TODO: fix before the final alpha network wipe
encryptionKey = new(storj.Key)
}
streams, err := streams.NewStreamStore(segments, maxBucketMetaSize.Int64(),
encryptionKey, memory.KiB.Int(), storj.AESGCM)
streams, err := streams.NewStreamStore(segments, maxBucketMetaSize.Int64(), encryptionKey,
memory.KiB.Int(), storj.AESGCM, maxBucketMetaSize.Int(),
)
if err != nil {
return nil, Error.New("failed to create stream store: %v", err)
}

View File

@ -77,9 +77,11 @@ func TestOnlyInline(t *testing.T) {
require.NoError(t, err)
// Setup: get the expected size of the data that will be stored in pointer
uplinkConfig := uplink.GetConfig(planet.Satellites[0])
expectedTotalBytes, err := encryption.CalcEncryptedSize(int64(len(expectedData)), uplinkConfig.GetEncryptionScheme())
require.NoError(t, err)
// Since the data is small enough to be stored inline, when it is encrypted, we only
// add 16 bytes of encryption authentication overhead. No encryption block
// padding will be added since we are not chunking data that we store inline.
const encryptionAuthOverhead = 16 // bytes
expectedTotalBytes := len(expectedData) + encryptionAuthOverhead
// Setup: The data in this tally should match the pointer that the uplink.upload created
expectedBucketName := "testbucket"
@ -90,8 +92,8 @@ func TestOnlyInline(t *testing.T) {
InlineSegments: 1,
Files: 1,
InlineFiles: 1,
Bytes: expectedTotalBytes,
InlineBytes: expectedTotalBytes,
Bytes: int64(expectedTotalBytes),
InlineBytes: int64(expectedTotalBytes),
MetadataSize: 111, // brittle, this is hardcoded since it's too difficult to get this value programmatically
}

View File

@ -369,7 +369,8 @@ func newMetainfoParts(planet *testplanet.Planet) (*kvmetainfo.DB, buckets.Store,
const stripesPerBlock = 2
blockSize := stripesPerBlock * rs.StripeSize()
streams, err := streams.NewStreamStore(segments, 64*memory.MiB.Int64(), key, blockSize, storj.AESGCM)
inlineThreshold := 8 * memory.KiB.Int()
streams, err := streams.NewStreamStore(segments, 64*memory.MiB.Int64(), key, blockSize, storj.AESGCM, inlineThreshold)
if err != nil {
return nil, nil, nil, err
}

View File

@ -706,7 +706,8 @@ func initEnv(ctx context.Context, planet *testplanet.Planet) (minio.ObjectLayer,
copy(encKey[:], TestEncKey)
blockSize := rs.StripeSize()
streams, err := streams.NewStreamStore(segments, 64*memory.MiB.Int64(), encKey, blockSize, storj.AESGCM)
inlineThreshold := 4 * memory.KiB.Int()
streams, err := streams.NewStreamStore(segments, 64*memory.MiB.Int64(), encKey, blockSize, storj.AESGCM, inlineThreshold)
if err != nil {
return nil, nil, nil, err
}

View File

@ -59,15 +59,16 @@ type Store interface {
// streamStore is a store for streams
type streamStore struct {
segments segments.Store
segmentSize int64
rootKey *storj.Key
encBlockSize int
cipher storj.Cipher
segments segments.Store
segmentSize int64
rootKey *storj.Key
encBlockSize int
cipher storj.Cipher
inlineThreshold int
}
// NewStreamStore validates its configuration arguments and returns a new streams Store.
func NewStreamStore(segments segments.Store, segmentSize int64, rootKey *storj.Key, encBlockSize int, cipher storj.Cipher) (Store, error) {
func NewStreamStore(segments segments.Store, segmentSize int64, rootKey *storj.Key, encBlockSize int, cipher storj.Cipher, inlineThreshold int) (Store, error) {
if segmentSize <= 0 {
return nil, errs.New("segment size must be larger than 0")
}
@ -79,11 +80,12 @@ func NewStreamStore(segments segments.Store, segmentSize int64, rootKey *storj.K
}
return &streamStore{
segments: segments,
segmentSize: segmentSize,
rootKey: rootKey,
encBlockSize: encBlockSize,
cipher: cipher,
segments: segments,
segmentSize: segmentSize,
rootKey: rootKey,
encBlockSize: encBlockSize,
cipher: cipher,
inlineThreshold: inlineThreshold,
}, nil
}
@ -168,12 +170,13 @@ func (s *streamStore) upload(ctx context.Context, path storj.Path, pathCipher st
sizeReader := NewSizeReader(eofReader)
segmentReader := io.LimitReader(sizeReader, s.segmentSize)
peekReader := segments.NewPeekThresholdReader(segmentReader)
largeData, err := peekReader.IsLargerThan(encrypter.InBlockSize())
// If the data is larger than the inline threshold size, then it will be a remote segment
isRemote, err := peekReader.IsLargerThan(s.inlineThreshold)
if err != nil {
return Meta{}, currentSegment, err
}
var transformedReader io.Reader
if largeData {
if isRemote {
paddedReader := eestream.PadReader(ioutil.NopCloser(peekReader), encrypter.InBlockSize())
transformedReader = encryption.TransformReader(paddedReader, encrypter, 0)
} else {

View File

@ -89,7 +89,7 @@ func TestStreamStoreMeta(t *testing.T) {
Meta(gomock.Any(), gomock.Any()).
Return(test.segmentMeta, test.segmentError)
streamStore, err := NewStreamStore(mockSegmentStore, 10, new(storj.Key), 10, storj.AESGCM)
streamStore, err := NewStreamStore(mockSegmentStore, 10, new(storj.Key), 10, storj.AESGCM, 4)
if err != nil {
t.Fatal(err)
}
@ -114,6 +114,7 @@ func TestStreamStorePut(t *testing.T) {
segSize = 10
pathCipher = storj.AESGCM
dataCipher = storj.Unencrypted
inlineSize = 0
)
staticTime := time.Now()
@ -168,7 +169,7 @@ func TestStreamStorePut(t *testing.T) {
Delete(gomock.Any(), gomock.Any()).
Return(test.segmentError)
streamStore, err := NewStreamStore(mockSegmentStore, segSize, new(storj.Key), encBlockSize, dataCipher)
streamStore, err := NewStreamStore(mockSegmentStore, segSize, new(storj.Key), encBlockSize, dataCipher, inlineSize)
if err != nil {
t.Fatal(err)
}
@ -205,6 +206,7 @@ func TestStreamStoreGet(t *testing.T) {
const (
segSize = 10
inlineSize = 5
encBlockSize = 10
dataCipher = storj.Unencrypted
pathCipher = storj.AESGCM
@ -277,7 +279,7 @@ func TestStreamStoreGet(t *testing.T) {
gomock.InOrder(calls...)
streamStore, err := NewStreamStore(mockSegmentStore, segSize, new(storj.Key), encBlockSize, dataCipher)
streamStore, err := NewStreamStore(mockSegmentStore, segSize, new(storj.Key), encBlockSize, dataCipher, inlineSize)
if err != nil {
t.Fatal(err)
}
@ -326,7 +328,7 @@ func TestStreamStoreDelete(t *testing.T) {
Delete(gomock.Any(), gomock.Any()).
Return(test.segmentError)
streamStore, err := NewStreamStore(mockSegmentStore, 10, new(storj.Key), 10, 0)
streamStore, err := NewStreamStore(mockSegmentStore, 10, new(storj.Key), 10, 0, 0)
if err != nil {
t.Fatal(err)
}
@ -370,7 +372,7 @@ func TestStreamStoreList(t *testing.T) {
List(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).
Return(test.segments, test.segmentMore, test.segmentError)
streamStore, err := NewStreamStore(mockSegmentStore, 10, new(storj.Key), 10, 0)
streamStore, err := NewStreamStore(mockSegmentStore, 10, new(storj.Key), 10, 0, 0)
if err != nil {
t.Fatal(err)
}

View File

@ -126,7 +126,9 @@ func (c Config) GetMetainfo(ctx context.Context, identity *identity.FullIdentity
return nil, nil, Error.Wrap(err)
}
streams, err := streams.NewStreamStore(segments, c.Client.SegmentSize.Int64(), key, int(blockSize), storj.Cipher(c.Enc.DataType))
streams, err := streams.NewStreamStore(segments, c.Client.SegmentSize.Int64(), key,
int(blockSize), storj.Cipher(c.Enc.DataType), c.Client.MaxInlineSize.Int(),
)
if err != nil {
return nil, nil, Error.New("failed to create stream store: %v", err)
}