// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.

package streams

import (
	"context"
	"crypto/rand"
	"io"
	"io/ioutil"
	"strings"
	"time"

	"github.com/gogo/protobuf/proto"
	"github.com/zeebo/errs"
	"go.uber.org/zap"
	monkit "gopkg.in/spacemonkeygo/monkit.v2"

	"storj.io/storj/pkg/encryption"
	"storj.io/storj/pkg/paths"
	"storj.io/storj/pkg/pb"
	"storj.io/storj/pkg/ranger"
	"storj.io/storj/pkg/storj"
	"storj.io/storj/uplink/eestream"
	"storj.io/storj/uplink/metainfo"
	"storj.io/storj/uplink/storage/segments"
)

var mon = monkit.Package()

// Meta info about a stream
type Meta struct {
	Modified   time.Time
	Expiration time.Time
	Size       int64
	Data       []byte
}

// numberOfSegments returns the number of segments in the stream, preferring
// the count in StreamMeta and falling back to the deprecated StreamInfo field
// when StreamMeta does not record one.
func numberOfSegments(stream *pb.StreamInfo, streamMeta *pb.StreamMeta) int64 {
	if streamMeta.NumberOfSegments > 0 {
		return streamMeta.NumberOfSegments
	}
	return stream.DeprecatedNumberOfSegments
}

// convertMeta converts segment metadata to stream metadata
func convertMeta(modified, expiration time.Time, stream pb.StreamInfo, streamMeta pb.StreamMeta) Meta {
	return Meta{
		Modified:   modified,
		Expiration: expiration,
		Size:       ((numberOfSegments(&stream, &streamMeta) - 1) * stream.SegmentsSize) + stream.LastSegmentSize,
		Data:       stream.Metadata,
	}
}
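
// A worked example of the size arithmetic above (the values are hypothetical,
// not defaults): every segment except the last is exactly SegmentsSize bytes,
// so a stream of 3 segments with 64 MiB segments and a 1 MiB tail has size
// (3-1)*64MiB + 1MiB.
//
//	stream := pb.StreamInfo{SegmentsSize: 64 << 20, LastSegmentSize: 1 << 20}
//	streamMeta := pb.StreamMeta{NumberOfSegments: 3}
//	size := ((numberOfSegments(&stream, &streamMeta) - 1) * stream.SegmentsSize) + stream.LastSegmentSize
//	// size == 2*(64<<20) + (1<<20)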

// typedStore describes the interface a streams store must satisfy to be a
// store over typed paths.
type typedStore interface {
	Meta(ctx context.Context, path Path, pathCipher storj.CipherSuite) (Meta, error)
	Get(ctx context.Context, path Path, pathCipher storj.CipherSuite) (ranger.Ranger, Meta, error)
	Put(ctx context.Context, path Path, pathCipher storj.CipherSuite, data io.Reader, metadata []byte, expiration time.Time) (Meta, error)
	Delete(ctx context.Context, path Path, pathCipher storj.CipherSuite) error
	List(ctx context.Context, prefix Path, startAfter string, pathCipher storj.CipherSuite, recursive bool, limit int, metaFlags uint32) (items []ListItem, more bool, err error)
}

// streamStore is a store for streams. It implements typedStore as part of an ongoing migration
// to use typed paths. See the shim for the store that the rest of the world interacts with.
type streamStore struct {
	metainfo                *metainfo.Client
	segments                segments.Store
	segmentSize             int64
	encStore                *encryption.Store
	encBlockSize            int
	cipher                  storj.CipherSuite
	inlineThreshold         int
	maxEncryptedSegmentSize int64
}

// newTypedStreamStore constructs a typedStore backed by a streamStore.
func newTypedStreamStore(metainfo *metainfo.Client, segments segments.Store, segmentSize int64, encStore *encryption.Store, encBlockSize int, cipher storj.CipherSuite, inlineThreshold int, maxEncryptedSegmentSize int64) (typedStore, error) {
	if segmentSize <= 0 {
		return nil, errs.New("segment size must be larger than 0")
	}
	if encBlockSize <= 0 {
		return nil, errs.New("encryption block size must be larger than 0")
	}

	return &streamStore{
		metainfo:                metainfo,
		segments:                segments,
		segmentSize:             segmentSize,
		encStore:                encStore,
		encBlockSize:            encBlockSize,
		cipher:                  cipher,
		inlineThreshold:         inlineThreshold,
		maxEncryptedSegmentSize: maxEncryptedSegmentSize,
	}, nil
}
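
// A minimal construction sketch, assuming a connected metainfo client and
// segments store; every numeric value below is hypothetical, not a project
// default:
//
//	store, err := newTypedStreamStore(
//		metainfoClient, segmentsStore,
//		64<<20,          // segmentSize: 64 MiB plaintext segments
//		encStore,
//		1024,            // encBlockSize
//		storj.EncAESGCM, // cipher
//		4<<10,           // inlineThreshold: 4 KiB
//		67<<20,          // maxEncryptedSegmentSize
//	)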

// Put breaks up data as it comes in into s.segmentSize length pieces, then
// stores the first piece at s0/<path>, the second piece at s1/<path>, and the
// *last* piece at l/<path>. It stores the given metadata, along with the
// number of segments, in a new protobuf, in the metadata of l/<path>.
func (s *streamStore) Put(ctx context.Context, path Path, pathCipher storj.CipherSuite, data io.Reader, metadata []byte, expiration time.Time) (m Meta, err error) {
	defer mon.Task()(&ctx)(&err)

	// was the file previously uploaded?
	err = s.Delete(ctx, path, pathCipher)
	if err != nil && !storj.ErrObjectNotFound.Has(err) {
		// something went wrong while checking for an existing
		// file with the same name
		return Meta{}, err
	}

	return s.upload(ctx, path, pathCipher, data, metadata, expiration)
}

// upload registers segments in metainfo and uploads them to storage nodes.
//
// If there is an error, it cleans up any uploaded segment before returning.
func (s *streamStore) upload(ctx context.Context, path Path, pathCipher storj.CipherSuite, data io.Reader, metadata []byte, expiration time.Time) (_ Meta, err error) {
	defer mon.Task()(&ctx)(&err)

	derivedKey, err := encryption.DeriveContentKey(path.Bucket(), path.UnencryptedPath(), s.encStore)
	if err != nil {
		return Meta{}, err
	}
	encPath, err := encryption.EncryptPath(path.Bucket(), path.UnencryptedPath(), pathCipher, s.encStore)
	if err != nil {
		return Meta{}, err
	}

	beginObjectReq := &metainfo.BeginObjectParams{
		Bucket:        []byte(path.Bucket()),
		EncryptedPath: []byte(encPath.Raw()),
		ExpiresAt:     expiration,
	}

	var (
		committedSegments int64
		streamID          storj.StreamID
	)
	defer func() {
		if err != nil {
			s.cancelHandler(context.Background(), streamID, committedSegments, path, pathCipher)
			return
		}

		select {
		case <-ctx.Done():
			s.cancelHandler(context.Background(), streamID, committedSegments, path, pathCipher)
		default:
		}
	}()

	var (
		currentSegment       int64
		contentKey           storj.Key
		prevSegmentCommitReq *metainfo.CommitSegmentParams
		streamSize           int64
		lastSegmentSize      int64
		encryptedKey         []byte
		keyNonce             storj.Nonce
	)

	eofReader := NewEOFReader(data)
	for !eofReader.isEOF() && !eofReader.hasError() {
		// generate random key for encrypting the segment's content
		_, err := rand.Read(contentKey[:])
		if err != nil {
			return Meta{}, err
		}

		// Initialize the content nonce with the current segment index
		// incremented by 1, since at this point the segment has not been
		// uploaded yet.
		// The increment by 1 also avoids nonce reuse with the metadata
		// encryption, which is encrypted with the zero nonce.
		contentNonce := storj.Nonce{}
		_, err = encryption.Increment(&contentNonce, currentSegment+1)
		if err != nil {
			return Meta{}, err
		}
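
		// The resulting nonce schedule, sketched (indices illustrative):
		// segment 0 encrypts under nonce 1, segment 1 under nonce 2, and so
		// on, while the stream metadata at the end of upload encrypts under
		// the zero nonce:
		//
		//	var nonce storj.Nonce
		//	_, _ = encryption.Increment(&nonce, 2+1) // content nonce for segment index 2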

		// generate random nonce for encrypting the content key
		_, err = rand.Read(keyNonce[:])
		if err != nil {
			return Meta{}, err
		}

		encryptedKey, err = encryption.EncryptKey(&contentKey, s.cipher, derivedKey, &keyNonce)
		if err != nil {
			return Meta{}, err
		}

		sizeReader := NewSizeReader(eofReader)
		segmentReader := io.LimitReader(sizeReader, s.segmentSize)
		peekReader := segments.NewPeekThresholdReader(segmentReader)
		// If the data is larger than the inline threshold size, then it will be a remote segment
		isRemote, err := peekReader.IsLargerThan(s.inlineThreshold)
		if err != nil {
			return Meta{}, err
		}

		segmentEncryption := storj.SegmentEncryption{}
		if s.cipher != storj.EncNull {
			segmentEncryption = storj.SegmentEncryption{
				EncryptedKey:      encryptedKey,
				EncryptedKeyNonce: keyNonce,
			}
		}

		if isRemote {
			encrypter, err := encryption.NewEncrypter(s.cipher, &contentKey, &contentNonce, s.encBlockSize)
			if err != nil {
				return Meta{}, err
			}

			paddedReader := eestream.PadReader(ioutil.NopCloser(peekReader), encrypter.InBlockSize())
			transformedReader := encryption.TransformReader(paddedReader, encrypter, 0)

			beginSegment := &metainfo.BeginSegmentParams{
				MaxOrderLimit: s.maxEncryptedSegmentSize,
				Position: storj.SegmentPosition{
					Index: int32(currentSegment),
				},
			}

			var responses []metainfo.BatchResponse
			if currentSegment == 0 {
				responses, err = s.metainfo.Batch(ctx, beginObjectReq, beginSegment)
				if err != nil {
					return Meta{}, err
				}
				objResponse, err := responses[0].BeginObject()
				if err != nil {
					return Meta{}, err
				}
				streamID = objResponse.StreamID
			} else {
				beginSegment.StreamID = streamID
				responses, err = s.metainfo.Batch(ctx, prevSegmentCommitReq, beginSegment)
				if len(responses) > 0 {
					// We increment because the first request has succeeded
					committedSegments++
				}
				if err != nil {
					return Meta{}, err
				}
			}

			segResponse, err := responses[1].BeginSegment()
			if err != nil {
				return Meta{}, err
			}
			segmentID := segResponse.SegmentID
			limits := segResponse.Limits
			piecePrivateKey := segResponse.PiecePrivateKey

			uploadResults, size, err := s.segments.Put(ctx, transformedReader, expiration, limits, piecePrivateKey)
			if err != nil {
				return Meta{}, err
			}

			prevSegmentCommitReq = &metainfo.CommitSegmentParams{
				SegmentID:         segmentID,
				SizeEncryptedData: size,
				Encryption:        segmentEncryption,
				UploadResult:      uploadResults,
			}
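
			// Note that this segment's CommitSegment is intentionally
			// deferred: it is sent in the same Batch as the next segment's
			// BeginSegment (or together with CommitObject for the final
			// segment), saving a round trip per segment.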
		} else {
			data, err := ioutil.ReadAll(peekReader)
			if err != nil {
				return Meta{}, err
			}
			cipherData, err := encryption.Encrypt(data, s.cipher, &contentKey, &contentNonce)
			if err != nil {
				return Meta{}, err
			}

			makeInlineSegment := &metainfo.MakeInlineSegmentParams{
				Position: storj.SegmentPosition{
					Index: int32(currentSegment),
				},
				Encryption:          segmentEncryption,
				EncryptedInlineData: cipherData,
			}
			if currentSegment == 0 {
				responses, err := s.metainfo.Batch(ctx, beginObjectReq, makeInlineSegment)
				if err != nil {
					return Meta{}, err
				}
				objResponse, err := responses[0].BeginObject()
				if err != nil {
					return Meta{}, err
				}
				streamID = objResponse.StreamID
			} else {
				makeInlineSegment.StreamID = streamID
				err = s.metainfo.MakeInlineSegment(ctx, *makeInlineSegment)
				if err != nil {
					return Meta{}, err
				}
			}

			committedSegments++
		}

		lastSegmentSize = sizeReader.Size()
		streamSize += lastSegmentSize
		currentSegment++
	}

	totalSegments := currentSegment

	if eofReader.hasError() {
		return Meta{}, eofReader.err
	}

	streamInfo, err := proto.Marshal(&pb.StreamInfo{
		DeprecatedNumberOfSegments: totalSegments,
		SegmentsSize:               s.segmentSize,
		LastSegmentSize:            lastSegmentSize,
		Metadata:                   metadata,
	})
	if err != nil {
		return Meta{}, err
	}

	// encrypt metadata with the content encryption key and zero nonce
	encryptedStreamInfo, err := encryption.Encrypt(streamInfo, s.cipher, &contentKey, &storj.Nonce{})
	if err != nil {
		return Meta{}, err
	}

	streamMeta := pb.StreamMeta{
		NumberOfSegments:    totalSegments,
		EncryptedStreamInfo: encryptedStreamInfo,
		EncryptionType:      int32(s.cipher),
		EncryptionBlockSize: int32(s.encBlockSize),
	}

	if s.cipher != storj.EncNull {
		streamMeta.LastSegmentMeta = &pb.SegmentMeta{
			EncryptedKey: encryptedKey,
			KeyNonce:     keyNonce[:],
		}
	}

	objectMetadata, err := proto.Marshal(&streamMeta)
	if err != nil {
		return Meta{}, err
	}

	commitObject := metainfo.CommitObjectParams{
		StreamID:          streamID,
		EncryptedMetadata: objectMetadata,
	}
	if prevSegmentCommitReq != nil {
		var responses []metainfo.BatchResponse
		responses, err = s.metainfo.Batch(ctx, prevSegmentCommitReq, &commitObject)
		if len(responses) > 0 {
			// We increment because the first request has succeeded
			committedSegments++
		}
	} else {
		err = s.metainfo.CommitObject(ctx, commitObject)
	}
	if err != nil {
		return Meta{}, err
	}

	resultMeta := Meta{
		Expiration: expiration,
		Size:       streamSize,
		Data:       metadata,
	}

	return resultMeta, nil
}
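
// A minimal upload sketch through Put, assuming a constructed streamStore;
// the path, cipher, and payload below are illustrative:
//
//	meta, err := store.Put(ctx,
//		CreatePath("bucket", paths.NewUnencrypted("videos/cat.mp4")),
//		storj.EncAESGCM, bytes.NewReader(content), userMetadata,
//		time.Time{}, // no expiration
//	)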

// Get returns a ranger that knows what the overall size is (from l/<path>)
// and then returns the appropriate data from segments s0/<path>, s1/<path>,
// ..., l/<path>.
func (s *streamStore) Get(ctx context.Context, path Path, pathCipher storj.CipherSuite) (rr ranger.Ranger, meta Meta, err error) {
	defer mon.Task()(&ctx)(&err)

	encPath, err := encryption.EncryptPath(path.Bucket(), path.UnencryptedPath(), pathCipher, s.encStore)
	if err != nil {
		return nil, Meta{}, err
	}

	resps, err := s.metainfo.Batch(ctx,
		&metainfo.GetObjectParams{
			Bucket:        []byte(path.Bucket()),
			EncryptedPath: []byte(encPath.Raw()),
		},
		&metainfo.DownloadSegmentParams{
			Position: storj.SegmentPosition{
				Index: -1, // Request the last segment
			},
		},
	)
	if err != nil {
		return nil, Meta{}, err
	}

	if len(resps) != 2 {
		return nil, Meta{}, errs.New(
			"metainfo.Batch request returned an unexpected number of responses. Want: 2, got: %d", len(resps),
		)
	}

	var object storj.ObjectInfo
	{
		resp, err := resps[0].GetObject()
		if err != nil {
			return nil, Meta{}, err
		}

		object = resp.Info
	}

	var (
		info   storj.SegmentDownloadInfo
		limits []*pb.AddressedOrderLimit
	)
	{
		resp, err := resps[1].DownloadSegment()
		if err != nil {
			return nil, Meta{}, err
		}

		info = resp.Info
		limits = resp.Limits
	}

	lastSegmentRanger, err := s.segments.Ranger(ctx, info, limits, object.RedundancyScheme)
	if err != nil {
		return nil, Meta{}, err
	}

	streamInfo, streamMeta, err := TypedDecryptStreamInfo(ctx, object.Metadata, path, s.encStore)
	if err != nil {
		return nil, Meta{}, err
	}

	stream := pb.StreamInfo{}
	err = proto.Unmarshal(streamInfo, &stream)
	if err != nil {
		return nil, Meta{}, err
	}

	derivedKey, err := encryption.DeriveContentKey(path.Bucket(), path.UnencryptedPath(), s.encStore)
	if err != nil {
		return nil, Meta{}, err
	}

	var rangers []ranger.Ranger
	for i := int64(0); i < numberOfSegments(&stream, &streamMeta)-1; i++ {
		var contentNonce storj.Nonce
		_, err = encryption.Increment(&contentNonce, i+1)
		if err != nil {
			return nil, Meta{}, err
		}

		rangers = append(rangers, &lazySegmentRanger{
			metainfo:      s.metainfo,
			segments:      s.segments,
			streamID:      object.StreamID,
			segmentIndex:  int32(i),
			rs:            object.RedundancyScheme,
			size:          stream.SegmentsSize,
			derivedKey:    derivedKey,
			startingNonce: &contentNonce,
			encBlockSize:  int(streamMeta.EncryptionBlockSize),
			cipher:        storj.CipherSuite(streamMeta.EncryptionType),
		})
	}

	var contentNonce storj.Nonce
	_, err = encryption.Increment(&contentNonce, numberOfSegments(&stream, &streamMeta))
	if err != nil {
		return nil, Meta{}, err
	}

	encryptedKey, keyNonce := getEncryptedKeyAndNonce(streamMeta.LastSegmentMeta)
	decryptedLastSegmentRanger, err := decryptRanger(
		ctx,
		lastSegmentRanger,
		stream.LastSegmentSize,
		storj.CipherSuite(streamMeta.EncryptionType),
		derivedKey,
		encryptedKey,
		keyNonce,
		&contentNonce,
		int(streamMeta.EncryptionBlockSize),
	)
	if err != nil {
		return nil, Meta{}, err
	}

	rangers = append(rangers, decryptedLastSegmentRanger)
	catRangers := ranger.Concat(rangers...)
	meta = convertMeta(object.Modified, object.Expires, stream, streamMeta)
	return catRangers, meta, nil
}
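
// Get therefore downloads nothing up front beyond the last segment's order
// limits: every earlier segment is wrapped in a lazySegmentRanger and fetched
// only when read. A minimal read sketch (offsets hypothetical):
//
//	rr, _, err := store.Get(ctx, path, storj.EncAESGCM)
//	if err != nil {
//		// handle error
//	}
//	r, err := rr.Range(ctx, 1<<10, 4<<10) // 4 KiB starting at offset 1 KiB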

// Meta implements Store.Meta
func (s *streamStore) Meta(ctx context.Context, path Path, pathCipher storj.CipherSuite) (meta Meta, err error) {
	defer mon.Task()(&ctx)(&err)

	encPath, err := encryption.EncryptPath(path.Bucket(), path.UnencryptedPath(), pathCipher, s.encStore)
	if err != nil {
		return Meta{}, err
	}

	object, err := s.metainfo.GetObject(ctx, metainfo.GetObjectParams{
		Bucket:        []byte(path.Bucket()),
		EncryptedPath: []byte(encPath.Raw()),
	})
	if err != nil {
		return Meta{}, err
	}

	streamInfo, streamMeta, err := TypedDecryptStreamInfo(ctx, object.Metadata, path, s.encStore)
	if err != nil {
		return Meta{}, err
	}

	var stream pb.StreamInfo
	if err := proto.Unmarshal(streamInfo, &stream); err != nil {
		return Meta{}, err
	}

	return convertMeta(object.Modified, object.Expires, stream, streamMeta), nil
}

// Delete deletes all the segments of the object at the given path, listing
// them first and then removing them one by one.
func (s *streamStore) Delete(ctx context.Context, path Path, pathCipher storj.CipherSuite) (err error) {
	defer mon.Task()(&ctx)(&err)

	encPath, err := encryption.EncryptPath(path.Bucket(), path.UnencryptedPath(), pathCipher, s.encStore)
	if err != nil {
		return err
	}

	batchItems := []metainfo.BatchItem{
		&metainfo.BeginDeleteObjectParams{
			Bucket:        []byte(path.Bucket()),
			EncryptedPath: []byte(encPath.Raw()),
		},
		&metainfo.ListSegmentsParams{
			CursorPosition: storj.SegmentPosition{
				Index: 0,
			},
		},
	}

	resps, err := s.metainfo.Batch(ctx, batchItems...)
	if err != nil {
		return err
	}

	if len(resps) != 2 {
		return errs.New(
			"metainfo.Batch request returned an unexpected number of responses. Want: 2, got: %d", len(resps),
		)
	}

	delResp, err := resps[0].BeginDeleteObject()
	if err != nil {
		return err
	}

	listResp, err := resps[1].ListSegment()
	if err != nil {
		return err
	}

	// TODO handle listResp.More

	var errlist errs.Group
	for _, item := range listResp.Items {
		err = s.segments.Delete(ctx, delResp.StreamID, item.Position.Index)
		if err != nil {
			errlist.Add(err)
			continue
		}
	}

	return errlist.Err()
}

// ListItem is a single item in a listing
type ListItem struct {
	Path     string
	Meta     Meta
	IsPrefix bool
}

// pathForKey removes the trailing `/` from the raw path, which is required so
// the derived key matches the final list path (which also has the trailing
// encrypted `/` part of the path removed)
func pathForKey(raw string) paths.Unencrypted {
	return paths.NewUnencrypted(strings.TrimSuffix(raw, "/"))
}
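
// For example (paths hypothetical), "videos/" and "videos" derive the same
// listing key:
//
//	key, err := encryption.DerivePathKey(bucket, pathForKey("videos/"), encStore)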

// List all the paths inside l/, stripping off the l/ prefix
func (s *streamStore) List(ctx context.Context, prefix Path, startAfter string, pathCipher storj.CipherSuite, recursive bool, limit int, metaFlags uint32) (items []ListItem, more bool, err error) {
	defer mon.Task()(&ctx)(&err)

	// TODO use flags with listing
	// if metaFlags&meta.Size != 0 {
	// Calculating the stream's size also requires the user-defined metadata,
	// where stream store keeps info about the number of segments and their size.
	// metaFlags |= meta.UserDefined
	// }

	prefixKey, err := encryption.DerivePathKey(prefix.Bucket(), pathForKey(prefix.UnencryptedPath().Raw()), s.encStore)
	if err != nil {
		return nil, false, err
	}

	encPrefix, err := encryption.EncryptPath(prefix.Bucket(), prefix.UnencryptedPath(), pathCipher, s.encStore)
	if err != nil {
		return nil, false, err
	}

	// If the raw unencrypted path ends in a `/` we need to remove the final
	// section of the encrypted path. For example, if we are listing the path
	// `/bob/`, the encrypted path results in `enc("")/enc("bob")/enc("")`. This
	// is an incorrect list prefix; what we really want is `enc("")/enc("bob")`.
	if strings.HasSuffix(prefix.UnencryptedPath().Raw(), "/") {
		lastSlashIdx := strings.LastIndex(encPrefix.Raw(), "/")
		encPrefix = paths.NewEncrypted(encPrefix.Raw()[:lastSlashIdx])
	}

	// We have to encrypt startAfter but only if it doesn't contain a bucket.
	// It contains a bucket if and only if the prefix has no bucket. This is why it is a raw
	// string instead of a typed string: it's either a bucket or an unencrypted path component
	// and that isn't known at compile time.
	needsEncryption := prefix.Bucket() != ""
	if needsEncryption {
		startAfter, err = encryption.EncryptPathRaw(startAfter, pathCipher, prefixKey)
		if err != nil {
			return nil, false, err
		}
	}

	objects, more, err := s.metainfo.ListObjects(ctx, metainfo.ListObjectsParams{
		Bucket:          []byte(prefix.Bucket()),
		EncryptedPrefix: []byte(encPrefix.Raw()),
		EncryptedCursor: []byte(startAfter),
		Limit:           int32(limit),
		Recursive:       recursive,
	})
	if err != nil {
		return nil, false, err
	}

	items = make([]ListItem, len(objects))
	for i, item := range objects {
		var path Path
		var itemPath string

		if needsEncryption {
			itemPath, err = encryption.DecryptPathRaw(string(item.EncryptedPath), pathCipher, prefixKey)
			if err != nil {
				return nil, false, err
			}

			// TODO(jeff): this shouldn't be necessary if we handled trailing slashes
			// appropriately. there's some issues with list.
			fullPath := prefix.UnencryptedPath().Raw()
			if len(fullPath) > 0 && fullPath[len(fullPath)-1] != '/' {
				fullPath += "/"
			}
			fullPath += itemPath

			path = CreatePath(prefix.Bucket(), paths.NewUnencrypted(fullPath))
		} else {
			itemPath = string(item.EncryptedPath)
			path = CreatePath(string(item.EncryptedPath), paths.Unencrypted{})
		}

		streamInfo, streamMeta, err := TypedDecryptStreamInfo(ctx, item.EncryptedMetadata, path, s.encStore)
		if err != nil {
			return nil, false, err
		}

		var stream pb.StreamInfo
		if err := proto.Unmarshal(streamInfo, &stream); err != nil {
			return nil, false, err
		}

		newMeta := convertMeta(item.CreatedAt, item.ExpiresAt, stream, streamMeta)
		items[i] = ListItem{
			Path:     itemPath,
			Meta:     newMeta,
			IsPrefix: item.IsPrefix,
		}
	}

	return items, more, nil
}
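
// A minimal listing sketch (bucket and prefix hypothetical); passing 0 for
// metaFlags is fine while the flags are unused, per the TODO above:
//
//	items, more, err := store.List(ctx,
//		CreatePath("bucket", paths.NewUnencrypted("photos/")),
//		"", storj.EncAESGCM, false, 100, 0)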

// lazySegmentRanger defers fetching a segment's download info and order
// limits until the first Range call.
type lazySegmentRanger struct {
	ranger        ranger.Ranger
	metainfo      *metainfo.Client
	segments      segments.Store
	streamID      storj.StreamID
	segmentIndex  int32
	rs            storj.RedundancyScheme
	size          int64
	derivedKey    *storj.Key
	startingNonce *storj.Nonce
	encBlockSize  int
	cipher        storj.CipherSuite
}

// Size implements Ranger.Size.
func (lr *lazySegmentRanger) Size() int64 {
	return lr.size
}

// Range implements Ranger.Range to be lazily connected.
func (lr *lazySegmentRanger) Range(ctx context.Context, offset, length int64) (_ io.ReadCloser, err error) {
	defer mon.Task()(&ctx)(&err)
	if lr.ranger == nil {
		info, limits, err := lr.metainfo.DownloadSegment(ctx, metainfo.DownloadSegmentParams{
			StreamID: lr.streamID,
			Position: storj.SegmentPosition{
				Index: lr.segmentIndex,
			},
		})
		if err != nil {
			return nil, err
		}

		rr, err := lr.segments.Ranger(ctx, info, limits, lr.rs)
		if err != nil {
			return nil, err
		}

		encryptedKey, keyNonce := info.SegmentEncryption.EncryptedKey, info.SegmentEncryption.EncryptedKeyNonce
		lr.ranger, err = decryptRanger(ctx, rr, lr.size, lr.cipher, lr.derivedKey, encryptedKey, &keyNonce, lr.startingNonce, lr.encBlockSize)
		if err != nil {
			return nil, err
		}
	}
	return lr.ranger.Range(ctx, offset, length)
}

// decryptRanger returns a decrypted ranger of the given rr ranger.
func decryptRanger(ctx context.Context, rr ranger.Ranger, decryptedSize int64, cipher storj.CipherSuite, derivedKey *storj.Key, encryptedKey storj.EncryptedPrivateKey, encryptedKeyNonce, startingNonce *storj.Nonce, encBlockSize int) (decrypted ranger.Ranger, err error) {
	defer mon.Task()(&ctx)(&err)
	contentKey, err := encryption.DecryptKey(encryptedKey, cipher, derivedKey, encryptedKeyNonce)
	if err != nil {
		return nil, err
	}

	decrypter, err := encryption.NewDecrypter(cipher, contentKey, startingNonce, encBlockSize)
	if err != nil {
		return nil, err
	}

	var rd ranger.Ranger
	if rr.Size()%int64(decrypter.InBlockSize()) != 0 {
		reader, err := rr.Range(ctx, 0, rr.Size())
		if err != nil {
			return nil, err
		}
		defer func() { err = errs.Combine(err, reader.Close()) }()
		cipherData, err := ioutil.ReadAll(reader)
		if err != nil {
			return nil, err
		}
		data, err := encryption.Decrypt(cipherData, cipher, contentKey, startingNonce)
		if err != nil {
			return nil, err
		}
		return ranger.ByteRanger(data), nil
	}

	rd, err = encryption.Transform(rr, decrypter)
	if err != nil {
		return nil, err
	}
	return eestream.Unpad(rd, int(rd.Size()-decryptedSize))
}
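
// The Unpad call above strips the padding that eestream.PadReader added
// during upload: rd.Size() is the block-aligned ciphertext size, so
// rd.Size()-decryptedSize is exactly the number of padding bytes to drop.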

// cancelHandler cleans up the segments that were already uploaded when an
// upload fails or is canceled (e.g. on receiving CTRL+C).
func (s *streamStore) cancelHandler(ctx context.Context, streamID storj.StreamID, totalSegments int64, path Path, pathCipher storj.CipherSuite) {
	defer mon.Task()(&ctx)(nil)

	for i := int64(0); i < totalSegments; i++ {
		err := s.segments.Delete(ctx, streamID, int32(i))
		if err != nil {
			zap.L().Warn("Failed deleting segment", zap.Stringer("path", path), zap.Int64("Segment Index", i), zap.Error(err))
			continue
		}
	}
}

// getEncryptedKeyAndNonce extracts the encrypted content key and its nonce
// from the last segment's metadata; it returns nils when no metadata is set.
func getEncryptedKeyAndNonce(m *pb.SegmentMeta) (storj.EncryptedPrivateKey, *storj.Nonce) {
	if m == nil {
		return nil, nil
	}

	var nonce storj.Nonce
	copy(nonce[:], m.KeyNonce)

	return m.EncryptedKey, &nonce
}

// TypedDecryptStreamInfo decrypts stream info
func TypedDecryptStreamInfo(ctx context.Context, streamMetaBytes []byte, path Path, encStore *encryption.Store) (
	streamInfo []byte, streamMeta pb.StreamMeta, err error) {
	defer mon.Task()(&ctx)(&err)

	err = proto.Unmarshal(streamMetaBytes, &streamMeta)
	if err != nil {
		return nil, pb.StreamMeta{}, err
	}

	derivedKey, err := encryption.DeriveContentKey(path.Bucket(), path.UnencryptedPath(), encStore)
	if err != nil {
		return nil, pb.StreamMeta{}, err
	}

	cipher := storj.CipherSuite(streamMeta.EncryptionType)
	encryptedKey, keyNonce := getEncryptedKeyAndNonce(streamMeta.LastSegmentMeta)
	contentKey, err := encryption.DecryptKey(encryptedKey, cipher, derivedKey, keyNonce)
	if err != nil {
		return nil, pb.StreamMeta{}, err
	}

	// decrypt metadata with the content encryption key and zero nonce
	streamInfo, err = encryption.Decrypt(streamMeta.EncryptedStreamInfo, cipher, contentKey, &storj.Nonce{})
	return streamInfo, streamMeta, err
}