30f790a040
* add path implementation

  This commit adds a pkg/paths package which contains two types, Encrypted and Unencrypted, to statically enforce what is contained in a path. It's part of a refactoring of the code base to be clearer about what is contained in a storj.Path at all the layers.

  Change-Id: Ifc4d4932da26a97ea99749b8356b4543496a8864

* add encryption store

  This change adds an encryption.Store type to keep a collection of root keys for arbitrary locations in some buckets. It allows one to look up all of the necessary information to encrypt paths, decrypt paths, and decrypt list operations. It adds some exported functions to perform encryption on paths using a Store.

  Change-Id: I1a3d230c521d65f0ede727f93e1cb389f8be9497

* add shim around streams store

  This commit changes no functionality, but reorganizes the code so that changes can be made directly to the streams store implementation without affecting callers. It also adds a Path type that will be used at the interface boundary for the streams store so that it can be sure that it's getting the well-formed paths it expects.

  Change-Id: I50bd682995b185beb653b00562fab62ef11f1ab5

* refactor streams to use encryption store

  This commit changes the streams store to use the path type as well as the encryption store to handle all of its encryption and decryption. Some changes were made to how the default key is returned in the encryption store so that it includes the case when the bucket exists but no paths matched. The path iterator could also be simplified to not report whether a consume was valid: that information is no longer necessary.

  The kvmetainfo tests were changed to pass the subtest's *testing.T rather than having the closure capture the parent's, so the test framework now correctly reports which test failed.

  There are still some latent issues with listing, in that listing for "a/" and listing for "a" are not the same operation, but we treat them as such. I suspect there are also issues with paths like "/" or "//foo", but that's for another time.

  Change-Id: I81cad4ba2850c3d14ba7e632777c4cac93db9472

* use an encryption store at the upper layers

  Change-Id: Id9b4dd5f27b3ecac863de586e9ae076f4f927f6f

* fix linting failures

  Change-Id: Ifb8378879ad308d4d047a0483850156371a41280

* fix linting in encryption test

  Change-Id: Ia35647dfe18b0f20fe13763b28e53294f75c38fa

* get rid of kvmetainfo rootKey

  Change-Id: Id795ca03d9417e3fe9634365a121430eb678d6d5

* Fix linting failure for return with else

  Change-Id: I0b9ffd92be42ffcd8fef7ea735c5fc114a55d3b5

* fix some bugs adding enc store to kvmetainfo

  Change-Id: I8e765970ba817289c65ec62971ae3bfa2c53a1ba

* respond to review feedback

  Change-Id: I43e2ce29ce2fb6677b1cd6b9469838d80ec92c86
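A minimal sketch of the path types the first commit describes, assuming simplified internals (the real pkg/paths API may differ in detail); NewUnencrypted and Raw are the constructor and accessor the file below actually uses:

	package paths

	// Unencrypted wraps a raw path that has not been encrypted, and
	// Encrypted wraps one that has; the distinct types make it a
	// compile-time error to mix the two up.
	type Unencrypted struct{ raw string }
	type Encrypted struct{ raw string }

	// NewUnencrypted wraps a raw string as an unencrypted path.
	func NewUnencrypted(raw string) Unencrypted { return Unencrypted{raw: raw} }

	// NewEncrypted wraps a raw string as an encrypted path.
	func NewEncrypted(raw string) Encrypted { return Encrypted{raw: raw} }

	// Raw returns the underlying path string.
	func (p Unencrypted) Raw() string { return p.raw }

	// Raw returns the underlying path string.
	func (p Encrypted) Raw() string { return p.raw }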
425 lines
12 KiB
Go
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.

package kvmetainfo

import (
	"context"
	"errors"
	"time"

	"github.com/gogo/protobuf/proto"
	"github.com/golang/protobuf/ptypes"
	"github.com/golang/protobuf/ptypes/timestamp"
	"go.uber.org/zap"

	"storj.io/storj/internal/memory"
	"storj.io/storj/pkg/encryption"
	"storj.io/storj/pkg/paths"
	"storj.io/storj/pkg/pb"
	"storj.io/storj/pkg/storage/meta"
	"storj.io/storj/pkg/storage/objects"
	"storj.io/storj/pkg/storage/segments"
	"storj.io/storj/pkg/storage/streams"
	"storj.io/storj/pkg/storj"
	"storj.io/storj/storage"
)

// DefaultRS contains the default values for RedundancyScheme.
var DefaultRS = storj.RedundancyScheme{
	Algorithm:      storj.ReedSolomon,
	RequiredShares: 20,
	RepairShares:   30,
	OptimalShares:  40,
	TotalShares:    50,
	ShareSize:      1 * memory.KiB.Int32(),
}

// DefaultES contains the default values for EncryptionScheme.
// BlockSize should default to the size of a stripe.
var DefaultES = storj.EncryptionScheme{
	Cipher:    storj.AESGCM,
	BlockSize: DefaultRS.StripeSize(),
}
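// Note: assuming StripeSize is the erasure share size times the number of
// required shares, the defaults above give a stripe of 20 * 1 KiB = 20 KiB,
// which is therefore also the default encryption block size.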
// GetObject returns information about an object.
func (db *DB) GetObject(ctx context.Context, bucket string, path storj.Path) (info storj.Object, err error) {
	defer mon.Task()(&ctx)(&err)

	_, info, err = db.getInfo(ctx, bucket, path)

	return info, err
}

// GetObjectStream returns an interface for reading the object stream.
func (db *DB) GetObjectStream(ctx context.Context, bucket string, path storj.Path) (stream storj.ReadOnlyStream, err error) {
	defer mon.Task()(&ctx)(&err)

	meta, info, err := db.getInfo(ctx, bucket, path)
	if err != nil {
		return nil, err
	}

	streamKey, err := encryption.StoreDeriveContentKey(bucket, meta.fullpath.UnencryptedPath(), db.encStore)
	if err != nil {
		return nil, err
	}

	return &readonlyStream{
		db:        db,
		info:      info,
		bucket:    meta.bucket,
		encPath:   meta.encPath.Raw(),
		streamKey: streamKey,
	}, nil
}
// CreateObject creates an uploading object and returns an interface for uploading Object information.
func (db *DB) CreateObject(ctx context.Context, bucket string, path storj.Path, createInfo *storj.CreateObject) (object storj.MutableObject, err error) {
	defer mon.Task()(&ctx)(&err)

	bucketInfo, err := db.GetBucket(ctx, bucket)
	if err != nil {
		return nil, err
	}

	if path == "" {
		return nil, storj.ErrNoPath.New("")
	}

	info := storj.Object{
		Bucket: bucketInfo,
		Path:   path,
	}

	if createInfo != nil {
		info.Metadata = createInfo.Metadata
		info.ContentType = createInfo.ContentType
		info.Expires = createInfo.Expires
		info.RedundancyScheme = createInfo.RedundancyScheme
		info.EncryptionScheme = createInfo.EncryptionScheme
	}

	// TODO: autodetect content type from the path extension
	// if info.ContentType == "" {}

	if info.EncryptionScheme.IsZero() {
		info.EncryptionScheme = storj.EncryptionScheme{
			Cipher:    DefaultES.Cipher,
			BlockSize: DefaultES.BlockSize,
		}
	}

	if info.RedundancyScheme.IsZero() {
		info.RedundancyScheme = DefaultRS

		// If the provided EncryptionScheme.BlockSize isn't a multiple of the
		// DefaultRS stripe size, then overwrite the EncryptionScheme with the DefaultES values.
		if err := validateBlockSize(DefaultRS, info.EncryptionScheme.BlockSize); err != nil {
			info.EncryptionScheme.BlockSize = DefaultES.BlockSize
		}
	}

	return &mutableObject{
		db:   db,
		info: info,
	}, nil
}
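// A hypothetical caller, shown for illustration only; passing a nil
// createInfo selects the DefaultRS and DefaultES values above:
//
//	obj, err := db.CreateObject(ctx, "photos", "vacation/beach.jpg", nil)
//	if err != nil {
//		return err
//	}
//	stream, err := obj.CreateStream(ctx)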
// ModifyObject modifies a committed object.
func (db *DB) ModifyObject(ctx context.Context, bucket string, path storj.Path) (object storj.MutableObject, err error) {
	defer mon.Task()(&ctx)(&err)
	return nil, errors.New("not implemented")
}

// DeleteObject deletes an object from the database.
func (db *DB) DeleteObject(ctx context.Context, bucket string, path storj.Path) (err error) {
	defer mon.Task()(&ctx)(&err)

	bucketInfo, err := db.GetBucket(ctx, bucket)
	if err != nil {
		if storage.ErrKeyNotFound.Has(err) {
			err = storj.ErrBucketNotFound.Wrap(err)
		}
		return err
	}

	prefixed := prefixedObjStore{
		store:  objects.NewStore(db.streams, bucketInfo.PathCipher),
		prefix: bucket,
	}
	return prefixed.Delete(ctx, path)
}
// ModifyPendingObject creates an interface for updating a partially uploaded object.
func (db *DB) ModifyPendingObject(ctx context.Context, bucket string, path storj.Path) (object storj.MutableObject, err error) {
	defer mon.Task()(&ctx)(&err)
	return nil, errors.New("not implemented")
}

// ListPendingObjects lists pending objects in bucket based on the ListOptions.
func (db *DB) ListPendingObjects(ctx context.Context, bucket string, options storj.ListOptions) (list storj.ObjectList, err error) {
	defer mon.Task()(&ctx)(&err)
	return storj.ObjectList{}, errors.New("not implemented")
}
// ListObjects lists objects in bucket based on the ListOptions.
func (db *DB) ListObjects(ctx context.Context, bucket string, options storj.ListOptions) (list storj.ObjectList, err error) {
	defer mon.Task()(&ctx)(&err)

	bucketInfo, err := db.GetBucket(ctx, bucket)
	if err != nil {
		return storj.ObjectList{}, err
	}

	objects := prefixedObjStore{
		store:  objects.NewStore(db.streams, bucketInfo.PathCipher),
		prefix: bucket,
	}

	var startAfter, endBefore string
	switch options.Direction {
	case storj.Before:
		// before lists backwards from cursor, without cursor
		endBefore = options.Cursor
	case storj.Backward:
		// backward lists backwards from cursor, including cursor
		endBefore = keyAfter(options.Cursor)
	case storj.Forward:
		// forward lists forwards from cursor, including cursor
		startAfter = keyBefore(options.Cursor)
	case storj.After:
		// after lists forwards from cursor, without cursor
		startAfter = options.Cursor
	default:
		return storj.ObjectList{}, errClass.New("invalid direction %d", options.Direction)
	}
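	// "\x7f" is the highest 7-bit ASCII byte, so a run of them sorts after
	// any plain-ASCII key; it stands in for "start from the very end" when
	// listing backwards without a cursor.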
	// TODO: remove this hack-fix of specifying the last key
	if options.Cursor == "" && (options.Direction == storj.Before || options.Direction == storj.Backward) {
		endBefore = "\x7f\x7f\x7f\x7f\x7f\x7f\x7f"
	}

	items, more, err := objects.List(ctx, options.Prefix, startAfter, endBefore, options.Recursive, options.Limit, meta.All)
	if err != nil {
		return storj.ObjectList{}, err
	}

	list = storj.ObjectList{
		Bucket: bucket,
		Prefix: options.Prefix,
		More:   more,
		Items:  make([]storj.Object, 0, len(items)),
	}

	for _, item := range items {
		list.Items = append(list.Items, objectFromMeta(bucketInfo, item.Path, item.IsPrefix, item.Meta))
	}

	return list, nil
}
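// A hypothetical forward-pagination loop, for illustration only:
//
//	opts := storj.ListOptions{Direction: storj.After, Limit: 100}
//	for {
//		page, err := db.ListObjects(ctx, "photos", opts)
//		if err != nil {
//			return err
//		}
//		// ... consume page.Items ...
//		if !page.More {
//			break
//		}
//		opts.Cursor = page.Items[len(page.Items)-1].Path
//	}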
type object struct {
	fullpath        streams.Path
	bucket          string
	encPath         paths.Encrypted
	lastSegmentMeta segments.Meta
	streamInfo      pb.StreamInfo
	streamMeta      pb.StreamMeta
}
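// getInfo loads the bucket metadata, derives the encrypted path, and reads
// the object's last-segment pointer to build both the internal object state
// and the public storj.Object info.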
func (db *DB) getInfo(ctx context.Context, bucket string, path storj.Path) (obj object, info storj.Object, err error) {
	defer mon.Task()(&ctx)(&err)

	// TODO: we shouldn't need to go load the bucket metadata every time we get object info
	bucketInfo, err := db.GetBucket(ctx, bucket)
	if err != nil {
		return object{}, storj.Object{}, err
	}

	if path == "" {
		return object{}, storj.Object{}, storj.ErrNoPath.New("")
	}

	fullpath := streams.CreatePath(bucket, paths.NewUnencrypted(path))

	encPath, err := encryption.StoreEncryptPath(bucket, paths.NewUnencrypted(path), bucketInfo.PathCipher, db.encStore)
	if err != nil {
		return object{}, storj.Object{}, err
	}
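	// Segment index -1 addresses the object's last segment, whose metadata
	// carries the encrypted stream info decoded below.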
	pointer, err := db.metainfo.SegmentInfo(ctx, bucket, encPath.Raw(), -1)
	if err != nil {
		if storage.ErrKeyNotFound.Has(err) {
			err = storj.ErrObjectNotFound.Wrap(err)
		}
		return object{}, storj.Object{}, err
	}

	var redundancyScheme *pb.RedundancyScheme
	if pointer.GetType() == pb.Pointer_REMOTE {
		redundancyScheme = pointer.GetRemote().GetRedundancy()
	} else {
		// TODO: handle better
		redundancyScheme = &pb.RedundancyScheme{
			Type:             pb.RedundancyScheme_RS,
			MinReq:           -1,
			Total:            -1,
			RepairThreshold:  -1,
			SuccessThreshold: -1,
			ErasureShareSize: -1,
		}
	}

	lastSegmentMeta := segments.Meta{
		Modified:   convertTime(pointer.GetCreationDate()),
		Expiration: convertTime(pointer.GetExpirationDate()),
		Size:       pointer.GetSegmentSize(),
		Data:       pointer.GetMetadata(),
	}

	streamInfoData, streamMeta, err := streams.TypedDecryptStreamInfo(ctx, lastSegmentMeta.Data, fullpath, db.encStore)
	if err != nil {
		return object{}, storj.Object{}, err
	}

	streamInfo := pb.StreamInfo{}
	err = proto.Unmarshal(streamInfoData, &streamInfo)
	if err != nil {
		return object{}, storj.Object{}, err
	}

	info, err = objectStreamFromMeta(bucketInfo, path, lastSegmentMeta, streamInfo, streamMeta, redundancyScheme)
	if err != nil {
		return object{}, storj.Object{}, err
	}

	return object{
		fullpath:        fullpath,
		bucket:          bucket,
		encPath:         encPath,
		lastSegmentMeta: lastSegmentMeta,
		streamInfo:      streamInfo,
		streamMeta:      streamMeta,
	}, info, nil
}
func objectFromMeta(bucket storj.Bucket, path storj.Path, isPrefix bool, meta objects.Meta) storj.Object {
	return storj.Object{
		Version:  0, // TODO:
		Bucket:   bucket,
		Path:     path,
		IsPrefix: isPrefix,

		Metadata: meta.UserDefined,

		ContentType: meta.ContentType,
		Created:     meta.Modified, // TODO: use correct field
		Modified:    meta.Modified, // TODO: use correct field
		Expires:     meta.Expiration,

		Stream: storj.Stream{
			Size:     meta.Size,
			Checksum: []byte(meta.Checksum),
		},
	}
}
func objectStreamFromMeta(bucket storj.Bucket, path storj.Path, lastSegment segments.Meta, stream pb.StreamInfo, streamMeta pb.StreamMeta, redundancyScheme *pb.RedundancyScheme) (storj.Object, error) {
	var nonce storj.Nonce
	var encryptedKey storj.EncryptedPrivateKey
	if streamMeta.LastSegmentMeta != nil {
		copy(nonce[:], streamMeta.LastSegmentMeta.KeyNonce)
		encryptedKey = streamMeta.LastSegmentMeta.EncryptedKey
	}

	serMetaInfo := pb.SerializableMeta{}
	err := proto.Unmarshal(stream.Metadata, &serMetaInfo)
	if err != nil {
		return storj.Object{}, err
	}

	return storj.Object{
		Version:  0, // TODO:
		Bucket:   bucket,
		Path:     path,
		IsPrefix: false,

		Metadata: serMetaInfo.UserDefined,

		ContentType: serMetaInfo.ContentType,
		Created:     lastSegment.Modified,   // TODO: use correct field
		Modified:    lastSegment.Modified,   // TODO: use correct field
		Expires:     lastSegment.Expiration, // TODO: use correct field

		Stream: storj.Stream{
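			// Every segment except the last has the fixed size
			// stream.SegmentsSize; e.g. three segments of 64 MiB plus a
			// 10 MiB tail give 2*64 MiB + 10 MiB.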
			Size: stream.SegmentsSize*(stream.NumberOfSegments-1) + stream.LastSegmentSize,
			// Checksum: []byte(object.Checksum),

			SegmentCount:     stream.NumberOfSegments,
			FixedSegmentSize: stream.SegmentsSize,

			RedundancyScheme: storj.RedundancyScheme{
				Algorithm:      storj.ReedSolomon,
				ShareSize:      redundancyScheme.GetErasureShareSize(),
				RequiredShares: int16(redundancyScheme.GetMinReq()),
				RepairShares:   int16(redundancyScheme.GetRepairThreshold()),
				OptimalShares:  int16(redundancyScheme.GetSuccessThreshold()),
				TotalShares:    int16(redundancyScheme.GetTotal()),
			},
			EncryptionScheme: storj.EncryptionScheme{
				Cipher:    storj.Cipher(streamMeta.EncryptionType),
				BlockSize: streamMeta.EncryptionBlockSize,
			},
			LastSegment: storj.LastSegment{
				Size:              stream.LastSegmentSize,
				EncryptedKeyNonce: nonce,
				EncryptedKey:      encryptedKey,
			},
		},
	}, nil
}
// convertTime converts gRPC timestamp to Go time
func convertTime(ts *timestamp.Timestamp) time.Time {
	if ts == nil {
		return time.Time{}
	}
	t, err := ptypes.Timestamp(ts)
	if err != nil {
		zap.S().Warnf("Failed converting timestamp %v: %v", ts, err)
	}
	return t
}
type mutableObject struct {
	db   *DB
	info storj.Object
}

func (object *mutableObject) Info() storj.Object { return object.info }

func (object *mutableObject) CreateStream(ctx context.Context) (_ storj.MutableStream, err error) {
	defer mon.Task()(&ctx)(&err)
	return &mutableStream{
		db:   object.db,
		info: object.info,
	}, nil
}

func (object *mutableObject) ContinueStream(ctx context.Context) (_ storj.MutableStream, err error) {
	defer mon.Task()(&ctx)(&err)
	return nil, errors.New("not implemented")
}

func (object *mutableObject) DeleteStream(ctx context.Context) (err error) {
	defer mon.Task()(&ctx)(&err)
	return errors.New("not implemented")
}
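// Commit re-reads the object's info from the metainfo store so that the
// mutableObject reflects the committed state of the stream.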
func (object *mutableObject) Commit(ctx context.Context) (err error) {
	defer mon.Task()(&ctx)(&err)
	_, info, err := object.db.getInfo(ctx, object.info.Bucket.Name, object.info.Path)
	object.info = info
	return err
}