// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.
package kvmetainfo
|
|
|
|
|
|
|
|
import (
|
|
|
|
"context"
|
|
|
|
"errors"
|
2018-11-08 08:45:48 +00:00
|
|
|
"time"
|
2018-11-06 11:40:06 +00:00
|
|
|
|
2018-11-20 18:29:07 +00:00
|
|
|
"github.com/gogo/protobuf/proto"
|
2018-11-08 08:45:48 +00:00
|
|
|
"github.com/golang/protobuf/ptypes"
|
|
|
|
"github.com/golang/protobuf/ptypes/timestamp"
|
|
|
|
"go.uber.org/zap"
|
2018-11-06 11:40:06 +00:00
|
|
|
|
2018-11-30 13:50:52 +00:00
|
|
|
"storj.io/storj/internal/memory"
|
2018-11-06 11:40:06 +00:00
|
|
|
"storj.io/storj/pkg/encryption"
|
|
|
|
"storj.io/storj/pkg/pb"
|
|
|
|
"storj.io/storj/pkg/storage/meta"
|
|
|
|
"storj.io/storj/pkg/storage/objects"
|
|
|
|
"storj.io/storj/pkg/storage/segments"
|
|
|
|
"storj.io/storj/pkg/storage/streams"
|
|
|
|
"storj.io/storj/pkg/storj"
|
2018-11-15 15:31:33 +00:00
|
|
|
"storj.io/storj/storage"
|
2018-11-06 11:40:06 +00:00
|
|
|
)
|
|
|
|
|
|
|
|
const (
	// committedPrefix is the storage key prefix under which info about
	// completed (fully uploaded) objects is stored.
	committedPrefix = "l/"
)
|
|
|
|
|
// DefaultRS holds the default values for RedundancyScheme; CreateObject
// falls back to it when the caller does not specify a redundancy scheme.
var DefaultRS = storj.RedundancyScheme{
	Algorithm:      storj.ReedSolomon,
	RequiredShares: 20,
	RepairShares:   30,
	OptimalShares:  40,
	TotalShares:    50,
	ShareSize:      1 * memory.KiB.Int32(),
}
|
|
|
|
|
// DefaultES holds the default values for EncryptionScheme; CreateObject
// uses DefaultES.Cipher when the caller does not specify an encryption scheme.
var DefaultES = storj.EncryptionScheme{
	Cipher:    storj.AESGCM,
	BlockSize: 1 * memory.KiB.Int32(),
}
|
|
|
|
|
2018-11-06 11:40:06 +00:00
|
|
|
// GetObject returns information about an object
|
2018-11-16 13:59:27 +00:00
|
|
|
func (db *DB) GetObject(ctx context.Context, bucket string, path storj.Path) (info storj.Object, err error) {
|
|
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
|
|
|
|
_, info, err = db.getInfo(ctx, committedPrefix, bucket, path)
|
|
|
|
|
2018-11-06 11:40:06 +00:00
|
|
|
return info, err
|
|
|
|
}
|
|
|
|
|
|
|
|
// GetObjectStream returns interface for reading the object stream
|
2018-11-16 13:59:27 +00:00
|
|
|
func (db *DB) GetObjectStream(ctx context.Context, bucket string, path storj.Path) (stream storj.ReadOnlyStream, err error) {
|
|
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
|
2018-11-06 11:40:06 +00:00
|
|
|
meta, info, err := db.getInfo(ctx, committedPrefix, bucket, path)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
streamKey, err := encryption.DeriveContentKey(meta.fullpath, db.rootKey)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
return &readonlyStream{
|
|
|
|
db: db,
|
|
|
|
info: info,
|
|
|
|
encryptedPath: meta.encryptedPath,
|
|
|
|
streamKey: streamKey,
|
|
|
|
}, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// CreateObject creates an uploading object and returns an interface for uploading Object information
|
2018-11-16 13:59:27 +00:00
|
|
|
func (db *DB) CreateObject(ctx context.Context, bucket string, path storj.Path, createInfo *storj.CreateObject) (object storj.MutableObject, err error) {
|
|
|
|
defer mon.Task()(&ctx)(&err)
|
2018-11-30 13:50:52 +00:00
|
|
|
|
2018-12-03 14:38:03 +00:00
|
|
|
bucketInfo, err := db.GetBucket(ctx, bucket)
|
2018-11-30 13:50:52 +00:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
if path == "" {
|
|
|
|
return nil, storj.ErrNoPath.New("")
|
|
|
|
}
|
|
|
|
|
|
|
|
info := storj.Object{
|
2018-12-03 14:38:03 +00:00
|
|
|
Bucket: bucketInfo,
|
2018-11-30 13:50:52 +00:00
|
|
|
Path: path,
|
|
|
|
}
|
|
|
|
|
|
|
|
if createInfo != nil {
|
|
|
|
info.Metadata = createInfo.Metadata
|
|
|
|
info.ContentType = createInfo.ContentType
|
|
|
|
info.Expires = createInfo.Expires
|
|
|
|
info.RedundancyScheme = createInfo.RedundancyScheme
|
|
|
|
info.EncryptionScheme = createInfo.EncryptionScheme
|
|
|
|
}
|
|
|
|
|
|
|
|
// TODO: autodetect content type from the path extension
|
|
|
|
// if info.ContentType == "" {}
|
|
|
|
|
|
|
|
if info.RedundancyScheme.IsZero() {
|
2019-02-04 16:56:10 +00:00
|
|
|
info.RedundancyScheme = DefaultRS
|
2018-11-30 13:50:52 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
if info.EncryptionScheme.IsZero() {
|
|
|
|
info.EncryptionScheme = storj.EncryptionScheme{
|
2019-02-04 16:56:10 +00:00
|
|
|
Cipher: DefaultES.Cipher,
|
2018-11-30 13:50:52 +00:00
|
|
|
BlockSize: info.RedundancyScheme.ShareSize,
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return &mutableObject{
|
|
|
|
db: db,
|
|
|
|
info: info,
|
|
|
|
}, nil
|
2018-11-06 11:40:06 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// ModifyObject modifies a committed object
|
2018-11-16 13:59:27 +00:00
|
|
|
func (db *DB) ModifyObject(ctx context.Context, bucket string, path storj.Path) (object storj.MutableObject, err error) {
|
|
|
|
defer mon.Task()(&ctx)(&err)
|
2018-11-06 11:40:06 +00:00
|
|
|
return nil, errors.New("not implemented")
|
|
|
|
}
|
|
|
|
|
|
|
|
// DeleteObject deletes an object from database
|
2018-11-16 13:59:27 +00:00
|
|
|
func (db *DB) DeleteObject(ctx context.Context, bucket string, path storj.Path) (err error) {
|
|
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
|
2018-11-15 15:31:33 +00:00
|
|
|
store, err := db.buckets.GetObjectStore(ctx, bucket)
|
2018-11-14 09:26:18 +00:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
2018-11-21 11:17:28 +00:00
|
|
|
return store.Delete(ctx, path)
|
2018-11-06 11:40:06 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// ModifyPendingObject creates an interface for updating a partially uploaded object
|
2018-11-16 13:59:27 +00:00
|
|
|
func (db *DB) ModifyPendingObject(ctx context.Context, bucket string, path storj.Path) (object storj.MutableObject, err error) {
|
|
|
|
defer mon.Task()(&ctx)(&err)
|
2018-11-06 11:40:06 +00:00
|
|
|
return nil, errors.New("not implemented")
|
|
|
|
}
|
|
|
|
|
|
|
|
// ListPendingObjects lists pending objects in bucket based on the ListOptions
|
2018-11-16 13:59:27 +00:00
|
|
|
func (db *DB) ListPendingObjects(ctx context.Context, bucket string, options storj.ListOptions) (list storj.ObjectList, err error) {
|
|
|
|
defer mon.Task()(&ctx)(&err)
|
2018-11-06 11:40:06 +00:00
|
|
|
return storj.ObjectList{}, errors.New("not implemented")
|
|
|
|
}
|
|
|
|
|
|
|
|
// ListObjects lists objects in bucket based on the ListOptions
|
2018-11-16 13:59:27 +00:00
|
|
|
func (db *DB) ListObjects(ctx context.Context, bucket string, options storj.ListOptions) (list storj.ObjectList, err error) {
|
|
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
|
2018-12-03 14:38:03 +00:00
|
|
|
bucketInfo, err := db.GetBucket(ctx, bucket)
|
|
|
|
if err != nil {
|
|
|
|
return storj.ObjectList{}, err
|
|
|
|
}
|
|
|
|
|
2018-11-14 09:26:18 +00:00
|
|
|
objects, err := db.buckets.GetObjectStore(ctx, bucket)
|
|
|
|
if err != nil {
|
|
|
|
return storj.ObjectList{}, err
|
|
|
|
}
|
|
|
|
|
2018-11-06 11:40:06 +00:00
|
|
|
var startAfter, endBefore string
|
|
|
|
switch options.Direction {
|
|
|
|
case storj.Before:
|
|
|
|
// before lists backwards from cursor, without cursor
|
|
|
|
endBefore = options.Cursor
|
|
|
|
case storj.Backward:
|
|
|
|
// backward lists backwards from cursor, including cursor
|
|
|
|
endBefore = keyAfter(options.Cursor)
|
|
|
|
case storj.Forward:
|
|
|
|
// forward lists forwards from cursor, including cursor
|
|
|
|
startAfter = keyBefore(options.Cursor)
|
|
|
|
case storj.After:
|
|
|
|
// after lists forwards from cursor, without cursor
|
|
|
|
startAfter = options.Cursor
|
|
|
|
default:
|
|
|
|
return storj.ObjectList{}, errClass.New("invalid direction %d", options.Direction)
|
|
|
|
}
|
|
|
|
|
2018-11-14 10:40:53 +00:00
|
|
|
// TODO: remove this hack-fix of specifying the last key
|
|
|
|
if options.Cursor == "" && (options.Direction == storj.Before || options.Direction == storj.Backward) {
|
|
|
|
endBefore = "\x7f\x7f\x7f\x7f\x7f\x7f\x7f"
|
|
|
|
}
|
|
|
|
|
2018-11-14 09:26:18 +00:00
|
|
|
items, more, err := objects.List(ctx, options.Prefix, startAfter, endBefore, options.Recursive, options.Limit, meta.All)
|
2018-11-06 11:40:06 +00:00
|
|
|
if err != nil {
|
|
|
|
return storj.ObjectList{}, err
|
|
|
|
}
|
|
|
|
|
2018-11-16 13:59:27 +00:00
|
|
|
list = storj.ObjectList{
|
2018-11-06 11:40:06 +00:00
|
|
|
Bucket: bucket,
|
|
|
|
Prefix: options.Prefix,
|
|
|
|
More: more,
|
|
|
|
Items: make([]storj.Object, 0, len(items)),
|
|
|
|
}
|
|
|
|
|
|
|
|
for _, item := range items {
|
2018-12-03 14:38:03 +00:00
|
|
|
list.Items = append(list.Items, objectFromMeta(bucketInfo, item.Path, item.IsPrefix, item.Meta))
|
2018-11-06 11:40:06 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
return list, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// object carries the internal bookkeeping for a single stored object:
// its plaintext and encrypted paths plus the raw stream metadata
// recovered from the object's last segment.
type object struct {
	fullpath        string        // "<bucket>/<path>" in plaintext
	encryptedPath   string        // fullpath with the path portion encrypted
	lastSegmentMeta segments.Meta // metadata of the object's last segment
	streamInfo      pb.StreamInfo // decrypted stream info from the last segment
	streamMeta      pb.StreamMeta // stream encryption metadata
}
|
|
|
|
|
// getInfo fetches and decrypts the metadata of a committed object, returning
// both the internal bookkeeping (object) and the public view (storj.Object).
// NOTE(review): the prefix parameter is not referenced anywhere in this body —
// confirm whether it is intentionally unused.
func (db *DB) getInfo(ctx context.Context, prefix string, bucket string, path storj.Path) (obj object, info storj.Object, err error) {
	defer mon.Task()(&ctx)(&err)

	bucketInfo, err := db.GetBucket(ctx, bucket)
	if err != nil {
		return object{}, storj.Object{}, err
	}

	if path == "" {
		return object{}, storj.Object{}, storj.ErrNoPath.New("")
	}

	fullpath := bucket + "/" + path

	// Encrypt everything after the bucket component using the bucket's
	// configured path cipher.
	encryptedPath, err := streams.EncryptAfterBucket(fullpath, bucketInfo.PathCipher, db.rootKey)
	if err != nil {
		return object{}, storj.Object{}, err
	}

	// -1 requests the object's last segment, which carries the stream info.
	pointer, err := db.metainfo.SegmentInfo(ctx, bucket, storj.JoinPaths(storj.SplitPath(encryptedPath)[1:]...), -1)
	if err != nil {
		if storage.ErrKeyNotFound.Has(err) {
			err = storj.ErrObjectNotFound.Wrap(err)
		}
		return object{}, storj.Object{}, err
	}

	var redundancyScheme *pb.RedundancyScheme
	if pointer.GetType() == pb.Pointer_REMOTE {
		redundancyScheme = pointer.GetRemote().GetRedundancy()
	} else {
		// TODO: handle better
		// Inline segments carry no redundancy; use -1 sentinels.
		redundancyScheme = &pb.RedundancyScheme{
			Type:             pb.RedundancyScheme_RS,
			MinReq:           -1,
			Total:            -1,
			RepairThreshold:  -1,
			SuccessThreshold: -1,
			ErasureShareSize: -1,
		}
	}

	lastSegmentMeta := segments.Meta{
		Modified:   convertTime(pointer.GetCreationDate()),
		Expiration: convertTime(pointer.GetExpirationDate()),
		Size:       pointer.GetSegmentSize(),
		Data:       pointer.GetMetadata(),
	}

	// Decrypt the stream info stored in the last segment's metadata.
	streamInfoData, streamMeta, err := streams.DecryptStreamInfo(ctx, lastSegmentMeta.Data, fullpath, db.rootKey)
	if err != nil {
		return object{}, storj.Object{}, err
	}

	streamInfo := pb.StreamInfo{}
	err = proto.Unmarshal(streamInfoData, &streamInfo)
	if err != nil {
		return object{}, storj.Object{}, err
	}

	info, err = objectStreamFromMeta(bucketInfo, path, lastSegmentMeta, streamInfo, streamMeta, redundancyScheme)
	if err != nil {
		return object{}, storj.Object{}, err
	}
	// Backfill the segment meta with the scheme resolved above.
	lastSegmentMeta.RedundancyScheme = info.Stream.RedundancyScheme

	return object{
		fullpath:        fullpath,
		encryptedPath:   encryptedPath,
		lastSegmentMeta: lastSegmentMeta,
		streamInfo:      streamInfo,
		streamMeta:      streamMeta,
	}, info, nil
}
|
|
|
|
|
2018-12-03 14:38:03 +00:00
|
|
|
func objectFromMeta(bucket storj.Bucket, path storj.Path, isPrefix bool, meta objects.Meta) storj.Object {
|
2018-11-06 11:40:06 +00:00
|
|
|
return storj.Object{
|
|
|
|
Version: 0, // TODO:
|
|
|
|
Bucket: bucket,
|
|
|
|
Path: path,
|
|
|
|
IsPrefix: isPrefix,
|
|
|
|
|
2018-12-07 18:31:29 +00:00
|
|
|
Metadata: meta.UserDefined,
|
2018-11-06 11:40:06 +00:00
|
|
|
|
|
|
|
ContentType: meta.ContentType,
|
|
|
|
Created: meta.Modified, // TODO: use correct field
|
|
|
|
Modified: meta.Modified, // TODO: use correct field
|
|
|
|
Expires: meta.Expiration,
|
|
|
|
|
|
|
|
Stream: storj.Stream{
|
|
|
|
Size: meta.Size,
|
|
|
|
Checksum: []byte(meta.Checksum),
|
|
|
|
},
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-12-07 18:31:29 +00:00
|
|
|
func objectStreamFromMeta(bucket storj.Bucket, path storj.Path, lastSegment segments.Meta, stream pb.StreamInfo, streamMeta pb.StreamMeta, redundancyScheme *pb.RedundancyScheme) (storj.Object, error) {
|
2018-11-06 11:40:06 +00:00
|
|
|
var nonce storj.Nonce
|
|
|
|
copy(nonce[:], streamMeta.LastSegmentMeta.KeyNonce)
|
2018-12-07 18:31:29 +00:00
|
|
|
|
|
|
|
serMetaInfo := pb.SerializableMeta{}
|
|
|
|
err := proto.Unmarshal(stream.Metadata, &serMetaInfo)
|
|
|
|
if err != nil {
|
|
|
|
return storj.Object{}, err
|
|
|
|
}
|
|
|
|
|
2018-11-06 11:40:06 +00:00
|
|
|
return storj.Object{
|
|
|
|
Version: 0, // TODO:
|
|
|
|
Bucket: bucket,
|
|
|
|
Path: path,
|
|
|
|
IsPrefix: false,
|
|
|
|
|
2018-12-07 18:31:29 +00:00
|
|
|
Metadata: serMetaInfo.UserDefined,
|
2018-11-06 11:40:06 +00:00
|
|
|
|
2018-12-07 18:31:29 +00:00
|
|
|
ContentType: serMetaInfo.ContentType,
|
|
|
|
Created: lastSegment.Modified, // TODO: use correct field
|
|
|
|
Modified: lastSegment.Modified, // TODO: use correct field
|
|
|
|
Expires: lastSegment.Expiration, // TODO: use correct field
|
2018-11-06 11:40:06 +00:00
|
|
|
|
|
|
|
Stream: storj.Stream{
|
|
|
|
Size: stream.SegmentsSize*(stream.NumberOfSegments-1) + stream.LastSegmentSize,
|
|
|
|
// Checksum: []byte(object.Checksum),
|
|
|
|
|
|
|
|
SegmentCount: stream.NumberOfSegments,
|
|
|
|
FixedSegmentSize: stream.SegmentsSize,
|
|
|
|
|
2019-04-02 22:15:31 +01:00
|
|
|
RedundancyScheme: segments.RedundancySchemeFromProto(redundancyScheme),
|
2018-11-06 11:40:06 +00:00
|
|
|
EncryptionScheme: storj.EncryptionScheme{
|
|
|
|
Cipher: storj.Cipher(streamMeta.EncryptionType),
|
|
|
|
BlockSize: streamMeta.EncryptionBlockSize,
|
|
|
|
},
|
|
|
|
LastSegment: storj.LastSegment{
|
|
|
|
Size: stream.LastSegmentSize,
|
|
|
|
EncryptedKeyNonce: nonce,
|
|
|
|
EncryptedKey: streamMeta.LastSegmentMeta.EncryptedKey,
|
|
|
|
},
|
|
|
|
},
|
2018-12-07 18:31:29 +00:00
|
|
|
}, nil
|
2018-11-06 11:40:06 +00:00
|
|
|
}
|
2018-11-08 08:45:48 +00:00
|
|
|
|
|
|
|
// convertTime converts gRPC timestamp to Go time
|
|
|
|
func convertTime(ts *timestamp.Timestamp) time.Time {
|
|
|
|
if ts == nil {
|
|
|
|
return time.Time{}
|
|
|
|
}
|
|
|
|
t, err := ptypes.Timestamp(ts)
|
|
|
|
if err != nil {
|
|
|
|
zap.S().Warnf("Failed converting timestamp %v: %v", ts, err)
|
|
|
|
}
|
|
|
|
return t
|
|
|
|
}
|
2018-11-30 13:50:52 +00:00
|
|
|
|
|
|
|
// mutableObject implements storj.MutableObject on top of db.
type mutableObject struct {
	db   *DB
	info storj.Object // cached object metadata; refreshed by Commit
}
|
|
|
|
|
|
|
|
// Info returns the cached object metadata.
func (object *mutableObject) Info() storj.Object { return object.info }
|
|
|
|
|
|
|
|
func (object *mutableObject) CreateStream(ctx context.Context) (storj.MutableStream, error) {
|
|
|
|
return &mutableStream{
|
|
|
|
db: object.db,
|
|
|
|
info: object.info,
|
|
|
|
}, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
func (object *mutableObject) ContinueStream(ctx context.Context) (storj.MutableStream, error) {
|
|
|
|
return nil, errors.New("not implemented")
|
|
|
|
}
|
|
|
|
|
|
|
|
func (object *mutableObject) DeleteStream(ctx context.Context) error {
|
|
|
|
return errors.New("not implemented")
|
|
|
|
}
|
|
|
|
|
|
|
|
func (object *mutableObject) Commit(ctx context.Context) error {
|
2018-12-07 18:31:29 +00:00
|
|
|
_, info, err := object.db.getInfo(ctx, committedPrefix, object.info.Bucket.Name, object.info.Path)
|
|
|
|
object.info = info
|
|
|
|
return err
|
2018-11-30 13:50:52 +00:00
|
|
|
}
|