lib/uplink: encryption context (#2349)
* lib/uplink: encryption context
  Change-Id: I5c23dca3286a46b713b30c4997e9ae6e630b2280
* lib/uplink: bucket operation examples
  Change-Id: Ia0f6e69f365dcff0cf11c731f51b30842bce053b
* lib/uplink: encryption key sharing test cases
  Change-Id: I3a172d565f33f4e591402cdcb9460664a7cc7fbe
* fix encrypted path prefix restriction issue
  Change-Id: I8f3921f9d52aaf4b84039de608b8cbbc88769554
* implement panics in libuplink encryption code
  todo on cipher suite selection as well as an api concern
  Change-Id: Ifa39eb3cc4b3443f7d96f9304df9b2ac4ec4085d
* implement GetProjectInfo api call to get salt
  Change-Id: Ic5f6b3be9ea35df48c1aa214ab5d355fb328e2cf
* some fixes and accessors for encryption store
  Change-Id: I3bb61f6712a037900e2a96e72ad4029ec1d3f718
* general fixes to builds/tests/etc
  Change-Id: I9930fa96acb3b221d9a001f8e274af5729cc8a47
* java bindings changes
  Change-Id: Ia2bd4c9c69739c8d3154d79616cff1f36fb403b6
* get libuplink examples passing
  Change-Id: I828f09a144160e0a5dd932324f78491ae2ec8a07
* fix proto.lock file
  Change-Id: I2fbbf4d0976a7d0473c2645e6dcb21aaa3be7651
* fix proto.lock again
  Change-Id: I92702cf49e1a340eef6379c2be4f7c4a268112a9
* fix golint issues
  Change-Id: I631ff9f43307a58e3b25a58cbb4a4cc2495f5eb6
* more linting fixes
  Change-Id: I51f8f30b367b5bca14c94b15417b9a4c9e7aa0ce
* bug fixed by structs bump
  Change-Id: Ibb03c691fce7606c35c08721b3ef0781ab48a38a
* retrigger
  Change-Id: Ieee0470b6a2d07168a1578552e8e7f271ae93a13
* retrigger
  Change-Id: I753d63853171e6a436c104ce176048892eb974c5
* semantic merge conflict
  Change-Id: I9419448496de90340569047a6a16a1b858a7978a
* update total to match prod defaults
  Change-Id: I693d55c1ebb28b5803ee1d26e9e198decf82308b
* retrigger
  Change-Id: I28b74d5d6202f61aa3866fe407d423f6a0a14b9e
* retrigger
  Change-Id: I6fd054885c715f602e2cef623fd464c42e88742c
* retrigger
  Change-Id: I6a01bae88c72406d4ed5a8f13bf8a2b3c650bd2d
Parent: 27c92ffc10
Commit: efcdaa43a3
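Before the diff itself, here is a minimal end-to-end sketch of the encryption-context flow this change introduces, pieced together from the lib/uplink examples added below (ex_password_test.go and friends). The satellite address, API key, bucket name, and passphrase are placeholders rather than values from this commit.

package example

import (
	"context"
	"strings"

	"storj.io/storj/lib/uplink"
)

// encryptionCtxSketch uploads one object using a passphrase-derived key wrapped
// in the new EncryptionCtx type (which replaces the old EncryptionAccess struct).
func encryptionCtxSketch(ctx context.Context) error {
	ul, err := uplink.NewUplink(ctx, &uplink.Config{})
	if err != nil {
		return err
	}
	defer func() { _ = ul.Close() }()

	// API keys are "macaroons" that allow you to create new, restricted API keys.
	key, err := uplink.ParseAPIKey("<api key>") // placeholder
	if err != nil {
		return err
	}

	p, err := ul.OpenProject(ctx, "us-central-1.tardigrade.io:7777", key) // placeholder address
	if err != nil {
		return err
	}
	defer func() { _ = p.Close() }()

	// The key is derived with a stable, project-specific salt fetched through the
	// new GetProjectInfo call, so every uplink in the project derives the same key.
	encKey, err := p.SaltedKeyFromPassphrase(ctx, "my secret passphrase")
	if err != nil {
		return err
	}
	encCtx := uplink.NewEncryptionCtxWithDefaultKey(*encKey)

	// Buckets are now opened with an *EncryptionCtx.
	bucket, err := p.OpenBucket(ctx, "prod", encCtx)
	if err != nil {
		return err
	}
	defer func() { _ = bucket.Close() }()

	return bucket.UploadObject(ctx, "webserver/logs/log.txt", strings.NewReader("hello world"), nil)
}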
@@ -26,6 +26,7 @@ import (
	"storj.io/storj/pkg/process"
	"storj.io/storj/pkg/storj"
	"storj.io/storj/uplink"
	"storj.io/storj/uplink/setup"
)

// GatewayFlags configuration flags
@@ -224,7 +225,7 @@ func (flags GatewayFlags) action(ctx context.Context, cliCtx *cli.Context) (err

// NewGateway creates a new minio Gateway
func (flags GatewayFlags) NewGateway(ctx context.Context) (gw minio.Gateway, err error) {
	encKey, err := uplink.LoadEncryptionKey(flags.Enc.KeyFilepath)
	encCtx, err := setup.LoadEncryptionCtx(ctx, flags.Enc)
	if err != nil {
		return nil, err
	}
@@ -236,7 +237,7 @@ func (flags GatewayFlags) NewGateway(ctx context.Context) (gw minio.Gateway, err

	return miniogw.NewStorjGateway(
		project,
		encKey,
		encCtx,
		storj.Cipher(flags.Enc.PathType).ToCipherSuite(),
		flags.GetEncryptionScheme().ToEncryptionParameters(),
		flags.GetRedundancyScheme(),
@@ -291,7 +292,7 @@ func (flags GatewayFlags) interactive(
		return Error.Wrap(err)
	}

	err = uplink.SaveEncryptionKey(humanReadableKey, encryptionKeyFilepath)
	err = setup.SaveEncryptionKey(humanReadableKey, encryptionKeyFilepath)
	if err != nil {
		return Error.Wrap(err)
	}
@@ -332,7 +333,7 @@ func (flags GatewayFlags) nonInteractive(
	cmd *cobra.Command, setupDir string, encryptionKeyFilepath string, overrides map[string]interface{},
) error {
	if setupCfg.Enc.EncryptionKey != "" {
		err := uplink.SaveEncryptionKey(setupCfg.Enc.EncryptionKey, encryptionKeyFilepath)
		err := setup.SaveEncryptionKey(setupCfg.Enc.EncryptionKey, encryptionKeyFilepath)
		if err != nil {
			return Error.Wrap(err)
		}
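The same substitution repeats across the gateway above and the uplink commands below: the file-based key load (and the local loadEncryptionAccess helper deleted next) is replaced by the storj.io/storj/uplink/setup package. A hypothetical condensed sketch of the new call pattern, with the helper signatures inferred from these call sites rather than taken from the package itself:

// openBucketForCommand is a hypothetical helper showing the pattern used by the
// cp/ls/rm commands after this change. setup.LoadEncryptionCtx is assumed to take
// the uplink encryption config and return a *libuplink.EncryptionCtx.
func openBucketForCommand(ctx context.Context, cfg UplinkFlags, bucketName string) (*libuplink.Project, *libuplink.Bucket, error) {
	encCtx, err := setup.LoadEncryptionCtx(ctx, cfg.Enc) // replaces uplink.LoadEncryptionKey + EncryptionAccess
	if err != nil {
		return nil, nil, err
	}
	// GetProjectAndBucket now accepts the *EncryptionCtx instead of an EncryptionAccess value.
	return cfg.GetProjectAndBucket(ctx, bucketName, encCtx)
}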
@ -1,22 +0,0 @@
|
||||
// Copyright (C) 2019 Storj Labs, Inc.
|
||||
// See LICENSE for copying information.
|
||||
|
||||
package cmd
|
||||
|
||||
import (
|
||||
libuplink "storj.io/storj/lib/uplink"
|
||||
"storj.io/storj/uplink"
|
||||
)
|
||||
|
||||
// loadEncryptionAccess loads the encryption key stored in the file pointed by
|
||||
// filepath and creates an EncryptionAccess with it.
|
||||
func loadEncryptionAccess(filepath string) (libuplink.EncryptionAccess, error) {
|
||||
key, err := uplink.LoadEncryptionKey(filepath)
|
||||
if err != nil {
|
||||
return libuplink.EncryptionAccess{}, err
|
||||
}
|
||||
|
||||
return libuplink.EncryptionAccess{
|
||||
Key: *key,
|
||||
}, nil
|
||||
}
|
@ -1,67 +0,0 @@
|
||||
// Copyright (C) 2019 Storj Labs, Inc.
|
||||
// See LICENSE for copying information.
|
||||
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"storj.io/storj/internal/testcontext"
|
||||
"storj.io/storj/internal/testrand"
|
||||
"storj.io/storj/pkg/storj"
|
||||
"storj.io/storj/uplink"
|
||||
)
|
||||
|
||||
func TestLoadEncryptionKeyIntoEncryptionAccess(t *testing.T) {
|
||||
t.Run("ok", func(t *testing.T) {
|
||||
ctx := testcontext.New(t)
|
||||
defer ctx.Cleanup()
|
||||
|
||||
passphrase := testrand.BytesInt(testrand.Intn(100) + 1)
|
||||
|
||||
expectedKey, err := storj.NewKey(passphrase)
|
||||
require.NoError(t, err)
|
||||
|
||||
filename := ctx.File("encryption.key")
|
||||
err = ioutil.WriteFile(filename, expectedKey[:], os.FileMode(0400))
|
||||
require.NoError(t, err)
|
||||
|
||||
access, err := loadEncryptionAccess(filename)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, *expectedKey, access.Key)
|
||||
})
|
||||
|
||||
t.Run("error", func(t *testing.T) {
|
||||
ctx := testcontext.New(t)
|
||||
defer ctx.Cleanup()
|
||||
|
||||
filename := ctx.File("encryption.key")
|
||||
|
||||
_, err := loadEncryptionAccess(filename)
|
||||
require.Error(t, err)
|
||||
})
|
||||
}
|
||||
|
||||
func TestSaveLoadEncryptionKey(t *testing.T) {
|
||||
ctx := testcontext.New(t)
|
||||
defer ctx.Cleanup()
|
||||
|
||||
inputKey := string(testrand.BytesInt(testrand.Intn(storj.KeySize)*3 + 1))
|
||||
|
||||
filename := ctx.File("storj-test-cmd-uplink", "encryption.key")
|
||||
err := uplink.SaveEncryptionKey(inputKey, filename)
|
||||
require.NoError(t, err)
|
||||
|
||||
access, err := loadEncryptionAccess(filename)
|
||||
require.NoError(t, err)
|
||||
|
||||
if len(inputKey) > storj.KeySize {
|
||||
require.Equal(t, []byte(inputKey[:storj.KeySize]), access.Key[:])
|
||||
} else {
|
||||
require.Equal(t, []byte(inputKey), access.Key[:len(inputKey)])
|
||||
}
|
||||
}
|
@ -19,6 +19,7 @@ import (
|
||||
"storj.io/storj/internal/fpath"
|
||||
libuplink "storj.io/storj/lib/uplink"
|
||||
"storj.io/storj/pkg/process"
|
||||
"storj.io/storj/uplink/setup"
|
||||
)
|
||||
|
||||
var (
|
||||
@ -82,12 +83,12 @@ func upload(ctx context.Context, src fpath.FPath, dst fpath.FPath, showProgress
|
||||
return fmt.Errorf("source cannot be a directory: %s", src)
|
||||
}
|
||||
|
||||
access, err := loadEncryptionAccess(cfg.Enc.KeyFilepath)
|
||||
encCtx, err := setup.LoadEncryptionCtx(ctx, cfg.Enc)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
project, bucket, err := cfg.GetProjectAndBucket(ctx, dst.Bucket(), access)
|
||||
project, bucket, err := cfg.GetProjectAndBucket(ctx, dst.Bucket(), encCtx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -135,12 +136,12 @@ func download(ctx context.Context, src fpath.FPath, dst fpath.FPath, showProgres
|
||||
return fmt.Errorf("destination must be local path: %s", dst)
|
||||
}
|
||||
|
||||
access, err := loadEncryptionAccess(cfg.Enc.KeyFilepath)
|
||||
encCtx, err := setup.LoadEncryptionCtx(ctx, cfg.Enc)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
project, bucket, err := cfg.GetProjectAndBucket(ctx, src.Bucket(), access)
|
||||
project, bucket, err := cfg.GetProjectAndBucket(ctx, src.Bucket(), encCtx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -214,12 +215,12 @@ func copyObject(ctx context.Context, src fpath.FPath, dst fpath.FPath) (err erro
|
||||
return fmt.Errorf("destination must be Storj URL: %s", dst)
|
||||
}
|
||||
|
||||
access, err := loadEncryptionAccess(cfg.Enc.KeyFilepath)
|
||||
encCtx, err := setup.LoadEncryptionCtx(ctx, cfg.Enc)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
project, bucket, err := cfg.GetProjectAndBucket(ctx, dst.Bucket(), access)
|
||||
project, bucket, err := cfg.GetProjectAndBucket(ctx, dst.Bucket(), encCtx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -14,6 +14,7 @@ import (
|
||||
libuplink "storj.io/storj/lib/uplink"
|
||||
"storj.io/storj/pkg/process"
|
||||
"storj.io/storj/pkg/storj"
|
||||
"storj.io/storj/uplink/setup"
|
||||
)
|
||||
|
||||
var (
|
||||
@ -42,7 +43,7 @@ func list(cmd *cobra.Command, args []string) error {
|
||||
}
|
||||
}()
|
||||
|
||||
access, err := loadEncryptionAccess(cfg.Enc.KeyFilepath)
|
||||
encCtx, err := setup.LoadEncryptionCtx(ctx, cfg.Enc)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -57,7 +58,7 @@ func list(cmd *cobra.Command, args []string) error {
|
||||
return fmt.Errorf("No bucket specified, use format sj://bucket/")
|
||||
}
|
||||
|
||||
bucket, err := project.OpenBucket(ctx, src.Bucket(), &access)
|
||||
bucket, err := project.OpenBucket(ctx, src.Bucket(), encCtx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -86,7 +87,7 @@ func list(cmd *cobra.Command, args []string) error {
|
||||
for _, bucket := range list.Items {
|
||||
fmt.Println("BKT", formatTime(bucket.Created), bucket.Name)
|
||||
if *recursiveFlag {
|
||||
if err := listFilesFromBucket(ctx, project, bucket.Name, access); err != nil {
|
||||
if err := listFilesFromBucket(ctx, project, bucket.Name, encCtx); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
@ -105,13 +106,13 @@ func list(cmd *cobra.Command, args []string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func listFilesFromBucket(ctx context.Context, project *libuplink.Project, bucketName string, access libuplink.EncryptionAccess) error {
|
||||
func listFilesFromBucket(ctx context.Context, project *libuplink.Project, bucketName string, encCtx *libuplink.EncryptionCtx) error {
|
||||
prefix, err := fpath.New(fmt.Sprintf("sj://%s/", bucketName))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
bucket, err := project.OpenBucket(ctx, bucketName, &access)
|
||||
bucket, err := project.OpenBucket(ctx, bucketName, encCtx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -11,6 +11,7 @@ import (
|
||||
"storj.io/storj/internal/fpath"
|
||||
"storj.io/storj/pkg/process"
|
||||
"storj.io/storj/pkg/storj"
|
||||
"storj.io/storj/uplink/setup"
|
||||
)
|
||||
|
||||
func init() {
|
||||
@ -41,12 +42,12 @@ func deleteBucket(cmd *cobra.Command, args []string) error {
|
||||
return fmt.Errorf("Nested buckets not supported, use format sj://bucket/")
|
||||
}
|
||||
|
||||
access, err := loadEncryptionAccess(cfg.Enc.KeyFilepath)
|
||||
encCtx, err := setup.LoadEncryptionCtx(ctx, cfg.Enc)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
project, bucket, err := cfg.GetProjectAndBucket(ctx, dst.Bucket(), access)
|
||||
project, bucket, err := cfg.GetProjectAndBucket(ctx, dst.Bucket(), encCtx)
|
||||
if err != nil {
|
||||
return convertError(err, dst)
|
||||
}
|
||||
|
@ -10,6 +10,7 @@ import (
|
||||
|
||||
"storj.io/storj/internal/fpath"
|
||||
"storj.io/storj/pkg/process"
|
||||
"storj.io/storj/uplink/setup"
|
||||
)
|
||||
|
||||
func init() {
|
||||
@ -36,12 +37,12 @@ func deleteObject(cmd *cobra.Command, args []string) error {
|
||||
return fmt.Errorf("No bucket specified, use format sj://bucket/")
|
||||
}
|
||||
|
||||
access, err := loadEncryptionAccess(cfg.Enc.KeyFilepath)
|
||||
encCtx, err := setup.LoadEncryptionCtx(ctx, cfg.Enc)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
project, bucket, err := cfg.GetProjectAndBucket(ctx, dst.Bucket(), access)
|
||||
project, bucket, err := cfg.GetProjectAndBucket(ctx, dst.Bucket(), encCtx)
|
||||
if err != nil {
|
||||
return convertError(err, dst)
|
||||
}
|
||||
|
@ -12,6 +12,8 @@ import (
|
||||
"runtime/pprof"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/zeebo/errs"
|
||||
"go.uber.org/zap"
|
||||
|
||||
"storj.io/storj/internal/fpath"
|
||||
libuplink "storj.io/storj/lib/uplink"
|
||||
@ -27,7 +29,20 @@ type UplinkFlags struct {
|
||||
uplink.Config
|
||||
}
|
||||
|
||||
var cfg UplinkFlags
|
||||
var (
|
||||
cfg UplinkFlags
|
||||
confDir string
|
||||
|
||||
defaults = cfgstruct.DefaultsFlag(RootCmd)
|
||||
|
||||
// Error is the class of errors returned by this package
|
||||
Error = errs.Class("uplink")
|
||||
)
|
||||
|
||||
func init() {
|
||||
defaultConfDir := fpath.ApplicationDir("storj", "uplink")
|
||||
cfgstruct.SetupFlag(zap.L(), RootCmd, &confDir, "config-dir", defaultConfDir, "main directory for uplink configuration")
|
||||
}
|
||||
|
||||
var cpuProfile = flag.String("profile.cpu", "", "file path of the cpu profile to be created")
|
||||
var memoryProfile = flag.String("profile.mem", "", "file path of the memory profile to be created")
|
||||
@ -97,7 +112,7 @@ func (cliCfg *UplinkFlags) GetProject(ctx context.Context) (*libuplink.Project,
|
||||
}
|
||||
|
||||
// GetProjectAndBucket returns a *libuplink.Bucket for interacting with a specific project's bucket
|
||||
func (cliCfg *UplinkFlags) GetProjectAndBucket(ctx context.Context, bucketName string, access libuplink.EncryptionAccess) (project *libuplink.Project, bucket *libuplink.Bucket, err error) {
|
||||
func (cliCfg *UplinkFlags) GetProjectAndBucket(ctx context.Context, bucketName string, encCtx *libuplink.EncryptionCtx) (project *libuplink.Project, bucket *libuplink.Bucket, err error) {
|
||||
project, err = cliCfg.GetProject(ctx)
|
||||
if err != nil {
|
||||
return project, bucket, err
|
||||
@ -111,7 +126,7 @@ func (cliCfg *UplinkFlags) GetProjectAndBucket(ctx context.Context, bucketName s
|
||||
}
|
||||
}()
|
||||
|
||||
bucket, err = project.OpenBucket(ctx, bucketName, &access)
|
||||
bucket, err = project.OpenBucket(ctx, bucketName, encCtx)
|
||||
if err != nil {
|
||||
return project, bucket, err
|
||||
}
|
||||
|
@ -11,14 +11,12 @@ import (
|
||||
"strings"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/zeebo/errs"
|
||||
"go.uber.org/zap"
|
||||
|
||||
"storj.io/storj/cmd/internal/wizard"
|
||||
"storj.io/storj/internal/fpath"
|
||||
"storj.io/storj/pkg/cfgstruct"
|
||||
"storj.io/storj/pkg/process"
|
||||
"storj.io/storj/uplink"
|
||||
"storj.io/storj/uplink/setup"
|
||||
)
|
||||
|
||||
var (
|
||||
@ -28,19 +26,10 @@ var (
|
||||
RunE: cmdSetup,
|
||||
Annotations: map[string]string{"type": "setup"},
|
||||
}
|
||||
|
||||
setupCfg UplinkFlags
|
||||
confDir string
|
||||
defaults cfgstruct.BindOpt
|
||||
|
||||
// Error is the default uplink setup errs class
|
||||
Error = errs.Class("uplink setup error")
|
||||
)
|
||||
|
||||
func init() {
|
||||
defaultConfDir := fpath.ApplicationDir("storj", "uplink")
|
||||
cfgstruct.SetupFlag(zap.L(), RootCmd, &confDir, "config-dir", defaultConfDir, "main directory for uplink configuration")
|
||||
defaults = cfgstruct.DefaultsFlag(RootCmd)
|
||||
RootCmd.AddCommand(setupCmd)
|
||||
process.Bind(setupCmd, &setupCfg, defaults, cfgstruct.ConfDir(confDir), cfgstruct.SetupMode())
|
||||
}
|
||||
@ -88,7 +77,7 @@ func cmdSetup(cmd *cobra.Command, args []string) (err error) {
|
||||
// or to a default path whose directory tree exists.
|
||||
func cmdSetupNonInteractive(cmd *cobra.Command, setupDir string, encryptionKeyFilepath string) error {
|
||||
if setupCfg.Enc.EncryptionKey != "" {
|
||||
err := uplink.SaveEncryptionKey(setupCfg.Enc.EncryptionKey, encryptionKeyFilepath)
|
||||
err := setup.SaveEncryptionKey(setupCfg.Enc.EncryptionKey, encryptionKeyFilepath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -132,7 +121,7 @@ func cmdSetupInteractive(cmd *cobra.Command, setupDir string, encryptionKeyFilep
|
||||
return err
|
||||
}
|
||||
|
||||
err = uplink.SaveEncryptionKey(humanReadableKey, encryptionKeyFilepath)
|
||||
err = setup.SaveEncryptionKey(humanReadableKey, encryptionKeyFilepath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -13,9 +13,9 @@ import (
|
||||
|
||||
"storj.io/storj/internal/fpath"
|
||||
libuplink "storj.io/storj/lib/uplink"
|
||||
"storj.io/storj/pkg/encryption"
|
||||
"storj.io/storj/pkg/macaroon"
|
||||
"storj.io/storj/pkg/process"
|
||||
"storj.io/storj/uplink/setup"
|
||||
)
|
||||
|
||||
var shareCfg struct {
|
||||
@ -79,11 +79,39 @@ func shareMain(cmd *cobra.Command, args []string) (err error) {
|
||||
return err
|
||||
}
|
||||
|
||||
var restrictions []libuplink.EncryptionRestriction
|
||||
for _, path := range shareCfg.AllowedPathPrefix {
|
||||
p, err := fpath.New(path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if p.IsLocal() {
|
||||
return errs.New("required path must be remote: %q", path)
|
||||
}
|
||||
|
||||
restrictions = append(restrictions, libuplink.EncryptionRestriction{
|
||||
Bucket: p.Bucket(),
|
||||
PathPrefix: p.Path(),
|
||||
})
|
||||
}
|
||||
|
||||
key, err := libuplink.ParseAPIKey(cfg.Client.APIKey)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
encCtx, err := setup.LoadEncryptionCtx(ctx, cfg.Enc)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if len(restrictions) > 0 {
|
||||
key, encCtx, err = encCtx.Restrict(key, restrictions...)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
caveat, err := macaroon.NewCaveat()
|
||||
if err != nil {
|
||||
return err
|
||||
@ -96,53 +124,6 @@ func shareMain(cmd *cobra.Command, args []string) (err error) {
|
||||
caveat.NotBefore = notBefore
|
||||
caveat.NotAfter = notAfter
|
||||
|
||||
var project *libuplink.Project
|
||||
|
||||
access, err := loadEncryptionAccess(cfg.Enc.KeyFilepath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
cache := make(map[string]*libuplink.BucketConfig)
|
||||
|
||||
for _, path := range shareCfg.AllowedPathPrefix {
|
||||
p, err := fpath.New(path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if p.IsLocal() {
|
||||
return errs.New("required path must be remote: %q", path)
|
||||
}
|
||||
|
||||
bi, ok := cache[p.Bucket()]
|
||||
if !ok {
|
||||
if project == nil {
|
||||
project, err = cfg.GetProject(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer func() { err = errs.Combine(err, project.Close()) }()
|
||||
}
|
||||
|
||||
_, bi, err = project.GetBucketInfo(ctx, p.Bucket())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
cache[p.Bucket()] = bi
|
||||
}
|
||||
|
||||
encPath, err := encryption.EncryptPath(path, bi.PathCipher.ToCipher(), &access.Key)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
caveat.AllowedPaths = append(caveat.AllowedPaths, &macaroon.Caveat_Path{
|
||||
Bucket: []byte(p.Bucket()),
|
||||
EncryptedPathPrefix: []byte(encPath),
|
||||
})
|
||||
}
|
||||
|
||||
{
|
||||
// Times don't marshal very well with MarshalTextString, and the nonce doesn't
|
||||
// matter to humans, so handle those explicitly and then dispatch to the generic
|
||||
@ -165,6 +146,12 @@ func shareMain(cmd *cobra.Command, args []string) (err error) {
|
||||
return err
|
||||
}
|
||||
|
||||
fmt.Println("new key:", key.Serialize())
|
||||
encCtxData, err := encCtx.Serialize()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
fmt.Println("api key:", key.Serialize())
|
||||
fmt.Println("enc ctx:", encCtxData)
|
||||
return nil
|
||||
}
|
||||
|
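The share command above now derives its restricted credentials entirely through the encryption context instead of encrypting path prefixes by hand. A hypothetical condensed version of that flow, reusing the UplinkFlags config and the setup helper shown above; the bucket and path values are placeholders.

// shareRestricted restricts an API key and encryption context to one
// bucket/path prefix and prints the pair to hand to another user.
func shareRestricted(ctx context.Context, cfg UplinkFlags) error {
	key, err := libuplink.ParseAPIKey(cfg.Client.APIKey)
	if err != nil {
		return err
	}
	encCtx, err := setup.LoadEncryptionCtx(ctx, cfg.Enc)
	if err != nil {
		return err
	}
	// shareMain builds these restrictions from the --allowed-path-prefix flags.
	key, encCtx, err = encCtx.Restrict(key, libuplink.EncryptionRestriction{
		Bucket:     "prod",           // placeholder
		PathPrefix: "webserver/logs", // placeholder
	})
	if err != nil {
		return err
	}
	encCtxData, err := encCtx.Serialize()
	if err != nil {
		return err
	}
	fmt.Println("api key:", key.Serialize())
	fmt.Println("enc ctx:", encCtxData)
	return nil
}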
go.mod (2 changed lines)
@@ -106,7 +106,7 @@ require (
	github.com/zeebo/errs v1.2.1-0.20190617123220-06a113fed680
	github.com/zeebo/float16 v0.1.0 // indirect
	github.com/zeebo/incenc v0.0.0-20180505221441-0d92902eec54 // indirect
	github.com/zeebo/structs v1.0.1
	github.com/zeebo/structs v1.0.2
	go.etcd.io/bbolt v1.3.2 // indirect
	go.uber.org/atomic v1.3.2 // indirect
	go.uber.org/multierr v1.1.0 // indirect
go.sum (4 changed lines)
@@ -387,8 +387,8 @@ github.com/zeebo/float16 v0.1.0 h1:kRqxv5og6z1emEyz5FpW0/BVHe5VfxEAw6b1ljCZlUc=
github.com/zeebo/float16 v0.1.0/go.mod h1:fssGvvXu+XS8MH57cKmyrLB/cqioYeYX/2mXCN3a5wo=
github.com/zeebo/incenc v0.0.0-20180505221441-0d92902eec54 h1:+cwNE5KJ3pika4HuzmDHkDlK5myo0G9Sv+eO7WWxnUQ=
github.com/zeebo/incenc v0.0.0-20180505221441-0d92902eec54/go.mod h1:EI8LcOBDlSL3POyqwC1eJhOYlMBMidES+613EtmmT5w=
github.com/zeebo/structs v1.0.1 h1:MopCKn+ah1DF83tdMjcN+1V/gFpPO8eUnXEaiarwFLI=
github.com/zeebo/structs v1.0.1/go.mod h1:LphfpprlqJQcbCq+eA3iIK/NsejMwk9mlfH/tM1XuKQ=
github.com/zeebo/structs v1.0.2 h1:kvcd7s2LqXuO9cdV5LqrGHCOAfCBXaZpKCA3jD9SJIc=
github.com/zeebo/structs v1.0.2/go.mod h1:LphfpprlqJQcbCq+eA3iIK/NsejMwk9mlfH/tM1XuKQ=
go.etcd.io/bbolt v1.3.2 h1:Z/90sZLPOeCy2PwprqkFa25PdkusRzaj9P8zm/KNyvk=
go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
go.opencensus.io v0.15.0/go.mod h1:UffZAU+4sDEINUGP/B7UfBBkq4fqLu9zXAX7ke6CHW0=
@ -13,15 +13,21 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/spf13/pflag"
|
||||
"github.com/vivint/infectious"
|
||||
"github.com/zeebo/errs"
|
||||
"go.uber.org/zap"
|
||||
|
||||
"storj.io/storj/pkg/auth/signing"
|
||||
"storj.io/storj/pkg/cfgstruct"
|
||||
"storj.io/storj/pkg/eestream"
|
||||
"storj.io/storj/pkg/encryption"
|
||||
"storj.io/storj/pkg/identity"
|
||||
"storj.io/storj/pkg/macaroon"
|
||||
"storj.io/storj/pkg/metainfo/kvmetainfo"
|
||||
"storj.io/storj/pkg/pb"
|
||||
"storj.io/storj/pkg/peertls/tlsopts"
|
||||
ecclient "storj.io/storj/pkg/storage/ec"
|
||||
"storj.io/storj/pkg/storage/segments"
|
||||
"storj.io/storj/pkg/storage/streams"
|
||||
"storj.io/storj/pkg/storj"
|
||||
"storj.io/storj/pkg/stream"
|
||||
@ -194,7 +200,7 @@ func (uplink *Uplink) UploadWithExpirationAndConfig(ctx context.Context, satelli
|
||||
}
|
||||
}
|
||||
|
||||
metainfo, streams, err := config.GetMetainfo(ctx, uplink.Identity)
|
||||
metainfo, streams, err := GetMetainfo(ctx, config, uplink.Identity)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -250,7 +256,7 @@ func uploadStream(ctx context.Context, streams streams.Store, mutableObject stor
|
||||
// DownloadStream returns stream for downloading data.
|
||||
func (uplink *Uplink) DownloadStream(ctx context.Context, satellite *satellite.Peer, bucket string, path storj.Path) (*stream.Download, error) {
|
||||
config := uplink.GetConfig(satellite)
|
||||
metainfo, streams, err := config.GetMetainfo(ctx, uplink.Identity)
|
||||
metainfo, streams, err := GetMetainfo(ctx, config, uplink.Identity)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -281,7 +287,7 @@ func (uplink *Uplink) Download(ctx context.Context, satellite *satellite.Peer, b
|
||||
// Delete data to specific satellite
|
||||
func (uplink *Uplink) Delete(ctx context.Context, satellite *satellite.Peer, bucket string, path storj.Path) error {
|
||||
config := uplink.GetConfig(satellite)
|
||||
metainfo, _, err := config.GetMetainfo(ctx, uplink.Identity)
|
||||
metainfo, _, err := GetMetainfo(ctx, config, uplink.Identity)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -321,3 +327,75 @@ func atLeastOne(value int) int {
|
||||
}
|
||||
return value
|
||||
}
|
||||
|
||||
// GetMetainfo returns a metainfo and streams store for the given configuration and identity.
|
||||
func GetMetainfo(ctx context.Context, config uplink.Config, identity *identity.FullIdentity) (db storj.Metainfo, ss streams.Store, err error) {
|
||||
tlsOpts, err := tlsopts.NewOptions(identity, config.TLS)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
// ToDo: Handle Versioning for Uplinks here
|
||||
|
||||
tc := transport.NewClientWithTimeouts(tlsOpts, transport.Timeouts{
|
||||
Request: config.Client.RequestTimeout,
|
||||
Dial: config.Client.DialTimeout,
|
||||
})
|
||||
|
||||
if config.Client.SatelliteAddr == "" {
|
||||
return nil, nil, errs.New("satellite address not specified")
|
||||
}
|
||||
|
||||
m, err := metainfo.Dial(ctx, tc, config.Client.SatelliteAddr, config.Client.APIKey)
|
||||
if err != nil {
|
||||
return nil, nil, errs.New("failed to connect to metainfo service: %v", err)
|
||||
}
|
||||
defer func() {
|
||||
if err != nil {
|
||||
err = errs.Combine(err, m.Close())
|
||||
}
|
||||
}()
|
||||
|
||||
project, err := kvmetainfo.SetupProject(m)
|
||||
if err != nil {
|
||||
return nil, nil, errs.New("failed to create project: %v", err)
|
||||
}
|
||||
|
||||
ec := ecclient.NewClient(tc, config.RS.MaxBufferMem.Int())
|
||||
fc, err := infectious.NewFEC(config.RS.MinThreshold, config.RS.MaxThreshold)
|
||||
if err != nil {
|
||||
return nil, nil, errs.New("failed to create erasure coding client: %v", err)
|
||||
}
|
||||
rs, err := eestream.NewRedundancyStrategy(eestream.NewRSScheme(fc, config.RS.ErasureShareSize.Int()), config.RS.RepairThreshold, config.RS.SuccessThreshold)
|
||||
if err != nil {
|
||||
return nil, nil, errs.New("failed to create redundancy strategy: %v", err)
|
||||
}
|
||||
|
||||
maxEncryptedSegmentSize, err := encryption.CalcEncryptedSize(config.Client.SegmentSize.Int64(), config.GetEncryptionScheme())
|
||||
if err != nil {
|
||||
return nil, nil, errs.New("failed to calculate max encrypted segment size: %v", err)
|
||||
}
|
||||
segment := segments.NewSegmentStore(m, ec, rs, config.Client.MaxInlineSize.Int(), maxEncryptedSegmentSize)
|
||||
|
||||
blockSize := config.GetEncryptionScheme().BlockSize
|
||||
if int(blockSize)%config.RS.ErasureShareSize.Int()*config.RS.MinThreshold != 0 {
|
||||
err = errs.New("EncryptionBlockSize must be a multiple of ErasureShareSize * RS MinThreshold")
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
// TODO(jeff): there's some cycles with libuplink and this package in the libuplink tests
|
||||
// and so this package can't import libuplink. that's why this function is duplicated
|
||||
// in some spots.
|
||||
|
||||
encStore := encryption.NewStore()
|
||||
encStore.SetDefaultKey(new(storj.Key))
|
||||
|
||||
strms, err := streams.NewStreamStore(segment, config.Client.SegmentSize.Int64(), encStore,
|
||||
int(blockSize), storj.Cipher(config.Enc.DataType), config.Client.MaxInlineSize.Int(),
|
||||
)
|
||||
if err != nil {
|
||||
return nil, nil, errs.New("failed to create stream store: %v", err)
|
||||
}
|
||||
|
||||
return kvmetainfo.New(project, m, strms, segment, encStore), strms, nil
|
||||
}
|
||||
|
@ -53,9 +53,12 @@ func testPlanetWithLibUplink(t *testing.T, cfg testConfig,
|
||||
})
|
||||
}
|
||||
|
||||
func simpleEncryptionAccess(encKey string) (access EncryptionAccess) {
|
||||
copy(access.Key[:], encKey)
|
||||
return access
|
||||
func simpleEncryptionAccess(encKey string) (access *EncryptionCtx) {
|
||||
key, err := storj.NewKey([]byte(encKey))
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return NewEncryptionCtxWithDefaultKey(*key)
|
||||
}
|
||||
|
||||
// check that partner bucket attributes are stored and retrieved correctly.
|
||||
@ -83,7 +86,7 @@ func TestPartnerBucketAttrs(t *testing.T) {
|
||||
|
||||
// partner ID set
|
||||
proj.uplinkCfg.Volatile.PartnerID = partnerID
|
||||
got, err := proj.OpenBucket(ctx, bucketName, &access)
|
||||
got, err := proj.OpenBucket(ctx, bucketName, access)
|
||||
require.NoError(t, err)
|
||||
|
||||
info, err := db.Get(ctx, consoleProject.ID, []byte(bucketName))
|
||||
@ -92,7 +95,7 @@ func TestPartnerBucketAttrs(t *testing.T) {
|
||||
|
||||
// partner ID NOT set
|
||||
proj.uplinkCfg.Volatile.PartnerID = ""
|
||||
got, err = proj.OpenBucket(ctx, bucketName, &access)
|
||||
got, err = proj.OpenBucket(ctx, bucketName, access)
|
||||
require.NoError(t, err)
|
||||
defer ctx.Check(got.Close)
|
||||
})
|
||||
@ -139,7 +142,7 @@ func TestBucketAttrs(t *testing.T) {
|
||||
assert.Equal(t, bucketName, bucket.Name)
|
||||
assert.Falsef(t, bucket.Created.Before(before), "impossible creation time %v", bucket.Created)
|
||||
|
||||
got, err := proj.OpenBucket(ctx, bucketName, &access)
|
||||
got, err := proj.OpenBucket(ctx, bucketName, access)
|
||||
require.NoError(t, err)
|
||||
defer ctx.Check(got.Close)
|
||||
|
||||
@ -198,7 +201,7 @@ func TestBucketAttrsApply(t *testing.T) {
|
||||
_, err := proj.CreateBucket(ctx, bucketName, &inBucketConfig)
|
||||
require.NoError(t, err)
|
||||
|
||||
bucket, err := proj.OpenBucket(ctx, bucketName, &access)
|
||||
bucket, err := proj.OpenBucket(ctx, bucketName, access)
|
||||
require.NoError(t, err)
|
||||
defer ctx.Check(bucket.Close)
|
||||
|
||||
|
@@ -4,6 +4,14 @@
package uplink

import (
	"github.com/btcsuite/btcutil/base58"
	"github.com/gogo/protobuf/proto"
	"github.com/zeebo/errs"

	"storj.io/storj/pkg/encryption"
	"storj.io/storj/pkg/macaroon"
	"storj.io/storj/pkg/paths"
	"storj.io/storj/pkg/pb"
	"storj.io/storj/pkg/storj"
)
@@ -11,13 +19,169 @@ const (
	defaultCipher = storj.EncAESGCM
)

// EncryptionAccess specifies the encryption details needed to encrypt or
// decrypt objects.
type EncryptionAccess struct {
	// Key is the base encryption key to be used for decrypting objects.
	Key storj.Key
	// EncryptedPathPrefix is the (possibly empty) encrypted version of the
	// path from the top of the storage Bucket to this point. This is
	// necessary to have in order to derive further encryption keys.
	EncryptedPathPrefix storj.Path
// EncryptionCtx represents an encryption context. It holds information about
// how various buckets and objects should be encrypted and decrypted.
type EncryptionCtx struct {
	store *encryption.Store
}

// NewEncryptionCtx creates an encryption ctx
func NewEncryptionCtx() *EncryptionCtx {
	return &EncryptionCtx{
		store: encryption.NewStore(),
	}
}

// NewEncryptionCtxWithDefaultKey creates an encryption ctx with a default key set
func NewEncryptionCtxWithDefaultKey(defaultKey storj.Key) *EncryptionCtx {
	ec := NewEncryptionCtx()
	ec.SetDefaultKey(defaultKey)
	return ec
}

// Store returns the underlying encryption store for the context.
func (s *EncryptionCtx) Store() *encryption.Store {
	return s.store
}

// SetDefaultKey sets the default key for the encryption context.
func (s *EncryptionCtx) SetDefaultKey(defaultKey storj.Key) {
	s.store.SetDefaultKey(&defaultKey)
}

// Import merges the other encryption context into this one. In cases
// of conflicting path decryption settings (including if both contexts have
// a default key), the new settings are kept.
func (s *EncryptionCtx) Import(other *EncryptionCtx) error {
	if key := other.store.GetDefaultKey(); key != nil {
		s.store.SetDefaultKey(key)
	}
	return other.store.Iterate(s.store.Add)
}

// EncryptionRestriction represents a scenario where some set of objects
// may need to be encrypted/decrypted
type EncryptionRestriction struct {
	Bucket     string
	PathPrefix storj.Path
}

// Restrict creates a new EncryptionCtx with no default key, where the key material
|
||||
// in the new context is just enough to allow someone to access all of the given
|
||||
// restrictions but no more.
|
||||
func (s *EncryptionCtx) Restrict(apiKey APIKey, restrictions ...EncryptionRestriction) (APIKey, *EncryptionCtx, error) {
|
||||
if len(restrictions) == 0 {
|
||||
// Should the function signature be
|
||||
// func (s *EncryptionCtx) Restrict(apiKey APIKey, restriction EncryptionRestriction, restrictions ...EncryptionRestriction) (APIKey, *EncryptionCtx, error) {
|
||||
// so we don't have to do this test?
|
||||
return APIKey{}, nil, errs.New("at least one restriction required")
|
||||
}
|
||||
|
||||
caveat := macaroon.Caveat{}
|
||||
encCtx := NewEncryptionCtx()
|
||||
|
||||
for _, res := range restrictions {
|
||||
unencPath := paths.NewUnencrypted(res.PathPrefix)
|
||||
cipher := storj.AESGCM // TODO(jeff): pick the right path cipher
|
||||
|
||||
encPath, err := encryption.StoreEncryptPath(res.Bucket, unencPath, cipher, s.store)
|
||||
if err != nil {
|
||||
return APIKey{}, nil, err
|
||||
}
|
||||
derivedKey, err := encryption.StoreDerivePathKey(res.Bucket, unencPath, s.store)
|
||||
if err != nil {
|
||||
return APIKey{}, nil, err
|
||||
}
|
||||
|
||||
if err := encCtx.store.Add(res.Bucket, unencPath, encPath, *derivedKey); err != nil {
|
||||
return APIKey{}, nil, err
|
||||
}
|
||||
caveat.AllowedPaths = append(caveat.AllowedPaths, &macaroon.Caveat_Path{
|
||||
Bucket: []byte(res.Bucket),
|
||||
EncryptedPathPrefix: []byte(encPath.Raw()),
|
||||
})
|
||||
}
|
||||
|
||||
apiKey, err := apiKey.Restrict(caveat)
|
||||
if err != nil {
|
||||
return APIKey{}, nil, err
|
||||
}
|
||||
|
||||
return apiKey, encCtx, nil
|
||||
}
|
||||
|
||||
// Serialize turns an EncryptionCtx into base58
|
||||
func (s *EncryptionCtx) Serialize() (string, error) {
|
||||
var storeEntries []*pb.EncryptionCtx_StoreEntry
|
||||
err := s.store.Iterate(func(bucket string, unenc paths.Unencrypted, enc paths.Encrypted, key storj.Key) error {
|
||||
storeEntries = append(storeEntries, &pb.EncryptionCtx_StoreEntry{
|
||||
Bucket: []byte(bucket),
|
||||
UnencryptedPath: []byte(unenc.Raw()),
|
||||
EncryptedPath: []byte(enc.Raw()),
|
||||
Key: key[:],
|
||||
})
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
var defaultKey []byte
|
||||
if key := s.store.GetDefaultKey(); key != nil {
|
||||
defaultKey = key[:]
|
||||
}
|
||||
|
||||
data, err := proto.Marshal(&pb.EncryptionCtx{
|
||||
DefaultKey: defaultKey,
|
||||
StoreEntries: storeEntries,
|
||||
})
|
||||
if err != nil {
|
||||
return "", errs.New("unable to marshal encryption ctx: %v", err)
|
||||
}
|
||||
|
||||
return base58.CheckEncode(data, 0), nil
|
||||
|
||||
}
|
||||
|
||||
// ParseEncryptionCtx parses a base58 serialized encryption context into a working one.
|
||||
func ParseEncryptionCtx(b58data string) (*EncryptionCtx, error) {
|
||||
data, version, err := base58.CheckDecode(b58data)
|
||||
if err != nil || version != 0 {
|
||||
return nil, errs.New("invalid encryption context format")
|
||||
}
|
||||
|
||||
p := new(pb.EncryptionCtx)
|
||||
if err := proto.Unmarshal(data, p); err != nil {
|
||||
return nil, errs.New("unable to unmarshal encryption context: %v", err)
|
||||
}
|
||||
|
||||
encCtx := NewEncryptionCtx()
|
||||
|
||||
if len(p.DefaultKey) > 0 {
|
||||
if len(p.DefaultKey) != len(storj.Key{}) {
|
||||
return nil, errs.New("invalid default key in encryption context")
|
||||
}
|
||||
var defaultKey storj.Key
|
||||
copy(defaultKey[:], p.DefaultKey)
|
||||
encCtx.SetDefaultKey(defaultKey)
|
||||
}
|
||||
|
||||
for _, entry := range p.StoreEntries {
|
||||
if len(entry.Key) != len(storj.Key{}) {
|
||||
return nil, errs.New("invalid key in encryption context entry")
|
||||
}
|
||||
var key storj.Key
|
||||
copy(key[:], entry.Key)
|
||||
|
||||
err := encCtx.store.Add(
|
||||
string(entry.Bucket),
|
||||
paths.NewUnencrypted(string(entry.UnencryptedPath)),
|
||||
paths.NewEncrypted(string(entry.EncryptedPath)),
|
||||
key)
|
||||
if err != nil {
|
||||
return nil, errs.New("invalid encryption context entry: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
return encCtx, nil
|
||||
}
|
||||
|
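Taken together, the functions above form a small round-trip API: build a context with a default key, serialize it to base58, parse it back, and merge contexts with Import. An illustrative sketch follows; the key comes from storj.NewKey purely for brevity, where real callers would typically use SaltedKeyFromPassphrase.

package example

import (
	"fmt"

	"storj.io/storj/lib/uplink"
	"storj.io/storj/pkg/storj"
)

// roundTrip builds an encryption context with a default key, serializes it,
// parses it back, and merges it into a fresh context via Import.
func roundTrip() error {
	key, err := storj.NewKey([]byte("my secret key material")) // placeholder material
	if err != nil {
		return err
	}

	encCtx := uplink.NewEncryptionCtxWithDefaultKey(*key)

	serialized, err := encCtx.Serialize() // base58-encoded pb.EncryptionCtx
	if err != nil {
		return err
	}
	fmt.Println("enc ctx:", serialized)

	parsed, err := uplink.ParseEncryptionCtx(serialized)
	if err != nil {
		return err
	}

	// On conflict (including the default key), Import keeps the imported settings.
	merged := uplink.NewEncryptionCtx()
	return merged.Import(parsed)
}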
lib/uplink/ex_create_bucket_test.go (new file, 62 lines)
@ -0,0 +1,62 @@
|
||||
// Copyright (C) 2019 Storj Labs, Inc.
|
||||
// See LICENSE for copying information.
|
||||
|
||||
package uplink_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
|
||||
"github.com/zeebo/errs"
|
||||
|
||||
"storj.io/storj/lib/uplink"
|
||||
)
|
||||
|
||||
func CreateBucketExample(ctx context.Context, satelliteAddress string, apiKey string, cfg *uplink.Config, out io.Writer) (err error) {
|
||||
errCatch := func(fn func() error) { err = errs.Combine(err, fn()) }
|
||||
|
||||
// First, create an Uplink handle.
|
||||
ul, err := uplink.NewUplink(ctx, cfg)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer errCatch(ul.Close)
|
||||
|
||||
// Then, parse the API key. API keys are "macaroons" that allow you to create new, restricted
|
||||
// API keys.
|
||||
key, err := uplink.ParseAPIKey(apiKey)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Next, open the project in question. Projects are identified by a specific Satellite and API key
|
||||
p, err := ul.OpenProject(ctx, satelliteAddress, key)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer errCatch(p.Close)
|
||||
|
||||
// Last, create the bucket!
|
||||
_, err = p.CreateBucket(ctx, "testbucket", nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
fmt.Fprintln(out, "success!")
|
||||
return nil
|
||||
}
|
||||
|
||||
func Example_createBucket() {
|
||||
// The satellite address is the address of the satellite your API key is valid on
|
||||
satelliteAddress := "us-central-1.tardigrade.io:7777"
|
||||
|
||||
// The API key can be created in the web interface
|
||||
apiKey := "qPSUM3k0bZyOIyil2xrVWiSuc9HuB2yBP3qDrA2Gc"
|
||||
|
||||
err := CreateBucketExample(context.Background(), satelliteAddress, apiKey, &uplink.Config{}, os.Stdout)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
lib/uplink/ex_delete_bucket_test.go (new file, 62 lines)
@ -0,0 +1,62 @@
|
||||
// Copyright (C) 2019 Storj Labs, Inc.
|
||||
// See LICENSE for copying information.
|
||||
|
||||
package uplink_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
|
||||
"github.com/zeebo/errs"
|
||||
|
||||
"storj.io/storj/lib/uplink"
|
||||
)
|
||||
|
||||
func DeleteBucketExample(ctx context.Context, satelliteAddress string, apiKey string, cfg *uplink.Config, out io.Writer) (err error) {
|
||||
errCatch := func(fn func() error) { err = errs.Combine(err, fn()) }
|
||||
|
||||
// First, create an Uplink handle.
|
||||
ul, err := uplink.NewUplink(ctx, cfg)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer errCatch(ul.Close)
|
||||
|
||||
// Then, parse the API key. API keys are "macaroons" that allow you to create new, restricted
|
||||
// API keys.
|
||||
key, err := uplink.ParseAPIKey(apiKey)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Next, open the project in question. Projects are identified by a specific Satellite and API key
|
||||
p, err := ul.OpenProject(ctx, satelliteAddress, key)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer errCatch(p.Close)
|
||||
|
||||
// Last, delete the bucket!
|
||||
err = p.DeleteBucket(ctx, "testbucket")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
fmt.Fprintln(out, "success!")
|
||||
return nil
|
||||
}
|
||||
|
||||
func Example_deleteBucket() {
|
||||
// The satellite address is the address of the satellite your API key is valid on
|
||||
satelliteAddress := "us-central-1.tardigrade.io:7777"
|
||||
|
||||
// The API key can be created in the web interface
|
||||
apiKey := "qPSUM3k0bZyOIyil2xrVWiSuc9HuB2yBP3qDrA2Gc"
|
||||
|
||||
err := DeleteBucketExample(context.Background(), satelliteAddress, apiKey, &uplink.Config{}, os.Stdout)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
@ -48,7 +48,7 @@ func WorkWithLibUplink(satelliteAddress string, encryptionKey *storj.Key, apiKey
|
||||
}
|
||||
|
||||
// Open up the desired Bucket within the Project
|
||||
bucket, err := proj.OpenBucket(ctx, bucketName, &uplink.EncryptionAccess{Key: *encryptionKey})
|
||||
bucket, err := proj.OpenBucket(ctx, bucketName, uplink.NewEncryptionCtxWithDefaultKey(*encryptionKey))
|
||||
if err != nil {
|
||||
return fmt.Errorf("could not open bucket %q: %v", bucketName, err)
|
||||
}
|
lib/uplink/ex_list_buckets_test.go (new file, 73 lines)
@ -0,0 +1,73 @@
|
||||
// Copyright (C) 2019 Storj Labs, Inc.
|
||||
// See LICENSE for copying information.
|
||||
|
||||
package uplink_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
|
||||
"github.com/zeebo/errs"
|
||||
|
||||
"storj.io/storj/lib/uplink"
|
||||
"storj.io/storj/pkg/storj"
|
||||
)
|
||||
|
||||
func ListBucketsExample(ctx context.Context, satelliteAddress string, apiKey string, cfg *uplink.Config, out io.Writer) (err error) {
|
||||
errCatch := func(fn func() error) { err = errs.Combine(err, fn()) }
|
||||
|
||||
// First, create an Uplink handle.
|
||||
ul, err := uplink.NewUplink(ctx, cfg)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer errCatch(ul.Close)
|
||||
|
||||
// Then, parse the API key. API keys are "macaroons" that allow you to create new, restricted
|
||||
// API keys.
|
||||
key, err := uplink.ParseAPIKey(apiKey)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Next, open the project in question. Projects are identified by a specific Satellite and API key
|
||||
p, err := ul.OpenProject(ctx, satelliteAddress, key)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer errCatch(p.Close)
|
||||
|
||||
// Last, list the buckets! Bucket listing is paginated, so you'll need to use pagination.
|
||||
list := uplink.BucketListOptions{
|
||||
Direction: storj.Forward}
|
||||
for {
|
||||
result, err := p.ListBuckets(ctx, &list)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for _, bucket := range result.Items {
|
||||
fmt.Fprintf(out, "Bucket: %v\n", bucket.Name)
|
||||
}
|
||||
if !result.More {
|
||||
break
|
||||
}
|
||||
list = list.NextPage(result)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func Example_listBuckets() {
|
||||
// The satellite address is the address of the satellite your API key is valid on
|
||||
satelliteAddress := "us-central-1.tardigrade.io:7777"
|
||||
|
||||
// The API key can be created in the web interface
|
||||
apiKey := "qPSUM3k0bZyOIyil2xrVWiSuc9HuB2yBP3qDrA2Gc"
|
||||
|
||||
err := ListBucketsExample(context.Background(), satelliteAddress, apiKey, &uplink.Config{}, os.Stdout)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
lib/uplink/ex_password_test.go (new file, 161 lines)
@ -0,0 +1,161 @@
|
||||
// Copyright (C) 2019 Storj Labs, Inc.
|
||||
// See LICENSE for copying information.
|
||||
|
||||
package uplink_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"github.com/zeebo/errs"
|
||||
|
||||
"storj.io/storj/lib/uplink"
|
||||
)
|
||||
|
||||
func CreateEncryptionKeyExampleByAdmin1(ctx context.Context, satelliteAddress, apiKey string, cfg *uplink.Config, out io.Writer) (serializedEncCtx string, err error) {
|
||||
errCatch := func(fn func() error) { err = errs.Combine(err, fn()) }
|
||||
|
||||
// First, create an Uplink handle.
|
||||
ul, err := uplink.NewUplink(ctx, cfg)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
defer errCatch(ul.Close)
|
||||
|
||||
// Parse the API key. API keys are "macaroons" that allow you to create new, restricted API keys.
|
||||
key, err := uplink.ParseAPIKey(apiKey)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
// Open the project in question. Projects are identified by a specific Satellite and API key
|
||||
p, err := ul.OpenProject(ctx, satelliteAddress, key)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
defer errCatch(p.Close)
|
||||
|
||||
// Make a key
|
||||
encKey, err := p.SaltedKeyFromPassphrase(ctx, "my secret passphrase")
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
// Make an encryption context
|
||||
encCtx := uplink.NewEncryptionCtxWithDefaultKey(*encKey)
|
||||
// serialize it
|
||||
serializedEncCtx, err = encCtx.Serialize()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
// Create a bucket
|
||||
_, err = p.CreateBucket(ctx, "prod", nil)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
// Open bucket
|
||||
bucket, err := p.OpenBucket(ctx, "prod", encCtx)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
defer errCatch(bucket.Close)
|
||||
|
||||
// Upload a file
|
||||
err = bucket.UploadObject(ctx, "webserver/logs/log.txt", strings.NewReader("hello world"), nil)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
fmt.Fprintln(out, "success!")
|
||||
return serializedEncCtx, nil
|
||||
}
|
||||
|
||||
func CreateEncryptionKeyExampleByAdmin2(ctx context.Context, satelliteAddress, apiKey string, serializedEncCtx string, cfg *uplink.Config, out io.Writer) (err error) {
|
||||
errCatch := func(fn func() error) { err = errs.Combine(err, fn()) }
|
||||
|
||||
// First, create an Uplink handle.
|
||||
ul, err := uplink.NewUplink(ctx, cfg)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer errCatch(ul.Close)
|
||||
|
||||
// Parse the API key. API keys are "macaroons" that allow you to create new, restricted API keys.
|
||||
key, err := uplink.ParseAPIKey(apiKey)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Open the project in question. Projects are identified by a specific Satellite and API key
|
||||
p, err := ul.OpenProject(ctx, satelliteAddress, key)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer errCatch(p.Close)
|
||||
|
||||
// Parse the encryption context
|
||||
encCtx, err := uplink.ParseEncryptionCtx(serializedEncCtx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Open bucket
|
||||
bucket, err := p.OpenBucket(ctx, "prod", encCtx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer errCatch(bucket.Close)
|
||||
|
||||
// Open file
|
||||
obj, err := bucket.OpenObject(ctx, "webserver/logs/log.txt")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer errCatch(obj.Close)
|
||||
|
||||
// Get a reader for the entire file
|
||||
r, err := obj.DownloadRange(ctx, 0, -1)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer errCatch(r.Close)
|
||||
|
||||
// Read the file
|
||||
data, err := ioutil.ReadAll(r)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Print it!
|
||||
fmt.Fprintln(out, string(data))
|
||||
return nil
|
||||
}
|
||||
|
||||
func Example_createEncryptionKey() {
|
||||
// The satellite address is the address of the satellite your API key is valid on
|
||||
satelliteAddress := "us-central-1.tardigrade.io:7777"
|
||||
|
||||
// The API key can be created in the web interface
|
||||
admin1APIKey := "qPSUM3k0bZyOIyil2xrVWiSuc9HuB2yBP3qDrA2Gc"
|
||||
admin2APIKey := "udP0lzCC2rgwRZfdY70PcwWrXzrq9cl5usbiFaeyo"
|
||||
|
||||
ctx := context.Background()
|
||||
|
||||
// Admin1 is going to create an encryption context and share it
|
||||
encCtx, err := CreateEncryptionKeyExampleByAdmin1(ctx, satelliteAddress, admin1APIKey, &uplink.Config{}, os.Stdout)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
// Admin2 is going to use the provided encryption context to load the uploaded file
|
||||
err = CreateEncryptionKeyExampleByAdmin2(ctx, satelliteAddress, admin2APIKey, encCtx, &uplink.Config{}, os.Stdout)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
lib/uplink/ex_restrict_test.go (new file, 147 lines)
@ -0,0 +1,147 @@
|
||||
// Copyright (C) 2019 Storj Labs, Inc.
|
||||
// See LICENSE for copying information.
|
||||
|
||||
package uplink_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
|
||||
"github.com/zeebo/errs"
|
||||
|
||||
"storj.io/storj/lib/uplink"
|
||||
"storj.io/storj/pkg/macaroon"
|
||||
)
|
||||
|
||||
func RestrictAccessExampleByAdmin(ctx context.Context, satelliteAddress, apiKey string, adminEncCtx string, cfg *uplink.Config, out io.Writer) (serializedUserAPIKey string, serializedEncCtx string, err error) {
|
||||
// Parse the API key. API keys are "macaroons" that allow you to create new, restricted API keys.
|
||||
key, err := uplink.ParseAPIKey(apiKey)
|
||||
if err != nil {
|
||||
return "", "", err
|
||||
}
|
||||
|
||||
// Restrict the API key to be read only and to be for just the prod and staging buckets
|
||||
// for the path webserver/logs/
|
||||
userAPIKey, err := key.Restrict(macaroon.Caveat{
|
||||
DisallowWrites: true,
|
||||
DisallowDeletes: true,
|
||||
})
|
||||
if err != nil {
|
||||
return "", "", err
|
||||
}
|
||||
|
||||
// Load the existing encryption context
|
||||
encCtx, err := uplink.ParseEncryptionCtx(adminEncCtx)
|
||||
if err != nil {
|
||||
return "", "", err
|
||||
}
|
||||
|
||||
// Restrict the encryption context to just the prod and staging buckets
|
||||
// for webserver/logs/
|
||||
userAPIKey, userEncCtx, err := encCtx.Restrict(userAPIKey,
|
||||
uplink.EncryptionRestriction{Bucket: "prod", PathPrefix: "webserver/logs"},
|
||||
uplink.EncryptionRestriction{Bucket: "staging", PathPrefix: "webserver/logs"},
|
||||
)
|
||||
if err != nil {
|
||||
return "", "", err
|
||||
}
|
||||
|
||||
// Serialize the encryption context
|
||||
serializedUserEncCtx, err := userEncCtx.Serialize()
|
||||
if err != nil {
|
||||
return "", "", err
|
||||
}
|
||||
|
||||
fmt.Fprintln(out, "success!")
|
||||
return userAPIKey.Serialize(), serializedUserEncCtx, nil
|
||||
}
|
||||
|
||||
func RestrictAccessExampleByUser(ctx context.Context, satelliteAddress, apiKey string, serializedEncCtx string, cfg *uplink.Config, out io.Writer) (err error) {
|
||||
errCatch := func(fn func() error) { err = errs.Combine(err, fn()) }
|
||||
|
||||
// First, create an Uplink handle.
|
||||
ul, err := uplink.NewUplink(ctx, cfg)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer errCatch(ul.Close)
|
||||
|
||||
// Parse the API key.
|
||||
key, err := uplink.ParseAPIKey(apiKey)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Open the project in question. Projects are identified by a specific Satellite and API key
|
||||
p, err := ul.OpenProject(ctx, satelliteAddress, key)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer errCatch(p.Close)
|
||||
|
||||
// Parse the encryption context
|
||||
encCtx, err := uplink.ParseEncryptionCtx(serializedEncCtx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Open bucket
|
||||
bucket, err := p.OpenBucket(ctx, "prod", encCtx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer errCatch(bucket.Close)
|
||||
|
||||
// Open file
|
||||
obj, err := bucket.OpenObject(ctx, "webserver/logs/log.txt")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer errCatch(obj.Close)
|
||||
|
||||
// Get a reader for the entire file
|
||||
r, err := obj.DownloadRange(ctx, 0, -1)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer errCatch(r.Close)
|
||||
|
||||
// Read the file
|
||||
data, err := ioutil.ReadAll(r)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Print it!
|
||||
fmt.Fprintln(out, string(data))
|
||||
return nil
|
||||
}
|
||||
|
||||
func Example_restrictAccess() {
|
||||
// The satellite address is the address of the satellite your API key is valid on
|
||||
satelliteAddress := "us-central-1.tardigrade.io:7777"
|
||||
|
||||
// The API key can be created in the web interface
|
||||
adminAPIKey := "qPSUM3k0bZyOIyil2xrVWiSuc9HuB2yBP3qDrA2Gc"
|
||||
|
||||
// The encryption context was created using NewEncryptionCtxWithDefaultKey and
|
||||
// (*Project).SaltedKeyFromPassphrase() earlier
|
||||
adminEncCtx := "HYGoqCEz43mCE40Hc5lQD3DtUYynx9Vo1GjOx75hQ"
|
||||
|
||||
ctx := context.Background()
|
||||
|
||||
// Admin1 is going to create an encryption context and share it
|
||||
userAPIKey, encCtx, err := RestrictAccessExampleByAdmin(ctx, satelliteAddress, adminAPIKey, adminEncCtx, &uplink.Config{}, os.Stdout)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
// Admin2 is going to use the provided encryption context to load the uploaded file
|
||||
err = RestrictAccessExampleByUser(ctx, satelliteAddress, userAPIKey, encCtx, &uplink.Config{}, os.Stdout)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
lib/uplink/examples_test.go (new file, 74 lines)
@ -0,0 +1,74 @@
|
||||
// Copyright (C) 2019 Storj Labs, Inc.
|
||||
// See LICENSE for copying information.
|
||||
|
||||
package uplink_test
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"storj.io/storj/internal/testcontext"
|
||||
"storj.io/storj/internal/testplanet"
|
||||
"storj.io/storj/lib/uplink"
|
||||
)
|
||||
|
||||
func TestBucketExamples(t *testing.T) {
|
||||
testplanet.Run(t, testplanet.Config{
|
||||
SatelliteCount: 1,
|
||||
StorageNodeCount: 1,
|
||||
UplinkCount: 1},
|
||||
func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
|
||||
cfg := uplink.Config{}
|
||||
cfg.Volatile.TLS.SkipPeerCAWhitelist = true
|
||||
|
||||
satelliteAddr := planet.Satellites[0].Local().Address.Address
|
||||
apiKey := planet.Uplinks[0].APIKey[planet.Satellites[0].ID()]
|
||||
|
||||
out := bytes.NewBuffer(nil)
|
||||
err := ListBucketsExample(ctx, satelliteAddr, apiKey, &cfg, out)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, out.String(), "")
|
||||
|
||||
out = bytes.NewBuffer(nil)
|
||||
err = CreateBucketExample(ctx, satelliteAddr, apiKey, &cfg, out)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, out.String(), "success!\n")
|
||||
|
||||
out = bytes.NewBuffer(nil)
|
||||
err = ListBucketsExample(ctx, satelliteAddr, apiKey, &cfg, out)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, out.String(), "Bucket: testbucket\n")
|
||||
|
||||
out = bytes.NewBuffer(nil)
|
||||
err = DeleteBucketExample(ctx, satelliteAddr, apiKey, &cfg, out)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, out.String(), "success!\n")
|
||||
|
||||
out = bytes.NewBuffer(nil)
|
||||
err = ListBucketsExample(ctx, satelliteAddr, apiKey, &cfg, out)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, out.String(), "")
|
||||
|
||||
out = bytes.NewBuffer(nil)
|
||||
encCtx, err := CreateEncryptionKeyExampleByAdmin1(ctx, satelliteAddr, apiKey, &cfg, out)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, out.String(), "success!\n")
|
||||
|
||||
out = bytes.NewBuffer(nil)
|
||||
err = CreateEncryptionKeyExampleByAdmin2(ctx, satelliteAddr, apiKey, encCtx, &cfg, out)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, out.String(), "hello world\n")
|
||||
|
||||
out = bytes.NewBuffer(nil)
|
||||
userAPIKey, userEncCtx, err := RestrictAccessExampleByAdmin(ctx, satelliteAddr, apiKey, encCtx, &cfg, out)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, out.String(), "success!\n")
|
||||
|
||||
out = bytes.NewBuffer(nil)
|
||||
err = RestrictAccessExampleByUser(ctx, satelliteAddr, userAPIKey, userEncCtx, &cfg, out)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, out.String(), "hello world\n")
|
||||
})
|
||||
}
|
@ -8,6 +8,7 @@ import (
|
||||
|
||||
"github.com/skyrings/skyring-common/tools/uuid"
|
||||
"github.com/vivint/infectious"
|
||||
"github.com/zeebo/errs"
|
||||
|
||||
"storj.io/storj/internal/memory"
|
||||
"storj.io/storj/pkg/eestream"
|
||||
@ -79,7 +80,7 @@ func (cfg *BucketConfig) setDefaults() {
|
||||
cfg.Volatile.RedundancyScheme.OptimalShares = 80
|
||||
}
|
||||
if cfg.Volatile.RedundancyScheme.TotalShares == 0 {
|
||||
cfg.Volatile.RedundancyScheme.TotalShares = 95
|
||||
cfg.Volatile.RedundancyScheme.TotalShares = 130
|
||||
}
|
||||
if cfg.Volatile.RedundancyScheme.ShareSize == 0 {
|
||||
cfg.Volatile.RedundancyScheme.ShareSize = (1 * memory.KiB).Int32()
|
||||
@ -149,7 +150,7 @@ func (p *Project) GetBucketInfo(ctx context.Context, bucket string) (b storj.Buc
|
||||
|
||||
// OpenBucket returns a Bucket handle with the given EncryptionAccess
|
||||
// information.
|
||||
func (p *Project) OpenBucket(ctx context.Context, bucketName string, access *EncryptionAccess) (b *Bucket, err error) {
|
||||
func (p *Project) OpenBucket(ctx context.Context, bucketName string, encCtx *EncryptionCtx) (b *Bucket, err error) {
|
||||
defer mon.Task()(&ctx)(&err)
|
||||
|
||||
err = p.checkBucketAttribution(ctx, bucketName)
|
||||
@ -162,12 +163,6 @@ func (p *Project) OpenBucket(ctx context.Context, bucketName string, access *Enc
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if access == nil || access.Key == (storj.Key{}) {
|
||||
return nil, Error.New("No encryption key chosen")
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
encryptionScheme := cfg.EncryptionParameters.ToEncryptionScheme()
|
||||
|
||||
ec := ecclient.NewClient(p.tc, p.uplinkCfg.Volatile.MaxMemory.Int())
|
||||
@ -190,11 +185,7 @@ func (p *Project) OpenBucket(ctx context.Context, bucketName string, access *Enc
|
||||
}
|
||||
segmentStore := segments.NewSegmentStore(p.metainfo, ec, rs, p.maxInlineSize.Int(), maxEncryptedSegmentSize)
|
||||
|
||||
// TODO(jeff): this is where we would load scope information in.
|
||||
encStore := encryption.NewStore()
|
||||
encStore.SetDefaultKey(&access.Key)
|
||||
|
||||
streamStore, err := streams.NewStreamStore(segmentStore, cfg.Volatile.SegmentsSize.Int64(), encStore, int(encryptionScheme.BlockSize), encryptionScheme.Cipher, p.maxInlineSize.Int())
|
||||
streamStore, err := streams.NewStreamStore(segmentStore, cfg.Volatile.SegmentsSize.Int64(), encCtx.store, int(encryptionScheme.BlockSize), encryptionScheme.Cipher, p.maxInlineSize.Int())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -204,11 +195,40 @@ func (p *Project) OpenBucket(ctx context.Context, bucketName string, access *Enc
|
||||
Name: bucketInfo.Name,
|
||||
Created: bucketInfo.Created,
|
||||
bucket: bucketInfo,
|
||||
metainfo: kvmetainfo.New(p.project, p.metainfo, streamStore, segmentStore, encStore),
|
||||
metainfo: kvmetainfo.New(p.project, p.metainfo, streamStore, segmentStore, encCtx.store),
|
||||
streams: streamStore,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (p *Project) retrieveSalt(ctx context.Context) (salt []byte, err error) {
|
||||
defer mon.Task()(&ctx)(&err)
|
||||
|
||||
info, err := p.metainfo.GetProjectInfo(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return info.ProjectSalt, nil
|
||||
}
|
||||
|
||||
// SaltedKeyFromPassphrase returns a key generated from the given passphrase using a stable, project-specific salt
|
||||
func (p *Project) SaltedKeyFromPassphrase(ctx context.Context, passphrase string) (_ *storj.Key, err error) {
|
||||
defer mon.Task()(&ctx)(&err)
|
||||
salt, err := p.retrieveSalt(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
key, err := encryption.DeriveDefaultPassword([]byte(passphrase), salt)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(key) != len(storj.Key{}) {
|
||||
return nil, errs.New("unexpected key length!")
|
||||
}
|
||||
var result storj.Key
|
||||
copy(result[:], key)
|
||||
return &result, nil
|
||||
}
|
||||
|
||||
// checkBucketAttribution Checks the bucket attribution
|
||||
func (p *Project) checkBucketAttribution(ctx context.Context, bucketName string) (err error) {
|
||||
defer mon.Task()(&ctx)(&err)
|
||||
|
@ -74,23 +74,24 @@ func get_bucket_info(projectHandle C.ProjectRef, bucketName *C.char, cerr **C.ch
|
||||
return newBucketInfo(&bucket)
|
||||
}
|
||||
|
||||
// open_bucket returns a Bucket handle with the given EncryptionAccess information.
|
||||
// open_bucket returns a Bucket handle with the given encryption context information.
|
||||
//export open_bucket
|
||||
func open_bucket(projectHandle C.ProjectRef, name *C.char, encryptionAccess C.EncryptionAccess, cerr **C.char) C.BucketRef {
|
||||
func open_bucket(projectHandle C.ProjectRef, name *C.char, encryptionCtx *C.char, cerr **C.char) C.BucketRef {
|
||||
project, ok := universe.Get(projectHandle._handle).(*Project)
|
||||
if !ok {
|
||||
*cerr = C.CString("invalid project")
|
||||
return C.BucketRef{}
|
||||
}
|
||||
|
||||
var access uplink.EncryptionAccess
|
||||
for i := range access.Key {
|
||||
access.Key[i] = byte(encryptionAccess.key[i])
|
||||
encCtx, err := uplink.ParseEncryptionCtx(C.GoString(encryptionCtx))
|
||||
if err != nil {
|
||||
*cerr = C.CString(err.Error())
|
||||
return C.BucketRef{}
|
||||
}
|
||||
|
||||
scope := project.scope.child()
|
||||
|
||||
bucket, err := project.OpenBucket(scope.ctx, C.GoString(name), &access)
|
||||
bucket, err := project.OpenBucket(scope.ctx, C.GoString(name), encCtx)
|
||||
if err != nil {
|
||||
*cerr = C.CString(err.Error())
|
||||
return C.BucketRef{}
|
||||
|
7
lib/uplinkc/testdata/bucket_test.c
vendored
7
lib/uplinkc/testdata/bucket_test.c
vendored
@ -76,11 +76,10 @@ void handle_project(ProjectRef project) {
|
||||
}
|
||||
}
|
||||
|
||||
{ // encryption access handling
|
||||
EncryptionAccess access = {};
|
||||
memcpy(&access.key[0], "abcdefghijklmnopqrstuvwxyzABCDEF", 32);
|
||||
{ // encryption context handling
|
||||
char *enc_ctx = "12VtN2sbbn9PvaEvNbNUBiSKnRcSUNxBADwDWGsPY7UV85e82tT6u";
|
||||
|
||||
BucketRef bucket = open_bucket(project, bucket_names[0], access, err);
|
||||
BucketRef bucket = open_bucket(project, bucket_names[0], enc_ctx, err);
|
||||
require_noerror(*err);
|
||||
requiref(bucket._handle != 0, "got empty bucket\n");
|
||||
|
||||
|
7
lib/uplinkc/testdata/object_test.c
vendored
7
lib/uplinkc/testdata/object_test.c
vendored
@ -19,8 +19,7 @@ void handle_project(ProjectRef project) {
|
||||
char **err = &_err;
|
||||
|
||||
char *bucket_name = "test-bucket";
|
||||
EncryptionAccess access = {{0}};
|
||||
memcpy(&access.key, "hello", 5);
|
||||
char *enc_ctx = "12VtN2sbbn9PvaEvNbNUBiSKnRcSUNxBADwDWGsPY7UV85e82tT6u";
|
||||
|
||||
char *object_paths[] = {"test-object1","test-object2","test-object3","test-object4"};
|
||||
int num_of_objects = 4;
|
||||
@ -36,7 +35,7 @@ void handle_project(ProjectRef project) {
|
||||
}
|
||||
|
||||
// open bucket
|
||||
BucketRef bucket = open_bucket(project, bucket_name, access, err);
|
||||
BucketRef bucket = open_bucket(project, bucket_name, enc_ctx, err);
|
||||
require_noerror(*err);
|
||||
|
||||
|
||||
@ -141,4 +140,4 @@ void handle_project(ProjectRef project) {
|
||||
|
||||
close_bucket(bucket, err);
|
||||
require_noerror(*err);
|
||||
}
|
||||
}
|
||||
|
@ -76,10 +76,6 @@ typedef struct BucketList {
|
||||
int32_t length;
|
||||
} BucketList;
|
||||
|
||||
typedef struct EncryptionAccess {
|
||||
char key[32];
|
||||
} EncryptionAccess;
|
||||
|
||||
typedef struct ObjectInfo {
|
||||
uint32_t version;
|
||||
BucketInfo bucket;
|
||||
|
@ -42,12 +42,6 @@ type Bucket struct {
|
||||
lib *libuplink.Bucket
|
||||
}
|
||||
|
||||
// BucketAccess defines access to bucket
|
||||
type BucketAccess struct {
|
||||
PathEncryptionKey []byte
|
||||
EncryptedPathPrefix storj.Path
|
||||
}
|
||||
|
||||
// BucketInfo bucket meta struct
|
||||
type BucketInfo struct {
|
||||
Name string
|
||||
|
40
mobile/encryption.go
Normal file
40
mobile/encryption.go
Normal file
@ -0,0 +1,40 @@
|
||||
// Copyright (C) 2019 Storj Labs, Inc.
|
||||
// See LICENSE for copying information.
|
||||
|
||||
package mobile
|
||||
|
||||
import (
|
||||
libuplink "storj.io/storj/lib/uplink"
|
||||
"storj.io/storj/pkg/storj"
|
||||
)
|
||||
|
||||
// EncryptionCtx holds data about encryption keys for a bucket.
|
||||
type EncryptionCtx struct {
|
||||
lib *libuplink.EncryptionCtx
|
||||
}
|
||||
|
||||
// NewEncryptionCtx constructs an empty encryption context.
|
||||
func NewEncryptionCtx() *EncryptionCtx {
|
||||
return &EncryptionCtx{lib: libuplink.NewEncryptionCtx()}
|
||||
}
|
||||
|
||||
// SetDefaultKey sets the default key to use when no matching keys are found
|
||||
// for the encryption context.
|
||||
func (e *EncryptionCtx) SetDefaultKey(keyData []byte) error {
|
||||
key, err := storj.NewKey(keyData)
|
||||
if err != nil {
|
||||
return safeError(err)
|
||||
}
|
||||
e.lib.SetDefaultKey(*key)
|
||||
return nil
|
||||
}
|
||||
|
||||
// ParseEncryptionCtx parses the base58 encoded encryption context data and
|
||||
// returns the resulting context.
|
||||
func ParseEncryptionCtx(b58data string) (*EncryptionCtx, error) {
|
||||
encCtx, err := libuplink.ParseEncryptionCtx(b58data)
|
||||
if err != nil {
|
||||
return nil, safeError(err)
|
||||
}
|
||||
return &EncryptionCtx{lib: encCtx}, nil
|
||||
}
|
@ -133,8 +133,8 @@ public class LibuplinkInstrumentedTest {
|
||||
try {
|
||||
Project project = uplink.openProject(VALID_SATELLITE_ADDRESS, VALID_API_KEY);
|
||||
try {
|
||||
BucketAccess access = new BucketAccess();
|
||||
access.setPathEncryptionKey("TestEncryptionKey".getBytes());
|
||||
EncryptionCtx encCtx = new EncryptionCtx();
|
||||
encCtx.setDefaultKey("TestEncryptionKey".getBytes());
|
||||
|
||||
RedundancyScheme scheme = new RedundancyScheme();
|
||||
scheme.setRequiredShares((short) 2);
|
||||
@ -147,7 +147,7 @@ public class LibuplinkInstrumentedTest {
|
||||
|
||||
project.createBucket("test", bucketConfig);
|
||||
|
||||
Bucket bucket = project.openBucket("test", access);
|
||||
Bucket bucket = project.openBucket("test", encCtx);
|
||||
|
||||
byte[] expectedData = new byte[1024];
|
||||
Random random = new Random();
|
||||
@ -197,8 +197,8 @@ public class LibuplinkInstrumentedTest {
|
||||
try {
|
||||
Project project = uplink.openProject(VALID_SATELLITE_ADDRESS, VALID_API_KEY);
|
||||
try {
|
||||
BucketAccess access = new BucketAccess();
|
||||
access.setPathEncryptionKey("TestEncryptionKey".getBytes());
|
||||
EncryptionCtx encCtx = new EncryptionCtx();
|
||||
encCtx.setDefaultKey("TestEncryptionKey".getBytes());
|
||||
|
||||
RedundancyScheme scheme = new RedundancyScheme();
|
||||
scheme.setRequiredShares((short) 2);
|
||||
@ -211,7 +211,7 @@ public class LibuplinkInstrumentedTest {
|
||||
|
||||
project.createBucket("test", bucketConfig);
|
||||
|
||||
Bucket bucket = project.openBucket("test", access);
|
||||
Bucket bucket = project.openBucket("test", encCtx);
|
||||
|
||||
byte[] expectedData = new byte[1024 * 100];
|
||||
Random random = new Random();
|
||||
@ -267,8 +267,8 @@ public class LibuplinkInstrumentedTest {
|
||||
try {
|
||||
Project project = uplink.openProject(VALID_SATELLITE_ADDRESS, VALID_API_KEY);
|
||||
try {
|
||||
BucketAccess access = new BucketAccess();
|
||||
access.setPathEncryptionKey("TestEncryptionKey".getBytes());
|
||||
EncryptionCtx encCtx = new EncryptionCtx();
|
||||
encCtx.setDefaultKey("TestEncryptionKey".getBytes());
|
||||
|
||||
BucketConfig bucketConfig = new BucketConfig();
|
||||
bucketConfig.setRedundancyScheme(new RedundancyScheme());
|
||||
@ -276,7 +276,7 @@ public class LibuplinkInstrumentedTest {
|
||||
BucketInfo bucketInfo = project.createBucket("testBucket", bucketConfig);
|
||||
assertEquals("testBucket", bucketInfo.getName());
|
||||
|
||||
Bucket bucket = project.openBucket("testBucket", access);
|
||||
Bucket bucket = project.openBucket("testBucket", encCtx);
|
||||
|
||||
long before = System.currentTimeMillis();
|
||||
|
||||
@ -317,4 +317,4 @@ public class LibuplinkInstrumentedTest {
|
||||
uplink.close();
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -117,16 +117,10 @@ func (project *Project) CreateBucket(bucketName string, opts *BucketConfig) (*Bu
|
||||
|
||||
// OpenBucket returns a Bucket handle with the given EncryptionAccess
|
||||
// information.
|
||||
func (project *Project) OpenBucket(bucketName string, options *BucketAccess) (*Bucket, error) {
|
||||
func (project *Project) OpenBucket(bucketName string, encCtx *EncryptionCtx) (*Bucket, error) {
|
||||
scope := project.scope.child()
|
||||
|
||||
opts := libuplink.EncryptionAccess{}
|
||||
if options != nil {
|
||||
copy(opts.Key[:], options.PathEncryptionKey) // TODO: error check
|
||||
opts.EncryptedPathPrefix = options.EncryptedPathPrefix
|
||||
}
|
||||
|
||||
bucket, err := project.lib.OpenBucket(scope.ctx, bucketName, &opts)
|
||||
bucket, err := project.lib.OpenBucket(scope.ctx, bucketName, encCtx.lib)
|
||||
if err != nil {
|
||||
return nil, safeError(err)
|
||||
}
|
||||
|
@ -84,6 +84,11 @@ func (s *Store) SetDefaultKey(defaultKey *storj.Key) {
|
||||
s.defaultKey = defaultKey
|
||||
}
|
||||
|
||||
// GetDefaultKey returns the default key, or nil if none has been set.
|
||||
func (s *Store) GetDefaultKey() *storj.Key {
|
||||
return s.defaultKey
|
||||
}
|
||||
|
||||
// Add creates a mapping from the unencrypted path to the encrypted path and key.
|
||||
func (s *Store) Add(bucket string, unenc paths.Unencrypted, enc paths.Encrypted, key storj.Key) error {
|
||||
root, ok := s.roots[bucket]
|
||||
@ -213,3 +218,33 @@ func (n *node) lookup(path paths.Iterator, bestConsumed string, bestBase *Base,
|
||||
// Recurse to the next node in the tree.
|
||||
return child.lookup(path, bestConsumed, bestBase, unenc)
|
||||
}
|
||||
|
||||
// Iterate executes the callback with every value that has been Added to the Store.
|
||||
func (s *Store) Iterate(fn func(string, paths.Unencrypted, paths.Encrypted, storj.Key) error) error {
|
||||
for bucket, root := range s.roots {
|
||||
if err := root.iterate(fn, bucket); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// iterate calls the callback if the node has a base, and recurses to its children.
|
||||
func (n *node) iterate(fn func(string, paths.Unencrypted, paths.Encrypted, storj.Key) error, bucket string) error {
|
||||
if n.base != nil {
|
||||
err := fn(bucket, n.base.Unencrypted, n.base.Encrypted, n.base.Key)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// recurse down only the unenc map, as the enc map should be the same.
|
||||
for _, child := range n.unenc {
|
||||
err := child.iterate(fn, bucket)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
@ -126,3 +126,36 @@ func TestStoreErrorState(t *testing.T) {
|
||||
assert.Equal(t, consumed1, consumed2)
|
||||
assert.Equal(t, base1, base2)
|
||||
}
|
||||
|
||||
func TestStoreIterate(t *testing.T) {
|
||||
s := NewStore()
|
||||
ep := paths.NewEncrypted
|
||||
up := paths.NewUnencrypted
|
||||
|
||||
type storeEntry struct {
|
||||
bucket string
|
||||
unenc paths.Unencrypted
|
||||
enc paths.Encrypted
|
||||
key storj.Key
|
||||
}
|
||||
expected := map[storeEntry]struct{}{
|
||||
{"b1", up("u1/u2/u3"), ep("e1/e2/e3"), toKey("k3")}: {},
|
||||
{"b1", up("u1/u2/u3/u4"), ep("e1/e2/e3/e4"), toKey("k4")}: {},
|
||||
{"b1", up("u1/u5"), ep("e1/e5"), toKey("k5")}: {},
|
||||
{"b1", up("u6"), ep("e6"), toKey("k6")}: {},
|
||||
{"b1", up("u6/u7/u8"), ep("e6/e7/e8"), toKey("k8")}: {},
|
||||
{"b2", up("u1"), ep("e1'"), toKey("k1")}: {},
|
||||
{"b3", paths.Unencrypted{}, paths.Encrypted{}, toKey("m1")}: {},
|
||||
}
|
||||
|
||||
for entry := range expected {
|
||||
require.NoError(t, s.Add(entry.bucket, entry.unenc, entry.enc, entry.key))
|
||||
}
|
||||
|
||||
got := make(map[storeEntry]struct{})
|
||||
require.NoError(t, s.Iterate(func(bucket string, unenc paths.Unencrypted, enc paths.Encrypted, key storj.Key) error {
|
||||
got[storeEntry{bucket, unenc, enc, key}] = struct{}{}
|
||||
return nil
|
||||
}))
|
||||
require.Equal(t, expected, got)
|
||||
}
|
||||
|
@ -30,10 +30,10 @@ var (
|
||||
)
|
||||
|
||||
// NewStorjGateway creates a *Storj object from an existing ObjectStore
|
||||
func NewStorjGateway(project *uplink.Project, rootEncKey *storj.Key, pathCipher storj.CipherSuite, encryption storj.EncryptionParameters, redundancy storj.RedundancyScheme, segmentSize memory.Size) *Gateway {
|
||||
func NewStorjGateway(project *uplink.Project, encCtx *uplink.EncryptionCtx, pathCipher storj.CipherSuite, encryption storj.EncryptionParameters, redundancy storj.RedundancyScheme, segmentSize memory.Size) *Gateway {
|
||||
return &Gateway{
|
||||
project: project,
|
||||
rootEncKey: rootEncKey,
|
||||
encCtx: encCtx,
|
||||
pathCipher: pathCipher,
|
||||
encryption: encryption,
|
||||
redundancy: redundancy,
|
||||
@ -45,7 +45,7 @@ func NewStorjGateway(project *uplink.Project, rootEncKey *storj.Key, pathCipher
|
||||
// Gateway is the implementation of a minio cmd.Gateway
|
||||
type Gateway struct {
|
||||
project *uplink.Project
|
||||
rootEncKey *storj.Key
|
||||
encCtx *uplink.EncryptionCtx
|
||||
pathCipher storj.CipherSuite
|
||||
encryption storj.EncryptionParameters
|
||||
redundancy storj.RedundancyScheme
|
||||
@ -91,7 +91,9 @@ func (layer *gatewayLayer) DeleteBucket(ctx context.Context, bucketName string)
|
||||
}
|
||||
|
||||
func (layer *gatewayLayer) bucketEmpty(ctx context.Context, bucketName string) (empty bool, err error) {
|
||||
bucket, err := layer.gateway.project.OpenBucket(ctx, bucketName, &uplink.EncryptionAccess{Key: *layer.gateway.rootEncKey})
|
||||
defer mon.Task()(&ctx)(&err)
|
||||
|
||||
bucket, err := layer.gateway.project.OpenBucket(ctx, bucketName, layer.gateway.encCtx)
|
||||
if err != nil {
|
||||
return false, convertError(err, bucketName, "")
|
||||
}
|
||||
@ -108,7 +110,7 @@ func (layer *gatewayLayer) bucketEmpty(ctx context.Context, bucketName string) (
|
||||
func (layer *gatewayLayer) DeleteObject(ctx context.Context, bucketName, objectPath string) (err error) {
|
||||
defer mon.Task()(&ctx)(&err)
|
||||
|
||||
bucket, err := layer.gateway.project.OpenBucket(ctx, bucketName, &uplink.EncryptionAccess{Key: *layer.gateway.rootEncKey})
|
||||
bucket, err := layer.gateway.project.OpenBucket(ctx, bucketName, layer.gateway.encCtx)
|
||||
if err != nil {
|
||||
return convertError(err, bucketName, "")
|
||||
}
|
||||
@ -134,7 +136,7 @@ func (layer *gatewayLayer) GetBucketInfo(ctx context.Context, bucketName string)
|
||||
func (layer *gatewayLayer) GetObject(ctx context.Context, bucketName, objectPath string, startOffset int64, length int64, writer io.Writer, etag string) (err error) {
|
||||
defer mon.Task()(&ctx)(&err)
|
||||
|
||||
bucket, err := layer.gateway.project.OpenBucket(ctx, bucketName, &uplink.EncryptionAccess{Key: *layer.gateway.rootEncKey})
|
||||
bucket, err := layer.gateway.project.OpenBucket(ctx, bucketName, layer.gateway.encCtx)
|
||||
if err != nil {
|
||||
return convertError(err, bucketName, "")
|
||||
}
|
||||
@ -168,7 +170,7 @@ func (layer *gatewayLayer) GetObject(ctx context.Context, bucketName, objectPath
|
||||
func (layer *gatewayLayer) GetObjectInfo(ctx context.Context, bucketName, objectPath string) (objInfo minio.ObjectInfo, err error) {
|
||||
defer mon.Task()(&ctx)(&err)
|
||||
|
||||
bucket, err := layer.gateway.project.OpenBucket(ctx, bucketName, &uplink.EncryptionAccess{Key: *layer.gateway.rootEncKey})
|
||||
bucket, err := layer.gateway.project.OpenBucket(ctx, bucketName, layer.gateway.encCtx)
|
||||
if err != nil {
|
||||
return minio.ObjectInfo{}, convertError(err, bucketName, "")
|
||||
}
|
||||
@ -223,7 +225,7 @@ func (layer *gatewayLayer) ListObjects(ctx context.Context, bucketName, prefix,
|
||||
return minio.ListObjectsInfo{}, minio.UnsupportedDelimiter{Delimiter: delimiter}
|
||||
}
|
||||
|
||||
bucket, err := layer.gateway.project.OpenBucket(ctx, bucketName, &uplink.EncryptionAccess{Key: *layer.gateway.rootEncKey})
|
||||
bucket, err := layer.gateway.project.OpenBucket(ctx, bucketName, layer.gateway.encCtx)
|
||||
if err != nil {
|
||||
return minio.ListObjectsInfo{}, convertError(err, bucketName, "")
|
||||
}
|
||||
@ -289,7 +291,7 @@ func (layer *gatewayLayer) ListObjectsV2(ctx context.Context, bucketName, prefix
|
||||
return minio.ListObjectsV2Info{ContinuationToken: continuationToken}, minio.UnsupportedDelimiter{Delimiter: delimiter}
|
||||
}
|
||||
|
||||
bucket, err := layer.gateway.project.OpenBucket(ctx, bucketName, &uplink.EncryptionAccess{Key: *layer.gateway.rootEncKey})
|
||||
bucket, err := layer.gateway.project.OpenBucket(ctx, bucketName, layer.gateway.encCtx)
|
||||
if err != nil {
|
||||
return minio.ListObjectsV2Info{}, convertError(err, bucketName, "")
|
||||
}
|
||||
@ -390,7 +392,7 @@ func (layer *gatewayLayer) MakeBucketWithLocation(ctx context.Context, bucketNam
|
||||
func (layer *gatewayLayer) CopyObject(ctx context.Context, srcBucket, srcObject, destBucket, destObject string, srcInfo minio.ObjectInfo) (objInfo minio.ObjectInfo, err error) {
|
||||
defer mon.Task()(&ctx)(&err)
|
||||
|
||||
bucket, err := layer.gateway.project.OpenBucket(ctx, srcBucket, &uplink.EncryptionAccess{Key: *layer.gateway.rootEncKey})
|
||||
bucket, err := layer.gateway.project.OpenBucket(ctx, srcBucket, layer.gateway.encCtx)
|
||||
if err != nil {
|
||||
return minio.ObjectInfo{}, convertError(err, srcBucket, "")
|
||||
}
|
||||
@ -422,7 +424,7 @@ func (layer *gatewayLayer) CopyObject(ctx context.Context, srcBucket, srcObject,
|
||||
func (layer *gatewayLayer) putObject(ctx context.Context, bucketName, objectPath string, reader io.Reader, opts *uplink.UploadOptions) (objInfo minio.ObjectInfo, err error) {
|
||||
defer mon.Task()(&ctx)(&err)
|
||||
|
||||
bucket, err := layer.gateway.project.OpenBucket(ctx, bucketName, &uplink.EncryptionAccess{Key: *layer.gateway.rootEncKey})
|
||||
bucket, err := layer.gateway.project.OpenBucket(ctx, bucketName, layer.gateway.encCtx)
|
||||
if err != nil {
|
||||
return minio.ObjectInfo{}, convertError(err, bucketName, "")
|
||||
}
|
||||
|
@ -23,7 +23,6 @@ import (
|
||||
"storj.io/storj/internal/testplanet"
|
||||
libuplink "storj.io/storj/lib/uplink"
|
||||
"storj.io/storj/pkg/eestream"
|
||||
"storj.io/storj/pkg/encryption"
|
||||
"storj.io/storj/pkg/macaroon"
|
||||
"storj.io/storj/pkg/metainfo/kvmetainfo"
|
||||
"storj.io/storj/pkg/pb"
|
||||
@ -703,11 +702,10 @@ func initEnv(ctx context.Context, planet *testplanet.Planet) (minio.ObjectLayer,
|
||||
|
||||
segments := segments.NewSegmentStore(m, ec, rs, 4*memory.KiB.Int(), 8*memory.MiB.Int64())
|
||||
|
||||
encKey := new(storj.Key)
|
||||
var encKey storj.Key
|
||||
copy(encKey[:], TestEncKey)
|
||||
|
||||
encStore := encryption.NewStore()
|
||||
encStore.SetDefaultKey(encKey)
|
||||
encCtx := libuplink.NewEncryptionCtxWithDefaultKey(encKey)
|
||||
encStore := encCtx.Store()
|
||||
|
||||
blockSize := rs.StripeSize()
|
||||
inlineThreshold := 4 * memory.KiB.Int()
|
||||
@ -749,7 +747,7 @@ func initEnv(ctx context.Context, planet *testplanet.Planet) (minio.ObjectLayer,
|
||||
|
||||
gateway := NewStorjGateway(
|
||||
proj,
|
||||
encKey,
|
||||
encCtx,
|
||||
storj.EncAESGCM,
|
||||
storj.EncryptionParameters{
|
||||
CipherSuite: storj.EncAESGCM,
|
||||
|
@ -212,7 +212,7 @@ func runGateway(ctx context.Context, gwCfg config, uplinkCfg uplink.Config, log
|
||||
|
||||
gw := miniogw.NewStorjGateway(
|
||||
project,
|
||||
&storj.Key{},
|
||||
libuplink.NewEncryptionCtxWithDefaultKey(storj.Key{}),
|
||||
storj.Cipher(uplinkCfg.Enc.PathType).ToCipherSuite(),
|
||||
uplinkCfg.GetEncryptionScheme().ToEncryptionParameters(),
|
||||
uplinkCfg.GetRedundancyScheme(),
|
||||
|
155
pkg/pb/encryption_ctx.pb.go
Normal file
155
pkg/pb/encryption_ctx.pb.go
Normal file
@ -0,0 +1,155 @@
|
||||
// Code generated by protoc-gen-gogo. DO NOT EDIT.
|
||||
// source: encryption_ctx.proto
|
||||
|
||||
package pb
|
||||
|
||||
import (
|
||||
fmt "fmt"
|
||||
_ "github.com/gogo/protobuf/gogoproto"
|
||||
proto "github.com/gogo/protobuf/proto"
|
||||
math "math"
|
||||
)
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
var _ = proto.Marshal
|
||||
var _ = fmt.Errorf
|
||||
var _ = math.Inf
|
||||
|
||||
// This is a compile-time assertion to ensure that this generated file
|
||||
// is compatible with the proto package it is being compiled against.
|
||||
// A compilation error at this line likely means your copy of the
|
||||
// proto package needs to be updated.
|
||||
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
|
||||
|
||||
type EncryptionCtx struct {
|
||||
DefaultKey []byte `protobuf:"bytes,1,opt,name=default_key,json=defaultKey,proto3" json:"default_key,omitempty"`
|
||||
StoreEntries []*EncryptionCtx_StoreEntry `protobuf:"bytes,2,rep,name=store_entries,json=storeEntries,proto3" json:"store_entries,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *EncryptionCtx) Reset() { *m = EncryptionCtx{} }
|
||||
func (m *EncryptionCtx) String() string { return proto.CompactTextString(m) }
|
||||
func (*EncryptionCtx) ProtoMessage() {}
|
||||
func (*EncryptionCtx) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_b1567295c75a6f56, []int{0}
|
||||
}
|
||||
func (m *EncryptionCtx) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_EncryptionCtx.Unmarshal(m, b)
|
||||
}
|
||||
func (m *EncryptionCtx) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_EncryptionCtx.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (m *EncryptionCtx) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_EncryptionCtx.Merge(m, src)
|
||||
}
|
||||
func (m *EncryptionCtx) XXX_Size() int {
|
||||
return xxx_messageInfo_EncryptionCtx.Size(m)
|
||||
}
|
||||
func (m *EncryptionCtx) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_EncryptionCtx.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_EncryptionCtx proto.InternalMessageInfo
|
||||
|
||||
func (m *EncryptionCtx) GetDefaultKey() []byte {
|
||||
if m != nil {
|
||||
return m.DefaultKey
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *EncryptionCtx) GetStoreEntries() []*EncryptionCtx_StoreEntry {
|
||||
if m != nil {
|
||||
return m.StoreEntries
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type EncryptionCtx_StoreEntry struct {
|
||||
Bucket []byte `protobuf:"bytes,1,opt,name=bucket,proto3" json:"bucket,omitempty"`
|
||||
UnencryptedPath []byte `protobuf:"bytes,2,opt,name=unencrypted_path,json=unencryptedPath,proto3" json:"unencrypted_path,omitempty"`
|
||||
EncryptedPath []byte `protobuf:"bytes,3,opt,name=encrypted_path,json=encryptedPath,proto3" json:"encrypted_path,omitempty"`
|
||||
Key []byte `protobuf:"bytes,4,opt,name=key,proto3" json:"key,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *EncryptionCtx_StoreEntry) Reset() { *m = EncryptionCtx_StoreEntry{} }
|
||||
func (m *EncryptionCtx_StoreEntry) String() string { return proto.CompactTextString(m) }
|
||||
func (*EncryptionCtx_StoreEntry) ProtoMessage() {}
|
||||
func (*EncryptionCtx_StoreEntry) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_b1567295c75a6f56, []int{0, 0}
|
||||
}
|
||||
func (m *EncryptionCtx_StoreEntry) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_EncryptionCtx_StoreEntry.Unmarshal(m, b)
|
||||
}
|
||||
func (m *EncryptionCtx_StoreEntry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_EncryptionCtx_StoreEntry.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (m *EncryptionCtx_StoreEntry) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_EncryptionCtx_StoreEntry.Merge(m, src)
|
||||
}
|
||||
func (m *EncryptionCtx_StoreEntry) XXX_Size() int {
|
||||
return xxx_messageInfo_EncryptionCtx_StoreEntry.Size(m)
|
||||
}
|
||||
func (m *EncryptionCtx_StoreEntry) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_EncryptionCtx_StoreEntry.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_EncryptionCtx_StoreEntry proto.InternalMessageInfo
|
||||
|
||||
func (m *EncryptionCtx_StoreEntry) GetBucket() []byte {
|
||||
if m != nil {
|
||||
return m.Bucket
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *EncryptionCtx_StoreEntry) GetUnencryptedPath() []byte {
|
||||
if m != nil {
|
||||
return m.UnencryptedPath
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *EncryptionCtx_StoreEntry) GetEncryptedPath() []byte {
|
||||
if m != nil {
|
||||
return m.EncryptedPath
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *EncryptionCtx_StoreEntry) GetKey() []byte {
|
||||
if m != nil {
|
||||
return m.Key
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func init() {
|
||||
proto.RegisterType((*EncryptionCtx)(nil), "encryption_ctx.EncryptionCtx")
|
||||
proto.RegisterType((*EncryptionCtx_StoreEntry)(nil), "encryption_ctx.EncryptionCtx.StoreEntry")
|
||||
}
|
||||
|
||||
func init() { proto.RegisterFile("encryption_ctx.proto", fileDescriptor_b1567295c75a6f56) }
|
||||
|
||||
var fileDescriptor_b1567295c75a6f56 = []byte{
|
||||
// 224 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x49, 0xcd, 0x4b, 0x2e,
|
||||
0xaa, 0x2c, 0x28, 0xc9, 0xcc, 0xcf, 0x8b, 0x4f, 0x2e, 0xa9, 0xd0, 0x2b, 0x28, 0xca, 0x2f, 0xc9,
|
||||
0x17, 0xe2, 0x43, 0x15, 0x95, 0xe2, 0x4a, 0xcf, 0x4f, 0xcf, 0x87, 0xc8, 0x29, 0x75, 0x31, 0x71,
|
||||
0xf1, 0xba, 0xc2, 0xa5, 0x9d, 0x4b, 0x2a, 0x84, 0xe4, 0xb9, 0xb8, 0x53, 0x52, 0xd3, 0x12, 0x4b,
|
||||
0x73, 0x4a, 0xe2, 0xb3, 0x53, 0x2b, 0x25, 0x18, 0x15, 0x18, 0x35, 0x78, 0x82, 0xb8, 0xa0, 0x42,
|
||||
0xde, 0xa9, 0x95, 0x42, 0xbe, 0x5c, 0xbc, 0xc5, 0x25, 0xf9, 0x45, 0xa9, 0xf1, 0xa9, 0x79, 0x25,
|
||||
0x45, 0x99, 0xa9, 0xc5, 0x12, 0x4c, 0x0a, 0xcc, 0x1a, 0xdc, 0x46, 0x1a, 0x7a, 0x68, 0x96, 0xa3,
|
||||
0x18, 0xab, 0x17, 0x0c, 0xd2, 0xe2, 0x9a, 0x57, 0x52, 0x54, 0x19, 0xc4, 0x53, 0x0c, 0x63, 0x67,
|
||||
0xa6, 0x16, 0x4b, 0x75, 0x30, 0x72, 0x71, 0x21, 0x24, 0x85, 0xc4, 0xb8, 0xd8, 0x92, 0x4a, 0x93,
|
||||
0xb3, 0x53, 0x4b, 0xa0, 0x36, 0x43, 0x79, 0x42, 0x9a, 0x5c, 0x02, 0xa5, 0x79, 0x50, 0x1b, 0x52,
|
||||
0x53, 0xe2, 0x0b, 0x12, 0x4b, 0x32, 0x24, 0x98, 0xc0, 0x2a, 0xf8, 0x91, 0xc4, 0x03, 0x12, 0x4b,
|
||||
0x32, 0x84, 0x54, 0xb9, 0xf8, 0xd0, 0x14, 0x32, 0x83, 0x15, 0xf2, 0xa2, 0x2a, 0x13, 0xe0, 0x62,
|
||||
0x06, 0x79, 0x90, 0x05, 0x2c, 0x07, 0x62, 0x3a, 0xb1, 0x44, 0x31, 0x15, 0x24, 0x25, 0xb1, 0x81,
|
||||
0x43, 0xc6, 0x18, 0x10, 0x00, 0x00, 0xff, 0xff, 0xcb, 0x91, 0x63, 0x57, 0x4d, 0x01, 0x00, 0x00,
|
||||
}
|
22
pkg/pb/encryption_ctx.proto
Normal file
22
pkg/pb/encryption_ctx.proto
Normal file
@ -0,0 +1,22 @@
|
||||
// Copyright (C) 2019 Storj Labs, Inc.
|
||||
// See LICENSE for copying information.
|
||||
|
||||
syntax = "proto3";
|
||||
|
||||
option go_package = "pb";
|
||||
|
||||
package encryption_ctx;
|
||||
|
||||
import "gogo.proto";
|
||||
|
||||
message EncryptionCtx {
|
||||
message StoreEntry {
|
||||
bytes bucket = 1;
|
||||
bytes unencrypted_path = 2;
|
||||
bytes encrypted_path = 3;
|
||||
bytes key = 4;
|
||||
}
|
||||
|
||||
bytes default_key = 1;
|
||||
repeated StoreEntry store_entries = 2;
|
||||
}
|
64
proto.lock
64
proto.lock
@ -184,6 +184,70 @@
|
||||
]
|
||||
}
|
||||
},
|
||||
{
|
||||
"protopath": "pkg:/:pb:/:encryption_ctx.proto",
|
||||
"def": {
|
||||
"messages": [
|
||||
{
|
||||
"name": "EncryptionCtx",
|
||||
"fields": [
|
||||
{
|
||||
"id": 1,
|
||||
"name": "default_key",
|
||||
"type": "bytes"
|
||||
},
|
||||
{
|
||||
"id": 2,
|
||||
"name": "store_entries",
|
||||
"type": "StoreEntry",
|
||||
"is_repeated": true
|
||||
}
|
||||
],
|
||||
"messages": [
|
||||
{
|
||||
"name": "StoreEntry",
|
||||
"fields": [
|
||||
{
|
||||
"id": 1,
|
||||
"name": "bucket",
|
||||
"type": "bytes"
|
||||
},
|
||||
{
|
||||
"id": 2,
|
||||
"name": "unencrypted_path",
|
||||
"type": "bytes"
|
||||
},
|
||||
{
|
||||
"id": 3,
|
||||
"name": "encrypted_path",
|
||||
"type": "bytes"
|
||||
},
|
||||
{
|
||||
"id": 4,
|
||||
"name": "key",
|
||||
"type": "bytes"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"imports": [
|
||||
{
|
||||
"path": "gogo.proto"
|
||||
}
|
||||
],
|
||||
"package": {
|
||||
"name": "encryption_ctx"
|
||||
},
|
||||
"options": [
|
||||
{
|
||||
"name": "go_package",
|
||||
"value": "pb"
|
||||
}
|
||||
]
|
||||
}
|
||||
},
|
||||
{
|
||||
"protopath": "pkg:/:pb:/:gc.proto",
|
||||
"def": {
|
||||
|
@ -234,7 +234,7 @@ func TestServiceList(t *testing.T) {
|
||||
}
|
||||
|
||||
config := planet.Uplinks[0].GetConfig(planet.Satellites[0])
|
||||
metainfo, _, err := config.GetMetainfo(ctx, planet.Uplinks[0].Identity)
|
||||
metainfo, _, err := testplanet.GetMetainfo(ctx, config, planet.Uplinks[0].Identity)
|
||||
require.NoError(t, err)
|
||||
|
||||
type Test struct {
|
||||
@ -559,7 +559,7 @@ func TestSetAttribution(t *testing.T) {
|
||||
uplink := planet.Uplinks[0]
|
||||
|
||||
config := uplink.GetConfig(planet.Satellites[0])
|
||||
metainfo, _, err := config.GetMetainfo(ctx, uplink.Identity)
|
||||
metainfo, _, err := testplanet.GetMetainfo(ctx, config, uplink.Identity)
|
||||
require.NoError(t, err)
|
||||
|
||||
_, err = metainfo.CreateBucket(ctx, "alpha", &storj.Bucket{PathCipher: config.GetEncryptionScheme().Cipher})
|
||||
@ -600,6 +600,32 @@ func TestSetAttribution(t *testing.T) {
|
||||
})
|
||||
}
|
||||
|
||||
func TestGetProjectInfo(t *testing.T) {
|
||||
testplanet.Run(t, testplanet.Config{
|
||||
SatelliteCount: 1, StorageNodeCount: 6, UplinkCount: 2,
|
||||
}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
|
||||
apiKey0 := planet.Uplinks[0].APIKey[planet.Satellites[0].ID()]
|
||||
apiKey1 := planet.Uplinks[1].APIKey[planet.Satellites[0].ID()]
|
||||
|
||||
metainfo0, err := planet.Uplinks[0].DialMetainfo(ctx, planet.Satellites[0], apiKey0)
|
||||
require.NoError(t, err)
|
||||
|
||||
metainfo1, err := planet.Uplinks[0].DialMetainfo(ctx, planet.Satellites[0], apiKey1)
|
||||
require.NoError(t, err)
|
||||
|
||||
info0, err := metainfo0.GetProjectInfo(ctx)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, info0.ProjectSalt)
|
||||
|
||||
info1, err := metainfo1.GetProjectInfo(ctx)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, info1.ProjectSalt)
|
||||
|
||||
// Different projects should have different salts
|
||||
require.NotEqual(t, info0.ProjectSalt, info1.ProjectSalt)
|
||||
})
|
||||
}
|
||||
|
||||
func runCreateSegment(ctx context.Context, t *testing.T, metainfo *metainfo.Client) (*pb.Pointer, []*pb.OrderLimit2) {
|
||||
pointer := createTestPointer(t)
|
||||
expirationDate, err := ptypes.Timestamp(pointer.ExpirationDate)
|
||||
|
119
uplink/config.go
119
uplink/config.go
@ -4,27 +4,11 @@
|
||||
package uplink
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"io/ioutil"
|
||||
"time"
|
||||
|
||||
"github.com/vivint/infectious"
|
||||
"github.com/zeebo/errs"
|
||||
monkit "gopkg.in/spacemonkeygo/monkit.v2"
|
||||
|
||||
"storj.io/storj/internal/memory"
|
||||
"storj.io/storj/pkg/eestream"
|
||||
"storj.io/storj/pkg/encryption"
|
||||
"storj.io/storj/pkg/identity"
|
||||
"storj.io/storj/pkg/metainfo/kvmetainfo"
|
||||
"storj.io/storj/pkg/peertls/tlsopts"
|
||||
ecclient "storj.io/storj/pkg/storage/ec"
|
||||
"storj.io/storj/pkg/storage/segments"
|
||||
"storj.io/storj/pkg/storage/streams"
|
||||
"storj.io/storj/pkg/storj"
|
||||
"storj.io/storj/pkg/transport"
|
||||
"storj.io/storj/uplink/metainfo"
|
||||
)
|
||||
|
||||
// RSConfig is a configuration struct that keeps details about default
|
||||
@ -41,10 +25,11 @@ type RSConfig struct {
|
||||
// EncryptionConfig is a configuration struct that keeps details about
|
||||
// encrypting segments
|
||||
type EncryptionConfig struct {
|
||||
EncryptionKey string `help:"the root key for encrypting the data which will be stored in KeyFilePath" setup:"true"`
|
||||
KeyFilepath string `help:"the path to the file which contains the root key for encrypting the data"`
|
||||
DataType int `help:"Type of encryption to use for content and metadata (1=AES-GCM, 2=SecretBox)" default:"1"`
|
||||
PathType int `help:"Type of encryption to use for paths (0=Unencrypted, 1=AES-GCM, 2=SecretBox)" default:"1"`
|
||||
EncryptionKey string `help:"the root key for encrypting the data which will be stored in KeyFilePath" setup:"true"`
|
||||
KeyFilepath string `help:"the path to the file which contains the root key for encrypting the data"`
|
||||
EncCtxFilepath string `help:"the path to a file containing a serialized encryption ctx"`
|
||||
DataType int `help:"Type of encryption to use for content and metadata (1=AES-GCM, 2=SecretBox)" default:"1"`
|
||||
PathType int `help:"Type of encryption to use for paths (0=Unencrypted, 1=AES-GCM, 2=SecretBox)" default:"1"`
|
||||
}
|
||||
|
||||
// ClientConfig is a configuration struct for the uplink that controls how
|
||||
@ -66,83 +51,6 @@ type Config struct {
|
||||
TLS tlsopts.Config
|
||||
}
|
||||
|
||||
var (
|
||||
mon = monkit.Package()
|
||||
|
||||
// Error is the errs class of standard End User Client errors
|
||||
Error = errs.Class("Uplink configuration error")
|
||||
)
|
||||
|
||||
// GetMetainfo returns an implementation of storj.Metainfo
|
||||
func (c Config) GetMetainfo(ctx context.Context, identity *identity.FullIdentity) (db storj.Metainfo, ss streams.Store, err error) {
|
||||
defer mon.Task()(&ctx)(&err)
|
||||
|
||||
tlsOpts, err := tlsopts.NewOptions(identity, c.TLS)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
// ToDo: Handle Versioning for Uplinks here
|
||||
|
||||
tc := transport.NewClientWithTimeouts(tlsOpts, transport.Timeouts{
|
||||
Request: c.Client.RequestTimeout,
|
||||
Dial: c.Client.DialTimeout,
|
||||
})
|
||||
|
||||
if c.Client.SatelliteAddr == "" {
|
||||
return nil, nil, errors.New("satellite address not specified")
|
||||
}
|
||||
|
||||
m, err := metainfo.Dial(ctx, tc, c.Client.SatelliteAddr, c.Client.APIKey)
|
||||
if err != nil {
|
||||
return nil, nil, Error.New("failed to connect to metainfo service: %v", err)
|
||||
}
|
||||
// TODO: handle closing of m
|
||||
|
||||
project, err := kvmetainfo.SetupProject(m)
|
||||
if err != nil {
|
||||
return nil, nil, Error.New("failed to create project: %v", err)
|
||||
}
|
||||
|
||||
ec := ecclient.NewClient(tc, c.RS.MaxBufferMem.Int())
|
||||
fc, err := infectious.NewFEC(c.RS.MinThreshold, c.RS.MaxThreshold)
|
||||
if err != nil {
|
||||
return nil, nil, Error.New("failed to create erasure coding client: %v", err)
|
||||
}
|
||||
rs, err := eestream.NewRedundancyStrategy(eestream.NewRSScheme(fc, c.RS.ErasureShareSize.Int()), c.RS.RepairThreshold, c.RS.SuccessThreshold)
|
||||
if err != nil {
|
||||
return nil, nil, Error.New("failed to create redundancy strategy: %v", err)
|
||||
}
|
||||
|
||||
maxEncryptedSegmentSize, err := encryption.CalcEncryptedSize(c.Client.SegmentSize.Int64(), c.GetEncryptionScheme())
|
||||
if err != nil {
|
||||
return nil, nil, Error.New("failed to calculate max encrypted segment size: %v", err)
|
||||
}
|
||||
segment := segments.NewSegmentStore(m, ec, rs, c.Client.MaxInlineSize.Int(), maxEncryptedSegmentSize)
|
||||
|
||||
blockSize := c.GetEncryptionScheme().BlockSize
|
||||
if int(blockSize)%c.RS.ErasureShareSize.Int()*c.RS.MinThreshold != 0 {
|
||||
err = Error.New("EncryptionBlockSize must be a multiple of ErasureShareSize * RS MinThreshold")
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
key, err := LoadEncryptionKey(c.Enc.KeyFilepath)
|
||||
if err != nil {
|
||||
return nil, nil, Error.Wrap(err)
|
||||
}
|
||||
|
||||
encStore := encryption.NewStore()
|
||||
encStore.SetDefaultKey(key)
|
||||
strms, err := streams.NewStreamStore(segment, c.Client.SegmentSize.Int64(), encStore,
|
||||
int(blockSize), storj.Cipher(c.Enc.DataType), c.Client.MaxInlineSize.Int(),
|
||||
)
|
||||
if err != nil {
|
||||
return nil, nil, Error.New("failed to create stream store: %v", err)
|
||||
}
|
||||
|
||||
return kvmetainfo.New(project, m, strms, segment, encStore), strms, nil
|
||||
}
|
||||
|
||||
// GetRedundancyScheme returns the configured redundancy scheme for new uploads
|
||||
func (c Config) GetRedundancyScheme() storj.RedundancyScheme {
|
||||
return storj.RedundancyScheme{
|
||||
@ -176,20 +84,3 @@ func (c Config) GetEncryptionScheme() storj.EncryptionScheme {
|
||||
func (c Config) GetSegmentSize() memory.Size {
|
||||
return c.Client.SegmentSize
|
||||
}
|
||||
|
||||
// LoadEncryptionKey loads the encryption key stored in the file pointed by
|
||||
// filepath.
|
||||
//
|
||||
// An error is file is not found or there is an I/O error.
|
||||
func LoadEncryptionKey(filepath string) (key *storj.Key, error error) {
|
||||
if filepath == "" {
|
||||
return &storj.Key{}, nil
|
||||
}
|
||||
|
||||
rawKey, err := ioutil.ReadFile(filepath)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return storj.NewKey(rawKey)
|
||||
}
|
||||
|
@ -1,58 +0,0 @@
|
||||
// Copyright (C) 2019 Storj Labs, Inc.
|
||||
// See LICENSE for copying information.
|
||||
|
||||
package uplink_test
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"storj.io/storj/internal/testcontext"
|
||||
"storj.io/storj/internal/testrand"
|
||||
"storj.io/storj/pkg/storj"
|
||||
"storj.io/storj/uplink"
|
||||
)
|
||||
|
||||
func TestLoadEncryptionKey(t *testing.T) {
|
||||
saveRawKey := func(key []byte) (filepath string, clenaup func()) {
|
||||
t.Helper()
|
||||
|
||||
ctx := testcontext.New(t)
|
||||
filename := ctx.File("encryption.key")
|
||||
err := ioutil.WriteFile(filename, key, os.FileMode(0400))
|
||||
require.NoError(t, err)
|
||||
|
||||
return filename, ctx.Cleanup
|
||||
}
|
||||
|
||||
t.Run("ok: reading from file", func(t *testing.T) {
|
||||
passphrase := testrand.BytesInt(1 + testrand.Intn(100))
|
||||
|
||||
expectedKey, err := storj.NewKey(passphrase)
|
||||
require.NoError(t, err)
|
||||
filename, cleanup := saveRawKey(expectedKey[:])
|
||||
defer cleanup()
|
||||
|
||||
key, err := uplink.LoadEncryptionKey(filename)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, expectedKey, key)
|
||||
})
|
||||
|
||||
t.Run("ok: empty filepath", func(t *testing.T) {
|
||||
key, err := uplink.LoadEncryptionKey("")
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, &storj.Key{}, key)
|
||||
})
|
||||
|
||||
t.Run("error: file not found", func(t *testing.T) {
|
||||
ctx := testcontext.New(t)
|
||||
defer ctx.Cleanup()
|
||||
filename := ctx.File("encryption.key")
|
||||
|
||||
_, err := uplink.LoadEncryptionKey(filename)
|
||||
require.Error(t, err)
|
||||
})
|
||||
}
|
@ -237,3 +237,10 @@ func (client *Client) SetAttribution(ctx context.Context, bucket string, partner
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
// GetProjectInfo gets the ProjectInfo for the api key associated with the metainfo client.
|
||||
func (client *Client) GetProjectInfo(ctx context.Context) (resp *pb.ProjectInfoResponse, err error) {
|
||||
defer mon.Task()(&ctx)(&err)
|
||||
|
||||
return client.client.ProjectInfo(ctx, &pb.ProjectInfoRequest{})
|
||||
}
|
||||
|
@ -1,7 +1,7 @@
|
||||
// Copyright (C) 2019 Storj Labs, Inc.
|
||||
// See LICENSE for copying information.
|
||||
|
||||
package uplink
|
||||
package setup
|
||||
|
||||
import (
|
||||
"os"
|
@ -1,7 +1,7 @@
|
||||
// Copyright (C) 2019 Storj Labs, Inc.
|
||||
// See LICENSE for copying information.
|
||||
|
||||
package uplink_test
|
||||
package setup_test
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
@ -15,7 +15,7 @@ import (
|
||||
"storj.io/storj/internal/testcontext"
|
||||
"storj.io/storj/internal/testrand"
|
||||
"storj.io/storj/pkg/storj"
|
||||
"storj.io/storj/uplink"
|
||||
"storj.io/storj/uplink/setup"
|
||||
)
|
||||
|
||||
func TestSaveEncryptionKey(t *testing.T) {
|
||||
@ -29,7 +29,7 @@ func TestSaveEncryptionKey(t *testing.T) {
|
||||
|
||||
inputKey := generateInputKey()
|
||||
filename := ctx.File("storj-test-cmd-uplink", "encryption.key")
|
||||
err := uplink.SaveEncryptionKey(inputKey, filename)
|
||||
err := setup.SaveEncryptionKey(inputKey, filename)
|
||||
require.NoError(t, err)
|
||||
|
||||
savedKey, err := ioutil.ReadFile(filename)
|
||||
@ -44,14 +44,14 @@ func TestSaveEncryptionKey(t *testing.T) {
|
||||
|
||||
filename := ctx.File("storj-test-cmd-uplink", "encryption.key")
|
||||
|
||||
err := uplink.SaveEncryptionKey("", filename)
|
||||
err := setup.SaveEncryptionKey("", filename)
|
||||
require.Error(t, err)
|
||||
})
|
||||
|
||||
t.Run("error: empty filepath", func(t *testing.T) {
|
||||
inputKey := generateInputKey()
|
||||
|
||||
err := uplink.SaveEncryptionKey(inputKey, "")
|
||||
err := setup.SaveEncryptionKey(inputKey, "")
|
||||
require.Error(t, err)
|
||||
})
|
||||
|
||||
@ -64,7 +64,7 @@ func TestSaveEncryptionKey(t *testing.T) {
|
||||
|
||||
inputKey := generateInputKey()
|
||||
filename := filepath.Join(dir, "enc.key")
|
||||
err := uplink.SaveEncryptionKey(inputKey, filename)
|
||||
err := setup.SaveEncryptionKey(inputKey, filename)
|
||||
require.Errorf(t, err, "directory path doesn't exist")
|
||||
})
|
||||
|
||||
@ -76,7 +76,7 @@ func TestSaveEncryptionKey(t *testing.T) {
|
||||
filename := ctx.File("encryption.key")
|
||||
require.NoError(t, ioutil.WriteFile(filename, nil, os.FileMode(0600)))
|
||||
|
||||
err := uplink.SaveEncryptionKey(inputKey, filename)
|
||||
err := setup.SaveEncryptionKey(inputKey, filename)
|
||||
require.Errorf(t, err, "file key already exists")
|
||||
})
|
||||
}
|
51
uplink/setup/setup.go
Normal file
51
uplink/setup/setup.go
Normal file
@ -0,0 +1,51 @@
|
||||
// Copyright (C) 2019 Storj Labs, Inc.
|
||||
// See LICENSE for copying information.
|
||||
|
||||
package setup
|
||||
|
||||
import (
|
||||
"context"
|
||||
"io/ioutil"
|
||||
"strings"
|
||||
|
||||
"github.com/zeebo/errs"
|
||||
"gopkg.in/spacemonkeygo/monkit.v2"
|
||||
|
||||
libuplink "storj.io/storj/lib/uplink"
|
||||
"storj.io/storj/pkg/storj"
|
||||
"storj.io/storj/uplink"
|
||||
)
|
||||
|
||||
var (
|
||||
mon = monkit.Package()
|
||||
|
||||
// Error is the class of errors returned by this package
|
||||
Error = errs.Class("uplink setup")
|
||||
)
|
||||
|
||||
// LoadEncryptionCtx loads an EncryptionCtx from the values specified in the encryption config.
|
||||
func LoadEncryptionCtx(ctx context.Context, cfg uplink.EncryptionConfig) (_ *libuplink.EncryptionCtx, err error) {
|
||||
defer mon.Task()(&ctx)(&err)
|
||||
|
||||
if cfg.EncCtxFilepath != "" {
|
||||
data, err := ioutil.ReadFile(cfg.EncCtxFilepath)
|
||||
if err != nil {
|
||||
return nil, errs.Wrap(err)
|
||||
}
|
||||
return libuplink.ParseEncryptionCtx(strings.TrimSpace(string(data)))
|
||||
}
|
||||
|
||||
data := []byte(cfg.EncryptionKey)
|
||||
if cfg.KeyFilepath != "" {
|
||||
data, err = ioutil.ReadFile(cfg.KeyFilepath)
|
||||
if err != nil {
|
||||
return nil, errs.Wrap(err)
|
||||
}
|
||||
}
|
||||
|
||||
key, err := storj.NewKey(data)
|
||||
if err != nil {
|
||||
return nil, errs.Wrap(err)
|
||||
}
|
||||
return libuplink.NewEncryptionCtxWithDefaultKey(*key), nil
|
||||
}
|
71
uplink/setup/setup_test.go
Normal file
71
uplink/setup/setup_test.go
Normal file
@ -0,0 +1,71 @@
|
||||
// Copyright (C) 2019 Storj Labs, Inc.
|
||||
// See LICENSE for copying information.
|
||||
|
||||
package setup_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"storj.io/storj/internal/testcontext"
|
||||
"storj.io/storj/internal/testrand"
|
||||
libuplink "storj.io/storj/lib/uplink"
|
||||
"storj.io/storj/pkg/storj"
|
||||
"storj.io/storj/uplink"
|
||||
"storj.io/storj/uplink/setup"
|
||||
)
|
||||
|
||||
func TestLoadEncryptionCtx(t *testing.T) {
|
||||
saveRawCtx := func(encCtx *libuplink.EncryptionCtx) (filepath string, clenaup func()) {
|
||||
t.Helper()
|
||||
|
||||
ctx := testcontext.New(t)
|
||||
filename := ctx.File("encryption.ctx")
|
||||
data, err := encCtx.Serialize()
|
||||
require.NoError(t, err)
|
||||
err = ioutil.WriteFile(filename, []byte(data), os.FileMode(0400))
|
||||
require.NoError(t, err)
|
||||
|
||||
return filename, ctx.Cleanup
|
||||
}
|
||||
|
||||
t.Run("ok: reading from file", func(t *testing.T) {
|
||||
passphrase := testrand.BytesInt(1 + testrand.Intn(100))
|
||||
|
||||
key, err := storj.NewKey(passphrase)
|
||||
require.NoError(t, err)
|
||||
encCtx := libuplink.NewEncryptionCtxWithDefaultKey(*key)
|
||||
filename, cleanup := saveRawCtx(encCtx)
|
||||
defer cleanup()
|
||||
|
||||
gotCtx, err := setup.LoadEncryptionCtx(context.Background(), uplink.EncryptionConfig{
|
||||
EncCtxFilepath: filename,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, encCtx, gotCtx)
|
||||
})
|
||||
|
||||
t.Run("ok: empty filepath", func(t *testing.T) {
|
||||
gotCtx, err := setup.LoadEncryptionCtx(context.Background(), uplink.EncryptionConfig{
|
||||
EncCtxFilepath: "",
|
||||
})
|
||||
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, gotCtx)
|
||||
})
|
||||
|
||||
t.Run("error: file not found", func(t *testing.T) {
|
||||
ctx := testcontext.New(t)
|
||||
defer ctx.Cleanup()
|
||||
filename := ctx.File("encryption.ctx")
|
||||
|
||||
_, err := setup.LoadEncryptionCtx(context.Background(), uplink.EncryptionConfig{
|
||||
EncCtxFilepath: filename,
|
||||
})
|
||||
require.Error(t, err)
|
||||
})
|
||||
}
|
Loading…
Reference in New Issue
Block a user