// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information

package testplanet

import (
	"bytes"
	"context"
	"fmt"
	"io"
	"io/ioutil"
	"strconv"
	"time"

	"github.com/spf13/pflag"
	"github.com/vivint/infectious"
	"github.com/zeebo/errs"
	"go.uber.org/zap"

	"storj.io/storj/pkg/auth/signing"
	"storj.io/storj/pkg/cfgstruct"
	"storj.io/storj/pkg/eestream"
	"storj.io/storj/pkg/encryption"
	"storj.io/storj/pkg/identity"
	"storj.io/storj/pkg/macaroon"
	"storj.io/storj/pkg/metainfo/kvmetainfo"
	"storj.io/storj/pkg/pb"
	"storj.io/storj/pkg/peertls/tlsopts"
	ecclient "storj.io/storj/pkg/storage/ec"
	"storj.io/storj/pkg/storage/segments"
	"storj.io/storj/pkg/storage/streams"
	"storj.io/storj/pkg/storj"
	"storj.io/storj/pkg/stream"
	"storj.io/storj/pkg/transport"
	"storj.io/storj/satellite"
	"storj.io/storj/satellite/console"
	"storj.io/storj/uplink"
	"storj.io/storj/uplink/metainfo"
	"storj.io/storj/uplink/piecestore"
)

// Uplink is a general purpose uplink used for testing.
type Uplink struct {
	Log              *zap.Logger
	Info             pb.Node
	Identity         *identity.FullIdentity
	Transport        transport.Client
	StorageNodeCount int
	APIKey           map[storj.NodeID]string
}

// newUplinks creates the requested number of uplinks; it requires the planet to have
// at least one satellite.
func (planet *Planet) newUplinks(prefix string, count, storageNodeCount int) ([]*Uplink, error) {
	var xs []*Uplink
	for i := 0; i < count; i++ {
		uplink, err := planet.newUplink(prefix+strconv.Itoa(i), storageNodeCount)
		if err != nil {
			return nil, err
		}
		xs = append(xs, uplink)
	}

	return xs, nil
}

// newUplink creates a new uplink and provisions a console project and API key
// for it on every satellite.
func (planet *Planet) newUplink(name string, storageNodeCount int) (*Uplink, error) {
	identity, err := planet.NewIdentity()
	if err != nil {
		return nil, err
	}

	tlsOpts, err := tlsopts.NewOptions(identity, tlsopts.Config{
		PeerIDVersions: strconv.Itoa(int(planet.config.IdentityVersion.Number)),
	})
	if err != nil {
		return nil, err
	}

	uplink := &Uplink{
		Log:              planet.log.Named(name),
		Identity:         identity,
		StorageNodeCount: storageNodeCount,
	}

	uplink.Log.Debug("id=" + identity.ID.String())

	uplink.Transport = transport.NewClient(tlsOpts)

	uplink.Info = pb.Node{
		Id: uplink.Identity.ID,
		Address: &pb.NodeAddress{
			Transport: pb.NodeTransport_TCP_TLS_GRPC,
			Address:   "",
		},
	}

	apiKeys := make(map[storj.NodeID]string)
	for j, satellite := range planet.Satellites {
		// TODO: find a nicer way to do this
		// populate the satellite's console with an example project and API key
		// and pass that API key to the uplink
		consoleDB := satellite.DB.Console()

		projectName := fmt.Sprintf("%s_%d", name, j)
		key, err := macaroon.NewAPIKey([]byte("testSecret"))
		if err != nil {
			return nil, err
		}

		project, err := consoleDB.Projects().Insert(
			context.Background(),
			&console.Project{
				Name: projectName,
			},
		)
		if err != nil {
			return nil, err
		}

		_, err = consoleDB.APIKeys().Create(
			context.Background(),
			key.Head(),
			console.APIKeyInfo{
				Name:      "root",
				ProjectID: project.ID,
				Secret:    []byte("testSecret"),
			},
		)
		if err != nil {
			return nil, err
		}

		apiKeys[satellite.ID()] = key.Serialize()
	}

	uplink.APIKey = apiKeys
	planet.uplinks = append(planet.uplinks, uplink)

	return uplink, nil
}

// ID returns the uplink's node ID.
func (uplink *Uplink) ID() storj.NodeID { return uplink.Info.Id }

// Addr returns the uplink's address.
func (uplink *Uplink) Addr() string { return uplink.Info.Address.Address }

// Local returns the uplink's node info.
func (uplink *Uplink) Local() pb.Node { return uplink.Info }

// Shutdown shuts down all uplink dependencies.
func (uplink *Uplink) Shutdown() error { return nil }

// DialMetainfo dials the destination with the given API key and returns a metainfo client.
func (uplink *Uplink) DialMetainfo(ctx context.Context, destination Peer, apikey string) (*metainfo.Client, error) {
	return metainfo.Dial(ctx, uplink.Transport, destination.Addr(), apikey)
}

// DialPiecestore dials the destination storage node and returns a piecestore client.
func (uplink *Uplink) DialPiecestore(ctx context.Context, destination Peer) (*piecestore.Client, error) {
	node := destination.Local()
	signer := signing.SignerFromFullIdentity(uplink.Transport.Identity())
	return piecestore.Dial(ctx, uplink.Transport, &node.Node, uplink.Log.Named("uplink>piecestore"), signer, piecestore.DefaultConfig)
}
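
// A typical test-side use of the dial helpers looks roughly like the sketch below.
// This is only an illustration: it assumes the Planet type exposes Uplinks and
// StorageNodes slices and that the piecestore client has a Close method, none of
// which are defined in this file.
//
//	apikey := uplink.APIKey[planet.Satellites[0].ID()]
//	mclient, err := uplink.DialMetainfo(ctx, planet.Satellites[0], apikey)
//	// ...
//	pclient, err := uplink.DialPiecestore(ctx, planet.StorageNodes[0])
//	// ...
//	defer func() { err = errs.Combine(err, pclient.Close()) }()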

// Upload uploads data to a specific satellite.
func (uplink *Uplink) Upload(ctx context.Context, satellite *satellite.Peer, bucket string, path storj.Path, data []byte) error {
	return uplink.UploadWithExpiration(ctx, satellite, bucket, path, data, time.Time{})
}

// UploadWithExpiration uploads data to a specific satellite with an expiration time.
func (uplink *Uplink) UploadWithExpiration(ctx context.Context, satellite *satellite.Peer, bucket string, path storj.Path, data []byte, expiration time.Time) error {
	return uplink.UploadWithExpirationAndConfig(ctx, satellite, nil, bucket, path, data, expiration)
}

// UploadWithConfig uploads data to a specific satellite with configured values.
func (uplink *Uplink) UploadWithConfig(ctx context.Context, satellite *satellite.Peer, redundancy *uplink.RSConfig, bucket string, path storj.Path, data []byte) error {
	return uplink.UploadWithExpirationAndConfig(ctx, satellite, redundancy, bucket, path, data, time.Time{})
}

// UploadWithExpirationAndConfig uploads data to a specific satellite with configured values and an expiration time.
func (uplink *Uplink) UploadWithExpirationAndConfig(ctx context.Context, satellite *satellite.Peer, redundancy *uplink.RSConfig, bucket string, path storj.Path, data []byte, expiration time.Time) (err error) {
	config := uplink.GetConfig(satellite)
	if redundancy != nil {
		if redundancy.MinThreshold > 0 {
			config.RS.MinThreshold = redundancy.MinThreshold
		}
		if redundancy.RepairThreshold > 0 {
			config.RS.RepairThreshold = redundancy.RepairThreshold
		}
		if redundancy.SuccessThreshold > 0 {
			config.RS.SuccessThreshold = redundancy.SuccessThreshold
		}
		if redundancy.MaxThreshold > 0 {
			config.RS.MaxThreshold = redundancy.MaxThreshold
		}
		if redundancy.ErasureShareSize > 0 {
			config.RS.ErasureShareSize = redundancy.ErasureShareSize
		}
	}

	metainfo, streams, cleanup, err := DialMetainfo(ctx, config, uplink.Identity)
	if err != nil {
		return err
	}
	defer func() {
		err = errs.Combine(err, cleanup())
	}()

	redScheme := config.GetRedundancyScheme()
	encScheme := config.GetEncryptionScheme()

	// create the bucket if it does not exist yet
	_, err = metainfo.GetBucket(ctx, bucket)
	if err != nil {
		if storj.ErrBucketNotFound.Has(err) {
			_, err := metainfo.CreateBucket(ctx, bucket, &storj.Bucket{PathCipher: encScheme.Cipher})
			if err != nil {
				return err
			}
		} else {
			return err
		}
	}

	createInfo := storj.CreateObject{
		RedundancyScheme: redScheme,
		EncryptionScheme: encScheme,
		Expires:          expiration,
	}
	obj, err := metainfo.CreateObject(ctx, bucket, path, &createInfo)
	if err != nil {
		return err
	}

	reader := bytes.NewReader(data)
	err = uploadStream(ctx, streams, obj, reader)
	if err != nil {
		return err
	}

	err = obj.Commit(ctx)
	if err != nil {
		return err
	}

	return nil
}

// uploadStream writes the reader's contents as a new stream for the given mutable object.
func uploadStream(ctx context.Context, streams streams.Store, mutableObject storj.MutableObject, reader io.Reader) error {
	mutableStream, err := mutableObject.CreateStream(ctx)
	if err != nil {
		return err
	}

	upload := stream.NewUpload(ctx, mutableStream, streams)

	_, err = io.Copy(upload, reader)

	return errs.Combine(err, upload.Close())
}

// DownloadStream returns a stream for downloading data.
func (uplink *Uplink) DownloadStream(ctx context.Context, satellite *satellite.Peer, bucket string, path storj.Path) (*stream.Download, func() error, error) {
	config := uplink.GetConfig(satellite)
	metainfo, streams, cleanup, err := DialMetainfo(ctx, config, uplink.Identity)
	if err != nil {
		return nil, func() error { return nil }, errs.Combine(err, cleanup())
	}

	readOnlyStream, err := metainfo.GetObjectStream(ctx, bucket, path)
	if err != nil {
		return nil, func() error { return nil }, errs.Combine(err, cleanup())
	}

	return stream.NewDownload(ctx, readOnlyStream, streams), cleanup, nil
}

// Download downloads data from a specific satellite.
func (uplink *Uplink) Download(ctx context.Context, satellite *satellite.Peer, bucket string, path storj.Path) (data []byte, err error) {
	download, cleanup, err := uplink.DownloadStream(ctx, satellite, bucket, path)
	if err != nil {
		return []byte{}, err
	}
	// named results ensure that close/cleanup errors are not silently dropped
	defer func() {
		err = errs.Combine(err,
			download.Close(),
			cleanup(),
		)
	}()

	data, err = ioutil.ReadAll(download)
	if err != nil {
		return []byte{}, err
	}
	return data, nil
}
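
// roundTripExample is a minimal sketch (not called by the planet itself) of how a
// test typically exercises Upload and Download together. It assumes the planet was
// built with at least one satellite and one uplink, and that planet.Satellites holds
// *satellite.Peer values as the rest of this file does; the bucket and path names
// are illustrative only.
func roundTripExample(ctx context.Context, planet *Planet) error {
	up := planet.uplinks[0]
	sat := planet.Satellites[0]

	expected := []byte("hello world")
	if err := up.Upload(ctx, sat, "testbucket", "test/path", expected); err != nil {
		return err
	}

	downloaded, err := up.Download(ctx, sat, "testbucket", "test/path")
	if err != nil {
		return err
	}
	if !bytes.Equal(downloaded, expected) {
		return errs.New("unexpected data: got %d bytes, want %d bytes", len(downloaded), len(expected))
	}
	return nil
}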

// Delete deletes data from a specific satellite.
func (uplink *Uplink) Delete(ctx context.Context, satellite *satellite.Peer, bucket string, path storj.Path) error {
	config := uplink.GetConfig(satellite)
	metainfo, _, cleanup, err := DialMetainfo(ctx, config, uplink.Identity)
	if err != nil {
		return err
	}
	return errs.Combine(
		metainfo.DeleteObject(ctx, bucket, path),
		cleanup(),
	)
}

// GetConfig returns a default config for a given satellite.
func (uplink *Uplink) GetConfig(satellite *satellite.Peer) uplink.Config {
	config := getDefaultConfig()
	config.Client.SatelliteAddr = satellite.Addr()
	config.Client.APIKey = uplink.APIKey[satellite.ID()]
	config.Client.RequestTimeout = 10 * time.Second
	config.Client.DialTimeout = 10 * time.Second

	config.RS.MinThreshold = atLeastOne(uplink.StorageNodeCount * 1 / 5)     // 20% of storage nodes
	config.RS.RepairThreshold = atLeastOne(uplink.StorageNodeCount * 2 / 5)  // 40% of storage nodes
	config.RS.SuccessThreshold = atLeastOne(uplink.StorageNodeCount * 3 / 5) // 60% of storage nodes
	config.RS.MaxThreshold = atLeastOne(uplink.StorageNodeCount * 4 / 5)     // 80% of storage nodes

	config.TLS.UsePeerCAWhitelist = false
	config.TLS.Extensions.Revocation = false
	config.TLS.Extensions.WhitelistSignedLeaf = false

	return config
}
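
// As a worked example of the integer math in GetConfig: with StorageNodeCount = 4 the
// thresholds come out as atLeastOne(4*1/5) = 1, atLeastOne(4*2/5) = 1,
// atLeastOne(4*3/5) = 2, and atLeastOne(4*4/5) = 3, i.e. a 1/1/2/3 redundancy scheme.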

// getDefaultConfig returns an uplink config bound to dev defaults.
func getDefaultConfig() uplink.Config {
	config := uplink.Config{}
	cfgstruct.Bind(&pflag.FlagSet{}, &config, cfgstruct.UseDevDefaults())
	return config
}

// atLeastOne returns 1 if value < 1, or value otherwise.
func atLeastOne(value int) int {
	if value < 1 {
		return 1
	}
	return value
}

// DialMetainfo returns a metainfo and streams store for the given configuration and identity.
func DialMetainfo(ctx context.Context, config uplink.Config, identity *identity.FullIdentity) (db storj.Metainfo, ss streams.Store, cleanup func() error, err error) {
	// start with a no-op cleanup so callers can always call it, even when an error is returned
	cleanup = func() error { return nil }

	tlsOpts, err := tlsopts.NewOptions(identity, config.TLS)
	if err != nil {
		return nil, nil, cleanup, err
	}

	// TODO: handle versioning for uplinks here

	tc := transport.NewClientWithTimeouts(tlsOpts, transport.Timeouts{
		Request: config.Client.RequestTimeout,
		Dial:    config.Client.DialTimeout,
	})

	if config.Client.SatelliteAddr == "" {
		return nil, nil, cleanup, errs.New("satellite address not specified")
	}

	m, err := metainfo.Dial(ctx, tc, config.Client.SatelliteAddr, config.Client.APIKey)
	if err != nil {
		return nil, nil, cleanup, errs.New("failed to connect to metainfo service: %v", err)
	}
	defer func() {
		if err != nil {
			// close metainfo if any part of the setup fails
			err = errs.Combine(err, m.Close())
		}
	}()

	project, err := kvmetainfo.SetupProject(m)
	if err != nil {
		return nil, nil, cleanup, errs.New("failed to create project: %v", err)
	}

	ec := ecclient.NewClient(tc, config.RS.MaxBufferMem.Int())
	fc, err := infectious.NewFEC(config.RS.MinThreshold, config.RS.MaxThreshold)
	if err != nil {
		return nil, nil, cleanup, errs.New("failed to create erasure coding client: %v", err)
	}
	rs, err := eestream.NewRedundancyStrategy(eestream.NewRSScheme(fc, config.RS.ErasureShareSize.Int()), config.RS.RepairThreshold, config.RS.SuccessThreshold)
	if err != nil {
		return nil, nil, cleanup, errs.New("failed to create redundancy strategy: %v", err)
	}

	maxEncryptedSegmentSize, err := encryption.CalcEncryptedSize(config.Client.SegmentSize.Int64(), config.GetEncryptionScheme())
	if err != nil {
		return nil, nil, cleanup, errs.New("failed to calculate max encrypted segment size: %v", err)
	}
	segment := segments.NewSegmentStore(m, ec, rs, config.Client.MaxInlineSize.Int(), maxEncryptedSegmentSize)

	blockSize := config.GetEncryptionScheme().BlockSize
	if int(blockSize)%(config.RS.ErasureShareSize.Int()*config.RS.MinThreshold) != 0 {
		err = errs.New("EncryptionBlockSize must be a multiple of ErasureShareSize * RS MinThreshold")
		return nil, nil, cleanup, err
	}

	// TODO(jeff): there are some cycles between libuplink and this package in the libuplink
	// tests, so this package can't import libuplink. That's why this function is duplicated
	// in a few spots.

	encStore := encryption.NewStore()
	encStore.SetDefaultKey(new(storj.Key))

	strms, err := streams.NewStreamStore(segment, config.Client.SegmentSize.Int64(), encStore,
		int(blockSize), storj.Cipher(config.Enc.DataType), config.Client.MaxInlineSize.Int(),
	)
	if err != nil {
		return nil, nil, cleanup, errs.New("failed to create stream store: %v", err)
	}

	return kvmetainfo.New(project, m, strms, segment, encStore), strms, m.Close, nil
}
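
// dialMetainfoExample is a minimal sketch (not used elsewhere in this package) of the
// intended calling pattern for DialMetainfo: schedule cleanup with defer so the
// metainfo connection is closed even when a later step fails. The bucket name here is
// illustrative only; GetBucket is used the same way in UploadWithExpirationAndConfig above.
func dialMetainfoExample(ctx context.Context, uplink *Uplink, satellite *satellite.Peer) (err error) {
	config := uplink.GetConfig(satellite)

	db, _, cleanup, err := DialMetainfo(ctx, config, uplink.Identity)
	if err != nil {
		return err
	}
	defer func() { err = errs.Combine(err, cleanup()) }()

	_, err = db.GetBucket(ctx, "example-bucket")
	return err
}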