storj/internal/testplanet/uplink.go
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.

package testplanet

import (
	"bytes"
	"context"
	"fmt"
	"io"
	"io/ioutil"
	"strconv"
	"time"

	"github.com/spf13/pflag"
	"github.com/vivint/infectious"
	"github.com/zeebo/errs"
	"go.uber.org/zap"

	"storj.io/storj/pkg/auth/signing"
	"storj.io/storj/pkg/cfgstruct"
	"storj.io/storj/pkg/eestream"
	"storj.io/storj/pkg/encryption"
	"storj.io/storj/pkg/identity"
	"storj.io/storj/pkg/macaroon"
	"storj.io/storj/pkg/metainfo/kvmetainfo"
	"storj.io/storj/pkg/pb"
	"storj.io/storj/pkg/peertls/tlsopts"
	ecclient "storj.io/storj/pkg/storage/ec"
	"storj.io/storj/pkg/storage/segments"
	"storj.io/storj/pkg/storage/streams"
	"storj.io/storj/pkg/storj"
	"storj.io/storj/pkg/stream"
	"storj.io/storj/pkg/transport"
	"storj.io/storj/satellite"
	"storj.io/storj/satellite/console"
	"storj.io/storj/uplink"
	"storj.io/storj/uplink/metainfo"
	"storj.io/storj/uplink/piecestore"
)
// Uplink is a general purpose uplink peer used for testing.
type Uplink struct {
	Log              *zap.Logger
	Info             pb.Node
	Identity         *identity.FullIdentity
	Transport        transport.Client
	StorageNodeCount int
	APIKey           map[storj.NodeID]string
}
// newUplinks creates and initializes uplinks; it requires the planet to have at least one satellite.
func (planet *Planet) newUplinks(prefix string, count, storageNodeCount int) ([]*Uplink, error) {
	var xs []*Uplink
	for i := 0; i < count; i++ {
		uplink, err := planet.newUplink(prefix+strconv.Itoa(i), storageNodeCount)
		if err != nil {
			return nil, err
		}
		xs = append(xs, uplink)
	}
	return xs, nil
}
// newUplink creates a new uplink
func (planet *Planet) newUplink(name string, storageNodeCount int) (*Uplink, error) {
	identity, err := planet.NewIdentity()
	if err != nil {
		return nil, err
	}

	tlsOpts, err := tlsopts.NewOptions(identity, tlsopts.Config{
		PeerIDVersions: strconv.Itoa(int(planet.config.IdentityVersion.Number)),
	})
	if err != nil {
		return nil, err
	}

	uplink := &Uplink{
		Log:              planet.log.Named(name),
		Identity:         identity,
		StorageNodeCount: storageNodeCount,
	}

	uplink.Log.Debug("id=" + identity.ID.String())

	uplink.Transport = transport.NewClient(tlsOpts)

	uplink.Info = pb.Node{
		Id: uplink.Identity.ID,
		Address: &pb.NodeAddress{
			Transport: pb.NodeTransport_TCP_TLS_GRPC,
			Address:   "",
		},
	}

	apiKeys := make(map[storj.NodeID]string)
	for j, satellite := range planet.Satellites {
		// TODO: find a nicer way to do this
		// populate satellites console with example
		// project and API key and pass that to uplinks
		consoleDB := satellite.DB.Console()

		projectName := fmt.Sprintf("%s_%d", name, j)
		key, err := macaroon.NewAPIKey([]byte("testSecret"))
		if err != nil {
			return nil, err
		}

		project, err := consoleDB.Projects().Insert(
			context.Background(),
			&console.Project{
				Name: projectName,
			},
		)
		if err != nil {
			return nil, err
		}

		_, err = consoleDB.APIKeys().Create(
			context.Background(),
			key.Head(),
			console.APIKeyInfo{
				Name:      "root",
				ProjectID: project.ID,
				Secret:    []byte("testSecret"),
			},
		)
		if err != nil {
			return nil, err
		}

		apiKeys[satellite.ID()] = key.Serialize()
	}

	uplink.APIKey = apiKeys
	planet.uplinks = append(planet.uplinks, uplink)

	return uplink, nil
}

// ID returns uplink id
func (uplink *Uplink) ID() storj.NodeID { return uplink.Info.Id }

// Addr returns uplink address
func (uplink *Uplink) Addr() string { return uplink.Info.Address.Address }

// Local returns uplink info
func (uplink *Uplink) Local() pb.Node { return uplink.Info }

// Shutdown shuts down all uplink dependencies
func (uplink *Uplink) Shutdown() error { return nil }

// DialMetainfo dials destination with apikey and returns metainfo Client
func (uplink *Uplink) DialMetainfo(ctx context.Context, destination Peer, apikey string) (*metainfo.Client, error) {
	return metainfo.Dial(ctx, uplink.Transport, destination.Addr(), apikey)
}

// DialPiecestore dials destination storagenode and returns a piecestore client.
func (uplink *Uplink) DialPiecestore(ctx context.Context, destination Peer) (*piecestore.Client, error) {
	node := destination.Local()
	signer := signing.SignerFromFullIdentity(uplink.Transport.Identity())
	return piecestore.Dial(ctx, uplink.Transport, &node.Node, uplink.Log.Named("uplink>piecestore"), signer, piecestore.DefaultConfig)
}

// Upload uploads data to a specific satellite
func (uplink *Uplink) Upload(ctx context.Context, satellite *satellite.Peer, bucket string, path storj.Path, data []byte) error {
	return uplink.UploadWithExpiration(ctx, satellite, bucket, path, data, time.Time{})
}
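
// A typical call from a test, assuming a planet created and started via
// testplanet.New, might look like:
//
//	err := planet.Uplinks[0].Upload(ctx, planet.Satellites[0], "testbucket", "test/path", testData)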

// UploadWithExpiration uploads data to a specific satellite with an expiration time
func (uplink *Uplink) UploadWithExpiration(ctx context.Context, satellite *satellite.Peer, bucket string, path storj.Path, data []byte, expiration time.Time) error {
	return uplink.UploadWithExpirationAndConfig(ctx, satellite, nil, bucket, path, data, expiration)
}

// UploadWithConfig uploads data to a specific satellite with configured values
func (uplink *Uplink) UploadWithConfig(ctx context.Context, satellite *satellite.Peer, redundancy *uplink.RSConfig, bucket string, path storj.Path, data []byte) error {
	return uplink.UploadWithExpirationAndConfig(ctx, satellite, redundancy, bucket, path, data, time.Time{})
}

// UploadWithExpirationAndConfig uploads data to a specific satellite with configured values and an expiration time
func (uplink *Uplink) UploadWithExpirationAndConfig(ctx context.Context, satellite *satellite.Peer, redundancy *uplink.RSConfig, bucket string, path storj.Path, data []byte, expiration time.Time) error {
	config := uplink.GetConfig(satellite)
	if redundancy != nil {
		if redundancy.MinThreshold > 0 {
			config.RS.MinThreshold = redundancy.MinThreshold
		}
		if redundancy.RepairThreshold > 0 {
			config.RS.RepairThreshold = redundancy.RepairThreshold
		}
		if redundancy.SuccessThreshold > 0 {
			config.RS.SuccessThreshold = redundancy.SuccessThreshold
		}
		if redundancy.MaxThreshold > 0 {
			config.RS.MaxThreshold = redundancy.MaxThreshold
		}
		if redundancy.ErasureShareSize > 0 {
			config.RS.ErasureShareSize = redundancy.ErasureShareSize
		}
	}

	metainfo, streams, err := GetMetainfo(ctx, config, uplink.Identity)
	if err != nil {
		return err
	}

	redScheme := config.GetRedundancyScheme()
	encScheme := config.GetEncryptionScheme()

	// create the bucket if it does not exist yet
	_, err = metainfo.GetBucket(ctx, bucket)
	if err != nil {
		if storj.ErrBucketNotFound.Has(err) {
			_, err := metainfo.CreateBucket(ctx, bucket, &storj.Bucket{PathCipher: encScheme.Cipher})
			if err != nil {
				return err
			}
		} else {
			return err
		}
	}

	createInfo := storj.CreateObject{
		RedundancyScheme: redScheme,
		EncryptionScheme: encScheme,
		Expires:          expiration,
	}
	obj, err := metainfo.CreateObject(ctx, bucket, path, &createInfo)
	if err != nil {
		return err
	}

	reader := bytes.NewReader(data)
	err = uploadStream(ctx, streams, obj, reader)
	if err != nil {
		return err
	}

	return nil
}

// uploadStream writes everything from reader into the given mutable object.
func uploadStream(ctx context.Context, streams streams.Store, mutableObject storj.MutableObject, reader io.Reader) error {
	mutableStream, err := mutableObject.CreateStream(ctx)
	if err != nil {
		return err
	}

	upload := stream.NewUpload(ctx, mutableStream, streams)

	_, err = io.Copy(upload, reader)
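	// Close must run even when io.Copy fails: the upload is only finalized on
	// Close, so its error is combined with any copy error.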
	return errs.Combine(err, upload.Close())
}

// DownloadStream returns stream for downloading data.
func (uplink *Uplink) DownloadStream(ctx context.Context, satellite *satellite.Peer, bucket string, path storj.Path) (*stream.Download, error) {
	config := uplink.GetConfig(satellite)

	metainfo, streams, err := GetMetainfo(ctx, config, uplink.Identity)
	if err != nil {
		return nil, err
	}

	readOnlyStream, err := metainfo.GetObjectStream(ctx, bucket, path)
	if err != nil {
		return nil, err
	}

	return stream.NewDownload(ctx, readOnlyStream, streams), nil
}

// Download downloads data from a specific satellite
func (uplink *Uplink) Download(ctx context.Context, satellite *satellite.Peer, bucket string, path storj.Path) (data []byte, err error) {
	download, err := uplink.DownloadStream(ctx, satellite, bucket, path)
	if err != nil {
		return []byte{}, err
	}
	// err is a named result, so a failure from Close is not silently dropped.
	defer func() { err = errs.Combine(err, download.Close()) }()

	data, err = ioutil.ReadAll(download)
	if err != nil {
		return []byte{}, err
	}
	return data, nil
}

// Delete deletes data from a specific satellite
func (uplink *Uplink) Delete(ctx context.Context, satellite *satellite.Peer, bucket string, path storj.Path) error {
	config := uplink.GetConfig(satellite)

	metainfo, _, err := GetMetainfo(ctx, config, uplink.Identity)
	if err != nil {
		return err
	}

	return metainfo.DeleteObject(ctx, bucket, path)
}

// GetConfig returns a default config for a given satellite.
func (uplink *Uplink) GetConfig(satellite *satellite.Peer) uplink.Config {
	config := getDefaultConfig()
	config.Client.SatelliteAddr = satellite.Addr()
	config.Client.APIKey = uplink.APIKey[satellite.ID()]
	config.Client.RequestTimeout = 10 * time.Second
	config.Client.DialTimeout = 10 * time.Second

	config.RS.MinThreshold = atLeastOne(uplink.StorageNodeCount * 1 / 5)     // 20% of storage nodes
	config.RS.RepairThreshold = atLeastOne(uplink.StorageNodeCount * 2 / 5)  // 40% of storage nodes
	config.RS.SuccessThreshold = atLeastOne(uplink.StorageNodeCount * 3 / 5) // 60% of storage nodes
	config.RS.MaxThreshold = atLeastOne(uplink.StorageNodeCount * 4 / 5)     // 80% of storage nodes
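	// For example, a planet with 10 storage nodes gets MinThreshold=2,
	// RepairThreshold=4, SuccessThreshold=6, and MaxThreshold=8; atLeastOne
	// keeps every threshold at 1 or higher for very small planets.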

	config.TLS.UsePeerCAWhitelist = false
	config.TLS.Extensions.Revocation = false
	config.TLS.Extensions.WhitelistSignedLeaf = false

	return config
}

func getDefaultConfig() uplink.Config {
	config := uplink.Config{}
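	// cfgstruct.Bind normally wires a config struct to command-line flags; the
	// throwaway FlagSet here is used only so that the (dev) default values from
	// the struct tags get written into config.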
	cfgstruct.Bind(&pflag.FlagSet{}, &config, cfgstruct.UseDevDefaults())
	return config
}

// atLeastOne returns 1 if value < 1, or value otherwise.
func atLeastOne(value int) int {
	if value < 1 {
		return 1
	}
	return value
}

// GetMetainfo returns a metainfo and streams store for the given configuration and identity.
func GetMetainfo(ctx context.Context, config uplink.Config, identity *identity.FullIdentity) (db storj.Metainfo, ss streams.Store, err error) {
	tlsOpts, err := tlsopts.NewOptions(identity, config.TLS)
	if err != nil {
		return nil, nil, err
	}

	// ToDo: Handle Versioning for Uplinks here
	tc := transport.NewClientWithTimeouts(tlsOpts, transport.Timeouts{
		Request: config.Client.RequestTimeout,
		Dial:    config.Client.DialTimeout,
	})

	if config.Client.SatelliteAddr == "" {
		return nil, nil, errs.New("satellite address not specified")
	}

	m, err := metainfo.Dial(ctx, tc, config.Client.SatelliteAddr, config.Client.APIKey)
	if err != nil {
		return nil, nil, errs.New("failed to connect to metainfo service: %v", err)
	}
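
	// err is a named result, so the deferred close below fires whenever any of
	// the remaining setup steps fails and returns a non-nil error.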
	defer func() {
		if err != nil {
			err = errs.Combine(err, m.Close())
		}
	}()

	project, err := kvmetainfo.SetupProject(m)
	if err != nil {
		return nil, nil, errs.New("failed to create project: %v", err)
	}

	ec := ecclient.NewClient(tc, config.RS.MaxBufferMem.Int())
	fc, err := infectious.NewFEC(config.RS.MinThreshold, config.RS.MaxThreshold)
	if err != nil {
		return nil, nil, errs.New("failed to create erasure coding client: %v", err)
	}
	rs, err := eestream.NewRedundancyStrategy(eestream.NewRSScheme(fc, config.RS.ErasureShareSize.Int()), config.RS.RepairThreshold, config.RS.SuccessThreshold)
	if err != nil {
		return nil, nil, errs.New("failed to create redundancy strategy: %v", err)
	}

	maxEncryptedSegmentSize, err := encryption.CalcEncryptedSize(config.Client.SegmentSize.Int64(), config.GetEncryptionScheme())
	if err != nil {
		return nil, nil, errs.New("failed to calculate max encrypted segment size: %v", err)
	}
	segment := segments.NewSegmentStore(m, ec, rs, config.Client.MaxInlineSize.Int(), maxEncryptedSegmentSize)
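
	// Encrypted blocks must line up with erasure-coded stripes: a stripe is
	// ErasureShareSize bytes from each of MinThreshold shares, so the block
	// size has to be a whole multiple of ErasureShareSize * MinThreshold.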
	blockSize := config.GetEncryptionScheme().BlockSize
	if int(blockSize)%(config.RS.ErasureShareSize.Int()*config.RS.MinThreshold) != 0 {
		err = errs.New("EncryptionBlockSize must be a multiple of ErasureShareSize * RS MinThreshold")
		return nil, nil, err
	}

	// TODO(jeff): there's some cycles with libuplink and this package in the libuplink tests
	// and so this package can't import libuplink. that's why this function is duplicated
	// in some spots.
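
	// A zero (all-zeroes) key is installed as the default encryption key, which
	// is enough for testplanet's purposes.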
	encStore := encryption.NewStore()
	encStore.SetDefaultKey(new(storj.Key))

	strms, err := streams.NewStreamStore(segment, config.Client.SegmentSize.Int64(), encStore,
		int(blockSize), storj.Cipher(config.Enc.DataType), config.Client.MaxInlineSize.Int(),
	)
	if err != nil {
		return nil, nil, errs.New("failed to create stream store: %v", err)
	}

	return kvmetainfo.New(project, m, strms, segment, encStore), strms, nil
}