update testplanet with libuplink (#2618)

* update testplanet uplink upload with libuplink

* add libuplink to testplanet download

* update createbucket and delete obj with libuplink

* update downloadStream, fix tests

* fix test

* updates for CR comments
This commit is contained in:
Jess G 2019-07-23 07:58:45 -07:00 committed by GitHub
parent 3c8f1370d2
commit 353b089927
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
4 changed files with 178 additions and 195 deletions

View File

@@ -7,35 +7,28 @@ import (
"bytes" "bytes"
"context" "context"
"fmt" "fmt"
"io"
"io/ioutil" "io/ioutil"
"strconv" "strconv"
"time" "time"
"github.com/spf13/pflag" "github.com/spf13/pflag"
"github.com/vivint/infectious"
"github.com/zeebo/errs" "github.com/zeebo/errs"
"go.uber.org/zap" "go.uber.org/zap"
libuplink "storj.io/storj/lib/uplink"
"storj.io/storj/pkg/cfgstruct" "storj.io/storj/pkg/cfgstruct"
"storj.io/storj/pkg/eestream"
"storj.io/storj/pkg/encryption"
"storj.io/storj/pkg/identity" "storj.io/storj/pkg/identity"
"storj.io/storj/pkg/macaroon" "storj.io/storj/pkg/macaroon"
"storj.io/storj/pkg/metainfo/kvmetainfo"
"storj.io/storj/pkg/pb" "storj.io/storj/pkg/pb"
"storj.io/storj/pkg/peertls/tlsopts" "storj.io/storj/pkg/peertls/tlsopts"
ecclient "storj.io/storj/pkg/storage/ec"
"storj.io/storj/pkg/storage/segments"
"storj.io/storj/pkg/storage/streams"
"storj.io/storj/pkg/storj" "storj.io/storj/pkg/storj"
"storj.io/storj/pkg/stream"
"storj.io/storj/pkg/transport" "storj.io/storj/pkg/transport"
"storj.io/storj/satellite" "storj.io/storj/satellite"
"storj.io/storj/satellite/console" "storj.io/storj/satellite/console"
"storj.io/storj/uplink" "storj.io/storj/uplink"
"storj.io/storj/uplink/metainfo" "storj.io/storj/uplink/metainfo"
"storj.io/storj/uplink/piecestore" "storj.io/storj/uplink/piecestore"
"storj.io/storj/uplink/setup"
) )
// Uplink is a general purpose // Uplink is a general purpose
@@ -140,46 +133,46 @@ func (planet *Planet) newUplink(name string, storageNodeCount int) (*Uplink, err
} }
// ID returns uplink id // ID returns uplink id
func (uplink *Uplink) ID() storj.NodeID { return uplink.Info.Id } func (client *Uplink) ID() storj.NodeID { return client.Info.Id }
// Addr returns uplink address // Addr returns uplink address
func (uplink *Uplink) Addr() string { return uplink.Info.Address.Address } func (client *Uplink) Addr() string { return client.Info.Address.Address }
// Local returns uplink info // Local returns uplink info
func (uplink *Uplink) Local() pb.Node { return uplink.Info } func (client *Uplink) Local() pb.Node { return client.Info }
// Shutdown shuts down all uplink dependencies // Shutdown shuts down all uplink dependencies
func (uplink *Uplink) Shutdown() error { return nil } func (client *Uplink) Shutdown() error { return nil }
// DialMetainfo dials destination with apikey and returns metainfo Client // DialMetainfo dials destination with apikey and returns metainfo Client
func (uplink *Uplink) DialMetainfo(ctx context.Context, destination Peer, apikey string) (*metainfo.Client, error) { func (client *Uplink) DialMetainfo(ctx context.Context, destination Peer, apikey string) (*metainfo.Client, error) {
return metainfo.Dial(ctx, uplink.Transport, destination.Addr(), apikey) return metainfo.Dial(ctx, client.Transport, destination.Addr(), apikey)
} }
// DialPiecestore dials destination storagenode and returns a piecestore client. // DialPiecestore dials destination storagenode and returns a piecestore client.
func (uplink *Uplink) DialPiecestore(ctx context.Context, destination Peer) (*piecestore.Client, error) { func (client *Uplink) DialPiecestore(ctx context.Context, destination Peer) (*piecestore.Client, error) {
node := destination.Local() node := destination.Local()
return piecestore.Dial(ctx, uplink.Transport, &node.Node, uplink.Log.Named("uplink>piecestore"), piecestore.DefaultConfig) return piecestore.Dial(ctx, client.Transport, &node.Node, client.Log.Named("uplink>piecestore"), piecestore.DefaultConfig)
} }
// Upload data to specific satellite // Upload data to specific satellite
func (uplink *Uplink) Upload(ctx context.Context, satellite *satellite.Peer, bucket string, path storj.Path, data []byte) error { func (client *Uplink) Upload(ctx context.Context, satellite *satellite.Peer, bucket string, path storj.Path, data []byte) error {
return uplink.UploadWithExpiration(ctx, satellite, bucket, path, data, time.Time{}) return client.UploadWithExpiration(ctx, satellite, bucket, path, data, time.Time{})
} }
// UploadWithExpiration data to specific satellite and expiration time // UploadWithExpiration data to specific satellite and expiration time
func (uplink *Uplink) UploadWithExpiration(ctx context.Context, satellite *satellite.Peer, bucket string, path storj.Path, data []byte, expiration time.Time) error { func (client *Uplink) UploadWithExpiration(ctx context.Context, satellite *satellite.Peer, bucket string, path storj.Path, data []byte, expiration time.Time) error {
return uplink.UploadWithExpirationAndConfig(ctx, satellite, nil, bucket, path, data, expiration) return client.UploadWithExpirationAndConfig(ctx, satellite, nil, bucket, path, data, expiration)
} }
// UploadWithConfig uploads data to specific satellite with configured values // UploadWithConfig uploads data to specific satellite with configured values
func (uplink *Uplink) UploadWithConfig(ctx context.Context, satellite *satellite.Peer, redundancy *uplink.RSConfig, bucket string, path storj.Path, data []byte) error { func (client *Uplink) UploadWithConfig(ctx context.Context, satellite *satellite.Peer, redundancy *uplink.RSConfig, bucket string, path storj.Path, data []byte) error {
return uplink.UploadWithExpirationAndConfig(ctx, satellite, redundancy, bucket, path, data, time.Time{}) return client.UploadWithExpirationAndConfig(ctx, satellite, redundancy, bucket, path, data, time.Time{})
} }
// UploadWithExpirationAndConfig uploads data to specific satellite with configured values and expiration time // UploadWithExpirationAndConfig uploads data to specific satellite with configured values and expiration time
func (uplink *Uplink) UploadWithExpirationAndConfig(ctx context.Context, satellite *satellite.Peer, redundancy *uplink.RSConfig, bucket string, path storj.Path, data []byte, expiration time.Time) (err error) { func (client *Uplink) UploadWithExpirationAndConfig(ctx context.Context, satellite *satellite.Peer, redundancy *uplink.RSConfig, bucketName string, path storj.Path, data []byte, expiration time.Time) (err error) {
config := uplink.GetConfig(satellite) config := client.GetConfig(satellite)
if redundancy != nil { if redundancy != nil {
if redundancy.MinThreshold > 0 { if redundancy.MinThreshold > 0 {
config.RS.MinThreshold = redundancy.MinThreshold config.RS.MinThreshold = redundancy.MinThreshold
@@ -198,128 +191,119 @@ func (uplink *Uplink) UploadWithExpirationAndConfig(ctx context.Context, satelli
} }
} }
metainfo, streams, cleanup, err := DialMetainfo(ctx, uplink.Log.Named("metainfo"), config, uplink.Identity) project, bucket, err := client.GetProjectAndBucket(ctx, satellite, bucketName, config)
if err != nil { if err != nil {
return err return err
} }
defer func() { defer func() { err = errs.Combine(err, bucket.Close(), project.Close()) }()
err = errs.Combine(err, cleanup())
}()
redScheme := config.GetRedundancyScheme() opts := &libuplink.UploadOptions{}
encParameters := config.GetEncryptionParameters() opts.Expires = expiration
opts.Volatile.RedundancyScheme = config.GetRedundancyScheme()
// create bucket if not exists opts.Volatile.EncryptionParameters = config.GetEncryptionParameters()
_, err = metainfo.GetBucket(ctx, bucket)
if err != nil {
if storj.ErrBucketNotFound.Has(err) {
_, err := metainfo.CreateBucket(ctx, bucket, &storj.Bucket{PathCipher: encParameters.CipherSuite})
if err != nil {
return err
}
} else {
return err
}
}
createInfo := storj.CreateObject{
RedundancyScheme: redScheme,
EncryptionParameters: encParameters,
Expires: expiration,
}
obj, err := metainfo.CreateObject(ctx, bucket, path, &createInfo)
if err != nil {
return err
}
reader := bytes.NewReader(data) reader := bytes.NewReader(data)
err = uploadStream(ctx, streams, obj, reader) if err := bucket.UploadObject(ctx, path, reader, opts); err != nil {
if err != nil {
return err
}
err = obj.Commit(ctx)
if err != nil {
return err return err
} }
return nil return nil
} }
func uploadStream(ctx context.Context, streams streams.Store, mutableObject storj.MutableObject, reader io.Reader) error {
mutableStream, err := mutableObject.CreateStream(ctx)
if err != nil {
return err
}
upload := stream.NewUpload(ctx, mutableStream, streams)
_, err = io.Copy(upload, reader)
return errs.Combine(err, upload.Close())
}
// DownloadStream returns stream for downloading data.
func (uplink *Uplink) DownloadStream(ctx context.Context, satellite *satellite.Peer, bucket string, path storj.Path) (*stream.Download, func() error, error) {
config := uplink.GetConfig(satellite)
metainfo, streams, cleanup, err := DialMetainfo(ctx, uplink.Log.Named("metainfo"), config, uplink.Identity)
if err != nil {
return nil, func() error { return nil }, errs.Combine(err, cleanup())
}
readOnlyStream, err := metainfo.GetObjectStream(ctx, bucket, path)
if err != nil {
return nil, func() error { return nil }, errs.Combine(err, cleanup())
}
return stream.NewDownload(ctx, readOnlyStream, streams), cleanup, nil
}
// Download data from specific satellite // Download data from specific satellite
func (uplink *Uplink) Download(ctx context.Context, satellite *satellite.Peer, bucket string, path storj.Path) ([]byte, error) { func (client *Uplink) Download(ctx context.Context, satellite *satellite.Peer, bucketName string, path storj.Path) ([]byte, error) {
download, cleanup, err := uplink.DownloadStream(ctx, satellite, bucket, path) project, bucket, err := client.GetProjectAndBucket(ctx, satellite, bucketName, client.GetConfig(satellite))
if err != nil { if err != nil {
return []byte{}, err return nil, err
} }
defer func() { defer func() { err = errs.Combine(err, project.Close(), bucket.Close()) }()
err = errs.Combine(err,
download.Close(),
cleanup(),
)
}()
data, err := ioutil.ReadAll(download) object, err := bucket.OpenObject(ctx, path)
if err != nil {
return nil, err
}
rc, err := object.DownloadRange(ctx, 0, object.Meta.Size)
if err != nil {
return nil, err
}
defer func() { err = errs.Combine(err, rc.Close()) }()
data, err := ioutil.ReadAll(rc)
if err != nil { if err != nil {
return []byte{}, err return []byte{}, err
} }
return data, nil return data, nil
} }
// Delete data to specific satellite // DownloadStream returns stream for downloading data
func (uplink *Uplink) Delete(ctx context.Context, satellite *satellite.Peer, bucket string, path storj.Path) error { func (client *Uplink) DownloadStream(ctx context.Context, satellite *satellite.Peer, bucketName string, path storj.Path) (_ libuplink.ReadSeekCloser, cleanup func() error, err error) {
config := uplink.GetConfig(satellite) project, bucket, err := client.GetProjectAndBucket(ctx, satellite, bucketName, client.GetConfig(satellite))
metainfo, _, cleanup, err := DialMetainfo(ctx, uplink.Log.Named("metainfo"), config, uplink.Identity) if err != nil {
return nil, nil, err
}
cleanup = func() error {
err = errs.Combine(err,
project.Close(),
bucket.Close(),
)
return err
}
downloader, err := bucket.NewReader(ctx, path)
return downloader, cleanup, err
}
// Delete deletes an object at the path in a bucket
func (client *Uplink) Delete(ctx context.Context, satellite *satellite.Peer, bucketName string, path storj.Path) error {
project, bucket, err := client.GetProjectAndBucket(ctx, satellite, bucketName, client.GetConfig(satellite))
if err != nil { if err != nil {
return err return err
} }
return errs.Combine( defer func() { err = errs.Combine(err, project.Close(), bucket.Close()) }()
metainfo.DeleteObject(ctx, bucket, path),
cleanup(), err = bucket.DeleteObject(ctx, path)
) if err != nil {
return err
}
return nil
}
// CreateBucket creates a new bucket
func (client *Uplink) CreateBucket(ctx context.Context, satellite *satellite.Peer, bucketName string) error {
project, err := client.GetProject(ctx, satellite)
if err != nil {
return err
}
defer func() { err = errs.Combine(err, project.Close()) }()
clientCfg := client.GetConfig(satellite)
bucketCfg := &libuplink.BucketConfig{}
bucketCfg.PathCipher = clientCfg.GetPathCipherSuite()
bucketCfg.EncryptionParameters = clientCfg.GetEncryptionParameters()
bucketCfg.Volatile.RedundancyScheme = clientCfg.GetRedundancyScheme()
bucketCfg.Volatile.SegmentsSize = clientCfg.GetSegmentSize()
_, err = project.CreateBucket(ctx, bucketName, bucketCfg)
if err != nil {
return err
}
return nil
} }
// GetConfig returns a default config for a given satellite. // GetConfig returns a default config for a given satellite.
func (uplink *Uplink) GetConfig(satellite *satellite.Peer) uplink.Config { func (client *Uplink) GetConfig(satellite *satellite.Peer) uplink.Config {
config := getDefaultConfig() config := getDefaultConfig()
config.Client.SatelliteAddr = satellite.Addr() config.Client.SatelliteAddr = satellite.Addr()
config.Client.APIKey = uplink.APIKey[satellite.ID()] config.Client.APIKey = client.APIKey[satellite.ID()]
config.Client.RequestTimeout = 10 * time.Second config.Client.RequestTimeout = 10 * time.Second
config.Client.DialTimeout = 10 * time.Second config.Client.DialTimeout = 10 * time.Second
config.RS.MinThreshold = atLeastOne(uplink.StorageNodeCount * 1 / 5) // 20% of storage nodes config.RS.MinThreshold = atLeastOne(client.StorageNodeCount * 1 / 5) // 20% of storage nodes
config.RS.RepairThreshold = atLeastOne(uplink.StorageNodeCount * 2 / 5) // 40% of storage nodes config.RS.RepairThreshold = atLeastOne(client.StorageNodeCount * 2 / 5) // 40% of storage nodes
config.RS.SuccessThreshold = atLeastOne(uplink.StorageNodeCount * 3 / 5) // 60% of storage nodes config.RS.SuccessThreshold = atLeastOne(client.StorageNodeCount * 3 / 5) // 60% of storage nodes
config.RS.MaxThreshold = atLeastOne(uplink.StorageNodeCount * 4 / 5) // 80% of storage nodes config.RS.MaxThreshold = atLeastOne(client.StorageNodeCount * 4 / 5) // 80% of storage nodes
config.TLS.UsePeerCAWhitelist = false config.TLS.UsePeerCAWhitelist = false
config.TLS.Extensions.Revocation = false config.TLS.Extensions.Revocation = false
@@ -342,75 +326,91 @@ func atLeastOne(value int) int {
return value return value
} }
// DialMetainfo returns a metainfo and streams store for the given configuration and identity. // NewLibuplink creates a libuplink.Uplink object with the testplanet Uplink config
func DialMetainfo(ctx context.Context, log *zap.Logger, config uplink.Config, identity *identity.FullIdentity) (db storj.Metainfo, ss streams.Store, cleanup func() error, err error) { func (client *Uplink) NewLibuplink(ctx context.Context) (*libuplink.Uplink, error) {
tlsOpts, err := tlsopts.NewOptions(identity, config.TLS) config := getDefaultConfig()
libuplinkCfg := &libuplink.Config{}
libuplinkCfg.Volatile.MaxInlineSize = config.Client.MaxInlineSize
libuplinkCfg.Volatile.MaxMemory = config.RS.MaxBufferMem
libuplinkCfg.Volatile.PeerIDVersion = config.TLS.PeerIDVersions
libuplinkCfg.Volatile.TLS.SkipPeerCAWhitelist = !config.TLS.UsePeerCAWhitelist
libuplinkCfg.Volatile.TLS.PeerCAWhitelistPath = config.TLS.PeerCAWhitelistPath
libuplinkCfg.Volatile.DialTimeout = config.Client.DialTimeout
libuplinkCfg.Volatile.RequestTimeout = config.Client.RequestTimeout
return libuplink.NewUplink(ctx, libuplinkCfg)
}
// GetProject returns a libuplink.Project which allows interactions with a specific project
func (client *Uplink) GetProject(ctx context.Context, satellite *satellite.Peer) (*libuplink.Project, error) {
testLibuplink, err := client.NewLibuplink(ctx)
if err != nil { if err != nil {
return nil, nil, cleanup, err return nil, err
} }
defer func() { err = errs.Combine(err, testLibuplink.Close()) }()
// ToDo: Handle Versioning for Uplinks here clientAPIKey := client.APIKey[satellite.ID()]
key, err := libuplink.ParseAPIKey(clientAPIKey)
tc := transport.NewClientWithTimeouts(tlsOpts, transport.Timeouts{
Request: config.Client.RequestTimeout,
Dial: config.Client.DialTimeout,
})
if config.Client.SatelliteAddr == "" {
return nil, nil, cleanup, errs.New("satellite address not specified")
}
m, err := metainfo.Dial(ctx, tc, config.Client.SatelliteAddr, config.Client.APIKey)
if err != nil { if err != nil {
return nil, nil, cleanup, errs.New("failed to connect to metainfo service: %v", err) return nil, err
} }
project, err := testLibuplink.OpenProject(ctx, satellite.Addr(), key)
if err != nil {
return nil, err
}
return project, nil
}
// GetProjectAndBucket returns a libuplink.Project and Bucket which allows interactions with a specific project and its buckets
func (client *Uplink) GetProjectAndBucket(ctx context.Context, satellite *satellite.Peer, bucketName string, clientCfg uplink.Config) (_ *libuplink.Project, _ *libuplink.Bucket, err error) {
project, err := client.GetProject(ctx, satellite)
if err != nil {
return nil, nil, err
}
defer func() { defer func() {
if err != nil { if err != nil {
// close metainfo if any of the setup fails err = errs.Combine(err, project.Close())
err = errs.Combine(err, m.Close())
} }
}() }()
project, err := kvmetainfo.SetupProject(m) access, err := setup.LoadEncryptionAccess(ctx, clientCfg.Enc)
if err != nil { if err != nil {
return nil, nil, cleanup, errs.New("failed to create project: %v", err) return nil, nil, err
} }
ec := ecclient.NewClient(log.Named("ecclient"), tc, config.RS.MaxBufferMem.Int()) // Check if the bucket exists, if not then create it
fc, err := infectious.NewFEC(config.RS.MinThreshold, config.RS.MaxThreshold) _, _, err = project.GetBucketInfo(ctx, bucketName)
if err != nil { if err != nil {
return nil, nil, cleanup, errs.New("failed to create erasure coding client: %v", err) if storj.ErrBucketNotFound.Has(err) {
err := createBucket(ctx, clientCfg, *project, bucketName)
if err != nil {
return nil, nil, err
}
} else {
return nil, nil, err
}
} }
rs, err := eestream.NewRedundancyStrategy(eestream.NewRSScheme(fc, config.RS.ErasureShareSize.Int()), config.RS.RepairThreshold, config.RS.SuccessThreshold)
bucket, err := project.OpenBucket(ctx, bucketName, access)
if err != nil { if err != nil {
return nil, nil, cleanup, errs.New("failed to create redundancy strategy: %v", err) return nil, nil, err
} }
maxEncryptedSegmentSize, err := encryption.CalcEncryptedSize(config.Client.SegmentSize.Int64(), config.GetEncryptionParameters()) return project, bucket, nil
if err != nil { }
return nil, nil, cleanup, errs.New("failed to calculate max encrypted segment size: %v", err)
} func createBucket(ctx context.Context, config uplink.Config, project libuplink.Project, bucketName string) error {
segment := segments.NewSegmentStore(m, ec, rs, config.Client.MaxInlineSize.Int(), maxEncryptedSegmentSize) bucketCfg := &libuplink.BucketConfig{}
bucketCfg.PathCipher = config.GetPathCipherSuite()
blockSize := config.GetEncryptionParameters().BlockSize bucketCfg.EncryptionParameters = config.GetEncryptionParameters()
if int(blockSize)%config.RS.ErasureShareSize.Int()*config.RS.MinThreshold != 0 { bucketCfg.Volatile.RedundancyScheme = config.GetRedundancyScheme()
err = errs.New("EncryptionBlockSize must be a multiple of ErasureShareSize * RS MinThreshold") bucketCfg.Volatile.SegmentsSize = config.GetSegmentSize()
return nil, nil, cleanup, err
} _, err := project.CreateBucket(ctx, bucketName, bucketCfg)
if err != nil {
// TODO(jeff): there's some cycles with libuplink and this package in the libuplink tests return err
// and so this package can't import libuplink. that's why this function is duplicated }
// in some spots. return nil
encStore := encryption.NewStore()
encStore.SetDefaultKey(new(storj.Key))
strms, err := streams.NewStreamStore(segment, config.Client.SegmentSize.Int64(), encStore,
int(blockSize), storj.CipherSuite(config.Enc.DataType), config.Client.MaxInlineSize.Int(),
)
if err != nil {
return nil, nil, cleanup, errs.New("failed to create stream store: %v", err)
}
return kvmetainfo.New(project, m, strms, segment, encStore), strms, m.Close, nil
} }

View File

@@ -233,17 +233,17 @@ func TestServiceList(t *testing.T) {
assert.NoError(t, err) assert.NoError(t, err)
} }
config := planet.Uplinks[0].GetConfig(planet.Satellites[0])
metainfo, _, cleanup, err := testplanet.DialMetainfo(ctx, planet.Uplinks[0].Log.Named("metainfo"), config, planet.Uplinks[0].Identity)
require.NoError(t, err)
defer ctx.Check(cleanup)
type Test struct { type Test struct {
Request storj.ListOptions Request storj.ListOptions
Expected storj.ObjectList // objects are partial Expected storj.ObjectList // objects are partial
} }
list, err := metainfo.ListObjects(ctx, "testbucket", storj.ListOptions{Recursive: true, Direction: storj.After}) config := planet.Uplinks[0].GetConfig(planet.Satellites[0])
project, bucket, err := planet.Uplinks[0].GetProjectAndBucket(ctx, planet.Satellites[0], "testbucket", config)
require.NoError(t, err)
defer ctx.Check(bucket.Close)
defer ctx.Check(project.Close)
list, err := bucket.ListObjects(ctx, &storj.ListOptions{Recursive: true, Direction: storj.After})
require.NoError(t, err) require.NoError(t, err)
expected := []storj.Object{ expected := []storj.Object{
@@ -265,7 +265,7 @@ func TestServiceList(t *testing.T) {
require.Equal(t, item.IsPrefix, list.Items[i].IsPrefix) require.Equal(t, item.IsPrefix, list.Items[i].IsPrefix)
} }
list, err = metainfo.ListObjects(ctx, "testbucket", storj.ListOptions{Recursive: false, Direction: storj.After}) list, err = bucket.ListObjects(ctx, &storj.ListOptions{Recursive: false, Direction: storj.After})
require.NoError(t, err) require.NoError(t, err)
expected = []storj.Object{ expected = []storj.Object{
@@ -641,12 +641,7 @@ func TestSetAttribution(t *testing.T) {
apiKey := planet.Uplinks[0].APIKey[planet.Satellites[0].ID()] apiKey := planet.Uplinks[0].APIKey[planet.Satellites[0].ID()]
uplink := planet.Uplinks[0] uplink := planet.Uplinks[0]
config := uplink.GetConfig(planet.Satellites[0]) err := uplink.CreateBucket(ctx, planet.Satellites[0], "alpha")
metainfo, _, cleanup, err := testplanet.DialMetainfo(ctx, uplink.Log.Named("metainfo"), config, uplink.Identity)
require.NoError(t, err)
defer ctx.Check(cleanup)
_, err = metainfo.CreateBucket(ctx, "alpha", &storj.Bucket{PathCipher: config.GetEncryptionParameters().CipherSuite})
require.NoError(t, err) require.NoError(t, err)
metainfoClient, err := planet.Uplinks[0].DialMetainfo(ctx, planet.Satellites[0], apiKey) metainfoClient, err := planet.Uplinks[0].DialMetainfo(ctx, planet.Satellites[0], apiKey)

View File

@@ -24,21 +24,10 @@ func TestIterate(t *testing.T) {
saPeer := planet.Satellites[0] saPeer := planet.Satellites[0]
uplinkPeer := planet.Uplinks[0] uplinkPeer := planet.Uplinks[0]
apiKey := uplinkPeer.APIKey[saPeer.ID()]
metainfoClient, err := uplinkPeer.DialMetainfo(ctx, saPeer, apiKey)
require.NoError(t, err)
projects, err := saPeer.DB.Console().Projects().GetAll(ctx)
require.NoError(t, err)
projectID := projects[0].ID
// Setup: create 2 test buckets // Setup: create 2 test buckets
test1 := newTestBucket("test1", projectID) err := uplinkPeer.CreateBucket(ctx, saPeer, "test1")
_, err = metainfoClient.CreateBucket(ctx, test1)
require.NoError(t, err) require.NoError(t, err)
err = uplinkPeer.CreateBucket(ctx, saPeer, "test2")
test2 := newTestBucket("test2", projectID)
_, err = metainfoClient.CreateBucket(ctx, test2)
require.NoError(t, err) require.NoError(t, err)
// Setup: upload an object in one of the buckets // Setup: upload an object in one of the buckets

View File

@@ -66,7 +66,7 @@ func TestUploadAndPartialDownload(t *testing.T) {
download, cleanup, err := planet.Uplinks[0].DownloadStream(ctx, planet.Satellites[0], "testbucket", "test/path") download, cleanup, err := planet.Uplinks[0].DownloadStream(ctx, planet.Satellites[0], "testbucket", "test/path")
require.NoError(t, err) require.NoError(t, err)
defer ctx.Check(cleanup)
pos, err := download.Seek(tt.offset, io.SeekStart) pos, err := download.Seek(tt.offset, io.SeekStart)
require.NoError(t, err) require.NoError(t, err)
assert.Equal(t, pos, tt.offset) assert.Equal(t, pos, tt.offset)
@@ -79,7 +79,6 @@ func TestUploadAndPartialDownload(t *testing.T) {
assert.Equal(t, expectedData[tt.offset:tt.offset+tt.size], data) assert.Equal(t, expectedData[tt.offset:tt.offset+tt.size], data)
require.NoError(t, download.Close()) require.NoError(t, download.Close())
require.NoError(t, cleanup())
} }
var totalBandwidthUsage bandwidth.Usage var totalBandwidthUsage bandwidth.Usage