satellite/orders: get bucketID from encrypted metadata in order instead of serial_numbers table

We want to stop using the serial_numbers table in satelliteDB. One of the last places still using the serial_numbers table is when storagenodes settle orders: we look up the bucket name and project ID for the order's serial number in the serial_numbers table.

Now that we have support to add encrypted metadata into the OrderLimit, this PR makes use of that and now attempts to read the project ID and bucket name from the encrypted orderLimit metadata instead of from the serial_numbers table. For backwards compatibility and to ensure no errors, we will still fallback to the old way of getting that info from the serial_numbers table, but this will be removed in the next release as long as there are no errors.

All processes that create orderLimits must have an orders.encryption-keys set. The services that create orderLimits (and thus need to encrypt the order metadata) are the satellite apiProcess, the repair process, audit service (core process), and graceful exit (core process). Only the satellite api process decrypts the order metadata when storagenodes settle orders. This means that the same encryption key needs to be provided in the config for the satellite api process, repair process, and the core process like so:
orders.include-encrypted-metadata=true
orders.encryption-keys="<encryptionKeyID>=<encryptionKey>"

Change-Id: Ie2c037971713d6fbf69d697bfad7f8b672eedd66
This commit is contained in:
Jessica Grebenschikov 2020-11-18 13:39:13 -08:00 committed by Jess G
parent 70b91aac54
commit b261110352
11 changed files with 191 additions and 41 deletions

View File

@ -355,6 +355,8 @@ func newNetwork(flags *Flags) (*Processes, error) {
apiProcess.Arguments["setup"] = append(apiProcess.Arguments["setup"], apiProcess.Arguments["setup"] = append(apiProcess.Arguments["setup"],
"--database", masterDBURL, "--database", masterDBURL,
"--metainfo.database-url", metainfoDBURL, "--metainfo.database-url", metainfoDBURL,
"--orders.include-encrypted-metadata=true",
"--orders.encryption-keys", "0100000000000000=0100000000000000000000000000000000000000000000000000000000000000",
) )
} }
apiProcess.ExecBefore["run"] = func(process *Process) error { apiProcess.ExecBefore["run"] = func(process *Process) error {
@ -392,6 +394,8 @@ func newNetwork(flags *Flags) (*Processes, error) {
coreProcess.Arguments = withCommon(apiProcess.Directory, Arguments{ coreProcess.Arguments = withCommon(apiProcess.Directory, Arguments{
"run": { "run": {
"--debug.addr", net.JoinHostPort(host, port(satellitePeer, i, debugPeerHTTP)), "--debug.addr", net.JoinHostPort(host, port(satellitePeer, i, debugPeerHTTP)),
"--orders.include-encrypted-metadata=true",
"--orders.encryption-keys", "0100000000000000=0100000000000000000000000000000000000000000000000000000000000000",
}, },
}) })
coreProcess.WaitForExited(migrationProcess) coreProcess.WaitForExited(migrationProcess)
@ -419,6 +423,8 @@ func newNetwork(flags *Flags) (*Processes, error) {
"run": { "run": {
"repair", "repair",
"--debug.addr", net.JoinHostPort(host, port(satellitePeer, i, debugRepairerHTTP)), "--debug.addr", net.JoinHostPort(host, port(satellitePeer, i, debugRepairerHTTP)),
"--orders.include-encrypted-metadata=true",
"--orders.encryption-keys", "0100000000000000=0100000000000000000000000000000000000000000000000000000000000000",
}, },
}) })
repairProcess.WaitForExited(migrationProcess) repairProcess.WaitForExited(migrationProcess)

2
go.sum
View File

@ -905,8 +905,6 @@ storj.io/common v0.0.0-20201026135900-1aaeec90670b/go.mod h1:GqdmNf3fLm2UZX/7Zr0
storj.io/common v0.0.0-20201124202331-31c1d1dc486d h1:QTXYMePGSEAtNbweZifHkMQstrRHkviGaKtueOWPmOU= storj.io/common v0.0.0-20201124202331-31c1d1dc486d h1:QTXYMePGSEAtNbweZifHkMQstrRHkviGaKtueOWPmOU=
storj.io/common v0.0.0-20201124202331-31c1d1dc486d/go.mod h1:ocAfQaE1dpflrdTr8hXRZTWP1bq2jXz7ieGSBVCmHEc= storj.io/common v0.0.0-20201124202331-31c1d1dc486d/go.mod h1:ocAfQaE1dpflrdTr8hXRZTWP1bq2jXz7ieGSBVCmHEc=
storj.io/drpc v0.0.11/go.mod h1:TiFc2obNjL9/3isMW1Rpxjy8V9uE0B2HMeMFGiiI7Iw= storj.io/drpc v0.0.11/go.mod h1:TiFc2obNjL9/3isMW1Rpxjy8V9uE0B2HMeMFGiiI7Iw=
storj.io/drpc v0.0.11/go.mod h1:TiFc2obNjL9/3isMW1Rpxjy8V9uE0B2HMeMFGiiI7Iw=
storj.io/drpc v0.0.14 h1:GCBdymTt1BRw4oHmmUZZlxYXLVRxxYj6x3Ivide2J+I=
storj.io/drpc v0.0.14/go.mod h1:82nfl+6YwRwF6UG31cEWWUqv/FaKvP5SGqUvoqTxCMA= storj.io/drpc v0.0.14/go.mod h1:82nfl+6YwRwF6UG31cEWWUqv/FaKvP5SGqUvoqTxCMA=
storj.io/drpc v0.0.16 h1:9sxypc5lKi/0D69cR21BR0S21+IvXfON8L5nXMVNTwQ= storj.io/drpc v0.0.16 h1:9sxypc5lKi/0D69cR21BR0S21+IvXfON8L5nXMVNTwQ=
storj.io/drpc v0.0.16/go.mod h1:zdmQ93nx4Z35u11pQ+GAnBy4DGOK3HJCSOfeh2RryTo= storj.io/drpc v0.0.16/go.mod h1:zdmQ93nx4Z35u11pQ+GAnBy4DGOK3HJCSOfeh2RryTo=

View File

@ -390,7 +390,13 @@ func (planet *Planet) newSatellite(ctx context.Context, prefix string, index int
if err != nil { if err != nil {
return nil, err return nil, err
} }
planet.databases = append(planet.databases, redis) encryptionKeys, err := orders.NewEncryptionKeys(orders.EncryptionKey{
ID: orders.EncryptionKeyID{1},
Key: storj.Key{1},
})
if err != nil {
return nil, err
}
config := satellite.Config{ config := satellite.Config{
Server: server.Config{ Server: server.Config{
@ -499,6 +505,8 @@ func (planet *Planet) newSatellite(ctx context.Context, prefix string, index int
FlushInterval: defaultInterval, FlushInterval: defaultInterval,
NodeStatusLogging: true, NodeStatusLogging: true,
WindowEndpointRolloutPhase: orders.WindowEndpointRolloutPhase3, WindowEndpointRolloutPhase: orders.WindowEndpointRolloutPhase3,
IncludeEncryptedMetadata: true,
EncryptionKeys: *encryptionKeys,
}, },
Checker: checker.Config{ Checker: checker.Config{
Interval: defaultInterval, Interval: defaultInterval,

View File

@ -170,13 +170,15 @@ func TestBilling_TrafficAfterFileDeletion(t *testing.T) {
uplink = planet.Uplinks[0] uplink = planet.Uplinks[0]
projectID = uplink.Projects[0].ID projectID = uplink.Projects[0].ID
) )
err := planet.Uplinks[0].CreateBucket(ctx, planet.Satellites[0], bucketName)
require.NoError(t, err)
// stop any async flushes because we want to be sure when some values are // stop any async flushes because we want to be sure when some values are
// written to avoid races // written to avoid races
satelliteSys.Orders.Chore.Loop.Pause() satelliteSys.Orders.Chore.Loop.Pause()
data := testrand.Bytes(5 * memory.KiB) data := testrand.Bytes(5 * memory.KiB)
err := uplink.Upload(ctx, satelliteSys, bucketName, filePath, data) err = uplink.Upload(ctx, satelliteSys, bucketName, filePath, data)
require.NoError(t, err) require.NoError(t, err)
_, err = uplink.Download(ctx, satelliteSys, bucketName, filePath) _, err = uplink.Download(ctx, satelliteSys, bucketName, filePath)

View File

@ -325,17 +325,6 @@ func NewAPI(log *zap.Logger, full *identity.FullIdentity, db DB,
}) })
peer.Debug.Server.Panel.Add( peer.Debug.Server.Panel.Add(
debug.Cycle("Orders Chore", peer.Orders.Chore.Loop)) debug.Cycle("Orders Chore", peer.Orders.Chore.Loop))
satelliteSignee := signing.SigneeFromPeerIdentity(peer.Identity.PeerIdentity())
peer.Orders.Endpoint = orders.NewEndpoint(
peer.Log.Named("orders:endpoint"),
satelliteSignee,
peer.Orders.DB,
peer.DB.NodeAPIVersion(),
config.Orders.SettlementBatchSize,
config.Orders.WindowEndpointRolloutPhase,
config.Orders.OrdersSemaphoreSize,
)
var err error var err error
peer.Orders.Service, err = orders.NewService( peer.Orders.Service, err = orders.NewService(
peer.Log.Named("orders:service"), peer.Log.Named("orders:service"),
@ -352,6 +341,19 @@ func NewAPI(log *zap.Logger, full *identity.FullIdentity, db DB,
if err != nil { if err != nil {
return nil, errs.Combine(err, peer.Close()) return nil, errs.Combine(err, peer.Close())
} }
satelliteSignee := signing.SigneeFromPeerIdentity(peer.Identity.PeerIdentity())
peer.Orders.Endpoint = orders.NewEndpoint(
peer.Log.Named("orders:endpoint"),
satelliteSignee,
peer.Orders.DB,
peer.DB.NodeAPIVersion(),
config.Orders.SettlementBatchSize,
config.Orders.WindowEndpointRolloutPhase,
config.Orders.OrdersSemaphoreSize,
peer.Orders.Service,
)
if err := pb.DRPCRegisterOrders(peer.Server.DRPC(), peer.Orders.Endpoint); err != nil { if err := pb.DRPCRegisterOrders(peer.Server.DRPC(), peer.Orders.Endpoint); err != nil {
return nil, errs.Combine(err, peer.Close()) return nil, errs.Combine(err, peer.Close())
} }

View File

@ -211,13 +211,14 @@ type Endpoint struct {
settlementBatchSize int settlementBatchSize int
windowEndpointRolloutPhase WindowEndpointRolloutPhase windowEndpointRolloutPhase WindowEndpointRolloutPhase
ordersSemaphore chan struct{} ordersSemaphore chan struct{}
ordersService *Service
} }
// NewEndpoint new orders receiving endpoint. // NewEndpoint new orders receiving endpoint.
// //
// ordersSemaphoreSize controls the number of concurrent clients allowed to submit orders at once. // ordersSemaphoreSize controls the number of concurrent clients allowed to submit orders at once.
// A value of zero means unlimited. // A value of zero means unlimited.
func NewEndpoint(log *zap.Logger, satelliteSignee signing.Signee, db DB, nodeAPIVersionDB nodeapiversion.DB, settlementBatchSize int, windowEndpointRolloutPhase WindowEndpointRolloutPhase, ordersSemaphoreSize int) *Endpoint { func NewEndpoint(log *zap.Logger, satelliteSignee signing.Signee, db DB, nodeAPIVersionDB nodeapiversion.DB, settlementBatchSize int, windowEndpointRolloutPhase WindowEndpointRolloutPhase, ordersSemaphoreSize int, ordersService *Service) *Endpoint {
var ordersSemaphore chan struct{} var ordersSemaphore chan struct{}
if ordersSemaphoreSize > 0 { if ordersSemaphoreSize > 0 {
ordersSemaphore = make(chan struct{}, ordersSemaphoreSize) ordersSemaphore = make(chan struct{}, ordersSemaphoreSize)
@ -231,6 +232,7 @@ func NewEndpoint(log *zap.Logger, satelliteSignee signing.Signee, db DB, nodeAPI
settlementBatchSize: settlementBatchSize, settlementBatchSize: settlementBatchSize,
windowEndpointRolloutPhase: windowEndpointRolloutPhase, windowEndpointRolloutPhase: windowEndpointRolloutPhase,
ordersSemaphore: ordersSemaphore, ordersSemaphore: ordersSemaphore,
ordersService: ordersService,
} }
} }
@ -642,22 +644,56 @@ func (endpoint *Endpoint) SettlementWithWindowFinal(stream pb.DRPCOrders_Settlem
storagenodeSettled[int32(orderLimit.Action)] += order.Amount storagenodeSettled[int32(orderLimit.Action)] += order.Amount
bucketPrefix, err := endpoint.DB.GetBucketIDFromSerialNumber(ctx, serialNum) var bucketName string
if err != nil { var projectID uuid.UUID
log.Info("get bucketPrefix from serial number table err", zap.Error(err)) if len(orderLimit.EncryptedMetadata) > 0 {
continue metadata, err := endpoint.ordersService.DecryptOrderMetadata(ctx, orderLimit)
if err != nil {
log.Info("decrypt order metadata err:", zap.Error(err))
mon.Event("bucketinfo_from_orders_metadata_error_1")
goto idFromSerialTable
}
bucketInfo, err := metabase.ParseBucketPrefix(
metabase.BucketPrefix(metadata.GetProjectBucketPrefix()),
)
if err != nil {
log.Info("decrypt order: ParseBucketPrefix", zap.Error(err))
mon.Event("bucketinfo_from_orders_metadata_error_2")
goto idFromSerialTable
}
bucketName = bucketInfo.BucketName
projectID = bucketInfo.ProjectID
mon.Event("bucketinfo_from_orders_metadata")
} }
bucket, err := metabase.ParseBucketPrefix(metabase.BucketPrefix(bucketPrefix))
if err != nil { // If we cannot get the bucket name and project ID from the orderLimit metadata, then fallback
log.Info("split bucket err", zap.Error(err), zap.String("bucketPrefix", string(bucketPrefix))) // to the old method of getting it from the serial_numbers table.
continue // This is only temporary to make sure the orderLimit metadata is working correctly.
idFromSerialTable:
if bucketName == "" || projectID.IsZero() {
bucketPrefix, err := endpoint.DB.GetBucketIDFromSerialNumber(ctx, serialNum)
if err != nil {
log.Info("get bucketPrefix from serial number table err", zap.Error(err))
continue
}
bucket, err := metabase.ParseBucketPrefix(metabase.BucketPrefix(bucketPrefix))
if err != nil {
log.Info("split bucket err", zap.Error(err), zap.String("bucketPrefix", string(bucketPrefix)))
continue
}
bucketName = bucket.BucketName
projectID = bucket.ProjectID
mon.Event("bucketinfo_from_serial_number")
} }
bucketSettled[bucketIDAction{ bucketSettled[bucketIDAction{
bucketname: bucket.BucketName, bucketname: bucketName,
projectID: bucket.ProjectID, projectID: projectID,
action: orderLimit.Action, action: orderLimit.Action,
}] += order.Amount }] += order.Amount
} }
if len(storagenodeSettled) == 0 { if len(storagenodeSettled) == 0 {
log.Debug("no orders were successfully processed", zap.Int("received count", receivedCount)) log.Debug("no orders were successfully processed", zap.Int("received count", receivedCount))
status = pb.SettlementWithWindowResponse_REJECTED status = pb.SettlementWithWindowResponse_REJECTED

View File

@ -22,8 +22,12 @@ import (
"storj.io/uplink/private/eestream" "storj.io/uplink/private/eestream"
) )
// ErrDownloadFailedNotEnoughPieces is returned when download failed due to missing pieces. var (
var ErrDownloadFailedNotEnoughPieces = errs.Class("not enough pieces for download") // ErrDownloadFailedNotEnoughPieces is returned when download failed due to missing pieces.
ErrDownloadFailedNotEnoughPieces = errs.Class("not enough pieces for download")
// ErrDecryptOrderMetadata is returned when a step of decrypting metadata fails.
ErrDecryptOrderMetadata = errs.Class("decrytping order metadata")
)
// Config is a configuration struct for orders Service. // Config is a configuration struct for orders Service.
type Config struct { type Config struct {
@ -604,3 +608,24 @@ func (service *Service) UpdatePutInlineOrder(ctx context.Context, bucket metabas
return service.orders.UpdateBucketBandwidthInline(ctx, bucket.ProjectID, []byte(bucket.BucketName), pb.PieceAction_PUT, amount, intervalStart) return service.orders.UpdateBucketBandwidthInline(ctx, bucket.ProjectID, []byte(bucket.BucketName), pb.PieceAction_PUT, amount, intervalStart)
} }
// DecryptOrderMetadata decrypts the encrypted metadata attached to an order limit.
//
// The decryption key is chosen by the key ID embedded in the order limit:
// the service's default key is used when its ID matches, otherwise the key
// material is looked up in the configured key set. ErrDecryptOrderMetadata
// is returned when no configured key matches.
func (service *Service) DecryptOrderMetadata(ctx context.Context, order *pb.OrderLimit) (_ *pb.OrderLimitMetadata, err error) {
	defer mon.Task()(&ctx)(&err)

	var keyID EncryptionKeyID
	copy(keyID[:], order.EncryptedMetadataKeyId)

	decryptKey := service.encryptionKeys.Default
	if decryptKey.ID != keyID {
		raw, found := service.encryptionKeys.KeyByID[keyID]
		if !found {
			return nil, ErrDecryptOrderMetadata.New("no encryption key found that matches the order.EncryptedMetadataKeyId")
		}
		decryptKey = EncryptionKey{
			ID:  keyID,
			Key: raw,
		}
	}

	return decryptKey.DecryptMetadata(order.SerialNumber, order.EncryptedMetadata)
}

View File

@ -0,0 +1,70 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.
package orders_test
import (
"testing"
"github.com/stretchr/testify/require"
"storj.io/common/memory"
"storj.io/common/testcontext"
"storj.io/common/testrand"
"storj.io/storj/private/testplanet"
"storj.io/storj/satellite/metainfo/metabase"
)
// TestOrderLimitsEncryptedMetadata verifies that the bucket name and project ID
// recovered from an order limit's encrypted metadata match the values returned
// by the legacy serial_numbers table lookup.
func TestOrderLimitsEncryptedMetadata(t *testing.T) {
	testplanet.Run(t, testplanet.Config{
		SatelliteCount: 1, StorageNodeCount: 4, UplinkCount: 1,
	}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
		const (
			bucketName = "testbucket"
			filePath   = "test/path"
		)
		var (
			satellitePeer = planet.Satellites[0]
			uplinkPeer    = planet.Uplinks[0]
			projectID     = uplinkPeer.Projects[0].ID
		)
		// Setup: Upload an object and create order limits
		require.NoError(t, uplinkPeer.Upload(ctx, satellitePeer, bucketName, filePath, testrand.Bytes(5*memory.KiB)))
		bucket := metabase.BucketLocation{ProjectID: projectID, BucketName: bucketName}
		// The upload above produced exactly one segment; fetch its pointer so
		// we can create GET order limits for it.
		items, _, err := satellitePeer.Metainfo.Service.List(ctx, metabase.SegmentKey{}, "", true, 10, ^uint32(0))
		require.NoError(t, err)
		require.Equal(t, 1, len(items))
		pointer, err := satellitePeer.Metainfo.Service.Get(ctx, metabase.SegmentKey(items[0].Path))
		require.NoError(t, err)
		limits, _, err := satellitePeer.Orders.Service.CreateGetOrderLimits(ctx, bucket, pointer)
		require.NoError(t, err)
		require.Equal(t, 2, len(limits))
		// Test: get the bucket name and project ID from the encrypted metadata and
		// compare with the old method of getting the data from the serial numbers table.
		orderLimit1 := limits[0].Limit
		require.True(t, len(orderLimit1.EncryptedMetadata) > 0)
		// Sanity checks: an empty and a nil bucket prefix must both fail to parse.
		_, err = metabase.ParseBucketPrefix(metabase.BucketPrefix(""))
		require.Error(t, err)
		var x []byte
		_, err = metabase.ParseBucketPrefix(metabase.BucketPrefix(x))
		require.Error(t, err)
		// Decrypt the metadata embedded in the order limit and parse out the
		// project/bucket information.
		actualOrderMetadata, err := satellitePeer.Orders.Service.DecryptOrderMetadata(ctx, orderLimit1)
		require.NoError(t, err)
		actualBucketInfo, err := metabase.ParseBucketPrefix(
			metabase.BucketPrefix(actualOrderMetadata.GetProjectBucketPrefix()),
		)
		require.NoError(t, err)
		require.Equal(t, bucketName, actualBucketInfo.BucketName)
		require.Equal(t, projectID, actualBucketInfo.ProjectID)
		// Legacy path: the serial_numbers table lookup must agree with the
		// values recovered from the encrypted metadata.
		bucketPrefix, err := satellitePeer.Orders.DB.GetBucketIDFromSerialNumber(ctx, orderLimit1.SerialNumber)
		require.NoError(t, err)
		bucket1, err := metabase.ParseBucketPrefix(metabase.BucketPrefix(bucketPrefix))
		require.NoError(t, err)
		require.Equal(t, actualBucketInfo.BucketName, bucket1.BucketName)
		require.Equal(t, actualBucketInfo.ProjectID, bucket1.ProjectID)
	})
}

View File

@ -146,15 +146,10 @@ func (signer *Signer) Sign(ctx context.Context, node storj.NodeURL, pieceNum int
return nil, ErrSigner.New("default encryption key is missing") return nil, ErrSigner.New("default encryption key is missing")
} }
bucketID, err := signer.Service.buckets.GetBucketID(ctx, signer.Bucket)
if err != nil {
return nil, ErrSigner.Wrap(err)
}
encrypted, err := encryptionKey.EncryptMetadata( encrypted, err := encryptionKey.EncryptMetadata(
signer.Serial, signer.Serial,
&pb.OrderLimitMetadata{ &pb.OrderLimitMetadata{
BucketId: bucketID[:], ProjectBucketPrefix: []byte(signer.Bucket.Prefix()),
}, },
) )
if err != nil { if err != nil {

View File

@ -42,10 +42,10 @@ func TestSigner_EncryptedMetadata(t *testing.T) {
project, err := uplink.GetProject(ctx, satellite) project, err := uplink.GetProject(ctx, satellite)
require.NoError(t, err) require.NoError(t, err)
bucketName := "testbucket"
bucketLocation := metabase.BucketLocation{ bucketLocation := metabase.BucketLocation{
ProjectID: uplink.Projects[0].ID, ProjectID: uplink.Projects[0].ID,
BucketName: "testbucket", BucketName: bucketName,
} }
_, err = project.EnsureBucket(ctx, bucketLocation.BucketName) _, err = project.EnsureBucket(ctx, bucketLocation.BucketName)
@ -71,10 +71,10 @@ func TestSigner_EncryptedMetadata(t *testing.T) {
metadata, err := ekeys.Default.DecryptMetadata(addressedLimit.Limit.SerialNumber, addressedLimit.Limit.EncryptedMetadata) metadata, err := ekeys.Default.DecryptMetadata(addressedLimit.Limit.SerialNumber, addressedLimit.Limit.EncryptedMetadata)
require.NoError(t, err) require.NoError(t, err)
bucketID, err := satellite.DB.Buckets().GetBucketID(ctx, bucketLocation) bucketInfo, err := metabase.ParseBucketPrefix(metabase.BucketPrefix(metadata.ProjectBucketPrefix))
require.NoError(t, err) require.NoError(t, err)
require.Equal(t, bucketInfo.BucketName, bucketName)
require.Equal(t, bucketID[:], metadata.BucketId) require.Equal(t, bucketInfo.ProjectID, uplink.Projects[0].ID)
}) })
} }

View File

@ -34,9 +34,13 @@ func TestOrderDBSettle(t *testing.T) {
service.Sender.Pause() service.Sender.Pause()
service.Cleanup.Pause() service.Cleanup.Pause()
bucketname := "testbucket"
err := planet.Uplinks[0].CreateBucket(ctx, satellite, bucketname)
require.NoError(t, err)
_, orderLimits, piecePrivateKey, err := satellite.Orders.Service.CreatePutOrderLimits( _, orderLimits, piecePrivateKey, err := satellite.Orders.Service.CreatePutOrderLimits(
ctx, ctx,
metabase.BucketLocation{ProjectID: planet.Uplinks[0].Projects[0].ID, BucketName: "testbucket"}, metabase.BucketLocation{ProjectID: planet.Uplinks[0].Projects[0].ID, BucketName: bucketname},
[]*overlay.SelectedNode{ []*overlay.SelectedNode{
{ID: node.ID(), LastIPPort: "fake", Address: new(pb.NodeAddress)}, {ID: node.ID(), LastIPPort: "fake", Address: new(pb.NodeAddress)},
}, },
@ -135,10 +139,14 @@ func TestOrderFileStoreAndDBSettle(t *testing.T) {
service.Cleanup.Pause() service.Cleanup.Pause()
tomorrow := time.Now().Add(24 * time.Hour) tomorrow := time.Now().Add(24 * time.Hour)
bucketname := "testbucket"
err := uplinkPeer.CreateBucket(ctx, satellite, bucketname)
require.NoError(t, err)
// add orders to orders DB // add orders to orders DB
_, orderLimits, piecePrivateKey, err := satellite.Orders.Service.CreatePutOrderLimits( _, orderLimits, piecePrivateKey, err := satellite.Orders.Service.CreatePutOrderLimits(
ctx, ctx,
metabase.BucketLocation{ProjectID: uplinkPeer.Projects[0].ID, BucketName: "testbucket"}, metabase.BucketLocation{ProjectID: uplinkPeer.Projects[0].ID, BucketName: bucketname},
[]*overlay.SelectedNode{ []*overlay.SelectedNode{
{ID: node.ID(), LastIPPort: "fake", Address: new(pb.NodeAddress)}, {ID: node.ID(), LastIPPort: "fake", Address: new(pb.NodeAddress)},
}, },