all: remove code related to PointerDB

Change-Id: I6675c9597f87019020f6233b83ab2f1119d2bc46
Authored by Kaloyan Raev on 2020-12-22 12:38:32 +02:00; committed by Egon Elbre
parent 785adfb849
commit 2ee3030275
21 changed files with 76 additions and 819 deletions
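
To illustrate the overall shape of the change, below is a minimal, self-contained Go sketch. The names metabaseDB and newAPI are invented stand-ins for the real storj.io/storj types, not the actual API: per the diff, process startup no longer opens and migrates a separate pointer database, it only verifies the metabase schema version via CheckVersion before constructing the peer, and the peer constructors (satellite.New, NewAPI, NewGarbageCollection, NewRepairer) lose their pointerDB argument.

package main

import (
	"context"
	"fmt"
	"log"
)

// metabaseDB is a stub standing in for metainfo.MetabaseDB.
type metabaseDB struct{}

// CheckVersion stands in for the schema-version check that replaces the old
// pointerDB.MigrateToLatest / metabaseDB.MigrateToLatest calls in the run commands.
func (metabaseDB) CheckVersion(ctx context.Context) error { return nil }

// newAPI mirrors the new constructor shape: the pointerDB argument is gone.
// Before this commit the call looked like newAPI(db, pointerDB, mdb, ...).
func newAPI(db string, mdb metabaseDB) (string, error) {
	return "satellite-api peer using " + db, nil
}

func main() {
	ctx := context.Background()
	mdb := metabaseDB{}

	// Startup no longer migrates a pointer database; it only checks that the
	// metabase schema is at the expected version.
	if err := mdb.CheckVersion(ctx); err != nil {
		log.Fatalf("failed metabase version check: %+v", err)
	}

	peer, err := newAPI("satellitedb", mdb)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(peer)
}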


@ -44,14 +44,6 @@ func cmdAPIRun(cmd *cobra.Command, args []string) (err error) {
err = errs.Combine(err, db.Close())
}()
pointerDB, err := metainfo.OpenStore(ctx, log.Named("pointerdb"), runCfg.Config.Metainfo.DatabaseURL, "satellite-api")
if err != nil {
return errs.New("Error creating metainfodb connection on satellite api: %+v", err)
}
defer func() {
err = errs.Combine(err, pointerDB.Close())
}()
metabaseDB, err := metainfo.OpenMetabase(ctx, log.Named("metabase"), runCfg.Config.Metainfo.DatabaseURL)
if err != nil {
return errs.New("Error creating metabase connection on satellite api: %+v", err)
@ -87,7 +79,7 @@ func cmdAPIRun(cmd *cobra.Command, args []string) (err error) {
err = errs.Combine(err, rollupsWriteCache.CloseAndFlush(context2.WithoutCancellation(ctx)))
}()
peer, err := satellite.NewAPI(log, identity, db, pointerDB, metabaseDB, revocationDB, accountingCache, rollupsWriteCache, &runCfg.Config, version.Build, process.AtomicLevel(cmd))
peer, err := satellite.NewAPI(log, identity, db, metabaseDB, revocationDB, accountingCache, rollupsWriteCache, &runCfg.Config, version.Build, process.AtomicLevel(cmd))
if err != nil {
return err
}
@ -101,14 +93,10 @@ func cmdAPIRun(cmd *cobra.Command, args []string) (err error) {
log.Warn("Failed to initialize telemetry batcher on satellite api", zap.Error(err))
}
err = pointerDB.MigrateToLatest(ctx)
err = metabaseDB.CheckVersion(ctx)
if err != nil {
return errs.New("Error creating metainfodb tables on satellite api: %+v", err)
}
err = metabaseDB.MigrateToLatest(ctx)
if err != nil {
return errs.New("Error creating metabase tables on satellite api: %+v", err)
log.Error("Failed metabase database version check.", zap.Error(err))
return errs.New("failed metabase version check: %+v", err)
}
err = db.CheckVersion(ctx)


@ -3,11 +3,6 @@ set -euo pipefail
SETUP_PARAMS=""
if [[ -n "${STORJ_DATABASE:-}" ]]; then
export STORJ_POINTER_DB_DATABASE_URL="${STORJ_DATABASE}"
fi
if [[ -n "${IDENTITY_ADDR:-}" ]]; then
export STORJ_SERVER_ADDRESS="${IDENTITY_ADDR}"
fi


@ -36,14 +36,6 @@ func cmdGCRun(cmd *cobra.Command, args []string) (err error) {
err = errs.Combine(err, db.Close())
}()
pointerDB, err := metainfo.OpenStore(ctx, log.Named("pointerdb"), runCfg.Metainfo.DatabaseURL, "satellite-gc")
if err != nil {
return errs.New("Error creating pointerDB connection GC: %+v", err)
}
defer func() {
err = errs.Combine(err, pointerDB.Close())
}()
metabaseDB, err := metainfo.OpenMetabase(ctx, log.Named("metabase"), runCfg.Metainfo.DatabaseURL)
if err != nil {
return errs.New("Error creating metabase connection: %+v", err)
@ -60,7 +52,7 @@ func cmdGCRun(cmd *cobra.Command, args []string) (err error) {
err = errs.Combine(err, revocationDB.Close())
}()
peer, err := satellite.NewGarbageCollection(log, identity, db, pointerDB, metabaseDB, revocationDB, version.Build, &runCfg.Config, process.AtomicLevel(cmd))
peer, err := satellite.NewGarbageCollection(log, identity, db, metabaseDB, revocationDB, version.Build, &runCfg.Config, process.AtomicLevel(cmd))
if err != nil {
return err
}
@ -74,9 +66,10 @@ func cmdGCRun(cmd *cobra.Command, args []string) (err error) {
log.Warn("Failed to initialize telemetry batcher on satellite GC", zap.Error(err))
}
err = pointerDB.MigrateToLatest(ctx)
err = metabaseDB.CheckVersion(ctx)
if err != nil {
return errs.New("Error creating pointerDB tables GC: %+v", err)
log.Error("Failed metabase database version check.", zap.Error(err))
return errs.New("failed metabase version check: %+v", err)
}
err = db.CheckVersion(ctx)


@ -376,14 +376,6 @@ func cmdRun(cmd *cobra.Command, args []string) (err error) {
err = errs.Combine(err, db.Close())
}()
pointerDB, err := metainfo.OpenStore(ctx, log.Named("pointerdb"), runCfg.Metainfo.DatabaseURL, "satellite-core")
if err != nil {
return errs.New("Error creating metainfodb connection: %+v", err)
}
defer func() {
err = errs.Combine(err, pointerDB.Close())
}()
metabaseDB, err := metainfo.OpenMetabase(ctx, log.Named("metabase"), runCfg.Metainfo.DatabaseURL)
if err != nil {
return errs.New("Error creating metabase connection: %+v", err)
@ -419,7 +411,7 @@ func cmdRun(cmd *cobra.Command, args []string) (err error) {
err = errs.Combine(err, rollupsWriteCache.CloseAndFlush(context2.WithoutCancellation(ctx)))
}()
peer, err := satellite.New(log, identity, db, pointerDB, metabaseDB, revocationDB, liveAccounting, rollupsWriteCache, version.Build, &runCfg.Config, process.AtomicLevel(cmd))
peer, err := satellite.New(log, identity, db, metabaseDB, revocationDB, liveAccounting, rollupsWriteCache, version.Build, &runCfg.Config, process.AtomicLevel(cmd))
if err != nil {
return err
}
@ -434,14 +426,10 @@ func cmdRun(cmd *cobra.Command, args []string) (err error) {
log.Warn("Failed to initialize telemetry batcher", zap.Error(err))
}
err = pointerDB.MigrateToLatest(ctx)
err = metabaseDB.CheckVersion(ctx)
if err != nil {
return errs.New("Error creating metainfodb tables: %+v", err)
}
err = metabaseDB.MigrateToLatest(ctx)
if err != nil {
return errs.New("Error creating metabase tables: %+v", err)
log.Error("Failed metabase database version check.", zap.Error(err))
return errs.New("failed metabase version check: %+v", err)
}
err = db.CheckVersion(ctx)
@ -472,18 +460,6 @@ func cmdMigrationRun(cmd *cobra.Command, args []string) (err error) {
return errs.New("Error creating tables for master database on satellite: %+v", err)
}
pdb, err := metainfo.OpenStore(ctx, log.Named("migration"), runCfg.Metainfo.DatabaseURL, "satellite-migration")
if err != nil {
return errs.New("Error creating pointer database connection on satellite: %+v", err)
}
defer func() {
err = errs.Combine(err, pdb.Close())
}()
err = pdb.MigrateToLatest(ctx)
if err != nil {
return errs.New("Error creating tables for pointer database on satellite: %+v", err)
}
metabaseDB, err := metainfo.OpenMetabase(ctx, log.Named("metabase"), runCfg.Metainfo.DatabaseURL)
if err != nil {
return errs.New("Error creating metabase connection: %+v", err)


@ -39,14 +39,6 @@ func cmdRepairerRun(cmd *cobra.Command, args []string) (err error) {
err = errs.Combine(err, db.Close())
}()
pointerDB, err := metainfo.OpenStore(ctx, log.Named("pointerdb"), runCfg.Metainfo.DatabaseURL, "satellite-repairer")
if err != nil {
return errs.New("Error creating metainfo database connection: %+v", err)
}
defer func() {
err = errs.Combine(err, pointerDB.Close())
}()
metabaseDB, err := metainfo.OpenMetabase(ctx, log.Named("metabase"), runCfg.Metainfo.DatabaseURL)
if err != nil {
return errs.New("Error creating metabase connection: %+v", err)
@ -71,7 +63,6 @@ func cmdRepairerRun(cmd *cobra.Command, args []string) (err error) {
peer, err := satellite.NewRepairer(
log,
identity,
pointerDB,
metabaseDB,
revocationDB,
db.RepairQueue(),
@ -96,14 +87,10 @@ func cmdRepairerRun(cmd *cobra.Command, args []string) (err error) {
log.Warn("Failed to initialize telemetry batcher on repairer", zap.Error(err))
}
err = pointerDB.MigrateToLatest(ctx)
err = metabaseDB.CheckVersion(ctx)
if err != nil {
return errs.New("Error creating tables for metainfo database: %+v", err)
}
err = metabaseDB.MigrateToLatest(ctx)
if err != nil {
return errs.New("Error creating tables for metabase: %+v", err)
log.Error("Failed metabase database version check.", zap.Error(err))
return errs.New("failed metabase version check: %+v", err)
}
err = db.CheckVersion(ctx)


@ -60,8 +60,7 @@ type Config struct {
// DatabaseConfig defines connection strings for database.
type DatabaseConfig struct {
SatelliteDB string
SatellitePointerDB string
SatelliteDB string
}
// Planet is a full storj system setup.


@ -20,7 +20,6 @@ import (
// Reconfigure allows to change node configurations.
type Reconfigure struct {
SatelliteDB func(log *zap.Logger, index int, db satellite.DB) (satellite.DB, error)
SatellitePointerDB func(log *zap.Logger, index int, db metainfo.PointerDB) (metainfo.PointerDB, error)
SatelliteMetabaseDB func(log *zap.Logger, index int, db metainfo.MetabaseDB) (metainfo.MetabaseDB, error)
Satellite func(log *zap.Logger, index int, config *satellite.Config)


@ -98,7 +98,6 @@ type Satellite struct {
}
Metainfo struct {
Database metainfo.PointerDB
Metabase metainfo.MetabaseDB
Service *metainfo.Service
Endpoint2 *metainfo.Endpoint
@ -356,21 +355,6 @@ func (planet *Planet) newSatellite(ctx context.Context, prefix string, index int
}
planet.databases = append(planet.databases, db)
pointerDB, err := satellitedbtest.CreatePointerDB(ctx, log.Named("pointerdb"), planet.config.Name, "P", index, databases.PointerDB)
if err != nil {
return nil, err
}
if planet.config.Reconfigure.SatellitePointerDB != nil {
var newPointerDB metainfo.PointerDB
newPointerDB, err = planet.config.Reconfigure.SatellitePointerDB(log.Named("pointerdb"), index, pointerDB)
if err != nil {
return nil, errs.Combine(err, pointerDB.Close())
}
pointerDB = newPointerDB
}
planet.databases = append(planet.databases, pointerDB)
metabaseDB, err := satellitedbtest.CreateMetabaseDB(context.TODO(), log.Named("metabase"), planet.config.Name, "M", index, databases.MetabaseDB)
if err != nil {
return nil, err
@ -646,7 +630,7 @@ func (planet *Planet) newSatellite(ctx context.Context, prefix string, index int
rollupsWriteCache := orders.NewRollupsWriteCache(log.Named("orders-write-cache"), db.Orders(), config.Orders.FlushBatchSize)
planet.databases = append(planet.databases, rollupsWriteCacheCloser{rollupsWriteCache})
peer, err := satellite.New(log, identity, db, pointerDB, metabaseDB, revocationDB, liveAccounting, rollupsWriteCache, versionInfo, &config, nil)
peer, err := satellite.New(log, identity, db, metabaseDB, revocationDB, liveAccounting, rollupsWriteCache, versionInfo, &config, nil)
if err != nil {
return nil, err
}
@ -661,7 +645,7 @@ func (planet *Planet) newSatellite(ctx context.Context, prefix string, index int
return nil, err
}
api, err := planet.newAPI(ctx, index, identity, db, pointerDB, metabaseDB, config, versionInfo)
api, err := planet.newAPI(ctx, index, identity, db, metabaseDB, config, versionInfo)
if err != nil {
return nil, err
}
@ -671,12 +655,12 @@ func (planet *Planet) newSatellite(ctx context.Context, prefix string, index int
return nil, err
}
repairerPeer, err := planet.newRepairer(ctx, index, identity, db, pointerDB, metabaseDB, config, versionInfo)
repairerPeer, err := planet.newRepairer(ctx, index, identity, db, metabaseDB, config, versionInfo)
if err != nil {
return nil, err
}
gcPeer, err := planet.newGarbageCollection(ctx, index, identity, db, pointerDB, metabaseDB, config, versionInfo)
gcPeer, err := planet.newGarbageCollection(ctx, index, identity, db, metabaseDB, config, versionInfo)
if err != nil {
return nil, err
}
@ -711,7 +695,6 @@ func createNewSystem(name string, log *zap.Logger, config satellite.Config, peer
system.Overlay.Service = api.Overlay.Service
system.Overlay.DQStrayNodes = peer.Overlay.DQStrayNodes
system.Metainfo.Database = api.Metainfo.Database
system.Metainfo.Metabase = api.Metainfo.Metabase
system.Metainfo.Service = peer.Metainfo.Service
system.Metainfo.Endpoint2 = api.Metainfo.Endpoint2
@ -756,7 +739,7 @@ func createNewSystem(name string, log *zap.Logger, config satellite.Config, peer
return system
}
func (planet *Planet) newAPI(ctx context.Context, index int, identity *identity.FullIdentity, db satellite.DB, pointerDB metainfo.PointerDB, metabaseDB metainfo.MetabaseDB, config satellite.Config, versionInfo version.Info) (*satellite.API, error) {
func (planet *Planet) newAPI(ctx context.Context, index int, identity *identity.FullIdentity, db satellite.DB, metabaseDB metainfo.MetabaseDB, config satellite.Config, versionInfo version.Info) (*satellite.API, error) {
prefix := "satellite-api" + strconv.Itoa(index)
log := planet.log.Named(prefix)
var err error
@ -776,7 +759,7 @@ func (planet *Planet) newAPI(ctx context.Context, index int, identity *identity.
rollupsWriteCache := orders.NewRollupsWriteCache(log.Named("orders-write-cache"), db.Orders(), config.Orders.FlushBatchSize)
planet.databases = append(planet.databases, rollupsWriteCacheCloser{rollupsWriteCache})
return satellite.NewAPI(log, identity, db, pointerDB, metabaseDB, revocationDB, liveAccounting, rollupsWriteCache, &config, versionInfo, nil)
return satellite.NewAPI(log, identity, db, metabaseDB, revocationDB, liveAccounting, rollupsWriteCache, &config, versionInfo, nil)
}
func (planet *Planet) newAdmin(ctx context.Context, index int, identity *identity.FullIdentity, db satellite.DB, config satellite.Config, versionInfo version.Info) (*satellite.Admin, error) {
@ -786,7 +769,7 @@ func (planet *Planet) newAdmin(ctx context.Context, index int, identity *identit
return satellite.NewAdmin(log, identity, db, versionInfo, &config, nil)
}
func (planet *Planet) newRepairer(ctx context.Context, index int, identity *identity.FullIdentity, db satellite.DB, pointerDB metainfo.PointerDB, metabaseDB metainfo.MetabaseDB, config satellite.Config, versionInfo version.Info) (*satellite.Repairer, error) {
func (planet *Planet) newRepairer(ctx context.Context, index int, identity *identity.FullIdentity, db satellite.DB, metabaseDB metainfo.MetabaseDB, config satellite.Config, versionInfo version.Info) (*satellite.Repairer, error) {
prefix := "satellite-repairer" + strconv.Itoa(index)
log := planet.log.Named(prefix)
@ -799,7 +782,7 @@ func (planet *Planet) newRepairer(ctx context.Context, index int, identity *iden
rollupsWriteCache := orders.NewRollupsWriteCache(log.Named("orders-write-cache"), db.Orders(), config.Orders.FlushBatchSize)
planet.databases = append(planet.databases, rollupsWriteCacheCloser{rollupsWriteCache})
return satellite.NewRepairer(log, identity, pointerDB, metabaseDB, revocationDB, db.RepairQueue(), db.Buckets(), db.OverlayCache(), rollupsWriteCache, db.Irreparable(), versionInfo, &config, nil)
return satellite.NewRepairer(log, identity, metabaseDB, revocationDB, db.RepairQueue(), db.Buckets(), db.OverlayCache(), rollupsWriteCache, db.Irreparable(), versionInfo, &config, nil)
}
type rollupsWriteCacheCloser struct {
@ -810,7 +793,7 @@ func (cache rollupsWriteCacheCloser) Close() error {
return cache.RollupsWriteCache.CloseAndFlush(context.TODO())
}
func (planet *Planet) newGarbageCollection(ctx context.Context, index int, identity *identity.FullIdentity, db satellite.DB, pointerDB metainfo.PointerDB, metabaseDB metainfo.MetabaseDB, config satellite.Config, versionInfo version.Info) (*satellite.GarbageCollection, error) {
func (planet *Planet) newGarbageCollection(ctx context.Context, index int, identity *identity.FullIdentity, db satellite.DB, metabaseDB metainfo.MetabaseDB, config satellite.Config, versionInfo version.Info) (*satellite.GarbageCollection, error) {
prefix := "satellite-gc" + strconv.Itoa(index)
log := planet.log.Named(prefix)
@ -819,7 +802,7 @@ func (planet *Planet) newGarbageCollection(ctx context.Context, index int, ident
return nil, errs.Wrap(err)
}
planet.databases = append(planet.databases, revocationDB)
return satellite.NewGarbageCollection(log, identity, db, pointerDB, metabaseDB, revocationDB, versionInfo, &config, nil)
return satellite.NewGarbageCollection(log, identity, db, metabaseDB, revocationDB, versionInfo, &config, nil)
}
// atLeastOne returns 1 if value < 1, or value otherwise.


@ -26,7 +26,6 @@ import (
"storj.io/storj/pkg/revocation"
"storj.io/storj/pkg/server"
"storj.io/storj/private/testplanet"
"storj.io/storj/satellite/metainfo/metabase"
"storj.io/uplink"
"storj.io/uplink/private/metainfo"
)
@ -88,31 +87,19 @@ func TestDownloadWithSomeNodesOffline(t *testing.T) {
err := ul.Upload(ctx, satellite, "testbucket", "test/path", testData)
require.NoError(t, err)
// get a remote segment from pointerdb
pdb := satellite.Metainfo.Service
listResponse, _, err := pdb.List(ctx, metabase.SegmentKey{}, "", true, 0, 0)
// get a remote segment
segments, err := satellite.Metainfo.Metabase.TestingAllSegments(ctx)
require.NoError(t, err)
var path string
var pointer *pb.Pointer
for _, v := range listResponse {
path = v.GetPath()
pointer, err = pdb.Get(ctx, metabase.SegmentKey(path))
require.NoError(t, err)
if pointer.GetType() == pb.Pointer_REMOTE {
break
}
}
// calculate how many storagenodes to kill
redundancy := pointer.GetRemote().GetRedundancy()
remotePieces := pointer.GetRemote().GetRemotePieces()
minReq := redundancy.GetMinReq()
redundancy := segments[0].Redundancy
remotePieces := segments[0].Pieces
minReq := redundancy.RequiredShares
numPieces := len(remotePieces)
toKill := numPieces - int(minReq)
for _, piece := range remotePieces[:toKill] {
err := planet.StopNodeAndUpdate(ctx, planet.FindNode(piece.NodeId))
err := planet.StopNodeAndUpdate(ctx, planet.FindNode(piece.StorageNode))
require.NoError(t, err)
}


@ -5,7 +5,6 @@ package accounting_test
import (
"context"
"strings"
"testing"
"time"
@ -18,7 +17,6 @@ import (
"storj.io/storj/private/testplanet"
"storj.io/storj/satellite/accounting"
"storj.io/storj/satellite/metainfo/metabase"
"storj.io/storj/storage"
)
func TestBilling_DownloadWithoutExpansionFactor(t *testing.T) {
@ -422,62 +420,6 @@ func getTallies(ctx context.Context, t *testing.T, planet *testplanet.Planet, sa
return tallies
}
func TestBilling_ZombieSegments(t *testing.T) {
// failing test - see https://storjlabs.atlassian.net/browse/SM-592
t.Skip("Zombie segments do get billed. Wait for resolution of SM-592")
testplanet.Run(t, testplanet.Config{
SatelliteCount: 1, StorageNodeCount: 4, UplinkCount: 1,
Reconfigure: testplanet.Reconfigure{
Satellite: testplanet.MaxSegmentSize(5 * memory.KiB),
},
}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
const (
bucketName = "a-bucket"
objectKey = "object-filename"
)
satelliteSys := planet.Satellites[0]
satelliteSys.Audit.Chore.Loop.Stop()
satelliteSys.Repair.Repairer.Loop.Stop()
satelliteSys.Accounting.Tally.Loop.Pause()
uplnk := planet.Uplinks[0]
{
data := testrand.Bytes(10 * memory.KiB)
err := uplnk.Upload(ctx, satelliteSys, bucketName, objectKey, data)
require.NoError(t, err)
}
// trigger tally so it gets all set up and can return a storage usage
satelliteSys.Accounting.Tally.Loop.TriggerWait()
projectID := uplnk.Projects[0].ID
{ // delete last segment from metainfo to get zombie segments
keys, err := planet.Satellites[0].Metainfo.Database.List(ctx, nil, 10)
require.NoError(t, err)
var lastSegmentKey storage.Key
for _, key := range keys {
if strings.Contains(key.String(), "/l/") {
lastSegmentKey = key
}
}
require.NotNil(t, lastSegmentKey)
err = satelliteSys.Metainfo.Service.UnsynchronizedDelete(ctx, metabase.SegmentKey(lastSegmentKey))
require.NoError(t, err)
err = uplnk.DeleteObject(ctx, satelliteSys, bucketName, objectKey)
require.Error(t, err)
}
from := time.Now()
storageAfterDelete := getProjectTotal(ctx, t, planet, 0, projectID, from).Storage
require.Equal(t, 0.0, storageAfterDelete, "zombie segments billed")
})
}
// getProjectTotal returns the total used egress, storage, objectCount for the
// projectID in the satellite referenced by satelliteIdx index.
func getProjectTotal(


@ -97,7 +97,6 @@ type API struct {
}
Metainfo struct {
Database metainfo.PointerDB
Metabase metainfo.MetabaseDB
Service *metainfo.Service
PieceDeletion *piecedeletion.Service
@ -166,7 +165,7 @@ type API struct {
// NewAPI creates a new satellite API process.
func NewAPI(log *zap.Logger, full *identity.FullIdentity, db DB,
pointerDB metainfo.PointerDB, metabaseDB metainfo.MetabaseDB, revocationDB extensions.RevocationDB,
metabaseDB metainfo.MetabaseDB, revocationDB extensions.RevocationDB,
liveAccounting accounting.Cache, rollupsWriteCache *orders.RollupsWriteCache,
config *Config, versionInfo version.Info, atomicLogLevel *zap.AtomicLevel) (*API, error) {
peer := &API{
@ -368,10 +367,8 @@ func NewAPI(log *zap.Logger, full *identity.FullIdentity, db DB,
}
{ // setup metainfo
peer.Metainfo.Database = pointerDB
peer.Metainfo.Metabase = metabaseDB
peer.Metainfo.Service = metainfo.NewService(peer.Log.Named("metainfo:service"),
peer.Metainfo.Database,
peer.DB.Buckets(),
peer.Metainfo.Metabase,
)


@ -81,7 +81,6 @@ type Core struct {
}
Metainfo struct {
Database metainfo.PointerDB // TODO: move into pointerDB
Metabase metainfo.MetabaseDB
Service *metainfo.Service
Loop *metaloop.Service
@ -140,7 +139,7 @@ type Core struct {
// New creates a new satellite.
func New(log *zap.Logger, full *identity.FullIdentity, db DB,
pointerDB metainfo.PointerDB, metabaseDB metainfo.MetabaseDB, revocationDB extensions.RevocationDB,
metabaseDB metainfo.MetabaseDB, revocationDB extensions.RevocationDB,
liveAccounting accounting.Cache, rollupsWriteCache *orders.RollupsWriteCache,
versionInfo version.Info, config *Config, atomicLogLevel *zap.AtomicLevel) (*Core, error) {
peer := &Core{
@ -274,10 +273,8 @@ func New(log *zap.Logger, full *identity.FullIdentity, db DB,
}
{ // setup metainfo
peer.Metainfo.Database = pointerDB // for logging: storelogger.New(peer.Log.Named("pdb"), db)
peer.Metainfo.Metabase = metabaseDB
peer.Metainfo.Service = metainfo.NewService(peer.Log.Named("metainfo:service"),
peer.Metainfo.Database,
peer.DB.Buckets(),
peer.Metainfo.Metabase,
)


@ -56,8 +56,7 @@ type GarbageCollection struct {
}
Metainfo struct {
Database metainfo.PointerDB
Loop *metaloop.Service
Loop *metaloop.Service
}
GarbageCollection struct {
@ -67,7 +66,7 @@ type GarbageCollection struct {
// NewGarbageCollection creates a new satellite garbage collection process.
func NewGarbageCollection(log *zap.Logger, full *identity.FullIdentity, db DB,
pointerDB metainfo.PointerDB, metabaseDB metainfo.MetabaseDB, revocationDB extensions.RevocationDB,
metabaseDB metainfo.MetabaseDB, revocationDB extensions.RevocationDB,
versionInfo version.Info, config *Config, atomicLogLevel *zap.AtomicLevel) (*GarbageCollection, error) {
peer := &GarbageCollection{
Log: log,
@ -130,8 +129,6 @@ func NewGarbageCollection(log *zap.Logger, full *identity.FullIdentity, db DB,
}
{ // setup metainfo
peer.Metainfo.Database = pointerDB
// Garbage Collection creates its own instance of the metainfo loop here. Since
// GC runs infrequently, this shouldn't add too much extra load on the metainfo db.
// As long as garbage collection is the only observer joining the metainfo loop, then by default


@ -20,9 +20,6 @@ import (
"storj.io/storj/satellite/metainfo/metabase"
"storj.io/storj/satellite/metainfo/metaloop"
"storj.io/storj/satellite/metainfo/piecedeletion"
"storj.io/storj/storage"
"storj.io/storj/storage/cockroachkv"
"storj.io/storj/storage/postgreskv"
)
const (
@ -130,40 +127,6 @@ type Config struct {
PieceDeletion piecedeletion.Config `help:"piece deletion configuration"`
}
// PointerDB stores pointers.
//
// architecture: Database
type PointerDB interface {
// MigrateToLatest migrates to latest schema version.
MigrateToLatest(ctx context.Context) error
storage.KeyValueStore
}
// OpenStore returns database for storing pointer data.
func OpenStore(ctx context.Context, logger *zap.Logger, dbURLString string, app string) (db PointerDB, err error) {
_, source, implementation, err := dbutil.SplitConnStr(dbURLString)
if err != nil {
return nil, err
}
switch implementation {
case dbutil.Postgres:
db, err = postgreskv.Open(ctx, source, app)
case dbutil.Cockroach:
db, err = cockroachkv.Open(ctx, source, app)
default:
err = Error.New("unsupported db implementation: %s", dbURLString)
}
if err != nil {
return nil, err
}
logger.Debug("Connected to:", zap.String("db source", source))
return db, nil
}
// MetabaseDB stores objects and segments.
type MetabaseDB interface {
io.Closer


@ -6,6 +6,7 @@ package metabase
import (
"context"
"fmt"
"sort"
"strconv"
@ -18,6 +19,7 @@ import (
"storj.io/common/storj"
"storj.io/common/uuid"
"storj.io/storj/private/dbutil"
"storj.io/storj/private/dbutil/pgutil"
"storj.io/storj/private/migrate"
"storj.io/storj/private/tagsql"
)
@ -30,6 +32,7 @@ var (
type DB struct {
log *zap.Logger
db tagsql.DB
connstr string
implementation dbutil.Implementation
aliasCache *NodeAliasCache
@ -43,7 +46,7 @@ func Open(ctx context.Context, log *zap.Logger, driverName, connstr string) (*DB
}
dbutil.Configure(ctx, rawdb, "metabase", mon)
db := &DB{log: log, db: postgresRebind{rawdb}}
db := &DB{log: log, connstr: connstr, db: postgresRebind{rawdb}}
db.aliasCache = NewNodeAliasCache(db)
_, _, db.implementation, err = dbutil.SplitConnStr(connstr)
@ -88,6 +91,38 @@ func (db *DB) DestroyTables(ctx context.Context) error {
// MigrateToLatest migrates database to the latest version.
func (db *DB) MigrateToLatest(ctx context.Context) error {
// First handle the idiosyncrasies of postgres and cockroach migrations. Postgres
// will need to create any schemas specified in the search path, and cockroach
// will need to create the database it was told to connect to. These things should
// not really be here, and instead should be assumed to exist.
// This is tracked in jira ticket SM-200
switch db.implementation {
case dbutil.Postgres:
schema, err := pgutil.ParseSchemaFromConnstr(db.connstr)
if err != nil {
return errs.New("error parsing schema: %+v", err)
}
if schema != "" {
err = pgutil.CreateSchema(ctx, db.db, schema)
if err != nil {
return errs.New("error creating schema: %+v", err)
}
}
case dbutil.Cockroach:
var dbName string
if err := db.db.QueryRow(ctx, `SELECT current_database();`).Scan(&dbName); err != nil {
return errs.New("error querying current database: %+v", err)
}
_, err := db.db.Exec(ctx, fmt.Sprintf(`CREATE DATABASE IF NOT EXISTS %s;`,
pgutil.QuoteIdentifier(dbName)))
if err != nil {
return errs.Wrap(err)
}
}
migration := db.PostgresMigration()
return migration.Run(ctx, db.log.Named("migrate"))
}


@ -5,18 +5,14 @@ package metainfo
import (
"context"
"time"
"github.com/zeebo/errs"
"go.uber.org/zap"
"storj.io/common/macaroon"
"storj.io/common/pb"
"storj.io/common/storj"
"storj.io/common/uuid"
"storj.io/storj/satellite/metainfo/metabase"
"storj.io/storj/storage"
"storj.io/uplink/private/storage/meta"
)
var (
@ -29,382 +25,19 @@ var (
// architecture: Service
type Service struct {
logger *zap.Logger
db PointerDB
bucketsDB BucketsDB
metabaseDB MetabaseDB
}
// NewService creates new metainfo service.
func NewService(logger *zap.Logger, db PointerDB, bucketsDB BucketsDB, metabaseDB MetabaseDB) *Service {
func NewService(logger *zap.Logger, bucketsDB BucketsDB, metabaseDB MetabaseDB) *Service {
return &Service{
logger: logger,
db: db,
bucketsDB: bucketsDB,
metabaseDB: metabaseDB,
}
}
// Put puts pointer to db under specific path.
func (s *Service) Put(ctx context.Context, key metabase.SegmentKey, pointer *pb.Pointer) (err error) {
defer mon.Task()(&ctx)(&err)
if err := sanityCheckPointer(key, pointer); err != nil {
return Error.Wrap(err)
}
// Update the pointer with the creation date
pointer.CreationDate = time.Now()
pointerBytes, err := pb.Marshal(pointer)
if err != nil {
return Error.Wrap(err)
}
// CompareAndSwap is used instead of Put to avoid overwriting existing pointers
err = s.db.CompareAndSwap(ctx, storage.Key(key), nil, pointerBytes)
return Error.Wrap(err)
}
// UnsynchronizedPut puts pointer to db under specific path without verifying for existing pointer under the same path.
func (s *Service) UnsynchronizedPut(ctx context.Context, key metabase.SegmentKey, pointer *pb.Pointer) (err error) {
defer mon.Task()(&ctx)(&err)
if err := sanityCheckPointer(key, pointer); err != nil {
return Error.Wrap(err)
}
// Update the pointer with the creation date
pointer.CreationDate = time.Now()
pointerBytes, err := pb.Marshal(pointer)
if err != nil {
return Error.Wrap(err)
}
err = s.db.Put(ctx, storage.Key(key), pointerBytes)
return Error.Wrap(err)
}
// UpdatePieces calls UpdatePiecesCheckDuplicates with checkDuplicates equal to false.
func (s *Service) UpdatePieces(ctx context.Context, key metabase.SegmentKey, ref *pb.Pointer, toAdd, toRemove []*pb.RemotePiece) (pointer *pb.Pointer, err error) {
return s.UpdatePiecesCheckDuplicates(ctx, key, ref, toAdd, toRemove, false)
}
// UpdatePiecesCheckDuplicates atomically adds toAdd pieces and removes toRemove pieces from
// the pointer under path. ref is the pointer that caller received via Get
// prior to calling this method.
//
// It will first check if the pointer has been deleted or replaced.
// Then if checkDuplicates is true it will return an error if the nodes to be
// added are already in the pointer.
// Then it will remove the toRemove pieces and then it will add the toAdd pieces.
// Replacing the node ID and the hash of a piece can be done by adding the
// piece to both toAdd and toRemove.
func (s *Service) UpdatePiecesCheckDuplicates(ctx context.Context, key metabase.SegmentKey, ref *pb.Pointer, toAdd, toRemove []*pb.RemotePiece, checkDuplicates bool) (pointer *pb.Pointer, err error) {
defer mon.Task()(&ctx)(&err)
if err := sanityCheckPointer(key, ref); err != nil {
return nil, Error.Wrap(err)
}
defer func() {
if err == nil {
err = sanityCheckPointer(key, pointer)
}
}()
for {
// read the pointer
oldPointerBytes, err := s.db.Get(ctx, storage.Key(key))
if err != nil {
if storage.ErrKeyNotFound.Has(err) {
err = storj.ErrObjectNotFound.Wrap(err)
}
return nil, Error.Wrap(err)
}
// unmarshal the pointer
pointer = &pb.Pointer{}
err = pb.Unmarshal(oldPointerBytes, pointer)
if err != nil {
return nil, Error.Wrap(err)
}
// check if pointer has been replaced
if !pointer.GetCreationDate().Equal(ref.GetCreationDate()) {
return nil, Error.New("pointer has been replaced")
}
// put all existing pieces to a map
pieceMap := make(map[int32]*pb.RemotePiece)
nodePieceMap := make(map[storj.NodeID]struct{})
for _, piece := range pointer.GetRemote().GetRemotePieces() {
pieceMap[piece.PieceNum] = piece
if checkDuplicates {
nodePieceMap[piece.NodeId] = struct{}{}
}
}
// Return an error if the pointer already has a piece for this node
if checkDuplicates {
for _, piece := range toAdd {
_, ok := nodePieceMap[piece.NodeId]
if ok {
return nil, ErrNodeAlreadyExists.New("node id already exists in pointer. Key: %s, NodeID: %s", key, piece.NodeId.String())
}
nodePieceMap[piece.NodeId] = struct{}{}
}
}
// remove the toRemove pieces from the map
// only if all piece number, node id and hash match
for _, piece := range toRemove {
if piece == nil {
continue
}
existing := pieceMap[piece.PieceNum]
if existing != nil && existing.NodeId == piece.NodeId {
delete(pieceMap, piece.PieceNum)
}
}
// add the toAdd pieces to the map
for _, piece := range toAdd {
if piece == nil {
continue
}
_, exists := pieceMap[piece.PieceNum]
if exists {
return nil, Error.New("piece to add already exists (piece no: %d)", piece.PieceNum)
}
pieceMap[piece.PieceNum] = piece
}
// copy the pieces from the map back to the pointer
var pieces []*pb.RemotePiece
for _, piece := range pieceMap {
// clear hashes so we don't store them
piece.Hash = nil
pieces = append(pieces, piece)
}
pointer.GetRemote().RemotePieces = pieces
pointer.LastRepaired = ref.LastRepaired
pointer.RepairCount = ref.RepairCount
// marshal the pointer
newPointerBytes, err := pb.Marshal(pointer)
if err != nil {
return nil, Error.Wrap(err)
}
// write the pointer using compare-and-swap
err = s.db.CompareAndSwap(ctx, storage.Key(key), oldPointerBytes, newPointerBytes)
if storage.ErrValueChanged.Has(err) {
continue
}
if err != nil {
if storage.ErrKeyNotFound.Has(err) {
err = storj.ErrObjectNotFound.Wrap(err)
}
return nil, Error.Wrap(err)
}
return pointer, nil
}
}
// Get gets decoded pointer from DB.
func (s *Service) Get(ctx context.Context, key metabase.SegmentKey) (_ *pb.Pointer, err error) {
defer mon.Task()(&ctx)(&err)
_, pointer, err := s.GetWithBytes(ctx, key)
if err != nil {
return nil, err
}
return pointer, nil
}
// GetItems gets decoded pointers from DB.
// The return value is in the same order as the argument paths.
func (s *Service) GetItems(ctx context.Context, keys []metabase.SegmentKey) (_ []*pb.Pointer, err error) {
defer mon.Task()(&ctx)(&err)
storageKeys := make(storage.Keys, len(keys))
for i := range keys {
storageKeys[i] = storage.Key(keys[i])
}
pointerBytes, err := s.db.GetAll(ctx, storageKeys)
if err != nil {
return nil, Error.Wrap(err)
}
pointers := make([]*pb.Pointer, len(pointerBytes))
for i, p := range pointerBytes {
if p == nil {
continue
}
var pointer pb.Pointer
err = pb.Unmarshal([]byte(p), &pointer)
if err != nil {
return nil, Error.Wrap(err)
}
pointers[i] = &pointer
}
return pointers, nil
}
// GetWithBytes gets the protocol buffers encoded and decoded pointer from the DB.
func (s *Service) GetWithBytes(ctx context.Context, key metabase.SegmentKey) (pointerBytes []byte, pointer *pb.Pointer, err error) {
defer mon.Task()(&ctx)(&err)
pointerBytes, err = s.db.Get(ctx, storage.Key(key))
if err != nil {
if storage.ErrKeyNotFound.Has(err) {
err = storj.ErrObjectNotFound.Wrap(err)
}
return nil, nil, Error.Wrap(err)
}
pointer = &pb.Pointer{}
err = pb.Unmarshal(pointerBytes, pointer)
if err != nil {
return nil, nil, Error.Wrap(err)
}
return pointerBytes, pointer, nil
}
// List returns all Path keys in the pointers bucket.
func (s *Service) List(ctx context.Context, prefix metabase.SegmentKey, startAfter string, recursive bool, limit int32,
metaFlags uint32) (items []*pb.ListResponse_Item, more bool, err error) {
defer mon.Task()(&ctx)(&err)
var prefixKey storage.Key
if len(prefix) != 0 {
prefixKey = storage.Key(prefix)
if prefix[len(prefix)-1] != storage.Delimiter {
prefixKey = append(prefixKey, storage.Delimiter)
}
}
more, err = storage.ListV2Iterate(ctx, s.db, storage.ListOptions{
Prefix: prefixKey,
StartAfter: storage.Key(startAfter),
Recursive: recursive,
Limit: int(limit),
IncludeValue: metaFlags != meta.None,
}, func(ctx context.Context, item *storage.ListItem) error {
items = append(items, s.createListItem(ctx, *item, metaFlags))
return nil
})
if err != nil {
return nil, false, Error.Wrap(err)
}
return items, more, nil
}
// createListItem creates a new list item with the given path. It also adds
// the metadata according to the given metaFlags.
func (s *Service) createListItem(ctx context.Context, rawItem storage.ListItem, metaFlags uint32) *pb.ListResponse_Item {
defer mon.Task()(&ctx)(nil)
item := &pb.ListResponse_Item{
Path: rawItem.Key.String(),
IsPrefix: rawItem.IsPrefix,
}
if item.IsPrefix {
return item
}
err := s.setMetadata(item, rawItem.Value, metaFlags)
if err != nil {
s.logger.Warn("err retrieving metadata", zap.Error(err))
}
return item
}
// getMetadata adds the metadata to the given item pointer according to the
// given metaFlags.
func (s *Service) setMetadata(item *pb.ListResponse_Item, data []byte, metaFlags uint32) (err error) {
if metaFlags == meta.None || len(data) == 0 {
return nil
}
pr := &pb.Pointer{}
err = pb.Unmarshal(data, pr)
if err != nil {
return Error.Wrap(err)
}
// Start with an empty pointer to and add only what's requested in
// metaFlags to safe to transfer payload
item.Pointer = &pb.Pointer{}
if metaFlags&meta.Modified != 0 {
item.Pointer.CreationDate = pr.GetCreationDate()
}
if metaFlags&meta.Expiration != 0 {
item.Pointer.ExpirationDate = pr.GetExpirationDate()
}
if metaFlags&meta.Size != 0 {
item.Pointer.SegmentSize = pr.GetSegmentSize()
}
if metaFlags&meta.UserDefined != 0 {
item.Pointer.Metadata = pr.GetMetadata()
}
return nil
}
// Delete deletes a pointer bytes when it matches oldPointerBytes, otherwise it'll fail.
func (s *Service) Delete(ctx context.Context, key metabase.SegmentKey, oldPointerBytes []byte) (err error) {
defer mon.Task()(&ctx)(&err)
err = s.db.CompareAndSwap(ctx, storage.Key(key), oldPointerBytes, nil)
if storage.ErrKeyNotFound.Has(err) {
err = storj.ErrObjectNotFound.Wrap(err)
}
return Error.Wrap(err)
}
// UnsynchronizedGetDel deletes items from db without verifying whether the pointers have changed in the database,
// and it returns deleted items.
func (s *Service) UnsynchronizedGetDel(ctx context.Context, keys []metabase.SegmentKey) ([]metabase.SegmentKey, []*pb.Pointer, error) {
storageKeys := make(storage.Keys, len(keys))
for i := range keys {
storageKeys[i] = storage.Key(keys[i])
}
items, err := s.db.DeleteMultiple(ctx, storageKeys)
if err != nil {
return nil, nil, Error.Wrap(err)
}
pointerPaths := make([]metabase.SegmentKey, 0, len(items))
pointers := make([]*pb.Pointer, 0, len(items))
for _, item := range items {
data := &pb.Pointer{}
err = pb.Unmarshal(item.Value, data)
if err != nil {
return nil, nil, Error.Wrap(err)
}
pointerPaths = append(pointerPaths, metabase.SegmentKey(item.Key))
pointers = append(pointers, data)
}
return pointerPaths, pointers, nil
}
// UnsynchronizedDelete deletes item from db without verifying whether the pointer has changed in the database.
func (s *Service) UnsynchronizedDelete(ctx context.Context, key metabase.SegmentKey) (err error) {
defer mon.Task()(&ctx)(&err)
err = s.db.Delete(ctx, storage.Key(key))
if storage.ErrKeyNotFound.Has(err) {
err = storj.ErrObjectNotFound.Wrap(err)
}
return Error.Wrap(err)
}
// CreateBucket creates a new bucket in the buckets db.
func (s *Service) CreateBucket(ctx context.Context, bucket storj.Bucket) (_ storj.Bucket, err error) {
defer mon.Task()(&ctx)(&err)


@ -4,99 +4,16 @@
package metainfo_test
import (
"fmt"
"strconv"
"testing"
"github.com/stretchr/testify/require"
"github.com/zeebo/errs"
"storj.io/common/memory"
"storj.io/common/pb"
"storj.io/common/storj"
"storj.io/common/testcontext"
"storj.io/common/testrand"
"storj.io/storj/private/testplanet"
"storj.io/storj/satellite/metainfo/metabase"
)
const lastSegmentIndex = -1
// TestGetItems_ReturnValueOrder ensures the return value
// of GetItems will always be the same order as the requested paths.
// The test does following steps:
// - Uploads test data (multi-segment objects)
// - Gather all object paths with an extra invalid path at random position
// - Retrieve pointers using above paths
// - Ensure the nil pointer and last segment paths are in the same order as their
// corresponding paths.
func TestGetItems_ReturnValueOrder(t *testing.T) {
testplanet.Run(t, testplanet.Config{
SatelliteCount: 1, StorageNodeCount: 6, UplinkCount: 1,
Reconfigure: testplanet.Reconfigure{
Satellite: testplanet.Combine(
testplanet.ReconfigureRS(2, 2, 4, 4),
testplanet.MaxSegmentSize(3*memory.KiB),
),
},
}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
satellite := planet.Satellites[0]
uplinkPeer := planet.Uplinks[0]
numItems := 5
for i := 0; i < numItems; i++ {
path := fmt.Sprintf("test/path_%d", i)
err := uplinkPeer.Upload(ctx, satellite, "bucket", path, testrand.Bytes(15*memory.KiB))
require.NoError(t, err)
}
keys, err := satellite.Metainfo.Database.List(ctx, nil, numItems)
require.NoError(t, err)
var segmentKeys = make([]metabase.SegmentKey, 0, numItems+1)
var lastSegmentPathIndices []int
// Random nil pointer
nilPointerIndex := testrand.Intn(numItems + 1)
for i, key := range keys {
segmentKeys = append(segmentKeys, metabase.SegmentKey(key))
segmentIdx, err := parseSegmentPath([]byte(key.String()))
require.NoError(t, err)
if segmentIdx == lastSegmentIndex {
lastSegmentPathIndices = append(lastSegmentPathIndices, i)
}
// set a random path to be nil.
if nilPointerIndex == i {
segmentKeys[nilPointerIndex] = nil
}
}
pointers, err := satellite.Metainfo.Service.GetItems(ctx, segmentKeys)
require.NoError(t, err)
for i, p := range pointers {
if p == nil {
require.Equal(t, nilPointerIndex, i)
continue
}
meta := pb.StreamMeta{}
metaInBytes := p.GetMetadata()
err = pb.Unmarshal(metaInBytes, &meta)
require.NoError(t, err)
lastSegmentMeta := meta.GetLastSegmentMeta()
if lastSegmentMeta != nil {
require.Equal(t, lastSegmentPathIndices[i], i)
}
}
})
}
func TestCountBuckets(t *testing.T) {
testplanet.Run(t, testplanet.Config{
SatelliteCount: 1, StorageNodeCount: 1, UplinkCount: 1,
@ -122,24 +39,6 @@ func TestCountBuckets(t *testing.T) {
})
}
func parseSegmentPath(segmentPath []byte) (segmentIndex int64, err error) {
elements := storj.SplitPath(string(segmentPath))
if len(elements) < 4 {
return -1, errs.New("invalid path %q", string(segmentPath))
}
// var segmentIndex int64
if elements[1] == "l" {
segmentIndex = lastSegmentIndex
} else {
segmentIndex, err = strconv.ParseInt(elements[1][1:], 10, 64)
if err != nil {
return lastSegmentIndex, errs.Wrap(err)
}
}
return segmentIndex, nil
}
func TestIsBucketEmpty(t *testing.T) {
testplanet.Run(t, testplanet.Config{
SatelliteCount: 1, StorageNodeCount: 1, UplinkCount: 1,


@ -1,17 +0,0 @@
// Copyright (C) 2020 Storj Labs, Inc.
// See LICENSE for copying information.
// +build !race
package metainfo
import (
"storj.io/common/pb"
"storj.io/storj/satellite/metainfo/metabase"
)
// sanityCheckPointer implements sanity checking test data,
// we don't need this in production code.
func sanityCheckPointer(key metabase.SegmentKey, pointer *pb.Pointer) (err error) {
return nil
}


@ -1,52 +0,0 @@
// Copyright (C) 2020 Storj Labs, Inc.
// See LICENSE for copying information.
// +build race
package metainfo
import (
"bytes"
"storj.io/common/pb"
"storj.io/storj/satellite/metainfo/metabase"
)
// sanityCheckPointer implements sanity checking test data,
// we don't need this in production code.
func sanityCheckPointer(key metabase.SegmentKey, pointer *pb.Pointer) (err error) {
tokens := bytes.Split(key, []byte("/"))
if len(tokens) <= 3 {
return Error.New("invalid path %s", key)
}
if pointer.Type == pb.Pointer_REMOTE {
remote := pointer.Remote
switch {
case remote.RootPieceId.IsZero():
return Error.New("piece id zero")
case remote == nil:
return Error.New("no remote segment specified")
case remote.RemotePieces == nil:
return Error.New("no remote segment pieces specified")
case remote.Redundancy == nil:
return Error.New("no redundancy scheme specified")
}
redundancy := remote.Redundancy
if redundancy.MinReq <= 0 || redundancy.Total <= 0 ||
redundancy.RepairThreshold <= 0 || redundancy.SuccessThreshold <= 0 ||
redundancy.ErasureShareSize <= 0 {
return Error.New("invalid redundancy: %+v", redundancy)
}
for _, piece := range remote.GetRemotePieces() {
if int(piece.PieceNum) >= int(redundancy.Total) {
return Error.New("invalid PieceNum=%v total=%v", piece.PieceNum, redundancy.Total)
}
}
}
return nil
}


@ -66,7 +66,7 @@ type Repairer struct {
// NewRepairer creates a new repairer peer.
func NewRepairer(log *zap.Logger, full *identity.FullIdentity,
pointerDB metainfo.PointerDB, metabaseDB metainfo.MetabaseDB,
metabaseDB metainfo.MetabaseDB,
revocationDB extensions.RevocationDB, repairQueue queue.RepairQueue,
bucketsDB metainfo.BucketsDB, overlayCache overlay.DB,
rollupsWriteCache *orders.RollupsWriteCache, irrDB irreparable.DB,
@ -127,7 +127,7 @@ func NewRepairer(log *zap.Logger, full *identity.FullIdentity,
}
{ // setup metainfo
peer.Metainfo = metainfo.NewService(log.Named("metainfo"), pointerDB, bucketsDB, metabaseDB)
peer.Metainfo = metainfo.NewService(log.Named("metainfo"), bucketsDB, metabaseDB)
}
{ // setup overlay


@ -30,7 +30,6 @@ import (
type SatelliteDatabases struct {
Name string
MasterDB Database
PointerDB Database
MetabaseDB Database
}
@ -53,13 +52,11 @@ func Databases() []SatelliteDatabases {
{
Name: "Postgres",
MasterDB: Database{"Postgres", postgresConnStr, "Postgres flag missing, example: -postgres-test-db=" + pgtest.DefaultPostgres + " or use STORJ_TEST_POSTGRES environment variable."},
PointerDB: Database{"Postgres", postgresConnStr, ""},
MetabaseDB: Database{"Postgres", postgresConnStr, ""},
},
{
Name: "Cockroach",
MasterDB: Database{"Cockroach", cockroachConnStr, "Cockroach flag missing, example: -cockroach-test-db=" + pgtest.DefaultCockroach + " or use STORJ_TEST_COCKROACH environment variable."},
PointerDB: Database{"Cockroach", cockroachConnStr, ""},
MetabaseDB: Database{"Cockroach", cockroachConnStr, ""},
},
}
@ -126,54 +123,13 @@ func CreateMasterDBOnTopOf(ctx context.Context, log *zap.Logger, tempDB *dbutil.
return &tempMasterDB{DB: masterDB, tempDB: tempDB}, err
}
// tempPointerDB is a satellite.DB-implementing type that cleans up after itself when closed.
type tempPointerDB struct {
metainfo.PointerDB
tempDB *dbutil.TempDatabase
}
// Close closes a tempPointerDB and cleans it up afterward.
func (db *tempPointerDB) Close() error {
return errs.Combine(db.PointerDB.Close(), db.tempDB.Close())
}
// CreatePointerDB creates a new satellite pointer database for testing.
func CreatePointerDB(ctx context.Context, log *zap.Logger, name string, category string, index int, dbInfo Database) (db metainfo.PointerDB, err error) {
if dbInfo.URL == "" {
return nil, fmt.Errorf("Database %s connection string not provided. %s", dbInfo.Name, dbInfo.Message)
}
schemaSuffix := SchemaSuffix()
log.Debug("creating", zap.String("suffix", schemaSuffix))
schema := SchemaName(name, category, index, schemaSuffix)
tempDB, err := tempdb.OpenUnique(ctx, dbInfo.URL, schema)
if err != nil {
return nil, err
}
return CreatePointerDBOnTopOf(ctx, log, tempDB)
}
// CreatePointerDBOnTopOf creates a new satellite database on top of an already existing
// temporary database.
func CreatePointerDBOnTopOf(ctx context.Context, log *zap.Logger, tempDB *dbutil.TempDatabase) (db metainfo.PointerDB, err error) {
pointerDB, err := metainfo.OpenStore(ctx, log.Named("pointerdb"), tempDB.ConnStr, "satellite-satellitdb-test")
if err != nil {
return nil, err
}
err = pointerDB.MigrateToLatest(ctx)
return &tempPointerDB{PointerDB: pointerDB, tempDB: tempDB}, err
}
// tempMetabaseDB is a metabase.DB-implementing type that cleans up after itself when closed.
type tempMetabaseDB struct {
metainfo.MetabaseDB
tempDB *dbutil.TempDatabase
}
// Close closes a tempPointerDB and cleans it up afterward.
// Close closes a tempMetabaseDB and cleans it up afterward.
func (db *tempMetabaseDB) Close() error {
return errs.Combine(db.MetabaseDB.Close(), db.tempDB.Close())
}