private/testplanet: extend testplanet with multinode instance
Testplanet does not have a multinode instance, hence it is difficult to run UI tests for the multinode dashboard. Instead of using storj-sim for multinode UI tests, it's better to use the same approach (testplanet) for all UI tests. This change adds a multinode instance to testplanet. Change-Id: I58aa8c4597e789275f9e7ea7059703c742903492
This commit is contained in:
parent
b8e8110ca3
commit
b8dd35ceaf
@ -5,6 +5,7 @@ package server
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"html/template"
|
||||
"io/ioutil"
|
||||
"net"
|
||||
@ -15,6 +16,7 @@ import (
|
||||
"go.uber.org/zap"
|
||||
"golang.org/x/sync/errgroup"
|
||||
|
||||
"storj.io/common/errs2"
|
||||
"storj.io/storj/multinode/bandwidth"
|
||||
"storj.io/storj/multinode/console/controllers"
|
||||
"storj.io/storj/multinode/nodes"
|
||||
@ -180,7 +182,11 @@ func (server *Server) Run(ctx context.Context) (err error) {
|
||||
})
|
||||
group.Go(func() error {
|
||||
defer cancel()
|
||||
return Error.Wrap(server.http.Serve(server.listener))
|
||||
err := Error.Wrap(server.http.Serve(server.listener))
|
||||
if errs2.IsCanceled(err) || errors.Is(err, http.ErrServerClosed) {
|
||||
err = nil
|
||||
}
|
||||
return err
|
||||
})
|
||||
|
||||
return Error.Wrap(group.Wait())
|
||||
|
133
private/testplanet/multinode.go
Normal file
133
private/testplanet/multinode.go
Normal file
@ -0,0 +1,133 @@
|
||||
// Copyright (C) 2021 Storj Labs, Inc.
|
||||
// See LICENSE for copying information
|
||||
|
||||
package testplanet
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"runtime/pprof"
|
||||
"strconv"
|
||||
|
||||
"go.uber.org/zap"
|
||||
|
||||
"storj.io/common/storj"
|
||||
"storj.io/private/debug"
|
||||
"storj.io/storj/multinode"
|
||||
"storj.io/storj/multinode/console/server"
|
||||
"storj.io/storj/multinode/multinodedb"
|
||||
)
|
||||
|
||||
// Multinode contains all the processes needed to run a full multinode setup.
type Multinode struct {
	// Name is the label of this peer within the planet (e.g. "multinode0"),
	// used for logging and the debugger.
	Name string
	// Config holds the configuration the peer was constructed with.
	Config multinode.Config
	// Peer is the embedded multinode process itself; its fields and methods
	// (Identity, Console, Run, Close, ...) are promoted onto Multinode.
	*multinode.Peer
}
|
||||
|
||||
// ID returns multinode id.
|
||||
func (system *Multinode) ID() storj.NodeID { return system.Identity.ID }
|
||||
|
||||
// Addr returns the public address.
|
||||
func (system *Multinode) Addr() string { return system.Console.Listener.Addr().String() }
|
||||
|
||||
// Label returns name for debugger.
|
||||
func (system *Multinode) Label() string { return system.Name }
|
||||
|
||||
// URL returns the NodeURL as a string.
|
||||
func (system *Multinode) URL() string { return system.NodeURL().String() }
|
||||
|
||||
// NodeURL returns the storj.NodeURL.
|
||||
func (system *Multinode) NodeURL() storj.NodeURL {
|
||||
return storj.NodeURL{ID: system.ID(), Address: system.Addr()}
|
||||
}
|
||||
|
||||
// newMultinodes initializes multinode dashboards.
|
||||
func (planet *Planet) newMultinodes(ctx context.Context, prefix string, count int) (_ []*Multinode, err error) {
|
||||
defer mon.Task()(&ctx)(&err)
|
||||
|
||||
var xs []*Multinode
|
||||
for i := 0; i < count; i++ {
|
||||
index := i
|
||||
name := prefix + strconv.Itoa(index)
|
||||
log := planet.log.Named(name)
|
||||
|
||||
var system *Multinode
|
||||
var err error
|
||||
pprof.Do(ctx, pprof.Labels("peer", name), func(ctx context.Context) {
|
||||
system, err = planet.newMultinode(ctx, name, index, log)
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
log.Debug("id=" + system.ID().String() + " addr=" + system.Addr())
|
||||
xs = append(xs, system)
|
||||
planet.peers = append(planet.peers, newClosablePeer(system))
|
||||
}
|
||||
return xs, nil
|
||||
}
|
||||
|
||||
func (planet *Planet) newMultinode(ctx context.Context, prefix string, index int, log *zap.Logger) (_ *Multinode, err error) {
|
||||
defer mon.Task()(&ctx)(&err)
|
||||
|
||||
storageDir := filepath.Join(planet.directory, prefix)
|
||||
if err := os.MkdirAll(storageDir, 0700); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
identity, err := planet.NewIdentity()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
config := multinode.Config{
|
||||
Debug: debug.Config{
|
||||
Address: "",
|
||||
},
|
||||
Console: server.Config{
|
||||
Address: "127.0.0.1:0",
|
||||
StaticDir: filepath.Join(developmentRoot, "web/multinode/"),
|
||||
},
|
||||
}
|
||||
if planet.config.Reconfigure.Multinode != nil {
|
||||
planet.config.Reconfigure.Multinode(index, &config)
|
||||
}
|
||||
|
||||
database := fmt.Sprintf("sqlite3://file:%s/master.db", storageDir)
|
||||
|
||||
var db multinode.DB
|
||||
db, err = multinodedb.Open(ctx, log.Named("db"), database)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if planet.config.Reconfigure.MultinodeDB != nil {
|
||||
db, err = planet.config.Reconfigure.MultinodeDB(index, db, planet.log)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
peer, err := multinode.New(log, identity, config, db)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
err = db.MigrateToLatest(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
planet.databases = append(planet.databases, db)
|
||||
|
||||
log.Debug(peer.Console.Listener.Addr().String())
|
||||
|
||||
return &Multinode{
|
||||
Name: prefix,
|
||||
Config: config,
|
||||
Peer: peer,
|
||||
}, nil
|
||||
}
|
@ -52,6 +52,7 @@ type Config struct {
|
||||
SatelliteCount int
|
||||
StorageNodeCount int
|
||||
UplinkCount int
|
||||
MultinodeCount int
|
||||
|
||||
IdentityVersion *storj.IDVersion
|
||||
Reconfigure Reconfigure
|
||||
@ -82,6 +83,7 @@ type Planet struct {
|
||||
VersionControl *versioncontrol.Peer
|
||||
Satellites []*Satellite
|
||||
StorageNodes []*StorageNode
|
||||
Multinodes []*Multinode
|
||||
Uplinks []*Uplink
|
||||
|
||||
identities *testidentity.Identities
|
||||
@ -172,6 +174,11 @@ func NewCustom(ctx context.Context, log *zap.Logger, config Config, satelliteDat
|
||||
return nil, errs.Combine(err, planet.Shutdown())
|
||||
}
|
||||
|
||||
planet.Multinodes, err = planet.newMultinodes(ctx, "multinode", config.MultinodeCount)
|
||||
if err != nil {
|
||||
return nil, errs.Combine(err, planet.Shutdown())
|
||||
}
|
||||
|
||||
planet.Uplinks, err = planet.newUplinks(ctx, "uplink", config.UplinkCount)
|
||||
if err != nil {
|
||||
return nil, errs.Combine(err, planet.Shutdown())
|
||||
|
@ -20,7 +20,7 @@ func TestBasic(t *testing.T) {
|
||||
version := version
|
||||
testplanet.Run(t, testplanet.Config{
|
||||
SatelliteCount: 2, StorageNodeCount: 4, UplinkCount: 1,
|
||||
IdentityVersion: &version,
|
||||
MultinodeCount: 1, IdentityVersion: &version,
|
||||
}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
|
||||
for _, satellite := range planet.Satellites {
|
||||
t.Log("SATELLITE", satellite.ID(), satellite.Addr())
|
||||
@ -28,6 +28,9 @@ func TestBasic(t *testing.T) {
|
||||
for _, storageNode := range planet.StorageNodes {
|
||||
t.Log("STORAGE", storageNode.ID(), storageNode.Addr())
|
||||
}
|
||||
for _, multitude := range planet.Multinodes {
|
||||
t.Log("MULTINODE", multitude.ID(), multitude.Addr())
|
||||
}
|
||||
for _, uplink := range planet.Uplinks {
|
||||
t.Log("UPLINK", uplink.ID(), uplink.Addr())
|
||||
}
|
||||
|
@ -11,6 +11,7 @@ import (
|
||||
"storj.io/common/identity/testidentity"
|
||||
"storj.io/common/memory"
|
||||
"storj.io/common/storj"
|
||||
"storj.io/storj/multinode"
|
||||
"storj.io/storj/satellite"
|
||||
"storj.io/storj/satellite/metabase"
|
||||
"storj.io/storj/storagenode"
|
||||
@ -30,6 +31,9 @@ type Reconfigure struct {
|
||||
VersionControl func(config *versioncontrol.Config)
|
||||
|
||||
Identities func(log *zap.Logger, version storj.IDVersion) *testidentity.Identities
|
||||
|
||||
MultinodeDB func(index int, db multinode.DB, log *zap.Logger) (multinode.DB, error)
|
||||
Multinode func(index int, config *multinode.Config)
|
||||
}
|
||||
|
||||
// DisablePeerCAWhitelist returns a `Reconfigure` that sets `UsePeerCAWhitelist` for
|
||||
|
@ -12,7 +12,7 @@ import (
|
||||
|
||||
func TestRun(t *testing.T) {
|
||||
testplanet.Run(t, testplanet.Config{
|
||||
SatelliteCount: 1, StorageNodeCount: 1, UplinkCount: 1,
|
||||
SatelliteCount: 1, StorageNodeCount: 1, UplinkCount: 1, MultinodeCount: 1,
|
||||
}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
|
||||
t.Log("running test")
|
||||
})
|
||||
|
Loading…
Reference in New Issue
Block a user