cmd/storagenode: refactor lazyfilewalker commands to satisfy the execwrapper.Command interface
Follow-up change for https://review.dev.storj.io/c/storj/storj/+/10335

Updates https://github.com/storj/storj/issues/5349

Change-Id: Iadf55bae84ebc0803a0766830e596c396dfb332b
Parent: 56dbe7738d
Commit: cf7ce81d09
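For context: the execwrapper.Command interface that these runners now satisfy is not part of this diff. Judging from the methods implemented below (Start, Run, Wait, SetIn, SetOut, SetErr), it is roughly the following sketch, mirroring os/exec.Cmd so the lazyfilewalker supervisor can drive either a real subprocess or an in-process runner behind the same interface:

// Sketch only: inferred from the methods implemented in this change, not the
// verbatim definition in storj.io/storj/storagenode/pieces/lazyfilewalker/execwrapper.
package execwrapper

import "io"

// Command abstracts an exec.Cmd-like command so the lazy filewalker can be
// executed either as a separate subprocess or in-process (for example in tests).
type Command interface {
	Start() error // begin execution without waiting for it to finish
	Run() error   // execute and wait for completion
	Wait() error  // wait for a previously started command to finish
	SetIn(io.Reader)
	SetOut(io.Writer)
	SetErr(io.Writer)
}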
@@ -1,190 +0,0 @@
// Copyright (C) 2023 Storj Labs, Inc.
// See LICENSE for copying information.

package main

import (
	"encoding/json"
	"io"
	"os"
	"runtime"

	"github.com/spf13/cobra"
	"github.com/zeebo/errs"
	"go.uber.org/zap"

	"storj.io/common/bloomfilter"
	"storj.io/private/process"
	"storj.io/storj/storagenode/iopriority"
	"storj.io/storj/storagenode/pieces"
	"storj.io/storj/storagenode/pieces/lazyfilewalker"
	"storj.io/storj/storagenode/storagenodedb"
)

type filewalkerCfg struct {
	lazyfilewalker.Config
}

// DatabaseConfig returns the storagenodedb.Config that should be used with this LazyFilewalkerConfig.
func (config *filewalkerCfg) DatabaseConfig() storagenodedb.Config {
	return storagenodedb.Config{
		Storage:   config.Storage,
		Info:      config.Info,
		Info2:     config.Info2,
		Pieces:    config.Pieces,
		Filestore: config.Filestore,
		Driver:    config.Driver,
	}
}

func newUsedSpaceFilewalkerCmd() *cobra.Command {
	var cfg filewalkerCfg

	cmd := &cobra.Command{
		Use:   lazyfilewalker.UsedSpaceFilewalkerCmdName,
		Short: "An internal subcommand used to run used-space calculation filewalker as a separate subprocess with lower IO priority",
		RunE: func(cmd *cobra.Command, args []string) error {
			return cmdUsedSpaceFilewalker(cmd, &cfg)
		},
		Hidden: true,
		Args:   cobra.ExactArgs(0),
	}

	process.Bind(cmd, &cfg)

	return cmd
}

func newGCFilewalkerCmd() *cobra.Command {
	var cfg filewalkerCfg

	cmd := &cobra.Command{
		Use:   lazyfilewalker.GCFilewalkerCmdName,
		Short: "An internal subcommand used to run garbage collection filewalker as a separate subprocess with lower IO priority",
		RunE: func(cmd *cobra.Command, args []string) error {
			return cmdGCFilewalker(cmd, &cfg)
		},
		Hidden: true,
		Args:   cobra.ExactArgs(0),
	}

	process.Bind(cmd, &cfg)

	return cmd
}

func cmdUsedSpaceFilewalker(cmd *cobra.Command, cfg *filewalkerCfg) (err error) {
	if runtime.GOOS == "linux" {
		// Pin the current goroutine to the current OS thread, so we can set the IO priority
		// for the current thread.
		// This is necessary because Go does use CLONE_IO when creating new threads,
		// so they do not share a single IO context.
		runtime.LockOSThread()
		defer runtime.UnlockOSThread()
	}

	err = iopriority.SetLowIOPriority()
	if err != nil {
		return err
	}

	ctx, _ := process.Ctx(cmd)
	log := zap.L()

	// We still need the DB in this case because we still have to deal with v0 pieces.
	// Once we drop support for v0 pieces, we can remove this.
	db, err := storagenodedb.OpenExisting(ctx, log.Named("db"), cfg.DatabaseConfig())
	if err != nil {
		return errs.New("Error starting master database on storage node: %v", err)
	}
	log.Info("Database started")
	defer func() {
		err = errs.Combine(err, db.Close())
	}()

	// Decode the data struct received from the main process
	var req lazyfilewalker.UsedSpaceRequest
	if err = json.NewDecoder(io.Reader(os.Stdin)).Decode(&req); err != nil {
		return errs.New("Error decoding data from stdin: %v", err)
	}

	if req.SatelliteID.IsZero() {
		return errs.New("SatelliteID is required")
	}

	filewalker := pieces.NewFileWalker(log, db.Pieces(), db.V0PieceInfo())

	total, contentSize, err := filewalker.WalkAndComputeSpaceUsedBySatellite(ctx, req.SatelliteID)
	if err != nil {
		return err
	}
	resp := lazyfilewalker.UsedSpaceResponse{PiecesTotal: total, PiecesContentSize: contentSize}

	// encode the response struct and write it to stdout
	return json.NewEncoder(io.Writer(os.Stdout)).Encode(resp)
}

func cmdGCFilewalker(cmd *cobra.Command, cfg *filewalkerCfg) (err error) {
	if runtime.GOOS == "linux" {
		// Pin the current goroutine to the current OS thread, so we can set the IO priority
		// for the current thread.
		// This is necessary because Go does use CLONE_IO when creating new threads,
		// so they do not share a single IO context.
		runtime.LockOSThread()
		defer runtime.UnlockOSThread()
	}

	err = iopriority.SetLowIOPriority()
	if err != nil {
		return err
	}

	ctx, _ := process.Ctx(cmd)
	log := zap.L()

	// We still need the DB in this case because we still have to deal with v0 pieces.
	// Once we drop support for v0 pieces, we can remove this.
	db, err := storagenodedb.OpenExisting(ctx, log.Named("db"), cfg.DatabaseConfig())
	if err != nil {
		return errs.New("Error starting master database on storage node: %v", err)
	}
	log.Info("Database started")
	defer func() {
		err = errs.Combine(err, db.Close())
	}()

	// Decode the data struct received from the main process
	var req lazyfilewalker.GCFilewalkerRequest
	if err = json.NewDecoder(io.Reader(os.Stdin)).Decode(&req); err != nil {
		return errs.New("Error decoding data from stdin: %v", err)
	}

	// Validate the request data
	switch {
	case req.SatelliteID.IsZero():
		return errs.New("SatelliteID is required")
	case req.CreatedBefore.IsZero():
		return errs.New("CreatedBefore is required")
	}

	// Decode the bloom filter
	filter, err := bloomfilter.NewFromBytes(req.BloomFilter)
	if err != nil {
		return err
	}

	filewalker := pieces.NewFileWalker(log, db.Pieces(), db.V0PieceInfo())
	pieceIDs, piecesCount, piecesSkippedCount, err := filewalker.WalkSatellitePiecesToTrash(ctx, req.SatelliteID, req.CreatedBefore, filter)
	if err != nil {
		return err
	}

	resp := lazyfilewalker.GCFilewalkerResponse{
		PieceIDs:           pieceIDs,
		PiecesCount:        piecesCount,
		PiecesSkippedCount: piecesSkippedCount,
	}

	// encode the response struct and write it to stdout
	return json.NewEncoder(io.Writer(os.Stdout)).Encode(resp)
}
cmd/storagenode/internalcmd/cmd_lazy_filewalker.go (new file)
@@ -0,0 +1,157 @@
// Copyright (C) 2023 Storj Labs, Inc.
// See LICENSE for copying information.

package internalcmd

import (
	"context"
	"flag"
	"fmt"
	"io"
	"net/url"
	"os"

	"github.com/spf13/cobra"
	"go.uber.org/zap"

	"storj.io/private/process"
	"storj.io/storj/storagenode/pieces/lazyfilewalker"
	"storj.io/storj/storagenode/storagenodedb"
)

// FilewalkerCfg is the config structure for the lazyfilewalker commands.
type FilewalkerCfg struct {
	lazyfilewalker.Config
}

// RunOptions defines the options for the lazyfilewalker runners.
type RunOptions struct {
	Ctx    context.Context
	Logger *zap.Logger
	Config *FilewalkerCfg

	stdin  io.Reader
	stderr io.Writer
	stdout io.Writer
}

type nopWriterSyncCloser struct {
	io.Writer
}

func (cw nopWriterSyncCloser) Close() error { return nil }
func (cw nopWriterSyncCloser) Sync() error  { return nil }

// SetOut sets the stdout writer.
func (r *RunOptions) SetOut(writer io.Writer) {
	r.stdout = writer
}

// SetErr sets the stderr writer.
func (r *RunOptions) SetErr(writer io.Writer) {
	r.stderr = writer
	writerkey := "zapwriter"

	// If the writer is os.Stderr, we don't need to register it because the stderr
	// writer is registered by default.
	if writer == os.Stderr {
		return
	}

	err := zap.RegisterSink(writerkey, func(u *url.URL) (zap.Sink, error) {
		return nopWriterSyncCloser{r.stderr}, nil
	})

	// this error is expected if the sink is already registered.
	duplicateSinkErr := fmt.Errorf("sink factory already registered for scheme %q", writerkey)
	if err != nil && err.Error() != duplicateSinkErr.Error() {
		r.Logger.Error("failed to register logger sink", zap.Error(err))
		return
	}

	err = flag.Set("log.encoding", "json")
	if err != nil {
		r.Logger.Error("failed to set log encoding", zap.Error(err))
		return
	}

	// create a new logger with the writer as the output path.
	path := fmt.Sprintf("%s:subprocess", writerkey)
	logger, err := process.NewLoggerWithOutputPaths("lazyfilewalker", path)
	if err != nil {
		r.Logger.Error("failed to create logger", zap.Error(err))
		return
	}

	// set the logger to the new logger.
	r.Logger = logger
}

// SetIn sets the stdin reader.
func (r *RunOptions) SetIn(reader io.Reader) {
	r.stdin = reader
}

// DefaultRunOpts returns the default RunOptions.
func DefaultRunOpts(ctx context.Context, logger *zap.Logger, config *FilewalkerCfg) *RunOptions {
	return &RunOptions{
		Ctx:    ctx,
		Logger: logger,
		Config: config,
		stdin:  os.Stdin,
		stdout: os.Stdout,
		stderr: os.Stderr,
	}
}

// DatabaseConfig returns the storagenodedb.Config that should be used with this lazyfilewalker.
func (config *FilewalkerCfg) DatabaseConfig() storagenodedb.Config {
	return storagenodedb.Config{
		Storage:   config.Storage,
		Info:      config.Info,
		Info2:     config.Info2,
		Pieces:    config.Pieces,
		Filestore: config.Filestore,
		Driver:    config.Driver,
	}
}

// NewUsedSpaceFilewalkerCmd creates a new cobra command for running used-space calculation filewalker.
func NewUsedSpaceFilewalkerCmd() *cobra.Command {
	var cfg FilewalkerCfg

	cmd := &cobra.Command{
		Use:   lazyfilewalker.UsedSpaceFilewalkerCmdName,
		Short: "An internal subcommand used to run used-space calculation filewalker as a separate subprocess with lower IO priority",
		RunE: func(cmd *cobra.Command, args []string) error {
			ctx, _ := process.Ctx(cmd)
			return NewUsedSpaceLazyFilewalkerWithConfig(ctx, zap.L(), &cfg).Run()
		},
		Hidden: true,
		Args:   cobra.ExactArgs(0),
	}

	process.Bind(cmd, &cfg)

	return cmd
}

// NewGCFilewalkerCmd creates a new cobra command for running garbage collection filewalker.
func NewGCFilewalkerCmd() *cobra.Command {
	var cfg FilewalkerCfg

	cmd := &cobra.Command{
		Use:   lazyfilewalker.GCFilewalkerCmdName,
		Short: "An internal subcommand used to run garbage collection filewalker as a separate subprocess with lower IO priority",
		RunE: func(cmd *cobra.Command, args []string) error {
			ctx, _ := process.Ctx(cmd)
			return NewGCLazyFilewalkerWithConfig(ctx, zap.L(), &cfg).Run()
		},
		Hidden: true,
		Args:   cobra.ExactArgs(0),
	}

	process.Bind(cmd, &cfg)

	return cmd
}
cmd/storagenode/internalcmd/gc_filewalker.go (new file)
@@ -0,0 +1,138 @@
// Copyright (C) 2023 Storj Labs, Inc.
// See LICENSE for copying information.

package internalcmd

import (
	"context"
	"encoding/json"
	"io"
	"runtime"

	"github.com/zeebo/errs"
	"go.uber.org/zap"

	"storj.io/common/bloomfilter"
	"storj.io/storj/storagenode/iopriority"
	"storj.io/storj/storagenode/pieces"
	"storj.io/storj/storagenode/pieces/lazyfilewalker"
	"storj.io/storj/storagenode/pieces/lazyfilewalker/execwrapper"
	"storj.io/storj/storagenode/storagenodedb"
)

// GCLazyFileWalker is an execwrapper.Command for the gc-filewalker.
type GCLazyFileWalker struct {
	*RunOptions
}

var _ execwrapper.Command = (*GCLazyFileWalker)(nil)

// NewGCLazyFilewalker creates a new GCLazyFileWalker instance.
func NewGCLazyFilewalker(ctx context.Context, logger *zap.Logger, config lazyfilewalker.Config) *GCLazyFileWalker {
	return NewGCLazyFilewalkerWithConfig(ctx, logger, &FilewalkerCfg{config})
}

// NewGCLazyFilewalkerWithConfig creates a new GCLazyFileWalker instance with the given config.
func NewGCLazyFilewalkerWithConfig(ctx context.Context, logger *zap.Logger, config *FilewalkerCfg) *GCLazyFileWalker {
	return &GCLazyFileWalker{
		RunOptions: DefaultRunOpts(ctx, logger, config),
	}
}

// Run runs the GCLazyFileWalker.
func (g *GCLazyFileWalker) Run() (err error) {
	if g.Config.LowerIOPriority {
		if runtime.GOOS == "linux" {
			// Pin the current goroutine to the current OS thread, so we can set the IO priority
			// for the current thread.
			// This is necessary because Go does not use CLONE_IO when creating new threads,
			// so they do not share a single IO context.
			runtime.LockOSThread()
			defer runtime.UnlockOSThread()
		}

		err = iopriority.SetLowIOPriority()
		if err != nil {
			return err
		}
	}

	log := g.Logger

	// Decode the data struct received from the main process
	var req lazyfilewalker.GCFilewalkerRequest
	if err = json.NewDecoder(g.stdin).Decode(&req); err != nil {
		return errs.New("Error decoding data from stdin: %v", err)
	}

	// Validate the request data
	switch {
	case req.SatelliteID.IsZero():
		return errs.New("SatelliteID is required")
	case req.CreatedBefore.IsZero():
		return errs.New("CreatedBefore is required")
	}

	// We still need the DB in this case because we still have to deal with v0 pieces.
	// Once we drop support for v0 pieces, we can remove this.
	db, err := storagenodedb.OpenExisting(g.Ctx, log.Named("db"), g.Config.DatabaseConfig())
	if err != nil {
		return errs.New("Error starting master database on storage node: %v", err)
	}
	log.Info("Database started")
	defer func() {
		err = errs.Combine(err, db.Close())
	}()

	// Decode the bloom filter
	filter, err := bloomfilter.NewFromBytes(req.BloomFilter)
	if err != nil {
		return err
	}

	log.Info("gc-filewalker started", zap.Time("created_before", req.CreatedBefore), zap.Int("bloom_filter_size", len(req.BloomFilter)))

	filewalker := pieces.NewFileWalker(log, db.Pieces(), db.V0PieceInfo())
	pieceIDs, piecesCount, piecesSkippedCount, err := filewalker.WalkSatellitePiecesToTrash(g.Ctx, req.SatelliteID, req.CreatedBefore, filter)
	if err != nil {
		return err
	}

	resp := lazyfilewalker.GCFilewalkerResponse{
		PieceIDs:           pieceIDs,
		PiecesCount:        piecesCount,
		PiecesSkippedCount: piecesSkippedCount,
	}

	log.Info("gc-filewalker completed", zap.Int64("pieces_count", piecesCount), zap.Int64("pieces_skipped_count", piecesSkippedCount))

	// encode the response struct and write it to stdout
	return json.NewEncoder(g.stdout).Encode(resp)
}

// Start starts the GCLazyFileWalker, assuming it behaves like the Start method on exec.Cmd.
// This is a no-op and only exists to satisfy the execwrapper.Command interface.
// Wait must be called to actually run the command.
func (g *GCLazyFileWalker) Start() error {
	return nil
}

// Wait waits for the GCLazyFileWalker to finish, assuming it behaves like the Wait method on exec.Cmd.
func (g *GCLazyFileWalker) Wait() error {
	return g.Run()
}

// SetIn sets the stdin of the GCLazyFileWalker.
func (g *GCLazyFileWalker) SetIn(reader io.Reader) {
	g.RunOptions.SetIn(reader)
}

// SetOut sets the stdout of the GCLazyFileWalker.
func (g *GCLazyFileWalker) SetOut(writer io.Writer) {
	g.RunOptions.SetOut(writer)
}

// SetErr sets the stderr of the GCLazyFileWalker.
func (g *GCLazyFileWalker) SetErr(writer io.Writer) {
	g.RunOptions.SetErr(writer)
}
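With the runner above satisfying execwrapper.Command, the GC filewalker can be exercised in-process instead of being spawned as a subprocess, using the same stdin/stdout JSON protocol. A minimal sketch of such a caller (the helper function, its name, and the use of in-memory buffers are illustrative and not part of this change; only the types and methods shown in the diff are assumed):

// Sketch only: driving the GC filewalker in-process through the execwrapper.Command
// methods defined above. The caller supplies a context, a zap logger, a populated
// lazyfilewalker.Config, and a filled-in GCFilewalkerRequest.
package example

import (
	"bytes"
	"context"
	"encoding/json"
	"os"

	"go.uber.org/zap"

	"storj.io/storj/cmd/storagenode/internalcmd"
	"storj.io/storj/storagenode/pieces/lazyfilewalker"
)

func runGCInProcess(ctx context.Context, log *zap.Logger, cfg lazyfilewalker.Config,
	req lazyfilewalker.GCFilewalkerRequest) (resp lazyfilewalker.GCFilewalkerResponse, err error) {
	// The request travels over "stdin" and the response comes back over "stdout",
	// exactly as it would between the supervisor and a real subprocess.
	var stdin, stdout bytes.Buffer
	if err := json.NewEncoder(&stdin).Encode(req); err != nil {
		return resp, err
	}

	cmd := internalcmd.NewGCLazyFilewalker(ctx, log, cfg) // satisfies execwrapper.Command
	cmd.SetIn(&stdin)
	cmd.SetOut(&stdout)
	cmd.SetErr(os.Stderr) // keep the runner's logs on the parent's stderr

	// Start is a no-op for the in-process runner; Wait (or Run) performs the actual walk.
	if err := cmd.Wait(); err != nil {
		return resp, err
	}

	err = json.NewDecoder(&stdout).Decode(&resp)
	return resp, err
}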
cmd/storagenode/internalcmd/used_space_filewalker.go (new file)
@@ -0,0 +1,121 @@
// Copyright (C) 2023 Storj Labs, Inc.
// See LICENSE for copying information.

package internalcmd

import (
	"context"
	"encoding/json"
	"io"
	"runtime"

	"github.com/zeebo/errs"
	"go.uber.org/zap"

	"storj.io/storj/storagenode/iopriority"
	"storj.io/storj/storagenode/pieces"
	"storj.io/storj/storagenode/pieces/lazyfilewalker"
	"storj.io/storj/storagenode/pieces/lazyfilewalker/execwrapper"
	"storj.io/storj/storagenode/storagenodedb"
)

// UsedSpaceLazyFileWalker is an execwrapper.Command for the used-space-filewalker.
type UsedSpaceLazyFileWalker struct {
	*RunOptions
}

var _ execwrapper.Command = (*UsedSpaceLazyFileWalker)(nil)

// NewUsedSpaceLazyFilewalker creates a new UsedSpaceLazyFileWalker instance.
func NewUsedSpaceLazyFilewalker(ctx context.Context, logger *zap.Logger, config lazyfilewalker.Config) *UsedSpaceLazyFileWalker {
	return NewUsedSpaceLazyFilewalkerWithConfig(ctx, logger, &FilewalkerCfg{config})
}

// NewUsedSpaceLazyFilewalkerWithConfig creates a new UsedSpaceLazyFileWalker instance with the given config.
func NewUsedSpaceLazyFilewalkerWithConfig(ctx context.Context, logger *zap.Logger, config *FilewalkerCfg) *UsedSpaceLazyFileWalker {
	return &UsedSpaceLazyFileWalker{
		RunOptions: DefaultRunOpts(ctx, logger, config),
	}
}

// Run runs the UsedSpaceLazyFileWalker.
func (u *UsedSpaceLazyFileWalker) Run() (err error) {
	if u.Config.LowerIOPriority {
		if runtime.GOOS == "linux" {
			// Pin the current goroutine to the current OS thread, so we can set the IO priority
			// for the current thread.
			// This is necessary because Go does not use CLONE_IO when creating new threads,
			// so they do not share a single IO context.
			runtime.LockOSThread()
			defer runtime.UnlockOSThread()
		}

		err = iopriority.SetLowIOPriority()
		if err != nil {
			return err
		}
	}
	log := u.Logger

	// Decode the data struct received from the main process
	var req lazyfilewalker.UsedSpaceRequest
	if err = json.NewDecoder(u.stdin).Decode(&req); err != nil {
		return errs.New("Error decoding data from stdin: %v", err)
	}

	if req.SatelliteID.IsZero() {
		return errs.New("SatelliteID is required")
	}

	// We still need the DB in this case because we still have to deal with v0 pieces.
	// Once we drop support for v0 pieces, we can remove this.
	db, err := storagenodedb.OpenExisting(u.Ctx, log.Named("db"), u.Config.DatabaseConfig())
	if err != nil {
		return errs.New("Error starting master database on storage node: %v", err)
	}
	log.Info("Database started")
	defer func() {
		err = errs.Combine(err, db.Close())
	}()

	log.Info("used-space-filewalker started")

	filewalker := pieces.NewFileWalker(log, db.Pieces(), db.V0PieceInfo())
	total, contentSize, err := filewalker.WalkAndComputeSpaceUsedBySatellite(u.Ctx, req.SatelliteID)
	if err != nil {
		return err
	}
	resp := lazyfilewalker.UsedSpaceResponse{PiecesTotal: total, PiecesContentSize: contentSize}

	log.Info("used-space-filewalker completed", zap.Int64("pieces_total", total), zap.Int64("content_size", contentSize))

	// encode the response struct and write it to stdout
	return json.NewEncoder(u.stdout).Encode(resp)
}

// Start starts the UsedSpaceLazyFileWalker, assuming it behaves like the Start method on exec.Cmd.
// This is a no-op and only exists to satisfy the execwrapper.Command interface.
// Wait must be called to actually run the command.
func (u *UsedSpaceLazyFileWalker) Start() error {
	return nil
}

// Wait waits for the UsedSpaceLazyFileWalker to finish, assuming it behaves like the Wait method on exec.Cmd.
func (u *UsedSpaceLazyFileWalker) Wait() error {
	return u.Run()
}

// SetIn sets the stdin of the UsedSpaceLazyFileWalker.
func (u *UsedSpaceLazyFileWalker) SetIn(reader io.Reader) {
	u.RunOptions.SetIn(reader)
}

// SetOut sets the stdout of the UsedSpaceLazyFileWalker.
func (u *UsedSpaceLazyFileWalker) SetOut(writer io.Writer) {
	u.RunOptions.SetOut(writer)
}

// SetErr sets the stderr of the UsedSpaceLazyFileWalker.
func (u *UsedSpaceLazyFileWalker) SetErr(writer io.Writer) {
	u.RunOptions.SetErr(writer)
}
@@ -9,6 +9,7 @@ import (

	"storj.io/common/fpath"
	"storj.io/private/cfgstruct"
	"storj.io/storj/cmd/storagenode/internalcmd"
	"storj.io/storj/storagenode"
)

@@ -59,8 +60,8 @@ func newRootCmd(setDefaults bool) (*cobra.Command, *Factory) {
		newGracefulExitInitCmd(factory),
		newGracefulExitStatusCmd(factory),
		// internal hidden commands
		newUsedSpaceFilewalkerCmd(),
		newGCFilewalkerCmd(),
		internalcmd.NewUsedSpaceFilewalkerCmd(),
		internalcmd.NewGCFilewalkerCmd(),
	)

	return cmd, factory

@@ -75,6 +75,9 @@ var (
//
// architecture: Master Database
type DB interface {
	// Config returns the configuration used to initialize the database.
	Config() storagenodedb.Config

	// MigrateToLatest initializes the database
	MigrateToLatest(ctx context.Context) error

@@ -466,16 +469,7 @@ func New(log *zap.Logger, full *identity.FullIdentity, db DB, revocationDB exten
			return nil, errs.Combine(err, peer.Close())
		}

		dbCfg := config.DatabaseConfig()
		lazyfilewalkerCfg := lazyfilewalker.Config{
			Storage:   dbCfg.Storage,
			Info:      dbCfg.Info,
			Info2:     dbCfg.Info2,
			Pieces:    dbCfg.Pieces,
			Filestore: dbCfg.Filestore,
			Driver:    dbCfg.Driver,
		}
		peer.Storage2.LazyFileWalker = lazyfilewalker.NewSupervisor(peer.Log.Named("lazyfilewalker"), executable, lazyfilewalkerCfg.Args())
		peer.Storage2.LazyFileWalker = lazyfilewalker.NewSupervisor(peer.Log.Named("lazyfilewalker"), db.Config().LazyFilewalkerConfig(), executable)
	}

	peer.Storage2.Store = pieces.NewStore(peer.Log.Named("pieces"),

@@ -4,6 +4,8 @@
package lazyfilewalker

import (
	"strconv"

	"storj.io/storj/storagenode/blobstore/filestore"
)

@@ -16,6 +18,8 @@ type Config struct {
	Driver    string `help:"database driver to use" default:"sqlite3"`
	Pieces    string `help:"path to store pieces in"`
	Filestore filestore.Config

	LowerIOPriority bool `help:"if true, the process will run with lower IO priority" default:"true"`
}

// Args returns the flags to be passed lazyfilewalker process.

@@ -33,5 +37,6 @@ func (config *Config) Args() []string {
		// use the json formatter in the subprocess, so we could read lines and re-log them in the main process
		// with all the fields intact.
		"--log.encoding", "json",
		"--lower-io-priority", strconv.FormatBool(config.LowerIOPriority),
	}
}

@@ -45,11 +45,11 @@ type Supervisor struct {
}

// NewSupervisor creates a new lazy filewalker Supervisor.
func NewSupervisor(log *zap.Logger, executable string, args []string) *Supervisor {
func NewSupervisor(log *zap.Logger, config Config, executable string) *Supervisor {
	return &Supervisor{
		log:           log,
		gcArgs:        append([]string{GCFilewalkerCmdName}, args...),
		usedSpaceArgs: append([]string{UsedSpaceFilewalkerCmdName}, args...),
		gcArgs:        append([]string{GCFilewalkerCmdName}, config.Args()...),
		usedSpaceArgs: append([]string{UsedSpaceFilewalkerCmdName}, config.Args()...),
		executable:    executable,
	}
}

@@ -31,6 +31,7 @@ import (
	"storj.io/storj/storagenode/orders"
	"storj.io/storj/storagenode/payouts"
	"storj.io/storj/storagenode/pieces"
	"storj.io/storj/storagenode/pieces/lazyfilewalker"
	"storj.io/storj/storagenode/pricing"
	"storj.io/storj/storagenode/reputation"
	"storj.io/storj/storagenode/satellites"

@@ -87,6 +88,21 @@ type Config struct {
	TestingDisableWAL bool
}

// LazyFilewalkerConfig creates a lazyfilewalker.Config from storagenodedb.Config.
//
// TODO: this is a temporary solution to avoid circular dependencies.
func (config Config) LazyFilewalkerConfig() lazyfilewalker.Config {
	return lazyfilewalker.Config{
		Storage:         config.Storage,
		Info:            config.Info,
		Info2:           config.Info2,
		Driver:          config.Driver,
		Pieces:          config.Pieces,
		Filestore:       config.Filestore,
		LowerIOPriority: true,
	}
}

// DB contains access to different database tables.
type DB struct {
	log *zap.Logger

@@ -571,6 +587,11 @@ func (db *DB) DBDirectory() string {
	return db.dbDirectory
}

// Config returns the database configuration used to initialize the database.
func (db *DB) Config() Config {
	return db.config
}

// migrateToDB is a helper method that performs the migration from the
// deprecatedInfoDB to the specified new db. It first closes and deletes any
// existing database to guarantee idempotence. After migration it also closes