all: fix dots
Change-Id: I6a419c62700c568254ff67ae5b73efed2fc98aa2
parent 0a800336cb
commit 080ba47a06
@@ -167,7 +167,7 @@ func (a Authorization) String() string {
return fmt.Sprintf("%."+fmtLen+"s..", a.Token.String())
}

// Equal checks if two tokens have equal user IDs and data
// Equal checks if two tokens have equal user IDs and data.
func (t *Token) Equal(cmpToken *Token) bool {
return t.UserID == cmpToken.UserID && bytes.Equal(t.Data[:], cmpToken.Data[:])
}

@@ -24,7 +24,7 @@ type Config struct {
TLS tlsopts.Config
}

// Client implements pb.DRPCCertificatesClient
// Client implements pb.DRPCCertificatesClient.
type Client struct {
conn *rpc.Conn
client pb.DRPCCertificatesClient

@@ -29,7 +29,7 @@ import (
"storj.io/storj/pkg/server"
)

// TODO: test sad path
// TODO: test sad path.
func TestCertificateSigner_Sign_E2E(t *testing.T) {
testidentity.SignerVersionsTest(t, func(t *testing.T, _ storj.IDVersion, signer *identity.FullCertificateAuthority) {
testidentity.CompleteIdentityVersionsTest(t, func(t *testing.T, _ storj.IDVersion, serverIdent *identity.FullIdentity) {

@@ -119,7 +119,7 @@ func NewInspector(ctx context.Context, address, path string) (*Inspector, error)
// Close closes the inspector.
func (i *Inspector) Close() error { return i.conn.Close() }

// ObjectHealth gets information about the health of an object on the network
// ObjectHealth gets information about the health of an object on the network.
func ObjectHealth(cmd *cobra.Command, args []string) (err error) {
ctx, _ := process.Ctx(cmd)
i, err := NewInspector(ctx, *Addr, *IdentityPath)

@@ -201,7 +201,7 @@ func ObjectHealth(cmd *cobra.Command, args []string) (err error) {
return nil
}

// SegmentHealth gets information about the health of a segment on the network
// SegmentHealth gets information about the health of a segment on the network.
func SegmentHealth(cmd *cobra.Command, args []string) (err error) {
ctx, _ := process.Ctx(cmd)
i, err := NewInspector(ctx, *Addr, *IdentityPath)

@@ -415,7 +415,7 @@ func getSegments(cmd *cobra.Command, args []string) error {
return nil
}

// sortSegments by the object they belong to
// sortSegments by the object they belong to.
func sortSegments(segments []*pb.IrreparableSegment) map[string][]*pb.IrreparableSegment {
objects := make(map[string][]*pb.IrreparableSegment)
for _, seg := range segments {

@@ -105,7 +105,7 @@ func ReadFile(path string) (*Asset, error) {
return asset, nil
}

// readFiles adds all nested files to asset
// readFiles adds all nested files to asset.
func (asset *Asset) readFiles(dir string, infos []os.FileInfo) error {
for _, info := range infos {
child, err := ReadFile(filepath.Join(dir, info.Name()))

@@ -14,13 +14,13 @@ import (

var _ http.FileSystem = (*InmemoryFileSystem)(nil)

// InmemoryFileSystem defines an inmemory http.FileSystem
// InmemoryFileSystem defines an inmemory http.FileSystem.
type InmemoryFileSystem struct {
Root *Asset
Index map[string]*Asset
}

// Inmemory creates an InmemoryFileSystem from
// Inmemory creates an InmemoryFileSystem from.
func Inmemory(root *Asset) *InmemoryFileSystem {
fs := &InmemoryFileSystem{}
fs.Root = root

@@ -29,7 +29,7 @@ func Inmemory(root *Asset) *InmemoryFileSystem {
return fs
}

// reindex inserts a node to the index
// reindex inserts a node to the index.
func (fs *InmemoryFileSystem) reindex(prefix, name string, file *Asset) {
fs.Index[path.Join(prefix, name)] = file
for _, child := range file.Children {

@@ -51,7 +51,7 @@ func (asset *Asset) File() *File {
return &File{*bytes.NewReader(asset.Data), asset}
}

// File defines a readable file
// File defines a readable file.
type File struct {
bytes.Reader
*Asset

@@ -98,20 +98,20 @@ type FileInfo struct {
modTime time.Time
}

// Name implements os.FileInfo
// Name implements os.FileInfo.
func (info FileInfo) Name() string { return info.name }

// Size implements os.FileInfo
// Size implements os.FileInfo.
func (info FileInfo) Size() int64 { return info.size }

// Mode implements os.FileInfo
// Mode implements os.FileInfo.
func (info FileInfo) Mode() os.FileMode { return info.mode }

// ModTime implements os.FileInfo
// ModTime implements os.FileInfo.
func (info FileInfo) ModTime() time.Time { return info.modTime }

// IsDir implements os.FileInfo
// IsDir implements os.FileInfo.
func (info FileInfo) IsDir() bool { return info.mode.IsDir() }

// Sys implements os.FileInfo
// Sys implements os.FileInfo.
func (info FileInfo) Sys() interface{} { return nil }
@@ -17,7 +17,7 @@ import (
"storj.io/common/storj"
)

// PromptForAccessName handles user input for access name to be used with wizards
// PromptForAccessName handles user input for access name to be used with wizards.
func PromptForAccessName() (string, error) {
_, err := fmt.Printf("Choose an access name (use lowercase letters) [\"default\"]: ")
if err != nil {

@@ -41,7 +41,7 @@ func PromptForAccessName() (string, error) {
return accessName, nil
}

// PromptForSatellite handles user input for a satellite address to be used with wizards
// PromptForSatellite handles user input for a satellite address to be used with wizards.
func PromptForSatellite(cmd *cobra.Command) (string, error) {
satellites := []string{
"12EayRS2V1kEsWESU9QMRseFhdxYxKicsiFmxrsLZHeLUtdps3S@us-central-1.tardigrade.io:7777",

@@ -115,7 +115,7 @@ func PromptForSatellite(cmd *cobra.Command) (string, error) {
return satelliteAddress, nil
}

// PromptForAPIKey handles user input for an API key to be used with wizards
// PromptForAPIKey handles user input for an API key to be used with wizards.
func PromptForAPIKey() (string, error) {
_, err := fmt.Print("Enter your API key: ")
if err != nil {

@@ -134,7 +134,7 @@ func PromptForAPIKey() (string, error) {
return apiKey, nil
}

// PromptForEncryptionPassphrase handles user input for an encryption passphrase to be used with wizards
// PromptForEncryptionPassphrase handles user input for an encryption passphrase to be used with wizards.
func PromptForEncryptionPassphrase() (string, error) {
_, err := fmt.Print("Enter your encryption passphrase: ")
if err != nil {

@@ -25,7 +25,7 @@ import (
"storj.io/storj/satellite/satellitedb"
)

// generateGracefulExitCSV creates a report with graceful exit data for exiting or exited nodes in a given period
// generateGracefulExitCSV creates a report with graceful exit data for exiting or exited nodes in a given period.
func generateGracefulExitCSV(ctx context.Context, completed bool, start time.Time, end time.Time, output io.Writer) error {
db, err := satellitedb.New(zap.L().Named("db"), gracefulExitCfg.Database, satellitedb.Options{})
if err != nil {

@@ -37,7 +37,7 @@ import (
"storj.io/storj/satellite/satellitedb/dbx"
)

// Satellite defines satellite configuration
// Satellite defines satellite configuration.
type Satellite struct {
Database string `help:"satellite database connection string" releaseDefault:"postgres://" devDefault:"postgres://"`

@@ -55,7 +55,7 @@ type Satellite struct {
satellite.Config
}

// APIKeysLRUOptions returns a cache.Options based on the APIKeys LRU config
// APIKeysLRUOptions returns a cache.Options based on the APIKeys LRU config.
func (s *Satellite) APIKeysLRUOptions() cache.Options {
return cache.Options{
Expiration: s.DatabaseOptions.APIKeysCache.Expiration,

@@ -63,7 +63,7 @@ func (s *Satellite) APIKeysLRUOptions() cache.Options {
}
}

// RevocationLRUOptions returns a cache.Options based on the Revocations LRU config
// RevocationLRUOptions returns a cache.Options based on the Revocations LRU config.
func (s *Satellite) RevocationLRUOptions() cache.Options {
return cache.Options{
Expiration: s.DatabaseOptions.RevocationsCache.Expiration,

@@ -29,7 +29,7 @@ var headers = []string{
"bytes:BWEgress",
}

// GenerateAttributionCSV creates a report with
// GenerateAttributionCSV creates a report with.
func GenerateAttributionCSV(ctx context.Context, database string, partnerID uuid.UUID, start time.Time, end time.Time, output io.Writer) error {
log := zap.L().Named("db")
db, err := satellitedb.New(log, database, satellitedb.Options{})

@@ -19,7 +19,7 @@ import (
"storj.io/storj/satellite/satellitedb"
)

// generateNodeUsageCSV creates a report with node usage data for all nodes in a given period which can be used for payments
// generateNodeUsageCSV creates a report with node usage data for all nodes in a given period which can be used for payments.
func generateNodeUsageCSV(ctx context.Context, start time.Time, end time.Time, output io.Writer) error {
db, err := satellitedb.New(zap.L().Named("db"), nodeUsageCfg.Database, satellitedb.Options{})
if err != nil {
@@ -573,7 +573,7 @@ func TestObserver_findZombieSegments(t *testing.T) {
}

// segmentRef is an object segment reference to be used for simulating calls to
// observer.processSegment
// observer.processSegment.
type segmentRef struct {
path metainfo.ScopedPath
pointer *pb.Pointer

@@ -145,7 +145,7 @@ func printDashboard(data *pb.DashboardResponse) error {
return nil
}

// clearScreen clears the screen so it can be redrawn
// clearScreen clears the screen so it can be redrawn.
func clearScreen() {
switch runtime.GOOS {
case "linux", "darwin":

@@ -12,7 +12,7 @@ import (
"go.uber.org/zap"
)

// Deprecated contains deprecated config structs
// Deprecated contains deprecated config structs.
type Deprecated struct {
Kademlia struct {
ExternalAddress string `default:"" hidden:"true"`

@@ -23,7 +23,7 @@ type Deprecated struct {
}
}

// maps deprecated config values to new values if applicable
// maps deprecated config values to new values if applicable.
func mapDeprecatedConfigs(log *zap.Logger) {
type migration struct {
newValue interface{}

@@ -27,7 +27,7 @@ import (
"storj.io/storj/storagenode/storagenodedb"
)

// StorageNodeFlags defines storage node configuration
// StorageNodeFlags defines storage node configuration.
type StorageNodeFlags struct {
EditConf bool `default:"false" help:"open config in default editor"`

@@ -39,7 +39,7 @@ var (
setupCfg AdminConf
)

// AdminConf defines necessary configuration to run the storj-admin UI
// AdminConf defines necessary configuration to run the storj-admin UI.
type AdminConf struct {
AuthKey string `help:"API authorization key" default:""`
Address string `help:"address to start the web server on" default:":8080"`

@@ -9,7 +9,7 @@ import (
"os/signal"
)

// NewCLIContext creates a context that can be canceled with Ctrl-C
// NewCLIContext creates a context that can be canceled with Ctrl-C.
func NewCLIContext(root context.Context) (context.Context, func()) {
// trap Ctrl+C and call cancel on the context
ctx, cancel := context.WithCancel(root)

@@ -11,7 +11,7 @@ import (
"storj.io/common/fpath"
)

// Flags contains different flags for commands
// Flags contains different flags for commands.
type Flags struct {
Directory string
Host string

@@ -203,7 +203,7 @@ func networkDestroy(flags *Flags, args []string) error {
return os.RemoveAll(flags.Directory)
}

// newNetwork creates a default network
// newNetwork creates a default network.
func newNetwork(flags *Flags) (*Processes, error) {
_, filename, _, ok := runtime.Caller(0)
if !ok {

@@ -667,7 +667,7 @@ func identitySetup(network *Processes) (*Processes, error) {
return processes, nil
}

// readConfigString reads from dir/config.yaml flagName returns the value in `into`
// readConfigString reads from dir/config.yaml flagName returns the value in `into`.
func readConfigString(into *string, dir, flagName string) error {
vip := viper.New()
vip.AddConfigPath(dir)
@@ -69,7 +69,7 @@ func (writer *PrefixWriter) Write(data []byte) (int, error) {
return writer.root.Write(data)
}

// Write implements io.Writer that prefixes lines
// Write implements io.Writer that prefixes lines.
func (writer *prefixWriter) Write(data []byte) (int, error) {
if len(data) == 0 {
return 0, nil

@@ -23,7 +23,7 @@ import (
"storj.io/common/sync2"
)

// Processes contains list of processes
// Processes contains list of processes.
type Processes struct {
Output *PrefixWriter
Directory string

@@ -32,7 +32,7 @@ type Processes struct {
MaxStartupWait time.Duration
}

// NewProcesses returns a group of processes
// NewProcesses returns a group of processes.
func NewProcesses(dir string) *Processes {
return &Processes{
Output: NewPrefixWriter("sim", os.Stdout),

@@ -42,14 +42,14 @@ func NewProcesses(dir string) *Processes {
}
}

// Exec executes a command on all processes
// Exec executes a command on all processes.
func (processes *Processes) Exec(ctx context.Context, command string) error {
var group errgroup.Group
processes.Start(ctx, &group, command)
return group.Wait()
}

// Start executes all processes using specified errgroup.Group
// Start executes all processes using specified errgroup.Group.
func (processes *Processes) Start(ctx context.Context, group *errgroup.Group, command string) {
for _, p := range processes.List {
process := p

@@ -59,7 +59,7 @@ func (processes *Processes) Start(ctx context.Context, group *errgroup.Group, co
}
}

// Env returns environment flags for other nodes
// Env returns environment flags for other nodes.
func (processes *Processes) Env() []string {
var env []string
for _, process := range processes.List {

@@ -68,7 +68,7 @@ func (processes *Processes) Env() []string {
return env
}

// Close closes all the processes and their resources
// Close closes all the processes and their resources.
func (processes *Processes) Close() error {
var errlist errs.Group
for _, process := range processes.List {

@@ -77,7 +77,7 @@ func (processes *Processes) Close() error {
return errlist.Err()
}

// Info represents public information about the process
// Info represents public information about the process.
type Info struct {
Name string
Executable string

@@ -88,7 +88,7 @@ type Info struct {
Extra []EnvVar
}

// EnvVar represents an environment variable like Key=Value
// EnvVar represents an environment variable like Key=Value.
type EnvVar struct {
Key string
Value string

@@ -99,7 +99,7 @@ func (info *Info) AddExtra(key, value string) {
info.Extra = append(info.Extra, EnvVar{Key: key, Value: value})
}

// Env returns process flags
// Env returns process flags.
func (info *Info) Env() []string {
name := strings.ToUpper(info.Name)
@@ -135,10 +135,10 @@ func (info *Info) Env() []string {
return env
}

// Arguments contains arguments based on the main command
// Arguments contains arguments based on the main command.
type Arguments map[string][]string

// Process is a type for monitoring the process
// Process is a type for monitoring the process.
type Process struct {
processes *Processes

@@ -158,7 +158,7 @@ type Process struct {
stderr io.Writer
}

// New creates a process which can be run in the specified directory
// New creates a process which can be run in the specified directory.
func (processes *Processes) New(info Info) *Process {
output := processes.Output.Prefixed(info.Name)

@@ -187,7 +187,7 @@ func (process *Process) WaitForExited(dependency *Process) {
process.Wait = append(process.Wait, &dependency.Status.Exited)
}

// Exec runs the process using the arguments for a given command
// Exec runs the process using the arguments for a given command.
func (process *Process) Exec(ctx context.Context, command string) (err error) {
// ensure that we always release all status fences
defer process.Status.Started.Release()

@@ -318,7 +318,7 @@ func (process *Process) waitForAddress(maxStartupWait time.Duration) error {
return nil
}

// tryConnect will try to connect to the process public address
// tryConnect will try to connect to the process public address.
func (process *Process) tryConnect() bool {
conn, err := net.Dial("tcp", process.Info.Address)
if err != nil {

@@ -331,5 +331,5 @@ func (process *Process) tryConnect() bool {
return true
}

// Close closes process resources
// Close closes process resources.
func (process *Process) Close() error { return nil }

@@ -20,7 +20,7 @@ func init() {
}, RootCmd)
}

// catMain is the function executed when catCmd is called
// catMain is the function executed when catCmd is called.
func catMain(cmd *cobra.Command, args []string) (err error) {
if len(args) == 0 {
return fmt.Errorf("no object specified for copy")

@@ -20,7 +20,7 @@ type ClientConfig struct {
DialTimeout time.Duration `help:"timeout for dials" default:"0h2m00s"`
}

// Config uplink configuration
// Config uplink configuration.
type Config struct {
AccessConfig
Client ClientConfig

@@ -42,7 +42,7 @@ func init() {
setBasicFlags(cpCmd.Flags(), "progress", "expires", "metadata")
}

// upload transfers src from local machine to s3 compatible object dst
// upload transfers src from local machine to s3 compatible object dst.
func upload(ctx context.Context, src fpath.FPath, dst fpath.FPath, showProgress bool) (err error) {
if !src.IsLocal() {
return fmt.Errorf("source must be local path: %s", src)

@@ -151,7 +151,7 @@ func upload(ctx context.Context, src fpath.FPath, dst fpath.FPath, showProgress
return nil
}

// download transfers s3 compatible object src to dst on local machine
// download transfers s3 compatible object src to dst on local machine.
func download(ctx context.Context, src fpath.FPath, dst fpath.FPath, showProgress bool) (err error) {
if src.IsLocal() {
return fmt.Errorf("source must be Storj URL: %s", src)

@@ -218,7 +218,7 @@ func download(ctx context.Context, src fpath.FPath, dst fpath.FPath, showProgres
return nil
}

// copy copies s3 compatible object src to s3 compatible object dst
// copy copies s3 compatible object src to s3 compatible object dst.
func copyObject(ctx context.Context, src fpath.FPath, dst fpath.FPath) (err error) {
if src.IsLocal() {
return fmt.Errorf("source must be Storj URL: %s", src)
@@ -48,7 +48,7 @@ func init() {
cfgstruct.SetBoolAnnotation(importCmd.Flags(), "access", cfgstruct.BasicHelpAnnotationName, false)
}

// importMain is the function executed when importCmd is called
// importMain is the function executed when importCmd is called.
func importMain(cmd *cobra.Command, args []string) (err error) {
if cmd.Flag("access").Changed {
return ErrAccessFlag

@@ -20,7 +20,7 @@ func init() {
}, RootCmd)
}

// putMain is the function executed when putCmd is called
// putMain is the function executed when putCmd is called.
func putMain(cmd *cobra.Command, args []string) (err error) {
if len(args) == 0 {
return fmt.Errorf("no object specified for copy")

@@ -31,7 +31,7 @@ import (

const advancedFlagName = "advanced"

// UplinkFlags configuration flags
// UplinkFlags configuration flags.
type UplinkFlags struct {
Config

@@ -66,7 +66,7 @@ func init() {
var cpuProfile = flag.String("profile.cpu", "", "file path of the cpu profile to be created")
var memoryProfile = flag.String("profile.mem", "", "file path of the memory profile to be created")

// RootCmd represents the base CLI command when called without any subcommands
// RootCmd represents the base CLI command when called without any subcommands.
var RootCmd = &cobra.Command{
Use: "uplink",
Short: "The Storj client-side CLI",

@@ -71,7 +71,7 @@ func parseHumanDate(date string, now time.Time) (time.Time, error) {
}
}

// shareMain is the function executed when shareCmd is called
// shareMain is the function executed when shareCmd is called.
func shareMain(cmd *cobra.Command, args []string) (err error) {
now := time.Now()
notBefore, err := parseHumanDate(shareCfg.NotBefore, now)

@@ -38,7 +38,7 @@ func main() {
}
}

// Main is the exported CLI executable function
// Main is the exported CLI executable function.
func Main() error {
ctx := context.Background()
encKey := storj.Key(sha256.Sum256([]byte(*key)))

@@ -27,7 +27,7 @@ func main() {
}
}

// Main is the exported CLI executable function
// Main is the exported CLI executable function.
func Main() error {
pieces, err := ioutil.ReadDir(flag.Arg(0))
if err != nil {

@@ -47,7 +47,7 @@ func main() {
}
}

// Main is the exported CLI executable function
// Main is the exported CLI executable function.
func Main() error {
encKey := storj.Key(sha256.Sum256([]byte(*key)))
fc, err := infectious.NewFEC(*rsk, *rsn)

@@ -40,7 +40,7 @@ func main() {
}
}

// Main is the exported CLI executable function
// Main is the exported CLI executable function.
func Main() error {
err := os.MkdirAll(flag.Arg(0), 0755)
if err != nil {

@@ -11,12 +11,12 @@ import (
// other packages.
type apikey struct{}

// WithAPIKey creates context with api key
// WithAPIKey creates context with api key.
func WithAPIKey(ctx context.Context, key []byte) context.Context {
return context.WithValue(ctx, apikey{}, key)
}

// GetAPIKey returns api key from context is exists
// GetAPIKey returns api key from context is exists.
func GetAPIKey(ctx context.Context) ([]byte, bool) {
key, ok := ctx.Value(apikey{}).([]byte)
return key, ok
@@ -21,7 +21,7 @@ func NewDBFromCfg(cfg tlsopts.Config) (*DB, error) {
return NewDB(cfg.RevocationDBURL)
}

// NewDB returns a new revocation database given the URL
// NewDB returns a new revocation database given the URL.
func NewDB(dbURL string) (*DB, error) {
driver, source, _, err := dbutil.SplitConnStr(dbURL)
if err != nil {

@@ -45,7 +45,7 @@ func NewDB(dbURL string) (*DB, error) {
return db, nil
}

// newDBBolt creates a bolt-backed DB
// newDBBolt creates a bolt-backed DB.
func newDBBolt(path string) (*DB, error) {
client, err := boltdb.New(path, extensions.RevocationBucket)
if err != nil {

@@ -100,7 +100,7 @@ func (db *DB) Put(ctx context.Context, chain []*x509.Certificate, revExt pkix.Ex
return nil
}

// List lists all revocations in the store
// List lists all revocations in the store.
func (db *DB) List(ctx context.Context) (revs []*extensions.Revocation, err error) {
defer mon.Task()(&ctx)(&err)

@@ -134,7 +134,7 @@ func (db *DB) TestGetStore() storage.KeyValueStore {
return db.store
}

// Close closes the underlying store
// Close closes the underlying store.
func (db *DB) Close() error {
if db.store == nil {
return nil

@@ -23,7 +23,7 @@ import (
"storj.io/storj/pkg/listenmux"
)

// Config holds server specific configuration parameters
// Config holds server specific configuration parameters.
type Config struct {
tlsopts.Config
Address string `user:"true" help:"public address to listen on" default:":7777"`

@@ -99,22 +99,22 @@ func New(log *zap.Logger, tlsOptions *tlsopts.Options, publicAddr, privateAddr s
return server, nil
}

// Identity returns the server's identity
// Identity returns the server's identity.
func (p *Server) Identity() *identity.FullIdentity { return p.tlsOptions.Ident }

// Addr returns the server's public listener address
// Addr returns the server's public listener address.
func (p *Server) Addr() net.Addr { return p.public.listener.Addr() }

// PrivateAddr returns the server's private listener address
// PrivateAddr returns the server's private listener address.
func (p *Server) PrivateAddr() net.Addr { return p.private.listener.Addr() }

// DRPC returns the server's dRPC mux for registration purposes
// DRPC returns the server's dRPC mux for registration purposes.
func (p *Server) DRPC() *drpcmux.Mux { return p.public.mux }

// PrivateDRPC returns the server's dRPC mux for registration purposes
// PrivateDRPC returns the server's dRPC mux for registration purposes.
func (p *Server) PrivateDRPC() *drpcmux.Mux { return p.private.mux }

// Close shuts down the server
// Close shuts down the server.
func (p *Server) Close() error {
p.mu.Lock()
defer p.mu.Unlock()

@@ -132,7 +132,7 @@ func (p *Server) Close() error {
return nil
}

// Run will run the server and all of its services
// Run will run the server and all of its services.
func (p *Server) Run(ctx context.Context) (err error) {
defer mon.Task()(&ctx)(&err)
@@ -22,10 +22,10 @@ const padding = 2
// Y is the row
type Point struct{ X, Y int }

// Rect is a 2D rectangle in console, excluding Max edge
// Rect is a 2D rectangle in console, excluding Max edge.
type Rect struct{ Min, Max Point }

// Screen is a writable area on screen
// Screen is a writable area on screen.
type Screen struct {
rendering sync.Mutex

@@ -81,7 +81,7 @@ func (screen *Screen) Close() error {
return nil
}

// Run runs the event loop
// Run runs the event loop.
func (screen *Screen) Run() error {
defer screen.markClosed()

@@ -124,10 +124,10 @@ func (screen *Screen) Size() (width, height int) {
return width, height
}

// Lock screen for exclusive rendering
// Lock screen for exclusive rendering.
func (screen *Screen) Lock() { screen.rendering.Lock() }

// Unlock screen
// Unlock screen.
func (screen *Screen) Unlock() { screen.rendering.Unlock() }

// Write writes to the screen.

@@ -152,7 +152,7 @@ func (screen *Screen) Flush() error {
return err
}

// blit writes content to the console
// blit writes content to the console.
func (screen *Screen) blit(frame *frame) error {
screen.flushed.content = frame.content
size := screen.flushed.size

@@ -191,7 +191,7 @@ var lightStyle = rectStyle{
{'└', '─', '┘'},
}

// drawRect draws a rectangle using termbox
// drawRect draws a rectangle using termbox.
func drawRect(r Rect, style rectStyle) {
attr := termbox.ColorDefault

@@ -6,14 +6,14 @@ package date

import "time"

// MonthBoundary extract month from the provided date and returns its edges
// MonthBoundary extract month from the provided date and returns its edges.
func MonthBoundary(t time.Time) (time.Time, time.Time) {
startDate := time.Date(t.Year(), t.Month(), 1, 0, 0, 0, 0, t.Location())
endDate := time.Date(t.Year(), t.Month()+1, 1, 0, 0, 0, -1, t.Location())
return startDate, endDate
}

// DayBoundary returns start and end of the provided day
// DayBoundary returns start and end of the provided day.
func DayBoundary(t time.Time) (time.Time, time.Time) {
return time.Date(t.Year(), t.Month(), t.Day(), 0, 0, 0, 0, t.Location()),
time.Date(t.Year(), t.Month(), t.Day()+1, 0, 0, 0, -1, t.Location())

@@ -36,7 +36,7 @@ func MonthsCountSince(from time.Time) int {
return MonthsBetweenDates(from, time.Now())
}

// MonthsBetweenDates calculates amount of months between two dates
// MonthsBetweenDates calculates amount of months between two dates.
func MonthsBetweenDates(from time.Time, to time.Time) int {
// we need UTC here before its the only sensible way to say what day it is
y1, M1, _ := from.UTC().Date()

@@ -47,7 +47,7 @@ func MonthsBetweenDates(from time.Time, to time.Time) int {
return months
}

// TruncateToHourInNano returns the time truncated to the hour in nanoseconds
// TruncateToHourInNano returns the time truncated to the hour in nanoseconds.
func TruncateToHourInNano(t time.Time) int64 {
return t.Truncate(1 * time.Hour).UnixNano()
}

@@ -3,7 +3,7 @@

package dbutil

// Implementation type of valid DBs
// Implementation type of valid DBs.
type Implementation int

const (
@@ -13,12 +13,12 @@ import (
"github.com/zeebo/errs"
)

// Data is the database content formatted as strings
// Data is the database content formatted as strings.
type Data struct {
Tables []*TableData
}

// TableData is content of a sql table
// TableData is content of a sql table.
type TableData struct {
Name string
Columns []string

@@ -36,7 +36,7 @@ func (c ColumnData) String() string {
return fmt.Sprintf("%s:%s", c.Column, c.Value)
}

// RowData is content of a single row
// RowData is content of a single row.
type RowData []ColumnData

// Less returns true if one row is less than the other.

@@ -74,7 +74,7 @@ func (table *TableData) AddRow(row RowData) error {
return nil
}

// FindTable finds a table by name
// FindTable finds a table by name.
func (data *Data) FindTable(tableName string) (*TableData, bool) {
for _, table := range data.Tables {
if table.Name == tableName {

@@ -103,7 +103,7 @@ func (row RowData) Clone() RowData {
return append(RowData{}, row...)
}

// QueryData loads all data from tables
// QueryData loads all data from tables.
func QueryData(ctx context.Context, db Queryer, schema *Schema, quoteColumn func(string) string) (*Data, error) {
data := &Data{}

@@ -9,7 +9,7 @@ import (
"strings"
)

// Snapshots defines a collection of snapshot
// Snapshots defines a collection of snapshot.
type Snapshots struct {
List []*Snapshot
}

@@ -37,7 +37,7 @@ func (snapshots *Snapshots) FindVersion(version int) (*Snapshot, bool) {
return nil, false
}

// Sort sorts the snapshots by version
// Sort sorts the snapshots by version.
func (snapshots *Snapshots) Sort() {
sort.Slice(snapshots.List, func(i, k int) bool {
return snapshots.List[i].Version < snapshots.List[k].Version

@@ -25,7 +25,7 @@ type ConfigurableDB interface {
Stats() sql.DBStats
}

// Configure Sets Connection Boundaries and adds db_stats monitoring to monkit
// Configure Sets Connection Boundaries and adds db_stats monitoring to monkit.
func Configure(db ConfigurableDB, dbName string, mon *monkit.Scope) {
if *maxIdleConns >= 0 {
db.SetMaxIdleConns(*maxIdleConns)

@@ -28,13 +28,13 @@ func getenv(priority ...string) string {
// postgres is the test database connection string.
var postgres = flag.String("postgres-test-db", getenv("STORJ_TEST_POSTGRES", "STORJ_POSTGRES_TEST"), "PostgreSQL test database connection string (semicolon delimited for multiple), \"omit\" is used to omit the tests from output")

// cockroach is the test database connection string for CockroachDB
// cockroach is the test database connection string for CockroachDB.
var cockroach = flag.String("cockroach-test-db", getenv("STORJ_TEST_COCKROACH", "STORJ_COCKROACH_TEST"), "CockroachDB test database connection string (semicolon delimited for multiple), \"omit\" is used to omit the tests from output")

// DefaultPostgres is expected to work under the storj-test docker-compose instance
// DefaultPostgres is expected to work under the storj-test docker-compose instance.
const DefaultPostgres = "postgres://storj:storj-pass@test-postgres/teststorj?sslmode=disable"

// DefaultCockroach is expected to work when a local cockroachDB instance is running
// DefaultCockroach is expected to work when a local cockroachDB instance is running.
const DefaultCockroach = "cockroach://root@localhost:26257/master?sslmode=disable"

// Database defines a postgres compatible database.
@@ -10,7 +10,7 @@ import (
"storj.io/storj/private/dbutil/dbschema"
)

// QueryData loads all data from tables
// QueryData loads all data from tables.
func QueryData(ctx context.Context, db dbschema.Queryer, schema *dbschema.Schema) (*dbschema.Data, error) {
return dbschema.QueryData(ctx, db, schema, func(columnName string) string {
quoted := strconv.Quote(columnName)

@@ -72,7 +72,7 @@ func OpenUnique(ctx context.Context, connstr string, schemaPrefix string) (*dbut
}, nil
}

// QuerySnapshot loads snapshot from database
// QuerySnapshot loads snapshot from database.
func QuerySnapshot(ctx context.Context, db dbschema.Queryer) (*dbschema.Snapshot, error) {
schema, err := QuerySchema(ctx, db)
if err != nil {

@@ -91,7 +91,7 @@ func QuerySnapshot(ctx context.Context, db dbschema.Queryer) (*dbschema.Snapshot
}, err
}

// CheckApplicationName ensures that the Connection String contains an application name
// CheckApplicationName ensures that the Connection String contains an application name.
func CheckApplicationName(s string) (r string) {
if !strings.Contains(s, "application_name") {
if !strings.Contains(s, "?") {

@@ -105,7 +105,7 @@ func CheckApplicationName(s string) (r string) {
return s
}

// IsConstraintError checks if given error is about constraint violation
// IsConstraintError checks if given error is about constraint violation.
func IsConstraintError(err error) bool {
errCode := ErrorCode(err)
return strings.HasPrefix(errCode, pgErrorClassConstraintViolation)

@@ -184,7 +184,7 @@ func QuerySchema(ctx context.Context, db dbschema.Queryer) (*dbschema.Schema, er
return schema, nil
}

// matches FOREIGN KEY (project_id) REFERENCES projects(id) ON UPDATE CASCADE ON DELETE CASCADE
// matches FOREIGN KEY (project_id) REFERENCES projects(id) ON UPDATE CASCADE ON DELETE CASCADE.
var rxPostgresForeignKey = regexp.MustCompile(
`^FOREIGN KEY \([[:word:]]+\) ` +
`REFERENCES ([[:word:]]+)\(([[:word:]]+)\)` +

@@ -23,7 +23,7 @@ func CreateRandomTestingSchemaName(n int) string {
return hex.EncodeToString(data)
}

// ConnstrWithSchema adds schema to a connection string
// ConnstrWithSchema adds schema to a connection string.
func ConnstrWithSchema(connstr, schema string) string {
if strings.Contains(connstr, "?") {
connstr += "&options="

@@ -34,7 +34,7 @@ func ConnstrWithSchema(connstr, schema string) string {
}

// ParseSchemaFromConnstr returns the name of the schema parsed from the
// connection string if one is provided
// connection string if one is provided.
func ParseSchemaFromConnstr(connstr string) (string, error) {
url, err := url.Parse(connstr)
if err != nil {

@@ -56,12 +56,12 @@ func ParseSchemaFromConnstr(connstr string) (string, error) {
return "", nil
}

// QuoteSchema quotes schema name for
// QuoteSchema quotes schema name for.
func QuoteSchema(schema string) string {
return QuoteIdentifier(schema)
}

// Execer is for executing sql
// Execer is for executing sql.
type Execer interface {
ExecContext(ctx context.Context, query string, args ...interface{}) (sql.Result, error)
}

@@ -84,7 +84,7 @@ func CreateSchema(ctx context.Context, db Execer, schema string) (err error) {
return err
}

// DropSchema drops the named schema
// DropSchema drops the named schema.
func DropSchema(ctx context.Context, db Execer, schema string) error {
_, err := db.ExecContext(ctx, `DROP SCHEMA `+QuoteSchema(schema)+` CASCADE;`)
return err
@@ -53,7 +53,7 @@ func LoadSnapshotFromSQL(ctx context.Context, script string) (_ *dbschema.Snapsh
return snapshot, nil
}

// QuerySnapshot loads snapshot from database
// QuerySnapshot loads snapshot from database.
func QuerySnapshot(ctx context.Context, db dbschema.Queryer) (*dbschema.Snapshot, error) {
schema, err := QuerySchema(ctx, db)
if err != nil {

@@ -72,7 +72,7 @@ func QuerySnapshot(ctx context.Context, db dbschema.Queryer) (*dbschema.Snapshot
}, err
}

// QueryData loads all data from tables
// QueryData loads all data from tables.
func QueryData(ctx context.Context, db dbschema.Queryer, schema *dbschema.Schema) (*dbschema.Data, error) {
return dbschema.QueryData(ctx, db, schema, func(columnName string) string {
quoted := strconv.Quote(columnName)

@@ -80,7 +80,7 @@ func QueryData(ctx context.Context, db dbschema.Queryer, schema *dbschema.Schema
})
}

// IsConstraintError checks if given error is about constraint violation
// IsConstraintError checks if given error is about constraint violation.
func IsConstraintError(err error) bool {
return errs.IsFunc(err, func(err error) bool {
if e, ok := err.(sqlite3.Error); ok {

@@ -172,7 +172,7 @@ func KeepTables(ctx context.Context, db tagsql.DB, tablesToKeep ...string) (err
return err
}

// dropTables performs the table drops in a single transaction
// dropTables performs the table drops in a single transaction.
func dropTables(ctx context.Context, db tagsql.DB, tablesToKeep ...string) (err error) {
err = txutil.WithTx(ctx, db, nil, func(ctx context.Context, tx tagsql.Tx) error {
// Get a list of tables excluding sqlite3 system tables.

@@ -91,5 +91,5 @@ func (emptyStmt) Close() error { return nil }
func (emptyStmt) Exec(args []driver.Value) (driver.Result, error) { return nil, nil }
func (emptyStmt) Query(args []driver.Value) (driver.Rows, error) { return nil, nil }

// must be 1 so that we can pass 1 argument
// must be 1 so that we can pass 1 argument.
func (emptyStmt) NumInput() int { return 1 }

@@ -18,12 +18,12 @@ var (
diffOpts = jsondiff.DefaultConsoleOptions()
)

// DebugCert is a subset of the most relevant fields from an x509.Certificate for debugging
// DebugCert is a subset of the most relevant fields from an x509.Certificate for debugging.
type DebugCert struct {
Cert *x509.Certificate
}

// NewDebugCert converts an *x509.Certificate into a DebugCert
// NewDebugCert converts an *x509.Certificate into a DebugCert.
func NewDebugCert(cert x509.Certificate) DebugCert {
return DebugCert{
Cert: &cert,

@@ -31,7 +31,7 @@ func NewDebugCert(cert x509.Certificate) DebugCert {
}

// PrintJSON uses a json marshaler to pretty-print arbitrary data for debugging
// with special considerations for certain, specific types
// with special considerations for certain, specific types.
func PrintJSON(data interface{}, label string) {
var (
jsonBytes []byte

@@ -89,7 +89,7 @@ func PrintJSON(data interface{}, label string) {
fmt.Println("")
}

// Cmp is used to compare 2 DebugCerts against each other and print the diff
// Cmp is used to compare 2 DebugCerts against each other and print the diff.
func (c DebugCert) Cmp(c2 DebugCert, label string) error {
fmt.Println("diff " + label + " ---================================================================---")
cJSON, err := c.JSON()

@@ -107,7 +107,7 @@ func (c DebugCert) Cmp(c2 DebugCert, label string) error {
return nil
}

// JSON serializes the certificate to JSON
// JSON serializes the certificate to JSON.
func (c DebugCert) JSON() ([]byte, error) {
return json.Marshal(c.Cert)
}

@@ -14,10 +14,10 @@ import (
"storj.io/storj/private/tagsql"
)

// Error is the default migrate errs class
// Error is the default migrate errs class.
var Error = errs.Class("migrate")

// Create with a previous schema check
// Create with a previous schema check.
func Create(ctx context.Context, identifier string, db DBX) error {
// is this necessary? it's not immediately obvious why we roll back the transaction
// when the schemas match.
@@ -53,7 +53,7 @@ Scenarios it doesn't handle properly.
4. Figuring out what the exact executed steps are.
*/

// Migration describes a migration steps
// Migration describes a migration steps.
type Migration struct {
// Table is the table name to register the applied migration version.
// NOTE: Always validates its value with the ValidTableName method before it's

@@ -74,12 +74,12 @@ type Step struct {
SeparateTx bool
}

// Action is something that needs to be done
// Action is something that needs to be done.
type Action interface {
Run(ctx context.Context, log *zap.Logger, db tagsql.DB, tx tagsql.Tx) error
}

// TargetVersion returns migration with steps upto specified version
// TargetVersion returns migration with steps upto specified version.
func (migration *Migration) TargetVersion(version int) *Migration {
m := *migration
m.Steps = nil

@@ -105,7 +105,7 @@ func (migration *Migration) ValidTableName() error {
return nil
}

// ValidateSteps checks that the version for each migration step increments in order
// ValidateSteps checks that the version for each migration step increments in order.
func (migration *Migration) ValidateSteps() error {
sorted := sort.SliceIsSorted(migration.Steps, func(i, j int) bool {
return migration.Steps[i].Version <= migration.Steps[j].Version

@@ -116,7 +116,7 @@ func (migration *Migration) ValidateSteps() error {
return nil
}

// ValidateVersions checks that the version of the migration matches the state of the database
// ValidateVersions checks that the version of the migration matches the state of the database.
func (migration *Migration) ValidateVersions(ctx context.Context, log *zap.Logger) error {
for _, step := range migration.Steps {
dbVersion, err := migration.getLatestVersion(ctx, log, step.DB)

@@ -139,7 +139,7 @@ func (migration *Migration) ValidateVersions(ctx context.Context, log *zap.Logge
return nil
}

// Run runs the migration steps
// Run runs the migration steps.
func (migration *Migration) Run(ctx context.Context, log *zap.Logger) error {
err := migration.ValidateSteps()
if err != nil {

@@ -238,7 +238,7 @@ func (migration *Migration) getLatestVersion(ctx context.Context, log *zap.Logge
return int(version.Int64), Error.Wrap(err)
}

// addVersion adds information about a new migration
// addVersion adds information about a new migration.
func (migration *Migration) addVersion(ctx context.Context, tx tagsql.Tx, db tagsql.DB, version int) error {
err := migration.ValidTableName()
if err != nil {

@@ -254,7 +254,7 @@ func (migration *Migration) addVersion(ctx context.Context, tx tagsql.Tx, db tag
return err
}

// CurrentVersion finds the latest version for the db
// CurrentVersion finds the latest version for the db.
func (migration *Migration) CurrentVersion(ctx context.Context, log *zap.Logger, db tagsql.DB) (int, error) {
err := migration.ensureVersionTable(ctx, log, db)
if err != nil {

@@ -263,10 +263,10 @@ func (migration *Migration) CurrentVersion(ctx context.Context, log *zap.Logger,
return migration.getLatestVersion(ctx, log, db)
}

// SQL statements that are executed on the database
// SQL statements that are executed on the database.
type SQL []string

// Run runs the SQL statements
// Run runs the SQL statements.
func (sql SQL) Run(ctx context.Context, log *zap.Logger, db tagsql.DB, tx tagsql.Tx) (err error) {
for _, query := range sql {
_, err := tx.Exec(ctx, rebind(db, query))

@@ -277,10 +277,10 @@ func (sql SQL) Run(ctx context.Context, log *zap.Logger, db tagsql.DB, tx tagsql
return nil
}

// Func is an arbitrary operation
// Func is an arbitrary operation.
type Func func(ctx context.Context, log *zap.Logger, db tagsql.DB, tx tagsql.Tx) error

// Run runs the migration
// Run runs the migration.
func (fn Func) Run(ctx context.Context, log *zap.Logger, db tagsql.DB, tx tagsql.Tx) error {
return fn(ctx, log, db, tx)
}
@@ -9,13 +9,13 @@ import (
"github.com/zeebo/errs"
)

// LoginAuth implements LOGIN authentication mechanism
// LoginAuth implements LOGIN authentication mechanism.
type LoginAuth struct {
Username string
Password string
}

// Start begins an authentication with a server
// Start begins an authentication with a server.
func (auth LoginAuth) Start(server *smtp.ServerInfo) (proto string, toServer []byte, err error) {
if !server.TLS {
return "", nil, errs.New("unencrypted connection")

@@ -24,7 +24,7 @@ func (auth LoginAuth) Start(server *smtp.ServerInfo) (proto string, toServer []b
}

// Next continues the authentication with server response and flag representing
// if server expects more data from client
// if server expects more data from client.
func (auth LoginAuth) Next(fromServer []byte, more bool) (toServer []byte, err error) {
if more {
switch string(fromServer) {

@@ -16,7 +16,7 @@ import (
"github.com/zeebo/errs"
)

// Message is RFC compliant email message
// Message is RFC compliant email message.
type Message struct {
From Address
To []Address

@@ -29,7 +29,7 @@ type Message struct {
Parts []Part
}

// Part represent one part of multipart message
// Part represent one part of multipart message.
type Part struct {
Type string
Encoding string

@@ -37,10 +37,10 @@ type Part struct {
Content string
}

// Error is the default message errs class
// Error is the default message errs class.
var Error = errs.Class("Email message error")

// Bytes builds message and returns result as bytes
// Bytes builds message and returns result as bytes.
func (msg *Message) Bytes() (data []byte, err error) {
// always returns nil error on read and write, so most of the errors can be ignored
var body bytes.Buffer

@@ -22,14 +22,14 @@ var (
mon = monkit.Package()
)

// Auth is XOAUTH2 implementation of smtp.Auth interface
// Auth is XOAUTH2 implementation of smtp.Auth interface.
type Auth struct {
UserEmail string

Storage *TokenStore
}

// Start returns proto and auth credentials for first auth msg
// Start returns proto and auth credentials for first auth msg.
func (auth *Auth) Start(server *smtp.ServerInfo) (proto string, toServer []byte, err error) {
ctx := context.TODO()
defer mon.Task()(&ctx)(&err)

@@ -46,7 +46,7 @@ func (auth *Auth) Start(server *smtp.ServerInfo) (proto string, toServer []byte,
return "XOAUTH2", []byte(format), nil
}

// Next sends empty response to solve SASL challenge if response code is 334
// Next sends empty response to solve SASL challenge if response code is 334.
func (auth *Auth) Next(fromServer []byte, more bool) (toServer []byte, err error) {
if more {
return make([]byte, 0), nil

@@ -55,7 +55,7 @@ func (auth *Auth) Next(fromServer []byte, more bool) (toServer []byte, err error
return nil, nil
}

// Token represents OAuth2 token
// Token represents OAuth2 token.
type Token struct {
AccessToken string `json:"access_token"`
RefreshToken string `json:"refresh_token"`

@@ -63,21 +63,21 @@ type Token struct {
Expiry time.Time `json:"expiry"`
}

// Credentials represents OAuth2 credentials
// Credentials represents OAuth2 credentials.
type Credentials struct {
ClientID string `json:"client_id"`
ClientSecret string `json:"client_secret"`
TokenURI string `json:"token_uri"`
}

// TokenStore is a thread safe storage for OAuth2 token and credentials
// TokenStore is a thread safe storage for OAuth2 token and credentials.
type TokenStore struct {
mu sync.Mutex
token Token
creds Credentials
}

// NewTokenStore creates new instance of token storage
// NewTokenStore creates new instance of token storage.
func NewTokenStore(creds Credentials, token Token) *TokenStore {
return &TokenStore{
token: token,

@@ -85,7 +85,7 @@ func NewTokenStore(creds Credentials, token Token) *TokenStore {
}
}

// Token retrieves token in a thread safe way and refreshes it if needed
// Token retrieves token in a thread safe way and refreshes it if needed.
func (s *TokenStore) Token(ctx context.Context) (_ *Token, err error) {
defer mon.Task()(&ctx)(&err)
s.mu.Lock()

@@ -105,7 +105,7 @@ func (s *TokenStore) Token(ctx context.Context) (_ *Token, err error) {
return token, nil
}

// RefreshToken is a helper method that refreshes token with given credentials and OUATH2 refresh token
// RefreshToken is a helper method that refreshes token with given credentials and OUATH2 refresh token.
func RefreshToken(ctx context.Context, creds Credentials, refreshToken string) (_ *Token, err error) {
defer mon.Task()(&ctx)(&err)
@@ -15,12 +15,12 @@ import (
"github.com/zeebo/errs"
)

// Address is alias of net/mail.Address
// Address is alias of net/mail.Address.
type Address = mail.Address

var mon = monkit.Package()

// SMTPSender is smtp sender
// SMTPSender is smtp sender.
type SMTPSender struct {
ServerAddress string

@@ -28,12 +28,12 @@ type SMTPSender struct {
Auth smtp.Auth
}

// FromAddress implements satellite/mail.SMTPSender
// FromAddress implements satellite/mail.SMTPSender.
func (sender *SMTPSender) FromAddress() Address {
return sender.From
}

// SendEmail sends email message to the given recipient
// SendEmail sends email message to the given recipient.
func (sender *SMTPSender) SendEmail(ctx context.Context, msg *Message) (err error) {
defer mon.Task()(&ctx)(&err)

@@ -49,7 +49,7 @@ func (sender *SMTPSender) SendEmail(ctx context.Context, msg *Message) (err erro
return nil
}

// communicate sends mail via SMTP using provided client and message
// communicate sends mail via SMTP using provided client and message.
func (sender *SMTPSender) communicate(ctx context.Context, client *smtp.Client, msg *Message) error {
// suppress error because address should be validated
// before creating SMTPSender

@@ -98,7 +98,7 @@ func (sender *SMTPSender) communicate(ctx context.Context, client *smtp.Client,
return client.Quit()
}

// writeData ensures that writer will be closed after data is written
// writeData ensures that writer will be closed after data is written.
func writeData(writer io.WriteCloser, data []byte) (err error) {
defer func() {
err = errs.Combine(err, writer.Close())

@@ -106,7 +106,7 @@ func (bad *BadBlobs) RestoreTrash(ctx context.Context, namespace []byte) ([][]by
return bad.blobs.RestoreTrash(ctx, namespace)
}

// EmptyTrash empties the trash
// EmptyTrash empties the trash.
func (bad *BadBlobs) EmptyTrash(ctx context.Context, namespace []byte, trashedBefore time.Time) (int64, [][]byte, error) {
if bad.err != nil {
return 0, nil, bad.err

@@ -89,13 +89,13 @@ func (slow *SlowBlobs) Trash(ctx context.Context, ref storage.BlobRef) error {
return slow.blobs.Trash(ctx, ref)
}

// RestoreTrash restores all files in the trash
// RestoreTrash restores all files in the trash.
func (slow *SlowBlobs) RestoreTrash(ctx context.Context, namespace []byte) ([][]byte, error) {
slow.sleep()
return slow.blobs.RestoreTrash(ctx, namespace)
}

// EmptyTrash empties the trash
// EmptyTrash empties the trash.
func (slow *SlowBlobs) EmptyTrash(ctx context.Context, namespace []byte, trashedBefore time.Time) (int64, [][]byte, error) {
slow.sleep()
return slow.blobs.EmptyTrash(ctx, namespace, trashedBefore)

@@ -107,7 +107,7 @@ func (slow *SlowBlobs) Delete(ctx context.Context, ref storage.BlobRef) error {
return slow.blobs.Delete(ctx, ref)
}

// DeleteWithStorageFormat deletes the blob with the namespace, key, and format version
// DeleteWithStorageFormat deletes the blob with the namespace, key, and format version.
func (slow *SlowBlobs) DeleteWithStorageFormat(ctx context.Context, ref storage.BlobRef, formatVer storage.FormatVersion) error {
slow.sleep()
return slow.blobs.DeleteWithStorageFormat(ctx, ref, formatVer)

@@ -119,7 +119,7 @@ func (slow *SlowBlobs) DeleteNamespace(ctx context.Context, ref []byte) (err err
return slow.blobs.DeleteNamespace(ctx, ref)
}

// Stat looks up disk metadata on the blob file
// Stat looks up disk metadata on the blob file.
func (slow *SlowBlobs) Stat(ctx context.Context, ref storage.BlobRef) (storage.BlobInfo, error) {
slow.sleep()
return slow.blobs.Stat(ctx, ref)

@@ -152,19 +152,19 @@ func (slow *SlowBlobs) FreeSpace() (int64, error) {
return slow.blobs.FreeSpace()
}

// SpaceUsedForBlobs adds up how much is used in all namespaces
// SpaceUsedForBlobs adds up how much is used in all namespaces.
func (slow *SlowBlobs) SpaceUsedForBlobs(ctx context.Context) (int64, error) {
slow.sleep()
return slow.blobs.SpaceUsedForBlobs(ctx)
}

// SpaceUsedForBlobsInNamespace adds up how much is used in the given namespace
// SpaceUsedForBlobsInNamespace adds up how much is used in the given namespace.
func (slow *SlowBlobs) SpaceUsedForBlobsInNamespace(ctx context.Context, namespace []byte) (int64, error) {
slow.sleep()
return slow.blobs.SpaceUsedForBlobsInNamespace(ctx, namespace)
}

// SpaceUsedForTrash adds up how much is used in all namespaces
// SpaceUsedForTrash adds up how much is used in all namespaces.
func (slow *SlowBlobs) SpaceUsedForTrash(ctx context.Context) (int64, error) {
slow.sleep()
return slow.blobs.SpaceUsedForTrash(ctx)

@@ -176,7 +176,7 @@ func (slow *SlowBlobs) SetLatency(delay time.Duration) {
atomic.StoreInt64(&slow.delay, int64(delay))
}

// sleep sleeps for the duration set to slow.delay
// sleep sleeps for the duration set to slow.delay.
func (slow *SlowBlobs) sleep() {
delay := time.Duration(atomic.LoadInt64(&slow.delay))
time.Sleep(delay)
@ -32,7 +32,7 @@ import (
|
||||
|
||||
const defaultInterval = 15 * time.Second
|
||||
|
||||
// Peer represents one of StorageNode or Satellite
|
||||
// Peer represents one of StorageNode or Satellite.
|
||||
type Peer interface {
|
||||
ID() storj.NodeID
|
||||
Addr() string
|
||||
@ -43,7 +43,7 @@ type Peer interface {
|
||||
Close() error
|
||||
}
|
||||
|
||||
// Config describes planet configuration
|
||||
// Config describes planet configuration.
|
||||
type Config struct {
|
||||
SatelliteCount int
|
||||
StorageNodeCount int
|
||||
@ -224,7 +224,7 @@ func (planet *Planet) Start(ctx context.Context) {
|
||||
planet.started = true
|
||||
}
|
||||
|
||||
// StopPeer stops a single peer in the planet
|
||||
// StopPeer stops a single peer in the planet.
|
||||
func (planet *Planet) StopPeer(peer Peer) error {
|
||||
if peer == nil {
|
||||
return errors.New("peer is nil")
|
||||
@ -265,7 +265,7 @@ func (planet *Planet) StopNodeAndUpdate(ctx context.Context, node *StorageNode)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Size returns number of nodes in the network
|
||||
// Size returns number of nodes in the network.
|
||||
func (planet *Planet) Size() int { return len(planet.uplinks) + len(planet.peers) }
|
||||
|
||||
// FindNode is a helper to retrieve a storage node record by its node ID.
|
||||
@ -336,12 +336,12 @@ func (planet *Planet) Identities() *testidentity.Identities {
|
||||
return planet.identities
|
||||
}
|
||||
|
||||
// NewIdentity creates a new identity for a node
|
||||
// NewIdentity creates a new identity for a node.
|
||||
func (planet *Planet) NewIdentity() (*identity.FullIdentity, error) {
|
||||
return planet.identities.NewIdentity()
|
||||
}
|
||||
|
||||
// NewListener creates a new listener
|
||||
// NewListener creates a new listener.
|
||||
func (planet *Planet) NewListener() (net.Listener, error) {
|
||||
return net.Listen("tcp", "127.0.0.1:0")
|
||||
}
|
||||
|
@ -54,7 +54,7 @@ func TestBasic(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
// test that nodes get put into each satellite's overlay cache
|
||||
// test that nodes get put into each satellite's overlay cache.
|
||||
func TestContact(t *testing.T) {
|
||||
testplanet.Run(t, testplanet.Config{
|
||||
SatelliteCount: 2, StorageNodeCount: 5, UplinkCount: 0,
|
||||
|
@ -17,7 +17,7 @@ import (
|
||||
"storj.io/storj/storagenode"
|
||||
)
|
||||
|
||||
// Reconfigure allows to change node configurations
|
||||
// Reconfigure allows to change node configurations.
|
||||
type Reconfigure struct {
|
||||
SatelliteDB func(log *zap.Logger, index int, db satellite.DB) (satellite.DB, error)
|
||||
SatellitePointerDB func(log *zap.Logger, index int, db metainfo.PointerDB) (metainfo.PointerDB, error)
|
||||
@ -44,7 +44,7 @@ var DisablePeerCAWhitelist = Reconfigure{
|
||||
}
|
||||
|
||||
// ShortenOnlineWindow returns a `Reconfigure` that sets the NodeSelection
|
||||
// OnlineWindow to 1 second, meaning a connection failure leads to marking the nodes as offline
|
||||
// OnlineWindow to 1 second, meaning a connection failure leads to marking the nodes as offline.
|
||||
var ShortenOnlineWindow = Reconfigure{
|
||||
Satellite: func(log *zap.Logger, index int, config *satellite.Config) {
|
||||
config.Overlay.Node.OnlineWindow = 1 * time.Second
|
||||
@ -60,7 +60,7 @@ var Combine = func(elements ...func(log *zap.Logger, index int, config *satellit
|
||||
}
|
||||
}
|
||||
|
||||
// ReconfigureRS returns function to change satellite redundancy scheme values
|
||||
// ReconfigureRS returns function to change satellite redundancy scheme values.
|
||||
var ReconfigureRS = func(minThreshold, repairThreshold, successThreshold, totalThreshold int) func(log *zap.Logger, index int, config *satellite.Config) {
|
||||
return func(log *zap.Logger, index int, config *satellite.Config) {
|
||||
config.Metainfo.RS.MinThreshold = minThreshold
|
||||
|
@ -15,12 +15,12 @@ import (
|
||||
"storj.io/storj/pkg/server"
|
||||
)
|
||||
|
||||
// DefaultReferralManagerServer implements the default behavior of a mock referral manager
|
||||
// DefaultReferralManagerServer implements the default behavior of a mock referral manager.
|
||||
type DefaultReferralManagerServer struct {
|
||||
tokenCount int
|
||||
}
|
||||
|
||||
// newReferralManager initializes a referral manager server
|
||||
// newReferralManager initializes a referral manager server.
|
||||
func (planet *Planet) newReferralManager() (*server.Server, error) {
|
||||
prefix := "referralmanager"
|
||||
log := planet.log.Named(prefix)
|
||||
|
@ -64,7 +64,7 @@ import (
|
||||
"storj.io/storj/storage/redis/redisserver"
|
||||
)
|
||||
|
||||
// Satellite contains all the processes needed to run a full Satellite setup
|
||||
// Satellite contains all the processes needed to run a full Satellite setup.
|
||||
type Satellite struct {
|
||||
Config satellite.Config
|
||||
|
||||
@ -271,7 +271,7 @@ func (system *Satellite) authenticatedContext(ctx context.Context, userID uuid.U
|
||||
return console.WithAuth(ctx, auth), nil
|
||||
}
|
||||
|
||||
// Close closes all the subsystems in the Satellite system
|
||||
// Close closes all the subsystems in the Satellite system.
|
||||
func (system *Satellite) Close() error {
|
||||
return errs.Combine(
|
||||
system.API.Close(),
|
||||
@ -282,7 +282,7 @@ func (system *Satellite) Close() error {
|
||||
)
|
||||
}
|
||||
|
||||
// Run runs all the subsystems in the Satellite system
|
||||
// Run runs all the subsystems in the Satellite system.
|
||||
func (system *Satellite) Run(ctx context.Context) (err error) {
|
||||
group, ctx := errgroup.WithContext(ctx)
|
||||
|
||||
@ -307,7 +307,7 @@ func (system *Satellite) Run(ctx context.Context) (err error) {
|
||||
// PrivateAddr returns the private address from the Satellite system API.
|
||||
func (system *Satellite) PrivateAddr() string { return system.API.Server.PrivateAddr().String() }
|
||||
|
||||
// newSatellites initializes satellites
|
||||
// newSatellites initializes satellites.
|
||||
func (planet *Planet) newSatellites(count int, satelliteDatabases satellitedbtest.SatelliteDatabases) ([]*Satellite, error) {
|
||||
var xs []*Satellite
|
||||
defer func() {
|
||||
|
@ -27,7 +27,7 @@ import (
|
||||
"storj.io/uplink/private/testuplink"
|
||||
)
|
||||
|
||||
// Uplink is a general purpose
|
||||
// Uplink is a general purpose.
|
||||
type Uplink struct {
|
||||
Log *zap.Logger
|
||||
Identity *identity.FullIdentity
|
||||
@ -156,16 +156,16 @@ func (planet *Planet) newUplink(name string) (*Uplink, error) {
|
||||
return planetUplink, nil
|
||||
}
|
||||
|
||||
// ID returns uplink id
|
||||
// ID returns uplink id.
|
||||
func (client *Uplink) ID() storj.NodeID { return client.Identity.ID }
|
||||
|
||||
// Addr returns uplink address
|
||||
// Addr returns uplink address.
|
||||
func (client *Uplink) Addr() string { return "" }
|
||||
|
||||
// Shutdown shuts down all uplink dependencies
|
||||
// Shutdown shuts down all uplink dependencies.
|
||||
func (client *Uplink) Shutdown() error { return nil }
|
||||
|
||||
// DialMetainfo dials destination with apikey and returns metainfo Client
|
||||
// DialMetainfo dials destination with apikey and returns metainfo Client.
|
||||
func (client *Uplink) DialMetainfo(ctx context.Context, destination Peer, apikey *macaroon.APIKey) (*metainfo.Client, error) {
|
||||
return metainfo.DialNodeURL(ctx, client.Dialer, destination.NodeURL().String(), apikey, "Test/1.0")
|
||||
}
|
||||
@ -175,12 +175,12 @@ func (client *Uplink) DialPiecestore(ctx context.Context, destination Peer) (*pi
|
||||
return piecestore.DialNodeURL(ctx, client.Dialer, destination.NodeURL(), client.Log.Named("uplink>piecestore"), piecestore.DefaultConfig)
|
||||
}
|
||||
|
||||
// Upload data to specific satellite
|
||||
// Upload data to specific satellite.
|
||||
func (client *Uplink) Upload(ctx context.Context, satellite *Satellite, bucket string, path storj.Path, data []byte) error {
|
||||
return client.UploadWithExpiration(ctx, satellite, bucket, path, data, time.Time{})
|
||||
}
|
||||
|
||||
// UploadWithExpiration data to specific satellite and expiration time
|
||||
// UploadWithExpiration data to specific satellite and expiration time.
|
||||
func (client *Uplink) UploadWithExpiration(ctx context.Context, satellite *Satellite, bucketName string, path storj.Path, data []byte, expiration time.Time) error {
|
||||
_, found := testuplink.GetMaxSegmentSize(ctx)
|
||||
if !found {
|
||||
@ -215,7 +215,7 @@ func (client *Uplink) UploadWithExpiration(ctx context.Context, satellite *Satel
|
||||
return upload.Commit()
|
||||
}
|
||||
|
||||
// Download data from specific satellite
|
||||
// Download data from specific satellite.
|
||||
func (client *Uplink) Download(ctx context.Context, satellite *Satellite, bucketName string, path storj.Path) ([]byte, error) {
|
||||
project, err := client.GetProject(ctx, satellite)
|
||||
if err != nil {
|
||||
@ -236,7 +236,7 @@ func (client *Uplink) Download(ctx context.Context, satellite *Satellite, bucket
|
||||
return data, nil
|
||||
}
|
||||
|
||||
// DownloadStream returns stream for downloading data
|
||||
// DownloadStream returns stream for downloading data.
|
||||
func (client *Uplink) DownloadStream(ctx context.Context, satellite *Satellite, bucketName string, path storj.Path) (_ io.ReadCloser, cleanup func() error, err error) {
|
||||
project, err := client.GetProject(ctx, satellite)
|
||||
if err != nil {
|
||||
@ -254,7 +254,7 @@ func (client *Uplink) DownloadStream(ctx context.Context, satellite *Satellite,
|
||||
return downloader, cleanup, err
|
||||
}
|
||||
|
||||
// DownloadStreamRange returns stream for downloading data
|
||||
// DownloadStreamRange returns stream for downloading data.
|
||||
func (client *Uplink) DownloadStreamRange(ctx context.Context, satellite *Satellite, bucketName string, path storj.Path, start, limit int64) (_ io.ReadCloser, cleanup func() error, err error) {
|
||||
project, err := client.GetProject(ctx, satellite)
|
||||
if err != nil {
|
||||
@ -275,7 +275,7 @@ func (client *Uplink) DownloadStreamRange(ctx context.Context, satellite *Satell
|
||||
return downloader, cleanup, err
|
||||
}
|
||||
|
||||
// DeleteObject deletes an object at the path in a bucket
|
||||
// DeleteObject deletes an object at the path in a bucket.
|
||||
func (client *Uplink) DeleteObject(ctx context.Context, satellite *Satellite, bucketName string, path storj.Path) error {
|
||||
project, err := client.GetProject(ctx, satellite)
|
||||
if err != nil {
|
||||
@ -290,7 +290,7 @@ func (client *Uplink) DeleteObject(ctx context.Context, satellite *Satellite, bu
|
||||
return err
|
||||
}
|
||||
|
||||
// CreateBucket creates a new bucket
|
||||
// CreateBucket creates a new bucket.
|
||||
func (client *Uplink) CreateBucket(ctx context.Context, satellite *Satellite, bucketName string) error {
|
||||
project, err := client.GetProject(ctx, satellite)
|
||||
if err != nil {
|
||||
|
@ -14,7 +14,7 @@ import (
|
||||
"storj.io/storj/versioncontrol"
|
||||
)
|
||||
|
||||
// newVersionControlServer initializes the Versioning Server
|
||||
// newVersionControlServer initializes the Versioning Server.
|
||||
func (planet *Planet) newVersionControlServer() (peer *versioncontrol.Peer, err error) {
|
||||
|
||||
prefix := "versioncontrol"
|
||||
|
@ -9,20 +9,20 @@ import (
|
||||
)
|
||||
|
||||
// NodeIDFromBytes returns a node ID consisting of the bytes
|
||||
// and padding to the node ID length
|
||||
// and padding to the node ID length.
|
||||
func NodeIDFromBytes(b []byte) storj.NodeID {
|
||||
id, _ := storj.NodeIDFromBytes(fit(b))
|
||||
return id
|
||||
}
|
||||
|
||||
// NodeIDFromString returns node ID consisting of the strings
|
||||
// and padding to the node ID length
|
||||
// and padding to the node ID length.
|
||||
func NodeIDFromString(s string) storj.NodeID {
|
||||
return NodeIDFromBytes([]byte(s))
|
||||
}
|
||||
|
||||
// NodeIDsFromBytes returns node IDs consisting of the byte slices
|
||||
// and padding to the node ID length
|
||||
// and padding to the node ID length.
|
||||
func NodeIDsFromBytes(bs ...[]byte) (ids storj.NodeIDList) {
|
||||
for _, b := range bs {
|
||||
ids = append(ids, NodeIDFromBytes(b))
|
||||
@ -31,7 +31,7 @@ func NodeIDsFromBytes(bs ...[]byte) (ids storj.NodeIDList) {
|
||||
}
|
||||
|
||||
// NodeIDsFromStrings returns node IDs consisting of the strings
|
||||
// and padding to the node ID length
|
||||
// and padding to the node ID length.
|
||||
func NodeIDsFromStrings(strs ...string) (ids storj.NodeIDList) {
|
||||
for _, s := range strs {
|
||||
ids = append(ids, NodeIDFromString(s))
|
||||
@ -39,7 +39,7 @@ func NodeIDsFromStrings(strs ...string) (ids storj.NodeIDList) {
|
||||
return ids
|
||||
}
|
||||
|
||||
// used to pad node IDs
|
||||
// used to pad node IDs.
|
||||
func fit(b []byte) []byte {
|
||||
l := len(storj.NodeID{})
|
||||
if len(b) < l {
|
||||
@ -50,7 +50,7 @@ func fit(b []byte) []byte {
|
||||
}
|
||||
|
||||
// MockNode returns a pb node with an ID consisting of the string
|
||||
// and padding to the node ID length
|
||||
// and padding to the node ID length.
|
||||
func MockNode(s string) *pb.Node {
|
||||
id := NodeIDFromString(s)
|
||||
var node pb.Node
|
||||
|
@ -7,13 +7,13 @@ import (
|
||||
"storj.io/common/storj"
|
||||
)
|
||||
|
||||
// PieceIDFromBytes converts a byte slice into a piece ID
|
||||
// PieceIDFromBytes converts a byte slice into a piece ID.
|
||||
func PieceIDFromBytes(b []byte) storj.PieceID {
|
||||
id, _ := storj.PieceIDFromBytes(fit(b))
|
||||
return id
|
||||
}
|
||||
|
||||
// PieceIDFromString decodes a hex encoded piece ID string
|
||||
// PieceIDFromString decodes a hex encoded piece ID string.
|
||||
func PieceIDFromString(s string) storj.PieceID {
|
||||
return PieceIDFromBytes([]byte(s))
|
||||
}
|
||||
|
@ -25,7 +25,7 @@ func NewChore(service *Service, checkInterval time.Duration) *Chore {
|
||||
}
|
||||
}
|
||||
|
||||
// Run logs the current version information
|
||||
// Run logs the current version information.
|
||||
func (chore *Chore) Run(ctx context.Context) (err error) {
|
||||
defer mon.Task()(&ctx)(&err)
|
||||
if !chore.service.Checked() {
|
||||
|
@ -15,7 +15,7 @@ import (
|
||||
"storj.io/private/version"
|
||||
)
|
||||
|
||||
// Config contains the necessary Information to check the Software Version
|
||||
// Config contains the necessary Information to check the Software Version.
|
||||
type Config struct {
|
||||
ClientConfig
|
||||
|
||||
@ -38,7 +38,7 @@ type Service struct {
|
||||
acceptedVersion version.SemVer
|
||||
}
|
||||
|
||||
// NewService creates a Version Check Client with default configuration
|
||||
// NewService creates a Version Check Client with default configuration.
|
||||
func NewService(log *zap.Logger, config Config, info version.Info, service string) (client *Service) {
|
||||
return &Service{
|
||||
log: log,
|
||||
@ -51,7 +51,7 @@ func NewService(log *zap.Logger, config Config, info version.Info, service strin
|
||||
}
|
||||
|
||||
// CheckProcessVersion is not meant to be used for peers but is meant to be
|
||||
// used for other utilities
|
||||
// used for other utilities.
|
||||
func CheckProcessVersion(ctx context.Context, log *zap.Logger, config Config, info version.Info, service string) (err error) {
|
||||
defer mon.Task()(&ctx)(&err)
|
||||
_, err = NewService(log, config, info, service).CheckVersion(ctx)
|
||||
@ -141,7 +141,7 @@ func (service *Service) Checked() bool {
|
||||
return service.checked.Released()
|
||||
}
|
||||
|
||||
// isAcceptedVersion compares and checks if the passed version is greater/equal than the minimum required version
|
||||
// isAcceptedVersion compares and checks if the passed version is greater/equal than the minimum required version.
|
||||
func isAcceptedVersion(test version.SemVer, target version.OldSemVer) bool {
|
||||
return test.Major > uint64(target.Major) || (test.Major == uint64(target.Major) && (test.Minor > uint64(target.Minor) || (test.Minor == uint64(target.Minor) && test.Patch >= uint64(target.Patch))))
|
||||
}
|
||||
|
@ -7,7 +7,7 @@ import (
|
||||
"storj.io/common/uuid"
|
||||
)
|
||||
|
||||
// BucketTally contains information about aggregate data stored in a bucket
|
||||
// BucketTally contains information about aggregate data stored in a bucket.
|
||||
type BucketTally struct {
|
||||
ProjectID uuid.UUID
|
||||
BucketName []byte
|
||||
@ -23,7 +23,7 @@ type BucketTally struct {
|
||||
MetadataSize int64
|
||||
}
|
||||
|
||||
// Combine aggregates all the tallies
|
||||
// Combine aggregates all the tallies.
|
||||
func (s *BucketTally) Combine(o *BucketTally) {
|
||||
s.ObjectCount += o.ObjectCount
|
||||
|
||||
|
@ -9,7 +9,7 @@ import (
|
||||
"storj.io/common/uuid"
|
||||
)
|
||||
|
||||
// BucketStorageTally holds data about a bucket tally
|
||||
// BucketStorageTally holds data about a bucket tally.
|
||||
type BucketStorageTally struct {
|
||||
BucketName string
|
||||
ProjectID uuid.UUID
|
||||
|
@ -9,7 +9,7 @@ import (
|
||||
"storj.io/common/storj"
|
||||
)
|
||||
|
||||
// Constants for accounting_raw, accounting_rollup, and accounting_timestamps
|
||||
// Constants for accounting_raw, accounting_rollup, and accounting_timestamps.
|
||||
const (
|
||||
// LastAtRestTally represents the accounting timestamp for the at-rest data calculation
|
||||
LastAtRestTally = "LastAtRestTally"
|
||||
@ -19,7 +19,7 @@ const (
|
||||
LastRollup = "LastRollup"
|
||||
)
|
||||
|
||||
// CSVRow represents data from QueryPaymentInfo without exposing dbx
|
||||
// CSVRow represents data from QueryPaymentInfo without exposing dbx.
|
||||
type CSVRow struct {
|
||||
NodeID storj.NodeID
|
||||
NodeCreationDate time.Time
|
||||
|
@ -13,10 +13,10 @@ import (
|
||||
"storj.io/storj/satellite/compensation"
|
||||
)
|
||||
|
||||
// RollupStats is a convenience alias
|
||||
// RollupStats is a convenience alias.
|
||||
type RollupStats map[time.Time]map[storj.NodeID]*Rollup
|
||||
|
||||
// StoragenodeStorageTally mirrors dbx.StoragenodeStorageTally, allowing us to use that struct without leaking dbx
|
||||
// StoragenodeStorageTally mirrors dbx.StoragenodeStorageTally, allowing us to use that struct without leaking dbx.
|
||||
type StoragenodeStorageTally struct {
|
||||
ID int64
|
||||
NodeID storj.NodeID
|
||||
@ -24,7 +24,7 @@ type StoragenodeStorageTally struct {
|
||||
DataTotal float64
|
||||
}
|
||||
|
||||
// StoragenodeBandwidthRollup mirrors dbx.StoragenodeBandwidthRollup, allowing us to use the struct without leaking dbx
|
||||
// StoragenodeBandwidthRollup mirrors dbx.StoragenodeBandwidthRollup, allowing us to use the struct without leaking dbx.
|
||||
type StoragenodeBandwidthRollup struct {
|
||||
NodeID storj.NodeID
|
||||
IntervalStart time.Time
|
||||
@ -32,7 +32,7 @@ type StoragenodeBandwidthRollup struct {
|
||||
Settled uint64
|
||||
}
|
||||
|
||||
// Rollup mirrors dbx.AccountingRollup, allowing us to use that struct without leaking dbx
|
||||
// Rollup mirrors dbx.AccountingRollup, allowing us to use that struct without leaking dbx.
|
||||
type Rollup struct {
|
||||
ID int64
|
||||
NodeID storj.NodeID
|
||||
@ -45,7 +45,7 @@ type Rollup struct {
|
||||
AtRestTotal float64
|
||||
}
|
||||
|
||||
// StorageNodePeriodUsage represents a statement for a node for a compensation period
|
||||
// StorageNodePeriodUsage represents a statement for a node for a compensation period.
|
||||
type StorageNodePeriodUsage struct {
|
||||
NodeID storj.NodeID
|
||||
AtRestTotal float64
|
||||
@ -56,7 +56,7 @@ type StorageNodePeriodUsage struct {
|
||||
GetAuditTotal int64
|
||||
}
|
||||
|
||||
// StorageNodeUsage is node at rest space usage over a period of time
|
||||
// StorageNodeUsage is node at rest space usage over a period of time.
|
||||
type StorageNodeUsage struct {
|
||||
NodeID storj.NodeID
|
||||
StorageUsed float64
|
||||
@ -65,7 +65,7 @@ type StorageNodeUsage struct {
|
||||
}
|
||||
|
||||
// ProjectUsage consist of period total storage, egress
|
||||
// and objects count per hour for certain Project in bytes
|
||||
// and objects count per hour for certain Project in bytes.
|
||||
type ProjectUsage struct {
|
||||
Storage float64 `json:"storage"`
|
||||
Egress int64 `json:"egress"`
|
||||
@ -75,7 +75,7 @@ type ProjectUsage struct {
|
||||
Before time.Time `json:"before"`
|
||||
}
|
||||
|
||||
// BucketUsage consist of total bucket usage for period
|
||||
// BucketUsage consist of total bucket usage for period.
|
||||
type BucketUsage struct {
|
||||
ProjectID uuid.UUID
|
||||
BucketName string
|
||||
@ -89,14 +89,14 @@ type BucketUsage struct {
|
||||
}
|
||||
|
||||
// BucketUsageCursor holds info for bucket usage
|
||||
// cursor pagination
|
||||
// cursor pagination.
|
||||
type BucketUsageCursor struct {
|
||||
Search string
|
||||
Limit uint
|
||||
Page uint
|
||||
}
|
||||
|
||||
// BucketUsagePage represents bucket usage page result
|
||||
// BucketUsagePage represents bucket usage page result.
|
||||
type BucketUsagePage struct {
|
||||
BucketUsages []BucketUsage
|
||||
|
||||
@ -110,7 +110,7 @@ type BucketUsagePage struct {
|
||||
}
|
||||
|
||||
// BucketUsageRollup is total bucket usage info
|
||||
// for certain period
|
||||
// for certain period.
|
||||
type BucketUsageRollup struct {
|
||||
ProjectID uuid.UUID
|
||||
BucketName []byte
|
||||
|
@ -15,7 +15,7 @@ import (
|
||||
"storj.io/storj/satellite/accounting"
|
||||
)
|
||||
|
||||
// Config contains configurable values for rollup
|
||||
// Config contains configurable values for rollup.
|
||||
type Config struct {
|
||||
Interval time.Duration `help:"how frequently rollup should run" releaseDefault:"24h" devDefault:"120s"`
|
||||
DeleteTallies bool `help:"option for deleting tallies after they are rolled up" default:"true"`
|
||||
@ -31,7 +31,7 @@ type Service struct {
|
||||
deleteTallies bool
|
||||
}
|
||||
|
||||
// New creates a new rollup service
|
||||
// New creates a new rollup service.
|
||||
func New(logger *zap.Logger, sdb accounting.StoragenodeAccounting, interval time.Duration, deleteTallies bool) *Service {
|
||||
return &Service{
|
||||
logger: logger,
|
||||
@ -41,7 +41,7 @@ func New(logger *zap.Logger, sdb accounting.StoragenodeAccounting, interval time
|
||||
}
|
||||
}
|
||||
|
||||
// Run the Rollup loop
|
||||
// Run the Rollup loop.
|
||||
func (r *Service) Run(ctx context.Context) (err error) {
|
||||
defer mon.Task()(&ctx)(&err)
|
||||
return r.Loop.Run(ctx, func(ctx context.Context) error {
|
||||
@ -59,7 +59,7 @@ func (r *Service) Close() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Rollup aggregates storage and bandwidth amounts for the time interval
|
||||
// Rollup aggregates storage and bandwidth amounts for the time interval.
|
||||
func (r *Service) Rollup(ctx context.Context) (err error) {
|
||||
defer mon.Task()(&ctx)(&err)
|
||||
// only Rollup new things - get LastRollup
|
||||
@ -102,7 +102,7 @@ func (r *Service) Rollup(ctx context.Context) (err error) {
|
||||
return nil
|
||||
}
|
||||
|
||||
// RollupStorage rolls up storage tally, modifies rollupStats map
|
||||
// RollupStorage rolls up storage tally, modifies rollupStats map.
|
||||
func (r *Service) RollupStorage(ctx context.Context, lastRollup time.Time, rollupStats accounting.RollupStats) (latestTally time.Time, err error) {
|
||||
defer mon.Task()(&ctx)(&err)
|
||||
tallies, err := r.sdb.GetTalliesSince(ctx, lastRollup)
|
||||
@ -136,7 +136,7 @@ func (r *Service) RollupStorage(ctx context.Context, lastRollup time.Time, rollu
|
||||
return latestTally, nil
|
||||
}
|
||||
|
||||
// RollupBW aggregates the bandwidth rollups, modifies rollupStats map
|
||||
// RollupBW aggregates the bandwidth rollups, modifies rollupStats map.
|
||||
func (r *Service) RollupBW(ctx context.Context, lastRollup time.Time, rollupStats accounting.RollupStats) (err error) {
|
||||
defer mon.Task()(&ctx)(&err)
|
||||
var latestTally time.Time
|
||||
|
@ -178,7 +178,7 @@ func TestRollupDeletes(t *testing.T) {
|
||||
})
|
||||
}
|
||||
|
||||
// expectedTotals sums test data up to, but not including the current day's
|
||||
// expectedTotals sums test data up to, but not including the current day's.
|
||||
func expectedTotals(data []testData, id storj.NodeID, currentDay int) []float64 {
|
||||
totals := make([]float64, 5)
|
||||
for i := 0; i < currentDay; i++ {
|
||||
@ -206,7 +206,7 @@ func createData(planet *testplanet.Planet, days int) []testData {
|
||||
return data
|
||||
}
|
||||
|
||||
// dqNodes disqualifies half the nodes in the testplanet and returns a map of dqed nodes
|
||||
// dqNodes disqualifies half the nodes in the testplanet and returns a map of dqed nodes.
|
||||
func dqNodes(ctx *testcontext.Context, planet *testplanet.Planet) (map[storj.NodeID]bool, error) {
|
||||
dqed := make(map[storj.NodeID]bool)
|
||||
|
||||
|
@ -25,7 +25,7 @@ var (
|
||||
mon = monkit.Package()
|
||||
)
|
||||
|
||||
// Config contains configurable values for the tally service
|
||||
// Config contains configurable values for the tally service.
|
||||
type Config struct {
|
||||
Interval time.Duration `help:"how frequently the tally service should run" releaseDefault:"1h" devDefault:"30s"`
|
||||
}
|
||||
@ -44,7 +44,7 @@ type Service struct {
|
||||
nowFn func() time.Time
|
||||
}
|
||||
|
||||
// New creates a new tally Service
|
||||
// New creates a new tally Service.
|
||||
func New(log *zap.Logger, sdb accounting.StoragenodeAccounting, pdb accounting.ProjectAccounting, liveAccounting accounting.Cache, metainfoLoop *metainfo.Loop, interval time.Duration) *Service {
|
||||
return &Service{
|
||||
log: log,
|
||||
@ -58,7 +58,7 @@ func New(log *zap.Logger, sdb accounting.StoragenodeAccounting, pdb accounting.P
|
||||
}
|
||||
}
|
||||
|
||||
// Run the tally service loop
|
||||
// Run the tally service loop.
|
||||
func (service *Service) Run(ctx context.Context) (err error) {
|
||||
defer mon.Task()(&ctx)(&err)
|
||||
|
||||
@ -83,7 +83,7 @@ func (service *Service) SetNow(now func() time.Time) {
|
||||
service.nowFn = now
|
||||
}
|
||||
|
||||
// Tally calculates data-at-rest usage once
|
||||
// Tally calculates data-at-rest usage once.
|
||||
func (service *Service) Tally(ctx context.Context) (err error) {
|
||||
defer mon.Task()(&ctx)(&err)
|
||||
|
||||
@ -190,7 +190,7 @@ func (service *Service) Tally(ctx context.Context) (err error) {
|
||||
|
||||
var _ metainfo.Observer = (*Observer)(nil)
|
||||
|
||||
// Observer observes metainfo and adds up tallies for nodes and buckets
|
||||
// Observer observes metainfo and adds up tallies for nodes and buckets.
|
||||
type Observer struct {
|
||||
Now time.Time
|
||||
Log *zap.Logger
|
||||
@ -213,7 +213,7 @@ func (observer *Observer) pointerExpired(pointer *pb.Pointer) bool {
|
||||
return !pointer.ExpirationDate.IsZero() && pointer.ExpirationDate.Before(observer.Now)
|
||||
}
|
||||
|
||||
// ensureBucket returns bucket corresponding to the passed in path
|
||||
// ensureBucket returns bucket corresponding to the passed in path.
|
||||
func (observer *Observer) ensureBucket(ctx context.Context, path metainfo.ScopedPath) *accounting.BucketTally {
|
||||
bucketID := storj.JoinPaths(path.ProjectIDString, path.BucketName)
|
||||
|
||||
@ -290,5 +290,5 @@ func projectTotalsFromBuckets(buckets map[string]*accounting.BucketTally) map[uu
|
||||
return projectTallyTotals
|
||||
}
|
||||
|
||||
// using custom name to avoid breaking monitoring
|
||||
// using custom name to avoid breaking monitoring.
|
||||
var monAccounting = monkit.ScopeNamed("storj.io/storj/satellite/accounting")
|
||||
|
@ -314,7 +314,7 @@ func TestTallyEmptyProjectUpdatesLiveAccounting(t *testing.T) {
|
||||
}
|
||||
|
||||
// addBucketTally creates a new expected bucket tally based on the
|
||||
// pointer that was just created for the test case
|
||||
// pointer that was just created for the test case.
|
||||
func addBucketTally(existingTally *accounting.BucketTally, inline, last bool) *accounting.BucketTally {
|
||||
// if there is already an existing tally for this project and bucket, then
|
||||
// add the new pointer data to the existing tally
|
||||
@ -349,7 +349,7 @@ func addBucketTally(existingTally *accounting.BucketTally, inline, last bool) *a
|
||||
return newRemoteTally
|
||||
}
|
||||
|
||||
// makePointer creates a pointer
|
||||
// makePointer creates a pointer.
|
||||
func makePointer(storageNodes []*testplanet.StorageNode, rs storj.RedundancyScheme, segmentSize int64, inline bool) *pb.Pointer {
|
||||
if inline {
|
||||
inlinePointer := &pb.Pointer{
|
||||
|
@ -161,7 +161,7 @@ type API struct {
|
||||
}
|
||||
}
|
||||
|
||||
// NewAPI creates a new satellite API process
|
||||
// NewAPI creates a new satellite API process.
|
||||
func NewAPI(log *zap.Logger, full *identity.FullIdentity, db DB,
|
||||
pointerDB metainfo.PointerDB, revocationDB extensions.RevocationDB, liveAccounting accounting.Cache, rollupsWriteCache *orders.RollupsWriteCache,
|
||||
config *Config, versionInfo version.Info, atomicLogLevel *zap.AtomicLevel) (*API, error) {
|
||||
|
@ -13,10 +13,10 @@ import (
|
||||
"storj.io/common/uuid"
|
||||
)
|
||||
|
||||
// ErrBucketNotAttributed is returned if a requested bucket not attributed(entry not found)
|
||||
// ErrBucketNotAttributed is returned if a requested bucket not attributed(entry not found).
|
||||
var ErrBucketNotAttributed = errs.Class("bucket not attributed")
|
||||
|
||||
// Info describing value attribution from partner to bucket
|
||||
// Info describing value attribution from partner to bucket.
|
||||
type Info struct {
|
||||
ProjectID uuid.UUID
|
||||
BucketName []byte
|
||||
@ -24,7 +24,7 @@ type Info struct {
|
||||
CreatedAt time.Time
|
||||
}
|
||||
|
||||
// CSVRow represents data from QueryAttribution without exposing dbx
|
||||
// CSVRow represents data from QueryAttribution without exposing dbx.
|
||||
type CSVRow struct {
|
||||
PartnerID []byte
|
||||
ProjectID []byte
|
||||
|
@ -23,7 +23,7 @@ var (
|
||||
ErrContainDelete = errs.Class("unable to delete pending audit")
|
||||
)
|
||||
|
||||
// PendingAudit contains info needed for retrying an audit for a contained node
|
||||
// PendingAudit contains info needed for retrying an audit for a contained node.
|
||||
type PendingAudit struct {
|
||||
NodeID storj.NodeID
|
||||
PieceID storj.PieceID
|
||||
|
@ -8,7 +8,7 @@ import (
|
||||
"encoding/binary"
|
||||
)
|
||||
|
||||
// cryptoSource implements the math/rand Source interface using crypto/rand
|
||||
// cryptoSource implements the math/rand Source interface using crypto/rand.
|
||||
type cryptoSource struct{}
|
||||
|
||||
func (s cryptoSource) Seed(seed int64) {}
|
||||
|
@ -23,7 +23,7 @@ type PathCollector struct {
|
||||
rand *rand.Rand
|
||||
}
|
||||
|
||||
// NewPathCollector instantiates a path collector
|
||||
// NewPathCollector instantiates a path collector.
|
||||
func NewPathCollector(reservoirSlots int, r *rand.Rand) *PathCollector {
|
||||
return &PathCollector{
|
||||
Reservoirs: make(map[storj.NodeID]*Reservoir),
|
||||
@ -32,7 +32,7 @@ func NewPathCollector(reservoirSlots int, r *rand.Rand) *PathCollector {
|
||||
}
|
||||
}
|
||||
|
||||
// RemoteSegment takes a remote segment found in metainfo and creates a reservoir for it if it doesn't exist already
|
||||
// RemoteSegment takes a remote segment found in metainfo and creates a reservoir for it if it doesn't exist already.
|
||||
func (collector *PathCollector) RemoteSegment(ctx context.Context, path metainfo.ScopedPath, pointer *pb.Pointer) (err error) {
|
||||
for _, piece := range pointer.GetRemote().GetRemotePieces() {
|
||||
if _, ok := collector.Reservoirs[piece.NodeId]; !ok {
|
||||
@ -43,12 +43,12 @@ func (collector *PathCollector) RemoteSegment(ctx context.Context, path metainfo
|
||||
return nil
|
||||
}
|
||||
|
||||
// Object returns nil because the audit service does not interact with objects
|
||||
// Object returns nil because the audit service does not interact with objects.
|
||||
func (collector *PathCollector) Object(ctx context.Context, path metainfo.ScopedPath, pointer *pb.Pointer) (err error) {
|
||||
return nil
|
||||
}
|
||||
|
||||
// InlineSegment returns nil because we're only auditing for storage nodes for now
|
||||
// InlineSegment returns nil because we're only auditing for storage nodes for now.
|
||||
func (collector *PathCollector) InlineSegment(ctx context.Context, path metainfo.ScopedPath, pointer *pb.Pointer) (err error) {
|
||||
return nil
|
||||
}
|
||||
|
@ -27,7 +27,7 @@ type Reporter struct {
|
||||
maxReverifyCount int32
|
||||
}
|
||||
|
||||
// Report contains audit result lists for nodes that succeeded, failed, were offline, have pending audits, or failed for unknown reasons
|
||||
// Report contains audit result lists for nodes that succeeded, failed, were offline, have pending audits, or failed for unknown reasons.
|
||||
type Report struct {
|
||||
Successes storj.NodeIDList
|
||||
Fails storj.NodeIDList
|
||||
@ -36,7 +36,7 @@ type Report struct {
|
||||
Unknown storj.NodeIDList
|
||||
}
|
||||
|
||||
// NewReporter instantiates a reporter
|
||||
// NewReporter instantiates a reporter.
|
||||
func NewReporter(log *zap.Logger, overlay *overlay.Service, containment Containment, maxRetries int, maxReverifyCount int32) *Reporter {
|
||||
return &Reporter{
|
||||
log: log,
|
||||
@ -124,7 +124,7 @@ func (reporter *Reporter) RecordAudits(ctx context.Context, req Report, path sto
|
||||
return Report{}, nil
|
||||
}
|
||||
|
||||
// recordAuditFailStatus updates nodeIDs in overlay with isup=true, auditoutcome=fail
|
||||
// recordAuditFailStatus updates nodeIDs in overlay with isup=true, auditoutcome=fail.
|
||||
func (reporter *Reporter) recordAuditFailStatus(ctx context.Context, failedAuditNodeIDs storj.NodeIDList) (failed storj.NodeIDList, err error) {
|
||||
defer mon.Task()(&ctx)(&err)
|
||||
|
||||
@ -144,7 +144,7 @@ func (reporter *Reporter) recordAuditFailStatus(ctx context.Context, failedAudit
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// recordAuditUnknownStatus updates nodeIDs in overlay with isup=true, auditoutcome=unknown
|
||||
// recordAuditUnknownStatus updates nodeIDs in overlay with isup=true, auditoutcome=unknown.
|
||||
func (reporter *Reporter) recordAuditUnknownStatus(ctx context.Context, unknownAuditNodeIDs storj.NodeIDList) (failed storj.NodeIDList, err error) {
|
||||
defer mon.Task()(&ctx)(&err)
|
||||
|
||||
@ -186,7 +186,7 @@ func (reporter *Reporter) recordOfflineStatus(ctx context.Context, offlineNodeID
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// recordAuditSuccessStatus updates nodeIDs in overlay with isup=true, auditoutcome=success
|
||||
// recordAuditSuccessStatus updates nodeIDs in overlay with isup=true, auditoutcome=success.
|
||||
func (reporter *Reporter) recordAuditSuccessStatus(ctx context.Context, successNodeIDs storj.NodeIDList) (failed storj.NodeIDList, err error) {
|
||||
defer mon.Task()(&ctx)(&err)
|
||||
|
||||
@ -207,7 +207,7 @@ func (reporter *Reporter) recordAuditSuccessStatus(ctx context.Context, successN
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// recordPendingAudits updates the containment status of nodes with pending audits
|
||||
// recordPendingAudits updates the containment status of nodes with pending audits.
|
||||
func (reporter *Reporter) recordPendingAudits(ctx context.Context, pendingAudits []*PendingAudit) (failed []*PendingAudit, err error) {
|
||||
defer mon.Task()(&ctx)(&err)
|
||||
var errlist errs.Group
|
||||
|
@ -11,14 +11,14 @@ import (
|
||||
|
||||
const maxReservoirSize = 3
|
||||
|
||||
// Reservoir holds a certain number of segments to reflect a random sample
|
||||
// Reservoir holds a certain number of segments to reflect a random sample.
|
||||
type Reservoir struct {
|
||||
Paths [maxReservoirSize]storj.Path
|
||||
size int8
|
||||
index int64
|
||||
}
|
||||
|
||||
// NewReservoir instantiates a Reservoir
|
||||
// NewReservoir instantiates a Reservoir.
|
||||
func NewReservoir(size int) *Reservoir {
|
||||
if size < 1 {
|
||||
size = 1
|
||||
@ -32,7 +32,7 @@ func NewReservoir(size int) *Reservoir {
|
||||
}
|
||||
|
||||
// Sample makes sure that for every segment in metainfo from index i=size..n-1,
|
||||
// pick a random number r = rand(0..i), and if r < size, replace reservoir.Segments[r] with segment
|
||||
// pick a random number r = rand(0..i), and if r < size, replace reservoir.Segments[r] with segment.
|
||||
func (reservoir *Reservoir) Sample(r *rand.Rand, path storj.Path) {
|
||||
reservoir.index++
|
||||
if reservoir.index < int64(reservoir.size) {
|
||||
|
@ -899,7 +899,7 @@ func TestReverifyDifferentShare(t *testing.T) {
|
||||
})
|
||||
}
|
||||
|
||||
// TestReverifyExpired1 tests the case where the segment passed into Reverify is expired
|
||||
// TestReverifyExpired1 tests the case where the segment passed into Reverify is expired.
|
||||
func TestReverifyExpired1(t *testing.T) {
|
||||
testplanet.Run(t, testplanet.Config{
|
||||
SatelliteCount: 1, StorageNodeCount: 4, UplinkCount: 1,
|
||||
@ -1074,7 +1074,7 @@ func TestReverifyExpired2(t *testing.T) {
|
||||
}
|
||||
|
||||
// TestReverifySlowDownload checks that a node that times out while sending data to the
|
||||
// audit service gets put into containment mode
|
||||
// audit service gets put into containment mode.
|
||||
func TestReverifySlowDownload(t *testing.T) {
|
||||
testplanet.Run(t, testplanet.Config{
|
||||
SatelliteCount: 1, StorageNodeCount: 4, UplinkCount: 1,
|
||||
|
@ -44,7 +44,7 @@ var (
|
||||
ErrSegmentModified = errs.Class("segment has been modified")
|
||||
)
|
||||
|
||||
// Share represents required information about an audited share
|
||||
// Share represents required information about an audited share.
|
||||
type Share struct {
|
||||
Error error
|
||||
PieceNum int
|
||||
@ -69,7 +69,7 @@ type Verifier struct {
|
||||
OnTestingCheckSegmentAlteredHook func()
|
||||
}
|
||||
|
||||
// NewVerifier creates a Verifier
|
||||
// NewVerifier creates a Verifier.
|
||||
func NewVerifier(log *zap.Logger, metainfo *metainfo.Service, dialer rpc.Dialer, overlay *overlay.Service, containment Containment, orders *orders.Service, id *identity.FullIdentity, minBytesPerSecond memory.Size, minDownloadTimeout time.Duration) *Verifier {
|
||||
return &Verifier{
|
||||
log: log,
|
||||
@ -308,7 +308,7 @@ func (verifier *Verifier) Verify(ctx context.Context, path storj.Path, skip map[
|
||||
}, nil
|
||||
}
|
||||
|
||||
// DownloadShares downloads shares from the nodes where remote pieces are located
|
||||
// DownloadShares downloads shares from the nodes where remote pieces are located.
|
||||
func (verifier *Verifier) DownloadShares(ctx context.Context, limits []*pb.AddressedOrderLimit, piecePrivateKey storj.PiecePrivateKey, stripeIndex int64, shareSize int32) (shares map[int]Share, err error) {
|
||||
defer mon.Task()(&ctx)(&err)
|
||||
|
||||
@ -345,7 +345,7 @@ func (verifier *Verifier) DownloadShares(ctx context.Context, limits []*pb.Addre
|
||||
return shares, nil
|
||||
}
|
||||
|
||||
// Reverify reverifies the contained nodes in the stripe
|
||||
// Reverify reverifies the contained nodes in the stripe.
|
||||
func (verifier *Verifier) Reverify(ctx context.Context, path storj.Path) (report Report, err error) {
|
||||
defer mon.Task()(&ctx)(&err)
|
||||
|
||||
@ -619,7 +619,7 @@ func (verifier *Verifier) Reverify(ctx context.Context, path storj.Path) (report
|
||||
return report, err
|
||||
}
|
||||
|
||||
// GetShare use piece store client to download shares from nodes
|
||||
// GetShare use piece store client to download shares from nodes.
|
||||
func (verifier *Verifier) GetShare(ctx context.Context, limit *pb.AddressedOrderLimit, piecePrivateKey storj.PiecePrivateKey, stripeIndex int64, shareSize int32, pieceNum int) (share Share, err error) {
|
||||
defer mon.Task()(&ctx)(&err)
|
||||
|
||||
@ -729,7 +729,7 @@ func auditShares(ctx context.Context, required, total int, originals map[int]Sha
|
||||
return pieceNums, copies, nil
|
||||
}
|
||||
|
||||
// makeCopies takes in a map of audit Shares and deep copies their data to a slice of infectious Shares
|
||||
// makeCopies takes in a map of audit Shares and deep copies their data to a slice of infectious Shares.
|
||||
func makeCopies(ctx context.Context, originals map[int]Share) (copies []infectious.Share, err error) {
|
||||
defer mon.Task()(&ctx)(&err)
|
||||
copies = make([]infectious.Share, 0, len(originals))
|
||||
@ -762,7 +762,7 @@ func getOfflineNodes(pointer *pb.Pointer, limits []*pb.AddressedOrderLimit, skip
|
||||
return offlines
|
||||
}
|
||||
|
||||
// getSuccessNodes uses the failed nodes, offline nodes and contained nodes arrays to determine which nodes passed the audit
|
||||
// getSuccessNodes uses the failed nodes, offline nodes and contained nodes arrays to determine which nodes passed the audit.
|
||||
func getSuccessNodes(ctx context.Context, shares map[int]Share, failedNodes, offlineNodes, unknownNodes storj.NodeIDList, containedNodes map[int]storj.NodeID) (successNodes storj.NodeIDList) {
|
||||
defer mon.Task()(&ctx)(nil)
|
||||
fails := make(map[storj.NodeID]bool)
|
||||
|
@ -763,7 +763,7 @@ func TestVerifierModifiedSegmentFailsOnce(t *testing.T) {
|
||||
}
|
||||
|
||||
// TestVerifierSlowDownload checks that a node that times out while sending data to the
|
||||
// audit service gets put into containment mode
|
||||
// audit service gets put into containment mode.
|
||||
func TestVerifierSlowDownload(t *testing.T) {
|
||||
testplanet.Run(t, testplanet.Config{
|
||||
SatelliteCount: 1, StorageNodeCount: 4, UplinkCount: 1,
|
||||
@ -821,7 +821,7 @@ func TestVerifierSlowDownload(t *testing.T) {
|
||||
}
|
||||
|
||||
// TestVerifierUnknownError checks that a node that returns an unknown error in response to an audit request
|
||||
// does not get marked as successful, failed, or contained
|
||||
// does not get marked as successful, failed, or contained.
|
||||
func TestVerifierUnknownError(t *testing.T) {
|
||||
testplanet.Run(t, testplanet.Config{
|
||||
SatelliteCount: 1, StorageNodeCount: 4, UplinkCount: 1,
|
||||
|
@ -34,7 +34,7 @@ func (percents Percents) String() string {
|
||||
return strings.Join(s, ",")
|
||||
}
|
||||
|
||||
// Set implements pflag.Value by parsing a comma separated list of percents
|
||||
// Set implements pflag.Value by parsing a comma separated list of percents.
|
||||
func (percents *Percents) Set(value string) error {
|
||||
var entries []string
|
||||
if value != "" {
|
||||
@ -54,7 +54,7 @@ func (percents *Percents) Set(value string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Type returns the type of the pflag.Value
|
||||
// Type returns the type of the pflag.Value.
|
||||
func (percents Percents) Type() string {
|
||||
return "percents"
|
||||
}
|
||||
|
@ -30,7 +30,7 @@ type APIKeys interface {
|
||||
Delete(ctx context.Context, id uuid.UUID) error
|
||||
}
|
||||
|
||||
// APIKeyInfo describing api key model in the database
|
||||
// APIKeyInfo describing api key model in the database.
|
||||
type APIKeyInfo struct {
|
||||
ID uuid.UUID `json:"id"`
|
||||
ProjectID uuid.UUID `json:"projectId"`
|
||||
@ -40,7 +40,7 @@ type APIKeyInfo struct {
|
||||
CreatedAt time.Time `json:"createdAt"`
|
||||
}
|
||||
|
||||
// APIKeyCursor holds info for api keys cursor pagination
|
||||
// APIKeyCursor holds info for api keys cursor pagination.
|
||||
type APIKeyCursor struct {
|
||||
Search string
|
||||
Limit uint
|
||||
@ -49,7 +49,7 @@ type APIKeyCursor struct {
|
||||
OrderDirection OrderDirection
|
||||
}
|
||||
|
||||
// APIKeyPage represent api key page result
|
||||
// APIKeyPage represent api key page result.
|
||||
type APIKeyPage struct {
|
||||
APIKeys []APIKeyInfo
|
||||
|
||||
@ -64,7 +64,7 @@ type APIKeyPage struct {
|
||||
TotalCount uint64
|
||||
}
|
||||
|
||||
// APIKeyOrder is used for querying api keys in specified order
|
||||
// APIKeyOrder is used for querying api keys in specified order.
|
||||
type APIKeyOrder uint8
|
||||
|
||||
const (
|
||||
|
@ -14,12 +14,12 @@ import (
|
||||
|
||||
//TODO: change to JWT or Macaroon based auth
|
||||
|
||||
// Signer creates signature for provided data
|
||||
// Signer creates signature for provided data.
|
||||
type Signer interface {
|
||||
Sign(data []byte) ([]byte, error)
|
||||
}
|
||||
|
||||
// signToken signs token with given signer
|
||||
// signToken signs token with given signer.
|
||||
func signToken(token *consoleauth.Token, signer Signer) error {
|
||||
encoded := base64.URLEncoding.EncodeToString(token.Payload)
|
||||
|
||||
@ -32,32 +32,32 @@ func signToken(token *consoleauth.Token, signer Signer) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// key is a context value key type
|
||||
// key is a context value key type.
|
||||
type key int
|
||||
|
||||
// authKey is context key for Authorization
|
||||
// authKey is context key for Authorization.
|
||||
const authKey key = 0
|
||||
|
||||
// ErrUnauthorized is error class for authorization related errors
|
||||
// ErrUnauthorized is error class for authorization related errors.
|
||||
var ErrUnauthorized = errs.Class("unauthorized error")
|
||||
|
||||
// Authorization contains auth info of authorized User
|
||||
// Authorization contains auth info of authorized User.
|
||||
type Authorization struct {
|
||||
User User
|
||||
Claims consoleauth.Claims
|
||||
}
|
||||
|
||||
// WithAuth creates new context with Authorization
|
||||
// WithAuth creates new context with Authorization.
|
||||
func WithAuth(ctx context.Context, auth Authorization) context.Context {
|
||||
return context.WithValue(ctx, authKey, auth)
|
||||
}
|
||||
|
||||
// WithAuthFailure creates new context with authorization failure
|
||||
// WithAuthFailure creates new context with authorization failure.
|
||||
func WithAuthFailure(ctx context.Context, err error) context.Context {
|
||||
return context.WithValue(ctx, authKey, err)
|
||||
}
|
||||
|
||||
// GetAuth gets Authorization from context
|
||||
// GetAuth gets Authorization from context.
|
||||
func GetAuth(ctx context.Context) (Authorization, error) {
|
||||
value := ctx.Value(authKey)
|
||||
|
||||
|
@ -13,14 +13,14 @@ import (
|
||||
|
||||
//TODO: change to JWT or Macaroon based auth
|
||||
|
||||
// Claims represents data signed by server and used for authentication
|
||||
// Claims represents data signed by server and used for authentication.
|
||||
type Claims struct {
|
||||
ID uuid.UUID `json:"id"`
|
||||
Email string `json:"email,omitempty"`
|
||||
Expiration time.Time `json:"expires,omitempty"`
|
||||
}
|
||||
|
||||
// JSON returns json representation of Claims
|
||||
// JSON returns json representation of Claims.
|
||||
func (c *Claims) JSON() ([]byte, error) {
|
||||
buffer := bytes.NewBuffer(nil)
|
||||
|
||||
@ -28,7 +28,7 @@ func (c *Claims) JSON() ([]byte, error) {
|
||||
return buffer.Bytes(), err
|
||||
}
|
||||
|
||||
// FromJSON returns Claims instance, parsed from JSON
|
||||
// FromJSON returns Claims instance, parsed from JSON.
|
||||
func FromJSON(data []byte) (*Claims, error) {
|
||||
claims := new(Claims)
|
||||
|
||||
|
@ -10,12 +10,12 @@ import (
|
||||
|
||||
//TODO: change to JWT or Macaroon based auth
|
||||
|
||||
// Hmac is hmac256 based Signer
|
||||
// Hmac is hmac256 based Signer.
|
||||
type Hmac struct {
|
||||
Secret []byte
|
||||
}
|
||||
|
||||
// Sign implements satellite signer
|
||||
// Sign implements satellite signer.
|
||||
func (a *Hmac) Sign(data []byte) ([]byte, error) {
|
||||
mac := hmac.New(sha256.New, a.Secret)
|
||||
|
||||
|
@ -14,7 +14,7 @@ import (
|
||||
|
||||
//TODO: change to JWT or Macaroon based auth
|
||||
|
||||
// Token represents authentication data structure
|
||||
// Token represents authentication data structure.
|
||||
type Token struct {
|
||||
Payload []byte
|
||||
Signature []byte
|
||||
@ -28,7 +28,7 @@ func (t Token) String() string {
|
||||
return strings.Join([]string{payload, signature}, ".")
|
||||
}
|
||||
|
||||
// FromBase64URLString creates Token instance from base64URLEncoded string representation
|
||||
// FromBase64URLString creates Token instance from base64URLEncoded string representation.
|
||||
func FromBase64URLString(token string) (Token, error) {
|
||||
i := strings.Index(token, ".")
|
||||
if i < 0 {
|
||||
|
@ -19,7 +19,7 @@ const (
|
||||
FieldKey = "key"
|
||||
)
|
||||
|
||||
// graphqlAPIKeyInfo creates satellite.APIKeyInfo graphql object
|
||||
// graphqlAPIKeyInfo creates satellite.APIKeyInfo graphql object.
|
||||
func graphqlAPIKeyInfo() *graphql.Object {
|
||||
return graphql.NewObject(graphql.ObjectConfig{
|
||||
Name: APIKeyInfoType,
|
||||
@ -43,7 +43,7 @@ func graphqlAPIKeyInfo() *graphql.Object {
|
||||
})
|
||||
}
|
||||
|
||||
// graphqlCreateAPIKey creates createAPIKey graphql object
|
||||
// graphqlCreateAPIKey creates createAPIKey graphql object.
|
||||
func graphqlCreateAPIKey(types *TypeCreator) *graphql.Object {
|
||||
return graphql.NewObject(graphql.ObjectConfig{
|
||||
Name: CreateAPIKeyType,
|
||||
@ -116,7 +116,7 @@ func graphqlAPIKeysPage(types *TypeCreator) *graphql.Object {
|
||||
})
|
||||
}
|
||||
|
||||
// createAPIKey holds macaroon.APIKey and console.APIKeyInfo
|
||||
// createAPIKey holds macaroon.APIKey and console.APIKeyInfo.
|
||||
type createAPIKey struct {
|
||||
Key string
|
||||
KeyInfo *console.APIKeyInfo
|
||||
|
@ -20,7 +20,7 @@ const (
|
||||
TermsAndConditionsURL = "termsAndConditionsURL"
|
||||
)
|
||||
|
||||
// AccountActivationEmail is mailservice template with activation data
|
||||
// AccountActivationEmail is mailservice template with activation data.
|
||||
type AccountActivationEmail struct {
|
||||
Origin string
|
||||
ActivationLink string
|
||||
@ -29,13 +29,13 @@ type AccountActivationEmail struct {
|
||||
UserName string
|
||||
}
|
||||
|
||||
// Template returns email template name
|
||||
// Template returns email template name.
|
||||
func (*AccountActivationEmail) Template() string { return "Welcome" }
|
||||
|
||||
// Subject gets email subject
|
||||
// Subject gets email subject.
|
||||
func (*AccountActivationEmail) Subject() string { return "Activate your email" }
|
||||
|
||||
// ForgotPasswordEmail is mailservice template with reset password data
|
||||
// ForgotPasswordEmail is mailservice template with reset password data.
|
||||
type ForgotPasswordEmail struct {
|
||||
Origin string
|
||||
UserName string
|
||||
@ -46,13 +46,13 @@ type ForgotPasswordEmail struct {
|
||||
TermsAndConditionsURL string
|
||||
}
|
||||
|
||||
// Template returns email template name
|
||||
// Template returns email template name.
|
||||
func (*ForgotPasswordEmail) Template() string { return "Forgot" }
|
||||
|
||||
// Subject gets email subject
|
||||
// Subject gets email subject.
|
||||
func (*ForgotPasswordEmail) Subject() string { return "Password recovery request" }
|
||||
|
||||
// ProjectInvitationEmail is mailservice template for project invitation email
|
||||
// ProjectInvitationEmail is mailservice template for project invitation email.
|
||||
type ProjectInvitationEmail struct {
|
||||
Origin string
|
||||
UserName string
|
||||
@ -63,10 +63,10 @@ type ProjectInvitationEmail struct {
|
||||
TermsAndConditionsURL string
|
||||
}
|
||||
|
||||
// Template returns email template name
|
||||
// Template returns email template name.
|
||||
func (*ProjectInvitationEmail) Template() string { return "Invite" }
|
||||
|
||||
// Subject gets email subject
|
||||
// Subject gets email subject.
|
||||
func (email *ProjectInvitationEmail) Subject() string {
|
||||
return "You were invited to join the Project " + email.ProjectName
|
||||
}
|
||||
|
@ -53,7 +53,7 @@ const (
|
||||
ReferrerUserID = "referrerUserId"
|
||||
)
|
||||
|
||||
// rootMutation creates mutation for graphql populated by AccountsClient
|
||||
// rootMutation creates mutation for graphql populated by AccountsClient.
|
||||
func rootMutation(log *zap.Logger, service *console.Service, mailService *mailservice.Service, types *TypeCreator) *graphql.Object {
|
||||
return graphql.NewObject(graphql.ObjectConfig{
|
||||
Name: Mutation,
|
||||
|
@ -32,15 +32,15 @@ import (
|
||||
"storj.io/storj/storage/redis/redisserver"
|
||||
)
|
||||
|
||||
// discardSender discard sending of an actual email
|
||||
// discardSender discard sending of an actual email.
|
||||
type discardSender struct{}
|
||||
|
||||
// SendEmail immediately returns with nil error
|
||||
// SendEmail immediately returns with nil error.
|
||||
func (*discardSender) SendEmail(ctx context.Context, msg *post.Message) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// FromAddress returns empty post.Address
|
||||
// FromAddress returns empty post.Address.
|
||||
func (*discardSender) FromAddress() post.Address {
|
||||
return post.Address{}
|
||||
}
|
||||
|
@ -84,7 +84,7 @@ const (
|
||||
BeforeArg = "before"
|
||||
)
|
||||
|
||||
// graphqlProject creates *graphql.Object type representation of satellite.ProjectInfo
|
||||
// graphqlProject creates *graphql.Object type representation of satellite.ProjectInfo.
|
||||
func graphqlProject(service *console.Service, types *TypeCreator) *graphql.Object {
|
||||
return graphql.NewObject(graphql.ObjectConfig{
|
||||
Name: ProjectType,
|
||||
@ -240,7 +240,7 @@ func graphqlProject(service *console.Service, types *TypeCreator) *graphql.Objec
|
||||
})
|
||||
}
|
||||
|
||||
// graphqlProjectInput creates graphql.InputObject type needed to create/update satellite.Project
|
||||
// graphqlProjectInput creates graphql.InputObject type needed to create/update satellite.Project.
|
||||
func graphqlProjectInput() *graphql.InputObject {
|
||||
return graphql.NewInputObject(graphql.InputObjectConfig{
|
||||
Name: ProjectInputType,
|
||||
@ -255,7 +255,7 @@ func graphqlProjectInput() *graphql.InputObject {
|
||||
})
|
||||
}
|
||||
|
||||
// graphqlBucketUsageCursor creates bucket usage cursor graphql input type
|
||||
// graphqlBucketUsageCursor creates bucket usage cursor graphql input type.
|
||||
func graphqlBucketUsageCursor() *graphql.InputObject {
|
||||
return graphql.NewInputObject(graphql.InputObjectConfig{
|
||||
Name: BucketUsageCursorInputType,
|
||||
@ -273,7 +273,7 @@ func graphqlBucketUsageCursor() *graphql.InputObject {
|
||||
})
|
||||
}
|
||||
|
||||
// graphqlBucketUsage creates bucket usage grapqhl type
|
||||
// graphqlBucketUsage creates bucket usage grapqhl type.
|
||||
func graphqlBucketUsage() *graphql.Object {
|
||||
return graphql.NewObject(graphql.ObjectConfig{
|
||||
Name: BucketUsageType,
|
||||
@ -300,7 +300,7 @@ func graphqlBucketUsage() *graphql.Object {
|
||||
})
|
||||
}
|
||||
|
||||
// graphqlBucketUsagePage creates bucket usage page graphql object
|
||||
// graphqlBucketUsagePage creates bucket usage page graphql object.
|
||||
func graphqlBucketUsagePage(types *TypeCreator) *graphql.Object {
|
||||
return graphql.NewObject(graphql.ObjectConfig{
|
||||
Name: BucketUsagePageType,
|
||||
@ -330,7 +330,7 @@ func graphqlBucketUsagePage(types *TypeCreator) *graphql.Object {
|
||||
})
|
||||
}
|
||||
|
||||
// graphqlProjectUsage creates project usage graphql type
|
||||
// graphqlProjectUsage creates project usage graphql type.
|
||||
func graphqlProjectUsage() *graphql.Object {
|
||||
return graphql.NewObject(graphql.ObjectConfig{
|
||||
Name: ProjectUsageType,
|
||||
@ -354,7 +354,7 @@ func graphqlProjectUsage() *graphql.Object {
|
||||
})
|
||||
}
|
||||
|
||||
// fromMapProjectInfo creates console.ProjectInfo from input args
|
||||
// fromMapProjectInfo creates console.ProjectInfo from input args.
|
||||
func fromMapProjectInfo(args map[string]interface{}) (project console.ProjectInfo) {
|
||||
project.Name, _ = args[FieldName].(string)
|
||||
project.Description, _ = args[FieldDescription].(string)
|
||||
@ -362,7 +362,7 @@ func fromMapProjectInfo(args map[string]interface{}) (project console.ProjectInf
|
||||
return
|
||||
}
|
||||
|
||||
// fromMapBucketUsageCursor creates console.BucketUsageCursor from input args
|
||||
// fromMapBucketUsageCursor creates console.BucketUsageCursor from input args.
|
||||
func fromMapBucketUsageCursor(args map[string]interface{}) (cursor accounting.BucketUsageCursor) {
|
||||
limit, _ := args[LimitArg].(int)
|
||||
page, _ := args[PageArg].(int)
|
||||
|
@ -18,7 +18,7 @@ const (
|
||||
FieldJoinedAt = "joinedAt"
|
||||
)
|
||||
|
||||
// graphqlProjectMember creates projectMember type
|
||||
// graphqlProjectMember creates projectMember type.
|
||||
func graphqlProjectMember(service *console.Service, types *TypeCreator) *graphql.Object {
|
||||
return graphql.NewObject(graphql.ObjectConfig{
|
||||
Name: ProjectMemberType,
|
||||
@ -96,7 +96,7 @@ func graphqlProjectMembersPage(types *TypeCreator) *graphql.Object {
|
||||
})
|
||||
}
|
||||
|
||||
// projectMember encapsulates User and joinedAt
|
||||
// projectMember encapsulates User and joinedAt.
|
||||
type projectMember struct {
|
||||
User *console.User
|
||||
JoinedAt time.Time
|
||||
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue
Block a user