all: fix dots

Change-Id: I6a419c62700c568254ff67ae5b73efed2fc98aa2
Egon Elbre 2020-07-16 17:18:02 +03:00
parent 0a800336cb
commit 080ba47a06
272 changed files with 1017 additions and 1017 deletions
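The change is mechanical: every affected top-level Go comment gains a trailing period so doc comments read as full sentences, the convention that linters such as godot enforce. The sketch below is purely illustrative and is not the tooling used for this commit; it assumes a single Go file passed as an argument (the "dotcheck" usage name is hypothetical) and reports line comments that do not end in a period.

package main

import (
	"fmt"
	"go/parser"
	"go/token"
	"os"
	"strings"
)

// Illustrative sketch only, not the tool behind this commit: it reports
// line comments in one Go file that do not end with a period.
func main() {
	if len(os.Args) < 2 {
		fmt.Fprintln(os.Stderr, "usage: dotcheck <file.go>")
		os.Exit(1)
	}
	fset := token.NewFileSet()
	file, err := parser.ParseFile(fset, os.Args[1], nil, parser.ParseComments)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	for _, group := range file.Comments {
		// Only the last line of a comment group needs the closing period.
		last := group.List[len(group.List)-1]
		if strings.HasPrefix(last.Text, "/*") {
			continue // block comments are skipped in this sketch
		}
		text := strings.TrimSpace(strings.TrimPrefix(last.Text, "//"))
		if text != "" && !strings.HasSuffix(text, ".") {
			pos := fset.Position(last.Pos())
			fmt.Printf("%s:%d: comment does not end in a period: %q\n", pos.Filename, pos.Line, text)
		}
	}
}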

View File

@@ -167,7 +167,7 @@ func (a Authorization) String() string {
return fmt.Sprintf("%."+fmtLen+"s..", a.Token.String())
}
-// Equal checks if two tokens have equal user IDs and data
+// Equal checks if two tokens have equal user IDs and data.
func (t *Token) Equal(cmpToken *Token) bool {
return t.UserID == cmpToken.UserID && bytes.Equal(t.Data[:], cmpToken.Data[:])
}

View File

@@ -24,7 +24,7 @@ type Config struct {
TLS tlsopts.Config
}
-// Client implements pb.DRPCCertificatesClient
+// Client implements pb.DRPCCertificatesClient.
type Client struct {
conn *rpc.Conn
client pb.DRPCCertificatesClient

View File

@@ -29,7 +29,7 @@ import (
"storj.io/storj/pkg/server"
)
-// TODO: test sad path
+// TODO: test sad path.
func TestCertificateSigner_Sign_E2E(t *testing.T) {
testidentity.SignerVersionsTest(t, func(t *testing.T, _ storj.IDVersion, signer *identity.FullCertificateAuthority) {
testidentity.CompleteIdentityVersionsTest(t, func(t *testing.T, _ storj.IDVersion, serverIdent *identity.FullIdentity) {

View File

@@ -119,7 +119,7 @@ func NewInspector(ctx context.Context, address, path string) (*Inspector, error)
// Close closes the inspector.
func (i *Inspector) Close() error { return i.conn.Close() }
-// ObjectHealth gets information about the health of an object on the network
+// ObjectHealth gets information about the health of an object on the network.
func ObjectHealth(cmd *cobra.Command, args []string) (err error) {
ctx, _ := process.Ctx(cmd)
i, err := NewInspector(ctx, *Addr, *IdentityPath)
@@ -201,7 +201,7 @@ func ObjectHealth(cmd *cobra.Command, args []string) (err error) {
return nil
}
-// SegmentHealth gets information about the health of a segment on the network
+// SegmentHealth gets information about the health of a segment on the network.
func SegmentHealth(cmd *cobra.Command, args []string) (err error) {
ctx, _ := process.Ctx(cmd)
i, err := NewInspector(ctx, *Addr, *IdentityPath)
@@ -415,7 +415,7 @@ func getSegments(cmd *cobra.Command, args []string) error {
return nil
}
-// sortSegments by the object they belong to
+// sortSegments by the object they belong to.
func sortSegments(segments []*pb.IrreparableSegment) map[string][]*pb.IrreparableSegment {
objects := make(map[string][]*pb.IrreparableSegment)
for _, seg := range segments {

View File

@@ -105,7 +105,7 @@ func ReadFile(path string) (*Asset, error) {
return asset, nil
}
-// readFiles adds all nested files to asset
+// readFiles adds all nested files to asset.
func (asset *Asset) readFiles(dir string, infos []os.FileInfo) error {
for _, info := range infos {
child, err := ReadFile(filepath.Join(dir, info.Name()))

View File

@@ -14,13 +14,13 @@ import (
var _ http.FileSystem = (*InmemoryFileSystem)(nil)
-// InmemoryFileSystem defines an inmemory http.FileSystem
+// InmemoryFileSystem defines an inmemory http.FileSystem.
type InmemoryFileSystem struct {
Root *Asset
Index map[string]*Asset
}
-// Inmemory creates an InmemoryFileSystem from
+// Inmemory creates an InmemoryFileSystem from.
func Inmemory(root *Asset) *InmemoryFileSystem {
fs := &InmemoryFileSystem{}
fs.Root = root
@@ -29,7 +29,7 @@ func Inmemory(root *Asset) *InmemoryFileSystem {
return fs
}
-// reindex inserts a node to the index
+// reindex inserts a node to the index.
func (fs *InmemoryFileSystem) reindex(prefix, name string, file *Asset) {
fs.Index[path.Join(prefix, name)] = file
for _, child := range file.Children {
@@ -51,7 +51,7 @@ func (asset *Asset) File() *File {
return &File{*bytes.NewReader(asset.Data), asset}
}
-// File defines a readable file
+// File defines a readable file.
type File struct {
bytes.Reader
*Asset
@@ -98,20 +98,20 @@ type FileInfo struct {
modTime time.Time
}
-// Name implements os.FileInfo
+// Name implements os.FileInfo.
func (info FileInfo) Name() string { return info.name }
-// Size implements os.FileInfo
+// Size implements os.FileInfo.
func (info FileInfo) Size() int64 { return info.size }
-// Mode implements os.FileInfo
+// Mode implements os.FileInfo.
func (info FileInfo) Mode() os.FileMode { return info.mode }
-// ModTime implements os.FileInfo
+// ModTime implements os.FileInfo.
func (info FileInfo) ModTime() time.Time { return info.modTime }
-// IsDir implements os.FileInfo
+// IsDir implements os.FileInfo.
func (info FileInfo) IsDir() bool { return info.mode.IsDir() }
-// Sys implements os.FileInfo
+// Sys implements os.FileInfo.
func (info FileInfo) Sys() interface{} { return nil }

View File

@@ -17,7 +17,7 @@ import (
"storj.io/common/storj"
)
-// PromptForAccessName handles user input for access name to be used with wizards
+// PromptForAccessName handles user input for access name to be used with wizards.
func PromptForAccessName() (string, error) {
_, err := fmt.Printf("Choose an access name (use lowercase letters) [\"default\"]: ")
if err != nil {
@@ -41,7 +41,7 @@ func PromptForAccessName() (string, error) {
return accessName, nil
}
-// PromptForSatellite handles user input for a satellite address to be used with wizards
+// PromptForSatellite handles user input for a satellite address to be used with wizards.
func PromptForSatellite(cmd *cobra.Command) (string, error) {
satellites := []string{
"12EayRS2V1kEsWESU9QMRseFhdxYxKicsiFmxrsLZHeLUtdps3S@us-central-1.tardigrade.io:7777",
@@ -115,7 +115,7 @@ func PromptForSatellite(cmd *cobra.Command) (string, error) {
return satelliteAddress, nil
}
-// PromptForAPIKey handles user input for an API key to be used with wizards
+// PromptForAPIKey handles user input for an API key to be used with wizards.
func PromptForAPIKey() (string, error) {
_, err := fmt.Print("Enter your API key: ")
if err != nil {
@@ -134,7 +134,7 @@ func PromptForAPIKey() (string, error) {
return apiKey, nil
}
-// PromptForEncryptionPassphrase handles user input for an encryption passphrase to be used with wizards
+// PromptForEncryptionPassphrase handles user input for an encryption passphrase to be used with wizards.
func PromptForEncryptionPassphrase() (string, error) {
_, err := fmt.Print("Enter your encryption passphrase: ")
if err != nil {

View File

@@ -25,7 +25,7 @@ import (
"storj.io/storj/satellite/satellitedb"
)
-// generateGracefulExitCSV creates a report with graceful exit data for exiting or exited nodes in a given period
+// generateGracefulExitCSV creates a report with graceful exit data for exiting or exited nodes in a given period.
func generateGracefulExitCSV(ctx context.Context, completed bool, start time.Time, end time.Time, output io.Writer) error {
db, err := satellitedb.New(zap.L().Named("db"), gracefulExitCfg.Database, satellitedb.Options{})
if err != nil {

View File

@@ -37,7 +37,7 @@ import (
"storj.io/storj/satellite/satellitedb/dbx"
)
-// Satellite defines satellite configuration
+// Satellite defines satellite configuration.
type Satellite struct {
Database string `help:"satellite database connection string" releaseDefault:"postgres://" devDefault:"postgres://"`
@@ -55,7 +55,7 @@ type Satellite struct {
satellite.Config
}
-// APIKeysLRUOptions returns a cache.Options based on the APIKeys LRU config
+// APIKeysLRUOptions returns a cache.Options based on the APIKeys LRU config.
func (s *Satellite) APIKeysLRUOptions() cache.Options {
return cache.Options{
Expiration: s.DatabaseOptions.APIKeysCache.Expiration,
@@ -63,7 +63,7 @@ func (s *Satellite) APIKeysLRUOptions() cache.Options {
}
}
-// RevocationLRUOptions returns a cache.Options based on the Revocations LRU config
+// RevocationLRUOptions returns a cache.Options based on the Revocations LRU config.
func (s *Satellite) RevocationLRUOptions() cache.Options {
return cache.Options{
Expiration: s.DatabaseOptions.RevocationsCache.Expiration,

View File

@@ -29,7 +29,7 @@ var headers = []string{
"bytes:BWEgress",
}
-// GenerateAttributionCSV creates a report with
+// GenerateAttributionCSV creates a report with.
func GenerateAttributionCSV(ctx context.Context, database string, partnerID uuid.UUID, start time.Time, end time.Time, output io.Writer) error {
log := zap.L().Named("db")
db, err := satellitedb.New(log, database, satellitedb.Options{})

View File

@@ -19,7 +19,7 @@ import (
"storj.io/storj/satellite/satellitedb"
)
-// generateNodeUsageCSV creates a report with node usage data for all nodes in a given period which can be used for payments
+// generateNodeUsageCSV creates a report with node usage data for all nodes in a given period which can be used for payments.
func generateNodeUsageCSV(ctx context.Context, start time.Time, end time.Time, output io.Writer) error {
db, err := satellitedb.New(zap.L().Named("db"), nodeUsageCfg.Database, satellitedb.Options{})
if err != nil {

View File

@@ -573,7 +573,7 @@ func TestObserver_findZombieSegments(t *testing.T) {
}
// segmentRef is an object segment reference to be used for simulating calls to
-// observer.processSegment
+// observer.processSegment.
type segmentRef struct {
path metainfo.ScopedPath
pointer *pb.Pointer

View File

@@ -145,7 +145,7 @@ func printDashboard(data *pb.DashboardResponse) error {
return nil
}
-// clearScreen clears the screen so it can be redrawn
+// clearScreen clears the screen so it can be redrawn.
func clearScreen() {
switch runtime.GOOS {
case "linux", "darwin":

View File

@@ -12,7 +12,7 @@ import (
"go.uber.org/zap"
)
-// Deprecated contains deprecated config structs
+// Deprecated contains deprecated config structs.
type Deprecated struct {
Kademlia struct {
ExternalAddress string `default:"" hidden:"true"`
@@ -23,7 +23,7 @@ type Deprecated struct {
}
}
-// maps deprecated config values to new values if applicable
+// maps deprecated config values to new values if applicable.
func mapDeprecatedConfigs(log *zap.Logger) {
type migration struct {
newValue interface{}

View File

@@ -27,7 +27,7 @@ import (
"storj.io/storj/storagenode/storagenodedb"
)
-// StorageNodeFlags defines storage node configuration
+// StorageNodeFlags defines storage node configuration.
type StorageNodeFlags struct {
EditConf bool `default:"false" help:"open config in default editor"`

View File

@@ -39,7 +39,7 @@ var (
setupCfg AdminConf
)
-// AdminConf defines necessary configuration to run the storj-admin UI
+// AdminConf defines necessary configuration to run the storj-admin UI.
type AdminConf struct {
AuthKey string `help:"API authorization key" default:""`
Address string `help:"address to start the web server on" default:":8080"`

View File

@@ -9,7 +9,7 @@ import (
"os/signal"
)
-// NewCLIContext creates a context that can be canceled with Ctrl-C
+// NewCLIContext creates a context that can be canceled with Ctrl-C.
func NewCLIContext(root context.Context) (context.Context, func()) {
// trap Ctrl+C and call cancel on the context
ctx, cancel := context.WithCancel(root)

View File

@@ -11,7 +11,7 @@ import (
"storj.io/common/fpath"
)
-// Flags contains different flags for commands
+// Flags contains different flags for commands.
type Flags struct {
Directory string
Host string

View File

@@ -203,7 +203,7 @@ func networkDestroy(flags *Flags, args []string) error {
return os.RemoveAll(flags.Directory)
}
-// newNetwork creates a default network
+// newNetwork creates a default network.
func newNetwork(flags *Flags) (*Processes, error) {
_, filename, _, ok := runtime.Caller(0)
if !ok {
@@ -667,7 +667,7 @@ func identitySetup(network *Processes) (*Processes, error) {
return processes, nil
}
-// readConfigString reads from dir/config.yaml flagName returns the value in `into`
+// readConfigString reads from dir/config.yaml flagName returns the value in `into`.
func readConfigString(into *string, dir, flagName string) error {
vip := viper.New()
vip.AddConfigPath(dir)

View File

@@ -69,7 +69,7 @@ func (writer *PrefixWriter) Write(data []byte) (int, error) {
return writer.root.Write(data)
}
-// Write implements io.Writer that prefixes lines
+// Write implements io.Writer that prefixes lines.
func (writer *prefixWriter) Write(data []byte) (int, error) {
if len(data) == 0 {
return 0, nil

View File

@@ -23,7 +23,7 @@ import (
"storj.io/common/sync2"
)
-// Processes contains list of processes
+// Processes contains list of processes.
type Processes struct {
Output *PrefixWriter
Directory string
@@ -32,7 +32,7 @@ type Processes struct {
MaxStartupWait time.Duration
}
-// NewProcesses returns a group of processes
+// NewProcesses returns a group of processes.
func NewProcesses(dir string) *Processes {
return &Processes{
Output: NewPrefixWriter("sim", os.Stdout),
@@ -42,14 +42,14 @@ func NewProcesses(dir string) *Processes {
}
}
-// Exec executes a command on all processes
+// Exec executes a command on all processes.
func (processes *Processes) Exec(ctx context.Context, command string) error {
var group errgroup.Group
processes.Start(ctx, &group, command)
return group.Wait()
}
-// Start executes all processes using specified errgroup.Group
+// Start executes all processes using specified errgroup.Group.
func (processes *Processes) Start(ctx context.Context, group *errgroup.Group, command string) {
for _, p := range processes.List {
process := p
@@ -59,7 +59,7 @@ func (processes *Processes) Start(ctx context.Context, group *errgroup.Group, co
}
}
-// Env returns environment flags for other nodes
+// Env returns environment flags for other nodes.
func (processes *Processes) Env() []string {
var env []string
for _, process := range processes.List {
@@ -68,7 +68,7 @@ func (processes *Processes) Env() []string {
return env
}
-// Close closes all the processes and their resources
+// Close closes all the processes and their resources.
func (processes *Processes) Close() error {
var errlist errs.Group
for _, process := range processes.List {
@@ -77,7 +77,7 @@ func (processes *Processes) Close() error {
return errlist.Err()
}
-// Info represents public information about the process
+// Info represents public information about the process.
type Info struct {
Name string
Executable string
@@ -88,7 +88,7 @@ type Info struct {
Extra []EnvVar
}
-// EnvVar represents an environment variable like Key=Value
+// EnvVar represents an environment variable like Key=Value.
type EnvVar struct {
Key string
Value string
@@ -99,7 +99,7 @@ func (info *Info) AddExtra(key, value string) {
info.Extra = append(info.Extra, EnvVar{Key: key, Value: value})
}
-// Env returns process flags
+// Env returns process flags.
func (info *Info) Env() []string {
name := strings.ToUpper(info.Name)
@@ -135,10 +135,10 @@ func (info *Info) Env() []string {
return env
}
-// Arguments contains arguments based on the main command
+// Arguments contains arguments based on the main command.
type Arguments map[string][]string
-// Process is a type for monitoring the process
+// Process is a type for monitoring the process.
type Process struct {
processes *Processes
@@ -158,7 +158,7 @@ type Process struct {
stderr io.Writer
}
-// New creates a process which can be run in the specified directory
+// New creates a process which can be run in the specified directory.
func (processes *Processes) New(info Info) *Process {
output := processes.Output.Prefixed(info.Name)
@@ -187,7 +187,7 @@ func (process *Process) WaitForExited(dependency *Process) {
process.Wait = append(process.Wait, &dependency.Status.Exited)
}
-// Exec runs the process using the arguments for a given command
+// Exec runs the process using the arguments for a given command.
func (process *Process) Exec(ctx context.Context, command string) (err error) {
// ensure that we always release all status fences
defer process.Status.Started.Release()
@@ -318,7 +318,7 @@ func (process *Process) waitForAddress(maxStartupWait time.Duration) error {
return nil
}
-// tryConnect will try to connect to the process public address
+// tryConnect will try to connect to the process public address.
func (process *Process) tryConnect() bool {
conn, err := net.Dial("tcp", process.Info.Address)
if err != nil {
@@ -331,5 +331,5 @@ func (process *Process) tryConnect() bool {
return true
}
-// Close closes process resources
+// Close closes process resources.
func (process *Process) Close() error { return nil }

View File

@@ -20,7 +20,7 @@ func init() {
}, RootCmd)
}
-// catMain is the function executed when catCmd is called
+// catMain is the function executed when catCmd is called.
func catMain(cmd *cobra.Command, args []string) (err error) {
if len(args) == 0 {
return fmt.Errorf("no object specified for copy")

View File

@@ -20,7 +20,7 @@ type ClientConfig struct {
DialTimeout time.Duration `help:"timeout for dials" default:"0h2m00s"`
}
-// Config uplink configuration
+// Config uplink configuration.
type Config struct {
AccessConfig
Client ClientConfig

View File

@@ -42,7 +42,7 @@ func init() {
setBasicFlags(cpCmd.Flags(), "progress", "expires", "metadata")
}
-// upload transfers src from local machine to s3 compatible object dst
+// upload transfers src from local machine to s3 compatible object dst.
func upload(ctx context.Context, src fpath.FPath, dst fpath.FPath, showProgress bool) (err error) {
if !src.IsLocal() {
return fmt.Errorf("source must be local path: %s", src)
@@ -151,7 +151,7 @@ func upload(ctx context.Context, src fpath.FPath, dst fpath.FPath, showProgress
return nil
}
-// download transfers s3 compatible object src to dst on local machine
+// download transfers s3 compatible object src to dst on local machine.
func download(ctx context.Context, src fpath.FPath, dst fpath.FPath, showProgress bool) (err error) {
if src.IsLocal() {
return fmt.Errorf("source must be Storj URL: %s", src)
@@ -218,7 +218,7 @@ func download(ctx context.Context, src fpath.FPath, dst fpath.FPath, showProgres
return nil
}
-// copy copies s3 compatible object src to s3 compatible object dst
+// copy copies s3 compatible object src to s3 compatible object dst.
func copyObject(ctx context.Context, src fpath.FPath, dst fpath.FPath) (err error) {
if src.IsLocal() {
return fmt.Errorf("source must be Storj URL: %s", src)

View File

@@ -48,7 +48,7 @@ func init() {
cfgstruct.SetBoolAnnotation(importCmd.Flags(), "access", cfgstruct.BasicHelpAnnotationName, false)
}
-// importMain is the function executed when importCmd is called
+// importMain is the function executed when importCmd is called.
func importMain(cmd *cobra.Command, args []string) (err error) {
if cmd.Flag("access").Changed {
return ErrAccessFlag

View File

@@ -20,7 +20,7 @@ func init() {
}, RootCmd)
}
-// putMain is the function executed when putCmd is called
+// putMain is the function executed when putCmd is called.
func putMain(cmd *cobra.Command, args []string) (err error) {
if len(args) == 0 {
return fmt.Errorf("no object specified for copy")

View File

@@ -31,7 +31,7 @@ import (
const advancedFlagName = "advanced"
-// UplinkFlags configuration flags
+// UplinkFlags configuration flags.
type UplinkFlags struct {
Config
@@ -66,7 +66,7 @@ func init() {
var cpuProfile = flag.String("profile.cpu", "", "file path of the cpu profile to be created")
var memoryProfile = flag.String("profile.mem", "", "file path of the memory profile to be created")
-// RootCmd represents the base CLI command when called without any subcommands
+// RootCmd represents the base CLI command when called without any subcommands.
var RootCmd = &cobra.Command{
Use: "uplink",
Short: "The Storj client-side CLI",

View File

@@ -71,7 +71,7 @@ func parseHumanDate(date string, now time.Time) (time.Time, error) {
}
}
-// shareMain is the function executed when shareCmd is called
+// shareMain is the function executed when shareCmd is called.
func shareMain(cmd *cobra.Command, args []string) (err error) {
now := time.Now()
notBefore, err := parseHumanDate(shareCfg.NotBefore, now)

View File

@@ -38,7 +38,7 @@ func main() {
}
}
-// Main is the exported CLI executable function
+// Main is the exported CLI executable function.
func Main() error {
ctx := context.Background()
encKey := storj.Key(sha256.Sum256([]byte(*key)))

View File

@@ -27,7 +27,7 @@ func main() {
}
}
-// Main is the exported CLI executable function
+// Main is the exported CLI executable function.
func Main() error {
pieces, err := ioutil.ReadDir(flag.Arg(0))
if err != nil {

View File

@@ -47,7 +47,7 @@ func main() {
}
}
-// Main is the exported CLI executable function
+// Main is the exported CLI executable function.
func Main() error {
encKey := storj.Key(sha256.Sum256([]byte(*key)))
fc, err := infectious.NewFEC(*rsk, *rsn)

View File

@@ -40,7 +40,7 @@ func main() {
}
}
-// Main is the exported CLI executable function
+// Main is the exported CLI executable function.
func Main() error {
err := os.MkdirAll(flag.Arg(0), 0755)
if err != nil {

View File

@@ -11,12 +11,12 @@ import (
// other packages.
type apikey struct{}
-// WithAPIKey creates context with api key
+// WithAPIKey creates context with api key.
func WithAPIKey(ctx context.Context, key []byte) context.Context {
return context.WithValue(ctx, apikey{}, key)
}
-// GetAPIKey returns api key from context is exists
+// GetAPIKey returns api key from context is exists.
func GetAPIKey(ctx context.Context) ([]byte, bool) {
key, ok := ctx.Value(apikey{}).([]byte)
return key, ok

View File

@@ -21,7 +21,7 @@ func NewDBFromCfg(cfg tlsopts.Config) (*DB, error) {
return NewDB(cfg.RevocationDBURL)
}
-// NewDB returns a new revocation database given the URL
+// NewDB returns a new revocation database given the URL.
func NewDB(dbURL string) (*DB, error) {
driver, source, _, err := dbutil.SplitConnStr(dbURL)
if err != nil {
@@ -45,7 +45,7 @@ func NewDB(dbURL string) (*DB, error) {
return db, nil
}
-// newDBBolt creates a bolt-backed DB
+// newDBBolt creates a bolt-backed DB.
func newDBBolt(path string) (*DB, error) {
client, err := boltdb.New(path, extensions.RevocationBucket)
if err != nil {

View File

@@ -100,7 +100,7 @@ func (db *DB) Put(ctx context.Context, chain []*x509.Certificate, revExt pkix.Ex
return nil
}
-// List lists all revocations in the store
+// List lists all revocations in the store.
func (db *DB) List(ctx context.Context) (revs []*extensions.Revocation, err error) {
defer mon.Task()(&ctx)(&err)
@@ -134,7 +134,7 @@ func (db *DB) TestGetStore() storage.KeyValueStore {
return db.store
}
-// Close closes the underlying store
+// Close closes the underlying store.
func (db *DB) Close() error {
if db.store == nil {
return nil

View File

@@ -23,7 +23,7 @@ import (
"storj.io/storj/pkg/listenmux"
)
-// Config holds server specific configuration parameters
+// Config holds server specific configuration parameters.
type Config struct {
tlsopts.Config
Address string `user:"true" help:"public address to listen on" default:":7777"`
@@ -99,22 +99,22 @@ func New(log *zap.Logger, tlsOptions *tlsopts.Options, publicAddr, privateAddr s
return server, nil
}
-// Identity returns the server's identity
+// Identity returns the server's identity.
func (p *Server) Identity() *identity.FullIdentity { return p.tlsOptions.Ident }
-// Addr returns the server's public listener address
+// Addr returns the server's public listener address.
func (p *Server) Addr() net.Addr { return p.public.listener.Addr() }
-// PrivateAddr returns the server's private listener address
+// PrivateAddr returns the server's private listener address.
func (p *Server) PrivateAddr() net.Addr { return p.private.listener.Addr() }
-// DRPC returns the server's dRPC mux for registration purposes
+// DRPC returns the server's dRPC mux for registration purposes.
func (p *Server) DRPC() *drpcmux.Mux { return p.public.mux }
-// PrivateDRPC returns the server's dRPC mux for registration purposes
+// PrivateDRPC returns the server's dRPC mux for registration purposes.
func (p *Server) PrivateDRPC() *drpcmux.Mux { return p.private.mux }
-// Close shuts down the server
+// Close shuts down the server.
func (p *Server) Close() error {
p.mu.Lock()
defer p.mu.Unlock()
@@ -132,7 +132,7 @@ func (p *Server) Close() error {
return nil
}
-// Run will run the server and all of its services
+// Run will run the server and all of its services.
func (p *Server) Run(ctx context.Context) (err error) {
defer mon.Task()(&ctx)(&err)

View File

@@ -22,10 +22,10 @@ const padding = 2
// Y is the row
type Point struct{ X, Y int }
-// Rect is a 2D rectangle in console, excluding Max edge
+// Rect is a 2D rectangle in console, excluding Max edge.
type Rect struct{ Min, Max Point }
-// Screen is a writable area on screen
+// Screen is a writable area on screen.
type Screen struct {
rendering sync.Mutex
@@ -81,7 +81,7 @@ func (screen *Screen) Close() error {
return nil
}
-// Run runs the event loop
+// Run runs the event loop.
func (screen *Screen) Run() error {
defer screen.markClosed()
@@ -124,10 +124,10 @@ func (screen *Screen) Size() (width, height int) {
return width, height
}
-// Lock screen for exclusive rendering
+// Lock screen for exclusive rendering.
func (screen *Screen) Lock() { screen.rendering.Lock() }
-// Unlock screen
+// Unlock screen.
func (screen *Screen) Unlock() { screen.rendering.Unlock() }
// Write writes to the screen.
@@ -152,7 +152,7 @@ func (screen *Screen) Flush() error {
return err
}
-// blit writes content to the console
+// blit writes content to the console.
func (screen *Screen) blit(frame *frame) error {
screen.flushed.content = frame.content
size := screen.flushed.size
@@ -191,7 +191,7 @@ var lightStyle = rectStyle{
{'└', '─', '┘'},
}
-// drawRect draws a rectangle using termbox
+// drawRect draws a rectangle using termbox.
func drawRect(r Rect, style rectStyle) {
attr := termbox.ColorDefault

View File

@@ -6,14 +6,14 @@ package date
import "time"
-// MonthBoundary extract month from the provided date and returns its edges
+// MonthBoundary extract month from the provided date and returns its edges.
func MonthBoundary(t time.Time) (time.Time, time.Time) {
startDate := time.Date(t.Year(), t.Month(), 1, 0, 0, 0, 0, t.Location())
endDate := time.Date(t.Year(), t.Month()+1, 1, 0, 0, 0, -1, t.Location())
return startDate, endDate
}
-// DayBoundary returns start and end of the provided day
+// DayBoundary returns start and end of the provided day.
func DayBoundary(t time.Time) (time.Time, time.Time) {
return time.Date(t.Year(), t.Month(), t.Day(), 0, 0, 0, 0, t.Location()),
time.Date(t.Year(), t.Month(), t.Day()+1, 0, 0, 0, -1, t.Location())
@@ -36,7 +36,7 @@ func MonthsCountSince(from time.Time) int {
return MonthsBetweenDates(from, time.Now())
}
-// MonthsBetweenDates calculates amount of months between two dates
+// MonthsBetweenDates calculates amount of months between two dates.
func MonthsBetweenDates(from time.Time, to time.Time) int {
// we need UTC here before its the only sensible way to say what day it is
y1, M1, _ := from.UTC().Date()
@@ -47,7 +47,7 @@ func MonthsBetweenDates(from time.Time, to time.Time) int {
return months
}
-// TruncateToHourInNano returns the time truncated to the hour in nanoseconds
+// TruncateToHourInNano returns the time truncated to the hour in nanoseconds.
func TruncateToHourInNano(t time.Time) int64 {
return t.Truncate(1 * time.Hour).UnixNano()
}

View File

@@ -3,7 +3,7 @@
package dbutil
-// Implementation type of valid DBs
+// Implementation type of valid DBs.
type Implementation int
const (

View File

@@ -13,12 +13,12 @@ import (
"github.com/zeebo/errs"
)
-// Data is the database content formatted as strings
+// Data is the database content formatted as strings.
type Data struct {
Tables []*TableData
}
-// TableData is content of a sql table
+// TableData is content of a sql table.
type TableData struct {
Name string
Columns []string
@@ -36,7 +36,7 @@ func (c ColumnData) String() string {
return fmt.Sprintf("%s:%s", c.Column, c.Value)
}
-// RowData is content of a single row
+// RowData is content of a single row.
type RowData []ColumnData
// Less returns true if one row is less than the other.
@@ -74,7 +74,7 @@ func (table *TableData) AddRow(row RowData) error {
return nil
}
-// FindTable finds a table by name
+// FindTable finds a table by name.
func (data *Data) FindTable(tableName string) (*TableData, bool) {
for _, table := range data.Tables {
if table.Name == tableName {
@@ -103,7 +103,7 @@ func (row RowData) Clone() RowData {
return append(RowData{}, row...)
}
-// QueryData loads all data from tables
+// QueryData loads all data from tables.
func QueryData(ctx context.Context, db Queryer, schema *Schema, quoteColumn func(string) string) (*Data, error) {
data := &Data{}

View File

@@ -9,7 +9,7 @@ import (
"strings"
)
-// Snapshots defines a collection of snapshot
+// Snapshots defines a collection of snapshot.
type Snapshots struct {
List []*Snapshot
}
@@ -37,7 +37,7 @@ func (snapshots *Snapshots) FindVersion(version int) (*Snapshot, bool) {
return nil, false
}
-// Sort sorts the snapshots by version
+// Sort sorts the snapshots by version.
func (snapshots *Snapshots) Sort() {
sort.Slice(snapshots.List, func(i, k int) bool {
return snapshots.List[i].Version < snapshots.List[k].Version

View File

@@ -25,7 +25,7 @@ type ConfigurableDB interface {
Stats() sql.DBStats
}
-// Configure Sets Connection Boundaries and adds db_stats monitoring to monkit
+// Configure Sets Connection Boundaries and adds db_stats monitoring to monkit.
func Configure(db ConfigurableDB, dbName string, mon *monkit.Scope) {
if *maxIdleConns >= 0 {
db.SetMaxIdleConns(*maxIdleConns)

View File

@@ -28,13 +28,13 @@ func getenv(priority ...string) string {
// postgres is the test database connection string.
var postgres = flag.String("postgres-test-db", getenv("STORJ_TEST_POSTGRES", "STORJ_POSTGRES_TEST"), "PostgreSQL test database connection string (semicolon delimited for multiple), \"omit\" is used to omit the tests from output")
-// cockroach is the test database connection string for CockroachDB
+// cockroach is the test database connection string for CockroachDB.
var cockroach = flag.String("cockroach-test-db", getenv("STORJ_TEST_COCKROACH", "STORJ_COCKROACH_TEST"), "CockroachDB test database connection string (semicolon delimited for multiple), \"omit\" is used to omit the tests from output")
-// DefaultPostgres is expected to work under the storj-test docker-compose instance
+// DefaultPostgres is expected to work under the storj-test docker-compose instance.
const DefaultPostgres = "postgres://storj:storj-pass@test-postgres/teststorj?sslmode=disable"
-// DefaultCockroach is expected to work when a local cockroachDB instance is running
+// DefaultCockroach is expected to work when a local cockroachDB instance is running.
const DefaultCockroach = "cockroach://root@localhost:26257/master?sslmode=disable"
// Database defines a postgres compatible database.

View File

@@ -10,7 +10,7 @@ import (
"storj.io/storj/private/dbutil/dbschema"
)
-// QueryData loads all data from tables
+// QueryData loads all data from tables.
func QueryData(ctx context.Context, db dbschema.Queryer, schema *dbschema.Schema) (*dbschema.Data, error) {
return dbschema.QueryData(ctx, db, schema, func(columnName string) string {
quoted := strconv.Quote(columnName)

View File

@@ -72,7 +72,7 @@ func OpenUnique(ctx context.Context, connstr string, schemaPrefix string) (*dbut
}, nil
}
-// QuerySnapshot loads snapshot from database
+// QuerySnapshot loads snapshot from database.
func QuerySnapshot(ctx context.Context, db dbschema.Queryer) (*dbschema.Snapshot, error) {
schema, err := QuerySchema(ctx, db)
if err != nil {
@@ -91,7 +91,7 @@ func QuerySnapshot(ctx context.Context, db dbschema.Queryer) (*dbschema.Snapshot
}, err
}
-// CheckApplicationName ensures that the Connection String contains an application name
+// CheckApplicationName ensures that the Connection String contains an application name.
func CheckApplicationName(s string) (r string) {
if !strings.Contains(s, "application_name") {
if !strings.Contains(s, "?") {
@@ -105,7 +105,7 @@ func CheckApplicationName(s string) (r string) {
return s
}
-// IsConstraintError checks if given error is about constraint violation
+// IsConstraintError checks if given error is about constraint violation.
func IsConstraintError(err error) bool {
errCode := ErrorCode(err)
return strings.HasPrefix(errCode, pgErrorClassConstraintViolation)

View File

@@ -184,7 +184,7 @@ func QuerySchema(ctx context.Context, db dbschema.Queryer) (*dbschema.Schema, er
return schema, nil
}
-// matches FOREIGN KEY (project_id) REFERENCES projects(id) ON UPDATE CASCADE ON DELETE CASCADE
+// matches FOREIGN KEY (project_id) REFERENCES projects(id) ON UPDATE CASCADE ON DELETE CASCADE.
var rxPostgresForeignKey = regexp.MustCompile(
`^FOREIGN KEY \([[:word:]]+\) ` +
`REFERENCES ([[:word:]]+)\(([[:word:]]+)\)` +

View File

@@ -23,7 +23,7 @@ func CreateRandomTestingSchemaName(n int) string {
return hex.EncodeToString(data)
}
-// ConnstrWithSchema adds schema to a connection string
+// ConnstrWithSchema adds schema to a connection string.
func ConnstrWithSchema(connstr, schema string) string {
if strings.Contains(connstr, "?") {
connstr += "&options="
@@ -34,7 +34,7 @@ func ConnstrWithSchema(connstr, schema string) string {
}
// ParseSchemaFromConnstr returns the name of the schema parsed from the
-// connection string if one is provided
+// connection string if one is provided.
func ParseSchemaFromConnstr(connstr string) (string, error) {
url, err := url.Parse(connstr)
if err != nil {
@@ -56,12 +56,12 @@ func ParseSchemaFromConnstr(connstr string) (string, error) {
return "", nil
}
-// QuoteSchema quotes schema name for
+// QuoteSchema quotes schema name for.
func QuoteSchema(schema string) string {
return QuoteIdentifier(schema)
}
-// Execer is for executing sql
+// Execer is for executing sql.
type Execer interface {
ExecContext(ctx context.Context, query string, args ...interface{}) (sql.Result, error)
}
@@ -84,7 +84,7 @@ func CreateSchema(ctx context.Context, db Execer, schema string) (err error) {
return err
}
-// DropSchema drops the named schema
+// DropSchema drops the named schema.
func DropSchema(ctx context.Context, db Execer, schema string) error {
_, err := db.ExecContext(ctx, `DROP SCHEMA `+QuoteSchema(schema)+` CASCADE;`)
return err

View File

@@ -53,7 +53,7 @@ func LoadSnapshotFromSQL(ctx context.Context, script string) (_ *dbschema.Snapsh
return snapshot, nil
}
-// QuerySnapshot loads snapshot from database
+// QuerySnapshot loads snapshot from database.
func QuerySnapshot(ctx context.Context, db dbschema.Queryer) (*dbschema.Snapshot, error) {
schema, err := QuerySchema(ctx, db)
if err != nil {
@@ -72,7 +72,7 @@ func QuerySnapshot(ctx context.Context, db dbschema.Queryer) (*dbschema.Snapshot
}, err
}
-// QueryData loads all data from tables
+// QueryData loads all data from tables.
func QueryData(ctx context.Context, db dbschema.Queryer, schema *dbschema.Schema) (*dbschema.Data, error) {
return dbschema.QueryData(ctx, db, schema, func(columnName string) string {
quoted := strconv.Quote(columnName)
@@ -80,7 +80,7 @@ func QueryData(ctx context.Context, db dbschema.Queryer, schema *dbschema.Schema
})
}
-// IsConstraintError checks if given error is about constraint violation
+// IsConstraintError checks if given error is about constraint violation.
func IsConstraintError(err error) bool {
return errs.IsFunc(err, func(err error) bool {
if e, ok := err.(sqlite3.Error); ok {

View File

@@ -172,7 +172,7 @@ func KeepTables(ctx context.Context, db tagsql.DB, tablesToKeep ...string) (err
return err
}
-// dropTables performs the table drops in a single transaction
+// dropTables performs the table drops in a single transaction.
func dropTables(ctx context.Context, db tagsql.DB, tablesToKeep ...string) (err error) {
err = txutil.WithTx(ctx, db, nil, func(ctx context.Context, tx tagsql.Tx) error {
// Get a list of tables excluding sqlite3 system tables.

View File

@@ -91,5 +91,5 @@ func (emptyStmt) Close() error { return nil }
func (emptyStmt) Exec(args []driver.Value) (driver.Result, error) { return nil, nil }
func (emptyStmt) Query(args []driver.Value) (driver.Rows, error) { return nil, nil }
-// must be 1 so that we can pass 1 argument
+// must be 1 so that we can pass 1 argument.
func (emptyStmt) NumInput() int { return 1 }

View File

@@ -18,12 +18,12 @@ var (
diffOpts = jsondiff.DefaultConsoleOptions()
)
-// DebugCert is a subset of the most relevant fields from an x509.Certificate for debugging
+// DebugCert is a subset of the most relevant fields from an x509.Certificate for debugging.
type DebugCert struct {
Cert *x509.Certificate
}
-// NewDebugCert converts an *x509.Certificate into a DebugCert
+// NewDebugCert converts an *x509.Certificate into a DebugCert.
func NewDebugCert(cert x509.Certificate) DebugCert {
return DebugCert{
Cert: &cert,
@@ -31,7 +31,7 @@ func NewDebugCert(cert x509.Certificate) DebugCert {
}
// PrintJSON uses a json marshaler to pretty-print arbitrary data for debugging
-// with special considerations for certain, specific types
+// with special considerations for certain, specific types.
func PrintJSON(data interface{}, label string) {
var (
jsonBytes []byte
@@ -89,7 +89,7 @@ func PrintJSON(data interface{}, label string) {
fmt.Println("")
}
-// Cmp is used to compare 2 DebugCerts against each other and print the diff
+// Cmp is used to compare 2 DebugCerts against each other and print the diff.
func (c DebugCert) Cmp(c2 DebugCert, label string) error {
fmt.Println("diff " + label + " ---================================================================---")
cJSON, err := c.JSON()
@@ -107,7 +107,7 @@ func (c DebugCert) Cmp(c2 DebugCert, label string) error {
return nil
}
-// JSON serializes the certificate to JSON
+// JSON serializes the certificate to JSON.
func (c DebugCert) JSON() ([]byte, error) {
return json.Marshal(c.Cert)
}

View File

@@ -14,10 +14,10 @@ import (
"storj.io/storj/private/tagsql"
)
-// Error is the default migrate errs class
+// Error is the default migrate errs class.
var Error = errs.Class("migrate")
-// Create with a previous schema check
+// Create with a previous schema check.
func Create(ctx context.Context, identifier string, db DBX) error {
// is this necessary? it's not immediately obvious why we roll back the transaction
// when the schemas match.

View File

@@ -53,7 +53,7 @@ Scenarios it doesn't handle properly.
4. Figuring out what the exact executed steps are.
*/
-// Migration describes a migration steps
+// Migration describes a migration steps.
type Migration struct {
// Table is the table name to register the applied migration version.
// NOTE: Always validates its value with the ValidTableName method before it's
@@ -74,12 +74,12 @@ type Step struct {
SeparateTx bool
}
-// Action is something that needs to be done
+// Action is something that needs to be done.
type Action interface {
Run(ctx context.Context, log *zap.Logger, db tagsql.DB, tx tagsql.Tx) error
}
-// TargetVersion returns migration with steps upto specified version
+// TargetVersion returns migration with steps upto specified version.
func (migration *Migration) TargetVersion(version int) *Migration {
m := *migration
m.Steps = nil
@@ -105,7 +105,7 @@ func (migration *Migration) ValidTableName() error {
return nil
}
-// ValidateSteps checks that the version for each migration step increments in order
+// ValidateSteps checks that the version for each migration step increments in order.
func (migration *Migration) ValidateSteps() error {
sorted := sort.SliceIsSorted(migration.Steps, func(i, j int) bool {
return migration.Steps[i].Version <= migration.Steps[j].Version
@@ -116,7 +116,7 @@ func (migration *Migration) ValidateSteps() error {
return nil
}
-// ValidateVersions checks that the version of the migration matches the state of the database
+// ValidateVersions checks that the version of the migration matches the state of the database.
func (migration *Migration) ValidateVersions(ctx context.Context, log *zap.Logger) error {
for _, step := range migration.Steps {
dbVersion, err := migration.getLatestVersion(ctx, log, step.DB)
@@ -139,7 +139,7 @@ func (migration *Migration) ValidateVersions(ctx context.Context, log *zap.Logge
return nil
}
-// Run runs the migration steps
+// Run runs the migration steps.
func (migration *Migration) Run(ctx context.Context, log *zap.Logger) error {
err := migration.ValidateSteps()
if err != nil {
@@ -238,7 +238,7 @@ func (migration *Migration) getLatestVersion(ctx context.Context, log *zap.Logge
return int(version.Int64), Error.Wrap(err)
}
-// addVersion adds information about a new migration
+// addVersion adds information about a new migration.
func (migration *Migration) addVersion(ctx context.Context, tx tagsql.Tx, db tagsql.DB, version int) error {
err := migration.ValidTableName()
if err != nil {
@@ -254,7 +254,7 @@ func (migration *Migration) addVersion(ctx context.Context, tx tagsql.Tx, db tag
return err
}
-// CurrentVersion finds the latest version for the db
+// CurrentVersion finds the latest version for the db.
func (migration *Migration) CurrentVersion(ctx context.Context, log *zap.Logger, db tagsql.DB) (int, error) {
err := migration.ensureVersionTable(ctx, log, db)
if err != nil {
@@ -263,10 +263,10 @@ func (migration *Migration) CurrentVersion(ctx context.Context, log *zap.Logger,
return migration.getLatestVersion(ctx, log, db)
}
-// SQL statements that are executed on the database
+// SQL statements that are executed on the database.
type SQL []string
-// Run runs the SQL statements
+// Run runs the SQL statements.
func (sql SQL) Run(ctx context.Context, log *zap.Logger, db tagsql.DB, tx tagsql.Tx) (err error) {
for _, query := range sql {
_, err := tx.Exec(ctx, rebind(db, query))
@@ -277,10 +277,10 @@ func (sql SQL) Run(ctx context.Context, log *zap.Logger, db tagsql.DB, tx tagsql
return nil
}
-// Func is an arbitrary operation
+// Func is an arbitrary operation.
type Func func(ctx context.Context, log *zap.Logger, db tagsql.DB, tx tagsql.Tx) error
-// Run runs the migration
+// Run runs the migration.
func (fn Func) Run(ctx context.Context, log *zap.Logger, db tagsql.DB, tx tagsql.Tx) error {
return fn(ctx, log, db, tx) return fn(ctx, log, db, tx)
} }
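The migrate hunks above sketch a small versioned-migration model: ordered Steps, a ValidateSteps ordering check, a version table updated by addVersion, and SQL/Func actions. Purely as an illustrative, self-contained sketch of that idea — the types and names below are hypothetical stand-ins, not the storj.io/storj/private/migrate API — the ordering check and apply loop work roughly like this:

package main

import (
	"errors"
	"fmt"
	"sort"
)

// step pairs a schema version with the action that brings the schema to it.
type step struct {
	version int
	action  func() error
}

// migration holds the steps and the latest version already applied.
type migration struct {
	current int // latest applied version; -1 means nothing has run yet
	steps   []step
}

// validateSteps checks that step versions increase in order,
// mirroring the "increments in order" rule described above.
func (m *migration) validateSteps() error {
	ok := sort.SliceIsSorted(m.steps, func(i, j int) bool {
		return m.steps[i].version < m.steps[j].version
	})
	if !ok {
		return errors.New("steps are not in ascending version order")
	}
	return nil
}

// run applies every step newer than the current version and records it,
// analogous to Run plus addVersion above.
func (m *migration) run() error {
	if err := m.validateSteps(); err != nil {
		return err
	}
	for _, s := range m.steps {
		if s.version <= m.current {
			continue // already applied
		}
		if err := s.action(); err != nil {
			return err
		}
		m.current = s.version
	}
	return nil
}

func main() {
	m := &migration{current: -1, steps: []step{
		{version: 0, action: func() error { fmt.Println("create tables"); return nil }},
		{version: 1, action: func() error { fmt.Println("add column"); return nil }},
	}}
	if err := m.run(); err != nil {
		fmt.Println("migration failed:", err)
		return
	}
	fmt.Println("current version:", m.current)
}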


@ -9,13 +9,13 @@ import (
"github.com/zeebo/errs" "github.com/zeebo/errs"
) )
// LoginAuth implements LOGIN authentication mechanism // LoginAuth implements LOGIN authentication mechanism.
type LoginAuth struct { type LoginAuth struct {
Username string Username string
Password string Password string
} }
// Start begins an authentication with a server // Start begins an authentication with a server.
func (auth LoginAuth) Start(server *smtp.ServerInfo) (proto string, toServer []byte, err error) { func (auth LoginAuth) Start(server *smtp.ServerInfo) (proto string, toServer []byte, err error) {
if !server.TLS { if !server.TLS {
return "", nil, errs.New("unencrypted connection") return "", nil, errs.New("unencrypted connection")
@ -24,7 +24,7 @@ func (auth LoginAuth) Start(server *smtp.ServerInfo) (proto string, toServer []b
} }
// Next continues the authentication with server response and flag representing // Next continues the authentication with server response and flag representing
// if server expects more data from client // if server expects more data from client.
func (auth LoginAuth) Next(fromServer []byte, more bool) (toServer []byte, err error) { func (auth LoginAuth) Next(fromServer []byte, more bool) (toServer []byte, err error) {
if more { if more {
switch string(fromServer) { switch string(fromServer) {


@ -16,7 +16,7 @@ import (
"github.com/zeebo/errs" "github.com/zeebo/errs"
) )
// Message is RFC compliant email message // Message is RFC compliant email message.
type Message struct { type Message struct {
From Address From Address
To []Address To []Address
@ -29,7 +29,7 @@ type Message struct {
Parts []Part Parts []Part
} }
// Part represent one part of multipart message // Part represents one part of a multipart message.
type Part struct { type Part struct {
Type string Type string
Encoding string Encoding string
@ -37,10 +37,10 @@ type Part struct {
Content string Content string
} }
// Error is the default message errs class // Error is the default message errs class.
var Error = errs.Class("Email message error") var Error = errs.Class("Email message error")
// Bytes builds message and returns result as bytes // Bytes builds message and returns result as bytes.
func (msg *Message) Bytes() (data []byte, err error) { func (msg *Message) Bytes() (data []byte, err error) {
// always returns nil error on read and write, so most of the errors can be ignored // always returns nil error on read and write, so most of the errors can be ignored
var body bytes.Buffer var body bytes.Buffer


@ -22,14 +22,14 @@ var (
mon = monkit.Package() mon = monkit.Package()
) )
// Auth is XOAUTH2 implementation of smtp.Auth interface // Auth is XOAUTH2 implementation of smtp.Auth interface.
type Auth struct { type Auth struct {
UserEmail string UserEmail string
Storage *TokenStore Storage *TokenStore
} }
// Start returns proto and auth credentials for first auth msg // Start returns proto and auth credentials for first auth msg.
func (auth *Auth) Start(server *smtp.ServerInfo) (proto string, toServer []byte, err error) { func (auth *Auth) Start(server *smtp.ServerInfo) (proto string, toServer []byte, err error) {
ctx := context.TODO() ctx := context.TODO()
defer mon.Task()(&ctx)(&err) defer mon.Task()(&ctx)(&err)
@ -46,7 +46,7 @@ func (auth *Auth) Start(server *smtp.ServerInfo) (proto string, toServer []byte,
return "XOAUTH2", []byte(format), nil return "XOAUTH2", []byte(format), nil
} }
// Next sends empty response to solve SASL challenge if response code is 334 // Next sends empty response to solve SASL challenge if response code is 334.
func (auth *Auth) Next(fromServer []byte, more bool) (toServer []byte, err error) { func (auth *Auth) Next(fromServer []byte, more bool) (toServer []byte, err error) {
if more { if more {
return make([]byte, 0), nil return make([]byte, 0), nil
@ -55,7 +55,7 @@ func (auth *Auth) Next(fromServer []byte, more bool) (toServer []byte, err error
return nil, nil return nil, nil
} }
// Token represents OAuth2 token // Token represents OAuth2 token.
type Token struct { type Token struct {
AccessToken string `json:"access_token"` AccessToken string `json:"access_token"`
RefreshToken string `json:"refresh_token"` RefreshToken string `json:"refresh_token"`
@ -63,21 +63,21 @@ type Token struct {
Expiry time.Time `json:"expiry"` Expiry time.Time `json:"expiry"`
} }
// Credentials represents OAuth2 credentials // Credentials represents OAuth2 credentials.
type Credentials struct { type Credentials struct {
ClientID string `json:"client_id"` ClientID string `json:"client_id"`
ClientSecret string `json:"client_secret"` ClientSecret string `json:"client_secret"`
TokenURI string `json:"token_uri"` TokenURI string `json:"token_uri"`
} }
// TokenStore is a thread safe storage for OAuth2 token and credentials // TokenStore is a thread safe storage for OAuth2 token and credentials.
type TokenStore struct { type TokenStore struct {
mu sync.Mutex mu sync.Mutex
token Token token Token
creds Credentials creds Credentials
} }
// NewTokenStore creates new instance of token storage // NewTokenStore creates new instance of token storage.
func NewTokenStore(creds Credentials, token Token) *TokenStore { func NewTokenStore(creds Credentials, token Token) *TokenStore {
return &TokenStore{ return &TokenStore{
token: token, token: token,
@ -85,7 +85,7 @@ func NewTokenStore(creds Credentials, token Token) *TokenStore {
} }
} }
// Token retrieves token in a thread safe way and refreshes it if needed // Token retrieves token in a thread safe way and refreshes it if needed.
func (s *TokenStore) Token(ctx context.Context) (_ *Token, err error) { func (s *TokenStore) Token(ctx context.Context) (_ *Token, err error) {
defer mon.Task()(&ctx)(&err) defer mon.Task()(&ctx)(&err)
s.mu.Lock() s.mu.Lock()
@ -105,7 +105,7 @@ func (s *TokenStore) Token(ctx context.Context) (_ *Token, err error) {
return token, nil return token, nil
} }
// RefreshToken is a helper method that refreshes token with given credentials and OUATH2 refresh token // RefreshToken is a helper method that refreshes the token with the given credentials and OAuth2 refresh token.
func RefreshToken(ctx context.Context, creds Credentials, refreshToken string) (_ *Token, err error) { func RefreshToken(ctx context.Context, creds Credentials, refreshToken string) (_ *Token, err error) {
defer mon.Task()(&ctx)(&err) defer mon.Task()(&ctx)(&err)


@ -15,12 +15,12 @@ import (
"github.com/zeebo/errs" "github.com/zeebo/errs"
) )
// Address is alias of net/mail.Address // Address is alias of net/mail.Address.
type Address = mail.Address type Address = mail.Address
var mon = monkit.Package() var mon = monkit.Package()
// SMTPSender is smtp sender // SMTPSender is smtp sender.
type SMTPSender struct { type SMTPSender struct {
ServerAddress string ServerAddress string
@ -28,12 +28,12 @@ type SMTPSender struct {
Auth smtp.Auth Auth smtp.Auth
} }
// FromAddress implements satellite/mail.SMTPSender // FromAddress implements satellite/mail.SMTPSender.
func (sender *SMTPSender) FromAddress() Address { func (sender *SMTPSender) FromAddress() Address {
return sender.From return sender.From
} }
// SendEmail sends email message to the given recipient // SendEmail sends email message to the given recipient.
func (sender *SMTPSender) SendEmail(ctx context.Context, msg *Message) (err error) { func (sender *SMTPSender) SendEmail(ctx context.Context, msg *Message) (err error) {
defer mon.Task()(&ctx)(&err) defer mon.Task()(&ctx)(&err)
@ -49,7 +49,7 @@ func (sender *SMTPSender) SendEmail(ctx context.Context, msg *Message) (err erro
return nil return nil
} }
// communicate sends mail via SMTP using provided client and message // communicate sends mail via SMTP using provided client and message.
func (sender *SMTPSender) communicate(ctx context.Context, client *smtp.Client, msg *Message) error { func (sender *SMTPSender) communicate(ctx context.Context, client *smtp.Client, msg *Message) error {
// suppress error because address should be validated // suppress error because address should be validated
// before creating SMTPSender // before creating SMTPSender
@ -98,7 +98,7 @@ func (sender *SMTPSender) communicate(ctx context.Context, client *smtp.Client,
return client.Quit() return client.Quit()
} }
// writeData ensures that writer will be closed after data is written // writeData ensures that writer will be closed after data is written.
func writeData(writer io.WriteCloser, data []byte) (err error) { func writeData(writer io.WriteCloser, data []byte) (err error) {
defer func() { defer func() {
err = errs.Combine(err, writer.Close()) err = errs.Combine(err, writer.Close())


@ -106,7 +106,7 @@ func (bad *BadBlobs) RestoreTrash(ctx context.Context, namespace []byte) ([][]by
return bad.blobs.RestoreTrash(ctx, namespace) return bad.blobs.RestoreTrash(ctx, namespace)
} }
// EmptyTrash empties the trash // EmptyTrash empties the trash.
func (bad *BadBlobs) EmptyTrash(ctx context.Context, namespace []byte, trashedBefore time.Time) (int64, [][]byte, error) { func (bad *BadBlobs) EmptyTrash(ctx context.Context, namespace []byte, trashedBefore time.Time) (int64, [][]byte, error) {
if bad.err != nil { if bad.err != nil {
return 0, nil, bad.err return 0, nil, bad.err


@ -89,13 +89,13 @@ func (slow *SlowBlobs) Trash(ctx context.Context, ref storage.BlobRef) error {
return slow.blobs.Trash(ctx, ref) return slow.blobs.Trash(ctx, ref)
} }
// RestoreTrash restores all files in the trash // RestoreTrash restores all files in the trash.
func (slow *SlowBlobs) RestoreTrash(ctx context.Context, namespace []byte) ([][]byte, error) { func (slow *SlowBlobs) RestoreTrash(ctx context.Context, namespace []byte) ([][]byte, error) {
slow.sleep() slow.sleep()
return slow.blobs.RestoreTrash(ctx, namespace) return slow.blobs.RestoreTrash(ctx, namespace)
} }
// EmptyTrash empties the trash // EmptyTrash empties the trash.
func (slow *SlowBlobs) EmptyTrash(ctx context.Context, namespace []byte, trashedBefore time.Time) (int64, [][]byte, error) { func (slow *SlowBlobs) EmptyTrash(ctx context.Context, namespace []byte, trashedBefore time.Time) (int64, [][]byte, error) {
slow.sleep() slow.sleep()
return slow.blobs.EmptyTrash(ctx, namespace, trashedBefore) return slow.blobs.EmptyTrash(ctx, namespace, trashedBefore)
@ -107,7 +107,7 @@ func (slow *SlowBlobs) Delete(ctx context.Context, ref storage.BlobRef) error {
return slow.blobs.Delete(ctx, ref) return slow.blobs.Delete(ctx, ref)
} }
// DeleteWithStorageFormat deletes the blob with the namespace, key, and format version // DeleteWithStorageFormat deletes the blob with the namespace, key, and format version.
func (slow *SlowBlobs) DeleteWithStorageFormat(ctx context.Context, ref storage.BlobRef, formatVer storage.FormatVersion) error { func (slow *SlowBlobs) DeleteWithStorageFormat(ctx context.Context, ref storage.BlobRef, formatVer storage.FormatVersion) error {
slow.sleep() slow.sleep()
return slow.blobs.DeleteWithStorageFormat(ctx, ref, formatVer) return slow.blobs.DeleteWithStorageFormat(ctx, ref, formatVer)
@ -119,7 +119,7 @@ func (slow *SlowBlobs) DeleteNamespace(ctx context.Context, ref []byte) (err err
return slow.blobs.DeleteNamespace(ctx, ref) return slow.blobs.DeleteNamespace(ctx, ref)
} }
// Stat looks up disk metadata on the blob file // Stat looks up disk metadata on the blob file.
func (slow *SlowBlobs) Stat(ctx context.Context, ref storage.BlobRef) (storage.BlobInfo, error) { func (slow *SlowBlobs) Stat(ctx context.Context, ref storage.BlobRef) (storage.BlobInfo, error) {
slow.sleep() slow.sleep()
return slow.blobs.Stat(ctx, ref) return slow.blobs.Stat(ctx, ref)
@ -152,19 +152,19 @@ func (slow *SlowBlobs) FreeSpace() (int64, error) {
return slow.blobs.FreeSpace() return slow.blobs.FreeSpace()
} }
// SpaceUsedForBlobs adds up how much is used in all namespaces // SpaceUsedForBlobs adds up how much is used in all namespaces.
func (slow *SlowBlobs) SpaceUsedForBlobs(ctx context.Context) (int64, error) { func (slow *SlowBlobs) SpaceUsedForBlobs(ctx context.Context) (int64, error) {
slow.sleep() slow.sleep()
return slow.blobs.SpaceUsedForBlobs(ctx) return slow.blobs.SpaceUsedForBlobs(ctx)
} }
// SpaceUsedForBlobsInNamespace adds up how much is used in the given namespace // SpaceUsedForBlobsInNamespace adds up how much is used in the given namespace.
func (slow *SlowBlobs) SpaceUsedForBlobsInNamespace(ctx context.Context, namespace []byte) (int64, error) { func (slow *SlowBlobs) SpaceUsedForBlobsInNamespace(ctx context.Context, namespace []byte) (int64, error) {
slow.sleep() slow.sleep()
return slow.blobs.SpaceUsedForBlobsInNamespace(ctx, namespace) return slow.blobs.SpaceUsedForBlobsInNamespace(ctx, namespace)
} }
// SpaceUsedForTrash adds up how much is used in all namespaces // SpaceUsedForTrash adds up how much is used in all namespaces.
func (slow *SlowBlobs) SpaceUsedForTrash(ctx context.Context) (int64, error) { func (slow *SlowBlobs) SpaceUsedForTrash(ctx context.Context) (int64, error) {
slow.sleep() slow.sleep()
return slow.blobs.SpaceUsedForTrash(ctx) return slow.blobs.SpaceUsedForTrash(ctx)
@ -176,7 +176,7 @@ func (slow *SlowBlobs) SetLatency(delay time.Duration) {
atomic.StoreInt64(&slow.delay, int64(delay)) atomic.StoreInt64(&slow.delay, int64(delay))
} }
// sleep sleeps for the duration set to slow.delay // sleep sleeps for the duration set to slow.delay.
func (slow *SlowBlobs) sleep() { func (slow *SlowBlobs) sleep() {
delay := time.Duration(atomic.LoadInt64(&slow.delay)) delay := time.Duration(atomic.LoadInt64(&slow.delay))
time.Sleep(delay) time.Sleep(delay)


@ -32,7 +32,7 @@ import (
const defaultInterval = 15 * time.Second const defaultInterval = 15 * time.Second
// Peer represents one of StorageNode or Satellite // Peer represents one of StorageNode or Satellite.
type Peer interface { type Peer interface {
ID() storj.NodeID ID() storj.NodeID
Addr() string Addr() string
@ -43,7 +43,7 @@ type Peer interface {
Close() error Close() error
} }
// Config describes planet configuration // Config describes planet configuration.
type Config struct { type Config struct {
SatelliteCount int SatelliteCount int
StorageNodeCount int StorageNodeCount int
@ -224,7 +224,7 @@ func (planet *Planet) Start(ctx context.Context) {
planet.started = true planet.started = true
} }
// StopPeer stops a single peer in the planet // StopPeer stops a single peer in the planet.
func (planet *Planet) StopPeer(peer Peer) error { func (planet *Planet) StopPeer(peer Peer) error {
if peer == nil { if peer == nil {
return errors.New("peer is nil") return errors.New("peer is nil")
@ -265,7 +265,7 @@ func (planet *Planet) StopNodeAndUpdate(ctx context.Context, node *StorageNode)
return nil return nil
} }
// Size returns number of nodes in the network // Size returns number of nodes in the network.
func (planet *Planet) Size() int { return len(planet.uplinks) + len(planet.peers) } func (planet *Planet) Size() int { return len(planet.uplinks) + len(planet.peers) }
// FindNode is a helper to retrieve a storage node record by its node ID. // FindNode is a helper to retrieve a storage node record by its node ID.
@ -336,12 +336,12 @@ func (planet *Planet) Identities() *testidentity.Identities {
return planet.identities return planet.identities
} }
// NewIdentity creates a new identity for a node // NewIdentity creates a new identity for a node.
func (planet *Planet) NewIdentity() (*identity.FullIdentity, error) { func (planet *Planet) NewIdentity() (*identity.FullIdentity, error) {
return planet.identities.NewIdentity() return planet.identities.NewIdentity()
} }
// NewListener creates a new listener // NewListener creates a new listener.
func (planet *Planet) NewListener() (net.Listener, error) { func (planet *Planet) NewListener() (net.Listener, error) {
return net.Listen("tcp", "127.0.0.1:0") return net.Listen("tcp", "127.0.0.1:0")
} }


@ -54,7 +54,7 @@ func TestBasic(t *testing.T) {
} }
} }
// test that nodes get put into each satellite's overlay cache // test that nodes get put into each satellite's overlay cache.
func TestContact(t *testing.T) { func TestContact(t *testing.T) {
testplanet.Run(t, testplanet.Config{ testplanet.Run(t, testplanet.Config{
SatelliteCount: 2, StorageNodeCount: 5, UplinkCount: 0, SatelliteCount: 2, StorageNodeCount: 5, UplinkCount: 0,


@ -17,7 +17,7 @@ import (
"storj.io/storj/storagenode" "storj.io/storj/storagenode"
) )
// Reconfigure allows to change node configurations // Reconfigure allows changing node configurations.
type Reconfigure struct { type Reconfigure struct {
SatelliteDB func(log *zap.Logger, index int, db satellite.DB) (satellite.DB, error) SatelliteDB func(log *zap.Logger, index int, db satellite.DB) (satellite.DB, error)
SatellitePointerDB func(log *zap.Logger, index int, db metainfo.PointerDB) (metainfo.PointerDB, error) SatellitePointerDB func(log *zap.Logger, index int, db metainfo.PointerDB) (metainfo.PointerDB, error)
@ -44,7 +44,7 @@ var DisablePeerCAWhitelist = Reconfigure{
} }
// ShortenOnlineWindow returns a `Reconfigure` that sets the NodeSelection // ShortenOnlineWindow returns a `Reconfigure` that sets the NodeSelection
// OnlineWindow to 1 second, meaning a connection failure leads to marking the nodes as offline // OnlineWindow to 1 second, meaning a connection failure leads to marking the nodes as offline.
var ShortenOnlineWindow = Reconfigure{ var ShortenOnlineWindow = Reconfigure{
Satellite: func(log *zap.Logger, index int, config *satellite.Config) { Satellite: func(log *zap.Logger, index int, config *satellite.Config) {
config.Overlay.Node.OnlineWindow = 1 * time.Second config.Overlay.Node.OnlineWindow = 1 * time.Second
@ -60,7 +60,7 @@ var Combine = func(elements ...func(log *zap.Logger, index int, config *satellit
} }
} }
// ReconfigureRS returns function to change satellite redundancy scheme values // ReconfigureRS returns function to change satellite redundancy scheme values.
var ReconfigureRS = func(minThreshold, repairThreshold, successThreshold, totalThreshold int) func(log *zap.Logger, index int, config *satellite.Config) { var ReconfigureRS = func(minThreshold, repairThreshold, successThreshold, totalThreshold int) func(log *zap.Logger, index int, config *satellite.Config) {
return func(log *zap.Logger, index int, config *satellite.Config) { return func(log *zap.Logger, index int, config *satellite.Config) {
config.Metainfo.RS.MinThreshold = minThreshold config.Metainfo.RS.MinThreshold = minThreshold


@ -15,12 +15,12 @@ import (
"storj.io/storj/pkg/server" "storj.io/storj/pkg/server"
) )
// DefaultReferralManagerServer implements the default behavior of a mock referral manager // DefaultReferralManagerServer implements the default behavior of a mock referral manager.
type DefaultReferralManagerServer struct { type DefaultReferralManagerServer struct {
tokenCount int tokenCount int
} }
// newReferralManager initializes a referral manager server // newReferralManager initializes a referral manager server.
func (planet *Planet) newReferralManager() (*server.Server, error) { func (planet *Planet) newReferralManager() (*server.Server, error) {
prefix := "referralmanager" prefix := "referralmanager"
log := planet.log.Named(prefix) log := planet.log.Named(prefix)


@ -64,7 +64,7 @@ import (
"storj.io/storj/storage/redis/redisserver" "storj.io/storj/storage/redis/redisserver"
) )
// Satellite contains all the processes needed to run a full Satellite setup // Satellite contains all the processes needed to run a full Satellite setup.
type Satellite struct { type Satellite struct {
Config satellite.Config Config satellite.Config
@ -271,7 +271,7 @@ func (system *Satellite) authenticatedContext(ctx context.Context, userID uuid.U
return console.WithAuth(ctx, auth), nil return console.WithAuth(ctx, auth), nil
} }
// Close closes all the subsystems in the Satellite system // Close closes all the subsystems in the Satellite system.
func (system *Satellite) Close() error { func (system *Satellite) Close() error {
return errs.Combine( return errs.Combine(
system.API.Close(), system.API.Close(),
@ -282,7 +282,7 @@ func (system *Satellite) Close() error {
) )
} }
// Run runs all the subsystems in the Satellite system // Run runs all the subsystems in the Satellite system.
func (system *Satellite) Run(ctx context.Context) (err error) { func (system *Satellite) Run(ctx context.Context) (err error) {
group, ctx := errgroup.WithContext(ctx) group, ctx := errgroup.WithContext(ctx)
@ -307,7 +307,7 @@ func (system *Satellite) Run(ctx context.Context) (err error) {
// PrivateAddr returns the private address from the Satellite system API. // PrivateAddr returns the private address from the Satellite system API.
func (system *Satellite) PrivateAddr() string { return system.API.Server.PrivateAddr().String() } func (system *Satellite) PrivateAddr() string { return system.API.Server.PrivateAddr().String() }
// newSatellites initializes satellites // newSatellites initializes satellites.
func (planet *Planet) newSatellites(count int, satelliteDatabases satellitedbtest.SatelliteDatabases) ([]*Satellite, error) { func (planet *Planet) newSatellites(count int, satelliteDatabases satellitedbtest.SatelliteDatabases) ([]*Satellite, error) {
var xs []*Satellite var xs []*Satellite
defer func() { defer func() {


@ -27,7 +27,7 @@ import (
"storj.io/uplink/private/testuplink" "storj.io/uplink/private/testuplink"
) )
// Uplink is a general purpose // Uplink is a general purpose test uplink.
type Uplink struct { type Uplink struct {
Log *zap.Logger Log *zap.Logger
Identity *identity.FullIdentity Identity *identity.FullIdentity
@ -156,16 +156,16 @@ func (planet *Planet) newUplink(name string) (*Uplink, error) {
return planetUplink, nil return planetUplink, nil
} }
// ID returns uplink id // ID returns uplink id.
func (client *Uplink) ID() storj.NodeID { return client.Identity.ID } func (client *Uplink) ID() storj.NodeID { return client.Identity.ID }
// Addr returns uplink address // Addr returns uplink address.
func (client *Uplink) Addr() string { return "" } func (client *Uplink) Addr() string { return "" }
// Shutdown shuts down all uplink dependencies // Shutdown shuts down all uplink dependencies.
func (client *Uplink) Shutdown() error { return nil } func (client *Uplink) Shutdown() error { return nil }
// DialMetainfo dials destination with apikey and returns metainfo Client // DialMetainfo dials destination with apikey and returns metainfo Client.
func (client *Uplink) DialMetainfo(ctx context.Context, destination Peer, apikey *macaroon.APIKey) (*metainfo.Client, error) { func (client *Uplink) DialMetainfo(ctx context.Context, destination Peer, apikey *macaroon.APIKey) (*metainfo.Client, error) {
return metainfo.DialNodeURL(ctx, client.Dialer, destination.NodeURL().String(), apikey, "Test/1.0") return metainfo.DialNodeURL(ctx, client.Dialer, destination.NodeURL().String(), apikey, "Test/1.0")
} }
@ -175,12 +175,12 @@ func (client *Uplink) DialPiecestore(ctx context.Context, destination Peer) (*pi
return piecestore.DialNodeURL(ctx, client.Dialer, destination.NodeURL(), client.Log.Named("uplink>piecestore"), piecestore.DefaultConfig) return piecestore.DialNodeURL(ctx, client.Dialer, destination.NodeURL(), client.Log.Named("uplink>piecestore"), piecestore.DefaultConfig)
} }
// Upload data to specific satellite // Upload data to specific satellite.
func (client *Uplink) Upload(ctx context.Context, satellite *Satellite, bucket string, path storj.Path, data []byte) error { func (client *Uplink) Upload(ctx context.Context, satellite *Satellite, bucket string, path storj.Path, data []byte) error {
return client.UploadWithExpiration(ctx, satellite, bucket, path, data, time.Time{}) return client.UploadWithExpiration(ctx, satellite, bucket, path, data, time.Time{})
} }
// UploadWithExpiration data to specific satellite and expiration time // UploadWithExpiration uploads data to a specific satellite with an expiration time.
func (client *Uplink) UploadWithExpiration(ctx context.Context, satellite *Satellite, bucketName string, path storj.Path, data []byte, expiration time.Time) error { func (client *Uplink) UploadWithExpiration(ctx context.Context, satellite *Satellite, bucketName string, path storj.Path, data []byte, expiration time.Time) error {
_, found := testuplink.GetMaxSegmentSize(ctx) _, found := testuplink.GetMaxSegmentSize(ctx)
if !found { if !found {
@ -215,7 +215,7 @@ func (client *Uplink) UploadWithExpiration(ctx context.Context, satellite *Satel
return upload.Commit() return upload.Commit()
} }
// Download data from specific satellite // Download data from specific satellite.
func (client *Uplink) Download(ctx context.Context, satellite *Satellite, bucketName string, path storj.Path) ([]byte, error) { func (client *Uplink) Download(ctx context.Context, satellite *Satellite, bucketName string, path storj.Path) ([]byte, error) {
project, err := client.GetProject(ctx, satellite) project, err := client.GetProject(ctx, satellite)
if err != nil { if err != nil {
@ -236,7 +236,7 @@ func (client *Uplink) Download(ctx context.Context, satellite *Satellite, bucket
return data, nil return data, nil
} }
// DownloadStream returns stream for downloading data // DownloadStream returns stream for downloading data.
func (client *Uplink) DownloadStream(ctx context.Context, satellite *Satellite, bucketName string, path storj.Path) (_ io.ReadCloser, cleanup func() error, err error) { func (client *Uplink) DownloadStream(ctx context.Context, satellite *Satellite, bucketName string, path storj.Path) (_ io.ReadCloser, cleanup func() error, err error) {
project, err := client.GetProject(ctx, satellite) project, err := client.GetProject(ctx, satellite)
if err != nil { if err != nil {
@ -254,7 +254,7 @@ func (client *Uplink) DownloadStream(ctx context.Context, satellite *Satellite,
return downloader, cleanup, err return downloader, cleanup, err
} }
// DownloadStreamRange returns stream for downloading data // DownloadStreamRange returns stream for downloading data.
func (client *Uplink) DownloadStreamRange(ctx context.Context, satellite *Satellite, bucketName string, path storj.Path, start, limit int64) (_ io.ReadCloser, cleanup func() error, err error) { func (client *Uplink) DownloadStreamRange(ctx context.Context, satellite *Satellite, bucketName string, path storj.Path, start, limit int64) (_ io.ReadCloser, cleanup func() error, err error) {
project, err := client.GetProject(ctx, satellite) project, err := client.GetProject(ctx, satellite)
if err != nil { if err != nil {
@ -275,7 +275,7 @@ func (client *Uplink) DownloadStreamRange(ctx context.Context, satellite *Satell
return downloader, cleanup, err return downloader, cleanup, err
} }
// DeleteObject deletes an object at the path in a bucket // DeleteObject deletes an object at the path in a bucket.
func (client *Uplink) DeleteObject(ctx context.Context, satellite *Satellite, bucketName string, path storj.Path) error { func (client *Uplink) DeleteObject(ctx context.Context, satellite *Satellite, bucketName string, path storj.Path) error {
project, err := client.GetProject(ctx, satellite) project, err := client.GetProject(ctx, satellite)
if err != nil { if err != nil {
@ -290,7 +290,7 @@ func (client *Uplink) DeleteObject(ctx context.Context, satellite *Satellite, bu
return err return err
} }
// CreateBucket creates a new bucket // CreateBucket creates a new bucket.
func (client *Uplink) CreateBucket(ctx context.Context, satellite *Satellite, bucketName string) error { func (client *Uplink) CreateBucket(ctx context.Context, satellite *Satellite, bucketName string) error {
project, err := client.GetProject(ctx, satellite) project, err := client.GetProject(ctx, satellite)
if err != nil { if err != nil {


@ -14,7 +14,7 @@ import (
"storj.io/storj/versioncontrol" "storj.io/storj/versioncontrol"
) )
// newVersionControlServer initializes the Versioning Server // newVersionControlServer initializes the Versioning Server.
func (planet *Planet) newVersionControlServer() (peer *versioncontrol.Peer, err error) { func (planet *Planet) newVersionControlServer() (peer *versioncontrol.Peer, err error) {
prefix := "versioncontrol" prefix := "versioncontrol"


@ -9,20 +9,20 @@ import (
) )
// NodeIDFromBytes returns a node ID consisting of the bytes // NodeIDFromBytes returns a node ID consisting of the bytes
// and padding to the node ID length // and padding to the node ID length.
func NodeIDFromBytes(b []byte) storj.NodeID { func NodeIDFromBytes(b []byte) storj.NodeID {
id, _ := storj.NodeIDFromBytes(fit(b)) id, _ := storj.NodeIDFromBytes(fit(b))
return id return id
} }
// NodeIDFromString returns node ID consisting of the strings // NodeIDFromString returns node ID consisting of the strings
// and padding to the node ID length // and padding to the node ID length.
func NodeIDFromString(s string) storj.NodeID { func NodeIDFromString(s string) storj.NodeID {
return NodeIDFromBytes([]byte(s)) return NodeIDFromBytes([]byte(s))
} }
// NodeIDsFromBytes returns node IDs consisting of the byte slices // NodeIDsFromBytes returns node IDs consisting of the byte slices
// and padding to the node ID length // and padding to the node ID length.
func NodeIDsFromBytes(bs ...[]byte) (ids storj.NodeIDList) { func NodeIDsFromBytes(bs ...[]byte) (ids storj.NodeIDList) {
for _, b := range bs { for _, b := range bs {
ids = append(ids, NodeIDFromBytes(b)) ids = append(ids, NodeIDFromBytes(b))
@ -31,7 +31,7 @@ func NodeIDsFromBytes(bs ...[]byte) (ids storj.NodeIDList) {
} }
// NodeIDsFromStrings returns node IDs consisting of the strings // NodeIDsFromStrings returns node IDs consisting of the strings
// and padding to the node ID length // and padding to the node ID length.
func NodeIDsFromStrings(strs ...string) (ids storj.NodeIDList) { func NodeIDsFromStrings(strs ...string) (ids storj.NodeIDList) {
for _, s := range strs { for _, s := range strs {
ids = append(ids, NodeIDFromString(s)) ids = append(ids, NodeIDFromString(s))
@ -39,7 +39,7 @@ func NodeIDsFromStrings(strs ...string) (ids storj.NodeIDList) {
return ids return ids
} }
// used to pad node IDs // used to pad node IDs.
func fit(b []byte) []byte { func fit(b []byte) []byte {
l := len(storj.NodeID{}) l := len(storj.NodeID{})
if len(b) < l { if len(b) < l {
@ -50,7 +50,7 @@ func fit(b []byte) []byte {
} }
// MockNode returns a pb node with an ID consisting of the string // MockNode returns a pb node with an ID consisting of the string
// and padding to the node ID length // and padding to the node ID length.
func MockNode(s string) *pb.Node { func MockNode(s string) *pb.Node {
id := NodeIDFromString(s) id := NodeIDFromString(s)
var node pb.Node var node pb.Node


@ -7,13 +7,13 @@ import (
"storj.io/common/storj" "storj.io/common/storj"
) )
// PieceIDFromBytes converts a byte slice into a piece ID // PieceIDFromBytes converts a byte slice into a piece ID.
func PieceIDFromBytes(b []byte) storj.PieceID { func PieceIDFromBytes(b []byte) storj.PieceID {
id, _ := storj.PieceIDFromBytes(fit(b)) id, _ := storj.PieceIDFromBytes(fit(b))
return id return id
} }
// PieceIDFromString decodes a hex encoded piece ID string // PieceIDFromString decodes a hex encoded piece ID string.
func PieceIDFromString(s string) storj.PieceID { func PieceIDFromString(s string) storj.PieceID {
return PieceIDFromBytes([]byte(s)) return PieceIDFromBytes([]byte(s))
} }


@ -25,7 +25,7 @@ func NewChore(service *Service, checkInterval time.Duration) *Chore {
} }
} }
// Run logs the current version information // Run logs the current version information.
func (chore *Chore) Run(ctx context.Context) (err error) { func (chore *Chore) Run(ctx context.Context) (err error) {
defer mon.Task()(&ctx)(&err) defer mon.Task()(&ctx)(&err)
if !chore.service.Checked() { if !chore.service.Checked() {


@ -15,7 +15,7 @@ import (
"storj.io/private/version" "storj.io/private/version"
) )
// Config contains the necessary Information to check the Software Version // Config contains the necessary information to check the software version.
type Config struct { type Config struct {
ClientConfig ClientConfig
@ -38,7 +38,7 @@ type Service struct {
acceptedVersion version.SemVer acceptedVersion version.SemVer
} }
// NewService creates a Version Check Client with default configuration // NewService creates a Version Check Client with default configuration.
func NewService(log *zap.Logger, config Config, info version.Info, service string) (client *Service) { func NewService(log *zap.Logger, config Config, info version.Info, service string) (client *Service) {
return &Service{ return &Service{
log: log, log: log,
@ -51,7 +51,7 @@ func NewService(log *zap.Logger, config Config, info version.Info, service strin
} }
// CheckProcessVersion is not meant to be used for peers but is meant to be // CheckProcessVersion is not meant to be used for peers but is meant to be
// used for other utilities // used for other utilities.
func CheckProcessVersion(ctx context.Context, log *zap.Logger, config Config, info version.Info, service string) (err error) { func CheckProcessVersion(ctx context.Context, log *zap.Logger, config Config, info version.Info, service string) (err error) {
defer mon.Task()(&ctx)(&err) defer mon.Task()(&ctx)(&err)
_, err = NewService(log, config, info, service).CheckVersion(ctx) _, err = NewService(log, config, info, service).CheckVersion(ctx)
@ -141,7 +141,7 @@ func (service *Service) Checked() bool {
return service.checked.Released() return service.checked.Released()
} }
// isAcceptedVersion compares and checks if the passed version is greater/equal than the minimum required version // isAcceptedVersion checks if the passed version is greater than or equal to the minimum required version.
func isAcceptedVersion(test version.SemVer, target version.OldSemVer) bool { func isAcceptedVersion(test version.SemVer, target version.OldSemVer) bool {
return test.Major > uint64(target.Major) || (test.Major == uint64(target.Major) && (test.Minor > uint64(target.Minor) || (test.Minor == uint64(target.Minor) && test.Patch >= uint64(target.Patch)))) return test.Major > uint64(target.Major) || (test.Major == uint64(target.Major) && (test.Minor > uint64(target.Minor) || (test.Minor == uint64(target.Minor) && test.Patch >= uint64(target.Patch))))
} }
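For clarity, the greater-or-equal comparison that isAcceptedVersion performs can be written out step by step. This is only an illustrative, self-contained sketch using a hypothetical local semVer type; the real code compares version.SemVer against version.OldSemVer exactly as shown above:

package main

import "fmt"

// semVer is a hypothetical stand-in for a semantic version.
type semVer struct{ major, minor, patch uint64 }

// isAccepted reports whether test is at least the minimum required version.
func isAccepted(test, minimum semVer) bool {
	if test.major != minimum.major {
		return test.major > minimum.major
	}
	if test.minor != minimum.minor {
		return test.minor > minimum.minor
	}
	return test.patch >= minimum.patch
}

func main() {
	minimum := semVer{major: 1, minor: 2, patch: 0}
	fmt.Println(isAccepted(semVer{1, 2, 3}, minimum)) // true: newer patch
	fmt.Println(isAccepted(semVer{1, 1, 9}, minimum)) // false: older minor
	fmt.Println(isAccepted(semVer{2, 0, 0}, minimum)) // true: newer major
}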


@ -7,7 +7,7 @@ import (
"storj.io/common/uuid" "storj.io/common/uuid"
) )
// BucketTally contains information about aggregate data stored in a bucket // BucketTally contains information about aggregate data stored in a bucket.
type BucketTally struct { type BucketTally struct {
ProjectID uuid.UUID ProjectID uuid.UUID
BucketName []byte BucketName []byte
@ -23,7 +23,7 @@ type BucketTally struct {
MetadataSize int64 MetadataSize int64
} }
// Combine aggregates all the tallies // Combine aggregates all the tallies.
func (s *BucketTally) Combine(o *BucketTally) { func (s *BucketTally) Combine(o *BucketTally) {
s.ObjectCount += o.ObjectCount s.ObjectCount += o.ObjectCount


@ -9,7 +9,7 @@ import (
"storj.io/common/uuid" "storj.io/common/uuid"
) )
// BucketStorageTally holds data about a bucket tally // BucketStorageTally holds data about a bucket tally.
type BucketStorageTally struct { type BucketStorageTally struct {
BucketName string BucketName string
ProjectID uuid.UUID ProjectID uuid.UUID


@ -9,7 +9,7 @@ import (
"storj.io/common/storj" "storj.io/common/storj"
) )
// Constants for accounting_raw, accounting_rollup, and accounting_timestamps // Constants for accounting_raw, accounting_rollup, and accounting_timestamps.
const ( const (
// LastAtRestTally represents the accounting timestamp for the at-rest data calculation // LastAtRestTally represents the accounting timestamp for the at-rest data calculation
LastAtRestTally = "LastAtRestTally" LastAtRestTally = "LastAtRestTally"
@ -19,7 +19,7 @@ const (
LastRollup = "LastRollup" LastRollup = "LastRollup"
) )
// CSVRow represents data from QueryPaymentInfo without exposing dbx // CSVRow represents data from QueryPaymentInfo without exposing dbx.
type CSVRow struct { type CSVRow struct {
NodeID storj.NodeID NodeID storj.NodeID
NodeCreationDate time.Time NodeCreationDate time.Time


@ -13,10 +13,10 @@ import (
"storj.io/storj/satellite/compensation" "storj.io/storj/satellite/compensation"
) )
// RollupStats is a convenience alias // RollupStats is a convenience alias.
type RollupStats map[time.Time]map[storj.NodeID]*Rollup type RollupStats map[time.Time]map[storj.NodeID]*Rollup
// StoragenodeStorageTally mirrors dbx.StoragenodeStorageTally, allowing us to use that struct without leaking dbx // StoragenodeStorageTally mirrors dbx.StoragenodeStorageTally, allowing us to use that struct without leaking dbx.
type StoragenodeStorageTally struct { type StoragenodeStorageTally struct {
ID int64 ID int64
NodeID storj.NodeID NodeID storj.NodeID
@ -24,7 +24,7 @@ type StoragenodeStorageTally struct {
DataTotal float64 DataTotal float64
} }
// StoragenodeBandwidthRollup mirrors dbx.StoragenodeBandwidthRollup, allowing us to use the struct without leaking dbx // StoragenodeBandwidthRollup mirrors dbx.StoragenodeBandwidthRollup, allowing us to use the struct without leaking dbx.
type StoragenodeBandwidthRollup struct { type StoragenodeBandwidthRollup struct {
NodeID storj.NodeID NodeID storj.NodeID
IntervalStart time.Time IntervalStart time.Time
@ -32,7 +32,7 @@ type StoragenodeBandwidthRollup struct {
Settled uint64 Settled uint64
} }
// Rollup mirrors dbx.AccountingRollup, allowing us to use that struct without leaking dbx // Rollup mirrors dbx.AccountingRollup, allowing us to use that struct without leaking dbx.
type Rollup struct { type Rollup struct {
ID int64 ID int64
NodeID storj.NodeID NodeID storj.NodeID
@ -45,7 +45,7 @@ type Rollup struct {
AtRestTotal float64 AtRestTotal float64
} }
// StorageNodePeriodUsage represents a statement for a node for a compensation period // StorageNodePeriodUsage represents a statement for a node for a compensation period.
type StorageNodePeriodUsage struct { type StorageNodePeriodUsage struct {
NodeID storj.NodeID NodeID storj.NodeID
AtRestTotal float64 AtRestTotal float64
@ -56,7 +56,7 @@ type StorageNodePeriodUsage struct {
GetAuditTotal int64 GetAuditTotal int64
} }
// StorageNodeUsage is node at rest space usage over a period of time // StorageNodeUsage is node at rest space usage over a period of time.
type StorageNodeUsage struct { type StorageNodeUsage struct {
NodeID storj.NodeID NodeID storj.NodeID
StorageUsed float64 StorageUsed float64
@ -65,7 +65,7 @@ type StorageNodeUsage struct {
} }
// ProjectUsage consists of period total storage, egress // ProjectUsage consists of period total storage, egress
// and objects count per hour for certain Project in bytes // and objects count per hour for certain Project in bytes.
type ProjectUsage struct { type ProjectUsage struct {
Storage float64 `json:"storage"` Storage float64 `json:"storage"`
Egress int64 `json:"egress"` Egress int64 `json:"egress"`
@ -75,7 +75,7 @@ type ProjectUsage struct {
Before time.Time `json:"before"` Before time.Time `json:"before"`
} }
// BucketUsage consist of total bucket usage for period // BucketUsage consists of the total bucket usage for a period.
type BucketUsage struct { type BucketUsage struct {
ProjectID uuid.UUID ProjectID uuid.UUID
BucketName string BucketName string
@ -89,14 +89,14 @@ type BucketUsage struct {
} }
// BucketUsageCursor holds info for bucket usage // BucketUsageCursor holds info for bucket usage
// cursor pagination // cursor pagination.
type BucketUsageCursor struct { type BucketUsageCursor struct {
Search string Search string
Limit uint Limit uint
Page uint Page uint
} }
// BucketUsagePage represents bucket usage page result // BucketUsagePage represents bucket usage page result.
type BucketUsagePage struct { type BucketUsagePage struct {
BucketUsages []BucketUsage BucketUsages []BucketUsage
@ -110,7 +110,7 @@ type BucketUsagePage struct {
} }
// BucketUsageRollup is total bucket usage info // BucketUsageRollup is total bucket usage info
// for certain period // for certain period.
type BucketUsageRollup struct { type BucketUsageRollup struct {
ProjectID uuid.UUID ProjectID uuid.UUID
BucketName []byte BucketName []byte


@ -15,7 +15,7 @@ import (
"storj.io/storj/satellite/accounting" "storj.io/storj/satellite/accounting"
) )
// Config contains configurable values for rollup // Config contains configurable values for rollup.
type Config struct { type Config struct {
Interval time.Duration `help:"how frequently rollup should run" releaseDefault:"24h" devDefault:"120s"` Interval time.Duration `help:"how frequently rollup should run" releaseDefault:"24h" devDefault:"120s"`
DeleteTallies bool `help:"option for deleting tallies after they are rolled up" default:"true"` DeleteTallies bool `help:"option for deleting tallies after they are rolled up" default:"true"`
@ -31,7 +31,7 @@ type Service struct {
deleteTallies bool deleteTallies bool
} }
// New creates a new rollup service // New creates a new rollup service.
func New(logger *zap.Logger, sdb accounting.StoragenodeAccounting, interval time.Duration, deleteTallies bool) *Service { func New(logger *zap.Logger, sdb accounting.StoragenodeAccounting, interval time.Duration, deleteTallies bool) *Service {
return &Service{ return &Service{
logger: logger, logger: logger,
@ -41,7 +41,7 @@ func New(logger *zap.Logger, sdb accounting.StoragenodeAccounting, interval time
} }
} }
// Run the Rollup loop // Run the Rollup loop.
func (r *Service) Run(ctx context.Context) (err error) { func (r *Service) Run(ctx context.Context) (err error) {
defer mon.Task()(&ctx)(&err) defer mon.Task()(&ctx)(&err)
return r.Loop.Run(ctx, func(ctx context.Context) error { return r.Loop.Run(ctx, func(ctx context.Context) error {
@ -59,7 +59,7 @@ func (r *Service) Close() error {
return nil return nil
} }
// Rollup aggregates storage and bandwidth amounts for the time interval // Rollup aggregates storage and bandwidth amounts for the time interval.
func (r *Service) Rollup(ctx context.Context) (err error) { func (r *Service) Rollup(ctx context.Context) (err error) {
defer mon.Task()(&ctx)(&err) defer mon.Task()(&ctx)(&err)
// only Rollup new things - get LastRollup // only Rollup new things - get LastRollup
@ -102,7 +102,7 @@ func (r *Service) Rollup(ctx context.Context) (err error) {
return nil return nil
} }
// RollupStorage rolls up storage tally, modifies rollupStats map // RollupStorage rolls up storage tally, modifies rollupStats map.
func (r *Service) RollupStorage(ctx context.Context, lastRollup time.Time, rollupStats accounting.RollupStats) (latestTally time.Time, err error) { func (r *Service) RollupStorage(ctx context.Context, lastRollup time.Time, rollupStats accounting.RollupStats) (latestTally time.Time, err error) {
defer mon.Task()(&ctx)(&err) defer mon.Task()(&ctx)(&err)
tallies, err := r.sdb.GetTalliesSince(ctx, lastRollup) tallies, err := r.sdb.GetTalliesSince(ctx, lastRollup)
@ -136,7 +136,7 @@ func (r *Service) RollupStorage(ctx context.Context, lastRollup time.Time, rollu
return latestTally, nil return latestTally, nil
} }
// RollupBW aggregates the bandwidth rollups, modifies rollupStats map // RollupBW aggregates the bandwidth rollups, modifies rollupStats map.
func (r *Service) RollupBW(ctx context.Context, lastRollup time.Time, rollupStats accounting.RollupStats) (err error) { func (r *Service) RollupBW(ctx context.Context, lastRollup time.Time, rollupStats accounting.RollupStats) (err error) {
defer mon.Task()(&ctx)(&err) defer mon.Task()(&ctx)(&err)
var latestTally time.Time var latestTally time.Time


@ -178,7 +178,7 @@ func TestRollupDeletes(t *testing.T) {
}) })
} }
// expectedTotals sums test data up to, but not including the current day's // expectedTotals sums test data up to, but not including, the current day's data.
func expectedTotals(data []testData, id storj.NodeID, currentDay int) []float64 { func expectedTotals(data []testData, id storj.NodeID, currentDay int) []float64 {
totals := make([]float64, 5) totals := make([]float64, 5)
for i := 0; i < currentDay; i++ { for i := 0; i < currentDay; i++ {
@ -206,7 +206,7 @@ func createData(planet *testplanet.Planet, days int) []testData {
return data return data
} }
// dqNodes disqualifies half the nodes in the testplanet and returns a map of dqed nodes // dqNodes disqualifies half the nodes in the testplanet and returns a map of dqed nodes.
func dqNodes(ctx *testcontext.Context, planet *testplanet.Planet) (map[storj.NodeID]bool, error) { func dqNodes(ctx *testcontext.Context, planet *testplanet.Planet) (map[storj.NodeID]bool, error) {
dqed := make(map[storj.NodeID]bool) dqed := make(map[storj.NodeID]bool)


@ -25,7 +25,7 @@ var (
mon = monkit.Package() mon = monkit.Package()
) )
// Config contains configurable values for the tally service // Config contains configurable values for the tally service.
type Config struct { type Config struct {
Interval time.Duration `help:"how frequently the tally service should run" releaseDefault:"1h" devDefault:"30s"` Interval time.Duration `help:"how frequently the tally service should run" releaseDefault:"1h" devDefault:"30s"`
} }
@ -44,7 +44,7 @@ type Service struct {
nowFn func() time.Time nowFn func() time.Time
} }
// New creates a new tally Service // New creates a new tally Service.
func New(log *zap.Logger, sdb accounting.StoragenodeAccounting, pdb accounting.ProjectAccounting, liveAccounting accounting.Cache, metainfoLoop *metainfo.Loop, interval time.Duration) *Service { func New(log *zap.Logger, sdb accounting.StoragenodeAccounting, pdb accounting.ProjectAccounting, liveAccounting accounting.Cache, metainfoLoop *metainfo.Loop, interval time.Duration) *Service {
return &Service{ return &Service{
log: log, log: log,
@ -58,7 +58,7 @@ func New(log *zap.Logger, sdb accounting.StoragenodeAccounting, pdb accounting.P
} }
} }
// Run the tally service loop // Run the tally service loop.
func (service *Service) Run(ctx context.Context) (err error) { func (service *Service) Run(ctx context.Context) (err error) {
defer mon.Task()(&ctx)(&err) defer mon.Task()(&ctx)(&err)
@ -83,7 +83,7 @@ func (service *Service) SetNow(now func() time.Time) {
service.nowFn = now service.nowFn = now
} }
// Tally calculates data-at-rest usage once // Tally calculates data-at-rest usage once.
func (service *Service) Tally(ctx context.Context) (err error) { func (service *Service) Tally(ctx context.Context) (err error) {
defer mon.Task()(&ctx)(&err) defer mon.Task()(&ctx)(&err)
@ -190,7 +190,7 @@ func (service *Service) Tally(ctx context.Context) (err error) {
var _ metainfo.Observer = (*Observer)(nil) var _ metainfo.Observer = (*Observer)(nil)
// Observer observes metainfo and adds up tallies for nodes and buckets // Observer observes metainfo and adds up tallies for nodes and buckets.
type Observer struct { type Observer struct {
Now time.Time Now time.Time
Log *zap.Logger Log *zap.Logger
@ -213,7 +213,7 @@ func (observer *Observer) pointerExpired(pointer *pb.Pointer) bool {
return !pointer.ExpirationDate.IsZero() && pointer.ExpirationDate.Before(observer.Now) return !pointer.ExpirationDate.IsZero() && pointer.ExpirationDate.Before(observer.Now)
} }
// ensureBucket returns bucket corresponding to the passed in path // ensureBucket returns bucket corresponding to the passed in path.
func (observer *Observer) ensureBucket(ctx context.Context, path metainfo.ScopedPath) *accounting.BucketTally { func (observer *Observer) ensureBucket(ctx context.Context, path metainfo.ScopedPath) *accounting.BucketTally {
bucketID := storj.JoinPaths(path.ProjectIDString, path.BucketName) bucketID := storj.JoinPaths(path.ProjectIDString, path.BucketName)
@ -290,5 +290,5 @@ func projectTotalsFromBuckets(buckets map[string]*accounting.BucketTally) map[uu
return projectTallyTotals return projectTallyTotals
} }
// using custom name to avoid breaking monitoring // using custom name to avoid breaking monitoring.
var monAccounting = monkit.ScopeNamed("storj.io/storj/satellite/accounting") var monAccounting = monkit.ScopeNamed("storj.io/storj/satellite/accounting")

View File

@ -314,7 +314,7 @@ func TestTallyEmptyProjectUpdatesLiveAccounting(t *testing.T) {
} }
// addBucketTally creates a new expected bucket tally based on the // addBucketTally creates a new expected bucket tally based on the
// pointer that was just created for the test case // pointer that was just created for the test case.
func addBucketTally(existingTally *accounting.BucketTally, inline, last bool) *accounting.BucketTally { func addBucketTally(existingTally *accounting.BucketTally, inline, last bool) *accounting.BucketTally {
// if there is already an existing tally for this project and bucket, then // if there is already an existing tally for this project and bucket, then
// add the new pointer data to the existing tally // add the new pointer data to the existing tally
@ -349,7 +349,7 @@ func addBucketTally(existingTally *accounting.BucketTally, inline, last bool) *a
return newRemoteTally return newRemoteTally
} }
// makePointer creates a pointer // makePointer creates a pointer.
func makePointer(storageNodes []*testplanet.StorageNode, rs storj.RedundancyScheme, segmentSize int64, inline bool) *pb.Pointer { func makePointer(storageNodes []*testplanet.StorageNode, rs storj.RedundancyScheme, segmentSize int64, inline bool) *pb.Pointer {
if inline { if inline {
inlinePointer := &pb.Pointer{ inlinePointer := &pb.Pointer{


@ -161,7 +161,7 @@ type API struct {
} }
} }
// NewAPI creates a new satellite API process // NewAPI creates a new satellite API process.
func NewAPI(log *zap.Logger, full *identity.FullIdentity, db DB, func NewAPI(log *zap.Logger, full *identity.FullIdentity, db DB,
pointerDB metainfo.PointerDB, revocationDB extensions.RevocationDB, liveAccounting accounting.Cache, rollupsWriteCache *orders.RollupsWriteCache, pointerDB metainfo.PointerDB, revocationDB extensions.RevocationDB, liveAccounting accounting.Cache, rollupsWriteCache *orders.RollupsWriteCache,
config *Config, versionInfo version.Info, atomicLogLevel *zap.AtomicLevel) (*API, error) { config *Config, versionInfo version.Info, atomicLogLevel *zap.AtomicLevel) (*API, error) {


@ -13,10 +13,10 @@ import (
"storj.io/common/uuid" "storj.io/common/uuid"
) )
// ErrBucketNotAttributed is returned if a requested bucket not attributed(entry not found) // ErrBucketNotAttributed is returned if a requested bucket is not attributed (entry not found).
var ErrBucketNotAttributed = errs.Class("bucket not attributed") var ErrBucketNotAttributed = errs.Class("bucket not attributed")
// Info describing value attribution from partner to bucket // Info describing value attribution from partner to bucket.
type Info struct { type Info struct {
ProjectID uuid.UUID ProjectID uuid.UUID
BucketName []byte BucketName []byte
@ -24,7 +24,7 @@ type Info struct {
CreatedAt time.Time CreatedAt time.Time
} }
// CSVRow represents data from QueryAttribution without exposing dbx // CSVRow represents data from QueryAttribution without exposing dbx.
type CSVRow struct { type CSVRow struct {
PartnerID []byte PartnerID []byte
ProjectID []byte ProjectID []byte


@ -23,7 +23,7 @@ var (
ErrContainDelete = errs.Class("unable to delete pending audit") ErrContainDelete = errs.Class("unable to delete pending audit")
) )
// PendingAudit contains info needed for retrying an audit for a contained node // PendingAudit contains info needed for retrying an audit for a contained node.
type PendingAudit struct { type PendingAudit struct {
NodeID storj.NodeID NodeID storj.NodeID
PieceID storj.PieceID PieceID storj.PieceID


@ -8,7 +8,7 @@ import (
"encoding/binary" "encoding/binary"
) )
// cryptoSource implements the math/rand Source interface using crypto/rand // cryptoSource implements the math/rand Source interface using crypto/rand.
type cryptoSource struct{} type cryptoSource struct{}
func (s cryptoSource) Seed(seed int64) {} func (s cryptoSource) Seed(seed int64) {}


@@ -23,7 +23,7 @@ type PathCollector struct {
 	rand *rand.Rand
 }
 
-// NewPathCollector instantiates a path collector
+// NewPathCollector instantiates a path collector.
 func NewPathCollector(reservoirSlots int, r *rand.Rand) *PathCollector {
 	return &PathCollector{
 		Reservoirs: make(map[storj.NodeID]*Reservoir),
@@ -32,7 +32,7 @@ func NewPathCollector(reservoirSlots int, r *rand.Rand) *PathCollector {
 	}
 }
 
-// RemoteSegment takes a remote segment found in metainfo and creates a reservoir for it if it doesn't exist already
+// RemoteSegment takes a remote segment found in metainfo and creates a reservoir for it if it doesn't exist already.
 func (collector *PathCollector) RemoteSegment(ctx context.Context, path metainfo.ScopedPath, pointer *pb.Pointer) (err error) {
 	for _, piece := range pointer.GetRemote().GetRemotePieces() {
 		if _, ok := collector.Reservoirs[piece.NodeId]; !ok {
@@ -43,12 +43,12 @@ func (collector *PathCollector) RemoteSegment(ctx context.Context, path metainfo
 	return nil
 }
 
-// Object returns nil because the audit service does not interact with objects
+// Object returns nil because the audit service does not interact with objects.
 func (collector *PathCollector) Object(ctx context.Context, path metainfo.ScopedPath, pointer *pb.Pointer) (err error) {
 	return nil
 }
 
-// InlineSegment returns nil because we're only auditing for storage nodes for now
+// InlineSegment returns nil because we're only auditing for storage nodes for now.
 func (collector *PathCollector) InlineSegment(ctx context.Context, path metainfo.ScopedPath, pointer *pb.Pointer) (err error) {
 	return nil
 }


@@ -27,7 +27,7 @@ type Reporter struct {
 	maxReverifyCount int32
 }
 
-// Report contains audit result lists for nodes that succeeded, failed, were offline, have pending audits, or failed for unknown reasons
+// Report contains audit result lists for nodes that succeeded, failed, were offline, have pending audits, or failed for unknown reasons.
 type Report struct {
 	Successes storj.NodeIDList
 	Fails storj.NodeIDList
@@ -36,7 +36,7 @@ type Report struct {
 	Unknown storj.NodeIDList
 }
 
-// NewReporter instantiates a reporter
+// NewReporter instantiates a reporter.
 func NewReporter(log *zap.Logger, overlay *overlay.Service, containment Containment, maxRetries int, maxReverifyCount int32) *Reporter {
 	return &Reporter{
 		log: log,
@@ -124,7 +124,7 @@ func (reporter *Reporter) RecordAudits(ctx context.Context, req Report, path sto
 	return Report{}, nil
 }
 
-// recordAuditFailStatus updates nodeIDs in overlay with isup=true, auditoutcome=fail
+// recordAuditFailStatus updates nodeIDs in overlay with isup=true, auditoutcome=fail.
 func (reporter *Reporter) recordAuditFailStatus(ctx context.Context, failedAuditNodeIDs storj.NodeIDList) (failed storj.NodeIDList, err error) {
 	defer mon.Task()(&ctx)(&err)
@@ -144,7 +144,7 @@ func (reporter *Reporter) recordAuditFailStatus(ctx context.Context, failedAudit
 	return nil, nil
 }
 
-// recordAuditUnknownStatus updates nodeIDs in overlay with isup=true, auditoutcome=unknown
+// recordAuditUnknownStatus updates nodeIDs in overlay with isup=true, auditoutcome=unknown.
 func (reporter *Reporter) recordAuditUnknownStatus(ctx context.Context, unknownAuditNodeIDs storj.NodeIDList) (failed storj.NodeIDList, err error) {
 	defer mon.Task()(&ctx)(&err)
@@ -186,7 +186,7 @@ func (reporter *Reporter) recordOfflineStatus(ctx context.Context, offlineNodeID
 	return nil, nil
 }
 
-// recordAuditSuccessStatus updates nodeIDs in overlay with isup=true, auditoutcome=success
+// recordAuditSuccessStatus updates nodeIDs in overlay with isup=true, auditoutcome=success.
 func (reporter *Reporter) recordAuditSuccessStatus(ctx context.Context, successNodeIDs storj.NodeIDList) (failed storj.NodeIDList, err error) {
 	defer mon.Task()(&ctx)(&err)
@@ -207,7 +207,7 @@ func (reporter *Reporter) recordAuditSuccessStatus(ctx context.Context, successN
 	return nil, nil
 }
 
-// recordPendingAudits updates the containment status of nodes with pending audits
+// recordPendingAudits updates the containment status of nodes with pending audits.
 func (reporter *Reporter) recordPendingAudits(ctx context.Context, pendingAudits []*PendingAudit) (failed []*PendingAudit, err error) {
 	defer mon.Task()(&ctx)(&err)
 	var errlist errs.Group


@@ -11,14 +11,14 @@ import (
 
 const maxReservoirSize = 3
 
-// Reservoir holds a certain number of segments to reflect a random sample
+// Reservoir holds a certain number of segments to reflect a random sample.
 type Reservoir struct {
 	Paths [maxReservoirSize]storj.Path
 	size int8
 	index int64
 }
 
-// NewReservoir instantiates a Reservoir
+// NewReservoir instantiates a Reservoir.
 func NewReservoir(size int) *Reservoir {
 	if size < 1 {
 		size = 1
@@ -32,7 +32,7 @@ func NewReservoir(size int) *Reservoir {
 }
 
 // Sample makes sure that for every segment in metainfo from index i=size..n-1,
-// pick a random number r = rand(0..i), and if r < size, replace reservoir.Segments[r] with segment
+// pick a random number r = rand(0..i), and if r < size, replace reservoir.Segments[r] with segment.
 func (reservoir *Reservoir) Sample(r *rand.Rand, path storj.Path) {
 	reservoir.index++
 	if reservoir.index < int64(reservoir.size) {
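The Sample comment above describes classic reservoir sampling (Algorithm R): keep the first size items, then replace a random slot with probability size/(i+1) so every item seen so far stays equally likely to be retained. A self-contained sketch of that rule, with simplified names rather than the audit package's actual fields:

package main

import (
	"fmt"
	"math/rand"
)

// reservoir keeps a fixed-size uniform random sample of a stream of paths.
type reservoir struct {
	paths []string
	size  int64
	index int64
}

func newReservoir(size int) *reservoir {
	if size < 1 {
		size = 1
	}
	return &reservoir{paths: make([]string, 0, size), size: int64(size)}
}

func (res *reservoir) sample(r *rand.Rand, path string) {
	defer func() { res.index++ }()
	// Fill the reservoir until it holds `size` items...
	if res.index < res.size {
		res.paths = append(res.paths, path)
		return
	}
	// ...then pick j in [0, index] and overwrite a slot only when j < size.
	if j := r.Int63n(res.index + 1); j < res.size {
		res.paths[j] = path
	}
}

func main() {
	r := rand.New(rand.NewSource(1))
	res := newReservoir(3)
	for i := 0; i < 1000; i++ {
		res.sample(r, fmt.Sprintf("segment-%d", i))
	}
	fmt.Println(res.paths) // three uniformly sampled segment paths
}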


@@ -899,7 +899,7 @@ func TestReverifyDifferentShare(t *testing.T) {
 	})
 }
 
-// TestReverifyExpired1 tests the case where the segment passed into Reverify is expired
+// TestReverifyExpired1 tests the case where the segment passed into Reverify is expired.
 func TestReverifyExpired1(t *testing.T) {
 	testplanet.Run(t, testplanet.Config{
 		SatelliteCount: 1, StorageNodeCount: 4, UplinkCount: 1,
@@ -1074,7 +1074,7 @@ func TestReverifyExpired2(t *testing.T) {
 }
 
 // TestReverifySlowDownload checks that a node that times out while sending data to the
-// audit service gets put into containment mode
+// audit service gets put into containment mode.
 func TestReverifySlowDownload(t *testing.T) {
 	testplanet.Run(t, testplanet.Config{
 		SatelliteCount: 1, StorageNodeCount: 4, UplinkCount: 1,


@@ -44,7 +44,7 @@ var (
 	ErrSegmentModified = errs.Class("segment has been modified")
 )
 
-// Share represents required information about an audited share
+// Share represents required information about an audited share.
 type Share struct {
 	Error error
 	PieceNum int
@@ -69,7 +69,7 @@ type Verifier struct {
 	OnTestingCheckSegmentAlteredHook func()
 }
 
-// NewVerifier creates a Verifier
+// NewVerifier creates a Verifier.
 func NewVerifier(log *zap.Logger, metainfo *metainfo.Service, dialer rpc.Dialer, overlay *overlay.Service, containment Containment, orders *orders.Service, id *identity.FullIdentity, minBytesPerSecond memory.Size, minDownloadTimeout time.Duration) *Verifier {
 	return &Verifier{
 		log: log,
@@ -308,7 +308,7 @@ func (verifier *Verifier) Verify(ctx context.Context, path storj.Path, skip map[
 	}, nil
 }
 
-// DownloadShares downloads shares from the nodes where remote pieces are located
+// DownloadShares downloads shares from the nodes where remote pieces are located.
 func (verifier *Verifier) DownloadShares(ctx context.Context, limits []*pb.AddressedOrderLimit, piecePrivateKey storj.PiecePrivateKey, stripeIndex int64, shareSize int32) (shares map[int]Share, err error) {
 	defer mon.Task()(&ctx)(&err)
@@ -345,7 +345,7 @@ func (verifier *Verifier) DownloadShares(ctx context.Context, limits []*pb.Addre
 	return shares, nil
 }
 
-// Reverify reverifies the contained nodes in the stripe
+// Reverify reverifies the contained nodes in the stripe.
 func (verifier *Verifier) Reverify(ctx context.Context, path storj.Path) (report Report, err error) {
 	defer mon.Task()(&ctx)(&err)
@@ -619,7 +619,7 @@ func (verifier *Verifier) Reverify(ctx context.Context, path storj.Path) (report
 	return report, err
 }
 
-// GetShare use piece store client to download shares from nodes
+// GetShare use piece store client to download shares from nodes.
 func (verifier *Verifier) GetShare(ctx context.Context, limit *pb.AddressedOrderLimit, piecePrivateKey storj.PiecePrivateKey, stripeIndex int64, shareSize int32, pieceNum int) (share Share, err error) {
 	defer mon.Task()(&ctx)(&err)
@@ -729,7 +729,7 @@ func auditShares(ctx context.Context, required, total int, originals map[int]Sha
 	return pieceNums, copies, nil
 }
 
-// makeCopies takes in a map of audit Shares and deep copies their data to a slice of infectious Shares
+// makeCopies takes in a map of audit Shares and deep copies their data to a slice of infectious Shares.
 func makeCopies(ctx context.Context, originals map[int]Share) (copies []infectious.Share, err error) {
 	defer mon.Task()(&ctx)(&err)
 	copies = make([]infectious.Share, 0, len(originals))
@@ -762,7 +762,7 @@ func getOfflineNodes(pointer *pb.Pointer, limits []*pb.AddressedOrderLimit, skip
 	return offlines
 }
 
-// getSuccessNodes uses the failed nodes, offline nodes and contained nodes arrays to determine which nodes passed the audit
+// getSuccessNodes uses the failed nodes, offline nodes and contained nodes arrays to determine which nodes passed the audit.
 func getSuccessNodes(ctx context.Context, shares map[int]Share, failedNodes, offlineNodes, unknownNodes storj.NodeIDList, containedNodes map[int]storj.NodeID) (successNodes storj.NodeIDList) {
 	defer mon.Task()(&ctx)(nil)
 	fails := make(map[storj.NodeID]bool)


@@ -763,7 +763,7 @@ func TestVerifierModifiedSegmentFailsOnce(t *testing.T) {
 }
 
 // TestVerifierSlowDownload checks that a node that times out while sending data to the
-// audit service gets put into containment mode
+// audit service gets put into containment mode.
 func TestVerifierSlowDownload(t *testing.T) {
 	testplanet.Run(t, testplanet.Config{
 		SatelliteCount: 1, StorageNodeCount: 4, UplinkCount: 1,
@@ -821,7 +821,7 @@ func TestVerifierSlowDownload(t *testing.T) {
 }
 
 // TestVerifierUnknownError checks that a node that returns an unknown error in response to an audit request
-// does not get marked as successful, failed, or contained
+// does not get marked as successful, failed, or contained.
 func TestVerifierUnknownError(t *testing.T) {
 	testplanet.Run(t, testplanet.Config{
 		SatelliteCount: 1, StorageNodeCount: 4, UplinkCount: 1,


@@ -34,7 +34,7 @@ func (percents Percents) String() string {
 	return strings.Join(s, ",")
 }
 
-// Set implements pflag.Value by parsing a comma separated list of percents
+// Set implements pflag.Value by parsing a comma separated list of percents.
 func (percents *Percents) Set(value string) error {
 	var entries []string
 	if value != "" {
@@ -54,7 +54,7 @@ func (percents *Percents) Set(value string) error {
 	return nil
 }
 
-// Type returns the type of the pflag.Value
+// Type returns the type of the pflag.Value.
 func (percents Percents) Type() string {
 	return "percents"
 }
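String, Set, and Type together satisfy the pflag.Value interface, which is what lets a comma-separated percent list be bound directly to a command-line flag. A standalone sketch of that wiring; the parsing details and the int element type are assumptions, not copied from the satellite:

package main

import (
	"fmt"
	"strconv"
	"strings"

	"github.com/spf13/pflag"
)

// Percents is an illustrative comma-separated list flag type.
type Percents []int

func (p Percents) String() string {
	s := make([]string, len(p))
	for i, v := range p {
		s[i] = strconv.Itoa(v)
	}
	return strings.Join(s, ",")
}

func (p *Percents) Set(value string) error {
	*p = (*p)[:0]
	if value == "" {
		return nil
	}
	for _, entry := range strings.Split(value, ",") {
		v, err := strconv.Atoi(strings.TrimSpace(entry))
		if err != nil {
			return fmt.Errorf("invalid percent %q: %w", entry, err)
		}
		*p = append(*p, v)
	}
	return nil
}

func (p Percents) Type() string { return "percents" }

func main() {
	var percents Percents
	fs := pflag.NewFlagSet("example", pflag.ContinueOnError)
	fs.Var(&percents, "percents", "comma separated list of percents")
	_ = fs.Parse([]string{"--percents=25,50,75"})
	fmt.Println(percents) // prints 25,50,75 via the String method
}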


@@ -30,7 +30,7 @@ type APIKeys interface {
 	Delete(ctx context.Context, id uuid.UUID) error
 }
 
-// APIKeyInfo describing api key model in the database
+// APIKeyInfo describing api key model in the database.
 type APIKeyInfo struct {
 	ID uuid.UUID `json:"id"`
 	ProjectID uuid.UUID `json:"projectId"`
@@ -40,7 +40,7 @@ type APIKeyInfo struct {
 	CreatedAt time.Time `json:"createdAt"`
 }
 
-// APIKeyCursor holds info for api keys cursor pagination
+// APIKeyCursor holds info for api keys cursor pagination.
 type APIKeyCursor struct {
 	Search string
 	Limit uint
@@ -49,7 +49,7 @@ type APIKeyCursor struct {
 	OrderDirection OrderDirection
 }
 
-// APIKeyPage represent api key page result
+// APIKeyPage represent api key page result.
 type APIKeyPage struct {
 	APIKeys []APIKeyInfo
@@ -64,7 +64,7 @@ type APIKeyPage struct {
 	TotalCount uint64
 }
 
-// APIKeyOrder is used for querying api keys in specified order
+// APIKeyOrder is used for querying api keys in specified order.
 type APIKeyOrder uint8
 
 const (


@@ -14,12 +14,12 @@ import (
 
 //TODO: change to JWT or Macaroon based auth
 
-// Signer creates signature for provided data
+// Signer creates signature for provided data.
 type Signer interface {
 	Sign(data []byte) ([]byte, error)
 }
 
-// signToken signs token with given signer
+// signToken signs token with given signer.
 func signToken(token *consoleauth.Token, signer Signer) error {
 	encoded := base64.URLEncoding.EncodeToString(token.Payload)
@@ -32,32 +32,32 @@ func signToken(token *consoleauth.Token, signer Signer) error {
 	return nil
 }
 
-// key is a context value key type
+// key is a context value key type.
 type key int
 
-// authKey is context key for Authorization
+// authKey is context key for Authorization.
 const authKey key = 0
 
-// ErrUnauthorized is error class for authorization related errors
+// ErrUnauthorized is error class for authorization related errors.
 var ErrUnauthorized = errs.Class("unauthorized error")
 
-// Authorization contains auth info of authorized User
+// Authorization contains auth info of authorized User.
 type Authorization struct {
 	User User
 	Claims consoleauth.Claims
 }
 
-// WithAuth creates new context with Authorization
+// WithAuth creates new context with Authorization.
 func WithAuth(ctx context.Context, auth Authorization) context.Context {
 	return context.WithValue(ctx, authKey, auth)
 }
 
-// WithAuthFailure creates new context with authorization failure
+// WithAuthFailure creates new context with authorization failure.
 func WithAuthFailure(ctx context.Context, err error) context.Context {
 	return context.WithValue(ctx, authKey, err)
 }
 
-// GetAuth gets Authorization from context
+// GetAuth gets Authorization from context.
 func GetAuth(ctx context.Context) (Authorization, error) {
 	value := ctx.Value(authKey)
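WithAuth and WithAuthFailure store either an Authorization value or an error under the same private context key, and GetAuth presumably type-switches on whatever it finds. A simplified sketch of that pattern with stand-in types, not the console package's real ones:

package main

import (
	"context"
	"errors"
	"fmt"
)

// Authorization is a stand-in for the console package's auth info.
type Authorization struct{ UserEmail string }

type key int

const authKey key = 0

var ErrUnauthorized = errors.New("unauthorized")

func WithAuth(ctx context.Context, auth Authorization) context.Context {
	return context.WithValue(ctx, authKey, auth)
}

func WithAuthFailure(ctx context.Context, err error) context.Context {
	return context.WithValue(ctx, authKey, err)
}

// GetAuth returns the stored Authorization, the stored failure, or a generic
// unauthorized error when nothing was placed in the context at all.
func GetAuth(ctx context.Context) (Authorization, error) {
	switch v := ctx.Value(authKey).(type) {
	case Authorization:
		return v, nil
	case error:
		return Authorization{}, v
	default:
		return Authorization{}, ErrUnauthorized
	}
}

func main() {
	ctx := WithAuth(context.Background(), Authorization{UserEmail: "user@example.test"})
	auth, err := GetAuth(ctx)
	fmt.Println(auth.UserEmail, err) // user@example.test <nil>

	ctx = WithAuthFailure(context.Background(), ErrUnauthorized)
	_, err = GetAuth(ctx)
	fmt.Println(err) // unauthorized
}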


@@ -13,14 +13,14 @@ import (
 
 //TODO: change to JWT or Macaroon based auth
 
-// Claims represents data signed by server and used for authentication
+// Claims represents data signed by server and used for authentication.
 type Claims struct {
 	ID uuid.UUID `json:"id"`
 	Email string `json:"email,omitempty"`
 	Expiration time.Time `json:"expires,omitempty"`
 }
 
-// JSON returns json representation of Claims
+// JSON returns json representation of Claims.
 func (c *Claims) JSON() ([]byte, error) {
 	buffer := bytes.NewBuffer(nil)
@@ -28,7 +28,7 @@ func (c *Claims) JSON() ([]byte, error) {
 	return buffer.Bytes(), err
 }
 
-// FromJSON returns Claims instance, parsed from JSON
+// FromJSON returns Claims instance, parsed from JSON.
 func FromJSON(data []byte) (*Claims, error) {
 	claims := new(Claims)


@@ -10,12 +10,12 @@ import (
 
 //TODO: change to JWT or Macaroon based auth
 
-// Hmac is hmac256 based Signer
+// Hmac is hmac256 based Signer.
 type Hmac struct {
 	Secret []byte
 }
 
-// Sign implements satellite signer
+// Sign implements satellite signer.
 func (a *Hmac) Sign(data []byte) ([]byte, error) {
 	mac := hmac.New(sha256.New, a.Secret)
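Hmac satisfies the Signer interface shown earlier, and signToken signs the base64url-encoded payload. A rough standalone sketch of how those two pieces could fit together; names, the output format, and error handling are illustrative rather than the console package's exact code:

package main

import (
	"crypto/hmac"
	"crypto/sha256"
	"encoding/base64"
	"fmt"
)

// Signer mirrors the interface from the earlier hunk.
type Signer interface {
	Sign(data []byte) ([]byte, error)
}

// hmacSigner signs data with HMAC-SHA256 over a shared secret.
type hmacSigner struct {
	secret []byte
}

func (h hmacSigner) Sign(data []byte) ([]byte, error) {
	mac := hmac.New(sha256.New, h.secret)
	if _, err := mac.Write(data); err != nil {
		return nil, err
	}
	return mac.Sum(nil), nil
}

// signPayload mimics signToken: the base64url-encoded payload is what gets
// signed, and the signature is appended after a dot.
func signPayload(payload []byte, signer Signer) (string, error) {
	encoded := base64.URLEncoding.EncodeToString(payload)
	sig, err := signer.Sign([]byte(encoded))
	if err != nil {
		return "", err
	}
	return encoded + "." + base64.URLEncoding.EncodeToString(sig), nil
}

func main() {
	token, err := signPayload([]byte(`{"id":"1234"}`), hmacSigner{secret: []byte("example-secret")})
	fmt.Println(token, err)
}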


@@ -14,7 +14,7 @@ import (
 
 //TODO: change to JWT or Macaroon based auth
 
-// Token represents authentication data structure
+// Token represents authentication data structure.
 type Token struct {
 	Payload []byte
 	Signature []byte
@@ -28,7 +28,7 @@ func (t Token) String() string {
 	return strings.Join([]string{payload, signature}, ".")
 }
 
-// FromBase64URLString creates Token instance from base64URLEncoded string representation
+// FromBase64URLString creates Token instance from base64URLEncoded string representation.
 func FromBase64URLString(token string) (Token, error) {
 	i := strings.Index(token, ".")
 	if i < 0 {
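String and FromBase64URLString imply a "payload.signature" wire format with each part base64url-encoded. A sketch of that round-trip; the error value and decode details are assumptions, not taken from the real package:

package main

import (
	"encoding/base64"
	"errors"
	"fmt"
	"strings"
)

// Token carries an opaque payload plus its signature.
type Token struct {
	Payload   []byte
	Signature []byte
}

// String encodes both parts with base64url and joins them with a dot.
func (t Token) String() string {
	payload := base64.URLEncoding.EncodeToString(t.Payload)
	signature := base64.URLEncoding.EncodeToString(t.Signature)
	return strings.Join([]string{payload, signature}, ".")
}

// FromBase64URLString splits on the first dot and decodes each half.
func FromBase64URLString(token string) (Token, error) {
	i := strings.Index(token, ".")
	if i < 0 {
		return Token{}, errors.New("invalid token format")
	}
	payload, err := base64.URLEncoding.DecodeString(token[:i])
	if err != nil {
		return Token{}, err
	}
	signature, err := base64.URLEncoding.DecodeString(token[i+1:])
	if err != nil {
		return Token{}, err
	}
	return Token{Payload: payload, Signature: signature}, nil
}

func main() {
	t := Token{Payload: []byte("claims"), Signature: []byte("sig")}
	parsed, err := FromBase64URLString(t.String())
	fmt.Println(string(parsed.Payload), err) // claims <nil>, round-trips cleanly
}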


@@ -19,7 +19,7 @@ const (
 	FieldKey = "key"
 )
 
-// graphqlAPIKeyInfo creates satellite.APIKeyInfo graphql object
+// graphqlAPIKeyInfo creates satellite.APIKeyInfo graphql object.
 func graphqlAPIKeyInfo() *graphql.Object {
 	return graphql.NewObject(graphql.ObjectConfig{
 		Name: APIKeyInfoType,
@@ -43,7 +43,7 @@ func graphqlAPIKeyInfo() *graphql.Object {
 	})
 }
 
-// graphqlCreateAPIKey creates createAPIKey graphql object
+// graphqlCreateAPIKey creates createAPIKey graphql object.
 func graphqlCreateAPIKey(types *TypeCreator) *graphql.Object {
 	return graphql.NewObject(graphql.ObjectConfig{
 		Name: CreateAPIKeyType,
@@ -116,7 +116,7 @@ func graphqlAPIKeysPage(types *TypeCreator) *graphql.Object {
 	})
 }
 
-// createAPIKey holds macaroon.APIKey and console.APIKeyInfo
+// createAPIKey holds macaroon.APIKey and console.APIKeyInfo.
 type createAPIKey struct {
 	Key string
 	KeyInfo *console.APIKeyInfo


@@ -20,7 +20,7 @@ const (
 	TermsAndConditionsURL = "termsAndConditionsURL"
 )
 
-// AccountActivationEmail is mailservice template with activation data
+// AccountActivationEmail is mailservice template with activation data.
 type AccountActivationEmail struct {
 	Origin string
 	ActivationLink string
@@ -29,13 +29,13 @@ type AccountActivationEmail struct {
 	UserName string
 }
 
-// Template returns email template name
+// Template returns email template name.
 func (*AccountActivationEmail) Template() string { return "Welcome" }
 
-// Subject gets email subject
+// Subject gets email subject.
 func (*AccountActivationEmail) Subject() string { return "Activate your email" }
 
-// ForgotPasswordEmail is mailservice template with reset password data
+// ForgotPasswordEmail is mailservice template with reset password data.
 type ForgotPasswordEmail struct {
 	Origin string
 	UserName string
@@ -46,13 +46,13 @@ type ForgotPasswordEmail struct {
 	TermsAndConditionsURL string
 }
 
-// Template returns email template name
+// Template returns email template name.
 func (*ForgotPasswordEmail) Template() string { return "Forgot" }
 
-// Subject gets email subject
+// Subject gets email subject.
 func (*ForgotPasswordEmail) Subject() string { return "Password recovery request" }
 
-// ProjectInvitationEmail is mailservice template for project invitation email
+// ProjectInvitationEmail is mailservice template for project invitation email.
 type ProjectInvitationEmail struct {
 	Origin string
 	UserName string
@@ -63,10 +63,10 @@ type ProjectInvitationEmail struct {
 	TermsAndConditionsURL string
 }
 
-// Template returns email template name
+// Template returns email template name.
 func (*ProjectInvitationEmail) Template() string { return "Invite" }
 
-// Subject gets email subject
+// Subject gets email subject.
 func (email *ProjectInvitationEmail) Subject() string {
 	return "You were invited to join the Project " + email.ProjectName
 }
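Each email type exposes Template and Subject, which suggests the mail service selects an HTML template by name and builds the subject line from the struct's data. A hypothetical sketch of that interface; the actual mailservice API is not shown in this diff, so the Mail interface and send helper below are assumptions:

package main

import "fmt"

// Mail is a hypothetical stand-in for the interface these email types satisfy.
type Mail interface {
	Template() string
	Subject() string
}

// ProjectInvitationEmail mirrors the shape of the type in the hunk above.
type ProjectInvitationEmail struct {
	Origin      string
	UserName    string
	ProjectName string
}

func (*ProjectInvitationEmail) Template() string { return "Invite" }

func (email *ProjectInvitationEmail) Subject() string {
	return "You were invited to join the Project " + email.ProjectName
}

// send shows how a mail service might pick the template and subject from the
// message before rendering and delivery.
func send(msg Mail) {
	fmt.Printf("rendering template %q with subject %q\n", msg.Template(), msg.Subject())
}

func main() {
	send(&ProjectInvitationEmail{ProjectName: "demo-project", UserName: "alice"})
}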


@@ -53,7 +53,7 @@ const (
 	ReferrerUserID = "referrerUserId"
 )
 
-// rootMutation creates mutation for graphql populated by AccountsClient
+// rootMutation creates mutation for graphql populated by AccountsClient.
 func rootMutation(log *zap.Logger, service *console.Service, mailService *mailservice.Service, types *TypeCreator) *graphql.Object {
 	return graphql.NewObject(graphql.ObjectConfig{
 		Name: Mutation,


@@ -32,15 +32,15 @@ import (
 	"storj.io/storj/storage/redis/redisserver"
 )
 
-// discardSender discard sending of an actual email
+// discardSender discard sending of an actual email.
 type discardSender struct{}
 
-// SendEmail immediately returns with nil error
+// SendEmail immediately returns with nil error.
 func (*discardSender) SendEmail(ctx context.Context, msg *post.Message) error {
 	return nil
 }
 
-// FromAddress returns empty post.Address
+// FromAddress returns empty post.Address.
 func (*discardSender) FromAddress() post.Address {
 	return post.Address{}
 }


@@ -84,7 +84,7 @@ const (
 	BeforeArg = "before"
 )
 
-// graphqlProject creates *graphql.Object type representation of satellite.ProjectInfo
+// graphqlProject creates *graphql.Object type representation of satellite.ProjectInfo.
 func graphqlProject(service *console.Service, types *TypeCreator) *graphql.Object {
 	return graphql.NewObject(graphql.ObjectConfig{
 		Name: ProjectType,
@@ -240,7 +240,7 @@ func graphqlProject(service *console.Service, types *TypeCreator) *graphql.Objec
 	})
 }
 
-// graphqlProjectInput creates graphql.InputObject type needed to create/update satellite.Project
+// graphqlProjectInput creates graphql.InputObject type needed to create/update satellite.Project.
 func graphqlProjectInput() *graphql.InputObject {
 	return graphql.NewInputObject(graphql.InputObjectConfig{
 		Name: ProjectInputType,
@@ -255,7 +255,7 @@ func graphqlProjectInput() *graphql.InputObject {
 	})
 }
 
-// graphqlBucketUsageCursor creates bucket usage cursor graphql input type
+// graphqlBucketUsageCursor creates bucket usage cursor graphql input type.
 func graphqlBucketUsageCursor() *graphql.InputObject {
 	return graphql.NewInputObject(graphql.InputObjectConfig{
 		Name: BucketUsageCursorInputType,
@@ -273,7 +273,7 @@ func graphqlBucketUsageCursor() *graphql.InputObject {
 	})
 }
 
-// graphqlBucketUsage creates bucket usage grapqhl type
+// graphqlBucketUsage creates bucket usage grapqhl type.
 func graphqlBucketUsage() *graphql.Object {
 	return graphql.NewObject(graphql.ObjectConfig{
 		Name: BucketUsageType,
@@ -300,7 +300,7 @@ func graphqlBucketUsage() *graphql.Object {
 	})
 }
 
-// graphqlBucketUsagePage creates bucket usage page graphql object
+// graphqlBucketUsagePage creates bucket usage page graphql object.
 func graphqlBucketUsagePage(types *TypeCreator) *graphql.Object {
 	return graphql.NewObject(graphql.ObjectConfig{
 		Name: BucketUsagePageType,
@@ -330,7 +330,7 @@ func graphqlBucketUsagePage(types *TypeCreator) *graphql.Object {
 	})
 }
 
-// graphqlProjectUsage creates project usage graphql type
+// graphqlProjectUsage creates project usage graphql type.
 func graphqlProjectUsage() *graphql.Object {
 	return graphql.NewObject(graphql.ObjectConfig{
 		Name: ProjectUsageType,
@@ -354,7 +354,7 @@ func graphqlProjectUsage() *graphql.Object {
 	})
 }
 
-// fromMapProjectInfo creates console.ProjectInfo from input args
+// fromMapProjectInfo creates console.ProjectInfo from input args.
 func fromMapProjectInfo(args map[string]interface{}) (project console.ProjectInfo) {
 	project.Name, _ = args[FieldName].(string)
 	project.Description, _ = args[FieldDescription].(string)
@@ -362,7 +362,7 @@ func fromMapProjectInfo(args map[string]interface{}) (project console.ProjectInf
 	return
 }
 
-// fromMapBucketUsageCursor creates console.BucketUsageCursor from input args
+// fromMapBucketUsageCursor creates console.BucketUsageCursor from input args.
 func fromMapBucketUsageCursor(args map[string]interface{}) (cursor accounting.BucketUsageCursor) {
 	limit, _ := args[LimitArg].(int)
 	page, _ := args[PageArg].(int)


@@ -18,7 +18,7 @@ const (
 	FieldJoinedAt = "joinedAt"
 )
 
-// graphqlProjectMember creates projectMember type
+// graphqlProjectMember creates projectMember type.
 func graphqlProjectMember(service *console.Service, types *TypeCreator) *graphql.Object {
 	return graphql.NewObject(graphql.ObjectConfig{
 		Name: ProjectMemberType,
@@ -96,7 +96,7 @@ func graphqlProjectMembersPage(types *TypeCreator) *graphql.Object {
 	})
 }
 
-// projectMember encapsulates User and joinedAt
+// projectMember encapsulates User and joinedAt.
 type projectMember struct {
 	User *console.User
 	JoinedAt time.Time

Some files were not shown because too many files have changed in this diff.