all: fix comments

Change-Id: I2d2307e3fab87de47a72b3595d051e2c95ff4f8a
Egon Elbre 2020-07-16 18:27:24 +03:00
parent 1a6e25579c
commit e70da5cd4e
15 changed files with 40 additions and 33 deletions

View File

@@ -27,7 +27,8 @@ import (
 	"storj.io/uplink/private/testuplink"
 )
 
-// Uplink is a general purpose.
+// Uplink is a registered user on all satellites,
+// which contains the necessary accesses and project info.
 type Uplink struct {
 	Log *zap.Logger
 	Identity *identity.FullIdentity

View File

@@ -55,9 +55,9 @@ func baseUserConfig() graphql.ObjectConfig {
 	}
 }
 
-// graphqlUser creates *graphql.Object type representation of satellite.User
+// graphqlUser creates *graphql.Object type representation of satellite.User.
+// TODO: simplify.
 func graphqlUser() *graphql.Object {
-	// TODO: simplify
 	return graphql.NewObject(baseUserConfig())
 }

View File

@@ -7,8 +7,8 @@ package console
 type OrderDirection uint8
 
 const (
-	// Ascending indicates that we should order ascending
+	// Ascending indicates that we should order ascending.
 	Ascending OrderDirection = 1
-	// Descending indicates that we should order descending
+	// Descending indicates that we should order descending.
 	Descending OrderDirection = 2
 )

View File

@@ -19,8 +19,9 @@ import (
 )
 
 // TestEstimationChoreBasic tests the basic functionality of the downtime estimation chore:
-// 1. Test that when a node that had one failed ping, and one successful ping >1s later does not have recorded downtime
-// 2. Test that when a node that had one failed ping, and another failed ping >1s later has at least 1s of recorded downtime.
+//
+// 1. Test that when a node that had one failed ping, and one successful ping >1s later does not have recorded downtime.
+// 2. Test that when a node that had one failed ping, and another failed ping >1s later has at least 1s of recorded downtime.
 func TestEstimationChoreBasic(t *testing.T) {
 	testplanet.Run(t, testplanet.Config{
 		SatelliteCount: 1, StorageNodeCount: 2, UplinkCount: 0,

View File

@@ -34,7 +34,7 @@ type CurrencyRateInfo struct {
 	LastUpdate time.Time
 }
 
-// UnmarshalJSON converts JSON string to currency rate info,.
+// UnmarshalJSON converts JSON string to currency rate info.
 func (rateInfo *CurrencyRateInfo) UnmarshalJSON(b []byte) error {
 	var rateRaw struct {
 		IsFiat int `json:"is_fiat"`
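The hunk above touches a custom UnmarshalJSON that first decodes into a raw intermediate struct. A minimal sketch of that decode-then-convert pattern, using an assumed two-field shape rather than the real CurrencyRateInfo definition:

package main

import (
	"encoding/json"
	"fmt"
)

// CurrencyRateInfo here is a stand-in with assumed fields; the real satellite
// type has more (e.g. LastUpdate), but the decoding pattern is the same.
type CurrencyRateInfo struct {
	IsFiat bool
	Rate   float64
}

// UnmarshalJSON decodes into a raw struct first, then converts the int flag
// into a bool on the public type.
func (rateInfo *CurrencyRateInfo) UnmarshalJSON(b []byte) error {
	var rateRaw struct {
		IsFiat int     `json:"is_fiat"`
		Rate   float64 `json:"rate"`
	}
	if err := json.Unmarshal(b, &rateRaw); err != nil {
		return err
	}
	*rateInfo = CurrencyRateInfo{
		IsFiat: rateRaw.IsFiat != 0,
		Rate:   rateRaw.Rate,
	}
	return nil
}

func main() {
	var info CurrencyRateInfo
	if err := json.Unmarshal([]byte(`{"is_fiat": 1, "rate": 0.37}`), &info); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", info) // {IsFiat:true Rate:0.37}
}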

View File

@@ -30,7 +30,7 @@ import (
 	"storj.io/storj/satellite/satellitedb/dbx"
 )
 
-// loadSnapshots loads all the dbschemas from testdata/postgres.*.
+// loadSnapshots loads all the dbschemas from `testdata/postgres.*`.
 func loadSnapshots(ctx context.Context, connstr, dbxscript string) (*dbschema.Snapshots, *dbschema.Schema, error) {
 	snapshots := &dbschema.Snapshots{}

View File

@@ -14,6 +14,7 @@ import (
 // pg-to-crdb converts a Postgres plaintext sql backup generated by pg_dump
 // to a compatible plaintext sql backup that only has SQL statements compatible with CockroachDB.
+//
 // Usage:
 // cat postgres_backup.sql | go run pg-to-crdb.go > cockroach_backup.sql
 func main() {
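pg-to-crdb is described above as a stdin-to-stdout filter over a pg_dump backup. A rough sketch of that shape; the skipLine rule below is only a placeholder, and the real tool's compatibility filtering is more involved:

package main

import (
	"bufio"
	"fmt"
	"os"
	"strings"
)

// skipLine is a placeholder rule: drop session SET statements, which pg_dump
// emits and CockroachDB often rejects. The actual tool decides per statement
// which SQL to keep.
func skipLine(line string) bool {
	return strings.HasPrefix(line, "SET ")
}

func main() {
	in := bufio.NewScanner(os.Stdin)
	in.Buffer(make([]byte, 0, 1024*1024), 1024*1024)
	out := bufio.NewWriter(os.Stdout)
	defer out.Flush()

	for in.Scan() {
		line := in.Text()
		if skipLine(line) {
			continue
		}
		fmt.Fprintln(out, line)
	}
	if err := in.Err(); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}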

View File

@@ -127,8 +127,8 @@ func rename(oldpath, newpath string) error {
 	return nil
 }
 
-// openFileReadOnly opens the file with read only
-// a custom implementation, because os.Open doesn't support specifying FILE_SHARE_DELETE.
+// openFileReadOnly opens the file with read only.
+// Custom implementation, because os.Open doesn't support specifying FILE_SHARE_DELETE.
 func openFileReadOnly(path string, perm os.FileMode) (*os.File, error) {
 	pathp, err := windows.UTF16PtrFromString(tryFixLongPath(path))
 	if err != nil {

View File

@@ -155,8 +155,8 @@ func (store *blobStore) GarbageCollect(ctx context.Context) (err error) {
 	return Error.Wrap(err)
 }
 
-// Create creates a new blob that can be written
-// optionally takes a size argument for performance improvements, -1 is unknown size.
+// Create creates a new blob that can be written.
+// Optionally takes a size argument for performance improvements, -1 is unknown size.
 func (store *blobStore) Create(ctx context.Context, ref storage.BlobRef, size int64) (_ storage.BlobWriter, err error) {
 	defer mon.Task()(&ctx)(&err)
 	file, err := store.dir.CreateTemporaryFile(ctx, size)

View File

@@ -55,32 +55,32 @@ type bindataFileInfo struct {
 	modTime time.Time
 }
 
-// Name return file name.
+// Name return file name
 func (fi bindataFileInfo) Name() string {
 	return fi.name
 }
 
-// Size return file size.
+// Size return file size
 func (fi bindataFileInfo) Size() int64 {
 	return fi.size
 }
 
-// Mode return file mode.
+// Mode return file mode
 func (fi bindataFileInfo) Mode() os.FileMode {
 	return fi.mode
 }
 
-// Mode return file modify time.
+// Mode return file modify time
 func (fi bindataFileInfo) ModTime() time.Time {
 	return fi.modTime
 }
 
-// IsDir return file whether a directory.
+// IsDir return file whether a directory
 func (fi bindataFileInfo) IsDir() bool {
 	return fi.mode&os.ModeDir != 0
 }
 
-// Sys return file is sys mode.
+// Sys return file is sys mode
 func (fi bindataFileInfo) Sys() interface{} {
 	return nil
 }
@@ -358,7 +358,7 @@ var _bintree = &bintree{nil, map[string]*bintree{
 	"2020011601_kvimplementation_switchover.up.sql": &bintree{_2020011601_kvimplementation_switchoverUpSql, map[string]*bintree{}},
 }}
 
-// RestoreAsset restores an asset under the given directory.
+// RestoreAsset restores an asset under the given directory
 func RestoreAsset(dir, name string) error {
 	data, err := Asset(name)
 	if err != nil {
@@ -383,7 +383,7 @@ func RestoreAsset(dir, name string) error {
 	return nil
 }
 
-// RestoreAssets restores an asset under the given directory recursively.
+// RestoreAssets restores an asset under the given directory recursively
 func RestoreAssets(dir, name string) error {
 	children, err := AssetDir(name)
 	// File

View File

@@ -18,7 +18,7 @@ import (
 	"storj.io/storj/storagenode/trust"
 )
 
-// Endpoint is.
+// Endpoint implements private inspector for Graceful Exit.
 type Endpoint struct {
 	log *zap.Logger
 	usageCache *pieces.BlobsUsageCache

View File

@@ -362,8 +362,8 @@ func (service *Service) settle(ctx context.Context, log *zap.Logger, satelliteID
 	return errList.Err()
 }
 
-// sleep for random interval in [0;maxSleep)
-// returns error if context was cancelled.
+// sleep for random interval in [0;maxSleep).
+// Returns an error if context was cancelled.
 func (service *Service) sleep(ctx context.Context) error {
 	if service.config.MaxSleep <= 0 {
 		return nil
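The sleep comment describes waiting for a random interval in [0;maxSleep) while honoring context cancellation. A self-contained sketch of that behavior, with assumed names rather than the actual Service method:

package main

import (
	"context"
	"fmt"
	"math/rand"
	"time"
)

// randomSleep waits for a random duration in [0; maxSleep) and returns early
// with the context error if the context is cancelled first.
func randomSleep(ctx context.Context, maxSleep time.Duration) error {
	if maxSleep <= 0 {
		return nil
	}
	jitter := time.Duration(rand.Int63n(int64(maxSleep)))
	timer := time.NewTimer(jitter)
	defer timer.Stop()

	select {
	case <-timer.C:
		return nil
	case <-ctx.Done():
		return ctx.Err()
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
	defer cancel()
	fmt.Println(randomSleep(ctx, 200*time.Millisecond))
}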

View File

@@ -245,8 +245,10 @@ func (r *Reader) StorageFormatVersion() storage.FormatVersion {
 }
 
 // GetPieceHeader reads, unmarshals, and returns the piece header. It may only be called once,
-// before any Read() calls. (Retrieving the header at any time could be supported, but for the sake
-// of performance we need to understand why and how often that would happen.).
+// before any Read() calls.
+//
+// Retrieving the header at any time could be supported, but for the sake
+// of performance we need to understand why and how often that would happen.
 func (r *Reader) GetPieceHeader() (*pb.PieceHeader, error) {
 	if r.formatVersion < filestore.FormatV1 {
 		return nil, BadFormatVersion.New("Can't get piece header from storage format V0 reader")

View File

@@ -49,9 +49,10 @@ func (a Full) Less(b Full) bool {
 	return binary.BigEndian.Uint64(a[:]) < binary.BigEndian.Uint64(b[:])
 }
 
-// serialsList is a structure that contains a list of partial serials and a list of full serials
-// if the satellite puts the expiration time as the first 8 bytes, the partial serials list will be used
-// otherwise, the full serials list will be used.
+// serialsList is a structure that contains a list of partial serials and a list of full serials.
+//
+// For serials where expiration time is the first 8 bytes, it uses partialSerials.
+// It uses fullSerials otherwise.
 type serialsList struct {
 	partialSerials []Partial
 	fullSerials []storj.SerialNumber
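The serialsList comment describes keeping serials compact when their first 8 bytes already carry the expiration. A rough standalone illustration of that split; the types, sizes, and helpers here are assumptions, not the storj definitions:

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"time"
)

type serial [16]byte

// serialsList keeps serials whose first 8 bytes match the shared expiration
// prefix as 8-byte tails, and everything else as full 16-byte serials.
type serialsList struct {
	expirationPrefix [8]byte
	partialSerials   [][8]byte
	fullSerials      []serial
}

func newSerialsList(expiration time.Time) *serialsList {
	list := &serialsList{}
	binary.BigEndian.PutUint64(list.expirationPrefix[:], uint64(expiration.Unix()))
	return list
}

// add stores the serial in the cheaper partial form when its prefix matches.
func (list *serialsList) add(s serial) {
	if bytes.Equal(s[:8], list.expirationPrefix[:]) {
		var tail [8]byte
		copy(tail[:], s[8:])
		list.partialSerials = append(list.partialSerials, tail)
		return
	}
	list.fullSerials = append(list.fullSerials, s)
}

func main() {
	expiration := time.Unix(1600000000, 0)
	list := newSerialsList(expiration)

	var matching serial
	binary.BigEndian.PutUint64(matching[:8], uint64(expiration.Unix()))
	list.add(matching)

	var other serial
	binary.BigEndian.PutUint64(other[:8], uint64(expiration.Add(time.Hour).Unix()))
	list.add(other)

	fmt.Println(len(list.partialSerials), len(list.fullSerials)) // 1 1
}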

View File

@@ -70,10 +70,11 @@ func isReserved(s string) (schema string, ok bool) {
 // reProbablySatelliteURL matches config strings that are (intended, but
 // possibly misconfigured) satellite URLs, like the following:
-// - @
-// - id@
-// - host:9999
-// - id@host:9999.
+//
+// - @
+// - id@
+// - host:9999
+// - id@host:9999
 var reProbablySatelliteURL = regexp.MustCompile(`@|(^[^/\\]{2,}:\d+$)`)
 
 func isProbablySatelliteURL(s string) bool {
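The bullet list above gives the shapes the expression should flag. A small standalone check of those examples against the same pattern (throwaway harness, not part of the trust package):

package main

import (
	"fmt"
	"regexp"
)

var reProbablySatelliteURL = regexp.MustCompile(`@|(^[^/\\]{2,}:\d+$)`)

func main() {
	// The four shapes listed in the comment should all be flagged,
	// while a plain path-like string should not.
	for _, s := range []string{"@", "id@", "host:9999", "id@host:9999", "/tmp/config"} {
		fmt.Printf("%q -> %v\n", s, reProbablySatelliteURL.MatchString(s))
	}
}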