pkg/*: add monkit task to missing places (#2109)
parent bc3463b9d3
commit 9c5708da32
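The changes below all apply one instrumentation pattern: a function gets a named `err` result, a `defer mon.Task()(&ctx)(&err)` as its first statement (with `mon = monkit.Package()` declared once per package), and a `context.Context` threaded through its signature, or `ctx := context.TODO()` where no context is available yet. The minimal sketch below only illustrates that shape; the package and function names (`example`, `doWork`) are invented for illustration and do not appear in the diff.

package example

import (
	"context"

	monkit "gopkg.in/spacemonkeygo/monkit.v2"
)

// mon is the package-level monkit scope; every instrumented
// function in the package registers its task against it.
var mon = monkit.Package()

// doWork shows the shape of the change: the error result is named so the
// deferred task closure can observe it when the function returns, and the
// task spans the entire call.
func doWork(ctx context.Context) (err error) {
	defer mon.Task()(&ctx)(&err)

	// ...existing function body, unchanged...
	return nil
}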
@@ -5,6 +5,7 @@ package main

import (
"bufio"
"context"
"encoding/csv"
"fmt"
"io"
@@ -18,6 +19,7 @@ import (
"github.com/zeebo/errs"

"storj.io/storj/pkg/certificates"
"storj.io/storj/pkg/process"
)

var (
@@ -63,6 +65,7 @@ func parseEmailsList(fileName, delimiter string) (emails []string, err error) {
}

func cmdCreateAuth(cmd *cobra.Command, args []string) error {
ctx := process.Ctx(cmd)
count, err := strconv.Atoi(args[0])
if err != nil {
return errs.New("Count couldn't be parsed: %s", args[0])
@@ -87,7 +90,7 @@ func cmdCreateAuth(cmd *cobra.Command, args []string) error {

var incErrs errs.Group
for _, email := range emails {
if _, err := authDB.Create(email, count); err != nil {
if _, err := authDB.Create(ctx, email, count); err != nil {
incErrs.Add(err)
}
}
@@ -95,6 +98,7 @@ func cmdCreateAuth(cmd *cobra.Command, args []string) error {
}

func cmdInfoAuth(cmd *cobra.Command, args []string) error {
ctx := process.Ctx(cmd)
authDB, err := config.Signer.NewAuthDB()
if err != nil {
return err
@@ -107,7 +111,7 @@ func cmdInfoAuth(cmd *cobra.Command, args []string) error {
}
emails = args
} else if len(args) == 0 || config.All {
emails, err = authDB.UserIDs()
emails, err = authDB.UserIDs(ctx)
if err != nil {
return err
}
@@ -127,7 +131,7 @@ func cmdInfoAuth(cmd *cobra.Command, args []string) error {
}

for _, email := range emails {
if err := writeAuthInfo(authDB, email, w); err != nil {
if err := writeAuthInfo(ctx, authDB, email, w); err != nil {
emailErrs.Add(err)
continue
}
@@ -139,8 +143,8 @@ func cmdInfoAuth(cmd *cobra.Command, args []string) error {
return errs.Combine(emailErrs.Err(), printErrs.Err())
}

func writeAuthInfo(authDB *certificates.AuthorizationDB, email string, w io.Writer) error {
auths, err := authDB.Get(email)
func writeAuthInfo(ctx context.Context, authDB *certificates.AuthorizationDB, email string, w io.Writer) error {
auths, err := authDB.Get(ctx, email)
if err != nil {
return err
}
@@ -189,6 +193,7 @@ func writeTokenInfo(claimed, open certificates.Authorizations, w io.Writer) erro
}

func cmdExportAuth(cmd *cobra.Command, args []string) error {
ctx := process.Ctx(cmd)
authDB, err := config.Signer.NewAuthDB()
if err != nil {
return err
@@ -202,7 +207,7 @@ func cmdExportAuth(cmd *cobra.Command, args []string) error {
}
emails = args
case len(args) == 0 || config.All:
emails, err = authDB.UserIDs()
emails, err = authDB.UserIDs(ctx)
if err != nil {
return err
}
@@ -232,7 +237,7 @@ func cmdExportAuth(cmd *cobra.Command, args []string) error {
csvWriter := csv.NewWriter(output)

for _, email := range emails {
if err := writeAuthExport(authDB, email, csvWriter); err != nil {
if err := writeAuthExport(ctx, authDB, email, csvWriter); err != nil {
emailErrs.Add(err)
}
}
@@ -241,8 +246,8 @@ func cmdExportAuth(cmd *cobra.Command, args []string) error {
return errs.Combine(emailErrs.Err(), csvErrs.Err())
}

func writeAuthExport(authDB *certificates.AuthorizationDB, email string, w *csv.Writer) error {
auths, err := authDB.Get(email)
func writeAuthExport(ctx context.Context, authDB *certificates.AuthorizationDB, email string, w *csv.Writer) error {
auths, err := authDB.Get(ctx, email)
if err != nil {
return err
}

@@ -12,6 +12,7 @@ import (
"github.com/zeebo/errs"

"storj.io/storj/pkg/certificates"
"storj.io/storj/pkg/process"
)

var (
@@ -44,6 +45,7 @@
)

func cmdExportClaims(cmd *cobra.Command, args []string) (err error) {
ctx := process.Ctx(cmd)
authDB, err := claimsExportCfg.Signer.NewAuthDB()
if err != nil {
return err
@@ -52,7 +54,7 @@ func cmdExportClaims(cmd *cobra.Command, args []string) (err error) {
err = errs.Combine(err, authDB.Close())
}()

auths, err := authDB.List()
auths, err := authDB.List(ctx)
if err != nil {
return err
}
@@ -81,6 +83,7 @@ func cmdExportClaims(cmd *cobra.Command, args []string) (err error) {
}

func cmdDeleteClaim(cmd *cobra.Command, args []string) (err error) {
ctx := process.Ctx(cmd)
authDB, err := claimsDeleteCfg.Signer.NewAuthDB()
if err != nil {
return err
@@ -89,7 +92,7 @@ func cmdDeleteClaim(cmd *cobra.Command, args []string) (err error) {
err = errs.Combine(err, authDB.Close())
}()

if err := authDB.Unclaim(args[0]); err != nil {
if err := authDB.Unclaim(ctx, args[0]); err != nil {
return err
}
return nil

@@ -161,6 +161,7 @@ func cmdRevokeCA(cmd *cobra.Command, args []string) (err error) {
}

func cmdRevokePeerCA(cmd *cobra.Command, args []string) (err error) {
ctx := process.Ctx(cmd)
argLen := len(args)
switch {
case argLen > 0:
@@ -200,7 +201,7 @@ func cmdRevokePeerCA(cmd *cobra.Command, args []string) (err error) {
return err
}

if err = revDB.Put([]*x509.Certificate{ca.Cert, peerCA.Cert}, ext); err != nil {
if err = revDB.Put(ctx, []*x509.Certificate{ca.Cert, peerCA.Cert}, ext); err != nil {
return err
}
return nil

@@ -38,6 +38,7 @@ func init() {
}

func cmdRevocations(cmd *cobra.Command, args []string) error {
ctx := process.Ctx(cmd)
if len(args) > 0 {
revCfg.RevocationDBURL = "bolt://" + filepath.Join(configDir, args[0], "revocations.db")
}
@@ -47,7 +48,7 @@ func cmdRevocations(cmd *cobra.Command, args []string) error {
return err
}

revs, err := revDB.List()
revs, err := revDB.List(ctx)
if err != nil {
return err
}

@@ -38,6 +38,7 @@ func main() {

// Main is the exported CLI executable function
func Main() error {
ctx := context.Background()
encKey := storj.Key(sha256.Sum256([]byte(*key)))
fc, err := infectious.NewFEC(*rsk, *rsn)
if err != nil {
@@ -60,7 +61,7 @@ func Main() error {
for i := 0; i < *rsn; i++ {
go func(i int) {
url := fmt.Sprintf("http://18.184.133.99:%d", 10000+i)
rr, err := ranger.HTTPRanger(url)
rr, err := ranger.HTTPRanger(ctx, url)
result <- indexRangerError{i: i, rr: rr, err: err}
}(i)
}
@@ -81,7 +82,6 @@ func Main() error {
if err != nil {
return err
}
ctx := context.Background()
rr, err = eestream.UnpadSlow(ctx, rr)
if err != nil {
return err

@@ -49,6 +49,8 @@ func NewProjectUsage(projectAccountingDB ProjectAccounting, liveAccounting live.
// expansion factor, so that the uplinks have a raw limit.
// Ref: https://storjlabs.atlassian.net/browse/V3-1274
func (usage *ProjectUsage) ExceedsBandwidthUsage(ctx context.Context, projectID uuid.UUID, bucketID []byte) (_ bool, limit memory.Size, err error) {
defer mon.Task()(&ctx)(&err)

var group errgroup.Group
var bandwidthGetTotal int64
limit = usage.maxAlphaUsage
@@ -86,6 +88,8 @@ func (usage *ProjectUsage) ExceedsBandwidthUsage(ctx context.Context, projectID
// expansion factor, so that the uplinks have a raw limit.
// Ref: https://storjlabs.atlassian.net/browse/V3-1274
func (usage *ProjectUsage) ExceedsStorageUsage(ctx context.Context, projectID uuid.UUID) (_ bool, limit memory.Size, err error) {
defer mon.Task()(&ctx)(&err)

var group errgroup.Group
var inlineTotal, remoteTotal int64
limit = usage.maxAlphaUsage
@@ -116,7 +120,9 @@ func (usage *ProjectUsage) ExceedsStorageUsage(ctx context.Context, projectID uu
return false, limit, nil
}

func (usage *ProjectUsage) getProjectStorageTotals(ctx context.Context, projectID uuid.UUID) (int64, int64, error) {
func (usage *ProjectUsage) getProjectStorageTotals(ctx context.Context, projectID uuid.UUID) (inline int64, remote int64, err error) {
defer mon.Task()(&ctx)(&err)

lastCountInline, lastCountRemote, err := usage.projectAccountingDB.GetStorageTotals(ctx, projectID)
if err != nil {
return 0, 0, err
@@ -131,6 +137,7 @@ func (usage *ProjectUsage) getProjectStorageTotals(ctx context.Context, projectI
// AddProjectStorageUsage lets the live accounting know that the given
// project has just added inlineSpaceUsed bytes of inline space usage
// and remoteSpaceUsed bytes of remote space usage.
func (usage *ProjectUsage) AddProjectStorageUsage(ctx context.Context, projectID uuid.UUID, inlineSpaceUsed, remoteSpaceUsed int64) error {
func (usage *ProjectUsage) AddProjectStorageUsage(ctx context.Context, projectID uuid.UUID, inlineSpaceUsed, remoteSpaceUsed int64) (err error) {
defer mon.Task()(&ctx)(&err)
return usage.liveAccounting.AddProjectStorageUsage(ctx, projectID, inlineSpaceUsed, remoteSpaceUsed)
}

@@ -42,8 +42,8 @@ func New(logger *zap.Logger, sdb accounting.StoragenodeAccounting, interval time

// Run the Rollup loop
func (r *Service) Run(ctx context.Context) (err error) {
r.logger.Info("Rollup service starting up")
defer mon.Task()(&ctx)(&err)
r.logger.Info("Rollup service starting up")
for {
err = r.Rollup(ctx)
if err != nil {
@@ -58,7 +58,8 @@ func (r *Service) Run(ctx context.Context) (err error) {
}

// Rollup aggregates storage and bandwidth amounts for the time interval
func (r *Service) Rollup(ctx context.Context) error {
func (r *Service) Rollup(ctx context.Context) (err error) {
defer mon.Task()(&ctx)(&err)
// only Rollup new things - get LastRollup
lastRollup, err := r.sdb.LastTimestamp(ctx, accounting.LastRollup)
if err != nil {
@@ -101,6 +102,7 @@ func (r *Service) Rollup(ctx context.Context) error {

// RollupStorage rolls up storage tally, modifies rollupStats map
func (r *Service) RollupStorage(ctx context.Context, lastRollup time.Time, rollupStats accounting.RollupStats) (latestTally time.Time, err error) {
defer mon.Task()(&ctx)(&err)
tallies, err := r.sdb.GetTalliesSince(ctx, lastRollup)
if err != nil {
return time.Now(), Error.Wrap(err)
@@ -133,7 +135,8 @@ func (r *Service) RollupStorage(ctx context.Context, lastRollup time.Time, rollu
}

// RollupBW aggregates the bandwidth rollups, modifies rollupStats map
func (r *Service) RollupBW(ctx context.Context, lastRollup time.Time, rollupStats accounting.RollupStats) error {
func (r *Service) RollupBW(ctx context.Context, lastRollup time.Time, rollupStats accounting.RollupStats) (err error) {
defer mon.Task()(&ctx)(&err)
var latestTally time.Time
bws, err := r.sdb.GetBandwidthSince(ctx, lastRollup.UTC())
if err != nil {

@@ -53,8 +53,8 @@ func New(logger *zap.Logger, sdb accounting.StoragenodeAccounting, pdb accountin

// Run the tally service loop
func (t *Service) Run(ctx context.Context) (err error) {
t.logger.Info("Tally service starting up")
defer mon.Task()(&ctx)(&err)
t.logger.Info("Tally service starting up")

for {
if err = t.Tally(ctx); err != nil {
@@ -69,7 +69,8 @@ func (t *Service) Run(ctx context.Context) (err error) {
}

// Tally calculates data-at-rest usage once
func (t *Service) Tally(ctx context.Context) error {
func (t *Service) Tally(ctx context.Context) (err error) {
defer mon.Task()(&ctx)(&err)
// The live accounting store will only keep a delta to space used relative
// to the latest tally. Since a new tally is beginning, we will zero it out
// now. There is a window between this call and the point where the tally DB

@@ -44,6 +44,8 @@ func NewCursor(metainfo *metainfo.Service) *Cursor {

// NextStripe returns a random stripe to be audited. "more" is true except when we have completed iterating over metainfo. It can be disregarded if there is an error or stripe returned
func (cursor *Cursor) NextStripe(ctx context.Context) (stripe *Stripe, more bool, err error) {
defer mon.Task()(&ctx)(&err)

cursor.mutex.Lock()
defer cursor.mutex.Unlock()

@@ -62,7 +64,7 @@ func (cursor *Cursor) NextStripe(ctx context.Context) (stripe *Stripe, more bool
cursor.lastPath = pointerItems[len(pointerItems)-1].Path
}

pointer, path, err := cursor.getRandomValidPointer(pointerItems)
pointer, path, err := cursor.getRandomValidPointer(ctx, pointerItems)
if err != nil {
return nil, more, err
}
@@ -70,7 +72,7 @@ func (cursor *Cursor) NextStripe(ctx context.Context) (stripe *Stripe, more bool
return nil, more, nil
}

index, err := getRandomStripe(pointer)
index, err := getRandomStripe(ctx, pointer)
if err != nil {
return nil, more, err
}
@@ -82,7 +84,8 @@ func (cursor *Cursor) NextStripe(ctx context.Context) (stripe *Stripe, more bool
}, more, nil
}

func getRandomStripe(pointer *pb.Pointer) (index int64, err error) {
func getRandomStripe(ctx context.Context, pointer *pb.Pointer) (index int64, err error) {
defer mon.Task()(&ctx)(&err)
redundancy, err := eestream.NewRedundancyStrategyFromProto(pointer.GetRemote().GetRedundancy())
if err != nil {
return 0, err
@@ -102,7 +105,8 @@ func getRandomStripe(pointer *pb.Pointer) (index int64, err error) {
}

// getRandomValidPointer attempts to get a random remote pointer from a list. If it sees expired pointers in the process of looking, deletes them
func (cursor *Cursor) getRandomValidPointer(pointerItems []*pb.ListResponse_Item) (pointer *pb.Pointer, path storj.Path, err error) {
func (cursor *Cursor) getRandomValidPointer(ctx context.Context, pointerItems []*pb.ListResponse_Item) (pointer *pb.Pointer, path storj.Path, err error) {
defer mon.Task()(&ctx)(&err)
var src cryptoSource
rnd := rand.New(src)
errGroup := new(errs.Group)

@@ -39,6 +39,7 @@ func NewReporter(overlay *overlay.Cache, containment Containment, maxRetries int

// RecordAudits saves audit details to overlay
func (reporter *Reporter) RecordAudits(ctx context.Context, req *Report) (failed *Report, err error) {
defer mon.Task()(&ctx)(&err)
if req == nil {
return nil, nil
}
@@ -100,6 +101,7 @@ func (reporter *Reporter) RecordAudits(ctx context.Context, req *Report) (failed

// recordAuditFailStatus updates nodeIDs in overlay with isup=true, auditsuccess=false
func (reporter *Reporter) recordAuditFailStatus(ctx context.Context, failedAuditNodeIDs storj.NodeIDList) (failed storj.NodeIDList, err error) {
defer mon.Task()(&ctx)(&err)
var errlist errs.Group
for _, nodeID := range failedAuditNodeIDs {
_, err := reporter.overlay.UpdateStats(ctx, &overlay.UpdateRequest{
@@ -127,6 +129,7 @@ func (reporter *Reporter) recordAuditFailStatus(ctx context.Context, failedAudit

// recordOfflineStatus updates nodeIDs in overlay with isup=false
func (reporter *Reporter) recordOfflineStatus(ctx context.Context, offlineNodeIDs storj.NodeIDList) (failed storj.NodeIDList, err error) {
defer mon.Task()(&ctx)(&err)
var errlist errs.Group
for _, nodeID := range offlineNodeIDs {
_, err := reporter.overlay.UpdateUptime(ctx, nodeID, false)
@@ -143,6 +146,7 @@ func (reporter *Reporter) recordOfflineStatus(ctx context.Context, offlineNodeID

// recordAuditSuccessStatus updates nodeIDs in overlay with isup=true, auditsuccess=true
func (reporter *Reporter) recordAuditSuccessStatus(ctx context.Context, successNodeIDs storj.NodeIDList) (failed storj.NodeIDList, err error) {
defer mon.Task()(&ctx)(&err)
var errlist errs.Group
for _, nodeID := range successNodeIDs {
_, err := reporter.overlay.UpdateStats(ctx, &overlay.UpdateRequest{
@@ -170,6 +174,7 @@ func (reporter *Reporter) recordAuditSuccessStatus(ctx context.Context, successN

// recordPendingAudits updates the containment status of nodes with pending audits
func (reporter *Reporter) recordPendingAudits(ctx context.Context, pendingAudits []*PendingAudit) (failed []*PendingAudit, err error) {
defer mon.Task()(&ctx)(&err)
var errlist errs.Group
for _, pendingAudit := range pendingAudits {
if pendingAudit.ReverifyCount < reporter.maxReverifyCount {

@@ -79,7 +79,8 @@ func (service *Service) Close() error {
}

// process picks a random stripe and verifies correctness
func (service *Service) process(ctx context.Context) error {
func (service *Service) process(ctx context.Context) (err error) {
defer mon.Task()(&ctx)(&err)
var stripe *Stripe
for {
s, more, err := service.Cursor.NextStripe(ctx)

@@ -157,7 +157,7 @@ func (verifier *Verifier) Verify(ctx context.Context, stripe *Stripe, skip map[s
mon.FloatVal("audit_successful_percentage").Observe(successfulPercentage)
mon.FloatVal("audit_failed_percentage").Observe(failedPercentage)

pendingAudits, err := createPendingAudits(containedNodes, correctedShares, stripe)
pendingAudits, err := createPendingAudits(ctx, containedNodes, correctedShares, stripe)
if err != nil {
return &Report{
Successes: successNodes,
@@ -392,6 +392,7 @@ func makeCopies(ctx context.Context, originals map[int]Share) (copies []infectio

// getSuccessNodes uses the failed nodes, offline nodes and contained nodes arrays to determine which nodes passed the audit
func getSuccessNodes(ctx context.Context, nodes map[int]storj.NodeID, failedNodes, offlineNodes storj.NodeIDList, containedNodes map[int]storj.NodeID) (successNodes storj.NodeIDList) {
defer mon.Task()(&ctx)(nil)
fails := make(map[storj.NodeID]bool)
for _, fail := range failedNodes {
fails[fail] = true
@@ -421,7 +422,8 @@ func createBucketID(path storj.Path) []byte {
return []byte(storj.JoinPaths(comps[0], comps[2]))
}

func createPendingAudits(containedNodes map[int]storj.NodeID, correctedShares []infectious.Share, stripe *Stripe) ([]*PendingAudit, error) {
func createPendingAudits(ctx context.Context, containedNodes map[int]storj.NodeID, correctedShares []infectious.Share, stripe *Stripe) (_ []*PendingAudit, err error) {
defer mon.Task()(&ctx)(&err)
if len(containedNodes) > 0 {
return nil, nil
}
@@ -436,7 +438,7 @@ func createPendingAudits(containedNodes map[int]storj.NodeID, correctedShares []
return nil, Error.Wrap(err)
}

stripeData, err := rebuildStripe(fec, correctedShares, int(shareSize))
stripeData, err := rebuildStripe(ctx, fec, correctedShares, int(shareSize))
if err != nil {
return nil, Error.Wrap(err)
}
@@ -460,9 +462,10 @@ func createPendingAudits(containedNodes map[int]storj.NodeID, correctedShares []
return pendingAudits, nil
}

func rebuildStripe(fec *infectious.FEC, corrected []infectious.Share, shareSize int) ([]byte, error) {
func rebuildStripe(ctx context.Context, fec *infectious.FEC, corrected []infectious.Share, shareSize int) (_ []byte, err error) {
defer mon.Task()(&ctx)(&err)
stripe := make([]byte, fec.Required()*shareSize)
err := fec.Rebuild(corrected, func(share infectious.Share) {
err = fec.Rebuild(corrected, func(share infectious.Share) {
copy(stripe[share.Number*shareSize:], share.Data)
})
if err != nil {

@@ -6,6 +6,12 @@ package auth
import (
"context"
"crypto/subtle"

monkit "gopkg.in/spacemonkeygo/monkit.v2"
)

var (
mon = monkit.Package()
)

// The key type is unexported to prevent collisions with context keys defined in
@@ -27,7 +33,8 @@ func GetAPIKey(ctx context.Context) ([]byte, bool) {
}

// ValidateAPIKey compares the context api key with the key passed in as an argument
func ValidateAPIKey(ctx context.Context, actualKey []byte) error {
func ValidateAPIKey(ctx context.Context, actualKey []byte) (err error) {
defer mon.Task()(&ctx)(&err)
expectedKey, ok := GetAPIKey(ctx)
if !ok {
return Error.New("Could not get api key from context")

@@ -4,6 +4,8 @@
package auth

import (
"context"

"github.com/gogo/protobuf/proto"
"github.com/zeebo/errs"

@@ -68,7 +70,9 @@ func SignMessage(msg SignableMessage, id identity.FullIdentity) error {
}

//VerifyMsg checks the crypto-related aspects of signed message
func VerifyMsg(msg SignableMessage, signer storj.NodeID) error {
func VerifyMsg(msg SignableMessage, signer storj.NodeID) (err error) {
ctx := context.TODO()
defer mon.Task()(&ctx)(&err)
//setup
switch {
case msg == nil:

@@ -4,13 +4,18 @@
package signing

import (
"context"
"crypto"

monkit "gopkg.in/spacemonkeygo/monkit.v2"

"storj.io/storj/pkg/identity"
"storj.io/storj/pkg/pkcrypto"
"storj.io/storj/pkg/storj"
)

var mon = monkit.Package()

// PrivateKey implements a signer and signee using a crypto.PrivateKey.
type PrivateKey struct {
Self storj.NodeID
@@ -34,7 +39,9 @@ func (private *PrivateKey) HashAndSign(data []byte) ([]byte, error) {
}

// HashAndVerifySignature hashes the data and verifies that the signature belongs to the PrivateKey.
func (private *PrivateKey) HashAndVerifySignature(data, signature []byte) error {
func (private *PrivateKey) HashAndVerifySignature(data, signature []byte) (err error) {
ctx := context.TODO()
defer mon.Task()(&ctx)(&err)
pub := pkcrypto.PublicKeyFromPrivate(private.Key)
return pkcrypto.HashAndVerifySignature(pub, data, signature)
}
@@ -57,6 +64,8 @@ func SigneeFromPeerIdentity(identity *identity.PeerIdentity) Signee {
func (public *PublicKey) ID() storj.NodeID { return public.Self }

// HashAndVerifySignature hashes the data and verifies that the signature belongs to the PublicKey.
func (public *PublicKey) HashAndVerifySignature(data, signature []byte) error {
func (public *PublicKey) HashAndVerifySignature(data, signature []byte) (err error) {
ctx := context.TODO()
defer mon.Task()(&ctx)(&err)
return pkcrypto.HashAndVerifySignature(public.Key, data, signature)
}

@@ -4,6 +4,8 @@
package signing

import (
"context"

"storj.io/storj/pkg/pb"
"storj.io/storj/pkg/storj"
)
@@ -15,7 +17,9 @@ type Signee interface {
}

// VerifyOrderLimitSignature verifies that the signature inside order limit belongs to the satellite.
func VerifyOrderLimitSignature(satellite Signee, signed *pb.OrderLimit2) error {
func VerifyOrderLimitSignature(satellite Signee, signed *pb.OrderLimit2) (err error) {
ctx := context.TODO()
defer mon.Task()(&ctx)(&err)
bytes, err := EncodeOrderLimit(signed)
if err != nil {
return Error.Wrap(err)
@@ -25,7 +29,9 @@ func VerifyOrderLimitSignature(satellite Signee, signed *pb.OrderLimit2) error {
}

// VerifyOrderSignature verifies that the signature inside order belongs to the uplink.
func VerifyOrderSignature(uplink Signee, signed *pb.Order2) error {
func VerifyOrderSignature(uplink Signee, signed *pb.Order2) (err error) {
ctx := context.TODO()
defer mon.Task()(&ctx)(&err)
bytes, err := EncodeOrder(signed)
if err != nil {
return Error.Wrap(err)
@@ -35,7 +41,9 @@ func VerifyOrderSignature(uplink Signee, signed *pb.Order2) error {
}

// VerifyPieceHashSignature verifies that the signature inside piece hash belongs to the signer, which is either uplink or storage node.
func VerifyPieceHashSignature(signee Signee, signed *pb.PieceHash) error {
func VerifyPieceHashSignature(signee Signee, signed *pb.PieceHash) (err error) {
ctx := context.TODO()
defer mon.Task()(&ctx)(&err)
bytes, err := EncodePieceHash(signed)
if err != nil {
return Error.Wrap(err)

@@ -209,7 +209,8 @@ func (s *Server) Settlement(client pb.Bandwidth_SettlementServer) (err error) {
}
}

func (s *Server) verifySignature(ctx context.Context, rba *pb.Order) error {
func (s *Server) verifySignature(ctx context.Context, rba *pb.Order) (err error) {
defer mon.Task()(&ctx)(&err)
pba := rba.GetPayerAllocation()

// Get renter's public key from uplink agreement db

@@ -195,7 +195,8 @@ func (c *Client) Close() error {

// Sign claims an authorization using the token string and returns a signed
// copy of the client's CA certificate
func (c *Client) Sign(ctx context.Context, tokenStr string) ([][]byte, error) {
func (c *Client) Sign(ctx context.Context, tokenStr string) (_ [][]byte, err error) {
defer mon.Task()(&ctx)(&err)
res, err := c.client.Sign(ctx, &pb.SigningRequest{
AuthToken: tokenStr,
Timestamp: time.Now().Unix(),
@@ -209,7 +210,8 @@ func (c *Client) Sign(ctx context.Context, tokenStr string) ([][]byte, error) {

// Sign signs the CA certificate of the remote peer's identity with the signer's certificate.
// Returns a certificate chain consisting of the remote peer's CA followed by the signer's chain.
func (c CertificateSigner) Sign(ctx context.Context, req *pb.SigningRequest) (*pb.SigningResponse, error) {
func (c CertificateSigner) Sign(ctx context.Context, req *pb.SigningRequest) (_ *pb.SigningResponse, err error) {
defer mon.Task()(&ctx)(&err)
grpcPeer, ok := peer.FromContext(ctx)
if !ok {
// TODO: better error
@@ -228,7 +230,7 @@ func (c CertificateSigner) Sign(ctx context.Context, req *pb.SigningRequest) (*p

signedChainBytes := [][]byte{signedPeerCA.Raw, c.signer.Cert.Raw}
signedChainBytes = append(signedChainBytes, c.signer.RawRestChain()...)
err = c.authDB.Claim(&ClaimOpts{
err = c.authDB.Claim(ctx, &ClaimOpts{
Req: req,
Peer: grpcPeer,
ChainBytes: signedChainBytes,
@@ -266,7 +268,8 @@ func (authDB *AuthorizationDB) Close() error {
}

// Create creates a new authorization and adds it to the authorization database.
func (authDB *AuthorizationDB) Create(userID string, count int) (Authorizations, error) {
func (authDB *AuthorizationDB) Create(ctx context.Context, userID string, count int) (_ Authorizations, err error) {
defer mon.Task()(&ctx)(&err)
if len(userID) == 0 {
return nil, ErrAuthorizationDB.New("userID cannot be empty")
}
@@ -290,7 +293,7 @@ func (authDB *AuthorizationDB) Create(userID string, count int) (Authorizations,
return nil, ErrAuthorizationDB.Wrap(err)
}

if err := authDB.add(userID, newAuths); err != nil {
if err := authDB.add(ctx, userID, newAuths); err != nil {
return nil, err
}

@@ -298,7 +301,8 @@ func (authDB *AuthorizationDB) Create(userID string, count int) (Authorizations,
}

// Get retrieves authorizations by user ID.
func (authDB *AuthorizationDB) Get(userID string) (Authorizations, error) {
func (authDB *AuthorizationDB) Get(ctx context.Context, userID string) (_ Authorizations, err error) {
defer mon.Task()(&ctx)(&err)
authsBytes, err := authDB.DB.Get(storage.Key(userID))
if err != nil && !storage.ErrKeyNotFound.Has(err) {
return nil, ErrAuthorizationDB.Wrap(err)
@@ -315,7 +319,8 @@ func (authDB *AuthorizationDB) Get(userID string) (Authorizations, error) {
}

// UserIDs returns a list of all userIDs present in the authorization database.
func (authDB *AuthorizationDB) UserIDs() (userIDs []string, err error) {
func (authDB *AuthorizationDB) UserIDs(ctx context.Context) (userIDs []string, err error) {
defer mon.Task()(&ctx)(&err)
err = authDB.DB.Iterate(storage.IterateOptions{
Recurse: true,
}, func(iterator storage.Iterator) error {
@@ -329,7 +334,8 @@ func (authDB *AuthorizationDB) UserIDs() (userIDs []string, err error) {
}

// List returns all authorizations in the database.
func (authDB *AuthorizationDB) List() (auths Authorizations, err error) {
func (authDB *AuthorizationDB) List(ctx context.Context) (auths Authorizations, err error) {
defer mon.Task()(&ctx)(&err)
err = authDB.DB.Iterate(storage.IterateOptions{
Recurse: true,
}, func(iterator storage.Iterator) error {
@@ -348,7 +354,8 @@ func (authDB *AuthorizationDB) List() (auths Authorizations, err error) {
}

// Claim marks an authorization as claimed and records claim information.
func (authDB *AuthorizationDB) Claim(opts *ClaimOpts) error {
func (authDB *AuthorizationDB) Claim(ctx context.Context, opts *ClaimOpts) (err error) {
defer mon.Task()(&ctx)(&err)
now := time.Now().Unix()
if !(now-MaxClaimDelaySeconds < opts.Req.Timestamp) ||
!(opts.Req.Timestamp < now+MaxClaimDelaySeconds) {
@@ -374,7 +381,7 @@ func (authDB *AuthorizationDB) Claim(opts *ClaimOpts) error {
return err
}

auths, err := authDB.Get(token.UserID)
auths, err := authDB.Get(ctx, token.UserID)
if err != nil {
return err
}
@@ -394,7 +401,7 @@ func (authDB *AuthorizationDB) Claim(opts *ClaimOpts) error {
SignedChainBytes: opts.ChainBytes,
},
}
if err := authDB.put(token.UserID, auths); err != nil {
if err := authDB.put(ctx, token.UserID, auths); err != nil {
return err
}
break
@@ -404,13 +411,14 @@ func (authDB *AuthorizationDB) Claim(opts *ClaimOpts) error {
}

// Unclaim removes a claim from an authorization.
func (authDB *AuthorizationDB) Unclaim(authToken string) error {
func (authDB *AuthorizationDB) Unclaim(ctx context.Context, authToken string) (err error) {
defer mon.Task()(&ctx)(&err)
token, err := ParseToken(authToken)
if err != nil {
return err
}

auths, err := authDB.Get(token.UserID)
auths, err := authDB.Get(ctx, token.UserID)
if err != nil {
return err
}
@@ -418,23 +426,25 @@ func (authDB *AuthorizationDB) Unclaim(authToken string) error {
for i, auth := range auths {
if auth.Token.Equal(token) {
auths[i].Claim = nil
return authDB.put(token.UserID, auths)
return authDB.put(ctx, token.UserID, auths)
}
}
return errs.New("token not found in authorizations DB")
}

func (authDB *AuthorizationDB) add(userID string, newAuths Authorizations) error {
auths, err := authDB.Get(userID)
func (authDB *AuthorizationDB) add(ctx context.Context, userID string, newAuths Authorizations) (err error) {
defer mon.Task()(&ctx)(&err)
auths, err := authDB.Get(ctx, userID)
if err != nil {
return err
}

auths = append(auths, newAuths...)
return authDB.put(userID, auths)
return authDB.put(ctx, userID, auths)
}

func (authDB *AuthorizationDB) put(userID string, auths Authorizations) error {
func (authDB *AuthorizationDB) put(ctx context.Context, userID string, auths Authorizations) (err error) {
defer mon.Task()(&ctx)(&err)
authsBytes, err := auths.Marshal()
if err != nil {
return ErrAuthorizationDB.Wrap(err)

@@ -120,7 +120,7 @@ func TestAuthorizationDB_Create(t *testing.T) {
require.Len(t, existingAuths, testCase.startCount)
}

expectedAuths, err := authDB.Create(testCase.email, testCase.incCount)
expectedAuths, err := authDB.Create(ctx, testCase.email, testCase.incCount)
if testCase.errClass != nil {
assert.True(t, testCase.errClass.Has(err))
}
@@ -185,7 +185,7 @@ func TestAuthorizationDB_Get(t *testing.T) {
for _, c := range cases {
testCase := c
t.Run(testCase.testID, func(t *testing.T) {
auths, err := authDB.Get(testCase.email)
auths, err := authDB.Get(ctx, testCase.email)
require.NoError(t, err)
if testCase.result != nil {
assert.NotEmpty(t, auths)
@@ -207,7 +207,7 @@ func TestAuthorizationDB_Claim_Valid(t *testing.T) {

userID := "user@example.com"

auths, err := authDB.Create(userID, 1)
auths, err := authDB.Create(ctx, userID, 1)
require.NoError(t, err)
require.NotEmpty(t, auths)

@@ -236,7 +236,7 @@ func TestAuthorizationDB_Claim_Valid(t *testing.T) {
difficulty, err := ident.ID.Difficulty()
require.NoError(t, err)

err = authDB.Claim(&ClaimOpts{
err = authDB.Claim(ctx, &ClaimOpts{
Req: req,
Peer: grpcPeer,
ChainBytes: [][]byte{ident.CA.Raw},
@@ -244,7 +244,7 @@ func TestAuthorizationDB_Claim_Valid(t *testing.T) {
})
require.NoError(t, err)

updatedAuths, err := authDB.Get(userID)
updatedAuths, err := authDB.Get(ctx, userID)
require.NoError(t, err)
require.NotEmpty(t, updatedAuths)
assert.Equal(t, auths[0].Token, updatedAuths[0].Token)
@@ -281,7 +281,7 @@ func TestAuthorizationDB_Claim_Invalid(t *testing.T) {
Leaf: ident1.Leaf,
}

auths, err := authDB.Create(userID, 2)
auths, err := authDB.Create(ctx, userID, 2)
require.NoError(t, err)
require.NotEmpty(t, auths)

@@ -293,7 +293,7 @@ func TestAuthorizationDB_Claim_Invalid(t *testing.T) {
Identity: claimedIdent,
SignedChainBytes: [][]byte{claimedIdent.CA.Raw},
}
err = authDB.put(userID, auths)
err = authDB.put(ctx, userID, auths)
require.NoError(t, err)

ident2, err := testidentity.NewTestIdentity(ctx)
@@ -317,7 +317,7 @@ func TestAuthorizationDB_Claim_Invalid(t *testing.T) {
require.NoError(t, err)

t.Run("double claim", func(t *testing.T) {
err = authDB.Claim(&ClaimOpts{
err = authDB.Claim(ctx, &ClaimOpts{
Req: &pb.SigningRequest{
AuthToken: auths[claimedIndex].Token.String(),
Timestamp: time.Now().Unix(),
@@ -332,7 +332,7 @@ func TestAuthorizationDB_Claim_Invalid(t *testing.T) {
assert.NotContains(t, err.Error(), auths[claimedIndex].Token.String())
}

updatedAuths, err := authDB.Get(userID)
updatedAuths, err := authDB.Get(ctx, userID)
require.NoError(t, err)
require.NotEmpty(t, updatedAuths)

@@ -345,7 +345,7 @@ func TestAuthorizationDB_Claim_Invalid(t *testing.T) {
})

t.Run("invalid timestamp", func(t *testing.T) {
err = authDB.Claim(&ClaimOpts{
err = authDB.Claim(ctx, &ClaimOpts{
Req: &pb.SigningRequest{
AuthToken: auths[unclaimedIndex].Token.String(),
// NB: 1 day ago
@@ -361,7 +361,7 @@ func TestAuthorizationDB_Claim_Invalid(t *testing.T) {
assert.NotContains(t, err.Error(), auths[unclaimedIndex].Token.String())
}

updatedAuths, err := authDB.Get(userID)
updatedAuths, err := authDB.Get(ctx, userID)
require.NoError(t, err)
require.NotEmpty(t, updatedAuths)

@@ -370,7 +370,7 @@ func TestAuthorizationDB_Claim_Invalid(t *testing.T) {
})

t.Run("invalid difficulty", func(t *testing.T) {
err = authDB.Claim(&ClaimOpts{
err = authDB.Claim(ctx, &ClaimOpts{
Req: &pb.SigningRequest{
AuthToken: auths[unclaimedIndex].Token.String(),
Timestamp: time.Now().Unix(),
@@ -385,7 +385,7 @@ func TestAuthorizationDB_Claim_Invalid(t *testing.T) {
assert.NotContains(t, err.Error(), auths[unclaimedIndex].Token.String())
}

updatedAuths, err := authDB.Get(userID)
updatedAuths, err := authDB.Get(ctx, userID)
require.NoError(t, err)
require.NotEmpty(t, updatedAuths)

@@ -476,14 +476,14 @@ func TestAuthorizationDB_Emails(t *testing.T) {

var authErrs errs.Group
for i := 0; i < 5; i++ {
_, err := authDB.Create(fmt.Sprintf("user%d@example.com", i), 1)
_, err := authDB.Create(ctx, fmt.Sprintf("user%d@example.com", i), 1)
if err != nil {
authErrs.Add(err)
}
}
require.NoError(t, authErrs.Err())

userIDs, err := authDB.UserIDs()
userIDs, err := authDB.UserIDs(ctx)
assert.NoError(t, err)
assert.NotEmpty(t, userIDs)
}
@@ -591,7 +591,7 @@ func TestCertificateSigner_Sign_E2E(t *testing.T) {
authDB, err := config.NewAuthDB()
require.NoError(t, err)

auths, err := authDB.Create("user@example.com", 1)
auths, err := authDB.Create(ctx, "user@example.com", 1)
require.NoError(t, err)
require.NotEmpty(t, auths)

@@ -653,7 +653,7 @@ func TestCertificateSigner_Sign_E2E(t *testing.T) {
defer ctx.Check(authDB.Close)
require.NotNil(t, authDB)

updatedAuths, err := authDB.Get(userID)
updatedAuths, err := authDB.Get(ctx, userID)
require.NoError(t, err)
require.NotEmpty(t, updatedAuths)
require.NotNil(t, updatedAuths[0].Claim)
@@ -752,7 +752,7 @@ func TestCertificateSigner_Sign(t *testing.T) {
defer ctx.Check(authDB.Close)
require.NotNil(t, authDB)

auths, err := authDB.Create(userID, 1)
auths, err := authDB.Create(ctx, userID, 1)
require.NoError(t, err)
require.NotEmpty(t, auths)

@@ -791,7 +791,7 @@ func TestCertificateSigner_Sign(t *testing.T) {
err = signedChain[0].CheckSignatureFrom(signer.Cert)
require.NoError(t, err)

updatedAuths, err := authDB.Get(userID)
updatedAuths, err := authDB.Get(ctx, userID)
require.NoError(t, err)
require.NotEmpty(t, updatedAuths)
require.NotNil(t, updatedAuths[0].Claim)

@@ -37,7 +37,8 @@ type CertServerConfig struct {
}

// Sign submits a certificate signing request given the config
func (c CertClientConfig) Sign(ctx context.Context, ident *identity.FullIdentity, authToken string) ([][]byte, error) {
func (c CertClientConfig) Sign(ctx context.Context, ident *identity.FullIdentity, authToken string) (_ [][]byte, err error) {
defer mon.Task()(&ctx)(&err)
tlsOpts, err := tlsopts.NewOptions(ident, c.TLS)
if err != nil {
return nil, err

@@ -158,6 +158,7 @@ func contains(a []string, x string) bool {
}

func (checker *Checker) updateSegmentStatus(ctx context.Context, pointer *pb.Pointer, path string, monStats *durabilityStats) (err error) {
defer mon.Task()(&ctx)(&err)
remote := pointer.GetRemote()
if remote == nil {
return nil

@@ -6,9 +6,15 @@ package irreparable
import (
"context"

monkit "gopkg.in/spacemonkeygo/monkit.v2"

"storj.io/storj/pkg/pb"
)

var (
mon = monkit.Package()
)

// Inspector is a gRPC service for inspecting irreparable internals
type Inspector struct {
irrdb DB
@@ -20,7 +26,8 @@ func NewInspector(irrdb DB) *Inspector {
}

// ListIrreparableSegments returns a number of irreparable segments by limit and offset
func (srv *Inspector) ListIrreparableSegments(ctx context.Context, req *pb.ListIrreparableSegmentsRequest) (*pb.ListIrreparableSegmentsResponse, error) {
func (srv *Inspector) ListIrreparableSegments(ctx context.Context, req *pb.ListIrreparableSegmentsRequest) (_ *pb.ListIrreparableSegmentsResponse, err error) {
defer mon.Task()(&ctx)(&err)
segments, err := srv.irrdb.GetLimited(ctx, int(req.GetLimit()), int64(req.GetOffset()))
if err != nil {
return nil, err

@@ -16,6 +16,7 @@ import (
"storj.io/storj/pkg/datarepair/queue"
"storj.io/storj/pkg/identity"
"storj.io/storj/pkg/overlay"
"storj.io/storj/pkg/pb"
ecclient "storj.io/storj/pkg/storage/ec"
"storj.io/storj/pkg/storage/segments"
"storj.io/storj/pkg/storj"
@@ -113,7 +114,8 @@ func (service *Service) Run(ctx context.Context) (err error) {
}

// process picks items from repair queue and spawns a repair worker
func (service *Service) process(ctx context.Context) error {
func (service *Service) process(ctx context.Context) (err error) {
defer mon.Task()(&ctx)(&err)
for {
seg, err := service.queue.Select(ctx)
zap.L().Info("Dequeued segment from repair queue", zap.String("segment", seg.GetPath()))
@@ -125,16 +127,25 @@ func (service *Service) process(ctx context.Context) error {
}

service.Limiter.Go(ctx, func() {
zap.L().Info("Limiter running repair on segment", zap.String("segment", seg.GetPath()))
err := service.repairer.Repair(ctx, seg.GetPath())
err := service.worker(ctx, seg)
if err != nil {
zap.L().Error("repair failed", zap.Error(err))
}
zap.L().Info("Deleting segment from repair queue", zap.String("segment", seg.GetPath()))
err = service.queue.Delete(ctx, seg)
if err != nil {
zap.L().Error("repair delete failed", zap.Error(err))
zap.L().Error("repair failed:", zap.Error(err))
}
})
}
}

func (service *Service) worker(ctx context.Context, seg *pb.InjuredSegment) (err error) {
defer mon.Task()(&ctx)(&err)
zap.L().Info("Limiter running repair on segment", zap.String("segment", seg.GetPath()))
err = service.repairer.Repair(ctx, seg.GetPath())
if err != nil {
return Error.New("repair failed: %v", err)
}
zap.L().Info("Deleting segment from repair queue", zap.String("segment", seg.GetPath()))
err = service.queue.Delete(ctx, seg)
if err != nil {
return Error.New("repair delete failed: %v", err)
}
return nil
}

@@ -1,24 +0,0 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.

package discovery

import (
"context"

"storj.io/storj/pkg/pb"
"storj.io/storj/pkg/storj"
)

// GracefulDisconnect is called when a node alerts the network they're
// going offline for a short period of time with intent to come back
func (d *Discovery) GracefulDisconnect(id storj.NodeID) {
}

// ConnFailure implements the Transport Observer interface `ConnFailure` function
func (d *Discovery) ConnFailure(ctx context.Context, node *pb.Node, err error) {
}

// ConnSuccess implements the Transport Observer interface `ConnSuccess` function
func (d *Discovery) ConnSuccess(ctx context.Context, node *pb.Node) {
}

@@ -11,6 +11,7 @@ import (
"github.com/zeebo/errs"
"go.uber.org/zap"
"golang.org/x/sync/errgroup"
monkit "gopkg.in/spacemonkeygo/monkit.v2"

"storj.io/storj/internal/sync2"
"storj.io/storj/pkg/kademlia"
@@ -19,7 +20,7 @@ import (
)

var (
// mon = monkit.Package() //TODO: check whether this needs monitoring
mon = monkit.Package()

// Error is a general error class of this package
Error = errs.Class("discovery error")
@@ -75,7 +76,9 @@ func (discovery *Discovery) Close() error {
}

// Run runs the discovery service
func (discovery *Discovery) Run(ctx context.Context) error {
func (discovery *Discovery) Run(ctx context.Context) (err error) {
defer mon.Task()(&ctx)(&err)

var group errgroup.Group
discovery.Refresh.Start(ctx, &group, func(ctx context.Context) error {
err := discovery.refresh(ctx)
@@ -104,7 +107,9 @@ func (discovery *Discovery) Run(ctx context.Context) error {
// refresh updates the cache db with the current DHT.
// We currently do not penalize nodes that are unresponsive,
// but should in the future.
func (discovery *Discovery) refresh(ctx context.Context) error {
func (discovery *Discovery) refresh(ctx context.Context) (err error) {
defer mon.Task()(&ctx)(&err)

nodes := discovery.kad.Seen()
for _, v := range nodes {
if err := discovery.cache.Put(ctx, v.Id, *v); err != nil {
@@ -167,7 +172,9 @@ func (discovery *Discovery) refresh(ctx context.Context) error {
// graveyard attempts to ping all nodes in the Seen() map from Kademlia and adds them to the cache
// if they respond. This is an attempt to resurrect nodes that may have gone offline in the last hour
// and were removed from the cache due to an unsuccessful response.
func (discovery *Discovery) searchGraveyard(ctx context.Context) error {
func (discovery *Discovery) searchGraveyard(ctx context.Context) (err error) {
defer mon.Task()(&ctx)(&err)

seen := discovery.kad.Seen()

var errors errs.Group
@@ -203,7 +210,9 @@ func (discovery *Discovery) searchGraveyard(ctx context.Context) error {
}

// Discovery runs lookups for random node ID's to find new nodes in the network
func (discovery *Discovery) discover(ctx context.Context) error {
func (discovery *Discovery) discover(ctx context.Context) (err error) {
defer mon.Task()(&ctx)(&err)

r, err := randomID()
if err != nil {
return Error.Wrap(err)
@@ -215,12 +224,6 @@ func (discovery *Discovery) discover(ctx context.Context) error {
return nil
}

// Walk iterates over each node in each bucket to traverse the network
func (discovery *Discovery) walk(ctx context.Context) error {
// TODO: This should walk the cache, rather than be a duplicate of refresh
return nil
}

func randomID() (storj.NodeID, error) {
b := make([]byte, 32)
_, err := rand.Read(b)

@@ -40,6 +40,7 @@ type decodedReader struct {
// mbm is the maximum memory (in bytes) to be allocated for read buffers. If
// set to 0, the minimum possible memory will be used.
func DecodeReaders(ctx context.Context, rs map[int]io.ReadCloser, es ErasureScheme, expectedSize int64, mbm int) io.ReadCloser {
defer mon.Task()(&ctx)(nil)
if expectedSize < 0 {
return readcloser.FatalReadCloser(Error.New("negative expected size"))
}
@@ -80,7 +81,7 @@ func (dr *decodedReader) Read(p []byte) (n int, err error) {
return 0, dr.err
}
// read the input buffers of the next stripe - may also decode it
dr.outbuf, dr.err = dr.stripeReader.ReadStripe(dr.currentStripe, dr.outbuf)
dr.outbuf, dr.err = dr.stripeReader.ReadStripe(context.TODO(), dr.currentStripe, dr.outbuf)
if dr.err != nil {
return 0, dr.err
}
@@ -174,7 +175,8 @@ func (dr *decodedRanger) Size() int64 {
return blocks * int64(dr.es.StripeSize())
}

func (dr *decodedRanger) Range(ctx context.Context, offset, length int64) (io.ReadCloser, error) {
func (dr *decodedRanger) Range(ctx context.Context, offset, length int64) (_ io.ReadCloser, err error) {
defer mon.Task()(&ctx)(&err)
// offset and length might not be block-aligned. figure out which
// blocks contain this request
firstBlock, blockCount := encryption.CalcEncompassingBlocks(offset, length, dr.es.StripeSize())
@@ -207,7 +209,7 @@ func (dr *decodedRanger) Range(ctx context.Context, offset, length int64) (io.Re
// decode from all those ranges
r := DecodeReaders(ctx, readers, dr.es, blockCount*int64(dr.es.StripeSize()), dr.mbm)
// offset might start a few bytes in, potentially discard the initial bytes
_, err := io.CopyN(ioutil.Discard, r,
_, err = io.CopyN(ioutil.Discard, r,
offset-firstBlock*int64(dr.es.StripeSize()))
if err != nil {
return nil, Error.Wrap(err)

@@ -126,6 +126,8 @@ type encodedReader struct {
// EncodeReader takes a Reader and a RedundancyStrategy and returns a slice of
// io.ReadClosers.
func EncodeReader(ctx context.Context, r io.Reader, rs RedundancyStrategy) (_ []io.ReadCloser, err error) {
defer mon.Task()(&ctx)(&err)

er := &encodedReader{
rs: rs,
pieces: make(map[int]*encodedPiece, rs.TotalCount()),
@@ -166,7 +168,9 @@ func EncodeReader(ctx context.Context, r io.Reader, rs RedundancyStrategy) (_ []
}

func (er *encodedReader) fillBuffer(ctx context.Context, r io.Reader, w sync2.PipeWriter) {
_, err := sync2.Copy(ctx, w, r)
var err error
defer mon.Task()(&ctx)(&err)
_, err = sync2.Copy(ctx, w, r)
err = w.CloseWithError(err)
if err != nil {
zap.S().Error(err)
@@ -247,7 +251,8 @@ func (er *EncodedRanger) OutputSize() int64 {
}

// Range is like Ranger.Range, but returns a slice of Readers
func (er *EncodedRanger) Range(ctx context.Context, offset, length int64) ([]io.ReadCloser, error) {
func (er *EncodedRanger) Range(ctx context.Context, offset, length int64) (_ []io.ReadCloser, err error) {
defer mon.Task()(&ctx)(&err)
// the offset and length given may not be block-aligned, so let's figure
// out which blocks contain the request.
firstBlock, blockCount := encryption.CalcEncompassingBlocks(

@@ -48,7 +48,8 @@ func Unpad(data ranger.Ranger, padding int) (ranger.Ranger, error) {

// UnpadSlow is like Unpad, but does not require the amount of padding.
// UnpadSlow will have to do extra work to make up for this missing information.
func UnpadSlow(ctx context.Context, data ranger.Ranger) (ranger.Ranger, error) {
func UnpadSlow(ctx context.Context, data ranger.Ranger) (_ ranger.Ranger, err error) {
defer mon.Task()(&ctx)(&err)
r, err := data.Range(ctx, data.Size()-uint32Size, uint32Size)
if err != nil {
return nil, Error.Wrap(err)

@@ -4,6 +4,7 @@
package eestream

import (
"context"
"fmt"
"io"
"sort"
@@ -11,6 +12,11 @@ import (
"sync"

"github.com/vivint/infectious"
monkit "gopkg.in/spacemonkeygo/monkit.v2"
)

var (
mon = monkit.Package()
)

// StripeReader can read and decodes stripes from a set of readers
@@ -82,7 +88,8 @@ func (r *StripeReader) Close() error {

// ReadStripe reads and decodes the num-th stripe and concatenates it to p. The
// return value is the updated byte slice.
func (r *StripeReader) ReadStripe(num int64, p []byte) ([]byte, error) {
func (r *StripeReader) ReadStripe(ctx context.Context, num int64, p []byte) (_ []byte, err error) {
defer mon.Task()(&ctx)(&err)
for i := range r.inmap {
delete(r.inmap, i)
}
@@ -91,7 +98,7 @@ func (r *StripeReader) ReadStripe(num int64, p []byte) ([]byte, error) {
defer r.cond.L.Unlock()

for r.pendingReaders() {
for r.readAvailableShares(num) == 0 {
for r.readAvailableShares(ctx, num) == 0 {
r.cond.Wait()
}
if r.hasEnoughShares() {
@@ -112,7 +119,8 @@ func (r *StripeReader) ReadStripe(num int64, p []byte) ([]byte, error) {
// readAvailableShares reads the available num-th erasure shares from the piece
// buffers without blocking. The return value n is the number of erasure shares
// read.
func (r *StripeReader) readAvailableShares(num int64) (n int) {
func (r *StripeReader) readAvailableShares(ctx context.Context, num int64) (n int) {
defer mon.Task()(&ctx)(nil)
for i, buf := range r.bufs {
if r.inmap[i] != nil || r.errmap[i] != nil {
continue

@@ -9,10 +9,14 @@ import (
"io"
"io/ioutil"

monkit "gopkg.in/spacemonkeygo/monkit.v2"

"storj.io/storj/internal/readcloser"
"storj.io/storj/pkg/ranger"
)

var mon = monkit.Package()

// A Transformer is a data transformation that may change the size of the blocks
// of data it operates on in a deterministic fashion.
type Transformer interface {
@@ -144,7 +148,9 @@ func CalcEncompassingBlocks(offset, length int64, blockSize int) (
return firstBlock, 1 + lastBlock - firstBlock
}

func (t *transformedRanger) Range(ctx context.Context, offset, length int64) (io.ReadCloser, error) {
func (t *transformedRanger) Range(ctx context.Context, offset, length int64) (_ io.ReadCloser, err error) {
defer mon.Task()(&ctx)(&err)

// Range may not have been called for block-aligned offsets and lengths, so
// let's figure out which blocks encompass the request
firstBlock, blockCount := CalcEncompassingBlocks(

@@ -15,6 +15,8 @@ import (
// minDifficulty. No parallelism is used.
func GenerateKey(ctx context.Context, minDifficulty uint16, version storj.IDVersion) (
k crypto.PrivateKey, id storj.NodeID, err error) {
defer mon.Task()(&ctx)(&err)

var d uint16
for {
err = ctx.Err()
@@ -46,7 +48,8 @@ type GenerateCallback func(crypto.PrivateKey, storj.NodeID) (done bool, err erro

// GenerateKeys continues to generate keys until found returns done == false,
// or the ctx is canceled.
func GenerateKeys(ctx context.Context, minDifficulty uint16, concurrency int, version storj.IDVersion, found GenerateCallback) error {
func GenerateKeys(ctx context.Context, minDifficulty uint16, concurrency int, version storj.IDVersion, found GenerateCallback) (err error) {
defer mon.Task()(&ctx)(&err)
ctx, cancel := context.WithCancel(ctx)
defer cancel()
errchan := make(chan error, concurrency)

@@ -4,6 +4,7 @@
package identity

import (
"context"
"crypto/x509"
"crypto/x509/pkix"

@@ -72,7 +73,8 @@ func newRevocationDBRedis(address string) (*RevocationDB, error) {

// Get attempts to retrieve the most recent revocation for the given cert chain
// (the key used in the underlying database is the nodeID of the certificate chain).
func (r RevocationDB) Get(chain []*x509.Certificate) (*extensions.Revocation, error) {
func (r RevocationDB) Get(ctx context.Context, chain []*x509.Certificate) (_ *extensions.Revocation, err error) {
defer mon.Task()(&ctx)(&err)
nodeID, err := NodeIDFromCert(chain[peertls.CAIndex])
if err != nil {
return nil, extensions.ErrRevocation.Wrap(err)
@@ -96,7 +98,8 @@ func (r RevocationDB) Get(chain []*x509.Certificate) (*extensions.Revocation, er
// Put stores the most recent revocation for the given cert chain IF the timestamp
// is newer than the current value (the key used in the underlying database is
// the nodeID of the certificate chain).
func (r RevocationDB) Put(chain []*x509.Certificate, revExt pkix.Extension) error {
func (r RevocationDB) Put(ctx context.Context, chain []*x509.Certificate, revExt pkix.Extension) (err error) {
defer mon.Task()(&ctx)(&err)
ca := chain[peertls.CAIndex]
var rev extensions.Revocation
if err := rev.Unmarshal(revExt.Value); err != nil {
@@ -110,7 +113,7 @@ func (r RevocationDB) Put(chain []*x509.Certificate, revExt pkix.Extension) erro
return err
}

lastRev, err := r.Get(chain)
lastRev, err := r.Get(ctx, chain)
if err != nil {
return err
} else if lastRev != nil && lastRev.Timestamp >= rev.Timestamp {
@@ -128,7 +131,8 @@ func (r RevocationDB) Put(chain []*x509.Certificate, revExt pkix.Extension) erro
}

// List lists all revocations in the store
func (r RevocationDB) List() (revs []*extensions.Revocation, err error) {
func (r RevocationDB) List(ctx context.Context) (revs []*extensions.Revocation, err error) {
defer mon.Task()(&ctx)(&err)
keys, err := r.DB.List([]byte{}, 0)
if err != nil {
return nil, extensions.ErrRevocationDB.Wrap(err)

@ -37,7 +37,7 @@ func TestRevocationDB_Get(t *testing.T) {
|
||||
|
||||
{
|
||||
t.Log("missing key")
|
||||
rev, err = revDB.Get(chain)
|
||||
rev, err = revDB.Get(ctx, chain)
|
||||
assert.NoError(t, err)
|
||||
assert.Nil(t, rev)
|
||||
|
||||
@ -50,7 +50,7 @@ func TestRevocationDB_Get(t *testing.T) {
|
||||
|
||||
{
|
||||
t.Log("existing key")
|
||||
rev, err = revDB.Get(chain)
|
||||
rev, err = revDB.Get(ctx, chain)
|
||||
assert.NoError(t, err)
|
||||
|
||||
revBytes, err := rev.Marshal()
|
||||
@ -96,7 +96,7 @@ func TestRevocationDB_Put_success(t *testing.T) {
|
||||
t.Log(testcase.name)
|
||||
require.NotNil(t, testcase.ext)
|
||||
|
||||
err = revDB.Put(chain, testcase.ext)
|
||||
err = revDB.Put(ctx, chain, testcase.ext)
|
||||
require.NoError(t, err)
|
||||
|
||||
nodeID, err := identity.NodeIDFromCert(chain[peertls.CAIndex])
|
||||
@ -125,7 +125,7 @@ func TestRevocationDB_Put_error(t *testing.T) {
|
||||
newerRevocation, err := extensions.NewRevocationExt(keys[peertls.CAIndex], chain[peertls.LeafIndex])
|
||||
require.NoError(t, err)
|
||||
|
||||
err = revDB.Put(chain, newerRevocation)
|
||||
err = revDB.Put(ctx, chain, newerRevocation)
|
||||
require.NoError(t, err)
|
||||
|
||||
testcases := []struct {
|
||||
@ -145,7 +145,7 @@ func TestRevocationDB_Put_error(t *testing.T) {
|
||||
t.Log(testcase.name)
|
||||
require.NotNil(t, testcase.ext)
|
||||
|
||||
err = revDB.Put(chain, testcase.ext)
|
||||
err = revDB.Put(ctx, chain, testcase.ext)
|
||||
assert.True(t, extensions.Error.Has(err))
|
||||
assert.Equal(t, testcase.err, err)
|
||||
}
|
||||
|
@ -47,7 +47,8 @@ func (dialer *Dialer) Close() error {
|
||||
}
|
||||
|
||||
// Lookup queries ask about find, and also sends information about self.
|
||||
func (dialer *Dialer) Lookup(ctx context.Context, self pb.Node, ask pb.Node, find pb.Node) ([]*pb.Node, error) {
|
||||
func (dialer *Dialer) Lookup(ctx context.Context, self pb.Node, ask pb.Node, find pb.Node) (_ []*pb.Node, err error) {
|
||||
defer mon.Task()(&ctx)(&err)
|
||||
if !dialer.limit.Lock() {
|
||||
return nil, context.Canceled
|
||||
}
|
||||
@ -72,7 +73,8 @@ func (dialer *Dialer) Lookup(ctx context.Context, self pb.Node, ask pb.Node, fin
|
||||
}
|
||||
|
||||
// PingNode pings target.
|
||||
func (dialer *Dialer) PingNode(ctx context.Context, target pb.Node) (bool, error) {
|
||||
func (dialer *Dialer) PingNode(ctx context.Context, target pb.Node) (_ bool, err error) {
|
||||
defer mon.Task()(&ctx)(&err)
|
||||
if !dialer.limit.Lock() {
|
||||
return false, context.Canceled
|
||||
}
|
||||
@ -90,6 +92,7 @@ func (dialer *Dialer) PingNode(ctx context.Context, target pb.Node) (bool, error
|
||||
|
||||
// FetchPeerIdentity connects to a node and returns its peer identity
|
||||
func (dialer *Dialer) FetchPeerIdentity(ctx context.Context, target pb.Node) (_ *identity.PeerIdentity, err error) {
|
||||
defer mon.Task()(&ctx)(&err)
|
||||
if !dialer.limit.Lock() {
|
||||
return nil, context.Canceled
|
||||
}
|
||||
@ -111,6 +114,7 @@ func (dialer *Dialer) FetchPeerIdentity(ctx context.Context, target pb.Node) (_
|
||||
|
||||
// FetchPeerIdentityUnverified connects to an address and returns its peer identity (no node ID verification).
|
||||
func (dialer *Dialer) FetchPeerIdentityUnverified(ctx context.Context, address string, opts ...grpc.CallOption) (_ *identity.PeerIdentity, err error) {
|
||||
defer mon.Task()(&ctx)(&err)
|
||||
if !dialer.limit.Lock() {
|
||||
return nil, context.Canceled
|
||||
}
|
||||
@ -131,7 +135,8 @@ func (dialer *Dialer) FetchPeerIdentityUnverified(ctx context.Context, address s
|
||||
}
|
||||
|
||||
// FetchInfo connects to a node and returns its node info.
|
||||
func (dialer *Dialer) FetchInfo(ctx context.Context, target pb.Node) (*pb.InfoResponse, error) {
|
||||
func (dialer *Dialer) FetchInfo(ctx context.Context, target pb.Node) (_ *pb.InfoResponse, err error) {
|
||||
defer mon.Task()(&ctx)(&err)
|
||||
if !dialer.limit.Lock() {
|
||||
return nil, context.Canceled
|
||||
}
|
||||
@ -148,7 +153,8 @@ func (dialer *Dialer) FetchInfo(ctx context.Context, target pb.Node) (*pb.InfoRe
|
||||
}
|
||||
|
||||
// dialNode dials the specified node.
|
||||
func (dialer *Dialer) dialNode(ctx context.Context, target pb.Node) (*Conn, error) {
|
||||
func (dialer *Dialer) dialNode(ctx context.Context, target pb.Node) (_ *Conn, err error) {
|
||||
defer mon.Task()(&ctx)(&err)
|
||||
grpcconn, err := dialer.transport.DialNode(ctx, &target)
|
||||
return &Conn{
|
||||
conn: grpcconn,
|
||||
@ -157,7 +163,8 @@ func (dialer *Dialer) dialNode(ctx context.Context, target pb.Node) (*Conn, erro
|
||||
}
|
||||
|
||||
// dialAddress dials the specified node by address (no node ID verification)
|
||||
func (dialer *Dialer) dialAddress(ctx context.Context, address string) (*Conn, error) {
|
||||
func (dialer *Dialer) dialAddress(ctx context.Context, address string) (_ *Conn, err error) {
|
||||
defer mon.Task()(&ctx)(&err)
|
||||
grpcconn, err := dialer.transport.DialAddress(ctx, address)
|
||||
return &Conn{
|
||||
conn: grpcconn,
|
||||
|
@ -34,7 +34,8 @@ func NewEndpoint(log *zap.Logger, service *Kademlia, routingTable *RoutingTable)
|
||||
}
|
||||
|
||||
// Query is a node to node communication query
|
||||
func (endpoint *Endpoint) Query(ctx context.Context, req *pb.QueryRequest) (*pb.QueryResponse, error) {
|
||||
func (endpoint *Endpoint) Query(ctx context.Context, req *pb.QueryRequest) (_ *pb.QueryResponse, err error) {
|
||||
defer mon.Task()(&ctx)(&err)
|
||||
endpoint.service.Queried()
|
||||
|
||||
if req.GetPingback() {
|
||||
@ -51,7 +52,9 @@ func (endpoint *Endpoint) Query(ctx context.Context, req *pb.QueryRequest) (*pb.
|
||||
|
||||
// pingback implements pingback for queries
|
||||
func (endpoint *Endpoint) pingback(ctx context.Context, target *pb.Node) {
|
||||
_, err := endpoint.service.Ping(ctx, *target)
|
||||
var err error
|
||||
defer mon.Task()(&ctx)(&err)
|
||||
_, err = endpoint.service.Ping(ctx, *target)
|
||||
if err != nil {
|
||||
endpoint.log.Debug("connection to node failed", zap.Error(err), zap.String("nodeID", target.Id.String()))
|
||||
err = endpoint.routingTable.ConnectionFailed(target)
|
||||
@ -74,13 +77,15 @@ func (endpoint *Endpoint) pingback(ctx context.Context, target *pb.Node) {
|
||||
}
|
||||
|
||||
// Ping provides an easy way to verify a node is online and accepting requests
|
||||
func (endpoint *Endpoint) Ping(ctx context.Context, req *pb.PingRequest) (*pb.PingResponse, error) {
|
||||
func (endpoint *Endpoint) Ping(ctx context.Context, req *pb.PingRequest) (_ *pb.PingResponse, err error) {
|
||||
defer mon.Task()(&ctx)(&err)
|
||||
endpoint.service.Pinged()
|
||||
return &pb.PingResponse{}, nil
|
||||
}
|
||||
|
||||
// RequestInfo returns the node info
|
||||
func (endpoint *Endpoint) RequestInfo(ctx context.Context, req *pb.InfoRequest) (*pb.InfoResponse, error) {
|
||||
func (endpoint *Endpoint) RequestInfo(ctx context.Context, req *pb.InfoRequest) (_ *pb.InfoResponse, err error) {
|
||||
defer mon.Task()(&ctx)(&err)
|
||||
self := endpoint.service.Local()
|
||||
|
||||
return &pb.InfoResponse{
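pingback above returns nothing at all, so the commit declares a local var err error ahead of the deferred task and switches the later := to a plain assignment; without that, a shadowed err would be invisible to the task. Roughly, with the ping helper standing in for the real service call:

func pingback(ctx context.Context, target *pb.Node) {
	var err error
	defer mon.Task()(&ctx)(&err)

	_, err = ping(ctx, target) // assignment, not :=, so err is not shadowed
	if err != nil {
		// log and update routing state; the deferred task still observes err
	}
}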
@ -26,7 +26,9 @@ func NewInspector(kad *Kademlia, identity *identity.FullIdentity) *Inspector {
}
// CountNodes returns the number of nodes in the routing table
func (srv *Inspector) CountNodes(ctx context.Context, req *pb.CountNodesRequest) (*pb.CountNodesResponse, error) {
func (srv *Inspector) CountNodes(ctx context.Context, req *pb.CountNodesRequest) (_ *pb.CountNodesResponse, err error) {
defer mon.Task()(&ctx)(&err)
// TODO: this is definitely the wrong way to get this
kadNodes, err := srv.dht.FindNear(ctx, srv.identity.ID, 100000)
if err != nil {
@ -39,7 +41,8 @@ func (srv *Inspector) CountNodes(ctx context.Context, req *pb.CountNodesRequest)
}
// GetBuckets returns all kademlia buckets for current kademlia instance
func (srv *Inspector) GetBuckets(ctx context.Context, req *pb.GetBucketsRequest) (*pb.GetBucketsResponse, error) {
func (srv *Inspector) GetBuckets(ctx context.Context, req *pb.GetBucketsRequest) (_ *pb.GetBucketsResponse, err error) {
defer mon.Task()(&ctx)(&err)
b, err := srv.dht.GetBucketIds()
if err != nil {
return nil, err
@ -57,7 +60,8 @@ func (srv *Inspector) GetBuckets(ctx context.Context, req *pb.GetBucketsRequest)
}
// FindNear sends back limit of near nodes
func (srv *Inspector) FindNear(ctx context.Context, req *pb.FindNearRequest) (*pb.FindNearResponse, error) {
func (srv *Inspector) FindNear(ctx context.Context, req *pb.FindNearRequest) (_ *pb.FindNearResponse, err error) {
defer mon.Task()(&ctx)(&err)
start := req.Start
limit := req.Limit
nodes, err := srv.dht.FindNear(ctx, start, int(limit))
@ -70,8 +74,9 @@ func (srv *Inspector) FindNear(ctx context.Context, req *pb.FindNearRequest) (*p
}
// PingNode sends a PING RPC to the provided node ID in the Kad network.
func (srv *Inspector) PingNode(ctx context.Context, req *pb.PingNodeRequest) (*pb.PingNodeResponse, error) {
_, err := srv.dht.Ping(ctx, pb.Node{
func (srv *Inspector) PingNode(ctx context.Context, req *pb.PingNodeRequest) (_ *pb.PingNodeResponse, err error) {
defer mon.Task()(&ctx)(&err)
_, err = srv.dht.Ping(ctx, pb.Node{
Id: req.Id,
Address: &pb.NodeAddress{
Address: req.Address,
@ -87,7 +92,8 @@ func (srv *Inspector) PingNode(ctx context.Context, req *pb.PingNodeRequest) (*p
}
// LookupNode triggers a Kademlia lookup and returns the node the network found.
func (srv *Inspector) LookupNode(ctx context.Context, req *pb.LookupNodeRequest) (*pb.LookupNodeResponse, error) {
func (srv *Inspector) LookupNode(ctx context.Context, req *pb.LookupNodeRequest) (_ *pb.LookupNodeResponse, err error) {
defer mon.Task()(&ctx)(&err)
id, err := storj.NodeIDFromString(req.Id)
if err != nil {
return &pb.LookupNodeResponse{}, err
@ -103,7 +109,8 @@ func (srv *Inspector) LookupNode(ctx context.Context, req *pb.LookupNodeRequest)
}
// DumpNodes returns all of the nodes in the routing table database.
func (srv *Inspector) DumpNodes(ctx context.Context, req *pb.DumpNodesRequest) (*pb.DumpNodesResponse, error) {
func (srv *Inspector) DumpNodes(ctx context.Context, req *pb.DumpNodesRequest) (_ *pb.DumpNodesResponse, err error) {
defer mon.Task()(&ctx)(&err)
nodes, err := srv.dht.DumpNodes(ctx)
if err != nil {
return nil, err
@ -115,7 +122,8 @@ func (srv *Inspector) DumpNodes(ctx context.Context, req *pb.DumpNodesRequest) (
}
// NodeInfo sends a PING RPC to a node and returns its local info.
func (srv *Inspector) NodeInfo(ctx context.Context, req *pb.NodeInfoRequest) (*pb.NodeInfoResponse, error) {
func (srv *Inspector) NodeInfo(ctx context.Context, req *pb.NodeInfoRequest) (_ *pb.NodeInfoResponse, err error) {
defer mon.Task()(&ctx)(&err)
info, err := srv.dht.FetchInfo(ctx, pb.Node{
Id: req.Id,
Address: req.Address,
@ -132,7 +140,8 @@ func (srv *Inspector) NodeInfo(ctx context.Context, req *pb.NodeInfoRequest) (*p
}
// GetBucketList returns the list of buckets with their routing nodes and their cached nodes
func (srv *Inspector) GetBucketList(ctx context.Context, req *pb.GetBucketListRequest) (*pb.GetBucketListResponse, error) {
func (srv *Inspector) GetBucketList(ctx context.Context, req *pb.GetBucketListRequest) (_ *pb.GetBucketListResponse, err error) {
defer mon.Task()(&ctx)(&err)
bucketIds, err := srv.dht.GetBucketIds()
if err != nil {
return nil, err
@ -12,6 +12,7 @@ import (
"github.com/zeebo/errs"
"go.uber.org/zap"
monkit "gopkg.in/spacemonkeygo/monkit.v2"
"storj.io/storj/internal/sync2"
"storj.io/storj/pkg/identity"
@ -32,6 +33,7 @@ var (
// TODO: shouldn't default to TCP but not sure what to do yet
defaultTransport = pb.NodeTransport_TCP_TLS_GRPC
defaultRetries = 3
mon = monkit.Package()
)
type discoveryOptions struct {
@ -116,7 +118,8 @@ func (k *Kademlia) Queried() {
// FindNear returns all nodes from a starting node up to a maximum limit
// stored in the local routing table.
func (k *Kademlia) FindNear(ctx context.Context, start storj.NodeID, limit int) ([]*pb.Node, error) {
func (k *Kademlia) FindNear(ctx context.Context, start storj.NodeID, limit int) (_ []*pb.Node, err error) {
defer mon.Task()(&ctx)(&err)
return k.routingTable.FindNear(start, limit)
}
@ -144,7 +147,8 @@ func (k *Kademlia) DumpNodes(ctx context.Context) ([]*pb.Node, error) {
// Bootstrap contacts one of a set of pre defined trusted nodes on the network and
// begins populating the local Kademlia node
func (k *Kademlia) Bootstrap(ctx context.Context) error {
func (k *Kademlia) Bootstrap(ctx context.Context) (err error) {
defer mon.Task()(&ctx)(&err)
defer k.bootstrapFinished.Release()
if !k.lookups.Start() {
@ -219,7 +223,8 @@ func (k *Kademlia) WaitForBootstrap() {
}
// FetchPeerIdentity connects to a node and returns its peer identity
func (k *Kademlia) FetchPeerIdentity(ctx context.Context, nodeID storj.NodeID) (*identity.PeerIdentity, error) {
func (k *Kademlia) FetchPeerIdentity(ctx context.Context, nodeID storj.NodeID) (_ *identity.PeerIdentity, err error) {
defer mon.Task()(&ctx)(&err)
if !k.lookups.Start() {
return nil, context.Canceled
}
@ -232,7 +237,8 @@ func (k *Kademlia) FetchPeerIdentity(ctx context.Context, nodeID storj.NodeID) (
}
// Ping checks that the provided node is still accessible on the network
func (k *Kademlia) Ping(ctx context.Context, node pb.Node) (pb.Node, error) {
func (k *Kademlia) Ping(ctx context.Context, node pb.Node) (_ pb.Node, err error) {
defer mon.Task()(&ctx)(&err)
if !k.lookups.Start() {
return pb.Node{}, context.Canceled
}
@ -249,7 +255,8 @@ func (k *Kademlia) Ping(ctx context.Context, node pb.Node) (pb.Node, error) {
}
// FetchInfo connects to a node address and returns the node info
func (k *Kademlia) FetchInfo(ctx context.Context, node pb.Node) (*pb.InfoResponse, error) {
func (k *Kademlia) FetchInfo(ctx context.Context, node pb.Node) (_ *pb.InfoResponse, err error) {
defer mon.Task()(&ctx)(&err)
if !k.lookups.Start() {
return nil, context.Canceled
}
@ -264,7 +271,8 @@ func (k *Kademlia) FetchInfo(ctx context.Context, node pb.Node) (*pb.InfoRespons
// FindNode looks up the provided NodeID first in the local Node, and if it is not found
// begins searching the network for the NodeID. Returns and error if node was not found
func (k *Kademlia) FindNode(ctx context.Context, nodeID storj.NodeID) (pb.Node, error) {
func (k *Kademlia) FindNode(ctx context.Context, nodeID storj.NodeID) (_ pb.Node, err error) {
defer mon.Task()(&ctx)(&err)
if !k.lookups.Start() {
return pb.Node{}, context.Canceled
}
@ -274,7 +282,8 @@ func (k *Kademlia) FindNode(ctx context.Context, nodeID storj.NodeID) (pb.Node,
}
//lookup initiates a kadmelia node lookup
func (k *Kademlia) lookup(ctx context.Context, nodeID storj.NodeID, isBootstrap bool) (pb.Node, error) {
func (k *Kademlia) lookup(ctx context.Context, nodeID storj.NodeID, isBootstrap bool) (_ pb.Node, err error) {
defer mon.Task()(&ctx)(&err)
if !k.lookups.Start() {
return pb.Node{}, context.Canceled
}
@ -345,7 +354,9 @@ func (k *Kademlia) SetBucketRefreshThreshold(threshold time.Duration) {
}
// Run occasionally refreshes stale kad buckets
func (k *Kademlia) Run(ctx context.Context) error {
func (k *Kademlia) Run(ctx context.Context) (err error) {
defer mon.Task()(&ctx)(&err)
if !k.lookups.Start() {
return context.Canceled
}
@ -363,7 +374,8 @@ func (k *Kademlia) Run(ctx context.Context) error {
}
// refresh updates each Kademlia bucket not contacted in the last hour
func (k *Kademlia) refresh(ctx context.Context, threshold time.Duration) error {
func (k *Kademlia) refresh(ctx context.Context, threshold time.Duration) (err error) {
defer mon.Task()(&ctx)(&err)
bIDs, err := k.routingTable.GetBucketIds()
if err != nil {
return Error.Wrap(err)
@ -45,6 +45,7 @@ func newPeerDiscovery(log *zap.Logger, self pb.Node, nodes []*pb.Node, dialer *D
}
func (lookup *peerDiscovery) Run(ctx context.Context) (target *pb.Node, err error) {
defer mon.Task()(&ctx)(&err)
if lookup.queue.Len() == 0 {
return nil, nil // TODO: should we return an error here?
}
@ -181,9 +181,11 @@ func (rt *RoutingTable) DumpNodes() ([]*pb.Node, error) {
// FindNear returns the node corresponding to the provided nodeID
// returns all Nodes (excluding self) closest via XOR to the provided nodeID up to the provided limit
func (rt *RoutingTable) FindNear(target storj.NodeID, limit int) ([]*pb.Node, error) {
func (rt *RoutingTable) FindNear(target storj.NodeID, limit int) (_ []*pb.Node, err error) {
ctx := context.TODO()
defer mon.Task()(&ctx)(&err)
closestNodes := make([]*pb.Node, 0, limit+1)
err := rt.iterateNodes(storj.NodeID{}, func(newID storj.NodeID, protoNode []byte) error {
err = rt.iterateNodes(storj.NodeID{}, func(newID storj.NodeID, protoNode []byte) error {
newPos := len(closestNodes)
for ; newPos > 0 && compareByXor(closestNodes[newPos-1].Id, newID, target) > 0; newPos-- {
}
@ -209,7 +211,9 @@ func (rt *RoutingTable) FindNear(target storj.NodeID, limit int) ([]*pb.Node, er
// ConnectionSuccess updates or adds a node to the routing table when
// a successful connection is made to the node on the network
func (rt *RoutingTable) ConnectionSuccess(node *pb.Node) error {
func (rt *RoutingTable) ConnectionSuccess(node *pb.Node) (err error) {
ctx := context.TODO()
defer mon.Task()(&ctx)(&err)
// valid to connect to node without ID but don't store connection
if node.Id == (storj.NodeID{}) {
return nil
@ -239,8 +243,10 @@ func (rt *RoutingTable) ConnectionSuccess(node *pb.Node) error {
// ConnectionFailed removes a node from the routing table when
// a connection fails for the node on the network
func (rt *RoutingTable) ConnectionFailed(node *pb.Node) error {
err := rt.removeNode(node)
func (rt *RoutingTable) ConnectionFailed(node *pb.Node) (err error) {
ctx := context.TODO()
defer mon.Task()(&ctx)(&err)
err = rt.removeNode(node)
if err != nil {
return RoutingErr.New("could not remove node %s", err)
}
@ -4,6 +4,7 @@
package kademlia
import (
"context"
"encoding/binary"
"time"
@ -17,7 +18,9 @@ import (
// addNode attempts to add a new contact to the routing table
// Requires node not already in table
// Returns true if node was added successfully
func (rt *RoutingTable) addNode(node *pb.Node) (bool, error) {
func (rt *RoutingTable) addNode(node *pb.Node) (_ bool, err error) {
ctx := context.TODO()
defer mon.Task()(&ctx)(&err)
rt.mutex.Lock()
defer rt.mutex.Unlock()
@ -91,7 +94,9 @@ func (rt *RoutingTable) addNode(node *pb.Node) (bool, error) {
// updateNode will update the node information given that
// the node is already in the routing table.
func (rt *RoutingTable) updateNode(node *pb.Node) error {
func (rt *RoutingTable) updateNode(node *pb.Node) (err error) {
ctx := context.TODO()
defer mon.Task()(&ctx)(&err)
if err := rt.putNode(node); err != nil {
return RoutingErr.New("could not update node: %v", err)
}
@ -99,7 +104,9 @@ func (rt *RoutingTable) updateNode(node *pb.Node) error {
}
// removeNode will remove churned nodes and replace those entries with nodes from the replacement cache.
func (rt *RoutingTable) removeNode(node *pb.Node) error {
func (rt *RoutingTable) removeNode(node *pb.Node) (err error) {
ctx := context.TODO()
defer mon.Task()(&ctx)(&err)
rt.mutex.Lock()
defer rt.mutex.Unlock()
kadBucketID, err := rt.getKBucketID(node.Id)
@ -145,7 +152,9 @@ func (rt *RoutingTable) removeNode(node *pb.Node) error {
}
// putNode: helper, adds or updates Node and ID to nodeBucketDB
func (rt *RoutingTable) putNode(node *pb.Node) error {
func (rt *RoutingTable) putNode(node *pb.Node) (err error) {
ctx := context.TODO()
defer mon.Task()(&ctx)(&err)
v, err := proto.Marshal(node)
if err != nil {
return RoutingErr.Wrap(err)
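The RoutingTable methods above are not yet handed a context by their callers, so the commit manufactures a placeholder with context.TODO() purely so the task has something to attach to. A hedged sketch of that interim shape, with receiver and helper names that are illustrative only:

func (rt *routingTable) removeEntry(node *pb.Node) (err error) {
	ctx := context.TODO() // placeholder until callers thread a real context through
	defer mon.Task()(&ctx)(&err)

	return rt.remove(node)
}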
@ -5,11 +5,13 @@ package macaroon
import (
"bytes"
"context"
"time"
"github.com/btcsuite/btcutil/base58"
"github.com/gogo/protobuf/proto"
"github.com/zeebo/errs"
monkit "gopkg.in/spacemonkeygo/monkit.v2"
)
var (
@ -23,6 +25,8 @@ var (
ErrUnauthorized = errs.Class("api key unauthorized error")
// ErrRevoked means the API key has been revoked
ErrRevoked = errs.Class("api key revocation error")
mon = monkit.Package()
)
// ActionType specifies the operation type being performed that the Macaroon will validate
@ -81,7 +85,9 @@ func NewAPIKey(secret []byte) (*APIKey, error) {
// Check makes sure that the key authorizes the provided action given the root
// project secret and any possible revocations, returning an error if the action
// is not authorized. 'revoked' is a list of revoked heads.
func (a *APIKey) Check(secret []byte, action Action, revoked [][]byte) error {
func (a *APIKey) Check(secret []byte, action Action, revoked [][]byte) (err error) {
ctx := context.TODO()
defer mon.Task()(&ctx)(&err)
if !a.mac.Validate(secret) {
return ErrInvalid.New("macaroon unauthorized")
}
@ -230,7 +230,7 @@ func (db *DB) getInfo(ctx context.Context, prefix string, bucket string, path st
fullpath := bucket + "/" + path
encryptedPath, err := streams.EncryptAfterBucket(fullpath, bucketInfo.PathCipher, db.rootKey)
encryptedPath, err := streams.EncryptAfterBucket(ctx, fullpath, bucketInfo.PathCipher, db.rootKey)
if err != nil {
return object{}, storj.Object{}, err
}
@ -381,22 +381,26 @@ type mutableObject struct {
func (object *mutableObject) Info() storj.Object { return object.info }
func (object *mutableObject) CreateStream(ctx context.Context) (storj.MutableStream, error) {
func (object *mutableObject) CreateStream(ctx context.Context) (_ storj.MutableStream, err error) {
defer mon.Task()(&ctx)(&err)
return &mutableStream{
db: object.db,
info: object.info,
}, nil
}
func (object *mutableObject) ContinueStream(ctx context.Context) (storj.MutableStream, error) {
func (object *mutableObject) ContinueStream(ctx context.Context) (_ storj.MutableStream, err error) {
defer mon.Task()(&ctx)(&err)
return nil, errors.New("not implemented")
}
func (object *mutableObject) DeleteStream(ctx context.Context) error {
func (object *mutableObject) DeleteStream(ctx context.Context) (err error) {
defer mon.Task()(&ctx)(&err)
return errors.New("not implemented")
}
func (object *mutableObject) Commit(ctx context.Context) error {
func (object *mutableObject) Commit(ctx context.Context) (err error) {
defer mon.Task()(&ctx)(&err)
_, info, err := object.db.getInfo(ctx, committedPrefix, object.info.Bucket.Name, object.info.Path)
object.info = info
return err
@ -141,10 +141,12 @@ type mutableStream struct {
func (stream *mutableStream) Info() storj.Object { return stream.info }
func (stream *mutableStream) AddSegments(ctx context.Context, segments ...storj.Segment) error {
func (stream *mutableStream) AddSegments(ctx context.Context, segments ...storj.Segment) (err error) {
defer mon.Task()(&ctx)(&err)
return errors.New("not implemented")
}
func (stream *mutableStream) UpdateSegments(ctx context.Context, segments ...storj.Segment) error {
func (stream *mutableStream) UpdateSegments(ctx context.Context, segments ...storj.Segment) (err error) {
defer mon.Task()(&ctx)(&err)
return errors.New("not implemented")
}
@ -139,7 +139,8 @@ func NewCache(log *zap.Logger, db DB, preferences NodeSelectionConfig) *Cache {
func (cache *Cache) Close() error { return nil }
// Inspect lists limited number of items in the cache
func (cache *Cache) Inspect(ctx context.Context) (storage.Keys, error) {
func (cache *Cache) Inspect(ctx context.Context) (_ storage.Keys, err error) {
defer mon.Task()(&ctx)(&err)
// TODO: implement inspection tools
return nil, errors.New("not implemented")
}
@ -166,7 +167,8 @@ func (cache *Cache) IsOnline(node *NodeDossier) bool {
}
// FindStorageNodes searches the overlay network for nodes that meet the provided requirements
func (cache *Cache) FindStorageNodes(ctx context.Context, req FindStorageNodesRequest) ([]*pb.Node, error) {
func (cache *Cache) FindStorageNodes(ctx context.Context, req FindStorageNodesRequest) (_ []*pb.Node, err error) {
defer mon.Task()(&ctx)(&err)
return cache.FindStorageNodesWithPreferences(ctx, req, &cache.preferences)
}
@ -270,8 +272,8 @@ func (cache *Cache) Put(ctx context.Context, nodeID storj.NodeID, value pb.Node)
if value.Address == nil {
return errors.New("node has no address")
}
//Resolve IP Address to ensure it is set
value.LastIp, err = getIP(value.Address.Address)
// Resolve IP Address to ensure it is set
value.LastIp, err = getIP(ctx, value.Address.Address)
if err != nil {
return OverlayError.Wrap(err)
}
@ -349,6 +351,7 @@ func (cache *Cache) ConnSuccess(ctx context.Context, node *pb.Node) {
// GetMissingPieces returns the list of offline nodes
func (cache *Cache) GetMissingPieces(ctx context.Context, pieces []*pb.RemotePiece) (missingPieces []int32, err error) {
defer mon.Task()(&ctx)(&err)
var nodeIDs storj.NodeIDList
for _, p := range pieces {
nodeIDs = append(nodeIDs, p.NodeId)
@ -368,7 +371,8 @@ func (cache *Cache) GetMissingPieces(ctx context.Context, pieces []*pb.RemotePie
return missingPieces, nil
}
func getIP(target string) (string, error) {
func getIP(ctx context.Context, target string) (_ string, err error) {
defer mon.Task()(&ctx)(&err)
host, _, err := net.SplitHostPort(target)
if err != nil {
return "", err
@ -22,7 +22,8 @@ func NewInspector(cache *Cache) *Inspector {
}
// CountNodes returns the number of nodes in the cache
func (srv *Inspector) CountNodes(ctx context.Context, req *pb.CountNodesRequest) (*pb.CountNodesResponse, error) {
func (srv *Inspector) CountNodes(ctx context.Context, req *pb.CountNodesRequest) (_ *pb.CountNodesResponse, err error) {
defer mon.Task()(&ctx)(&err)
overlayKeys, err := srv.cache.Inspect(ctx)
if err != nil {
return nil, err
@ -34,12 +35,14 @@ func (srv *Inspector) CountNodes(ctx context.Context, req *pb.CountNodesRequest)
}
// DumpNodes returns all of the nodes in the overlay cachea
func (srv *Inspector) DumpNodes(ctx context.Context, req *pb.DumpNodesRequest) (*pb.DumpNodesResponse, error) {
func (srv *Inspector) DumpNodes(ctx context.Context, req *pb.DumpNodesRequest) (_ *pb.DumpNodesResponse, err error) {
defer mon.Task()(&ctx)(&err)
return &pb.DumpNodesResponse{}, errs.New("Not Implemented")
}
// GetStats returns the stats for a particular node ID
func (srv *Inspector) GetStats(ctx context.Context, req *pb.GetStatsRequest) (*pb.GetStatsResponse, error) {
func (srv *Inspector) GetStats(ctx context.Context, req *pb.GetStatsRequest) (_ *pb.GetStatsResponse, err error) {
defer mon.Task()(&ctx)(&err)
node, err := srv.cache.Get(ctx, req.NodeId)
if err != nil {
return nil, err
@ -54,7 +57,8 @@ func (srv *Inspector) GetStats(ctx context.Context, req *pb.GetStatsRequest) (*p
}
// CreateStats creates a node with specified stats
func (srv *Inspector) CreateStats(ctx context.Context, req *pb.CreateStatsRequest) (*pb.CreateStatsResponse, error) {
func (srv *Inspector) CreateStats(ctx context.Context, req *pb.CreateStatsRequest) (_ *pb.CreateStatsResponse, err error) {
defer mon.Task()(&ctx)(&err)
stats := &NodeStats{
AuditCount: req.AuditCount,
AuditSuccessCount: req.AuditSuccessCount,
@ -62,7 +66,7 @@ func (srv *Inspector) CreateStats(ctx context.Context, req *pb.CreateStatsReques
UptimeSuccessCount: req.UptimeSuccessCount,
}
_, err := srv.cache.Create(ctx, req.NodeId, stats)
_, err = srv.cache.Create(ctx, req.NodeId, stats)
if err != nil {
return nil, err
}
@ -5,6 +5,7 @@ package extensions
import (
"bytes"
"context"
"crypto"
"crypto/x509"
"crypto/x509/pkix"
@ -49,9 +50,9 @@ type Revocation struct {
// RevocationDB stores certificate revocation data.
type RevocationDB interface {
Get(chain []*x509.Certificate) (*Revocation, error)
Put(chain []*x509.Certificate, ext pkix.Extension) error
List() ([]*Revocation, error)
Get(ctx context.Context, chain []*x509.Certificate) (*Revocation, error)
Put(ctx context.Context, chain []*x509.Certificate, ext pkix.Extension) error
List(ctx context.Context) ([]*Revocation, error)
Close() error
}
@ -96,7 +97,7 @@ func NewRevocationExt(key crypto.PrivateKey, revokedCert *x509.Certificate) (pki
func revocationChecker(opts *Options) HandlerFunc {
return func(_ pkix.Extension, chains [][]*x509.Certificate) error {
ca, leaf := chains[0][peertls.CAIndex], chains[0][peertls.LeafIndex]
lastRev, lastRevErr := opts.RevDB.Get(chains[0])
lastRev, lastRevErr := opts.RevDB.Get(context.TODO(), chains[0])
if lastRevErr != nil {
return Error.Wrap(lastRevErr)
}
@ -128,7 +129,7 @@ func revocationChecker(opts *Options) HandlerFunc {
func revocationUpdater(opts *Options) HandlerFunc {
return func(ext pkix.Extension, chains [][]*x509.Certificate) error {
if err := opts.RevDB.Put(chains[0], ext); err != nil {
if err := opts.RevDB.Put(context.TODO(), chains[0], ext); err != nil {
return err
}
return nil
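Because RevocationDB is an interface, adding ctx to Get, Put, and List changes every implementation and call site at once; handlers that run inside the TLS handshake and have no context of their own pass context.TODO() for now, as revocationChecker and revocationUpdater do above. A hedged sketch of a caller against the updated interface, with illustrative names:

func latestRevocation(db extensions.RevocationDB, chain []*x509.Certificate) (*extensions.Revocation, error) {
	// no request-scoped context is available on this path yet
	return db.Get(context.TODO(), chain)
}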
@ -4,6 +4,7 @@
package extensions_test
import (
"context"
"crypto/x509/pkix"
"testing"
"time"
@ -20,6 +21,8 @@ import (
"storj.io/storj/storage"
)
var ctx = context.Background() // test context
func TestRevocationCheckHandler(t *testing.T) {
testidentity.RevocationDBsTest(t, func(t *testing.T, revDB extensions.RevocationDB, _ storage.KeyValueStore) {
keys, chain, err := testpeertls.NewCertChain(2, storj.LatestIDVersion().Number)
@ -47,7 +50,7 @@ func TestRevocationCheckHandler(t *testing.T) {
// NB: add leaf revocation to revocation DB
t.Log("revocation DB put leaf revocation")
err = revDB.Put(revokingChain, leafRevocationExt)
err = revDB.Put(ctx, revokingChain, leafRevocationExt)
require.NoError(t, err)
{
@ -97,7 +100,7 @@ func TestRevocationCheckHandler(t *testing.T) {
// NB: add CA revocation to revocation DB
t.Log("revocation DB put CA revocation")
err = revDB.Put(revokingChain, caRevocationExt)
err = revDB.Put(ctx, revokingChain, caRevocationExt)
require.NoError(t, err)
{
@ -139,7 +139,7 @@ func TestExtensionMap_HandleExtensions_error(t *testing.T) {
assert.NotEqual(t, oldRevocation, newRevocation)
err = revDB.Put(chain, newRevocation)
err = revDB.Put(ctx, chain, newRevocation)
assert.NoError(t, err)
opts := &extensions.Options{RevDB: revDB}
@ -27,6 +27,7 @@ import (
// ServeContent is the Go standard library's http.ServeContent but modified to
// work with Rangers.
func ServeContent(ctx context.Context, w http.ResponseWriter, r *http.Request, name string, modtime time.Time, content Ranger) {
defer mon.Task()(&ctx)(nil)
setLastModified(w, modtime)
done, rangeReq := checkPreconditions(w, r, modtime)
if done {
@ -29,7 +29,8 @@ func (rr *fileRanger) Size() int64 {
return rr.size
}
func (rr *fileRanger) Range(ctx context.Context, offset, length int64) (io.ReadCloser, error) {
func (rr *fileRanger) Range(ctx context.Context, offset, length int64) (_ io.ReadCloser, err error) {
defer mon.Task()(&ctx)(&err)
if offset < 0 {
return nil, Error.New("negative offset")
}
@ -20,7 +20,8 @@ type httpRanger struct {
}
// HTTPRanger turns an HTTP URL into a Ranger
func HTTPRanger(url string) (Ranger, error) {
func HTTPRanger(ctx context.Context, url string) (_ Ranger, err error) {
defer mon.Task()(&ctx)(&err)
resp, err := http.Head(url)
if err != nil {
return nil, err
@ -63,7 +64,8 @@ func (r *httpRanger) Size() int64 {
}
// Range implements Ranger.Range
func (r *httpRanger) Range(ctx context.Context, offset, length int64) (io.ReadCloser, error) {
func (r *httpRanger) Range(ctx context.Context, offset, length int64) (_ io.ReadCloser, err error) {
defer mon.Task()(&ctx)(&err)
if offset < 0 {
return nil, Error.New("negative offset")
}
@ -16,6 +16,10 @@ import (
"github.com/stretchr/testify/assert"
)
var (
ctx = context.Background() // test context
)
func TestHTTPRanger(t *testing.T) {
var content string
ts := httptest.NewServer(http.HandlerFunc(
@ -46,7 +50,7 @@ func TestHTTPRanger(t *testing.T) {
tag := fmt.Sprintf("#%d. %+v", i, tt)
content = tt.data
rr, err := HTTPRanger(ts.URL)
rr, err := HTTPRanger(ctx, ts.URL)
if assert.NoError(t, err, tag) {
assert.Equal(t, tt.size, rr.Size(), tag)
}
@ -64,7 +68,7 @@ func TestHTTPRanger(t *testing.T) {
}
func TestHTTPRangerURLError(t *testing.T) {
rr, err := HTTPRanger("")
rr, err := HTTPRanger(ctx, "")
assert.Nil(t, rr)
assert.NotNil(t, err)
}
@ -75,7 +79,7 @@ func TestHTTPRangeStatusCodeOk(t *testing.T) {
w.WriteHeader(http.StatusForbidden)
http.ServeContent(w, r, "test", time.Now(), strings.NewReader(""))
}))
rr, err := HTTPRanger(ts.URL)
rr, err := HTTPRanger(ctx, ts.URL)
assert.Nil(t, rr)
assert.NotNil(t, err)
}
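The test files take a simpler route: one package-level context built from context.Background(), shared by every test, rather than threading a context through each helper. In isolation that looks roughly like the sketch below; the test body and URL are placeholders, only the shared ctx and the updated HTTPRanger call shape come from this commit.

var ctx = context.Background() // test context

func TestSomething(t *testing.T) {
	// every test in the package can reuse the same background context
	if _, err := HTTPRanger(ctx, "http://example.invalid/"); err == nil {
		t.Log("unexpected success")
	}
}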
@ -9,9 +9,13 @@ import (
"io"
"io/ioutil"
monkit "gopkg.in/spacemonkeygo/monkit.v2"
"storj.io/storj/internal/readcloser"
)
var mon = monkit.Package()
// A Ranger is a flexible data stream type that allows for more effective
// pipelining during seeking. A Ranger can return multiple parallel Readers for
// any subranges.
@ -27,7 +31,8 @@ type ByteRanger []byte
func (b ByteRanger) Size() int64 { return int64(len(b)) }
// Range implements Ranger.Range
func (b ByteRanger) Range(ctx context.Context, offset, length int64) (io.ReadCloser, error) {
func (b ByteRanger) Range(ctx context.Context, offset, length int64) (_ io.ReadCloser, err error) {
defer mon.Task()(&ctx)(&err)
if offset < 0 {
return nil, Error.New("negative offset")
}
@ -50,7 +55,8 @@ func (c *concatReader) Size() int64 {
return c.r1.Size() + c.r2.Size()
}
func (c *concatReader) Range(ctx context.Context, offset, length int64) (io.ReadCloser, error) {
func (c *concatReader) Range(ctx context.Context, offset, length int64) (_ io.ReadCloser, err error) {
defer mon.Task()(&ctx)(&err)
r1Size := c.r1.Size()
if offset+length <= r1Size {
return c.r1.Range(ctx, offset, length)
@ -109,6 +115,7 @@ func (s *subrange) Size() int64 {
return s.length
}
func (s *subrange) Range(ctx context.Context, offset, length int64) (io.ReadCloser, error) {
func (s *subrange) Range(ctx context.Context, offset, length int64) (_ io.ReadCloser, err error) {
defer mon.Task()(&ctx)(&err)
return s.r.Range(ctx, offset+s.offset, length)
}
@ -30,7 +30,8 @@ type readerAtReader struct {
offset, length int64
}
func (r *readerAtRanger) Range(ctx context.Context, offset, length int64) (io.ReadCloser, error) {
func (r *readerAtRanger) Range(ctx context.Context, offset, length int64) (_ io.ReadCloser, err error) {
defer mon.Task()(&ctx)(&err)
if offset < 0 {
return nil, Error.New("negative offset")
}
@ -61,7 +61,8 @@ func NewStore(stream streams.Store) Store {
}
// GetObjectStore returns an implementation of objects.Store
func (b *BucketStore) GetObjectStore(ctx context.Context, bucket string) (objects.Store, error) {
func (b *BucketStore) GetObjectStore(ctx context.Context, bucket string) (_ objects.Store, err error) {
defer mon.Task()(&ctx)(&err)
if bucket == "" {
return nil, storj.ErrNoBucket.New("")
}
@ -96,7 +97,7 @@ func (b *BucketStore) Get(ctx context.Context, bucket string) (meta Meta, err er
return Meta{}, err
}
return convertMeta(objMeta)
return convertMeta(ctx, objMeta)
}
// Put calls objects store Put and fills in some specific metadata to be used
@ -169,7 +170,7 @@ func (b *BucketStore) List(ctx context.Context, startAfter, endBefore string, li
if itm.IsPrefix {
continue
}
m, err := convertMeta(itm.Meta)
m, err := convertMeta(ctx, itm.Meta)
if err != nil {
return items, more, err
}
@ -182,7 +183,9 @@ func (b *BucketStore) List(ctx context.Context, startAfter, endBefore string, li
}
// convertMeta converts stream metadata to object metadata
func convertMeta(m objects.Meta) (out Meta, err error) {
func convertMeta(ctx context.Context, m objects.Meta) (out Meta, err error) {
defer mon.Task()(&ctx)(&err)
out.Created = m.Modified
// backwards compatibility for old buckets
out.PathEncryptionType = storj.AESGCM
@ -271,6 +271,7 @@ func (ec *ecClient) Repair(ctx context.Context, limits []*pb.AddressedOrderLimit
}
func (ec *ecClient) putPiece(ctx, parent context.Context, limit *pb.AddressedOrderLimit, data io.ReadCloser, expiration time.Time) (hash *pb.PieceHash, err error) {
defer mon.Task()(&ctx)(&err)
defer func() { err = errs.Combine(err, data.Close()) }()
if limit == nil {
@ -454,7 +455,8 @@ func (lr *lazyPieceRanger) Size() int64 {
}
// Range implements Ranger.Range to be lazily connected
func (lr *lazyPieceRanger) Range(ctx context.Context, offset, length int64) (io.ReadCloser, error) {
func (lr *lazyPieceRanger) Range(ctx context.Context, offset, length int64) (_ io.ReadCloser, err error) {
defer mon.Task()(&ctx)(&err)
ps, err := lr.newPSClientHelper(ctx, &pb.Node{
Id: lr.limit.GetLimit().StorageNodeId,
Address: lr.limit.GetStorageNodeAddress(),
@ -189,7 +189,7 @@ func (s *streamStore) upload(ctx context.Context, path storj.Path, pathCipher st
}
putMeta, err = s.segments.Put(ctx, transformedReader, expiration, func() (storj.Path, []byte, error) {
encPath, err := EncryptAfterBucket(path, pathCipher, s.rootKey)
encPath, err := EncryptAfterBucket(ctx, path, pathCipher, s.rootKey)
if err != nil {
return "", nil, err
}
@ -283,7 +283,7 @@ func getSegmentPath(path storj.Path, segNum int64) storj.Path {
func (s *streamStore) Get(ctx context.Context, path storj.Path, pathCipher storj.Cipher) (rr ranger.Ranger, meta Meta, err error) {
defer mon.Task()(&ctx)(&err)
encPath, err := EncryptAfterBucket(path, pathCipher, s.rootKey)
encPath, err := EncryptAfterBucket(ctx, path, pathCipher, s.rootKey)
if err != nil {
return nil, Meta{}, err
}
@ -361,7 +361,7 @@ func (s *streamStore) Get(ctx context.Context, path storj.Path, pathCipher storj
func (s *streamStore) Meta(ctx context.Context, path storj.Path, pathCipher storj.Cipher) (meta Meta, err error) {
defer mon.Task()(&ctx)(&err)
encPath, err := EncryptAfterBucket(path, pathCipher, s.rootKey)
encPath, err := EncryptAfterBucket(ctx, path, pathCipher, s.rootKey)
if err != nil {
return Meta{}, err
}
@ -387,7 +387,7 @@ func (s *streamStore) Meta(ctx context.Context, path storj.Path, pathCipher stor
func (s *streamStore) Delete(ctx context.Context, path storj.Path, pathCipher storj.Cipher) (err error) {
defer mon.Task()(&ctx)(&err)
encPath, err := EncryptAfterBucket(path, pathCipher, s.rootKey)
encPath, err := EncryptAfterBucket(ctx, path, pathCipher, s.rootKey)
if err != nil {
return err
}
@ -406,7 +406,7 @@ func (s *streamStore) Delete(ctx context.Context, path storj.Path, pathCipher st
}
for i := 0; i < int(stream.NumberOfSegments-1); i++ {
encPath, err = EncryptAfterBucket(path, pathCipher, s.rootKey)
encPath, err = EncryptAfterBucket(ctx, path, pathCipher, s.rootKey)
if err != nil {
return err
}
@ -439,7 +439,7 @@ func (s *streamStore) List(ctx context.Context, prefix, startAfter, endBefore st
prefix = strings.TrimSuffix(prefix, "/")
encPrefix, err := EncryptAfterBucket(prefix, pathCipher, s.rootKey)
encPrefix, err := EncryptAfterBucket(ctx, prefix, pathCipher, s.rootKey)
if err != nil {
return nil, false, err
}
@ -449,12 +449,12 @@ func (s *streamStore) List(ctx context.Context, prefix, startAfter, endBefore st
return nil, false, err
}
encStartAfter, err := s.encryptMarker(startAfter, pathCipher, prefixKey)
encStartAfter, err := s.encryptMarker(ctx, startAfter, pathCipher, prefixKey)
if err != nil {
return nil, false, err
}
encEndBefore, err := s.encryptMarker(endBefore, pathCipher, prefixKey)
encEndBefore, err := s.encryptMarker(ctx, endBefore, pathCipher, prefixKey)
if err != nil {
return nil, false, err
}
@ -466,7 +466,7 @@ func (s *streamStore) List(ctx context.Context, prefix, startAfter, endBefore st
items = make([]ListItem, len(segments))
for i, item := range segments {
path, err := s.decryptMarker(item.Path, pathCipher, prefixKey)
path, err := s.decryptMarker(ctx, item.Path, pathCipher, prefixKey)
if err != nil {
return nil, false, err
}
@ -488,17 +488,19 @@ func (s *streamStore) List(ctx context.Context, prefix, startAfter, endBefore st
}
// encryptMarker is a helper method for encrypting startAfter and endBefore markers
func (s *streamStore) encryptMarker(marker storj.Path, pathCipher storj.Cipher, prefixKey *storj.Key) (storj.Path, error) {
func (s *streamStore) encryptMarker(ctx context.Context, marker storj.Path, pathCipher storj.Cipher, prefixKey *storj.Key) (_ storj.Path, err error) {
defer mon.Task()(&ctx)(&err)
if bytes.Equal(s.rootKey[:], prefixKey[:]) { // empty prefix
return EncryptAfterBucket(marker, pathCipher, s.rootKey)
return EncryptAfterBucket(ctx, marker, pathCipher, s.rootKey)
}
return encryption.EncryptPath(marker, pathCipher, prefixKey)
}
// decryptMarker is a helper method for decrypting listed path markers
func (s *streamStore) decryptMarker(marker storj.Path, pathCipher storj.Cipher, prefixKey *storj.Key) (storj.Path, error) {
func (s *streamStore) decryptMarker(ctx context.Context, marker storj.Path, pathCipher storj.Cipher, prefixKey *storj.Key) (_ storj.Path, err error) {
defer mon.Task()(&ctx)(&err)
if bytes.Equal(s.rootKey[:], prefixKey[:]) { // empty prefix
return DecryptAfterBucket(marker, pathCipher, s.rootKey)
return DecryptAfterBucket(ctx, marker, pathCipher, s.rootKey)
}
return encryption.DecryptPath(marker, pathCipher, prefixKey)
}
@ -520,7 +522,8 @@ func (lr *lazySegmentRanger) Size() int64 {
}
// Range implements Ranger.Range to be lazily connected
func (lr *lazySegmentRanger) Range(ctx context.Context, offset, length int64) (io.ReadCloser, error) {
func (lr *lazySegmentRanger) Range(ctx context.Context, offset, length int64) (_ io.ReadCloser, err error) {
defer mon.Task()(&ctx)(&err)
if lr.ranger == nil {
rr, m, err := lr.segments.Get(ctx, lr.path)
if err != nil {
@ -542,6 +545,7 @@ func (lr *lazySegmentRanger) Range(ctx context.Context, offset, length int64) (i
// decryptRanger returns a decrypted ranger of the given rr ranger
func decryptRanger(ctx context.Context, rr ranger.Ranger, decryptedSize int64, cipher storj.Cipher, derivedKey *storj.Key, encryptedKey storj.EncryptedPrivateKey, encryptedKeyNonce, startingNonce *storj.Nonce, encBlockSize int) (decrypted ranger.Ranger, err error) {
defer mon.Task()(&ctx)(&err)
contentKey, err := encryption.DecryptKey(encryptedKey, cipher, derivedKey, encryptedKeyNonce)
if err != nil {
return nil, err
@ -578,7 +582,8 @@ func decryptRanger(ctx context.Context, rr ranger.Ranger, decryptedSize int64, c
}
// EncryptAfterBucket encrypts a path without encrypting its first element
func EncryptAfterBucket(path storj.Path, cipher storj.Cipher, key *storj.Key) (encrypted storj.Path, err error) {
func EncryptAfterBucket(ctx context.Context, path storj.Path, cipher storj.Cipher, key *storj.Key) (encrypted storj.Path, err error) {
defer mon.Task()(&ctx)(&err)
comps := storj.SplitPath(path)
if len(comps) <= 1 {
return path, nil
@ -594,7 +599,8 @@ func EncryptAfterBucket(path storj.Path, cipher storj.Cipher, key *storj.Key) (e
}
// DecryptAfterBucket decrypts a path without modifying its first element
func DecryptAfterBucket(path storj.Path, cipher storj.Cipher, key *storj.Key) (decrypted storj.Path, err error) {
func DecryptAfterBucket(ctx context.Context, path storj.Path, cipher storj.Cipher, key *storj.Key) (decrypted storj.Path, err error) {
defer mon.Task()(&ctx)(&err)
comps := storj.SplitPath(path)
if len(comps) <= 1 {
return path, nil
@ -618,8 +624,9 @@ func DecryptAfterBucket(path storj.Path, cipher storj.Cipher, key *storj.Key) (d
// CancelHandler handles clean up of segments on receiving CTRL+C
func (s *streamStore) cancelHandler(ctx context.Context, totalSegments int64, path storj.Path, pathCipher storj.Cipher) {
defer mon.Task()(&ctx)(nil)
for i := int64(0); i < totalSegments; i++ {
encPath, err := EncryptAfterBucket(path, pathCipher, s.rootKey)
encPath, err := EncryptAfterBucket(ctx, path, pathCipher, s.rootKey)
if err != nil {
zap.S().Warnf("Failed deleting a segment due to encryption path %v %v", i, err)
}
@ -646,6 +653,7 @@ func getEncryptedKeyAndNonce(m *pb.SegmentMeta) (storj.EncryptedPrivateKey, *sto
// DecryptStreamInfo decrypts stream info
func DecryptStreamInfo(ctx context.Context, streamMetaBytes []byte, path storj.Path, rootKey *storj.Key) (
streamInfo []byte, streamMeta pb.StreamMeta, err error) {
defer mon.Task()(&ctx)(&err)
err = proto.Unmarshal(streamMetaBytes, &streamMeta)
if err != nil {
return nil, pb.StreamMeta{}, err
@ -118,6 +118,7 @@ func (c *Client) Run(ctx context.Context) {
}
// Report bundles up all the current stats and writes them out as UDP packets
func (c *Client) Report(ctx context.Context) error {
func (c *Client) Report(ctx context.Context) (err error) {
defer mon.Task()(&ctx)(&err)
return c.send(ctx, c.opts)
}
@ -36,12 +36,14 @@ type slowTransport struct {
}
// DialNode dials a node with latency
func (client *slowTransport) DialNode(ctx context.Context, node *pb.Node, opts ...grpc.DialOption) (*grpc.ClientConn, error) {
func (client *slowTransport) DialNode(ctx context.Context, node *pb.Node, opts ...grpc.DialOption) (_ *grpc.ClientConn, err error) {
defer mon.Task()(&ctx)(&err)
return client.client.DialNode(ctx, node, append(client.network.DialOptions(), opts...)...)
}
// DialAddress dials an address with latency
func (client *slowTransport) DialAddress(ctx context.Context, address string, opts ...grpc.DialOption) (*grpc.ClientConn, error) {
func (client *slowTransport) DialAddress(ctx context.Context, address string, opts ...grpc.DialOption) (_ *grpc.ClientConn, err error) {
defer mon.Task()(&ctx)(&err)
return client.client.DialAddress(ctx, address, append(client.network.DialOptions(), opts...)...)
}
@ -61,7 +63,8 @@ func (network *SimulatedNetwork) DialOptions() []grpc.DialOption {
}
// GRPCDialContext implements DialContext that is suitable for `grpc.WithContextDialer`
func (network *SimulatedNetwork) GRPCDialContext(ctx context.Context, address string) (net.Conn, error) {
func (network *SimulatedNetwork) GRPCDialContext(ctx context.Context, address string) (_ net.Conn, err error) {
defer mon.Task()(&ctx)(&err)
timer := time.NewTimer(network.DialLatency)
defer timer.Stop()