storj/pkg/overlay/client.go

// Copyright (C) 2018 Storj Labs, Inc.
// See LICENSE for copying information.

package overlay

import (
	"context"

	"github.com/zeebo/errs"

	"storj.io/storj/pkg/dht"
	"storj.io/storj/pkg/pb"
	"storj.io/storj/pkg/provider"
	"storj.io/storj/pkg/transport"
)

// ClientError creates a class of errors for stack traces
var ClientError = errs.Class("Client Error")

// Client is the interface that defines an overlay client.
//
// Choose returns a list of storage nodes that fit the provided criteria.
// Amount is the maximum number of nodes to be returned.
// Space is the amount of storage and bandwidth requested, in bytes.
//
// Lookup finds a Node with the provided identifier.
//
// BulkLookup finds the Nodes with the provided identifiers.
type Client interface {
	Choose(ctx context.Context, op Options) ([]*pb.Node, error)
	Lookup(ctx context.Context, nodeID dht.NodeID) (*pb.Node, error)
	BulkLookup(ctx context.Context, nodeIDs []dht.NodeID) ([]*pb.Node, error)
}

// Overlay is the overlay concrete implementation of the client interface
type Overlay struct {
	client pb.OverlayClient
}

// Options contains parameters for selecting nodes
type Options struct {
	Amount   int
	Space    int64
	Excluded []dht.NodeID
}

// NewOverlayClient returns a new initialized Overlay Client
func NewOverlayClient(identity *provider.FullIdentity, address string) (Client, error) {
	tc := transport.NewClient(identity)
	conn, err := tc.DialAddress(context.Background(), address)
	if err != nil {
		return nil, err
	}
	return &Overlay{
		client: pb.NewOverlayClient(conn),
	}, nil
}
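
// The sketch below is illustrative only and was not part of the original file:
// it shows one way a caller might construct an overlay Client and look up a
// node. The function name and the address literal are hypothetical examples.
func exampleDialAndLookup(ctx context.Context, identity *provider.FullIdentity, target dht.NodeID) (*pb.Node, error) {
	// Dial the overlay service at the (hypothetical) address and wrap the
	// connection in the Client interface.
	client, err := NewOverlayClient(identity, "127.0.0.1:7778")
	if err != nil {
		return nil, err
	}

	// Resolve the target node ID to its current contact information.
	return client.Lookup(ctx, target)
}
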
// a compiler trick to make sure *Overlay implements Client
var _ Client = (*Overlay)(nil)

// Choose implements the client.Choose interface
func (o *Overlay) Choose(ctx context.Context, op Options) ([]*pb.Node, error) {
	var exIDs []string
	for _, id := range op.Excluded {
		exIDs = append(exIDs, id.String())
	}
	// TODO(coyle): We will also need to communicate with the reputation service here
	resp, err := o.client.FindStorageNodes(ctx, &pb.FindStorageNodesRequest{
		Opts: &pb.OverlayOptions{
			Amount:        int64(op.Amount),
			Restrictions:  &pb.NodeRestrictions{FreeDisk: op.Space},
			ExcludedNodes: exIDs,
		},
	})
	if err != nil {
		return nil, Error.Wrap(err)
	}
	return resp.GetNodes(), nil
}
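
// The sketch below is illustrative only and was not part of the original file:
// it shows a hypothetical caller requesting storage nodes while excluding
// nodes it already uses. The function name and the literal values are examples.
func exampleChooseNodes(ctx context.Context, c Client, alreadyUsed []dht.NodeID) ([]*pb.Node, error) {
	return c.Choose(ctx, Options{
		Amount:   10,          // ask for at most 10 nodes
		Space:    1 << 30,     // request roughly 1 GiB of free disk, in bytes
		Excluded: alreadyUsed, // node IDs to leave out of the selection
	})
}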

// Lookup provides a Node with the given ID
func (o *Overlay) Lookup(ctx context.Context, nodeID dht.NodeID) (*pb.Node, error) {
	resp, err := o.client.Lookup(ctx, &pb.LookupRequest{NodeID: nodeID.String()})
	if err != nil {
		return nil, err
	}
	return resp.GetNode(), nil
}

// BulkLookup provides a list of Nodes with the given IDs
func (o *Overlay) BulkLookup(ctx context.Context, nodeIDs []dht.NodeID) ([]*pb.Node, error) {
	var reqs pb.LookupRequests
	for _, v := range nodeIDs {
		reqs.Lookuprequest = append(reqs.Lookuprequest, &pb.LookupRequest{NodeID: v.String()})
	}
	resp, err := o.client.BulkLookup(ctx, &reqs)
	if err != nil {
		return nil, ClientError.Wrap(err)
	}
	var nodes []*pb.Node
	for _, v := range resp.Lookupresponse {
		nodes = append(nodes, v.Node)
	}
	return nodes, nil
}
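
// The sketch below is illustrative only and was not part of the original file:
// it shows a hypothetical helper that resolves several node IDs in one round
// trip and indexes the results. It assumes the response order matches the
// request order, which this sketch does not verify.
func exampleResolveNodes(ctx context.Context, c Client, ids []dht.NodeID) (map[string]*pb.Node, error) {
	nodes, err := c.BulkLookup(ctx, ids)
	if err != nil {
		return nil, err
	}

	// Index the returned nodes by the ID they were requested with.
	byID := make(map[string]*pb.Node, len(nodes))
	for i, n := range nodes {
		if n == nil || i >= len(ids) {
			continue
		}
		byID[ids[i].String()] = n
	}
	return byID, nil
}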