adds SegmentStore Put functionality (#138)

* lays out SegmentStore functions to implement

* Merge branch 'master' into segment-store

* adds overlay calls to put

* allows SegmentStore Put to upload a file to ecclient, then save pointer to pointerdb

* Merge branch 'master' into segment-store

* removes new overlay client instance in Put

* fixes syntax

* fixes syntax again

* fixes imports

* fixes typo

* removes pointerdb client from segmentStore struct for now

* changes SegmentStore to segmentStore

* changes parameter types to fit other function calls

* takes RedundancyStrategy out of Put params

* changes NewClient param back to take an interface (not pointer to interface)

* fixes types

* moves pointer into PutRequest in SegmentStore Put

* passes interface, not pointer to interface, to NewSegmentStore

* fixes some types

* Get returns an instance of Meta

* fixes PutRequest fields

* adds remotePieces slice to pointerdb PutRequest

* ecClient Put now takes *proto.Nodes instead of proto.Nodes

* fixes syntax

* changes ec client dial interface to use *proto.Node

* changes other instances of proto.Node to *proto.Node in ecclient pkg

* adds *proto.Node to Get and Delete functions in ecclient pkg

* changes proto.Node to pointer in ec client_test

* changes proto.Node to pointer in ec client_test

* adds ecclient and pointerdb client to the segmentstore constructor

* adds ecclient and pointerDBClient to segmentStore constructor
Natalie Villasana 2018-07-12 18:37:50 -04:00 committed by GitHub
parent 8d8350fea7
commit d82486b85b
7 changed files with 216 additions and 70 deletions


@@ -42,58 +42,58 @@ type ErasureScheme interface {
 // RedundancyStrategy is an ErasureScheme with a minimum and optimum thresholds
 type RedundancyStrategy struct {
     ErasureScheme
-    min int
-    opt int
+    Min int
+    Opt int
 }

 // NewRedundancyStrategy from the given ErasureScheme, minimum and optimum
 // thresholds
 //
-// min is the minimum threshold. If set to 0, it will be reset to the
+// Min is the minimum threshold. If set to 0, it will be reset to the
 // TotalCount of the ErasureScheme.
-// opt is the optimum threshold. If set to 0, it will be reset to the
+// Opt is the optimum threshold. If set to 0, it will be reset to the
 // TotalCount of the ErasureScheme.
-func NewRedundancyStrategy(es ErasureScheme, min, opt int) (RedundancyStrategy, error) {
-    if min == 0 {
-        min = es.TotalCount()
+func NewRedundancyStrategy(es ErasureScheme, Min, Opt int) (RedundancyStrategy, error) {
+    if Min == 0 {
+        Min = es.TotalCount()
     }
-    if opt == 0 {
-        opt = es.TotalCount()
+    if Opt == 0 {
+        Opt = es.TotalCount()
     }
-    if min < 0 {
+    if Min < 0 {
         return RedundancyStrategy{}, Error.New("negative minimum threshold")
     }
-    if min > 0 && min < es.RequiredCount() {
+    if Min > 0 && Min < es.RequiredCount() {
         return RedundancyStrategy{}, Error.New("minimum threshold less than required count")
     }
-    if min > es.TotalCount() {
+    if Min > es.TotalCount() {
         return RedundancyStrategy{}, Error.New("minimum threshold greater than total count")
     }
-    if opt < 0 {
+    if Opt < 0 {
         return RedundancyStrategy{}, Error.New("negative optimum threshold")
     }
-    if opt > 0 && opt < es.RequiredCount() {
+    if Opt > 0 && Opt < es.RequiredCount() {
         return RedundancyStrategy{}, Error.New("optimum threshold less than required count")
     }
-    if opt > es.TotalCount() {
+    if Opt > es.TotalCount() {
         return RedundancyStrategy{}, Error.New("optimum threshold greater than total count")
     }
-    if min > opt {
+    if Min > Opt {
         return RedundancyStrategy{}, Error.New("minimum threshold greater than optimum threshold")
     }
-    return RedundancyStrategy{ErasureScheme: es, min: min, opt: opt}, nil
+    return RedundancyStrategy{ErasureScheme: es, Min: Min, Opt: Opt}, nil
 }

 // MinimumThreshold is the number of available erasure pieces below which
 // the data must be repaired to avoid loss
 func (rs *RedundancyStrategy) MinimumThreshold() int {
-    return rs.min
+    return rs.Min
 }

 // OptimumThreshold is the number of available erasure pieces above which
 // there is no need for the data to be repaired
 func (rs *RedundancyStrategy) OptimumThreshold() int {
-    return rs.opt
+    return rs.Opt
 }

 type encodedReader struct {

pkg/segment/common.go (new file, 11 lines)

@@ -0,0 +1,11 @@
+// Copyright (C) 2018 Storj Labs, Inc.
+// See LICENSE for copying information.
+
+package segment
+
+import (
+    "github.com/zeebo/errs"
+)
+
+// Error is the errs class of standard segment errors
+var Error = errs.Class("segment error")

pkg/segment/segment.go (new file, 135 lines)

@@ -0,0 +1,135 @@
+// Copyright (C) 2018 Storj Labs, Inc.
+// See LICENSE for copying information.
+
+package segment
+
+import (
+    "context"
+    "fmt"
+    "io"
+    "time"
+
+    "go.uber.org/zap"
+    "google.golang.org/grpc/codes"
+    "google.golang.org/grpc/status"
+    monkit "gopkg.in/spacemonkeygo/monkit.v2"
+
+    "storj.io/storj/pkg/dht"
+    "storj.io/storj/pkg/eestream"
+    "storj.io/storj/pkg/paths"
+    "storj.io/storj/pkg/piecestore/rpc/client"
+    "storj.io/storj/pkg/ranger"
+    "storj.io/storj/pkg/storage/ec"
+    opb "storj.io/storj/protos/overlay"
+    ppb "storj.io/storj/protos/pointerdb"
+)
+
+var (
+    mon = monkit.Package()
+)
+
+// Meta describes associated Nodes and if data is Inline or Remote
+type Meta struct {
+    Inline bool
+    Nodes  []dht.NodeID
+}
+
+// Store allows Put, Get, Delete, and List methods on paths
+type Store interface {
+    Put(ctx context.Context, path paths.Path, data io.Reader, metadata []byte,
+        expiration time.Time) error
+    Get(ctx context.Context, path paths.Path) (ranger.Ranger, Meta, error)
+    Delete(ctx context.Context, path paths.Path) error
+    List(ctx context.Context, startingPath, endingPath paths.Path) (
+        paths []paths.Path, truncated bool, err error)
+}
+
+type segmentStore struct {
+    oc opb.OverlayClient
+    ec ecclient.Client
+    pc ppb.PointerDBClient
+    rs eestream.RedundancyStrategy
+}
+
+// NewSegmentStore creates a new instance of segmentStore
+func NewSegmentStore(oc opb.OverlayClient, ec ecclient.Client, pc ppb.PointerDBClient,
+    rs eestream.RedundancyStrategy) Store {
+    return &segmentStore{oc: oc, ec: ec, pc: pc, rs: rs}
+}
+
+// Put uploads a file to an erasure code client
+func (s *segmentStore) Put(ctx context.Context, path paths.Path, data io.Reader,
+    metadata []byte, expiration time.Time) (err error) {
+    defer mon.Task()(&ctx)(&err)
+
+    // uses overlay client to request a list of nodes
+    nodeRes, err := s.oc.FindStorageNodes(ctx, &opb.FindStorageNodesRequest{})
+    if err != nil {
+        return Error.Wrap(err)
+    }
+
+    pieceID := client.NewPieceID()
+
+    // puts file to ecclient
+    err = s.ec.Put(ctx, nodeRes.GetNodes(), s.rs, pieceID, data, expiration)
+    if err != nil {
+        zap.S().Error("Failed putting nodes to ecclient")
+        return Error.Wrap(err)
+    }
+
+    var remotePieces []*ppb.RemotePiece
+    for i := range nodeRes.Nodes {
+        remotePieces = append(remotePieces, &ppb.RemotePiece{
+            PieceNum: int64(i),
+            NodeId:   nodeRes.Nodes[i].Id,
+        })
+    }
+
+    // creates pointer
+    pr := ppb.PutRequest{
+        Path: []byte(fmt.Sprintf("%s", path)),
+        Pointer: &ppb.Pointer{
+            Type: ppb.Pointer_REMOTE,
+            Remote: &ppb.RemoteSegment{
+                Redundancy: &ppb.RedundancyScheme{
+                    Type:             ppb.RedundancyScheme_RS,
+                    MinReq:           int64(s.rs.RequiredCount()),
+                    Total:            int64(s.rs.TotalCount()),
+                    RepairThreshold:  int64(s.rs.Min),
+                    SuccessThreshold: int64(s.rs.Opt),
+                },
+                PieceId:      string(pieceID),
+                RemotePieces: remotePieces,
+            },
+        },
+        APIKey: nil,
+    }
+
+    // puts pointer to pointerDB
+    _, err = s.pc.Put(ctx, &pr)
+    if err != nil || status.Code(err) == codes.Internal {
+        zap.L().Error("failed to put", zap.Error(err))
+        return Error.Wrap(err)
+    }
+    return nil
+}
+
+// Get retrieves a file from the erasure code client with help from overlay and pointerdb
+func (s *segmentStore) Get(ctx context.Context, path paths.Path) (ranger.Ranger, Meta, error) {
+    m := Meta{
+        Inline: true,
+        Nodes:  nil,
+    }
+    return nil, m, nil
+}
+
+// Delete issues deletes of a file to all piece stores and deletes from pointerdb
+func (s *segmentStore) Delete(ctx context.Context, path paths.Path) error {
+    return nil
+}
+
+// List lists paths stored in the pointerdb
+func (s *segmentStore) List(ctx context.Context, startingPath, endingPath paths.Path) (
+    paths []paths.Path, truncated bool, err error) {
+    return nil, true, nil
+}


@@ -22,22 +22,22 @@ var mon = monkit.Package()

 // Client defines an interface for storing erasure coded data to piece store nodes
 type Client interface {
-    Put(ctx context.Context, nodes []proto.Node, rs eestream.RedundancyStrategy,
+    Put(ctx context.Context, nodes []*proto.Node, rs eestream.RedundancyStrategy,
         pieceID client.PieceID, data io.Reader, expiration time.Time) error
-    Get(ctx context.Context, nodes []proto.Node, es eestream.ErasureScheme,
+    Get(ctx context.Context, nodes []*proto.Node, es eestream.ErasureScheme,
         pieceID client.PieceID, size int64) (ranger.RangeCloser, error)
-    Delete(ctx context.Context, nodes []proto.Node, pieceID client.PieceID) error
+    Delete(ctx context.Context, nodes []*proto.Node, pieceID client.PieceID) error
 }

 type dialer interface {
-    dial(ctx context.Context, node proto.Node) (ps client.PSClient, err error)
+    dial(ctx context.Context, node *proto.Node) (ps client.PSClient, err error)
 }

 type defaultDialer struct {
     t transport.Client
 }

-func (d *defaultDialer) dial(ctx context.Context, node proto.Node) (ps client.PSClient, err error) {
+func (d *defaultDialer) dial(ctx context.Context, node *proto.Node) (ps client.PSClient, err error) {
     defer mon.Task()(&ctx)(&err)
     c, err := d.t.DialNode(ctx, node)
     if err != nil {
@@ -56,7 +56,7 @@ func NewClient(t transport.Client, mbm int) Client {
     return &ecClient{d: &defaultDialer{t: t}, mbm: mbm}
 }

-func (ec *ecClient) Put(ctx context.Context, nodes []proto.Node, rs eestream.RedundancyStrategy,
+func (ec *ecClient) Put(ctx context.Context, nodes []*proto.Node, rs eestream.RedundancyStrategy,
     pieceID client.PieceID, data io.Reader, expiration time.Time) (err error) {
     defer mon.Task()(&ctx)(&err)
     if len(nodes) != rs.TotalCount() {
@@ -68,7 +68,7 @@ func (ec *ecClient) Put(ctx context.Context, nodes []proto.Node, rs eestream.Red
     }
     errs := make(chan error, len(readers))
     for i, n := range nodes {
-        go func(i int, n proto.Node) {
+        go func(i int, n *proto.Node) {
             derivedPieceID := pieceID.Derive([]byte(n.GetId()))
             ps, err := ec.d.dial(ctx, n)
             if err != nil {
@@ -96,7 +96,7 @@ func (ec *ecClient) Put(ctx context.Context, nodes []proto.Node, rs eestream.Red
     return nil
 }

-func (ec *ecClient) Get(ctx context.Context, nodes []proto.Node, es eestream.ErasureScheme,
+func (ec *ecClient) Get(ctx context.Context, nodes []*proto.Node, es eestream.ErasureScheme,
     pieceID client.PieceID, size int64) (rr ranger.RangeCloser, err error) {
     defer mon.Task()(&ctx)(&err)
     if len(nodes) != es.TotalCount() {
@@ -110,7 +110,7 @@ func (ec *ecClient) Get(ctx context.Context, nodes []proto.Node, es eestream.Era
     }
     ch := make(chan rangerInfo, len(nodes))
     for i, n := range nodes {
-        go func(i int, n proto.Node) {
+        go func(i int, n *proto.Node) {
             derivedPieceID := pieceID.Derive([]byte(n.GetId()))
             ps, err := ec.d.dial(ctx, n)
             if err != nil {
@@ -138,11 +138,11 @@ func (ec *ecClient) Get(ctx context.Context, nodes []proto.Node, es eestream.Era
     return eestream.Decode(rrs, es, ec.mbm)
 }

-func (ec *ecClient) Delete(ctx context.Context, nodes []proto.Node, pieceID client.PieceID) (err error) {
+func (ec *ecClient) Delete(ctx context.Context, nodes []*proto.Node, pieceID client.PieceID) (err error) {
     defer mon.Task()(&ctx)(&err)
     errs := make(chan error, len(nodes))
     for _, n := range nodes {
-        go func(n proto.Node) {
+        go func(n *proto.Node) {
             derivedPieceID := pieceID.Derive([]byte(n.GetId()))
             ps, err := ec.d.dial(ctx, n)
             if err != nil {


@@ -33,17 +33,17 @@ var (
 )

 var (
-    node0 = proto.Node{Id: "node-0"}
-    node1 = proto.Node{Id: "node-1"}
-    node2 = proto.Node{Id: "node-2"}
-    node3 = proto.Node{Id: "node-3"}
+    node0 = &proto.Node{Id: "node-0"}
+    node1 = &proto.Node{Id: "node-1"}
+    node2 = &proto.Node{Id: "node-2"}
+    node3 = &proto.Node{Id: "node-3"}
 )

 type mockDialer struct {
-    m map[proto.Node]client.PSClient
+    m map[*proto.Node]client.PSClient
 }

-func (d *mockDialer) dial(ctx context.Context, node proto.Node) (
+func (d *mockDialer) dial(ctx context.Context, node *proto.Node) (
     ps client.PSClient, err error) {
     ps = d.m[node]
     if ps == nil {
@@ -107,28 +107,28 @@ func TestPut(t *testing.T) {
     defer ctrl.Finish()

     for i, tt := range []struct {
-        nodes     []proto.Node
+        nodes     []*proto.Node
         min       int
         mbm       int
         errs      []error
         errString string
     }{
-        {[]proto.Node{}, 0, 0, []error{}, "ecclient error: " +
+        {[]*proto.Node{}, 0, 0, []error{}, "ecclient error: " +
             "number of nodes do not match total count of erasure scheme"},
-        {[]proto.Node{node0, node1, node2, node3}, 0, -1,
+        {[]*proto.Node{node0, node1, node2, node3}, 0, -1,
             []error{nil, nil, nil, nil},
             "eestream error: negative max buffer memory"},
-        {[]proto.Node{node0, node1, node2, node3}, 0, 0,
+        {[]*proto.Node{node0, node1, node2, node3}, 0, 0,
             []error{nil, nil, nil, nil}, ""},
-        {[]proto.Node{node0, node1, node2, node3}, 0, 0,
+        {[]*proto.Node{node0, node1, node2, node3}, 0, 0,
             []error{nil, ErrDialFailed, nil, nil},
             "ecclient error: successful puts (3) less than minimum threshold (4)"},
-        {[]proto.Node{node0, node1, node2, node3}, 0, 0,
+        {[]*proto.Node{node0, node1, node2, node3}, 0, 0,
             []error{nil, ErrOpFailed, nil, nil},
             "ecclient error: successful puts (3) less than minimum threshold (4)"},
-        {[]proto.Node{node0, node1, node2, node3}, 2, 0,
+        {[]*proto.Node{node0, node1, node2, node3}, 2, 0,
             []error{nil, ErrDialFailed, nil, nil}, ""},
-        {[]proto.Node{node0, node1, node2, node3}, 2, 0,
+        {[]*proto.Node{node0, node1, node2, node3}, 2, 0,
             []error{ErrOpFailed, ErrDialFailed, nil, ErrDialFailed},
             "ecclient error: successful puts (1) less than minimum threshold (2)"},
     } {
@@ -138,12 +138,12 @@ func TestPut(t *testing.T) {
         size := 32 * 1024
         ttl := time.Now()

-        errs := make(map[proto.Node]error, len(tt.nodes))
+        errs := make(map[*proto.Node]error, len(tt.nodes))
         for i, n := range tt.nodes {
             errs[n] = tt.errs[i]
         }

-        m := make(map[proto.Node]client.PSClient, len(tt.nodes))
+        m := make(map[*proto.Node]client.PSClient, len(tt.nodes))
         for _, n := range tt.nodes {
             if errs[n] != ErrDialFailed && tt.mbm >= 0 {
                 derivedID := id.Derive([]byte(n.GetId()))
@@ -183,26 +183,26 @@ func TestGet(t *testing.T) {
     defer ctrl.Finish()

     for i, tt := range []struct {
-        nodes     []proto.Node
+        nodes     []*proto.Node
         mbm       int
         errs      []error
         errString string
     }{
-        {[]proto.Node{}, 0, []error{}, "ecclient error: " +
+        {[]*proto.Node{}, 0, []error{}, "ecclient error: " +
             "number of nodes do not match total count of erasure scheme"},
-        {[]proto.Node{node0, node1, node2, node3}, -1,
+        {[]*proto.Node{node0, node1, node2, node3}, -1,
             []error{nil, nil, nil, nil},
             "eestream error: negative max buffer memory"},
-        {[]proto.Node{node0, node1, node2, node3}, 0,
+        {[]*proto.Node{node0, node1, node2, node3}, 0,
             []error{nil, nil, nil, nil}, ""},
-        {[]proto.Node{node0, node1, node2, node3}, 0,
+        {[]*proto.Node{node0, node1, node2, node3}, 0,
             []error{nil, ErrDialFailed, nil, nil}, ""},
-        {[]proto.Node{node0, node1, node2, node3}, 0,
+        {[]*proto.Node{node0, node1, node2, node3}, 0,
             []error{nil, ErrOpFailed, nil, nil}, ""},
-        {[]proto.Node{node0, node1, node2, node3}, 0,
+        {[]*proto.Node{node0, node1, node2, node3}, 0,
             []error{ErrOpFailed, ErrDialFailed, nil, ErrDialFailed},
             "eestream error: not enough readers to reconstruct data!"},
-        {[]proto.Node{node0, node1, node2, node3}, 0,
+        {[]*proto.Node{node0, node1, node2, node3}, 0,
             []error{ErrDialFailed, ErrOpFailed, ErrOpFailed, ErrDialFailed},
             "eestream error: not enough readers to reconstruct data!"},
     } {
@@ -211,12 +211,12 @@ func TestGet(t *testing.T) {
         id := client.NewPieceID()
         size := 32 * 1024

-        errs := make(map[proto.Node]error, len(tt.nodes))
+        errs := make(map[*proto.Node]error, len(tt.nodes))
         for i, n := range tt.nodes {
             errs[n] = tt.errs[i]
         }

-        m := make(map[proto.Node]client.PSClient, len(tt.nodes))
+        m := make(map[*proto.Node]client.PSClient, len(tt.nodes))
         for _, n := range tt.nodes {
             if errs[n] != ErrDialFailed {
                 derivedID := id.Derive([]byte(n.GetId()))
@@ -250,30 +250,30 @@ func TestDelete(t *testing.T) {
     defer ctrl.Finish()

     for i, tt := range []struct {
-        nodes     []proto.Node
+        nodes     []*proto.Node
         errs      []error
        errString string
     }{
-        {[]proto.Node{}, []error{}, ""},
-        {[]proto.Node{node0}, []error{nil}, ""},
-        {[]proto.Node{node0}, []error{ErrDialFailed}, dialFailed},
-        {[]proto.Node{node0}, []error{ErrOpFailed}, opFailed},
-        {[]proto.Node{node0, node1}, []error{nil, nil}, ""},
-        {[]proto.Node{node0, node1}, []error{ErrDialFailed, nil}, ""},
-        {[]proto.Node{node0, node1}, []error{nil, ErrOpFailed}, ""},
-        {[]proto.Node{node0, node1}, []error{ErrDialFailed, ErrDialFailed}, dialFailed},
-        {[]proto.Node{node0, node1}, []error{ErrOpFailed, ErrOpFailed}, opFailed},
+        {[]*proto.Node{}, []error{}, ""},
+        {[]*proto.Node{node0}, []error{nil}, ""},
+        {[]*proto.Node{node0}, []error{ErrDialFailed}, dialFailed},
+        {[]*proto.Node{node0}, []error{ErrOpFailed}, opFailed},
+        {[]*proto.Node{node0, node1}, []error{nil, nil}, ""},
+        {[]*proto.Node{node0, node1}, []error{ErrDialFailed, nil}, ""},
+        {[]*proto.Node{node0, node1}, []error{nil, ErrOpFailed}, ""},
+        {[]*proto.Node{node0, node1}, []error{ErrDialFailed, ErrDialFailed}, dialFailed},
+        {[]*proto.Node{node0, node1}, []error{ErrOpFailed, ErrOpFailed}, opFailed},
     } {
         errTag := fmt.Sprintf("Test case #%d", i)
         id := client.NewPieceID()

-        errs := make(map[proto.Node]error, len(tt.nodes))
+        errs := make(map[*proto.Node]error, len(tt.nodes))
         for i, n := range tt.nodes {
             errs[n] = tt.errs[i]
         }

-        m := make(map[proto.Node]client.PSClient, len(tt.nodes))
+        m := make(map[*proto.Node]client.PSClient, len(tt.nodes))
         for _, n := range tt.nodes {
             if errs[n] != ErrDialFailed {
                 derivedID := id.Derive([]byte(n.GetId()))
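The switch to map[*proto.Node] keys changes lookup semantics: pointer keys compare by identity, not by struct contents. The tests stay correct because the shared node0..node3 pointers are used to build both the errs and m maps. A small sketch of the distinction, with the same assumed proto alias:

package main

import (
    "fmt"

    proto "storj.io/storj/protos/overlay"
)

func main() {
    a := &proto.Node{Id: "node-0"}
    b := &proto.Node{Id: "node-0"} // same contents, separate allocation

    m := map[*proto.Node]error{a: nil}

    _, okA := m[a]
    _, okB := m[b]
    fmt.Println(okA, okB) // true false: pointer keys compare by identity
}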


@@ -40,7 +40,7 @@ func (m *MockClient) EXPECT() *MockClientMockRecorder {
 }

 // DialNode mocks base method
-func (m *MockClient) DialNode(arg0 context.Context, arg1 overlay.Node) (*grpc.ClientConn, error) {
+func (m *MockClient) DialNode(arg0 context.Context, arg1 *overlay.Node) (*grpc.ClientConn, error) {
     ret := m.ctrl.Call(m, "DialNode", arg0, arg1)
     ret0, _ := ret[0].(*grpc.ClientConn)
     ret1, _ := ret[1].(error)
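With the regenerated mock, recorded expectations now match on a *overlay.Node argument. A hedged sketch of test setup under that assumption; NewMockClient is the standard gomock-generated constructor and gomock.Any comes from github.com/golang/mock/gomock:

// Inside a test function with *testing.T available:
ctrl := gomock.NewController(t)
defer ctrl.Finish()

mc := NewMockClient(ctrl)
node := &overlay.Node{Id: "node-0"}

// The expectation takes a pointer; passing a value would no longer match.
mc.EXPECT().DialNode(gomock.Any(), node).Return(nil, nil)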


@@ -22,5 +22,5 @@ var (
 // Client defines the interface to an transport client.
 type Client interface {
     DialUnauthenticated(ctx context.Context, addr proto.NodeAddress) (*grpc.ClientConn, error)
-    DialNode(ctx context.Context, node proto.Node) (*grpc.ClientConn, error)
+    DialNode(ctx context.Context, node *proto.Node) (*grpc.ClientConn, error)
 }