// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.

package eestream

import (
	"context"
	"io"
	"io/ioutil"
	"os"

	"github.com/vivint/infectious"
	"go.uber.org/zap"

	"storj.io/storj/internal/fpath"
	"storj.io/storj/internal/memory"
	"storj.io/storj/internal/readcloser"
	"storj.io/storj/internal/sync2"
	"storj.io/storj/pkg/encryption"
	"storj.io/storj/pkg/pb"
	"storj.io/storj/pkg/ranger"
)

// ErasureScheme represents the general format of any erasure scheme algorithm.
// If this interface can be implemented, the rest of this library will work
// with it.
type ErasureScheme interface {
	// Encode will take 'in' and call 'out' with erasure coded pieces.
	Encode(in []byte, out func(num int, data []byte)) error

	// EncodeSingle will take 'in' with the stripe and fill 'out' with the
	// erasure share for piece 'num'.
	EncodeSingle(in, out []byte, num int) error

	// Decode will take a mapping of available erasure coded piece num -> data,
	// 'in', and append the combined data to 'out', returning it.
	Decode(out []byte, in map[int][]byte) ([]byte, error)

	// ErasureShareSize is the size of the erasure shares that come from Encode
	// and are passed to Decode.
	ErasureShareSize() int

	// StripeSize is the size of the stripes that are passed to Encode and come
	// from Decode.
	StripeSize() int

	// Encode will generate this many erasure shares and therefore this many pieces.
	TotalCount() int

	// Decode requires at least this many pieces.
	RequiredCount() int
}

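// An illustrative sketch (not from the original file) of an encode/decode
// round trip, assuming a Reed-Solomon scheme built with NewRSScheme as is
// done elsewhere in this package:
//
//	fc, err := infectious.NewFEC(2, 4) // 2 required, 4 total
//	if err != nil {
//		return err
//	}
//	es := NewRSScheme(fc, 1024) // 1 KiB erasure shares, 2 KiB stripes
//
//	stripe := make([]byte, es.StripeSize())
//	shares := make(map[int][]byte, es.TotalCount())
//	err = es.Encode(stripe, func(num int, data []byte) {
//		shares[num] = append([]byte(nil), data...) // copy; 'data' may be reused
//	})
//
//	// any RequiredCount() of the TotalCount() shares recover the stripe:
//	delete(shares, 0)
//	delete(shares, 1)
//	recovered, err := es.Decode(nil, shares)
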
// RedundancyStrategy is an ErasureScheme with repair and optimal thresholds.
type RedundancyStrategy struct {
	ErasureScheme
	repairThreshold  int
	optimalThreshold int
}

// NewRedundancyStrategy from the given ErasureScheme, repair and optimal thresholds.
//
// repairThreshold is the minimum repair threshold.
// If set to 0, it will be reset to the TotalCount of the ErasureScheme.
// optimalThreshold is the optimal threshold.
// If set to 0, it will be reset to the TotalCount of the ErasureScheme.
func NewRedundancyStrategy(es ErasureScheme, repairThreshold, optimalThreshold int) (RedundancyStrategy, error) {
	if repairThreshold == 0 {
		repairThreshold = es.TotalCount()
	}

	if optimalThreshold == 0 {
		optimalThreshold = es.TotalCount()
	}
	if repairThreshold < 0 {
		return RedundancyStrategy{}, Error.New("negative repair threshold")
	}
	if repairThreshold > 0 && repairThreshold < es.RequiredCount() {
		return RedundancyStrategy{}, Error.New("repair threshold less than required count")
	}
	if repairThreshold > es.TotalCount() {
		return RedundancyStrategy{}, Error.New("repair threshold greater than total count")
	}
	if optimalThreshold < 0 {
		return RedundancyStrategy{}, Error.New("negative optimal threshold")
	}
	if optimalThreshold > 0 && optimalThreshold < es.RequiredCount() {
		return RedundancyStrategy{}, Error.New("optimal threshold less than required count")
	}
	if optimalThreshold > es.TotalCount() {
		return RedundancyStrategy{}, Error.New("optimal threshold greater than total count")
	}
	if repairThreshold > optimalThreshold {
		return RedundancyStrategy{}, Error.New("repair threshold greater than optimal threshold")
	}
	return RedundancyStrategy{ErasureScheme: es, repairThreshold: repairThreshold, optimalThreshold: optimalThreshold}, nil
}

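// An illustrative sketch (numbers are hypothetical): a 29/35/80/95 strategy
// asks for repair once fewer than 35 healthy pieces remain and considers an
// upload successful at 80 pieces:
//
//	fc, err := infectious.NewFEC(29, 95)
//	if err != nil {
//		return err
//	}
//	rs, err := NewRedundancyStrategy(NewRSScheme(fc, 256), 35, 80)
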
// NewRedundancyStrategyFromProto creates a new RedundancyStrategy from the
// given RedundancyScheme protobuf.
func NewRedundancyStrategyFromProto(scheme *pb.RedundancyScheme) (RedundancyStrategy, error) {
	fc, err := infectious.NewFEC(int(scheme.GetMinReq()), int(scheme.GetTotal()))
	if err != nil {
		return RedundancyStrategy{}, Error.Wrap(err)
	}
	es := NewRSScheme(fc, int(scheme.GetErasureShareSize()))
	return NewRedundancyStrategy(es, int(scheme.GetRepairThreshold()), int(scheme.GetSuccessThreshold()))
}

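// A sketch of building the same strategy from its protobuf form; the field
// names here are inferred from the getters used above:
//
//	rs, err := NewRedundancyStrategyFromProto(&pb.RedundancyScheme{
//		MinReq:           29,
//		RepairThreshold:  35,
//		SuccessThreshold: 80,
//		Total:            95,
//		ErasureShareSize: 256,
//	})
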
// RepairThreshold is the number of available erasure pieces below which
// the data must be repaired to avoid loss.
func (rs *RedundancyStrategy) RepairThreshold() int {
	return rs.repairThreshold
}

// OptimalThreshold is the number of available erasure pieces above which
// there is no need for the data to be repaired.
func (rs *RedundancyStrategy) OptimalThreshold() int {
	return rs.optimalThreshold
}

type encodedReader struct {
	ctx    context.Context
	rs     RedundancyStrategy
	pieces map[int]*encodedPiece
}

// EncodeReader takes a Reader and a RedundancyStrategy and returns a slice of
// io.ReadClosers.
func EncodeReader(ctx context.Context, r io.Reader, rs RedundancyStrategy) (_ []io.ReadCloser, err error) {
	defer mon.Task()(&ctx)(&err)

	er := &encodedReader{
		ctx:    ctx,
		rs:     rs,
		pieces: make(map[int]*encodedPiece, rs.TotalCount()),
	}

	var pipeReaders []sync2.PipeReader
	var pipeWriter sync2.PipeWriter

	tempDir, inmemory, _ := fpath.GetTempData(ctx)
	if inmemory {
		// TODO: what default in-memory size will be enough?
		pipeReaders, pipeWriter, err = sync2.NewTeeInmemory(rs.TotalCount(), memory.MiB.Int64())
	} else {
		if tempDir == "" {
			tempDir = os.TempDir()
		}
		pipeReaders, pipeWriter, err = sync2.NewTeeFile(rs.TotalCount(), tempDir)
	}
	if err != nil {
		return nil, err
	}

	readers := make([]io.ReadCloser, 0, rs.TotalCount())
	for i := 0; i < rs.TotalCount(); i++ {
		er.pieces[i] = &encodedPiece{
			er:         er,
			pipeReader: pipeReaders[i],
			num:        i,
			stripeBuf:  make([]byte, rs.StripeSize()),
			shareBuf:   make([]byte, rs.ErasureShareSize()),
		}
		readers = append(readers, er.pieces[i])
	}

	go er.fillBuffer(ctx, r, pipeWriter)

	return readers, nil
}

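// An illustrative caller sketch: each returned io.ReadCloser yields the
// erasure shares of one piece and can be consumed concurrently, e.g. each
// uploaded to a different storage node (uploadPiece is hypothetical):
//
//	readers, err := EncodeReader(ctx, bytes.NewReader(segment), rs)
//	if err != nil {
//		return err
//	}
//	for num, piece := range readers {
//		go func(num int, piece io.ReadCloser) {
//			defer func() { _ = piece.Close() }()
//			uploadPiece(num, piece)
//		}(num, piece)
//	}
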
func (er *encodedReader) fillBuffer(ctx context.Context, r io.Reader, w sync2.PipeWriter) {
	var err error
	defer mon.Task()(&ctx)(&err)
	_, err = sync2.Copy(ctx, w, r)
	err = w.CloseWithError(err)
	if err != nil {
		zap.S().Error(err)
	}
}

type encodedPiece struct {
	er            *encodedReader
	pipeReader    sync2.PipeReader
	num           int
	currentStripe int64
	stripeBuf     []byte
	shareBuf      []byte
	available     int
	err           error
}

func (ep *encodedPiece) Read(p []byte) (n int, err error) {
	ctx := ep.er.ctx
	defer mon.Task()(&ctx)(&err)
	if ep.err != nil {
		return 0, ep.err
	}

	if ep.available == 0 {
		// take the next stripe from the segment buffer
		_, err := io.ReadFull(ep.pipeReader, ep.stripeBuf)
		if err != nil {
			return 0, err
		}

		// encode the num-th erasure share
		err = ep.er.rs.EncodeSingle(ep.stripeBuf, ep.shareBuf, ep.num)
		if err != nil {
			return 0, err
		}

		ep.currentStripe++
		ep.available = ep.er.rs.ErasureShareSize()
	}

	// we have some buffer remaining for this piece, so write it to the output
	off := len(ep.shareBuf) - ep.available
	n = copy(p, ep.shareBuf[off:])
	ep.available -= n

	return n, nil
}

func (ep *encodedPiece) Close() (err error) {
	ctx := ep.er.ctx
	defer mon.Task()(&ctx)(&err)
	return ep.pipeReader.Close()
}

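// Each encodedPiece is an ordinary io.ReadCloser: Read yields one erasure
// share per stripe, so a piece backed by n full stripes produces exactly
// n*ErasureShareSize() bytes before io.EOF (io.ReadFull above reports
// io.ErrUnexpectedEOF if the source is not stripe-aligned). Draining one
// piece into a hypothetical destination dst is then simply:
//
//	n, err := io.Copy(dst, piece)
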
// EncodedRanger will take an existing Ranger and provide a means to get
// multiple Ranged sub-Readers. EncodedRanger does not match the normal Ranger
// interface.
type EncodedRanger struct {
	rr ranger.Ranger
	rs RedundancyStrategy
}

// NewEncodedRanger from the given Ranger and RedundancyStrategy. See the
// comments for EncodeReader about the repair and success thresholds.
func NewEncodedRanger(rr ranger.Ranger, rs RedundancyStrategy) (*EncodedRanger, error) {
	if rr.Size()%int64(rs.StripeSize()) != 0 {
		return nil, Error.New("invalid erasure encoder and range reader combo. " +
			"range reader size must be a multiple of erasure encoder block size")
	}
	return &EncodedRanger{
		rs: rs,
		rr: rr,
	}, nil
}

// OutputSize is like Ranger.Size but returns the size of the erasure encoded
// pieces that come out.
func (er *EncodedRanger) OutputSize() int64 {
	blocks := er.rr.Size() / int64(er.rs.StripeSize())
	return blocks * int64(er.rs.ErasureShareSize())
}

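// For example (illustrative numbers): with 2 KiB stripes and 1 KiB erasure
// shares, a 16 KiB Ranger spans 8 stripes, so OutputSize is 8 * 1024 bytes
// = 8 KiB per piece.
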
// Range is like Ranger.Range, but returns a slice of Readers.
func (er *EncodedRanger) Range(ctx context.Context, offset, length int64) (_ []io.ReadCloser, err error) {
	defer mon.Task()(&ctx)(&err)
	// the offset and length given may not be block-aligned, so let's figure
	// out which blocks contain the request.
	firstBlock, blockCount := encryption.CalcEncompassingBlocks(
		offset, length, er.rs.ErasureShareSize())
	// okay, now let's encode the reader for the range containing the blocks
	r, err := er.rr.Range(ctx,
		firstBlock*int64(er.rs.StripeSize()),
		blockCount*int64(er.rs.StripeSize()))
	if err != nil {
		return nil, err
	}
	readers, err := EncodeReader(ctx, r, er.rs)
	if err != nil {
		return nil, err
	}
	for i, r := range readers {
		// the offset might start a few bytes in, so we potentially have to
		// discard the beginning bytes
		_, err := io.CopyN(ioutil.Discard, r,
			offset-firstBlock*int64(er.rs.ErasureShareSize()))
		if err != nil {
			return nil, Error.Wrap(err)
		}
		// the length might be shorter than a multiple of the block size, so
		// limit it
		readers[i] = readcloser.LimitReadCloser(r, length)
	}
	return readers, nil
}

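// An illustrative caller sketch (rr's size must be stripe-aligned, per
// NewEncodedRanger above):
//
//	er, err := NewEncodedRanger(rr, rs)
//	if err != nil {
//		return err
//	}
//	// each readers[i] serves bytes [1024, 1024+4096) of piece i's
//	// encoded stream:
//	readers, err := er.Range(ctx, 1024, 4096)
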
// CalcPieceSize calculates what would be the piece size of the encoded data
// after erasure coding data with dataSize using the given ErasureScheme.
func CalcPieceSize(dataSize int64, scheme ErasureScheme) int64 {
	stripeSize := int64(scheme.StripeSize())
	stripes := (dataSize + uint32Size + stripeSize - 1) / stripeSize

	encodedSize := stripes * int64(scheme.StripeSize())
	pieceSize := encodedSize / int64(scheme.RequiredCount())

	return pieceSize
}

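// A worked example (illustrative numbers): with RequiredCount 2 and 1 KiB
// erasure shares (so 2 KiB stripes), dataSize = 10000 bytes gives
// (10000 + uint32Size + 2047) / 2048 = 5 stripes, 5 * 2048 = 10240 encoded
// bytes, and 10240 / 2 = 5120 bytes per piece. uint32Size (a constant
// declared elsewhere in this package) accounts for the padding size marker
// appended to the data.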