uplink: move to storj.io/uplink (#3746)

Egon Elbre 2020-01-08 15:40:19 +02:00 committed by GitHub
parent cf2128d3b9
commit 082ec81714
82 changed files with 66 additions and 7448 deletions
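The change is mostly mechanical: imports of the in-tree packages under storj.io/storj/uplink/... are rewritten to the extracted storj.io/uplink module, and go.mod/go.sum pick up the new dependency. A minimal sketch of what the rewrite looks like in a downstream file (the package name and blank-identifier references below are illustrative, not part of this commit):

package example

import (
	// The import paths lose the intermediate "storj" element; the package APIs stay the same.
	"storj.io/uplink/eestream"   // was "storj.io/storj/uplink/eestream"
	"storj.io/uplink/piecestore" // was "storj.io/storj/uplink/piecestore"
)

// Referencing exported names keeps the example compilable.
var (
	_ = eestream.Error
	_ = piecestore.DefaultConfig
)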

View File

@@ -57,7 +57,7 @@ pipeline {
environment {
STORJ_COCKROACH_TEST = 'cockroach://root@localhost:26257/testcockroach?sslmode=disable'
STORJ_POSTGRES_TEST = 'postgres://postgres@localhost/teststorj?sslmode=disable'
-COVERFLAGS = "${ env.BRANCH_NAME != 'master' ? '' : '-coverprofile=.build/coverprofile -coverpkg=storj.io/storj/private/...,storj.io/storj/lib/...,storj.io/storj/pkg/...,storj.io/storj/satellite/...,storj.io/storj/storage/...,storj.io/storj/storagenode/...,storj.io/storj/uplink/...,storj.io/storj/versioncontrol/...'}"
+COVERFLAGS = "${ env.BRANCH_NAME != 'master' ? '' : '-coverprofile=.build/coverprofile -coverpkg=storj.io/storj/private/...,storj.io/storj/lib/...,storj.io/storj/pkg/...,storj.io/storj/satellite/...,storj.io/storj/storage/...,storj.io/storj/storagenode/...,storj.io/storj/versioncontrol/...'}"
}
steps {
sh 'cockroach sql --insecure --host=localhost:26257 -e \'create database testcockroach;\''

View File

@@ -23,7 +23,7 @@ import (
"storj.io/common/rpc"
"storj.io/common/storj"
"storj.io/storj/pkg/process"
-"storj.io/storj/uplink/eestream"
+"storj.io/uplink/eestream"
)
var (

View File

@@ -18,7 +18,7 @@ import (
"storj.io/common/encryption"
"storj.io/common/ranger"
"storj.io/common/storj"
-"storj.io/storj/uplink/eestream"
+"storj.io/uplink/eestream"
)
var (

View File

@@ -22,7 +22,7 @@ import (
"storj.io/common/encryption"
"storj.io/common/ranger"
"storj.io/common/storj"
-"storj.io/storj/uplink/eestream"
+"storj.io/uplink/eestream"
)
var (

View File

@@ -17,7 +17,7 @@ import (
"storj.io/common/encryption"
"storj.io/common/storj"
-"storj.io/storj/uplink/eestream"
+"storj.io/uplink/eestream"
)
var (

go.mod
View File

@@ -82,7 +82,6 @@ require (
github.com/skyrings/skyring-common v0.0.0-20160929130248-d1c0bb1cbd5e
github.com/smartystreets/assertions v0.0.0-20180820201707-7c9eb446e3cf // indirect
github.com/smartystreets/goconvey v0.0.0-20180222194500-ef6db91d284a // indirect
-github.com/spacemonkeygo/errors v0.0.0-20171212215202-9064522e9fd1 // indirect
github.com/spf13/cast v1.3.0
github.com/spf13/cobra v0.0.5
github.com/spf13/pflag v1.0.3
@@ -114,4 +113,5 @@ require (
gopkg.in/yaml.v2 v2.2.2
storj.io/common v0.0.0-20200108114547-1c62e5708bce
storj.io/drpc v0.0.7-0.20191115031725-2171c57838d2
+storj.io/uplink v0.0.0-20200108132132-c2c5e0d46c1a
)

go.sum
View File

@@ -590,3 +590,5 @@ storj.io/common v0.0.0-20200108114547-1c62e5708bce h1:0okFmuEp38BNSKXQ72PXMTouSY
storj.io/common v0.0.0-20200108114547-1c62e5708bce/go.mod h1:mDnchZF+e7g7u630Wwgu/X/zCmqpU0lvy5kQ3jQgs5k=
storj.io/drpc v0.0.7-0.20191115031725-2171c57838d2 h1:8SgLYEhe99R8QlAD1EAOBPRyIR+cn2hqkXtWlAUPf/c=
storj.io/drpc v0.0.7-0.20191115031725-2171c57838d2/go.mod h1:/ascUDbzNAv0A3Jj7wUIKFBH2JdJ2uJIBO/b9+2yHgQ=
+storj.io/uplink v0.0.0-20200108132132-c2c5e0d46c1a h1:w/588H+U5IfTXCHA2GTFVLzpUbworS0DtoB4sR9h/8M=
+storj.io/uplink v0.0.0-20200108132132-c2c5e0d46c1a/go.mod h1:3498FK1ewiOxrVTbPwGJmE/kwIWA3q9ULtAU/WAreys=

View File

@@ -11,9 +11,9 @@ import (
"github.com/zeebo/errs"
"storj.io/common/storj"
-"storj.io/storj/uplink/metainfo/kvmetainfo"
-"storj.io/storj/uplink/storage/streams"
-"storj.io/storj/uplink/stream"
+"storj.io/uplink/metainfo/kvmetainfo"
+"storj.io/uplink/storage/streams"
+"storj.io/uplink/stream"
)
// Bucket represents operations you can perform on a bucket

View File

@@ -9,9 +9,9 @@ import (
"time"
"storj.io/common/storj"
-"storj.io/storj/uplink/metainfo/kvmetainfo"
-"storj.io/storj/uplink/storage/streams"
-"storj.io/storj/uplink/stream"
+"storj.io/uplink/metainfo/kvmetainfo"
+"storj.io/uplink/storage/streams"
+"storj.io/uplink/stream"
)
// ObjectMeta contains metadata about a specific Object.

View File

@@ -13,12 +13,12 @@ import (
"storj.io/common/memory"
"storj.io/common/rpc"
"storj.io/common/storj"
-"storj.io/storj/uplink/ecclient"
-"storj.io/storj/uplink/eestream"
-"storj.io/storj/uplink/metainfo"
-"storj.io/storj/uplink/metainfo/kvmetainfo"
-"storj.io/storj/uplink/storage/segments"
-"storj.io/storj/uplink/storage/streams"
+"storj.io/uplink/ecclient"
+"storj.io/uplink/eestream"
+"storj.io/uplink/metainfo"
+"storj.io/uplink/metainfo/kvmetainfo"
+"storj.io/uplink/storage/segments"
+"storj.io/uplink/storage/streams"
)
// Project represents a specific project access session.

View File

@@ -14,8 +14,8 @@ import (
"storj.io/common/memory"
"storj.io/common/peertls/tlsopts"
"storj.io/common/rpc"
-"storj.io/storj/uplink/metainfo"
-"storj.io/storj/uplink/metainfo/kvmetainfo"
+"storj.io/uplink/metainfo"
+"storj.io/uplink/metainfo/kvmetainfo"
)
const defaultUplinkDialTimeout = 20 * time.Second

View File

@@ -68,4 +68,3 @@ storj.io/storj/storagenode/contact."satellite_contact_request" Meter
storj.io/storj/storagenode/gracefulexit."satellite_gracefulexit_request" Meter
storj.io/storj/storagenode/monitor."allocated_bandwidth" IntVal
storj.io/storj/storagenode/monitor."used_bandwidth" IntVal
-storj.io/storj/uplink/eestream."download_stripe_failed_not_enough_pieces_uplink" Meter

View File

@@ -18,9 +18,9 @@ import (
"storj.io/common/memory"
"storj.io/common/storj"
"storj.io/storj/lib/uplink"
-"storj.io/storj/uplink/metainfo/kvmetainfo"
-"storj.io/storj/uplink/storage/streams"
-"storj.io/storj/uplink/stream"
+"storj.io/uplink/metainfo/kvmetainfo"
+"storj.io/uplink/storage/streams"
+"storj.io/uplink/stream"
)
var (

View File

@@ -27,11 +27,11 @@ import (
libuplink "storj.io/storj/lib/uplink"
"storj.io/storj/private/testplanet"
"storj.io/storj/satellite/console"
-"storj.io/storj/uplink/ecclient"
-"storj.io/storj/uplink/eestream"
-"storj.io/storj/uplink/metainfo/kvmetainfo"
-"storj.io/storj/uplink/storage/segments"
-"storj.io/storj/uplink/storage/streams"
+"storj.io/uplink/ecclient"
+"storj.io/uplink/eestream"
+"storj.io/uplink/metainfo/kvmetainfo"
+"storj.io/uplink/storage/segments"
+"storj.io/uplink/storage/streams"
)
const (

View File

@@ -11,6 +11,7 @@ import (
"github.com/mattn/go-sqlite3"
"github.com/zeebo/errs"
"storj.io/storj/private/dbutil/txutil"
"storj.io/storj/private/migrate"
)

View File

@@ -12,6 +12,7 @@ import (
"github.com/cockroachdb/cockroach-go/crdb"
"github.com/zeebo/errs"
"storj.io/storj/private/dbutil/cockroachutil"
)

View File

@@ -28,8 +28,8 @@ import (
libuplink "storj.io/storj/lib/uplink"
"storj.io/storj/pkg/cfgstruct"
"storj.io/storj/satellite/console"
-"storj.io/storj/uplink/metainfo"
-"storj.io/storj/uplink/piecestore"
+"storj.io/uplink/metainfo"
+"storj.io/uplink/piecestore"
)
// Uplink is a general purpose

View File

@@ -26,7 +26,7 @@ import (
"storj.io/storj/pkg/server"
"storj.io/storj/private/testplanet"
"storj.io/storj/satellite/overlay"
-"storj.io/storj/uplink/metainfo"
+"storj.io/uplink/metainfo"
)
func TestUplinksParallel(t *testing.T) {

View File

@@ -19,11 +19,11 @@ import (
"storj.io/common/testcontext"
"storj.io/storj/private/testplanet"
"storj.io/storj/satellite/console"
-"storj.io/storj/uplink/ecclient"
-"storj.io/storj/uplink/eestream"
-"storj.io/storj/uplink/metainfo/kvmetainfo"
-"storj.io/storj/uplink/storage/segments"
-"storj.io/storj/uplink/storage/streams"
+"storj.io/uplink/ecclient"
+"storj.io/uplink/eestream"
+"storj.io/uplink/metainfo/kvmetainfo"
+"storj.io/uplink/storage/segments"
+"storj.io/uplink/storage/streams"
)
const (

View File

@@ -24,8 +24,8 @@ import (
"storj.io/common/testrand"
"storj.io/storj/private/testplanet"
"storj.io/storj/storagenode"
-"storj.io/storj/uplink/ecclient"
-"storj.io/storj/uplink/eestream"
+"storj.io/uplink/ecclient"
+"storj.io/uplink/eestream"
)
const (

View File

@@ -22,9 +22,9 @@ import (
"storj.io/common/testcontext"
"storj.io/common/testrand"
"storj.io/storj/private/testplanet"
-"storj.io/storj/uplink/metainfo/kvmetainfo"
-"storj.io/storj/uplink/storage/streams"
-"storj.io/storj/uplink/stream"
+"storj.io/uplink/metainfo/kvmetainfo"
+"storj.io/uplink/storage/streams"
+"storj.io/uplink/stream"
)
const TestFile = "test-file"

View File

@@ -27,8 +27,8 @@ import (
"storj.io/storj/satellite/metainfo"
"storj.io/storj/satellite/orders"
"storj.io/storj/satellite/overlay"
-"storj.io/storj/uplink/eestream"
-"storj.io/storj/uplink/piecestore"
+"storj.io/uplink/eestream"
+"storj.io/uplink/piecestore"
)
var (

View File

@@ -8,6 +8,7 @@ import (
"github.com/zeebo/errs"
"go.uber.org/zap"
"storj.io/common/storj"
"storj.io/storj/satellite/contact"
"storj.io/storj/satellite/overlay"

View File

@@ -18,7 +18,7 @@ import (
"storj.io/common/sync2"
"storj.io/storj/satellite/metainfo"
"storj.io/storj/satellite/overlay"
-"storj.io/storj/uplink/piecestore"
+"storj.io/uplink/piecestore"
)
var (

View File

@@ -23,7 +23,7 @@ import (
"storj.io/storj/satellite/metainfo"
"storj.io/storj/satellite/orders"
"storj.io/storj/satellite/overlay"
-"storj.io/storj/uplink/eestream"
+"storj.io/uplink/eestream"
)
// millis for the transfer queue building ticker

View File

@@ -13,7 +13,7 @@ import (
"storj.io/common/pb"
"storj.io/common/storj"
"storj.io/storj/satellite/metainfo"
-"storj.io/storj/uplink/eestream"
+"storj.io/uplink/eestream"
)
var _ metainfo.Observer = (*PathCollector)(nil)

View File

@@ -19,7 +19,7 @@ import (
"storj.io/common/testrand"
"storj.io/storj/private/testplanet"
"storj.io/storj/storage"
-"storj.io/storj/uplink/eestream"
+"storj.io/uplink/eestream"
)
func TestInspectorStats(t *testing.T) {

View File

@@ -34,9 +34,9 @@ import (
"storj.io/storj/satellite/orders"
"storj.io/storj/satellite/overlay"
"storj.io/storj/satellite/rewards"
-"storj.io/storj/uplink/eestream"
-"storj.io/storj/uplink/piecestore"
-"storj.io/storj/uplink/storage/meta"
+"storj.io/uplink/eestream"
+"storj.io/uplink/piecestore"
+"storj.io/uplink/storage/meta"
)
const (

View File

@@ -24,8 +24,8 @@ import (
"storj.io/common/testcontext"
"storj.io/storj/private/testplanet"
"storj.io/storj/satellite"
-"storj.io/storj/uplink/eestream"
-"storj.io/storj/uplink/metainfo"
+"storj.io/uplink/eestream"
+"storj.io/uplink/metainfo"
)
func TestInvalidAPIKeyOld(t *testing.T) {

View File

@@ -26,7 +26,7 @@ import (
"storj.io/common/testrand"
"storj.io/storj/private/testplanet"
"storj.io/storj/satellite"
-"storj.io/storj/uplink/metainfo"
+"storj.io/uplink/metainfo"
)
func TestInvalidAPIKey(t *testing.T) {

View File

@@ -15,7 +15,7 @@ import (
"storj.io/common/pb"
"storj.io/common/storj"
"storj.io/storj/storage"
-"storj.io/storj/uplink/storage/meta"
+"storj.io/uplink/storage/meta"
)
// Service structure

View File

@@ -17,7 +17,7 @@ import (
"storj.io/common/signing"
"storj.io/common/storj"
"storj.io/storj/satellite/overlay"
-"storj.io/storj/uplink/eestream"
+"storj.io/uplink/eestream"
)
// ErrDownloadFailedNotEnoughPieces is returned when download failed due to missing pieces

View File

@@ -24,8 +24,8 @@ import (
"storj.io/common/signing"
"storj.io/common/storj"
"storj.io/common/sync2"
-"storj.io/storj/uplink/eestream"
-"storj.io/storj/uplink/piecestore"
+"storj.io/uplink/eestream"
+"storj.io/uplink/piecestore"
)
// ErrPieceHashVerifyFailed is the errs class when a piece hash downloaded from storagenode fails to match the original hash.

View File

@@ -18,7 +18,7 @@ import (
"storj.io/storj/satellite/metainfo"
"storj.io/storj/satellite/orders"
"storj.io/storj/satellite/overlay"
-"storj.io/storj/uplink/eestream"
+"storj.io/uplink/eestream"
)
// IrreparableError is the errs class of irreparable segment errors

View File

@@ -9,6 +9,7 @@ import (
"github.com/zeebo/errs"
"gopkg.in/spacemonkeygo/monkit.v2"
"storj.io/storj/private/dbutil/txutil"
// load our cockroach sql driver for anywhere that uses this dbx.Open

View File

@@ -23,7 +23,7 @@ import (
"storj.io/storj/storagenode/pieces"
"storj.io/storj/storagenode/piecestore"
"storj.io/storj/storagenode/satellites"
-"storj.io/storj/uplink/ecclient"
+"storj.io/uplink/ecclient"
)
// Worker is responsible for completing the graceful exit for a given satellite.

View File

@@ -29,7 +29,7 @@ import (
"storj.io/storj/private/testplanet"
"storj.io/storj/storagenode"
"storj.io/storj/storagenode/bandwidth"
-"storj.io/storj/uplink/piecestore"
+"storj.io/uplink/piecestore"
)
func TestUploadAndPartialDownload(t *testing.T) {

View File

@@ -1,502 +0,0 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.
package ecclient
import (
"context"
"io"
"io/ioutil"
"sort"
"sync"
"time"
"github.com/zeebo/errs"
"go.uber.org/zap"
monkit "gopkg.in/spacemonkeygo/monkit.v2"
"storj.io/common/encryption"
"storj.io/common/errs2"
"storj.io/common/groupcancel"
"storj.io/common/identity"
"storj.io/common/pb"
"storj.io/common/ranger"
"storj.io/common/rpc"
"storj.io/common/storj"
"storj.io/common/sync2"
"storj.io/storj/uplink/eestream"
"storj.io/storj/uplink/piecestore"
)
var mon = monkit.Package()
// Client defines an interface for storing erasure coded data to piece store nodes
type Client interface {
Put(ctx context.Context, limits []*pb.AddressedOrderLimit, privateKey storj.PiecePrivateKey, rs eestream.RedundancyStrategy, data io.Reader, expiration time.Time) (successfulNodes []*pb.Node, successfulHashes []*pb.PieceHash, err error)
Get(ctx context.Context, limits []*pb.AddressedOrderLimit, privateKey storj.PiecePrivateKey, es eestream.ErasureScheme, size int64) (ranger.Ranger, error)
Delete(ctx context.Context, limits []*pb.AddressedOrderLimit, privateKey storj.PiecePrivateKey) error
WithForceErrorDetection(force bool) Client
// PutPiece is not intended to be used by normal uplinks directly, but is exported to support storagenode graceful exit transfers.
PutPiece(ctx, parent context.Context, limit *pb.AddressedOrderLimit, privateKey storj.PiecePrivateKey, data io.ReadCloser) (hash *pb.PieceHash, id *identity.PeerIdentity, err error)
}
type dialPiecestoreFunc func(context.Context, *pb.Node) (*piecestore.Client, error)
type ecClient struct {
log *zap.Logger
dialer rpc.Dialer
memoryLimit int
forceErrorDetection bool
}
// NewClient from the given identity and max buffer memory
func NewClient(log *zap.Logger, dialer rpc.Dialer, memoryLimit int) Client {
return &ecClient{
log: log,
dialer: dialer,
memoryLimit: memoryLimit,
}
}
func (ec *ecClient) WithForceErrorDetection(force bool) Client {
ec.forceErrorDetection = force
return ec
}
func (ec *ecClient) dialPiecestore(ctx context.Context, n *pb.Node) (*piecestore.Client, error) {
logger := ec.log.Named(n.Id.String())
return piecestore.Dial(ctx, ec.dialer, n, logger, piecestore.DefaultConfig)
}
func (ec *ecClient) Put(ctx context.Context, limits []*pb.AddressedOrderLimit, privateKey storj.PiecePrivateKey, rs eestream.RedundancyStrategy, data io.Reader, expiration time.Time) (successfulNodes []*pb.Node, successfulHashes []*pb.PieceHash, err error) {
defer mon.Task()(&ctx)(&err)
pieceCount := len(limits)
if pieceCount != rs.TotalCount() {
return nil, nil, Error.New("size of limits slice (%d) does not match total count (%d) of erasure scheme", pieceCount, rs.TotalCount())
}
nonNilLimits := nonNilCount(limits)
if nonNilLimits <= rs.RepairThreshold() && nonNilLimits < rs.OptimalThreshold() {
return nil, nil, Error.New("number of non-nil limits (%d) is less than or equal to the repair threshold (%d) of erasure scheme", nonNilLimits, rs.RepairThreshold())
}
if !unique(limits) {
return nil, nil, Error.New("duplicated nodes are not allowed")
}
ec.log.Debug("Uploading to storage nodes",
zap.Int("Erasure Share Size", rs.ErasureShareSize()),
zap.Int("Stripe Size", rs.StripeSize()),
zap.Int("Repair Threshold", rs.RepairThreshold()),
zap.Int("Optimal Threshold", rs.OptimalThreshold()),
)
padded := encryption.PadReader(ioutil.NopCloser(data), rs.StripeSize())
readers, err := eestream.EncodeReader(ctx, ec.log, padded, rs)
if err != nil {
return nil, nil, err
}
type info struct {
i int
err error
hash *pb.PieceHash
}
infos := make(chan info, pieceCount)
psCtx, cancel := context.WithCancel(ctx)
defer cancel()
for i, addressedLimit := range limits {
go func(i int, addressedLimit *pb.AddressedOrderLimit) {
hash, _, err := ec.PutPiece(psCtx, ctx, addressedLimit, privateKey, readers[i])
infos <- info{i: i, err: err, hash: hash}
}(i, addressedLimit)
}
successfulNodes = make([]*pb.Node, pieceCount)
successfulHashes = make([]*pb.PieceHash, pieceCount)
var successfulCount, failureCount, cancellationCount int32
for range limits {
info := <-infos
if limits[info.i] == nil {
continue
}
if info.err != nil {
if !errs2.IsCanceled(info.err) {
failureCount++
} else {
cancellationCount++
}
ec.log.Debug("Upload to storage node failed",
zap.Stringer("Node ID", limits[info.i].GetLimit().StorageNodeId),
zap.Error(info.err),
)
continue
}
successfulNodes[info.i] = &pb.Node{
Id: limits[info.i].GetLimit().StorageNodeId,
Address: limits[info.i].GetStorageNodeAddress(),
}
successfulHashes[info.i] = info.hash
successfulCount++
if int(successfulCount) >= rs.OptimalThreshold() {
ec.log.Debug("Success threshold reached. Cancelling remaining uploads.",
zap.Int("Optimal Threshold", rs.OptimalThreshold()),
)
cancel()
}
}
defer func() {
select {
case <-ctx.Done():
err = Error.New("upload cancelled by user")
// TODO: clean up the partially uploaded segment's pieces
// ec.Delete(context.Background(), nodes, pieceID, pba.SatelliteId),
default:
}
}()
mon.IntVal("put_segment_pieces_total").Observe(int64(pieceCount))
mon.IntVal("put_segment_pieces_optimal").Observe(int64(rs.OptimalThreshold()))
mon.IntVal("put_segment_pieces_successful").Observe(int64(successfulCount))
mon.IntVal("put_segment_pieces_failed").Observe(int64(failureCount))
mon.IntVal("put_segment_pieces_canceled").Observe(int64(cancellationCount))
if int(successfulCount) <= rs.RepairThreshold() && int(successfulCount) < rs.OptimalThreshold() {
return nil, nil, Error.New("successful puts (%d) less than or equal to repair threshold (%d)", successfulCount, rs.RepairThreshold())
}
if int(successfulCount) < rs.OptimalThreshold() {
return nil, nil, Error.New("successful puts (%d) less than success threshold (%d)", successfulCount, rs.OptimalThreshold())
}
return successfulNodes, successfulHashes, nil
}
func (ec *ecClient) PutPiece(ctx, parent context.Context, limit *pb.AddressedOrderLimit, privateKey storj.PiecePrivateKey, data io.ReadCloser) (hash *pb.PieceHash, peerID *identity.PeerIdentity, err error) {
nodeName := "nil"
if limit != nil {
nodeName = limit.GetLimit().StorageNodeId.String()[0:8]
}
defer mon.Task()(&ctx, "node: "+nodeName)(&err)
defer func() { err = errs.Combine(err, data.Close()) }()
if limit == nil {
_, _ = io.Copy(ioutil.Discard, data)
return nil, nil, nil
}
storageNodeID := limit.GetLimit().StorageNodeId
pieceID := limit.GetLimit().PieceId
ps, err := ec.dialPiecestore(ctx, &pb.Node{
Id: storageNodeID,
Address: limit.GetStorageNodeAddress(),
})
if err != nil {
ec.log.Debug("Failed dialing for putting piece to node",
zap.Stringer("Piece ID", pieceID),
zap.Stringer("Node ID", storageNodeID),
zap.Error(err),
)
return nil, nil, err
}
defer func() { err = errs.Combine(err, ps.Close()) }()
peerID, err = ps.GetPeerIdentity()
if err != nil {
ec.log.Debug("Failed getting peer identity from node connection",
zap.Stringer("Node ID", storageNodeID),
zap.Error(err),
)
return nil, nil, err
}
upload, err := ps.Upload(ctx, limit.GetLimit(), privateKey)
if err != nil {
ec.log.Debug("Failed requesting upload of pieces to node",
zap.Stringer("Piece ID", pieceID),
zap.Stringer("Node ID", storageNodeID),
zap.Error(err),
)
return nil, nil, err
}
defer func() {
if err != nil {
err = errs.Combine(err, upload.Cancel(ctx))
return
}
hash, err = upload.Commit(ctx)
}()
_, err = sync2.Copy(ctx, upload, data)
// Canceled context means the piece upload was interrupted by user or due
// to slow connection. No error logging for this case.
if err != nil {
if errs2.IsCanceled(err) {
if parent.Err() == context.Canceled {
ec.log.Info("Upload to node canceled by user", zap.Stringer("Node ID", storageNodeID))
} else {
ec.log.Debug("Node cut from upload due to slow connection", zap.Stringer("Node ID", storageNodeID))
}
} else {
nodeAddress := ""
if limit.GetStorageNodeAddress() != nil {
nodeAddress = limit.GetStorageNodeAddress().GetAddress()
}
ec.log.Debug("Failed uploading piece to node",
zap.Stringer("Piece ID", pieceID),
zap.Stringer("Node ID", storageNodeID),
zap.String("Node Address", nodeAddress),
zap.Error(err),
)
}
return nil, nil, err
}
return nil, peerID, nil
}
func (ec *ecClient) Get(ctx context.Context, limits []*pb.AddressedOrderLimit, privateKey storj.PiecePrivateKey, es eestream.ErasureScheme, size int64) (rr ranger.Ranger, err error) {
defer mon.Task()(&ctx)(&err)
if len(limits) != es.TotalCount() {
return nil, Error.New("size of limits slice (%d) does not match total count (%d) of erasure scheme", len(limits), es.TotalCount())
}
if nonNilCount(limits) < es.RequiredCount() {
return nil, Error.New("number of non-nil limits (%d) is less than required count (%d) of erasure scheme", nonNilCount(limits), es.RequiredCount())
}
paddedSize := calcPadded(size, es.StripeSize())
pieceSize := paddedSize / int64(es.RequiredCount())
rrs := map[int]ranger.Ranger{}
for i, addressedLimit := range limits {
if addressedLimit == nil {
continue
}
rrs[i] = &lazyPieceRanger{
dialPiecestore: ec.dialPiecestore,
limit: addressedLimit,
privateKey: privateKey,
size: pieceSize,
}
}
rr, err = eestream.Decode(ec.log, rrs, es, ec.memoryLimit, ec.forceErrorDetection)
if err != nil {
return nil, Error.Wrap(err)
}
ranger, err := encryption.Unpad(rr, int(paddedSize-size))
return ranger, Error.Wrap(err)
}
func (ec *ecClient) Delete(ctx context.Context, limits []*pb.AddressedOrderLimit, privateKey storj.PiecePrivateKey) (err error) {
defer mon.Task()(&ctx)(&err)
setLimits := 0
for _, addressedLimit := range limits {
if addressedLimit != nil {
setLimits++
}
}
gctx, cancel := groupcancel.NewContext(ctx, setLimits, .75, 2)
defer cancel()
errch := make(chan error, setLimits)
for _, addressedLimit := range limits {
if addressedLimit == nil {
continue
}
go func(addressedLimit *pb.AddressedOrderLimit) {
limit := addressedLimit.GetLimit()
ps, err := ec.dialPiecestore(gctx, &pb.Node{
Id: limit.StorageNodeId,
Address: addressedLimit.GetStorageNodeAddress(),
})
if err != nil {
ec.log.Debug("Failed dialing for deleting piece from node",
zap.Stringer("Piece ID", limit.PieceId),
zap.Stringer("Node ID", limit.StorageNodeId),
zap.Error(err),
)
errch <- err
return
}
err = ps.Delete(gctx, limit, privateKey)
err = errs.Combine(err, ps.Close())
if err != nil {
ec.log.Debug("Failed deleting piece from node",
zap.Stringer("Piece ID", limit.PieceId),
zap.Stringer("Node ID", limit.StorageNodeId),
zap.Error(err),
)
}
errch <- err
}(addressedLimit)
}
var anySuccess bool
var lastErr error
for i := 0; i < setLimits; i++ {
if err := <-errch; err == nil {
gctx.Success()
anySuccess = true
} else {
gctx.Failure()
lastErr = err
}
}
if anySuccess {
return nil
}
return lastErr
}
func unique(limits []*pb.AddressedOrderLimit) bool {
if len(limits) < 2 {
return true
}
ids := make(storj.NodeIDList, len(limits))
for i, addressedLimit := range limits {
if addressedLimit != nil {
ids[i] = addressedLimit.GetLimit().StorageNodeId
}
}
// sort the ids and check for identical neighbors
sort.Sort(ids)
// sort.Slice(ids, func(i, k int) bool { return ids[i].Less(ids[k]) })
for i := 1; i < len(ids); i++ {
if ids[i] != (storj.NodeID{}) && ids[i] == ids[i-1] {
return false
}
}
return true
}
func calcPadded(size int64, blockSize int) int64 {
mod := size % int64(blockSize)
if mod == 0 {
return size
}
return size + int64(blockSize) - mod
}
type lazyPieceRanger struct {
dialPiecestore dialPiecestoreFunc
limit *pb.AddressedOrderLimit
privateKey storj.PiecePrivateKey
size int64
}
// Size implements Ranger.Size
func (lr *lazyPieceRanger) Size() int64 {
return lr.size
}
// Range implements Ranger.Range to be lazily connected
func (lr *lazyPieceRanger) Range(ctx context.Context, offset, length int64) (_ io.ReadCloser, err error) {
defer mon.Task()(&ctx)(&err)
return &lazyPieceReader{
ranger: lr,
ctx: ctx,
offset: offset,
length: length,
}, nil
}
type lazyPieceReader struct {
ranger *lazyPieceRanger
ctx context.Context
offset int64
length int64
mu sync.Mutex
isClosed bool
piecestore.Downloader
client *piecestore.Client
}
func (lr *lazyPieceReader) Read(data []byte) (_ int, err error) {
lr.mu.Lock()
defer lr.mu.Unlock()
if lr.isClosed {
return 0, io.EOF
}
if lr.Downloader == nil {
client, downloader, err := lr.ranger.dial(lr.ctx, lr.offset, lr.length)
if err != nil {
return 0, err
}
lr.Downloader = downloader
lr.client = client
}
return lr.Downloader.Read(data)
}
func (lr *lazyPieceRanger) dial(ctx context.Context, offset, length int64) (_ *piecestore.Client, _ piecestore.Downloader, err error) {
defer mon.Task()(&ctx)(&err)
ps, err := lr.dialPiecestore(ctx, &pb.Node{
Id: lr.limit.GetLimit().StorageNodeId,
Address: lr.limit.GetStorageNodeAddress(),
})
if err != nil {
return nil, nil, err
}
download, err := ps.Download(ctx, lr.limit.GetLimit(), lr.privateKey, offset, length)
if err != nil {
return nil, nil, errs.Combine(err, ps.Close())
}
return ps, download, nil
}
func (lr *lazyPieceReader) Close() (err error) {
lr.mu.Lock()
defer lr.mu.Unlock()
if lr.isClosed {
return nil
}
lr.isClosed = true
if lr.Downloader != nil {
err = errs.Combine(err, lr.Downloader.Close())
}
if lr.client != nil {
err = errs.Combine(err, lr.client.Close())
}
return err
}
func nonNilCount(limits []*pb.AddressedOrderLimit) int {
total := 0
for _, limit := range limits {
if limit != nil {
total++
}
}
return total
}
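The bookkeeping at the end of Put boils down to two thresholds: the upload fails when the count of successfully stored pieces is at or below the repair threshold (and still below the optimal threshold), and it only succeeds once the optimal threshold is reached. A standalone sketch of that decision, with made-up numbers that are not taken from this commit:

package main

import "fmt"

// uploadOutcome mirrors the final checks in (*ecClient).Put.
func uploadOutcome(successful, repairThreshold, optimalThreshold int) string {
	if successful <= repairThreshold && successful < optimalThreshold {
		return "failed: at or below repair threshold"
	}
	if successful < optimalThreshold {
		return "failed: below success threshold"
	}
	return "ok"
}

func main() {
	// Illustrative thresholds only; real values come from the RedundancyStrategy.
	const repair, optimal = 6, 8
	for _, successful := range []int{5, 6, 7, 8} {
		fmt.Printf("successful=%d -> %s\n", successful, uploadOutcome(successful, repair, optimal))
	}
}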

View File

@@ -1,46 +0,0 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.

package ecclient

import (
	"fmt"
	"testing"

	"github.com/stretchr/testify/assert"

	"storj.io/common/pb"
	"storj.io/common/testrand"
)

func TestUnique(t *testing.T) {
	limits := make([]*pb.AddressedOrderLimit, 4)
	for i := 0; i < len(limits); i++ {
		limits[i] = &pb.AddressedOrderLimit{
			Limit: &pb.OrderLimit{
				StorageNodeId: testrand.NodeID(),
			},
		}
	}

	for i, tt := range []struct {
		limits []*pb.AddressedOrderLimit
		unique bool
	}{
		{nil, true},
		{[]*pb.AddressedOrderLimit{}, true},
		{[]*pb.AddressedOrderLimit{limits[0]}, true},
		{[]*pb.AddressedOrderLimit{limits[0], limits[1]}, true},
		{[]*pb.AddressedOrderLimit{limits[0], limits[0]}, false},
		{[]*pb.AddressedOrderLimit{limits[0], limits[1], limits[0]}, false},
		{[]*pb.AddressedOrderLimit{limits[1], limits[0], limits[0]}, false},
		{[]*pb.AddressedOrderLimit{limits[0], limits[0], limits[1]}, false},
		{[]*pb.AddressedOrderLimit{limits[2], limits[0], limits[1]}, true},
		{[]*pb.AddressedOrderLimit{limits[2], limits[0], limits[3], limits[1]}, true},
		{[]*pb.AddressedOrderLimit{limits[2], limits[0], limits[2], limits[1]}, false},
		{[]*pb.AddressedOrderLimit{limits[1], limits[0], limits[3], limits[1]}, false},
	} {
		errTag := fmt.Sprintf("Test case #%d", i)
		assert.Equal(t, tt.unique, unique(tt.limits), errTag)
	}
}

View File

@@ -1,11 +0,0 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.

package ecclient

import (
	"github.com/zeebo/errs"
)

// Error is the errs class of standard Ranger errors
var Error = errs.Class("ecclient error")

View File

@@ -1,11 +0,0 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.

package eestream

import (
	"github.com/zeebo/errs"
)

// Error is the default eestream errs class
var Error = errs.Class("eestream error")

View File

@@ -1,230 +0,0 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.
package eestream
import (
"context"
"io"
"io/ioutil"
"sync"
"github.com/zeebo/errs"
"go.uber.org/zap"
"storj.io/common/encryption"
"storj.io/common/errs2"
"storj.io/common/ranger"
"storj.io/common/readcloser"
)
type decodedReader struct {
log *zap.Logger
ctx context.Context
cancel context.CancelFunc
readers map[int]io.ReadCloser
scheme ErasureScheme
stripeReader *StripeReader
outbuf []byte
err error
currentStripe int64
expectedStripes int64
close sync.Once
closeErr error
}
// DecodeReaders takes a map of readers and an ErasureScheme returning a
// combined Reader.
//
// rs is a map of erasure piece numbers to erasure piece streams.
// expectedSize is the number of bytes expected to be returned by the Reader.
// mbm is the maximum memory (in bytes) to be allocated for read buffers. If
// set to 0, the minimum possible memory will be used.
// if forceErrorDetection is set to true then k+1 pieces will always be
// required for decoding, so corrupted pieces can be detected.
func DecodeReaders(ctx context.Context, cancel func(), log *zap.Logger, rs map[int]io.ReadCloser, es ErasureScheme, expectedSize int64, mbm int, forceErrorDetection bool) io.ReadCloser {
defer mon.Task()(&ctx)(nil)
if expectedSize < 0 {
return readcloser.FatalReadCloser(Error.New("negative expected size"))
}
if expectedSize%int64(es.StripeSize()) != 0 {
return readcloser.FatalReadCloser(
Error.New("expected size (%d) not a factor decoded block size (%d)",
expectedSize, es.StripeSize()))
}
if err := checkMBM(mbm); err != nil {
return readcloser.FatalReadCloser(err)
}
dr := &decodedReader{
log: log,
readers: rs,
scheme: es,
stripeReader: NewStripeReader(log, rs, es, mbm, forceErrorDetection),
outbuf: make([]byte, 0, es.StripeSize()),
expectedStripes: expectedSize / int64(es.StripeSize()),
}
dr.ctx, dr.cancel = ctx, cancel
// Kick off a goroutine to watch for context cancelation.
go func() {
<-dr.ctx.Done()
_ = dr.Close()
}()
return dr
}
func (dr *decodedReader) Read(p []byte) (n int, err error) {
ctx := dr.ctx
if len(dr.outbuf) == 0 {
// if the output buffer is empty, let's fill it again
// if we've already had an error, fail
if dr.err != nil {
return 0, dr.err
}
// return EOF if the expected stripes were read
if dr.currentStripe >= dr.expectedStripes {
dr.err = io.EOF
return 0, dr.err
}
// read the input buffers of the next stripe - may also decode it
dr.outbuf, dr.err = dr.stripeReader.ReadStripe(ctx, dr.currentStripe, dr.outbuf)
if dr.err != nil {
return 0, dr.err
}
dr.currentStripe++
}
// copy what data we have to the output
n = copy(p, dr.outbuf)
// slide the remaining bytes to the beginning
copy(dr.outbuf, dr.outbuf[n:])
// shrink the remaining buffer
dr.outbuf = dr.outbuf[:len(dr.outbuf)-n]
return n, nil
}
func (dr *decodedReader) Close() (err error) {
ctx := dr.ctx
defer mon.Task()(&ctx)(&err)
// cancel the context to terminate reader goroutines
dr.cancel()
errorThreshold := len(dr.readers) - dr.scheme.RequiredCount()
var closeGroup errs2.Group
// avoid double close of readers
dr.close.Do(func() {
for _, r := range dr.readers {
r := r
closeGroup.Go(func() error {
return errs2.IgnoreCanceled(r.Close())
})
}
// close the stripe reader
closeGroup.Go(dr.stripeReader.Close)
allErrors := closeGroup.Wait()
errorThreshold -= len(allErrors)
dr.closeErr = errs.Combine(allErrors...)
})
// TODO: this is a workaround; we need to reorganize to return multiple errors or to divide them into fatal and non-fatal
if errorThreshold < 0 {
return dr.closeErr
}
if dr.closeErr != nil {
dr.log.Debug("decode close non fatal error: ", zap.Error(dr.closeErr))
}
return nil
}
type decodedRanger struct {
log *zap.Logger
es ErasureScheme
rrs map[int]ranger.Ranger
inSize int64
mbm int // max buffer memory
forceErrorDetection bool
}
// Decode takes a map of Rangers and an ErasureScheme and returns a combined
// Ranger.
//
// rrs is a map of erasure piece numbers to erasure piece rangers.
// mbm is the maximum memory (in bytes) to be allocated for read buffers. If
// set to 0, the minimum possible memory will be used.
// if forceErrorDetection is set to true then k+1 pieces will always be
// required for decoding, so corrupted pieces can be detected.
func Decode(log *zap.Logger, rrs map[int]ranger.Ranger, es ErasureScheme, mbm int, forceErrorDetection bool) (ranger.Ranger, error) {
if err := checkMBM(mbm); err != nil {
return nil, err
}
if len(rrs) < es.RequiredCount() {
return nil, Error.New("not enough readers to reconstruct data!")
}
size := int64(-1)
for _, rr := range rrs {
if size == -1 {
size = rr.Size()
} else if size != rr.Size() {
return nil, Error.New(
"decode failure: range reader sizes don't all match")
}
}
if size == -1 {
return ranger.ByteRanger(nil), nil
}
if size%int64(es.ErasureShareSize()) != 0 {
return nil, Error.New("invalid erasure decoder and range reader combo. "+
"range reader size (%d) must be a multiple of erasure encoder block size (%d)",
size, es.ErasureShareSize())
}
return &decodedRanger{
log: log,
es: es,
rrs: rrs,
inSize: size,
mbm: mbm,
forceErrorDetection: forceErrorDetection,
}, nil
}
func (dr *decodedRanger) Size() int64 {
blocks := dr.inSize / int64(dr.es.ErasureShareSize())
return blocks * int64(dr.es.StripeSize())
}
func (dr *decodedRanger) Range(ctx context.Context, offset, length int64) (_ io.ReadCloser, err error) {
defer mon.Task()(&ctx)(&err)
ctx, cancel := context.WithCancel(ctx)
// offset and length might not be block-aligned. figure out which
// blocks contain this request
firstBlock, blockCount := encryption.CalcEncompassingBlocks(offset, length, dr.es.StripeSize())
// go ask for ranges for all those block boundaries
readers := make(map[int]io.ReadCloser, len(dr.rrs))
for i, rr := range dr.rrs {
r, err := rr.Range(ctx, firstBlock*int64(dr.es.ErasureShareSize()), blockCount*int64(dr.es.ErasureShareSize()))
if err != nil {
readers[i] = readcloser.FatalReadCloser(err)
} else {
readers[i] = r
}
}
// decode from all those ranges
r := DecodeReaders(ctx, cancel, dr.log, readers, dr.es, blockCount*int64(dr.es.StripeSize()), dr.mbm, dr.forceErrorDetection)
// offset might start a few bytes in, potentially discard the initial bytes
_, err = io.CopyN(ioutil.Discard, r, offset-firstBlock*int64(dr.es.StripeSize()))
if err != nil {
return nil, Error.Wrap(err)
}
// length might not have included all of the blocks, limit what we return
return readcloser.LimitReadCloser(r, length), nil
}
func checkMBM(mbm int) error {
if mbm < 0 {
return Error.New("negative max buffer memory")
}
return nil
}
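decodedRanger.Size converts the per-piece encoded size back to the decoded stream size: each erasure share in a piece corresponds to one full stripe of output. A worked example with the 2-of-4, 8 KiB-share configuration used by the package's tests (the concrete sizes below are illustrative):

package main

import "fmt"

func main() {
	// Illustrative scheme: required=2, erasure share size 8 KiB, so one stripe is 16 KiB.
	const erasureShareSize = 8 * 1024
	const requiredCount = 2
	const stripeSize = erasureShareSize * requiredCount

	// Each stored piece holds one 8 KiB share per stripe; a 16 KiB piece holds two shares.
	const pieceSize int64 = 16 * 1024

	// Mirrors (*decodedRanger).Size: shares per piece times the stripe size.
	blocks := pieceSize / erasureShareSize
	fmt.Println(blocks * stripeSize) // 32768 bytes of decoded data
}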

View File

@@ -1,318 +0,0 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.
package eestream
import (
"context"
"io"
"io/ioutil"
"os"
"github.com/vivint/infectious"
"go.uber.org/zap"
"storj.io/common/encryption"
"storj.io/common/fpath"
"storj.io/common/memory"
"storj.io/common/pb"
"storj.io/common/ranger"
"storj.io/common/readcloser"
"storj.io/common/storj"
"storj.io/common/sync2"
)
// ErasureScheme represents the general format of any erasure scheme algorithm.
// If this interface can be implemented, the rest of this library will work
// with it.
type ErasureScheme interface {
// Encode will take 'in' and call 'out' with erasure coded pieces.
Encode(in []byte, out func(num int, data []byte)) error
// EncodeSingle will take 'in' with the stripe and fill 'out' with the erasure share for piece 'num'.
EncodeSingle(in, out []byte, num int) error
// Decode will take a mapping of available erasure coded piece num -> data,
// 'in', and append the combined data to 'out', returning it.
Decode(out []byte, in map[int][]byte) ([]byte, error)
// ErasureShareSize is the size of the erasure shares that come from Encode
// and are passed to Decode.
ErasureShareSize() int
// StripeSize is the size the stripes that are passed to Encode and come
// from Decode.
StripeSize() int
// Encode will generate this many erasure shares and therefore this many pieces
TotalCount() int
// Decode requires at least this many pieces
RequiredCount() int
}
// RedundancyStrategy is an ErasureScheme with a repair and optimal thresholds
type RedundancyStrategy struct {
ErasureScheme
repairThreshold int
optimalThreshold int
}
// NewRedundancyStrategy from the given ErasureScheme, repair and optimal thresholds.
//
// repairThreshold is the minimum repair threshold.
// If set to 0, it will be reset to the TotalCount of the ErasureScheme.
// optimalThreshold is the optimal threshold.
// If set to 0, it will be reset to the TotalCount of the ErasureScheme.
func NewRedundancyStrategy(es ErasureScheme, repairThreshold, optimalThreshold int) (RedundancyStrategy, error) {
if repairThreshold == 0 {
repairThreshold = es.TotalCount()
}
if optimalThreshold == 0 {
optimalThreshold = es.TotalCount()
}
if repairThreshold < 0 {
return RedundancyStrategy{}, Error.New("negative repair threshold")
}
if repairThreshold > 0 && repairThreshold < es.RequiredCount() {
return RedundancyStrategy{}, Error.New("repair threshold less than required count")
}
if repairThreshold > es.TotalCount() {
return RedundancyStrategy{}, Error.New("repair threshold greater than total count")
}
if optimalThreshold < 0 {
return RedundancyStrategy{}, Error.New("negative optimal threshold")
}
if optimalThreshold > 0 && optimalThreshold < es.RequiredCount() {
return RedundancyStrategy{}, Error.New("optimal threshold less than required count")
}
if optimalThreshold > es.TotalCount() {
return RedundancyStrategy{}, Error.New("optimal threshold greater than total count")
}
if repairThreshold > optimalThreshold {
return RedundancyStrategy{}, Error.New("repair threshold greater than optimal threshold")
}
return RedundancyStrategy{ErasureScheme: es, repairThreshold: repairThreshold, optimalThreshold: optimalThreshold}, nil
}
// NewRedundancyStrategyFromProto creates new RedundancyStrategy from the given
// RedundancyScheme protobuf.
func NewRedundancyStrategyFromProto(scheme *pb.RedundancyScheme) (RedundancyStrategy, error) {
fc, err := infectious.NewFEC(int(scheme.GetMinReq()), int(scheme.GetTotal()))
if err != nil {
return RedundancyStrategy{}, Error.Wrap(err)
}
es := NewRSScheme(fc, int(scheme.GetErasureShareSize()))
return NewRedundancyStrategy(es, int(scheme.GetRepairThreshold()), int(scheme.GetSuccessThreshold()))
}
// NewRedundancyStrategyFromStorj creates new RedundancyStrategy from the given
// storj.RedundancyScheme.
func NewRedundancyStrategyFromStorj(scheme storj.RedundancyScheme) (RedundancyStrategy, error) {
fc, err := infectious.NewFEC(int(scheme.RequiredShares), int(scheme.TotalShares))
if err != nil {
return RedundancyStrategy{}, Error.Wrap(err)
}
es := NewRSScheme(fc, int(scheme.ShareSize))
return NewRedundancyStrategy(es, int(scheme.RepairShares), int(scheme.OptimalShares))
}
// RepairThreshold is the number of available erasure pieces below which
// the data must be repaired to avoid loss
func (rs *RedundancyStrategy) RepairThreshold() int {
return rs.repairThreshold
}
// OptimalThreshold is the number of available erasure pieces above which
// there is no need for the data to be repaired
func (rs *RedundancyStrategy) OptimalThreshold() int {
return rs.optimalThreshold
}
type encodedReader struct {
log *zap.Logger
ctx context.Context
rs RedundancyStrategy
pieces map[int]*encodedPiece
}
// EncodeReader takes a Reader and a RedundancyStrategy and returns a slice of
// io.ReadClosers.
func EncodeReader(ctx context.Context, log *zap.Logger, r io.Reader, rs RedundancyStrategy) (_ []io.ReadCloser, err error) {
defer mon.Task()(&ctx)(&err)
er := &encodedReader{
log: log,
ctx: ctx,
rs: rs,
pieces: make(map[int]*encodedPiece, rs.TotalCount()),
}
var pipeReaders []sync2.PipeReader
var pipeWriter sync2.PipeWriter
tempDir, inmemory, _ := fpath.GetTempData(ctx)
if inmemory {
// TODO what default inmemory size will be enough
pipeReaders, pipeWriter, err = sync2.NewTeeInmemory(rs.TotalCount(), memory.MiB.Int64())
} else {
if tempDir == "" {
tempDir = os.TempDir()
}
pipeReaders, pipeWriter, err = sync2.NewTeeFile(rs.TotalCount(), tempDir)
}
if err != nil {
return nil, err
}
readers := make([]io.ReadCloser, 0, rs.TotalCount())
for i := 0; i < rs.TotalCount(); i++ {
er.pieces[i] = &encodedPiece{
er: er,
pipeReader: pipeReaders[i],
num: i,
stripeBuf: make([]byte, rs.StripeSize()),
shareBuf: make([]byte, rs.ErasureShareSize()),
}
readers = append(readers, er.pieces[i])
}
go er.fillBuffer(ctx, r, pipeWriter)
return readers, nil
}
func (er *encodedReader) fillBuffer(ctx context.Context, r io.Reader, w sync2.PipeWriter) {
var err error
defer mon.Task()(&ctx)(&err)
_, err = sync2.Copy(ctx, w, r)
err = w.CloseWithError(err)
if err != nil {
er.log.Sugar().Error(err)
}
}
type encodedPiece struct {
er *encodedReader
pipeReader sync2.PipeReader
num int
currentStripe int64
stripeBuf []byte
shareBuf []byte
available int
err error
}
func (ep *encodedPiece) Read(p []byte) (n int, err error) {
// No need to trace this function because it's very fast and called many times.
if ep.err != nil {
return 0, ep.err
}
if ep.available == 0 {
// take the next stripe from the segment buffer
_, err := io.ReadFull(ep.pipeReader, ep.stripeBuf)
if err != nil {
return 0, err
}
// encode the num-th erasure share
err = ep.er.rs.EncodeSingle(ep.stripeBuf, ep.shareBuf, ep.num)
if err != nil {
return 0, err
}
ep.currentStripe++
ep.available = ep.er.rs.ErasureShareSize()
}
// we have some buffer remaining for this piece. write it to the output
off := len(ep.shareBuf) - ep.available
n = copy(p, ep.shareBuf[off:])
ep.available -= n
return n, nil
}
func (ep *encodedPiece) Close() (err error) {
ctx := ep.er.ctx
defer mon.Task()(&ctx)(&err)
return ep.pipeReader.Close()
}
// EncodedRanger will take an existing Ranger and provide a means to get
// multiple Ranged sub-Readers. EncodedRanger does not match the normal Ranger
// interface.
type EncodedRanger struct {
log *zap.Logger
rr ranger.Ranger
rs RedundancyStrategy
}
// NewEncodedRanger from the given Ranger and RedundancyStrategy. See the
// comments for EncodeReader about the repair and success thresholds.
func NewEncodedRanger(log *zap.Logger, rr ranger.Ranger, rs RedundancyStrategy) (*EncodedRanger, error) {
if rr.Size()%int64(rs.StripeSize()) != 0 {
return nil, Error.New("invalid erasure encoder and range reader combo. " +
"range reader size must be a multiple of erasure encoder block size")
}
return &EncodedRanger{
log: log,
rs: rs,
rr: rr,
}, nil
}
// OutputSize is like Ranger.Size but returns the Size of the erasure encoded
// pieces that come out.
func (er *EncodedRanger) OutputSize() int64 {
blocks := er.rr.Size() / int64(er.rs.StripeSize())
return blocks * int64(er.rs.ErasureShareSize())
}
// Range is like Ranger.Range, but returns a slice of Readers
func (er *EncodedRanger) Range(ctx context.Context, offset, length int64) (_ []io.ReadCloser, err error) {
defer mon.Task()(&ctx)(&err)
// the offset and length given may not be block-aligned, so let's figure
// out which blocks contain the request.
firstBlock, blockCount := encryption.CalcEncompassingBlocks(
offset, length, er.rs.ErasureShareSize())
// okay, now let's encode the reader for the range containing the blocks
r, err := er.rr.Range(ctx,
firstBlock*int64(er.rs.StripeSize()),
blockCount*int64(er.rs.StripeSize()))
if err != nil {
return nil, err
}
readers, err := EncodeReader(ctx, er.log, r, er.rs)
if err != nil {
return nil, err
}
for i, r := range readers {
// the offset might start a few bytes in, so we potentially have to
// discard the beginning bytes
_, err := io.CopyN(ioutil.Discard, r,
offset-firstBlock*int64(er.rs.ErasureShareSize()))
if err != nil {
return nil, Error.Wrap(err)
}
// the length might be shorter than a multiple of the block size, so
// limit it
readers[i] = readcloser.LimitReadCloser(r, length)
}
return readers, nil
}
// CalcPieceSize calculates what would be the piece size of the encoded data
// after erasure coding data with dataSize using the given ErasureScheme.
func CalcPieceSize(dataSize int64, scheme ErasureScheme) int64 {
const uint32Size = 4
stripeSize := int64(scheme.StripeSize())
stripes := (dataSize + uint32Size + stripeSize - 1) / stripeSize
encodedSize := stripes * int64(scheme.StripeSize())
pieceSize := encodedSize / int64(scheme.RequiredCount())
return pieceSize
}
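CalcPieceSize works in three steps: add the 4-byte uint32 overhead, round up to whole stripes, then divide the encoded size across the required pieces. A quick worked example using the same illustrative 2-of-4, 8 KiB-share scheme:

package main

import "fmt"

// calcPieceSize repeats the arithmetic of CalcPieceSize for a worked example.
func calcPieceSize(dataSize, stripeSize, requiredCount int64) int64 {
	const uint32Size = 4
	stripes := (dataSize + uint32Size + stripeSize - 1) / stripeSize // round up to whole stripes
	encodedSize := stripes * stripeSize
	return encodedSize / requiredCount
}

func main() {
	const stripeSize = 16 * 1024 // 2 required pieces * 8 KiB erasure shares (illustrative)
	// 1 MiB of data plus the 4-byte overhead needs 65 stripes, so each required
	// piece stores 65 erasure shares: 65 * 8 KiB = 532480 bytes.
	fmt.Println(calcPieceSize(1024*1024, stripeSize, 2)) // 532480
}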

View File

@@ -1,294 +0,0 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.
package eestream
import (
"io"
"sync"
"go.uber.org/zap"
)
// PieceBuffer is a synchronized buffer for storing erasure shares for a piece.
type PieceBuffer struct {
log *zap.Logger
buf []byte
shareSize int
cond *sync.Cond
newDataCond *sync.Cond
rpos, wpos int
full bool
currentShare int64 // current erasure share number
totalwr int64 // total bytes ever written to the buffer
lastwr int64 // total bytes ever written when last notified newDataCond
err error
}
// NewPieceBuffer creates and initializes a new PieceBuffer using buf as its
// internal content. If new data is written to the buffer, newDataCond will be
// notified.
func NewPieceBuffer(log *zap.Logger, buf []byte, shareSize int, newDataCond *sync.Cond) *PieceBuffer {
return &PieceBuffer{
log: log,
buf: buf,
shareSize: shareSize,
cond: sync.NewCond(&sync.Mutex{}),
newDataCond: newDataCond,
}
}
// Read reads the next len(p) bytes from the buffer or until the buffer is
// drained. The return value n is the number of bytes read. If the buffer has
// no data to return and no error is set, the call will block until new data is
// written to the buffer. Otherwise the error will be returned.
func (b *PieceBuffer) Read(p []byte) (n int, err error) {
defer b.cond.Broadcast()
b.cond.L.Lock()
defer b.cond.L.Unlock()
for b.empty() {
if b.err != nil {
return 0, b.err
}
b.cond.Wait()
}
if b.rpos >= b.wpos {
nn := copy(p, b.buf[b.rpos:])
n += nn
b.rpos = (b.rpos + nn) % len(b.buf)
p = p[nn:]
}
if b.rpos < b.wpos {
nn := copy(p, b.buf[b.rpos:b.wpos])
n += nn
b.rpos += nn
}
if n > 0 {
b.full = false
}
return n, nil
}
// Skip advances the read pointer by n bytes. If the buffered number of bytes
// are less than n, the method will block until enough data is written to the
// buffer.
func (b *PieceBuffer) Skip(n int) error {
defer b.cond.Broadcast()
b.cond.L.Lock()
defer b.cond.L.Unlock()
for n > 0 {
for b.empty() {
if b.err != nil {
return b.err
}
b.cond.Wait()
}
if b.rpos >= b.wpos {
if len(b.buf)-b.rpos > n {
b.rpos = (b.rpos + n) % len(b.buf)
n = 0
} else {
n -= len(b.buf) - b.rpos
b.rpos = 0
}
} else {
if b.wpos-b.rpos > n {
b.rpos += n
n = 0
} else {
n -= b.wpos - b.rpos
b.rpos = b.wpos
}
}
b.full = false
}
return nil
}
// Write writes the contents of p into the buffer. If the buffer is full it
// will block until some data is read from it, or an error is set. The return
// value n is the number of bytes written. If an error was set, it will be returned.
func (b *PieceBuffer) Write(p []byte) (n int, err error) {
for n < len(p) {
nn, err := b.write(p[n:])
n += nn
if err != nil {
return n, err
}
// Notify for new data only if a new complete erasure share is available
b.totalwr += int64(nn)
if b.totalwr/int64(b.shareSize)-b.lastwr/int64(b.shareSize) > 0 {
b.lastwr = b.totalwr
b.notifyNewData()
}
}
return n, nil
}
// write is a helper method that takes care of the locking on each copy
// iteration.
func (b *PieceBuffer) write(p []byte) (n int, err error) {
defer b.cond.Broadcast()
b.cond.L.Lock()
defer b.cond.L.Unlock()
for b.full {
if b.err != nil {
return n, b.err
}
b.cond.Wait()
}
var wr int
if b.wpos < b.rpos {
wr = copy(b.buf[b.wpos:b.rpos], p)
} else {
wr = copy(b.buf[b.wpos:], p)
}
n += wr
b.wpos = (b.wpos + wr) % len(b.buf)
if b.wpos == b.rpos {
b.full = true
}
return n, nil
}
// Close sets io.ErrClosedPipe to the buffer to prevent further writes and
// blocking on read.
func (b *PieceBuffer) Close() error {
b.SetError(io.ErrClosedPipe)
return nil
}
// SetError sets an error to be returned by Read and Write. Read will return
// the error after all data is read from the buffer.
func (b *PieceBuffer) SetError(err error) {
b.setError(err)
b.notifyNewData()
}
// setError is a helper method that locks the mutex before setting the error.
func (b *PieceBuffer) setError(err error) {
defer b.cond.Broadcast()
b.cond.L.Lock()
defer b.cond.L.Unlock()
b.err = err
}
// getError is a helper method that locks the mutex before getting the error.
func (b *PieceBuffer) getError() error {
b.cond.L.Lock()
defer b.cond.L.Unlock()
return b.err
}
// notifyNewData notifies newDataCond that new data is written to the buffer.
func (b *PieceBuffer) notifyNewData() {
b.newDataCond.L.Lock()
defer b.newDataCond.L.Unlock()
b.newDataCond.Broadcast()
}
// empty checks if the buffer is empty.
func (b *PieceBuffer) empty() bool {
return !b.full && b.rpos == b.wpos
}
// buffered returns the number of bytes that can be read from the buffer
// without blocking.
func (b *PieceBuffer) buffered() int {
b.cond.L.Lock()
defer b.cond.L.Unlock()
switch {
case b.rpos < b.wpos:
return b.wpos - b.rpos
case b.rpos > b.wpos:
return len(b.buf) + b.wpos - b.rpos
case b.full:
return len(b.buf)
default: // empty
return 0
}
}
// HasShare checks if the num-th share can be read from the buffer without
// blocking. If there are older erasure shares in the buffer, they will be
// discarded to leave room for the newer erasure shares to be written.
func (b *PieceBuffer) HasShare(num int64) bool {
if num < b.currentShare {
// we should never get here!
b.log.Sugar().Fatalf("Checking for erasure share %d while the current erasure share is %d.", num, b.currentShare)
}
if b.getError() != nil {
return true
}
bufShares := int64(b.buffered() / b.shareSize)
if num-b.currentShare > 0 {
if bufShares > num-b.currentShare {
// TODO: should this error be ignored?
_ = b.discardUntil(num)
} else {
_ = b.discardUntil(b.currentShare + bufShares)
}
bufShares = int64(b.buffered() / b.shareSize)
}
return bufShares > num-b.currentShare
}
// ReadShare reads the num-th erasure share from the buffer into p. Any shares
// before num will be discarded from the buffer.
func (b *PieceBuffer) ReadShare(num int64, p []byte) error {
if num < b.currentShare {
// we should never get here!
b.log.Sugar().Fatalf("Trying to read erasure share %d while the current erasure share is already %d.", num, b.currentShare)
}
err := b.discardUntil(num)
if err != nil {
return err
}
_, err = io.ReadFull(b, p)
if err != nil {
return err
}
b.currentShare++
return nil
}
// discardUntil discards all erasure shares from the buffer until the num-th
// erasure share exclusively.
func (b *PieceBuffer) discardUntil(num int64) error {
if num <= b.currentShare {
return nil
}
err := b.Skip(int(num-b.currentShare) * b.shareSize)
if err != nil {
return err
}
b.currentShare = num
return nil
}
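PieceBuffer uses buf as a ring: rpos and wpos wrap around the slice, so Read may copy in two steps and buffered() has to distinguish the wrapped and non-wrapped cases (plus the full flag, since rpos == wpos alone is ambiguous). A small standalone sketch of that accounting, with made-up positions:

package main

import "fmt"

// buffered mirrors (*PieceBuffer).buffered: bytes readable without blocking.
func buffered(bufLen, rpos, wpos int, full bool) int {
	switch {
	case rpos < wpos:
		return wpos - rpos
	case rpos > wpos:
		return bufLen + wpos - rpos // unread region wraps past the end of buf
	case full:
		return bufLen
	default: // rpos == wpos and not full
		return 0
	}
}

func main() {
	const bufLen = 1024
	fmt.Println(buffered(bufLen, 0, 300, false))   // 300: straightforward region
	fmt.Println(buffered(bufLen, 900, 100, false)) // 224: wrapped region
	fmt.Println(buffered(bufLen, 512, 512, true))  // 1024: writer caught up, buffer full
	fmt.Println(buffered(bufLen, 512, 512, false)) // 0: empty
}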

View File

@@ -1,53 +0,0 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.

package eestream

import (
	"github.com/vivint/infectious"
)

type rsScheme struct {
	fc               *infectious.FEC
	erasureShareSize int
}

// NewRSScheme returns a Reed-Solomon-based ErasureScheme.
func NewRSScheme(fc *infectious.FEC, erasureShareSize int) ErasureScheme {
	return &rsScheme{fc: fc, erasureShareSize: erasureShareSize}
}

func (s *rsScheme) EncodeSingle(input, output []byte, num int) (err error) {
	return s.fc.EncodeSingle(input, output, num)
}

func (s *rsScheme) Encode(input []byte, output func(num int, data []byte)) (
	err error) {
	return s.fc.Encode(input, func(s infectious.Share) {
		output(s.Number, s.Data)
	})
}

func (s *rsScheme) Decode(out []byte, in map[int][]byte) ([]byte, error) {
	shares := make([]infectious.Share, 0, len(in))
	for num, data := range in {
		shares = append(shares, infectious.Share{Number: num, Data: data})
	}
	return s.fc.Decode(out, shares)
}

func (s *rsScheme) ErasureShareSize() int {
	return s.erasureShareSize
}

func (s *rsScheme) StripeSize() int {
	return s.erasureShareSize * s.fc.Required()
}

func (s *rsScheme) TotalCount() int {
	return s.fc.Total()
}

func (s *rsScheme) RequiredCount() int {
	return s.fc.Required()
}

View File

@@ -1,681 +0,0 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.
package eestream
import (
"bytes"
"context"
"crypto/sha256"
"errors"
"fmt"
"io"
"io/ioutil"
"math/rand"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/vivint/infectious"
"github.com/zeebo/errs"
"go.uber.org/zap/zaptest"
"storj.io/common/encryption"
"storj.io/common/memory"
"storj.io/common/ranger"
"storj.io/common/readcloser"
"storj.io/common/storj"
"storj.io/common/testcontext"
"storj.io/common/testrand"
)
func TestRS(t *testing.T) {
ctx := context.Background()
data := testrand.Bytes(32 * 1024)
fc, err := infectious.NewFEC(2, 4)
if err != nil {
t.Fatal(err)
}
es := NewRSScheme(fc, 8*1024)
rs, err := NewRedundancyStrategy(es, 0, 0)
if err != nil {
t.Fatal(err)
}
readers, err := EncodeReader(ctx, zaptest.NewLogger(t), bytes.NewReader(data), rs)
if err != nil {
t.Fatal(err)
}
readerMap := make(map[int]io.ReadCloser, len(readers))
for i, reader := range readers {
readerMap[i] = reader
}
ctx, cancel := context.WithCancel(ctx)
decoder := DecodeReaders(ctx, cancel, zaptest.NewLogger(t), readerMap, rs, 32*1024, 0, false)
defer func() { assert.NoError(t, decoder.Close()) }()
data2, err := ioutil.ReadAll(decoder)
if err != nil {
t.Fatal(err)
}
assert.Equal(t, data, data2)
}
// Check that io.ReadFull will return io.ErrUnexpectedEOF
// if DecodeReaders returns less data than expected.
func TestRSUnexpectedEOF(t *testing.T) {
ctx := context.Background()
data := testrand.Bytes(32 * 1024)
fc, err := infectious.NewFEC(2, 4)
if err != nil {
t.Fatal(err)
}
es := NewRSScheme(fc, 8*1024)
rs, err := NewRedundancyStrategy(es, 0, 0)
if err != nil {
t.Fatal(err)
}
readers, err := EncodeReader(ctx, zaptest.NewLogger(t), bytes.NewReader(data), rs)
if err != nil {
t.Fatal(err)
}
readerMap := make(map[int]io.ReadCloser, len(readers))
for i, reader := range readers {
readerMap[i] = reader
}
ctx, cancel := context.WithCancel(ctx)
decoder := DecodeReaders(ctx, cancel, zaptest.NewLogger(t), readerMap, rs, 32*1024, 0, false)
defer func() { assert.NoError(t, decoder.Close()) }()
// Try ReadFull more data from DecodeReaders than available
data2 := make([]byte, len(data)+1024)
_, err = io.ReadFull(decoder, data2)
assert.EqualError(t, err, io.ErrUnexpectedEOF.Error())
}
func TestRSRanger(t *testing.T) {
ctx := context.Background()
data := testrand.Bytes(32 * 1024)
fc, err := infectious.NewFEC(2, 4)
if err != nil {
t.Fatal(err)
}
es := NewRSScheme(fc, 8*1024)
rs, err := NewRedundancyStrategy(es, 0, 0)
if err != nil {
t.Fatal(err)
}
encKey := storj.Key(sha256.Sum256([]byte("the secret key")))
var firstNonce storj.Nonce
const stripesPerBlock = 2
blockSize := stripesPerBlock * rs.StripeSize()
encrypter, err := encryption.NewEncrypter(storj.EncAESGCM, &encKey, &firstNonce, blockSize)
if err != nil {
t.Fatal(err)
}
readers, err := EncodeReader(ctx, zaptest.NewLogger(t), encryption.TransformReader(encryption.PadReader(ioutil.NopCloser(
bytes.NewReader(data)), encrypter.InBlockSize()), encrypter, 0), rs)
if err != nil {
t.Fatal(err)
}
pieces, err := readAll(readers)
if err != nil {
t.Fatal(err)
}
rrs := map[int]ranger.Ranger{}
for i, piece := range pieces {
rrs[i] = ranger.ByteRanger(piece)
}
decrypter, err := encryption.NewDecrypter(storj.EncAESGCM, &encKey, &firstNonce, blockSize)
if err != nil {
t.Fatal(err)
}
rc, err := Decode(zaptest.NewLogger(t), rrs, rs, 0, false)
if err != nil {
t.Fatal(err)
}
rr, err := encryption.Transform(rc, decrypter)
if err != nil {
t.Fatal(err)
}
rr, err = encryption.UnpadSlow(ctx, rr)
if err != nil {
t.Fatal(err)
}
r, err := rr.Range(ctx, 0, rr.Size())
if err != nil {
t.Fatal(err)
}
data2, err := ioutil.ReadAll(r)
if err != nil {
t.Fatal(err)
}
if !bytes.Equal(data, data2) {
t.Fatalf("rs encode/decode failed")
}
}
func TestNewRedundancyStrategy(t *testing.T) {
for i, tt := range []struct {
rep int
opt int
expRep int
expOpt int
errString string
}{
{0, 0, 4, 4, ""},
{-1, 0, 0, 0, "eestream error: negative repair threshold"},
{1, 0, 0, 0, "eestream error: repair threshold less than required count"},
{5, 0, 0, 0, "eestream error: repair threshold greater than total count"},
{0, -1, 0, 0, "eestream error: negative optimal threshold"},
{0, 1, 0, 0, "eestream error: optimal threshold less than required count"},
{0, 5, 0, 0, "eestream error: optimal threshold greater than total count"},
{3, 4, 3, 4, ""},
{0, 3, 0, 0, "eestream error: repair threshold greater than optimal threshold"},
{4, 3, 0, 0, "eestream error: repair threshold greater than optimal threshold"},
{4, 4, 4, 4, ""},
} {
errTag := fmt.Sprintf("Test case #%d", i)
fc, err := infectious.NewFEC(2, 4)
if !assert.NoError(t, err, errTag) {
continue
}
es := NewRSScheme(fc, 8*1024)
rs, err := NewRedundancyStrategy(es, tt.rep, tt.opt)
if tt.errString != "" {
assert.EqualError(t, err, tt.errString, errTag)
continue
}
assert.NoError(t, err, errTag)
assert.Equal(t, tt.expRep, rs.RepairThreshold(), errTag)
assert.Equal(t, tt.expOpt, rs.OptimalThreshold(), errTag)
}
}
// Some pieces will return a read error.
// The test will pass if at least the required number of pieces are still good.
func TestRSErrors(t *testing.T) {
for i, tt := range []testCase{
{4 * 1024, 1024, 1, 1, 0, false},
{4 * 1024, 1024, 1, 1, 1, true},
{4 * 1024, 1024, 1, 2, 0, false},
{4 * 1024, 1024, 1, 2, 1, false},
{4 * 1024, 1024, 1, 2, 2, true},
{4 * 1024, 1024, 2, 4, 0, false},
{4 * 1024, 1024, 2, 4, 1, false},
{4 * 1024, 1024, 2, 4, 2, false},
{4 * 1024, 1024, 2, 4, 3, true},
{4 * 1024, 1024, 2, 4, 4, true},
{6 * 1024, 1024, 3, 7, 0, false},
{6 * 1024, 1024, 3, 7, 1, false},
{6 * 1024, 1024, 3, 7, 2, false},
{6 * 1024, 1024, 3, 7, 3, false},
{6 * 1024, 1024, 3, 7, 4, false},
{6 * 1024, 1024, 3, 7, 5, true},
{6 * 1024, 1024, 3, 7, 6, true},
{6 * 1024, 1024, 3, 7, 7, true},
} {
testRSProblematic(t, tt, i, func(in []byte) io.ReadCloser {
return readcloser.FatalReadCloser(
errors.New("I am an error piece"))
})
}
}
// Some pieces will read EOF at the beginning (byte 0).
// The test will pass if the number of such pieces is less than the required count.
func TestRSEOF(t *testing.T) {
for i, tt := range []testCase{
{4 * 1024, 1024, 1, 1, 0, false},
{4 * 1024, 1024, 1, 1, 1, true},
{4 * 1024, 1024, 1, 2, 0, false},
{4 * 1024, 1024, 1, 2, 1, false},
{4 * 1024, 1024, 1, 2, 2, true},
{4 * 1024, 1024, 2, 4, 0, false},
{4 * 1024, 1024, 2, 4, 1, false},
{4 * 1024, 1024, 2, 4, 2, false},
{4 * 1024, 1024, 2, 4, 3, true},
{4 * 1024, 1024, 2, 4, 4, true},
{6 * 1024, 1024, 3, 7, 0, false},
{6 * 1024, 1024, 3, 7, 1, false},
{6 * 1024, 1024, 3, 7, 2, false},
{6 * 1024, 1024, 3, 7, 3, false},
{6 * 1024, 1024, 3, 7, 4, false},
{6 * 1024, 1024, 3, 7, 5, true},
{6 * 1024, 1024, 3, 7, 6, true},
{6 * 1024, 1024, 3, 7, 7, true},
} {
testRSProblematic(t, tt, i, func(in []byte) io.ReadCloser {
return readcloser.LimitReadCloser(
ioutil.NopCloser(bytes.NewReader(in)), 0)
})
}
}
// Some pieces will read EOF earlier than expected.
// The test will pass if the number of such pieces is less than the required count.
func TestRSEarlyEOF(t *testing.T) {
for i, tt := range []testCase{
{4 * 1024, 1024, 1, 1, 0, false},
{4 * 1024, 1024, 1, 1, 1, true},
{4 * 1024, 1024, 1, 2, 0, false},
{4 * 1024, 1024, 1, 2, 1, false},
{4 * 1024, 1024, 1, 2, 2, true},
{4 * 1024, 1024, 2, 4, 0, false},
{4 * 1024, 1024, 2, 4, 1, false},
{4 * 1024, 1024, 2, 4, 2, false},
{4 * 1024, 1024, 2, 4, 3, true},
{4 * 1024, 1024, 2, 4, 4, true},
{6 * 1024, 1024, 3, 7, 0, false},
{6 * 1024, 1024, 3, 7, 1, false},
{6 * 1024, 1024, 3, 7, 2, false},
{6 * 1024, 1024, 3, 7, 3, false},
{6 * 1024, 1024, 3, 7, 4, false},
{6 * 1024, 1024, 3, 7, 5, true},
{6 * 1024, 1024, 3, 7, 6, true},
{6 * 1024, 1024, 3, 7, 7, true},
} {
testRSProblematic(t, tt, i, func(in []byte) io.ReadCloser {
// Read EOF after 500 bytes
return readcloser.LimitReadCloser(
ioutil.NopCloser(bytes.NewReader(in)), 500)
})
}
}
// Some pieces will read EOF later than expected.
// The test will pass if at least the required number of pieces are still good.
func TestRSLateEOF(t *testing.T) {
for i, tt := range []testCase{
{4 * 1024, 1024, 1, 1, 0, false},
{4 * 1024, 1024, 1, 1, 1, false},
{4 * 1024, 1024, 1, 2, 0, false},
{4 * 1024, 1024, 1, 2, 1, false},
{4 * 1024, 1024, 1, 2, 2, false},
{4 * 1024, 1024, 2, 4, 0, false},
{4 * 1024, 1024, 2, 4, 1, false},
{4 * 1024, 1024, 2, 4, 2, false},
{4 * 1024, 1024, 2, 4, 3, false},
{4 * 1024, 1024, 2, 4, 4, false},
{6 * 1024, 1024, 3, 7, 0, false},
{6 * 1024, 1024, 3, 7, 1, false},
{6 * 1024, 1024, 3, 7, 2, false},
{6 * 1024, 1024, 3, 7, 3, false},
{6 * 1024, 1024, 3, 7, 4, false},
{6 * 1024, 1024, 3, 7, 5, false},
{6 * 1024, 1024, 3, 7, 6, false},
{6 * 1024, 1024, 3, 7, 7, false},
} {
testRSProblematic(t, tt, i, func(in []byte) io.ReadCloser {
// extend the input with a random number of random bytes
random := testrand.BytesInt(1 + testrand.Intn(10000))
extended := append(in, random...)
return ioutil.NopCloser(bytes.NewReader(extended))
})
}
}
// Some pieces will read random data.
// The test will pass if there are enough good pieces for error correction.
func TestRSRandomData(t *testing.T) {
for i, tt := range []testCase{
{4 * 1024, 1024, 1, 1, 0, false},
{4 * 1024, 1024, 1, 1, 1, true},
{4 * 1024, 1024, 1, 2, 0, false},
{4 * 1024, 1024, 1, 2, 1, true},
{4 * 1024, 1024, 1, 2, 2, true},
{4 * 1024, 1024, 2, 4, 0, false},
{4 * 1024, 1024, 2, 4, 1, false},
{4 * 1024, 1024, 2, 4, 2, true},
{4 * 1024, 1024, 2, 4, 3, true},
{4 * 1024, 1024, 2, 4, 4, true},
{6 * 1024, 1024, 3, 7, 0, false},
{6 * 1024, 1024, 3, 7, 1, false},
{6 * 1024, 1024, 3, 7, 2, false},
{6 * 1024, 1024, 3, 7, 4, true},
{6 * 1024, 1024, 3, 7, 5, true},
{6 * 1024, 1024, 3, 7, 6, true},
{6 * 1024, 1024, 3, 7, 7, true},
} {
testRSProblematic(t, tt, i, func(in []byte) io.ReadCloser {
// return random data instead of the expected data
return ioutil.NopCloser(bytes.NewReader(testrand.BytesInt(len(in))))
})
}
}
// Some pieces will read slowly.
func TestRSSlow(t *testing.T) {
for i, tt := range []testCase{
{4 * 1024, 1024, 1, 1, 0, false},
{4 * 1024, 1024, 1, 2, 0, false},
{4 * 1024, 1024, 2, 4, 0, false},
{4 * 1024, 1024, 2, 4, 1, false},
{6 * 1024, 1024, 3, 7, 0, false},
{6 * 1024, 1024, 3, 7, 1, false},
{6 * 1024, 1024, 3, 7, 2, false},
{6 * 1024, 1024, 3, 7, 3, false},
} {
start := time.Now()
testRSProblematic(t, tt, i, func(in []byte) io.ReadCloser {
// sleep 1 second before every read
return ioutil.NopCloser(SlowReader(bytes.NewReader(in), 1*time.Second))
})
if time.Since(start) > 1*time.Second {
t.Fatalf("waited for slow reader")
}
}
}
type testCase struct {
dataSize int
blockSize int
required int
total int
problematic int
fail bool
}
type problematicReadCloser func([]byte) io.ReadCloser
func testRSProblematic(t *testing.T, tt testCase, i int, fn problematicReadCloser) {
errTag := fmt.Sprintf("Test case #%d", i)
ctx := context.Background()
data := testrand.BytesInt(tt.dataSize)
fc, err := infectious.NewFEC(tt.required, tt.total)
if !assert.NoError(t, err, errTag) {
return
}
es := NewRSScheme(fc, tt.blockSize)
rs, err := NewRedundancyStrategy(es, 0, 0)
if !assert.NoError(t, err, errTag) {
return
}
readers, err := EncodeReader(ctx, zaptest.NewLogger(t), bytes.NewReader(data), rs)
if !assert.NoError(t, err, errTag) {
return
}
// read all readers into []byte buffers to avoid a deadlock if we later
// don't read from all of them in parallel
pieces, err := readAll(readers)
if !assert.NoError(t, err, errTag) {
return
}
readerMap := make(map[int]io.ReadCloser, len(readers))
// some readers will have problematic behavior
for i := 0; i < tt.problematic; i++ {
readerMap[i] = fn(pieces[i])
}
// the rest will operate normally
for i := tt.problematic; i < tt.total; i++ {
readerMap[i] = ioutil.NopCloser(bytes.NewReader(pieces[i]))
}
ctx, cancel := context.WithCancel(ctx)
decoder := DecodeReaders(ctx, cancel, zaptest.NewLogger(t), readerMap, rs, int64(tt.dataSize), 3*1024, false)
defer func() { assert.NoError(t, decoder.Close()) }()
data2, err := ioutil.ReadAll(decoder)
if tt.fail {
if err == nil && bytes.Equal(data, data2) {
assert.Fail(t, "expected to fail, but didn't", errTag)
}
} else if assert.NoError(t, err, errTag) {
assert.Equal(t, data, data2, errTag)
}
}
func readAll(readers []io.ReadCloser) ([][]byte, error) {
pieces := make([][]byte, len(readers))
errors := make(chan error, len(readers))
for i := range readers {
go func(i int) {
var err error
pieces[i], err = ioutil.ReadAll(readers[i])
errors <- errs.Combine(err, readers[i].Close())
}(i)
}
for range readers {
err := <-errors
if err != nil {
return nil, err
}
}
return pieces, nil
}
func SlowReader(r io.Reader, delay time.Duration) io.Reader {
return &slowReader{Reader: r, Delay: delay}
}
type slowReader struct {
Reader io.Reader
Delay time.Duration
}
func (s *slowReader) Read(p []byte) (n int, err error) {
time.Sleep(s.Delay)
return s.Reader.Read(p)
}
func TestEncoderStalledReaders(t *testing.T) {
ctx := context.Background()
data := testrand.Bytes(120 * 1024)
fc, err := infectious.NewFEC(30, 60)
if err != nil {
t.Fatal(err)
}
es := NewRSScheme(fc, 1024)
rs, err := NewRedundancyStrategy(es, 35, 50)
if err != nil {
t.Fatal(err)
}
readers, err := EncodeReader(ctx, zaptest.NewLogger(t), bytes.NewReader(data), rs)
if err != nil {
t.Fatal(err)
}
start := time.Now()
_, err = readAllStalled(readers, 25)
assert.NoError(t, err)
if time.Since(start) > 1*time.Second {
t.Fatalf("waited for slow reader")
}
for _, reader := range readers {
assert.NoError(t, reader.Close())
}
}
func readAllStalled(readers []io.ReadCloser, stalled int) ([][]byte, error) {
pieces := make([][]byte, len(readers))
errs := make(chan error, len(readers))
for i := stalled; i < len(readers); i++ {
go func(i int) {
var err error
pieces[i], err = ioutil.ReadAll(readers[i])
errs <- err
}(i)
}
for i := stalled; i < len(readers); i++ {
err := <-errs
if err != nil {
return nil, err
}
}
return pieces, nil
}
func TestDecoderErrorWithStalledReaders(t *testing.T) {
ctx := context.Background()
data := testrand.Bytes(10 * 1024)
fc, err := infectious.NewFEC(10, 20)
if err != nil {
t.Fatal(err)
}
es := NewRSScheme(fc, 1024)
rs, err := NewRedundancyStrategy(es, 0, 0)
if err != nil {
t.Fatal(err)
}
readers, err := EncodeReader(ctx, zaptest.NewLogger(t), bytes.NewReader(data), rs)
if err != nil {
t.Fatal(err)
}
// read all readers into []byte buffers to avoid a deadlock if we later
// don't read from all of them in parallel
pieces, err := readAll(readers)
if !assert.NoError(t, err) {
return
}
readerMap := make(map[int]io.ReadCloser, len(readers))
// just a few readers will operate normally
for i := 0; i < 4; i++ {
readerMap[i] = ioutil.NopCloser(bytes.NewReader(pieces[i]))
}
// some of the readers will be slow
for i := 4; i < 7; i++ {
readerMap[i] = ioutil.NopCloser(SlowReader(bytes.NewReader(pieces[i]), 1*time.Second))
}
// most of the readers will return error
for i := 7; i < 20; i++ {
readerMap[i] = readcloser.FatalReadCloser(errors.New("I am an error piece"))
}
ctx, cancel := context.WithCancel(ctx)
decoder := DecodeReaders(ctx, cancel, zaptest.NewLogger(t), readerMap, rs, int64(10*1024), 0, false)
defer func() { assert.NoError(t, decoder.Close()) }()
// record the time for reading the data from the decoder
start := time.Now()
_, err = ioutil.ReadAll(decoder)
// we expect the decoder to fail with an error as there are not enough good
// nodes to reconstruct the data
assert.Error(t, err)
// but without waiting for the slowest nodes
if time.Since(start) > 1*time.Second {
t.Fatalf("waited for slow reader")
}
}
func BenchmarkReedSolomonErasureScheme(b *testing.B) {
data := testrand.Bytes(8 << 20)
output := make([]byte, 8<<20)
confs := []struct{ required, total int }{
{2, 4},
{20, 50},
{30, 60},
{50, 80},
}
dataSizes := []int{
100,
1 << 10,
256 << 10,
1 << 20,
5 << 20,
8 << 20,
}
bytesToStr := func(bytes int) string {
switch {
case bytes > 10000000:
return fmt.Sprintf("%.fMB", float64(bytes)/float64(1<<20))
case bytes > 1000:
return fmt.Sprintf("%.fKB", float64(bytes)/float64(1<<10))
default:
return fmt.Sprintf("%dB", bytes)
}
}
for _, conf := range confs {
configuration := conf
confname := fmt.Sprintf("r%dt%d/", configuration.required, configuration.total)
for _, expDataSize := range dataSizes {
dataSize := (expDataSize / configuration.required) * configuration.required
testname := bytesToStr(dataSize)
forwardErrorCode, _ := infectious.NewFEC(configuration.required, configuration.total)
erasureScheme := NewRSScheme(forwardErrorCode, 8*1024)
b.Run("Encode/"+confname+testname, func(b *testing.B) {
b.SetBytes(int64(dataSize))
for i := 0; i < b.N; i++ {
err := erasureScheme.Encode(data[:dataSize], func(num int, data []byte) {
_, _ = num, data
})
if err != nil {
b.Fatal(err)
}
}
})
shares := []infectious.Share{}
err := erasureScheme.Encode(data[:dataSize], func(num int, data []byte) {
shares = append(shares, infectious.Share{
Number: num,
Data: append([]byte{}, data...),
})
})
if err != nil {
b.Fatal(err)
}
b.Run("Decode/"+confname+testname, func(b *testing.B) {
b.SetBytes(int64(dataSize))
shareMap := make(map[int][]byte, configuration.total*2)
for i := 0; i < b.N; i++ {
rand.Shuffle(len(shares), func(i, k int) {
shares[i], shares[k] = shares[k], shares[i]
})
offset := i % (configuration.total / 4)
n := configuration.required + 1 + offset
if n > configuration.total {
n = configuration.total
}
for k := range shareMap {
delete(shareMap, k)
}
for i := range shares[:n] {
shareMap[shares[i].Number] = shares[i].Data
}
_, err = erasureScheme.Decode(output[:dataSize], shareMap)
if err != nil {
b.Fatal(err)
}
}
})
}
}
}
func TestCalcPieceSize(t *testing.T) {
const uint32Size = 4
ctx := testcontext.New(t)
defer ctx.Cleanup()
for i, dataSize := range []int64{
0,
1,
1*memory.KiB.Int64() - uint32Size,
1 * memory.KiB.Int64(),
32*memory.KiB.Int64() - uint32Size,
32 * memory.KiB.Int64(),
32*memory.KiB.Int64() + 100,
} {
errTag := fmt.Sprintf("%d. %+v", i, dataSize)
fc, err := infectious.NewFEC(2, 4)
require.NoError(t, err, errTag)
es := NewRSScheme(fc, 1*memory.KiB.Int())
rs, err := NewRedundancyStrategy(es, 0, 0)
require.NoError(t, err, errTag)
calculatedSize := CalcPieceSize(dataSize, es)
randReader := ioutil.NopCloser(io.LimitReader(testrand.Reader(), dataSize))
readers, err := EncodeReader(ctx, zaptest.NewLogger(t), encryption.PadReader(randReader, es.StripeSize()), rs)
require.NoError(t, err, errTag)
for _, reader := range readers {
piece, err := ioutil.ReadAll(reader)
assert.NoError(t, err, errTag)
assert.EqualValues(t, calculatedSize, len(piece), errTag)
}
}
}
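A small sketch of a redundancy strategy that satisfies the threshold rules exercised by TestNewRedundancyStrategy above, assuming the eestream package; exampleStrategy is a hypothetical helper:
package eestream

import (
	"github.com/vivint/infectious"
)

// exampleStrategy builds a strategy whose thresholds satisfy
// required <= repair <= optimal <= total; passing 0 for either threshold
// defaults it to the total count, as the test table above suggests.
func exampleStrategy() (RedundancyStrategy, error) {
	fc, err := infectious.NewFEC(2, 4)
	if err != nil {
		return RedundancyStrategy{}, err
	}
	es := NewRSScheme(fc, 8*1024)
	return NewRedundancyStrategy(es, 3, 4) // repair=3, optimal=4
}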

View File

@ -1,180 +0,0 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.
package eestream
import (
"context"
"fmt"
"io"
"sort"
"strings"
"sync"
"github.com/vivint/infectious"
"go.uber.org/zap"
monkit "gopkg.in/spacemonkeygo/monkit.v2"
)
var (
mon = monkit.Package()
)
// StripeReader reads and decodes stripes from a set of readers.
type StripeReader struct {
scheme ErasureScheme
cond *sync.Cond
readerCount int
bufs map[int]*PieceBuffer
inbufs map[int][]byte
inmap map[int][]byte
errmap map[int]error
forceErrorDetection bool
}
// NewStripeReader creates a new StripeReader from the given readers, erasure
// scheme and max buffer memory.
func NewStripeReader(log *zap.Logger, rs map[int]io.ReadCloser, es ErasureScheme, mbm int, forceErrorDetection bool) *StripeReader {
readerCount := len(rs)
r := &StripeReader{
scheme: es,
cond: sync.NewCond(&sync.Mutex{}),
readerCount: readerCount,
bufs: make(map[int]*PieceBuffer, readerCount),
inbufs: make(map[int][]byte, readerCount),
inmap: make(map[int][]byte, readerCount),
errmap: make(map[int]error, readerCount),
forceErrorDetection: forceErrorDetection,
}
bufSize := mbm / readerCount
bufSize -= bufSize % es.ErasureShareSize()
if bufSize < es.ErasureShareSize() {
bufSize = es.ErasureShareSize()
}
for i := range rs {
r.inbufs[i] = make([]byte, es.ErasureShareSize())
r.bufs[i] = NewPieceBuffer(log, make([]byte, bufSize), es.ErasureShareSize(), r.cond)
// Kick off a goroutine for each reader to copy its data into a PieceBuffer.
go func(r io.Reader, buf *PieceBuffer) {
_, err := io.Copy(buf, r)
if err != nil {
buf.SetError(err)
return
}
buf.SetError(io.EOF)
}(rs[i], r.bufs[i])
}
return r
}
// Close closes the StripeReader and all PieceBuffers.
func (r *StripeReader) Close() error {
errs := make(chan error, len(r.bufs))
for _, buf := range r.bufs {
go func(c io.Closer) {
errs <- c.Close()
}(buf)
}
var first error
for range r.bufs {
err := <-errs
if err != nil && first == nil {
first = Error.Wrap(err)
}
}
return first
}
// ReadStripe reads and decodes the num-th stripe and concatenates it to p. The
// return value is the updated byte slice.
func (r *StripeReader) ReadStripe(ctx context.Context, num int64, p []byte) (_ []byte, err error) {
for i := range r.inmap {
delete(r.inmap, i)
}
r.cond.L.Lock()
defer r.cond.L.Unlock()
for r.pendingReaders() {
for r.readAvailableShares(ctx, num) == 0 {
r.cond.Wait()
}
if r.hasEnoughShares() {
out, err := r.scheme.Decode(p, r.inmap)
if err != nil {
if r.shouldWaitForMore(err) {
continue
}
return nil, err
}
return out, nil
}
}
// could not read enough shares to attempt a decode
mon.Meter("download_stripe_failed_not_enough_pieces_uplink").Mark(1) //locked
return nil, r.combineErrs(num)
}
// readAvailableShares reads the available num-th erasure shares from the piece
// buffers without blocking. The return value n is the number of erasure shares
// read.
func (r *StripeReader) readAvailableShares(ctx context.Context, num int64) (n int) {
for i, buf := range r.bufs {
if r.inmap[i] != nil || r.errmap[i] != nil {
continue
}
if buf.HasShare(num) {
err := buf.ReadShare(num, r.inbufs[i])
if err != nil {
r.errmap[i] = err
} else {
r.inmap[i] = r.inbufs[i]
}
n++
}
}
return n
}
// pendingReaders checks if there are any pending readers to get a share from.
func (r *StripeReader) pendingReaders() bool {
goodReaders := r.readerCount - len(r.errmap)
return goodReaders >= r.scheme.RequiredCount() && goodReaders > len(r.inmap)
}
// hasEnoughShares checks whether enough erasure shares have been read to attempt
// a decode.
func (r *StripeReader) hasEnoughShares() bool {
return len(r.inmap) >= r.scheme.RequiredCount()+1 ||
(!r.forceErrorDetection && len(r.inmap) == r.scheme.RequiredCount() && !r.pendingReaders())
}
// shouldWaitForMore checks the returned decode error to determine whether it
// makes sense to wait for more erasure shares and retry the error correction.
func (r *StripeReader) shouldWaitForMore(err error) bool {
// check if the error is due to error detection
if !infectious.NotEnoughShares.Contains(err) &&
!infectious.TooManyErrors.Contains(err) {
return false
}
// check if there are more input buffers to wait for
return r.pendingReaders()
}
// combineErrs makes a useful error message from the errors in errmap.
// combineErrs always returns an error.
func (r *StripeReader) combineErrs(num int64) error {
if len(r.errmap) == 0 {
return Error.New("programmer error: no errors to combine")
}
errstrings := make([]string, 0, len(r.errmap))
for i, err := range r.errmap {
errstrings = append(errstrings, fmt.Sprintf("\nerror retrieving piece %02d: %v", i, err))
}
sort.Strings(errstrings)
return Error.New("failed to download stripe %d: %s", num, strings.Join(errstrings, ""))
}
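A minimal sketch of driving the StripeReader above from a decoder loop, assuming the eestream package; decodeStripes is a hypothetical helper:
package eestream

import (
	"context"
	"io"

	"go.uber.org/zap"
)

// decodeStripes reads `count` stripes from the given piece readers and
// returns the concatenated decoded data. ReadStripe appends each decoded
// stripe to the slice it is given and returns the updated slice.
func decodeStripes(ctx context.Context, pieces map[int]io.ReadCloser, es ErasureScheme, count int64) ([]byte, error) {
	const maxBufferMemory = 4 << 20 // 4 MiB split across the piece buffers
	sr := NewStripeReader(zap.NewNop(), pieces, es, maxBufferMemory, false)
	defer func() { _ = sr.Close() }()

	out := make([]byte, 0, count*int64(es.StripeSize()))
	for num := int64(0); num < count; num++ {
		var err error
		out, err = sr.ReadStripe(ctx, num, out)
		if err != nil {
			return nil, err
		}
	}
	return out, nil
}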

View File

@ -1,62 +0,0 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.
package eestream
import (
"github.com/vivint/infectious"
)
type unsafeRSScheme struct {
fc *infectious.FEC
erasureShareSize int
}
// NewUnsafeRSScheme returns a Reed-Solomon-based ErasureScheme without error correction.
func NewUnsafeRSScheme(fc *infectious.FEC, erasureShareSize int) ErasureScheme {
return &unsafeRSScheme{fc: fc, erasureShareSize: erasureShareSize}
}
func (s *unsafeRSScheme) EncodeSingle(input, output []byte, num int) (err error) {
return s.fc.EncodeSingle(input, output, num)
}
func (s *unsafeRSScheme) Encode(input []byte, output func(num int, data []byte)) (
err error) {
return s.fc.Encode(input, func(s infectious.Share) {
output(s.Number, s.Data)
})
}
func (s *unsafeRSScheme) Decode(out []byte, in map[int][]byte) ([]byte, error) {
shares := make([]infectious.Share, 0, len(in))
for num, data := range in {
shares = append(shares, infectious.Share{Number: num, Data: data})
}
stripe := make([]byte, s.RequiredCount()*s.ErasureShareSize())
err := s.fc.Rebuild(shares, func(share infectious.Share) {
copy(stripe[share.Number*s.ErasureShareSize():], share.Data)
})
if err != nil {
return nil, err
}
return stripe, nil
}
func (s *unsafeRSScheme) ErasureShareSize() int {
return s.erasureShareSize
}
func (s *unsafeRSScheme) StripeSize() int {
return s.erasureShareSize * s.fc.Required()
}
func (s *unsafeRSScheme) TotalCount() int {
return s.fc.Total()
}
func (s *unsafeRSScheme) RequiredCount() int {
return s.fc.Required()
}
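The regular scheme above decodes through infectious's full decode path, which can detect corrupt shares when more shares than required are supplied, while this unsafe variant only rebuilds the stripe. A small sketch, assuming the eestream package, of picking between them; chooseScheme is a hypothetical helper:
package eestream

import (
	"github.com/vivint/infectious"
)

// chooseScheme picks between the error-detecting and the faster,
// non-detecting Reed-Solomon scheme shown above.
func chooseScheme(fc *infectious.FEC, shareSize int, detectErrors bool) ErasureScheme {
	if detectErrors {
		return NewRSScheme(fc, shareSize)
	}
	return NewUnsafeRSScheme(fc, shareSize)
}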

View File

@ -1,142 +0,0 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.
package metainfo
import (
"github.com/zeebo/errs"
"storj.io/common/pb"
)
var (
// ErrInvalidType is the error returned for an invalid response type cast
ErrInvalidType = errs.New("invalid response type")
)
// BatchItem represents a single request in a batch
type BatchItem interface {
BatchItem() *pb.BatchRequestItem
}
// BatchResponse is a single response from a batch call
type BatchResponse struct {
pbRequest interface{}
pbResponse interface{}
}
// CreateBucket returns the response for a CreateBucket request
func (resp *BatchResponse) CreateBucket() (CreateBucketResponse, error) {
item, ok := resp.pbResponse.(*pb.BatchResponseItem_BucketCreate)
if !ok {
return CreateBucketResponse{}, ErrInvalidType
}
createResponse, err := newCreateBucketResponse(item.BucketCreate)
if err != nil {
return CreateBucketResponse{}, err
}
return createResponse, nil
}
// GetBucket returns response for GetBucket request
func (resp *BatchResponse) GetBucket() (GetBucketResponse, error) {
item, ok := resp.pbResponse.(*pb.BatchResponseItem_BucketGet)
if !ok {
return GetBucketResponse{}, ErrInvalidType
}
getResponse, err := newGetBucketResponse(item.BucketGet)
if err != nil {
return GetBucketResponse{}, err
}
return getResponse, nil
}
// ListBuckets returns response for ListBuckets request
func (resp *BatchResponse) ListBuckets() (ListBucketsResponse, error) {
item, ok := resp.pbResponse.(*pb.BatchResponseItem_BucketList)
if !ok {
return ListBucketsResponse{}, ErrInvalidType
}
return newListBucketsResponse(item.BucketList), nil
}
// BeginObject returns response for BeginObject request
func (resp *BatchResponse) BeginObject() (BeginObjectResponse, error) {
item, ok := resp.pbResponse.(*pb.BatchResponseItem_ObjectBegin)
if !ok {
return BeginObjectResponse{}, ErrInvalidType
}
return newBeginObjectResponse(item.ObjectBegin), nil
}
// BeginDeleteObject returns response for BeginDeleteObject request
func (resp *BatchResponse) BeginDeleteObject() (BeginDeleteObjectResponse, error) {
item, ok := resp.pbResponse.(*pb.BatchResponseItem_ObjectBeginDelete)
if !ok {
return BeginDeleteObjectResponse{}, ErrInvalidType
}
return newBeginDeleteObjectResponse(item.ObjectBeginDelete), nil
}
// GetObject returns response for GetObject request
func (resp *BatchResponse) GetObject() (GetObjectResponse, error) {
item, ok := resp.pbResponse.(*pb.BatchResponseItem_ObjectGet)
if !ok {
return GetObjectResponse{}, ErrInvalidType
}
return newGetObjectResponse(item.ObjectGet), nil
}
// ListObjects returns response for ListObjects request
func (resp *BatchResponse) ListObjects() (ListObjectsResponse, error) {
item, ok := resp.pbResponse.(*pb.BatchResponseItem_ObjectList)
if !ok {
return ListObjectsResponse{}, ErrInvalidType
}
requestItem, ok := resp.pbRequest.(*pb.BatchRequestItem_ObjectList)
if !ok {
return ListObjectsResponse{}, ErrInvalidType
}
return newListObjectsResponse(item.ObjectList, requestItem.ObjectList.EncryptedPrefix, requestItem.ObjectList.Recursive), nil
}
// BeginSegment returns response for BeginSegment request
func (resp *BatchResponse) BeginSegment() (BeginSegmentResponse, error) {
item, ok := resp.pbResponse.(*pb.BatchResponseItem_SegmentBegin)
if !ok {
return BeginSegmentResponse{}, ErrInvalidType
}
return newBeginSegmentResponse(item.SegmentBegin), nil
}
// BeginDeleteSegment returns response for BeginDeleteSegment request
func (resp *BatchResponse) BeginDeleteSegment() (BeginDeleteSegmentResponse, error) {
item, ok := resp.pbResponse.(*pb.BatchResponseItem_SegmentBeginDelete)
if !ok {
return BeginDeleteSegmentResponse{}, ErrInvalidType
}
return newBeginDeleteSegmentResponse(item.SegmentBeginDelete), nil
}
// ListSegment returns response for ListSegment request
func (resp *BatchResponse) ListSegment() (ListSegmentsResponse, error) {
item, ok := resp.pbResponse.(*pb.BatchResponseItem_SegmentList)
if !ok {
return ListSegmentsResponse{}, ErrInvalidType
}
return newListSegmentsResponse(item.SegmentList), nil
}
// DownloadSegment returns response for DownloadSegment request
func (resp *BatchResponse) DownloadSegment() (DownloadSegmentResponse, error) {
item, ok := resp.pbResponse.(*pb.BatchResponseItem_SegmentDownload)
if !ok {
return DownloadSegmentResponse{}, ErrInvalidType
}
return newDownloadSegmentResponse(item.SegmentDownload), nil
}
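A minimal sketch of consuming the typed accessors above, assuming the metainfo package and a slice of BatchResponse values obtained from a batch call issued elsewhere; firstTwoResponses is a hypothetical helper:
package metainfo

import (
	"github.com/zeebo/errs"
)

// firstTwoResponses pulls typed results out of a batch reply in the same
// order the requests were issued: a CreateBucket followed by a BeginObject.
// Each accessor returns ErrInvalidType if the response at that position is
// of a different kind.
func firstTwoResponses(responses []BatchResponse) (CreateBucketResponse, BeginObjectResponse, error) {
	if len(responses) < 2 {
		return CreateBucketResponse{}, BeginObjectResponse{}, errs.New("expected 2 responses, got %d", len(responses))
	}
	created, err := responses[0].CreateBucket()
	if err != nil {
		return CreateBucketResponse{}, BeginObjectResponse{}, err
	}
	begun, err := responses[1].BeginObject()
	if err != nil {
		return CreateBucketResponse{}, BeginObjectResponse{}, err
	}
	return created, begun, nil
}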

File diff suppressed because it is too large

View File

@ -1,176 +0,0 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.
package metainfo
import (
"context"
"time"
"github.com/skyrings/skyring-common/tools/uuid"
"storj.io/common/errs2"
"storj.io/common/pb"
"storj.io/common/rpc/rpcstatus"
"storj.io/common/storj"
)
// CreateSegmentOld requests the order limits for creating a new segment
func (client *Client) CreateSegmentOld(ctx context.Context, bucket string, path storj.Path, segmentIndex int64, redundancy *pb.RedundancyScheme, maxEncryptedSegmentSize int64, expiration time.Time) (limits []*pb.AddressedOrderLimit, rootPieceID storj.PieceID, piecePrivateKey storj.PiecePrivateKey, err error) {
defer mon.Task()(&ctx)(&err)
response, err := client.client.CreateSegmentOld(ctx, &pb.SegmentWriteRequestOld{
Header: client.header(),
Bucket: []byte(bucket),
Path: []byte(path),
Segment: segmentIndex,
Redundancy: redundancy,
MaxEncryptedSegmentSize: maxEncryptedSegmentSize,
Expiration: expiration,
})
if err != nil {
return nil, rootPieceID, piecePrivateKey, Error.Wrap(err)
}
return response.GetAddressedLimits(), response.RootPieceId, response.PrivateKey, nil
}
// CommitSegmentOld requests to store the pointer for the segment
func (client *Client) CommitSegmentOld(ctx context.Context, bucket string, path storj.Path, segmentIndex int64, pointer *pb.Pointer, originalLimits []*pb.OrderLimit) (savedPointer *pb.Pointer, err error) {
defer mon.Task()(&ctx)(&err)
response, err := client.client.CommitSegmentOld(ctx, &pb.SegmentCommitRequestOld{
Header: client.header(),
Bucket: []byte(bucket),
Path: []byte(path),
Segment: segmentIndex,
Pointer: pointer,
OriginalLimits: originalLimits,
})
if err != nil {
return nil, Error.Wrap(err)
}
return response.GetPointer(), nil
}
// SegmentInfoOld requests the pointer of a segment
func (client *Client) SegmentInfoOld(ctx context.Context, bucket string, path storj.Path, segmentIndex int64) (pointer *pb.Pointer, err error) {
defer mon.Task()(&ctx)(&err)
response, err := client.client.SegmentInfoOld(ctx, &pb.SegmentInfoRequestOld{
Header: client.header(),
Bucket: []byte(bucket),
Path: []byte(path),
Segment: segmentIndex,
})
if err != nil {
if errs2.IsRPC(err, rpcstatus.NotFound) {
return nil, storj.ErrObjectNotFound.Wrap(err)
}
return nil, Error.Wrap(err)
}
return response.GetPointer(), nil
}
// ReadSegmentOld requests the order limits for reading a segment
func (client *Client) ReadSegmentOld(ctx context.Context, bucket string, path storj.Path, segmentIndex int64) (pointer *pb.Pointer, limits []*pb.AddressedOrderLimit, piecePrivateKey storj.PiecePrivateKey, err error) {
defer mon.Task()(&ctx)(&err)
response, err := client.client.DownloadSegmentOld(ctx, &pb.SegmentDownloadRequestOld{
Header: client.header(),
Bucket: []byte(bucket),
Path: []byte(path),
Segment: segmentIndex,
})
if err != nil {
if errs2.IsRPC(err, rpcstatus.NotFound) {
return nil, nil, piecePrivateKey, storj.ErrObjectNotFound.Wrap(err)
}
return nil, nil, piecePrivateKey, Error.Wrap(err)
}
return response.GetPointer(), sortLimits(response.GetAddressedLimits(), response.GetPointer()), response.PrivateKey, nil
}
// sortLimits sorts order limits and fills missing ones with nil values
func sortLimits(limits []*pb.AddressedOrderLimit, pointer *pb.Pointer) []*pb.AddressedOrderLimit {
sorted := make([]*pb.AddressedOrderLimit, pointer.GetRemote().GetRedundancy().GetTotal())
for _, piece := range pointer.GetRemote().GetRemotePieces() {
sorted[piece.GetPieceNum()] = getLimitByStorageNodeID(limits, piece.NodeId)
}
return sorted
}
func getLimitByStorageNodeID(limits []*pb.AddressedOrderLimit, storageNodeID storj.NodeID) *pb.AddressedOrderLimit {
for _, limit := range limits {
if limit.GetLimit().StorageNodeId == storageNodeID {
return limit
}
}
return nil
}
// DeleteSegmentOld requests the order limits for deleting a segment
func (client *Client) DeleteSegmentOld(ctx context.Context, bucket string, path storj.Path, segmentIndex int64) (limits []*pb.AddressedOrderLimit, piecePrivateKey storj.PiecePrivateKey, err error) {
defer mon.Task()(&ctx)(&err)
response, err := client.client.DeleteSegmentOld(ctx, &pb.SegmentDeleteRequestOld{
Header: client.header(),
Bucket: []byte(bucket),
Path: []byte(path),
Segment: segmentIndex,
})
if err != nil {
if errs2.IsRPC(err, rpcstatus.NotFound) {
return nil, piecePrivateKey, storj.ErrObjectNotFound.Wrap(err)
}
return nil, piecePrivateKey, Error.Wrap(err)
}
return response.GetAddressedLimits(), response.PrivateKey, nil
}
// ListSegmentsOld lists the available segments
func (client *Client) ListSegmentsOld(ctx context.Context, bucket string, prefix, startAfter, ignoredEndBefore storj.Path, recursive bool, limit int32, metaFlags uint32) (items []ListItem, more bool, err error) {
defer mon.Task()(&ctx)(&err)
response, err := client.client.ListSegmentsOld(ctx, &pb.ListSegmentsRequestOld{
Header: client.header(),
Bucket: []byte(bucket),
Prefix: []byte(prefix),
StartAfter: []byte(startAfter),
Recursive: recursive,
Limit: limit,
MetaFlags: metaFlags,
})
if err != nil {
return nil, false, Error.Wrap(err)
}
list := response.GetItems()
items = make([]ListItem, len(list))
for i, item := range list {
items[i] = ListItem{
Path: storj.Path(item.GetPath()),
Pointer: item.GetPointer(),
IsPrefix: item.IsPrefix,
}
}
return items, response.GetMore(), nil
}
// SetAttributionOld tries to set the attribution information on the bucket.
func (client *Client) SetAttributionOld(ctx context.Context, bucket string, partnerID uuid.UUID) (err error) {
defer mon.Task()(&ctx)(&err)
_, err = client.client.SetAttributionOld(ctx, &pb.SetAttributionRequestOld{
Header: client.header(),
PartnerId: partnerID[:], // TODO: implement storj.UUID that can be sent using pb
BucketName: []byte(bucket),
})
return Error.Wrap(err)
}

View File

@ -1,144 +0,0 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.
package kvmetainfo
import (
"context"
"github.com/zeebo/errs"
"storj.io/common/encryption"
"storj.io/common/storj"
"storj.io/storj/uplink/metainfo"
)
// CreateBucket creates a new bucket
func (db *Project) CreateBucket(ctx context.Context, bucketName string, info *storj.Bucket) (_ storj.Bucket, err error) {
defer mon.Task()(&ctx)(&err)
if bucketName == "" {
return storj.Bucket{}, storj.ErrNoBucket.New("")
}
if info == nil {
info = &storj.Bucket{PathCipher: storj.EncAESGCM}
}
if info.DefaultEncryptionParameters.CipherSuite == storj.EncUnspecified {
info.DefaultEncryptionParameters.CipherSuite = storj.EncAESGCM
}
if info.DefaultEncryptionParameters.BlockSize == 0 {
info.DefaultEncryptionParameters.BlockSize = db.encryptedBlockSize
}
if info.DefaultRedundancyScheme.Algorithm == storj.InvalidRedundancyAlgorithm {
info.DefaultRedundancyScheme.Algorithm = storj.ReedSolomon
}
if info.DefaultRedundancyScheme.RequiredShares == 0 {
info.DefaultRedundancyScheme.RequiredShares = int16(db.redundancy.RequiredCount())
}
if info.DefaultRedundancyScheme.RepairShares == 0 {
info.DefaultRedundancyScheme.RepairShares = int16(db.redundancy.RepairThreshold())
}
if info.DefaultRedundancyScheme.OptimalShares == 0 {
info.DefaultRedundancyScheme.OptimalShares = int16(db.redundancy.OptimalThreshold())
}
if info.DefaultRedundancyScheme.TotalShares == 0 {
info.DefaultRedundancyScheme.TotalShares = int16(db.redundancy.TotalCount())
}
if info.DefaultRedundancyScheme.ShareSize == 0 {
info.DefaultRedundancyScheme.ShareSize = int32(db.redundancy.ErasureShareSize())
}
if info.DefaultSegmentsSize == 0 {
info.DefaultSegmentsSize = db.segmentsSize
}
if err := validateBlockSize(info.DefaultRedundancyScheme, info.DefaultEncryptionParameters.BlockSize); err != nil {
return storj.Bucket{}, storj.ErrBucket.Wrap(err)
}
if info.PathCipher < storj.EncNull || info.PathCipher > storj.EncSecretBox {
return storj.Bucket{}, encryption.ErrInvalidConfig.New("encryption type %d is not supported", info.PathCipher)
}
info.Name = bucketName
// uuid MarshalJSON implementation always returns err == nil
partnerID, _ := info.PartnerID.MarshalJSON()
newBucket, err := db.metainfo.CreateBucket(ctx, metainfo.CreateBucketParams{
Name: []byte(info.Name),
PathCipher: info.PathCipher,
PartnerID: partnerID,
DefaultSegmentsSize: info.DefaultSegmentsSize,
DefaultRedundancyScheme: info.DefaultRedundancyScheme,
DefaultEncryptionParameters: info.DefaultEncryptionParameters,
})
if err != nil {
return storj.Bucket{}, storj.ErrBucket.Wrap(err)
}
return newBucket, nil
}
// validateBlockSize confirms the encryption block size aligns with stripe size.
// Stripes contain encrypted data, therefore we want the stripe boundaries to match
// the encryption block size boundaries. We also want stripes to be small for
// audits, but encryption blocks can be a bit larger. All told, the block size should
// be an integer multiple of the stripe size.
func validateBlockSize(redundancyScheme storj.RedundancyScheme, blockSize int32) error {
stripeSize := redundancyScheme.StripeSize()
if blockSize%stripeSize != 0 {
return errs.New("encryption BlockSize (%d) must be a multiple of RS ShareSize (%d) * RS RequiredShares (%d)",
blockSize, redundancyScheme.ShareSize, redundancyScheme.RequiredShares,
)
}
return nil
}
// DeleteBucket deletes bucket
func (db *Project) DeleteBucket(ctx context.Context, bucketName string) (err error) {
defer mon.Task()(&ctx)(&err)
if bucketName == "" {
return storj.ErrNoBucket.New("")
}
err = db.metainfo.DeleteBucket(ctx, metainfo.DeleteBucketParams{
Name: []byte(bucketName),
})
if err != nil {
return storj.ErrBucket.Wrap(err)
}
return nil
}
// GetBucket gets bucket information
func (db *Project) GetBucket(ctx context.Context, bucketName string) (_ storj.Bucket, err error) {
defer mon.Task()(&ctx)(&err)
if bucketName == "" {
return storj.Bucket{}, storj.ErrNoBucket.New("")
}
bucket, err := db.metainfo.GetBucket(ctx, metainfo.GetBucketParams{
Name: []byte(bucketName),
})
if err != nil {
return storj.Bucket{}, storj.ErrBucket.Wrap(err)
}
return bucket, nil
}
// ListBuckets lists buckets
func (db *Project) ListBuckets(ctx context.Context, listOpts storj.BucketListOptions) (_ storj.BucketList, err error) {
defer mon.Task()(&ctx)(&err)
bucketList, err := db.metainfo.ListBuckets(ctx, metainfo.ListBucketsParams{
ListOpts: listOpts,
})
if err != nil {
return storj.BucketList{}, storj.ErrBucket.Wrap(err)
}
return bucketList, nil
}
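A small worked example of the constraint validateBlockSize enforces above, a sketch assuming the kvmetainfo package: with a 1 KiB share size and 20 required shares the stripe is 20 KiB, so the encryption block size must be a multiple of 20 KiB; blockSizeExamples is a hypothetical helper:
package kvmetainfo

import (
	"storj.io/common/memory"
	"storj.io/common/storj"
)

// blockSizeExamples shows which encryption block sizes pass the stripe-size
// alignment check: StripeSize = ShareSize * RequiredShares = 20 KiB here.
func blockSizeExamples() []error {
	rs := storj.RedundancyScheme{
		Algorithm:      storj.ReedSolomon,
		RequiredShares: 20,
		ShareSize:      1 * memory.KiB.Int32(),
	}
	return []error{
		validateBlockSize(rs, 20*memory.KiB.Int32()), // nil: exactly one stripe
		validateBlockSize(rs, 40*memory.KiB.Int32()), // nil: two stripes
		validateBlockSize(rs, 32*memory.KiB.Int32()), // error: not a multiple of 20 KiB
	}
}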

View File

@ -1,80 +0,0 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.
package kvmetainfo
import (
"context"
"time"
"storj.io/common/storj"
)
// CreateObject has optional parameters that can be set
type CreateObject struct {
Metadata map[string]string
ContentType string
Expires time.Time
storj.RedundancyScheme
storj.EncryptionParameters
}
// Object converts the CreateObject to an object with uninitialized values
func (create CreateObject) Object(bucket storj.Bucket, path storj.Path) storj.Object {
return storj.Object{
Bucket: bucket,
Path: path,
Metadata: create.Metadata,
ContentType: create.ContentType,
Expires: create.Expires,
Stream: storj.Stream{
Size: -1, // unknown
Checksum: nil, // unknown
SegmentCount: -1, // unknown
FixedSegmentSize: -1, // unknown
RedundancyScheme: create.RedundancyScheme,
EncryptionParameters: create.EncryptionParameters,
},
}
}
// ReadOnlyStream is an interface for reading segment information
type ReadOnlyStream interface {
Info() storj.Object
// SegmentsAt returns the segment that contains the byteOffset and following segments.
// Limit specifies the maximum number of segments to return.
SegmentsAt(ctx context.Context, byteOffset int64, limit int64) (infos []storj.Segment, more bool, err error)
// Segments returns the segment at index.
// Limit specifies the maximum number of segments to return.
Segments(ctx context.Context, index int64, limit int64) (infos []storj.Segment, more bool, err error)
}
// MutableObject is an interface for creating and deleting an object's stream
type MutableObject interface {
// Info gets the current information about the object
Info() storj.Object
// CreateStream creates a new stream for the object
CreateStream(ctx context.Context) (MutableStream, error)
// ContinueStream starts to continue a partially uploaded stream.
ContinueStream(ctx context.Context) (MutableStream, error)
// DeleteStream deletes any information about this object's stream
DeleteStream(ctx context.Context) error
// Commit commits the changes to the database
Commit(ctx context.Context) error
}
// MutableStream is an interface for manipulating stream information
type MutableStream interface {
// TODO: methods for finding partially uploaded segments
Info() storj.Object
// AddSegments adds segments to the stream.
AddSegments(ctx context.Context, segments ...storj.Segment) error
// UpdateSegments updates information about segments.
UpdateSegments(ctx context.Context, segments ...storj.Segment) error
}

View File

@ -1,90 +0,0 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.
package kvmetainfo
import (
"context"
"github.com/zeebo/errs"
monkit "gopkg.in/spacemonkeygo/monkit.v2"
"storj.io/common/encryption"
"storj.io/common/memory"
"storj.io/common/storj"
"storj.io/storj/uplink/metainfo"
"storj.io/storj/uplink/storage/segments"
"storj.io/storj/uplink/storage/streams"
)
var mon = monkit.Package()
var errClass = errs.Class("kvmetainfo")
const defaultSegmentLimit = 8 // TODO
// DB implements metainfo database
type DB struct {
project *Project
metainfo *metainfo.Client
streams streams.Store
segments segments.Store
encStore *encryption.Store
}
// New creates a new metainfo database
func New(project *Project, metainfo *metainfo.Client, streams streams.Store, segments segments.Store, encStore *encryption.Store) *DB {
return &DB{
project: project,
metainfo: metainfo,
streams: streams,
segments: segments,
encStore: encStore,
}
}
const defaultLookupLimit = 1000
// Limits returns limits for this metainfo database
func (db *DB) Limits() (MetainfoLimits, error) {
// TODO: fetch this information from satellite
return MetainfoLimits{
ListLimit: defaultLookupLimit,
MinimumRemoteSegmentSize: memory.KiB.Int64(), // TODO: is this needed here?
MaximumInlineSegmentSize: memory.MiB.Int64(),
}, nil
}
// MetainfoLimits lists limits specified for the Metainfo database
type MetainfoLimits struct {
// ListLimit specifies the maximum number of items that can be listed at a time.
ListLimit int64
// MinimumRemoteSegmentSize specifies the minimum size of a remote segment that is allowed to be stored.
MinimumRemoteSegmentSize int64
// MaximumInlineSegmentSize specifies the maximum size of an inline segment that is allowed to be stored.
MaximumInlineSegmentSize int64
}
// CreateBucket creates a new bucket with the specified information
func (db *DB) CreateBucket(ctx context.Context, bucketName string, info *storj.Bucket) (bucketInfo storj.Bucket, err error) {
return db.project.CreateBucket(ctx, bucketName, info)
}
// DeleteBucket deletes bucket
func (db *DB) DeleteBucket(ctx context.Context, bucketName string) (err error) {
return db.project.DeleteBucket(ctx, bucketName)
}
// GetBucket gets bucket information
func (db *DB) GetBucket(ctx context.Context, bucketName string) (bucketInfo storj.Bucket, err error) {
return db.project.GetBucket(ctx, bucketName)
}
// ListBuckets lists buckets
func (db *DB) ListBuckets(ctx context.Context, options storj.BucketListOptions) (list storj.BucketList, err error) {
return db.project.ListBuckets(ctx, options)
}

View File

@ -1,469 +0,0 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.
package kvmetainfo
import (
"context"
"errors"
"strings"
"github.com/gogo/protobuf/proto"
"storj.io/common/encryption"
"storj.io/common/memory"
"storj.io/common/paths"
"storj.io/common/pb"
"storj.io/common/storj"
"storj.io/storj/uplink/metainfo"
"storj.io/storj/uplink/storage/objects"
"storj.io/storj/uplink/storage/segments"
"storj.io/storj/uplink/storage/streams"
)
// DefaultRS holds the default values for RedundancyScheme
var DefaultRS = storj.RedundancyScheme{
Algorithm: storj.ReedSolomon,
RequiredShares: 20,
RepairShares: 30,
OptimalShares: 40,
TotalShares: 50,
ShareSize: 1 * memory.KiB.Int32(),
}
// DefaultES holds the default values for EncryptionParameters.
// BlockSize should default to the size of a stripe
var DefaultES = storj.EncryptionParameters{
CipherSuite: storj.EncAESGCM,
BlockSize: DefaultRS.StripeSize(),
}
// GetObject returns information about an object
func (db *DB) GetObject(ctx context.Context, bucket storj.Bucket, path storj.Path) (info storj.Object, err error) {
defer mon.Task()(&ctx)(&err)
_, info, err = db.getInfo(ctx, bucket, path)
return info, err
}
// GetObjectStream returns interface for reading the object stream
func (db *DB) GetObjectStream(ctx context.Context, bucket storj.Bucket, object storj.Object) (stream ReadOnlyStream, err error) {
defer mon.Task()(&ctx)(&err)
if bucket.Name == "" {
return nil, storj.ErrNoBucket.New("")
}
if object.Path == "" {
return nil, storj.ErrNoPath.New("")
}
return &readonlyStream{
db: db,
info: object,
}, nil
}
// CreateObject creates an uploading object and returns an interface for uploading Object information
func (db *DB) CreateObject(ctx context.Context, bucket storj.Bucket, path storj.Path, createInfo *CreateObject) (object MutableObject, err error) {
defer mon.Task()(&ctx)(&err)
if bucket.Name == "" {
return nil, storj.ErrNoBucket.New("")
}
if path == "" {
return nil, storj.ErrNoPath.New("")
}
info := storj.Object{
Bucket: bucket,
Path: path,
}
if createInfo != nil {
info.Metadata = createInfo.Metadata
info.ContentType = createInfo.ContentType
info.Expires = createInfo.Expires
info.RedundancyScheme = createInfo.RedundancyScheme
info.EncryptionParameters = createInfo.EncryptionParameters
}
// TODO: autodetect content type from the path extension
// if info.ContentType == "" {}
if info.EncryptionParameters.IsZero() {
info.EncryptionParameters = storj.EncryptionParameters{
CipherSuite: DefaultES.CipherSuite,
BlockSize: DefaultES.BlockSize,
}
}
if info.RedundancyScheme.IsZero() {
info.RedundancyScheme = DefaultRS
// If the provided EncryptionParameters.BlockSize isn't a multiple of the
// DefaultRS stripeSize, then overwrite the EncryptionParameters with the DefaultES values
if err := validateBlockSize(DefaultRS, info.EncryptionParameters.BlockSize); err != nil {
info.EncryptionParameters.BlockSize = DefaultES.BlockSize
}
}
return &mutableObject{
db: db,
info: info,
}, nil
}
// ModifyObject modifies a committed object
func (db *DB) ModifyObject(ctx context.Context, bucket storj.Bucket, path storj.Path) (object MutableObject, err error) {
defer mon.Task()(&ctx)(&err)
return nil, errors.New("not implemented")
}
func (db *DB) pathCipher(bucketInfo storj.Bucket) storj.CipherSuite {
if db.encStore.EncryptionBypass {
return storj.EncNullBase64URL
}
return bucketInfo.PathCipher
}
// DeleteObject deletes an object from database
func (db *DB) DeleteObject(ctx context.Context, bucket storj.Bucket, path storj.Path) (err error) {
defer mon.Task()(&ctx)(&err)
if bucket.Name == "" {
return storj.ErrNoBucket.New("")
}
prefixed := prefixedObjStore{
store: objects.NewStore(db.streams, db.pathCipher(bucket)),
prefix: bucket.Name,
}
return prefixed.Delete(ctx, path)
}
// ModifyPendingObject creates an interface for updating a partially uploaded object
func (db *DB) ModifyPendingObject(ctx context.Context, bucket storj.Bucket, path storj.Path) (object MutableObject, err error) {
defer mon.Task()(&ctx)(&err)
return nil, errors.New("not implemented")
}
// ListPendingObjects lists pending objects in bucket based on the ListOptions
func (db *DB) ListPendingObjects(ctx context.Context, bucket storj.Bucket, options storj.ListOptions) (list storj.ObjectList, err error) {
defer mon.Task()(&ctx)(&err)
return storj.ObjectList{}, errors.New("not implemented")
}
// ListObjects lists objects in bucket based on the ListOptions
func (db *DB) ListObjects(ctx context.Context, bucket storj.Bucket, options storj.ListOptions) (list storj.ObjectList, err error) {
defer mon.Task()(&ctx)(&err)
if bucket.Name == "" {
return storj.ObjectList{}, storj.ErrNoBucket.New("")
}
var startAfter string
switch options.Direction {
// TODO for now we are supporting only storj.After
// case storj.Forward:
// // forward lists forwards from cursor, including cursor
// startAfter = keyBefore(options.Cursor)
case storj.After:
// after lists forwards from cursor, without cursor
startAfter = options.Cursor
default:
return storj.ObjectList{}, errClass.New("invalid direction %d", options.Direction)
}
// TODO: we should let libuplink users be able to determine what metadata fields they request as well
// metaFlags := meta.All
// if db.pathCipher(bucket) == storj.EncNull || db.pathCipher(bucket) == storj.EncNullBase64URL {
// metaFlags = meta.None
// }
// TODO use flags with listing
// if metaFlags&meta.Size != 0 {
// Calculating the stream's size require also the user-defined metadata,
// where stream store keeps info about the number of segments and their size.
// metaFlags |= meta.UserDefined
// }
pathCipher := db.pathCipher(bucket)
prefix := streams.ParsePath(storj.JoinPaths(bucket.Name, options.Prefix))
prefixKey, err := encryption.DerivePathKey(prefix.Bucket(), streams.PathForKey(prefix.UnencryptedPath().Raw()), db.encStore)
if err != nil {
return storj.ObjectList{}, errClass.Wrap(err)
}
encPrefix, err := encryption.EncryptPath(prefix.Bucket(), prefix.UnencryptedPath(), pathCipher, db.encStore)
if err != nil {
return storj.ObjectList{}, errClass.Wrap(err)
}
// If the raw unencrypted path ends in a `/` we need to remove the final
// section of the encrypted path. For example, if we are listing the path
// `/bob/`, the encrypted path results in `enc("")/enc("bob")/enc("")`. This
// is an incorrect list prefix, what we really want is `enc("")/enc("bob")`
if strings.HasSuffix(prefix.UnencryptedPath().Raw(), "/") {
lastSlashIdx := strings.LastIndex(encPrefix.Raw(), "/")
encPrefix = paths.NewEncrypted(encPrefix.Raw()[:lastSlashIdx])
}
// We have to encrypt startAfter but only if it doesn't contain a bucket.
// It contains a bucket if and only if the prefix has no bucket. This is why it is a raw
// string instead of a typed string: it's either a bucket or an unencrypted path component
// and that isn't known at compile time.
needsEncryption := prefix.Bucket() != ""
if needsEncryption {
startAfter, err = encryption.EncryptPathRaw(startAfter, pathCipher, prefixKey)
if err != nil {
return storj.ObjectList{}, errClass.Wrap(err)
}
}
items, more, err := db.metainfo.ListObjects(ctx, metainfo.ListObjectsParams{
Bucket: []byte(bucket.Name),
EncryptedPrefix: []byte(encPrefix.Raw()),
EncryptedCursor: []byte(startAfter),
Limit: int32(options.Limit),
Recursive: options.Recursive,
})
if err != nil {
return storj.ObjectList{}, errClass.Wrap(err)
}
list = storj.ObjectList{
Bucket: bucket.Name,
Prefix: options.Prefix,
More: more,
Items: make([]storj.Object, len(items)),
}
for i, item := range items {
var path streams.Path
var itemPath string
if needsEncryption {
itemPath, err = encryption.DecryptPathRaw(string(item.EncryptedPath), pathCipher, prefixKey)
if err != nil {
return storj.ObjectList{}, errClass.Wrap(err)
}
// TODO(jeff): this shouldn't be necessary if we handled trailing slashes
// appropriately. There are some issues with list.
fullPath := prefix.UnencryptedPath().Raw()
if len(fullPath) > 0 && fullPath[len(fullPath)-1] != '/' {
fullPath += "/"
}
fullPath += itemPath
path = streams.CreatePath(prefix.Bucket(), paths.NewUnencrypted(fullPath))
} else {
itemPath = string(item.EncryptedPath)
path = streams.CreatePath(string(item.EncryptedPath), paths.Unencrypted{})
}
stream, streamMeta, err := streams.TypedDecryptStreamInfo(ctx, item.EncryptedMetadata, path, db.encStore)
if err != nil {
return storj.ObjectList{}, errClass.Wrap(err)
}
object, err := objectFromMeta(bucket, itemPath, item, stream, &streamMeta)
if err != nil {
return storj.ObjectList{}, errClass.Wrap(err)
}
list.Items[i] = object
}
return list, nil
}
type object struct {
fullpath streams.Path
bucket string
encPath paths.Encrypted
lastSegmentMeta segments.Meta
streamInfo *pb.StreamInfo
streamMeta pb.StreamMeta
}
func (db *DB) getInfo(ctx context.Context, bucket storj.Bucket, path storj.Path) (obj object, info storj.Object, err error) {
defer mon.Task()(&ctx)(&err)
if bucket.Name == "" {
return object{}, storj.Object{}, storj.ErrNoBucket.New("")
}
if path == "" {
return object{}, storj.Object{}, storj.ErrNoPath.New("")
}
fullpath := streams.CreatePath(bucket.Name, paths.NewUnencrypted(path))
encPath, err := encryption.EncryptPath(bucket.Name, paths.NewUnencrypted(path), db.pathCipher(bucket), db.encStore)
if err != nil {
return object{}, storj.Object{}, err
}
objectInfo, err := db.metainfo.GetObject(ctx, metainfo.GetObjectParams{
Bucket: []byte(bucket.Name),
EncryptedPath: []byte(encPath.Raw()),
})
if err != nil {
return object{}, storj.Object{}, err
}
redundancyScheme := objectInfo.Stream.RedundancyScheme
lastSegmentMeta := segments.Meta{
Modified: objectInfo.Created,
Expiration: objectInfo.Expires,
Size: objectInfo.Size,
Data: objectInfo.Metadata,
}
streamInfo, streamMeta, err := streams.TypedDecryptStreamInfo(ctx, lastSegmentMeta.Data, fullpath, db.encStore)
if err != nil {
return object{}, storj.Object{}, err
}
info, err = objectStreamFromMeta(bucket, path, objectInfo.StreamID, lastSegmentMeta, streamInfo, streamMeta, redundancyScheme)
if err != nil {
return object{}, storj.Object{}, err
}
return object{
fullpath: fullpath,
bucket: bucket.Name,
encPath: encPath,
lastSegmentMeta: lastSegmentMeta,
streamInfo: streamInfo,
streamMeta: streamMeta,
}, info, nil
}
func objectFromMeta(bucket storj.Bucket, path storj.Path, listItem storj.ObjectListItem, stream *pb.StreamInfo, streamMeta *pb.StreamMeta) (storj.Object, error) {
object := storj.Object{
Version: 0, // TODO:
Bucket: bucket,
Path: path,
IsPrefix: listItem.IsPrefix,
Created: listItem.CreatedAt, // TODO: use correct field
Modified: listItem.CreatedAt, // TODO: use correct field
Expires: listItem.ExpiresAt,
}
if stream != nil {
serializableMeta := pb.SerializableMeta{}
err := proto.Unmarshal(stream.Metadata, &serializableMeta)
if err != nil {
return storj.Object{}, err
}
object.Metadata = serializableMeta.UserDefined
object.ContentType = serializableMeta.ContentType
object.Stream.Size = ((numberOfSegments(stream, streamMeta) - 1) * stream.SegmentsSize) + stream.LastSegmentSize
}
return object, nil
}
func objectStreamFromMeta(bucket storj.Bucket, path storj.Path, streamID storj.StreamID, lastSegment segments.Meta, stream *pb.StreamInfo, streamMeta pb.StreamMeta, redundancyScheme storj.RedundancyScheme) (storj.Object, error) {
var nonce storj.Nonce
var encryptedKey storj.EncryptedPrivateKey
if streamMeta.LastSegmentMeta != nil {
copy(nonce[:], streamMeta.LastSegmentMeta.KeyNonce)
encryptedKey = streamMeta.LastSegmentMeta.EncryptedKey
}
rv := storj.Object{
Version: 0, // TODO:
Bucket: bucket,
Path: path,
IsPrefix: false,
Created: lastSegment.Modified, // TODO: use correct field
Modified: lastSegment.Modified, // TODO: use correct field
Expires: lastSegment.Expiration, // TODO: use correct field
Stream: storj.Stream{
ID: streamID,
// Checksum: []byte(object.Checksum),
RedundancyScheme: redundancyScheme,
EncryptionParameters: storj.EncryptionParameters{
CipherSuite: storj.CipherSuite(streamMeta.EncryptionType),
BlockSize: streamMeta.EncryptionBlockSize,
},
LastSegment: storj.LastSegment{
EncryptedKeyNonce: nonce,
EncryptedKey: encryptedKey,
},
},
}
if stream != nil {
serMetaInfo := pb.SerializableMeta{}
err := proto.Unmarshal(stream.Metadata, &serMetaInfo)
if err != nil {
return storj.Object{}, err
}
numberOfSegments := streamMeta.NumberOfSegments
if streamMeta.NumberOfSegments == 0 {
numberOfSegments = stream.DeprecatedNumberOfSegments
}
rv.Metadata = serMetaInfo.UserDefined
rv.ContentType = serMetaInfo.ContentType
rv.Stream.Size = stream.SegmentsSize*(numberOfSegments-1) + stream.LastSegmentSize
rv.Stream.SegmentCount = numberOfSegments
rv.Stream.FixedSegmentSize = stream.SegmentsSize
rv.Stream.LastSegment.Size = stream.LastSegmentSize
}
return rv, nil
}
type mutableObject struct {
db *DB
info storj.Object
}
func (object *mutableObject) Info() storj.Object { return object.info }
func (object *mutableObject) CreateStream(ctx context.Context) (_ MutableStream, err error) {
defer mon.Task()(&ctx)(&err)
return &mutableStream{
db: object.db,
info: object.info,
}, nil
}
func (object *mutableObject) ContinueStream(ctx context.Context) (_ MutableStream, err error) {
defer mon.Task()(&ctx)(&err)
return nil, errors.New("not implemented")
}
func (object *mutableObject) DeleteStream(ctx context.Context) (err error) {
defer mon.Task()(&ctx)(&err)
return errors.New("not implemented")
}
func (object *mutableObject) Commit(ctx context.Context) (err error) {
defer mon.Task()(&ctx)(&err)
_, info, err := object.db.getInfo(ctx, object.info.Bucket, object.info.Path)
object.info = info
return err
}
func numberOfSegments(stream *pb.StreamInfo, streamMeta *pb.StreamMeta) int64 {
if streamMeta.NumberOfSegments > 0 {
return streamMeta.NumberOfSegments
}
return stream.DeprecatedNumberOfSegments
}

View File

@ -1,29 +0,0 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.
package kvmetainfo
// TODO: known issue:
// this is incorrect because there's no good way to get such a path,
// since the exact previous key is
// append(previousPrefix(cursor), infinite(0xFF)...)
// TODO: commented out until we decide whether to support direction for object listing
// func keyBefore(cursor string) string {
// if cursor == "" {
// return ""
// }
// before := []byte(cursor)
// if before[len(before)-1] == 0 {
// return string(before[:len(before)-1])
// }
// before[len(before)-1]--
// before = append(before, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f, 0x7f)
// return string(before)
// }
// func keyAfter(cursor string) string {
// return cursor + "\x00"
// }

View File

@ -1,50 +0,0 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.
package kvmetainfo
import (
"context"
"io"
"time"
"storj.io/common/pb"
"storj.io/common/ranger"
"storj.io/common/storj"
"storj.io/storj/uplink/storage/objects"
)
type prefixedObjStore struct {
store objects.Store
prefix string
}
func (o *prefixedObjStore) Get(ctx context.Context, path storj.Path, object storj.Object) (rr ranger.Ranger, err error) {
defer mon.Task()(&ctx)(&err)
if len(path) == 0 {
return nil, storj.ErrNoPath.New("")
}
return o.store.Get(ctx, storj.JoinPaths(o.prefix, path), object)
}
func (o *prefixedObjStore) Put(ctx context.Context, path storj.Path, data io.Reader, metadata pb.SerializableMeta, expiration time.Time) (meta objects.Meta, err error) {
defer mon.Task()(&ctx)(&err)
if len(path) == 0 {
return objects.Meta{}, storj.ErrNoPath.New("")
}
return o.store.Put(ctx, storj.JoinPaths(o.prefix, path), data, metadata, expiration)
}
func (o *prefixedObjStore) Delete(ctx context.Context, path storj.Path) (err error) {
defer mon.Task()(&ctx)(&err)
if len(path) == 0 {
return storj.ErrNoPath.New("")
}
return o.store.Delete(ctx, storj.JoinPaths(o.prefix, path))
}

View File

@ -1,30 +0,0 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.
package kvmetainfo
import (
"storj.io/storj/uplink/eestream"
"storj.io/storj/uplink/metainfo"
"storj.io/storj/uplink/storage/streams"
)
// Project implements project management operations
type Project struct {
metainfo metainfo.Client
streams streams.Store
encryptedBlockSize int32
redundancy eestream.RedundancyStrategy
segmentsSize int64
}
// NewProject constructs a *Project
func NewProject(streams streams.Store, encryptedBlockSize int32, redundancy eestream.RedundancyStrategy, segmentsSize int64, metainfo metainfo.Client) *Project {
return &Project{
metainfo: metainfo,
streams: streams,
encryptedBlockSize: encryptedBlockSize,
redundancy: redundancy,
segmentsSize: segmentsSize,
}
}

View File

@ -1,131 +0,0 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.
package kvmetainfo
import (
"context"
"errors"
"storj.io/common/encryption"
"storj.io/common/paths"
"storj.io/common/storj"
"storj.io/storj/uplink/metainfo"
)
var _ ReadOnlyStream = (*readonlyStream)(nil)
type readonlyStream struct {
db *DB
info storj.Object
}
func (stream *readonlyStream) Info() storj.Object { return stream.info }
func (stream *readonlyStream) SegmentsAt(ctx context.Context, byteOffset int64, limit int64) (infos []storj.Segment, more bool, err error) {
defer mon.Task()(&ctx)(&err)
if stream.info.FixedSegmentSize <= 0 {
return nil, false, errors.New("not implemented")
}
index := byteOffset / stream.info.FixedSegmentSize
return stream.Segments(ctx, index, limit)
}
func (stream *readonlyStream) segment(ctx context.Context, index int64) (segment storj.Segment, err error) {
defer mon.Task()(&ctx)(&err)
segment = storj.Segment{
Index: index,
}
isLastSegment := segment.Index+1 == stream.info.SegmentCount
if isLastSegment {
index = -1
}
info, limits, err := stream.db.metainfo.DownloadSegment(ctx, metainfo.DownloadSegmentParams{
StreamID: stream.Info().ID,
Position: storj.SegmentPosition{
Index: int32(index),
},
})
if err != nil {
return segment, err
}
segment.Size = stream.info.Size
segment.EncryptedKeyNonce = info.SegmentEncryption.EncryptedKeyNonce
segment.EncryptedKey = info.SegmentEncryption.EncryptedKey
streamKey, err := encryption.DeriveContentKey(stream.info.Bucket.Name, paths.NewUnencrypted(stream.info.Path), stream.db.encStore)
if err != nil {
return segment, err
}
contentKey, err := encryption.DecryptKey(segment.EncryptedKey, stream.info.EncryptionParameters.CipherSuite, streamKey, &segment.EncryptedKeyNonce)
if err != nil {
return segment, err
}
nonce := new(storj.Nonce)
_, err = encryption.Increment(nonce, segment.Index+1)
if err != nil {
return segment, err
}
if len(info.EncryptedInlineData) != 0 || len(limits) == 0 {
inline, err := encryption.Decrypt(info.EncryptedInlineData, stream.info.EncryptionParameters.CipherSuite, contentKey, nonce)
if err != nil {
return segment, err
}
segment.Inline = inline
}
return segment, nil
}
func (stream *readonlyStream) Segments(ctx context.Context, index int64, limit int64) (infos []storj.Segment, more bool, err error) {
defer mon.Task()(&ctx)(&err)
if index < 0 {
return nil, false, errors.New("invalid argument")
}
if limit <= 0 {
limit = defaultSegmentLimit
}
if index >= stream.info.SegmentCount {
return nil, false, nil
}
infos = make([]storj.Segment, 0, limit)
for ; index < stream.info.SegmentCount && limit > 0; index++ {
limit--
segment, err := stream.segment(ctx, index)
if err != nil {
return nil, false, err
}
infos = append(infos, segment)
}
more = index < stream.info.SegmentCount
return infos, more, nil
}
type mutableStream struct {
db *DB
info storj.Object
}
func (stream *mutableStream) Info() storj.Object { return stream.info }
func (stream *mutableStream) AddSegments(ctx context.Context, segments ...storj.Segment) (err error) {
defer mon.Task()(&ctx)(&err)
return errors.New("not implemented")
}
func (stream *mutableStream) UpdateSegments(ctx context.Context, segments ...storj.Segment) (err error) {
defer mon.Task()(&ctx)(&err)
return errors.New("not implemented")
}

View File

@ -1,50 +0,0 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.
package kvmetainfo
import (
"github.com/vivint/infectious"
"github.com/zeebo/errs"
"storj.io/common/encryption"
"storj.io/common/memory"
"storj.io/common/storj"
"storj.io/storj/uplink/eestream"
"storj.io/storj/uplink/metainfo"
"storj.io/storj/uplink/storage/segments"
"storj.io/storj/uplink/storage/streams"
)
var (
// Error is the errs class of SetupProject
Error = errs.Class("SetupProject error")
)
// SetupProject creates a project with temporary values until we can figure out how to bypass encryption-related setup
func SetupProject(m *metainfo.Client) (*Project, error) {
whoCares := 1 // TODO: find a better way to do this
fc, err := infectious.NewFEC(whoCares, whoCares)
if err != nil {
return nil, Error.New("failed to create erasure coding client: %v", err)
}
rs, err := eestream.NewRedundancyStrategy(eestream.NewRSScheme(fc, whoCares), whoCares, whoCares)
if err != nil {
return nil, Error.New("failed to create redundancy strategy: %v", err)
}
maxBucketMetaSize := 10 * memory.MiB
segment := segments.NewSegmentStore(m, nil, rs)
// volatile warning: we're setting an encryption key of all zeros for bucket
// metadata, when really the bucket metadata should be stored in a different
// system altogether.
// TODO: https://storjlabs.atlassian.net/browse/V3-1967
encStore := encryption.NewStore()
encStore.SetDefaultKey(new(storj.Key))
strms, err := streams.NewStreamStore(m, segment, maxBucketMetaSize.Int64(), encStore, memory.KiB.Int(), storj.EncAESGCM, maxBucketMetaSize.Int(), maxBucketMetaSize.Int64())
if err != nil {
return nil, Error.New("failed to create streams: %v", err)
}
return NewProject(strms, memory.KiB.Int32(), rs, 64*memory.MiB.Int64(), *m), nil
}

View File

@ -1,132 +0,0 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.
package piecestore
import (
"bufio"
"context"
"sync"
"github.com/zeebo/errs"
"storj.io/common/pb"
)
// BufferedUpload implements buffering for an Upload.
type BufferedUpload struct {
buffer bufio.Writer
upload *Upload
}
// NewBufferedUpload creates buffered upload with the specified size.
func NewBufferedUpload(upload *Upload, size int) Uploader {
buffered := &BufferedUpload{}
buffered.upload = upload
buffered.buffer = *bufio.NewWriterSize(buffered.upload, size)
return buffered
}
// Write writes content to the buffer and flushes it to the upload once enough data has been gathered.
func (upload *BufferedUpload) Write(data []byte) (int, error) {
return upload.buffer.Write(data)
}
// Cancel aborts the upload.
func (upload *BufferedUpload) Cancel(ctx context.Context) (err error) {
defer mon.Task()(&ctx)(&err)
return upload.upload.Cancel(ctx)
}
// Commit flushes any remaining content from buffer and commits the upload.
func (upload *BufferedUpload) Commit(ctx context.Context) (_ *pb.PieceHash, err error) {
defer mon.Task()(&ctx)(&err)
flushErr := upload.buffer.Flush()
piece, closeErr := upload.upload.Commit(ctx)
return piece, errs.Combine(flushErr, closeErr)
}
// BufferedDownload implements buffering for download.
type BufferedDownload struct {
buffer bufio.Reader
download *Download
}
// NewBufferedDownload creates a buffered download with the specified size.
func NewBufferedDownload(download *Download, size int) Downloader {
buffered := &BufferedDownload{}
buffered.download = download
buffered.buffer = *bufio.NewReaderSize(buffered.download, size)
return buffered
}
// Read reads from the buffer, downloading more in batches once it's empty.
func (download *BufferedDownload) Read(p []byte) (int, error) {
return download.buffer.Read(p)
}
// Close closes the buffered download.
func (download *BufferedDownload) Close() error {
return download.download.Close()
}
// GetHashAndLimit gets the download's hash and original order limit.
func (download *BufferedDownload) GetHashAndLimit() (*pb.PieceHash, *pb.OrderLimit) {
return download.download.GetHashAndLimit()
}
// LockingUpload adds a lock around an upload, making it safe to use concurrently.
// TODO: this shouldn't be needed.
type LockingUpload struct {
mu sync.Mutex
upload Uploader
}
// Write uploads data.
func (upload *LockingUpload) Write(p []byte) (int, error) {
upload.mu.Lock()
defer upload.mu.Unlock()
return upload.upload.Write(p)
}
// Cancel aborts the upload.
func (upload *LockingUpload) Cancel(ctx context.Context) (err error) {
defer mon.Task()(&ctx)(&err)
upload.mu.Lock()
defer upload.mu.Unlock()
return upload.upload.Cancel(ctx)
}
// Commit finishes the upload.
func (upload *LockingUpload) Commit(ctx context.Context) (_ *pb.PieceHash, err error) {
defer mon.Task()(&ctx)(&err)
upload.mu.Lock()
defer upload.mu.Unlock()
return upload.upload.Commit(ctx)
}
// LockingDownload adds a lock around a download, making it safe to use concurrently.
// TODO: this shouldn't be needed.
type LockingDownload struct {
mu sync.Mutex
download Downloader
}
// Read downloads content.
func (download *LockingDownload) Read(p []byte) (int, error) {
download.mu.Lock()
defer download.mu.Unlock()
return download.download.Read(p)
}
// Close closes the download.
func (download *LockingDownload) Close() error {
download.mu.Lock()
defer download.mu.Unlock()
return download.download.Close()
}
// GetHashAndLimit gets the download's hash and original order limit
func (download *LockingDownload) GetHashAndLimit() (*pb.PieceHash, *pb.OrderLimit) {
return download.download.GetHashAndLimit()
}

View File

@ -1,132 +0,0 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.
package piecestore
import (
"context"
"io"
"github.com/zeebo/errs"
"go.uber.org/zap"
"storj.io/common/identity"
"storj.io/common/memory"
"storj.io/common/pb"
"storj.io/common/rpc"
"storj.io/common/storj"
)
// Error is the default error class for piecestore client.
var Error = errs.Class("piecestore")
// Config defines piecestore client parameters for upload and download.
type Config struct {
UploadBufferSize int64
DownloadBufferSize int64
InitialStep int64
MaximumStep int64
}
// DefaultConfig contains the default parameters used for upload and download.
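// As a sketch only (these numbers are arbitrary, not recommendations), a
// caller that prefers larger buffers and a more aggressive first order can
// pass its own Config to Dial instead:
//
//	cfg := Config{
//		UploadBufferSize:   1 * memory.MiB.Int64(),
//		DownloadBufferSize: 1 * memory.MiB.Int64(),
//		InitialStep:        256 * memory.KiB.Int64(),
//		MaximumStep:        2 * memory.MiB.Int64(),
//	}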
var DefaultConfig = Config{
UploadBufferSize: 256 * memory.KiB.Int64(),
DownloadBufferSize: 256 * memory.KiB.Int64(),
InitialStep: 64 * memory.KiB.Int64(),
MaximumStep: 1 * memory.MiB.Int64(),
}
// Client implements uploading, downloading and deleting content from a piecestore.
type Client struct {
log *zap.Logger
client pb.DRPCPiecestoreClient
conn *rpc.Conn
config Config
}
// Dial dials the target piecestore endpoint.
func Dial(ctx context.Context, dialer rpc.Dialer, target *pb.Node, log *zap.Logger, config Config) (*Client, error) {
conn, err := dialer.DialNode(ctx, target)
if err != nil {
return nil, Error.Wrap(err)
}
return &Client{
log: log,
client: pb.NewDRPCPiecestoreClient(conn.Raw()),
conn: conn,
config: config,
}, nil
}
// Delete uses a delete order limit to delete a piece on the piece store.
//
// DEPRECATED in favor of DeletePieces.
func (client *Client) Delete(ctx context.Context, limit *pb.OrderLimit, privateKey storj.PiecePrivateKey) (err error) {
defer mon.Task()(&ctx)(&err)
_, err = client.client.Delete(ctx, &pb.PieceDeleteRequest{
Limit: limit,
})
return Error.Wrap(err)
}
// DeletePiece deletes a piece.
//
// DEPRECATED in favor of DeletePieces.
func (client *Client) DeletePiece(ctx context.Context, id storj.PieceID) (err error) {
defer mon.Task()(&ctx, id.String())(&err)
_, err = client.client.DeletePiece(ctx, &pb.PieceDeletePieceRequest{
PieceId: id,
})
return Error.Wrap(err)
}
// DeletePieces deletes a set of pieces.
func (client *Client) DeletePieces(ctx context.Context, ids ...storj.PieceID) (err error) {
defer mon.Task()(&ctx)(&err)
if len(ids) == 0 {
// Avoid RPC calls if no pieces to delete.
return nil
}
_, err = client.client.DeletePieces(ctx, &pb.DeletePiecesRequest{
PieceIds: ids,
})
return Error.Wrap(err)
}
// Retain uses a bloom filter to tell the piece store which pieces to keep.
func (client *Client) Retain(ctx context.Context, req *pb.RetainRequest) (err error) {
defer mon.Task()(&ctx)(&err)
_, err = client.client.Retain(ctx, req)
return Error.Wrap(err)
}
// Close closes the underlying connection.
func (client *Client) Close() error {
return client.conn.Close()
}
// GetPeerIdentity gets the connection's peer identity
func (client *Client) GetPeerIdentity() (*identity.PeerIdentity, error) {
return client.conn.PeerIdentity()
}
// nextAllocationStep finds the next trusted allocation step.
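// As an illustration (assuming DefaultConfig): starting from an InitialStep of
// 64 KiB, successive calls grow the step by a factor of 3/2, giving roughly
// 64 KiB, 96 KiB, 144 KiB, 216 KiB and so on, until the value is capped at
// MaximumStep (1 MiB).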
func (client *Client) nextAllocationStep(previous int64) int64 {
// TODO: ensure that this is frame independent
next := previous * 3 / 2
if next > client.config.MaximumStep {
next = client.config.MaximumStep
}
return next
}
// ignoreEOF is a utility func for ignoring the EOF error when it's not important.
func ignoreEOF(err error) error {
if err == io.EOF {
return nil
}
return err
}

View File

@ -1,308 +0,0 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.
package piecestore
import (
"context"
"fmt"
"io"
"github.com/zeebo/errs"
"storj.io/common/errs2"
"storj.io/common/identity"
"storj.io/common/pb"
"storj.io/common/signing"
"storj.io/common/storj"
)
// Downloader is an interface that can be used for downloading content.
// It matches the signature of `io.ReadCloser`, with one extra function,
// GetHashAndLimit(), used for accessing information during GET_REPAIR.
type Downloader interface {
Read([]byte) (int, error)
Close() error
GetHashAndLimit() (*pb.PieceHash, *pb.OrderLimit)
}
// Download implements downloading from a piecestore.
type Download struct {
client *Client
limit *pb.OrderLimit
privateKey storj.PiecePrivateKey
peer *identity.PeerIdentity
stream downloadStream
ctx context.Context
read int64 // how much data we have read so far
allocated int64 // how far we have sent orders
downloaded int64 // how much data we have downloaded
downloadSize int64 // how much we want to download
// the step size we use when sending new orders
allocationStep int64
unread ReadBuffer
// hash and originLimit are received in the event of a GET_REPAIR
hash *pb.PieceHash
originLimit *pb.OrderLimit
closed bool
closingError error
}
type downloadStream interface {
CloseSend() error
Send(*pb.PieceDownloadRequest) error
Recv() (*pb.PieceDownloadResponse, error)
}
// Download starts a new download using the specified order limit at the specified offset and size.
func (client *Client) Download(ctx context.Context, limit *pb.OrderLimit, piecePrivateKey storj.PiecePrivateKey, offset, size int64) (_ Downloader, err error) {
defer mon.Task()(&ctx)(&err)
peer, err := client.conn.PeerIdentity()
if err != nil {
return nil, ErrInternal.Wrap(err)
}
stream, err := client.client.Download(ctx)
if err != nil {
return nil, err
}
err = stream.Send(&pb.PieceDownloadRequest{
Limit: limit,
Chunk: &pb.PieceDownloadRequest_Chunk{
Offset: offset,
ChunkSize: size,
},
})
if err != nil {
_, recvErr := stream.Recv()
return nil, ErrProtocol.Wrap(errs.Combine(err, recvErr))
}
download := &Download{
client: client,
limit: limit,
privateKey: piecePrivateKey,
peer: peer,
stream: stream,
ctx: ctx,
read: 0,
allocated: 0,
downloaded: 0,
downloadSize: size,
allocationStep: client.config.InitialStep,
}
if client.config.DownloadBufferSize <= 0 {
return &LockingDownload{download: download}, nil
}
return &LockingDownload{
download: NewBufferedDownload(download, int(client.config.DownloadBufferSize)),
}, nil
}
// Read downloads data from the storage node allocating as necessary.
func (client *Download) Read(data []byte) (read int, err error) {
ctx := client.ctx
defer mon.Task()(&ctx, "node: "+client.peer.ID.String()[0:8])(&err)
if client.closed {
return 0, io.ErrClosedPipe
}
for client.read < client.downloadSize {
// read from buffer
n, err := client.unread.Read(data)
client.read += int64(n)
read += n
// if we have an error return the error
if err != nil {
return read, err
}
// if an error is pending, avoid further requests, but try to finish what's in the unread buffer.
if client.unread.Errored() {
return read, nil
}
// do we need to send a new order to the storagenode?
if client.allocated-client.downloaded < client.allocationStep {
newAllocation := client.allocationStep
// have we downloaded more than we have allocated due to a generous storagenode?
if client.allocated-client.downloaded < 0 {
newAllocation += client.downloaded - client.allocated
}
// ensure we don't allocate more than we intend to read
if client.allocated+newAllocation > client.downloadSize {
newAllocation = client.downloadSize - client.allocated
}
// send an order
if newAllocation > 0 {
order, err := signing.SignUplinkOrder(ctx, client.privateKey, &pb.Order{
SerialNumber: client.limit.SerialNumber,
Amount: newAllocation,
})
if err != nil {
// we are signing so we shouldn't propagate this into close,
// however we should include this as a read error
client.unread.IncludeError(err)
client.closeWithError(nil)
return read, nil
}
err = client.stream.Send(&pb.PieceDownloadRequest{
Order: order,
})
if err != nil {
// other side doesn't want to talk to us anymore or network went down
client.unread.IncludeError(err)
// if it's a cancellation, then we'll just close with context.Canceled
if errs2.IsCanceled(err) {
client.closeWithError(err)
return read, err
}
// otherwise, something else happened and we should try to ask the other side
client.closeAndTryFetchError()
return read, nil
}
// update our allocation step
client.allocationStep = client.client.nextAllocationStep(client.allocationStep)
}
}
// we have data, no need to wait for a chunk
if read > 0 {
return read, nil
}
// we don't have data, wait for a chunk from storage node
response, err := client.stream.Recv()
if response != nil && response.Chunk != nil {
client.downloaded += int64(len(response.Chunk.Data))
client.unread.Fill(response.Chunk.Data)
}
// This is a GET_REPAIR because we got a piece hash and the original order limit.
if response != nil && response.Hash != nil && response.Limit != nil {
client.hash = response.Hash
client.originLimit = response.Limit
}
// we may have some data buffered, so we cannot immediately return the error
// we'll queue the error and use the received error as the closing error
if err != nil {
client.unread.IncludeError(err)
client.handleClosingError(err)
}
}
// all downloaded
if read == 0 {
return 0, io.EOF
}
return read, nil
}
// handleClosingError should be used for an error that also closed the stream.
func (client *Download) handleClosingError(err error) {
if client.closed {
return
}
client.closed = true
client.closingError = err
}
// closeWithError is used when we include the err in the closing error and also close the stream.
func (client *Download) closeWithError(err error) {
if client.closed {
return
}
client.closed = true
client.closingError = errs.Combine(err, client.stream.CloseSend())
}
// closeAndTryFetchError closes the stream and also tries to fetch the actual error from the stream.
func (client *Download) closeAndTryFetchError() {
if client.closed {
return
}
client.closed = true
client.closingError = client.stream.CloseSend()
if client.closingError == nil || client.closingError == io.EOF {
_, client.closingError = client.stream.Recv()
}
}
// Close closes the download.
func (client *Download) Close() (err error) {
defer func() {
if err != nil {
details := errs.Class(fmt.Sprintf("(Node ID: %s, Piece ID: %s)", client.peer.ID.String(), client.limit.PieceId.String()))
err = details.Wrap(err)
err = Error.Wrap(err)
}
}()
client.closeWithError(nil)
return client.closingError
}
// GetHashAndLimit gets the download's hash and original order limit.
func (client *Download) GetHashAndLimit() (*pb.PieceHash, *pb.OrderLimit) {
return client.hash, client.originLimit
}
// ReadBuffer implements buffered reading with an error.
type ReadBuffer struct {
data []byte
err error
}
// Error returns an error if it was encountered.
func (buffer *ReadBuffer) Error() error { return buffer.err }
// Errored returns whether the buffer contains an error.
func (buffer *ReadBuffer) Errored() bool { return buffer.err != nil }
// Empty checks whether buffer needs to be filled.
func (buffer *ReadBuffer) Empty() bool {
return len(buffer.data) == 0 && buffer.err == nil
}
// IncludeError adds an error at the end of the buffer.
func (buffer *ReadBuffer) IncludeError(err error) {
buffer.err = errs.Combine(buffer.err, err)
}
// Fill fills the buffer with the specified bytes.
func (buffer *ReadBuffer) Fill(data []byte) {
buffer.data = data
}
// Read reads from the buffer.
func (buffer *ReadBuffer) Read(data []byte) (n int, err error) {
if len(buffer.data) > 0 {
n = copy(data, buffer.data)
buffer.data = buffer.data[n:]
return n, nil
}
if buffer.err != nil {
return 0, buffer.err
}
return 0, nil
}

View File

@ -1,239 +0,0 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.
package piecestore
import (
"context"
"hash"
"io"
"github.com/zeebo/errs"
monkit "gopkg.in/spacemonkeygo/monkit.v2"
"storj.io/common/identity"
"storj.io/common/pb"
"storj.io/common/pkcrypto"
"storj.io/common/signing"
"storj.io/common/storj"
)
var mon = monkit.Package()
// Uploader defines the interface for uploading a piece.
type Uploader interface {
// Write uploads data to the storage node.
Write([]byte) (int, error)
// Cancel cancels the upload.
Cancel(context.Context) error
// Commit finalizes the upload.
Commit(context.Context) (*pb.PieceHash, error)
}
// Upload implements uploading to the storage node.
type Upload struct {
client *Client
limit *pb.OrderLimit
privateKey storj.PiecePrivateKey
peer *identity.PeerIdentity
stream uploadStream
ctx context.Context
hash hash.Hash // TODO: use concrete implementation
offset int64
allocationStep int64
// when there's a send error, it will automatically close
finished bool
sendError error
}
type uploadStream interface {
Context() context.Context
CloseSend() error
Send(*pb.PieceUploadRequest) error
CloseAndRecv() (*pb.PieceUploadResponse, error)
}
// Upload initiates an upload to the storage node.
func (client *Client) Upload(ctx context.Context, limit *pb.OrderLimit, piecePrivateKey storj.PiecePrivateKey) (_ Uploader, err error) {
defer mon.Task()(&ctx, "node: "+limit.StorageNodeId.String()[0:8])(&err)
peer, err := client.conn.PeerIdentity()
if err != nil {
return nil, ErrInternal.Wrap(err)
}
stream, err := client.client.Upload(ctx)
if err != nil {
return nil, err
}
err = stream.Send(&pb.PieceUploadRequest{
Limit: limit,
})
if err != nil {
_, closeErr := stream.CloseAndRecv()
switch {
case err != io.EOF && closeErr != nil:
err = ErrProtocol.Wrap(errs.Combine(err, closeErr))
case closeErr != nil:
err = ErrProtocol.Wrap(closeErr)
}
return nil, err
}
upload := &Upload{
client: client,
limit: limit,
privateKey: piecePrivateKey,
peer: peer,
stream: stream,
ctx: ctx,
hash: pkcrypto.NewHash(),
offset: 0,
allocationStep: client.config.InitialStep,
}
if client.config.UploadBufferSize <= 0 {
return &LockingUpload{upload: upload}, nil
}
return &LockingUpload{
upload: NewBufferedUpload(upload, int(client.config.UploadBufferSize)),
}, nil
}
// Write sends data to the storagenode allocating as necessary.
func (client *Upload) Write(data []byte) (written int, err error) {
ctx := client.ctx
defer mon.Task()(&ctx, "node: "+client.peer.ID.String()[0:8])(&err)
if client.finished {
return 0, io.EOF
}
// if we already encountered an error, keep returning it
if client.sendError != nil {
return 0, client.sendError
}
fullData := data
defer func() {
// write the hash of the data sent to the server
// guaranteed not to return error
_, _ = client.hash.Write(fullData[:written])
}()
for len(data) > 0 {
// pick a data chunk to send
var sendData []byte
if client.allocationStep < int64(len(data)) {
sendData, data = data[:client.allocationStep], data[client.allocationStep:]
} else {
sendData, data = data, nil
}
// create a signed order for the next chunk
order, err := signing.SignUplinkOrder(ctx, client.privateKey, &pb.Order{
SerialNumber: client.limit.SerialNumber,
Amount: client.offset + int64(len(sendData)),
})
if err != nil {
return written, ErrInternal.Wrap(err)
}
// send signed order + data
err = client.stream.Send(&pb.PieceUploadRequest{
Order: order,
Chunk: &pb.PieceUploadRequest_Chunk{
Offset: client.offset,
Data: sendData,
},
})
if err != nil {
_, closeErr := client.stream.CloseAndRecv()
switch {
case err != io.EOF && closeErr != nil:
err = ErrProtocol.Wrap(errs.Combine(err, closeErr))
case closeErr != nil:
err = ErrProtocol.Wrap(closeErr)
}
client.sendError = err
return written, err
}
// update our offset
client.offset += int64(len(sendData))
written += len(sendData)
// update allocation step, incrementally building trust
client.allocationStep = client.client.nextAllocationStep(client.allocationStep)
}
return written, nil
}
// Cancel cancels the upload.
func (client *Upload) Cancel(ctx context.Context) (err error) {
defer mon.Task()(&ctx)(&err)
if client.finished {
return io.EOF
}
client.finished = true
return Error.Wrap(client.stream.CloseSend())
}
// Commit finishes uploading by sending our piece hash and retrieving the storage node's piece hash.
func (client *Upload) Commit(ctx context.Context) (_ *pb.PieceHash, err error) {
defer mon.Task()(&ctx, "node: "+client.peer.ID.String()[0:8])(&err)
if client.finished {
return nil, io.EOF
}
client.finished = true
if client.sendError != nil {
// something happened during sending, try to figure out what exactly
// since sendError was already reported, we don't need to rehandle it.
_, closeErr := client.stream.CloseAndRecv()
return nil, Error.Wrap(closeErr)
}
// sign the hash for storage node
uplinkHash, err := signing.SignUplinkPieceHash(ctx, client.privateKey, &pb.PieceHash{
PieceId: client.limit.PieceId,
PieceSize: client.offset,
Hash: client.hash.Sum(nil),
Timestamp: client.limit.OrderCreation,
})
if err != nil {
// failed to sign, let's close the sending side, no need to wait for a response
closeErr := client.stream.CloseSend()
// closeErr being io.EOF doesn't inform us about anything
return nil, Error.Wrap(errs.Combine(err, ignoreEOF(closeErr)))
}
// exchange signed piece hashes
// 1. send our piece hash
sendErr := client.stream.Send(&pb.PieceUploadRequest{
Done: uplinkHash,
})
// 2. wait for a piece hash as a response
response, closeErr := client.stream.CloseAndRecv()
if response == nil || response.Done == nil {
// combine all the errors from before
// sendErr is io.EOF when we failed to send, so we don't care
// closeErr is io.EOF when the storage node closed before sending us a response
return nil, errs.Combine(ErrProtocol.New("expected piece hash"), ignoreEOF(sendErr), ignoreEOF(closeErr))
}
// verification
verifyErr := client.client.VerifyPieceHash(client.stream.Context(), client.peer, client.limit, response.Done, uplinkHash.Hash)
// combine all the errors from before
// sendErr is io.EOF when we failed to send
// closeErr is io.EOF when storage node closed properly
return response.Done, errs.Combine(verifyErr, ignoreEOF(sendErr), ignoreEOF(closeErr))
}

View File

@ -1,55 +0,0 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.
package piecestore
import (
"bytes"
"context"
"time"
"github.com/zeebo/errs"
"storj.io/common/identity"
"storj.io/common/pb"
"storj.io/common/signing"
)
const pieceHashExpiration = 24 * time.Hour
var (
// ErrInternal is an error class for internal errors.
ErrInternal = errs.Class("internal")
// ErrProtocol is an error class for unexpected protocol sequence.
ErrProtocol = errs.Class("protocol")
// ErrVerifyUntrusted is an error in case there is a trust issue.
ErrVerifyUntrusted = errs.Class("untrusted")
// ErrStorageNodeInvalidResponse is an error when a storage node returns a response with invalid data
ErrStorageNodeInvalidResponse = errs.Class("storage node has returned an invalid response")
)
// VerifyPieceHash verifies piece hash which is sent by peer.
func (client *Client) VerifyPieceHash(ctx context.Context, peer *identity.PeerIdentity, limit *pb.OrderLimit, hash *pb.PieceHash, expectedHash []byte) (err error) {
defer mon.Task()(&ctx)(&err)
if peer == nil || limit == nil || hash == nil || len(expectedHash) == 0 {
return ErrProtocol.New("invalid arguments")
}
if limit.PieceId != hash.PieceId {
return ErrProtocol.New("piece id changed") // TODO: report rpc status bad message
}
if !bytes.Equal(hash.Hash, expectedHash) {
return ErrVerifyUntrusted.New("hashes don't match") // TODO: report rpc status bad message
}
if err := signing.VerifyPieceHashSignature(ctx, signing.SigneeFromPeerIdentity(peer), hash); err != nil {
return ErrVerifyUntrusted.New("invalid hash signature: %v", err) // TODO: report rpc status bad message
}
if hash.Timestamp.Before(time.Now().Add(-pieceHashExpiration)) {
return ErrStorageNodeInvalidResponse.New("piece has timestamp is too old (%v). Required to be not older than %s",
hash.Timestamp, pieceHashExpiration,
)
}
return nil
}

View File

@ -1,22 +0,0 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.
package meta
// Meta flags for the List method
const (
// None represents no meta flags
None = 0
// Modified meta flag
Modified = uint32(1 << iota)
// Expiration meta flag
Expiration
// Size meta flag
Size
// Checksum meta flag
Checksum
// UserDefined meta flag
UserDefined
// All represents all the meta flags
All = ^uint32(0)
)
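// A hypothetical caller combines these flags with bitwise OR to describe which
// metadata fields it wants back from a listing, for example:
//
//	flags := Modified | Expiration | Size
//
// and passes All (or None) when it wants every field (or none of them).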

View File

@ -1,102 +0,0 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.
package objects
import (
"context"
"io"
"time"
"github.com/gogo/protobuf/proto"
"go.uber.org/zap"
monkit "gopkg.in/spacemonkeygo/monkit.v2"
"storj.io/common/pb"
"storj.io/common/ranger"
"storj.io/common/storj"
"storj.io/storj/uplink/storage/streams"
)
var mon = monkit.Package()
// Meta is the full object metadata
type Meta struct {
pb.SerializableMeta
Modified time.Time
Expiration time.Time
Size int64
Checksum string
}
// Store for objects
type Store interface {
Get(ctx context.Context, path storj.Path, object storj.Object) (rr ranger.Ranger, err error)
Put(ctx context.Context, path storj.Path, data io.Reader, metadata pb.SerializableMeta, expiration time.Time) (meta Meta, err error)
Delete(ctx context.Context, path storj.Path) (err error)
}
type objStore struct {
store streams.Store
pathCipher storj.CipherSuite
}
// NewStore for objects
func NewStore(store streams.Store, pathCipher storj.CipherSuite) Store {
return &objStore{store: store, pathCipher: pathCipher}
}
func (o *objStore) Get(ctx context.Context, path storj.Path, object storj.Object) (
rr ranger.Ranger, err error) {
defer mon.Task()(&ctx)(&err)
if len(path) == 0 {
return nil, storj.ErrNoPath.New("")
}
rr, err = o.store.Get(ctx, path, object, o.pathCipher)
return rr, err
}
func (o *objStore) Put(ctx context.Context, path storj.Path, data io.Reader, metadata pb.SerializableMeta, expiration time.Time) (meta Meta, err error) {
defer mon.Task()(&ctx)(&err)
if len(path) == 0 {
return Meta{}, storj.ErrNoPath.New("")
}
// TODO(kaloyan): autodetect content type
// if metadata.GetContentType() == "" {}
b, err := proto.Marshal(&metadata)
if err != nil {
return Meta{}, err
}
m, err := o.store.Put(ctx, path, o.pathCipher, data, b, expiration)
return convertMeta(m), err
}
func (o *objStore) Delete(ctx context.Context, path storj.Path) (err error) {
defer mon.Task()(&ctx)(&err)
if len(path) == 0 {
return storj.ErrNoPath.New("")
}
return o.store.Delete(ctx, path, o.pathCipher)
}
// convertMeta converts stream metadata to object metadata
func convertMeta(m streams.Meta) Meta {
ser := pb.SerializableMeta{}
err := proto.Unmarshal(m.Data, &ser)
if err != nil {
zap.S().Warnf("Failed deserializing metadata: %v", err)
}
return Meta{
Modified: m.Modified,
Expiration: m.Expiration,
Size: m.Size,
SerializableMeta: ser,
}
}

View File

@ -1,11 +0,0 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.
package segments
import (
"github.com/zeebo/errs"
)
// Error is the errs class of standard segment errors
var Error = errs.Class("segment error")

View File

@ -1,57 +0,0 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.
package segments
import "io"
// PeekThresholdReader allows checking whether the size of a given reader
// exceeds the maximum inline segment size.
type PeekThresholdReader struct {
r io.Reader
thresholdBuf []byte
isLargerCalled bool
readCalled bool
}
// NewPeekThresholdReader creates a new instance of PeekThresholdReader
func NewPeekThresholdReader(r io.Reader) (pt *PeekThresholdReader) {
return &PeekThresholdReader{r: r}
}
// Read initially reads bytes from the internal buffer, then continues
// reading from the wrapped data reader. The number of bytes read `n`
// is returned.
func (pt *PeekThresholdReader) Read(p []byte) (n int, err error) {
pt.readCalled = true
if len(pt.thresholdBuf) == 0 {
return pt.r.Read(p)
}
n = copy(p, pt.thresholdBuf)
pt.thresholdBuf = pt.thresholdBuf[n:]
return n, nil
}
// IsLargerThan reports whether the reader's size is larger than the given
// threshold.
func (pt *PeekThresholdReader) IsLargerThan(thresholdSize int) (bool, error) {
if pt.isLargerCalled {
return false, Error.New("IsLargerThan can't be called more than once")
}
if pt.readCalled {
return false, Error.New("IsLargerThan can't be called after Read has been called")
}
pt.isLargerCalled = true
buf := make([]byte, thresholdSize+1)
n, err := io.ReadFull(pt.r, buf)
pt.thresholdBuf = buf[:n]
if err == io.EOF || err == io.ErrUnexpectedEOF {
return false, nil
}
if err != nil {
return false, err
}
return true, nil
}

View File

@ -1,82 +0,0 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.
package segments
import (
"bytes"
"io"
"testing"
"github.com/stretchr/testify/assert"
)
func TestThresholdBufAndRead(t *testing.T) {
for _, tt := range []struct {
name string
file []byte
thresholdSize int
expectedIsRemote bool
outputBufLen int
readsToEnd bool
}{
{"Test strictly less than threshold: ", []byte("abcdefghijklmnopqrstuvwxyz"), 30, false, 30, true},
{"Test strictly greater than threshold: ", []byte("abcdefghijklmnopqrstuvwxyz"), 10, true, 30, true},
{"Test read less than threshold buf: ", []byte("abcdefghijklmnopqrstuvwxyz"), 20, true, 10, false},
{"Test empty file: ", []byte(""), 10, false, 10, true},
{"Test threshold size == len(file): ", []byte("abcdefghijklmnopqrstuvwxyz"), 26, false, 26, true},
} {
ioReader := bytes.NewReader(tt.file)
p := NewPeekThresholdReader(ioReader)
isRemote, err := p.IsLargerThan(tt.thresholdSize)
assert.Equal(t, tt.expectedIsRemote, isRemote, tt.name)
assert.NoError(t, err, tt.name)
outputBuf := make([]byte, tt.outputBufLen)
n, err := io.ReadFull(p, outputBuf)
if tt.readsToEnd && err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
t.Fatalf(tt.name, "unexpected err from ReadFull:\n%s", err.Error())
} else if !tt.readsToEnd && err != nil {
t.Fatalf(tt.name, "unexpected err from ReadFull:\n%s", err.Error())
}
if tt.readsToEnd && n != len(tt.file) {
t.Fatalf(tt.name, "expected size of n to equal length of file")
}
if !tt.readsToEnd && n != tt.outputBufLen {
t.Fatalf(tt.name, "expected n to equal length of outputBuf")
}
if !bytes.Equal(outputBuf[:n], tt.file[:n]) {
t.Fatalf(tt.name, "expected data in outputBuf to match data in file")
}
}
}
func TestMultipleIsLargerCall(t *testing.T) {
file := []byte("abcdefghijklmnopqrstuvwxyz")
ioReader := bytes.NewReader(file)
p := NewPeekThresholdReader(ioReader)
_, err := p.IsLargerThan(20)
assert.NoError(t, err)
_, err = p.IsLargerThan(20)
if err == nil {
t.Fatal("expected to err because multiple call")
}
}
func TestIsLargerThanCalledAfterRead(t *testing.T) {
file := []byte("abcdefghijklmnopqrstuvwxyz")
ioReader := bytes.NewReader(file)
p := NewPeekThresholdReader(ioReader)
outputBuf := make([]byte, 10)
_, err := p.Read(outputBuf)
assert.NoError(t, err)
_, err = p.IsLargerThan(20)
if err == nil {
t.Fatal("expected to err because IsLargerThan called after Read")
}
}

View File

@ -1,29 +0,0 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.
package segments
import (
"io"
)
// SizedReader allows checking the total number of bytes read so far.
type SizedReader struct {
r io.Reader
size int64
}
// SizeReader creates a new instance of SizedReader.
func SizeReader(r io.Reader) *SizedReader {
return &SizedReader{r: r}
}
// Read implements io.Reader.Read
func (r *SizedReader) Read(p []byte) (n int, err error) {
n, err = r.r.Read(p)
r.size += int64(n)
return n, err
}
// Size returns the total number of bytes read so far.
func (r *SizedReader) Size() int64 { return r.size }

View File

@ -1,38 +0,0 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.
package segments
import (
"bytes"
"io"
"io/ioutil"
"testing"
"github.com/stretchr/testify/assert"
)
func TestSizeReader(t *testing.T) {
data := "abcdefgh"
r := bytes.NewReader([]byte(data))
sr := SizeReader(r)
// Nothing has been read yet - size is 0
assert.EqualValues(t, 0, sr.Size())
// Read 2 bytes - Size is now 2
buf := make([]byte, 2)
_, err := io.ReadFull(sr, buf)
assert.NoError(t, err)
assert.EqualValues(t, 2, sr.Size())
// Read 2 bytes again - Size is now 4
_, err = io.ReadFull(sr, buf)
assert.NoError(t, err)
assert.EqualValues(t, 4, sr.Size())
// Read all the rest - Size is now len(data)
_, err = io.Copy(ioutil.Discard, sr)
assert.NoError(t, err)
assert.EqualValues(t, len(data), sr.Size())
}

View File

@ -1,184 +0,0 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.
package segments
import (
"context"
"io"
"math/rand"
"sync"
"time"
"github.com/vivint/infectious"
"gopkg.in/spacemonkeygo/monkit.v2"
"storj.io/common/pb"
"storj.io/common/ranger"
"storj.io/common/storj"
"storj.io/storj/uplink/ecclient"
"storj.io/storj/uplink/eestream"
"storj.io/storj/uplink/metainfo"
)
var (
mon = monkit.Package()
)
// Meta info about a segment
type Meta struct {
Modified time.Time
Expiration time.Time
Size int64
Data []byte
}
// Store for segments
type Store interface {
// Ranger creates a ranger for downloading erasure codes from piece store nodes.
Ranger(ctx context.Context, info storj.SegmentDownloadInfo, limits []*pb.AddressedOrderLimit, objectRS storj.RedundancyScheme) (ranger.Ranger, error)
Put(ctx context.Context, data io.Reader, expiration time.Time, limits []*pb.AddressedOrderLimit, piecePrivateKey storj.PiecePrivateKey) (_ []*pb.SegmentPieceUploadResult, size int64, err error)
Delete(ctx context.Context, streamID storj.StreamID, segmentIndex int32) (err error)
}
type segmentStore struct {
metainfo *metainfo.Client
ec ecclient.Client
rs eestream.RedundancyStrategy
rngMu sync.Mutex
rng *rand.Rand
}
// NewSegmentStore creates a new instance of segmentStore
func NewSegmentStore(metainfo *metainfo.Client, ec ecclient.Client, rs eestream.RedundancyStrategy) Store {
return &segmentStore{
metainfo: metainfo,
ec: ec,
rs: rs,
rng: rand.New(rand.NewSource(time.Now().UnixNano())),
}
}
// Put uploads a segment to an erasure code client
func (s *segmentStore) Put(ctx context.Context, data io.Reader, expiration time.Time, limits []*pb.AddressedOrderLimit, piecePrivateKey storj.PiecePrivateKey) (_ []*pb.SegmentPieceUploadResult, size int64, err error) {
defer mon.Task()(&ctx)(&err)
sizedReader := SizeReader(NewPeekThresholdReader(data))
successfulNodes, successfulHashes, err := s.ec.Put(ctx, limits, piecePrivateKey, s.rs, sizedReader, expiration)
if err != nil {
return nil, size, Error.Wrap(err)
}
uploadResults := make([]*pb.SegmentPieceUploadResult, 0, len(successfulNodes))
for i := range successfulNodes {
if successfulNodes[i] == nil {
continue
}
uploadResults = append(uploadResults, &pb.SegmentPieceUploadResult{
PieceNum: int32(i),
NodeId: successfulNodes[i].Id,
Hash: successfulHashes[i],
})
}
if l := len(uploadResults); l < s.rs.OptimalThreshold() {
return nil, size, Error.New("uploaded results (%d) are below the optimal threshold (%d)", l, s.rs.OptimalThreshold())
}
return uploadResults, sizedReader.Size(), nil
}
// Ranger creates a ranger for downloading erasure codes from piece store nodes.
func (s *segmentStore) Ranger(
ctx context.Context, info storj.SegmentDownloadInfo, limits []*pb.AddressedOrderLimit, objectRS storj.RedundancyScheme,
) (rr ranger.Ranger, err error) {
defer mon.Task()(&ctx, info, limits, objectRS)(&err)
// no order limits also means it's an inline segment
if len(info.EncryptedInlineData) != 0 || len(limits) == 0 {
return ranger.ByteRanger(info.EncryptedInlineData), nil
}
needed := CalcNeededNodes(objectRS)
selected := make([]*pb.AddressedOrderLimit, len(limits))
s.rngMu.Lock()
perm := s.rng.Perm(len(limits))
s.rngMu.Unlock()
for _, i := range perm {
limit := limits[i]
if limit == nil {
continue
}
selected[i] = limit
needed--
if needed <= 0 {
break
}
}
fc, err := infectious.NewFEC(int(objectRS.RequiredShares), int(objectRS.TotalShares))
if err != nil {
return nil, err
}
es := eestream.NewRSScheme(fc, int(objectRS.ShareSize))
redundancy, err := eestream.NewRedundancyStrategy(es, int(objectRS.RepairShares), int(objectRS.OptimalShares))
if err != nil {
return nil, err
}
rr, err = s.ec.Get(ctx, selected, info.PiecePrivateKey, redundancy, info.Size)
return rr, Error.Wrap(err)
}
// Delete requests the satellite to delete a segment and tells storage nodes
// to delete the segment's pieces.
func (s *segmentStore) Delete(ctx context.Context, streamID storj.StreamID, segmentIndex int32) (err error) {
defer mon.Task()(&ctx)(&err)
_, limits, privateKey, err := s.metainfo.BeginDeleteSegment(ctx, metainfo.BeginDeleteSegmentParams{
StreamID: streamID,
Position: storj.SegmentPosition{
Index: segmentIndex,
},
})
if err != nil {
return Error.Wrap(err)
}
if len(limits) != 0 {
// remote segment - delete the pieces from storage nodes
err = s.ec.Delete(ctx, limits, privateKey)
if err != nil {
return Error.Wrap(err)
}
}
// don't do FinishDeleteSegment at the moment to avoid satellite round trip
// FinishDeleteSegment doesn't implement any specific logic at the moment
return nil
}
// CalcNeededNodes calculates the minimum number of nodes needed for download,
// based on t = k + (n-o)k/o
func CalcNeededNodes(rs storj.RedundancyScheme) int32 {
extra := int32(1)
if rs.OptimalShares > 0 {
extra = int32(((rs.TotalShares - rs.OptimalShares) * rs.RequiredShares) / rs.OptimalShares)
if extra == 0 {
// ensure there is at least one extra node, so we can have error detection/correction
extra = 1
}
}
needed := int32(rs.RequiredShares) + extra
if needed > int32(rs.TotalShares) {
needed = int32(rs.TotalShares)
}
return needed
}
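// Worked example of the formula above (values taken from the accompanying
// test): for a scheme with k=20 required, o=40 optimal and n=50 total shares,
// extra = ((50-40)*20)/40 = 5, so CalcNeededNodes returns 20+5 = 25.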

View File

@ -1,41 +0,0 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.
package segments_test
import (
"fmt"
"testing"
"github.com/stretchr/testify/assert"
"storj.io/common/storj"
"storj.io/storj/uplink/storage/segments"
)
func TestCalcNeededNodes(t *testing.T) {
for i, tt := range []struct {
k, m, o, n int16
needed int32
}{
{k: 0, m: 0, o: 0, n: 0, needed: 0},
{k: 1, m: 1, o: 1, n: 1, needed: 1},
{k: 1, m: 1, o: 2, n: 2, needed: 2},
{k: 1, m: 2, o: 2, n: 2, needed: 2},
{k: 2, m: 3, o: 4, n: 4, needed: 3},
{k: 2, m: 4, o: 6, n: 8, needed: 3},
{k: 20, m: 30, o: 40, n: 50, needed: 25},
{k: 29, m: 35, o: 80, n: 95, needed: 34},
} {
tag := fmt.Sprintf("#%d. %+v", i, tt)
rs := storj.RedundancyScheme{
RequiredShares: tt.k,
RepairShares: tt.m,
OptimalShares: tt.o,
TotalShares: tt.n,
}
assert.Equal(t, tt.needed, segments.CalcNeededNodes(rs), tag)
}
}

View File

@ -1,36 +0,0 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.
package streams
import "io"
// EOFReader holds a reader and its EOF status
type EOFReader struct {
reader io.Reader
eof bool
err error
}
// NewEOFReader returns a reader that keeps track of whether the internal reader has reached EOF
func NewEOFReader(r io.Reader) *EOFReader {
return &EOFReader{reader: r}
}
func (r *EOFReader) Read(p []byte) (n int, err error) {
n, err = r.reader.Read(p)
if err == io.EOF {
r.eof = true
} else if err != nil && r.err == nil {
r.err = err
}
return n, err
}
func (r *EOFReader) isEOF() bool {
return r.eof
}
func (r *EOFReader) hasError() bool {
return r.err != nil
}

View File

@ -1,63 +0,0 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.
package streams
import (
"strings"
"storj.io/common/paths"
"storj.io/common/storj"
)
// Path is a representation of an object path within a bucket
type Path struct {
bucket string
unencPath paths.Unencrypted
raw []byte
}
// Bucket returns the bucket part of the path.
func (p Path) Bucket() string { return p.bucket }
// UnencryptedPath returns the unencrypted path part of the path.
func (p Path) UnencryptedPath() paths.Unencrypted { return p.unencPath }
// Raw returns the raw data in the path.
func (p Path) Raw() []byte { return append([]byte(nil), p.raw...) }
// String returns the string form of the raw data in the path.
func (p Path) String() string { return string(p.raw) }
// ParsePath returns a new Path with the given raw bytes.
func ParsePath(raw storj.Path) (path Path) {
// A path may contain a bucket and an unencrypted path.
parts := strings.SplitN(raw, "/", 2)
path.bucket = parts[0]
if len(parts) > 1 {
path.unencPath = paths.NewUnencrypted(parts[1])
}
path.raw = []byte(raw)
return path
}
// CreatePath will create a Path for the provided information.
func CreatePath(bucket string, unencPath paths.Unencrypted) (path Path) {
path.bucket = bucket
path.unencPath = unencPath
path.raw = append(path.raw, bucket...)
if unencPath.Valid() {
path.raw = append(path.raw, '/')
path.raw = append(path.raw, unencPath.Raw()...)
}
return path
}
// PathForKey removes the trailing `/` from the raw path, which is required so
// the derived key matches the final list path (which also has the trailing
// encrypted `/` part of the path removed).
func PathForKey(raw string) paths.Unencrypted {
return paths.NewUnencrypted(strings.TrimSuffix(raw, "/"))
}
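// A small usage sketch (hypothetical values): both constructors produce
// equivalent paths, and PathForKey strips the trailing slash before key
// derivation.
//
//	p1 := ParsePath("bucket/foo/bar")
//	p2 := CreatePath("bucket", paths.NewUnencrypted("foo/bar"))
//	// p1.Bucket() == p2.Bucket() == "bucket"
//	// p1.String() == p2.String() == "bucket/foo/bar"
//
//	unenc := PathForKey("foo/bar/") // equivalent to paths.NewUnencrypted("foo/bar")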

View File

@ -1,57 +0,0 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.
package streams
import (
"context"
"io"
"time"
"storj.io/common/encryption"
"storj.io/common/ranger"
"storj.io/common/storj"
"storj.io/storj/uplink/metainfo"
"storj.io/storj/uplink/storage/segments"
)
// Store defines the interface methods streams must satisfy to be a store
type Store interface {
Get(ctx context.Context, path storj.Path, object storj.Object, pathCipher storj.CipherSuite) (ranger.Ranger, error)
Put(ctx context.Context, path storj.Path, pathCipher storj.CipherSuite, data io.Reader, metadata []byte, expiration time.Time) (Meta, error)
Delete(ctx context.Context, path storj.Path, pathCipher storj.CipherSuite) error
}
type shimStore struct {
store typedStore
}
// NewStreamStore constructs a Store.
func NewStreamStore(metainfo *metainfo.Client, segments segments.Store, segmentSize int64, encStore *encryption.Store, encBlockSize int, cipher storj.CipherSuite, inlineThreshold int, maxEncryptedSegmentSize int64) (Store, error) {
typedStore, err := newTypedStreamStore(metainfo, segments, segmentSize, encStore, encBlockSize, cipher, inlineThreshold, maxEncryptedSegmentSize)
if err != nil {
return nil, err
}
return &shimStore{store: typedStore}, nil
}
// Get parses the passed in path and dispatches to the typed store.
func (s *shimStore) Get(ctx context.Context, path storj.Path, object storj.Object, pathCipher storj.CipherSuite) (_ ranger.Ranger, err error) {
defer mon.Task()(&ctx)(&err)
return s.store.Get(ctx, ParsePath(path), object, pathCipher)
}
// Put parses the passed in path and dispatches to the typed store.
func (s *shimStore) Put(ctx context.Context, path storj.Path, pathCipher storj.CipherSuite, data io.Reader, metadata []byte, expiration time.Time) (_ Meta, err error) {
defer mon.Task()(&ctx)(&err)
return s.store.Put(ctx, ParsePath(path), pathCipher, data, metadata, expiration)
}
// Delete parses the passed in path and dispatches to the typed store.
func (s *shimStore) Delete(ctx context.Context, path storj.Path, pathCipher storj.CipherSuite) (err error) {
defer mon.Task()(&ctx)(&err)
return s.store.Delete(ctx, ParsePath(path), pathCipher)
}

View File

@ -1,28 +0,0 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.
package streams
import "io"
// SizeReader holds a reader and the number of bytes read so far
type SizeReader struct {
reader io.Reader
size int64
}
// NewSizeReader keeps track of how many bytes are read from the reader
func NewSizeReader(r io.Reader) *SizeReader {
return &SizeReader{reader: r}
}
func (r *SizeReader) Read(p []byte) (n int, err error) {
n, err = r.reader.Read(p)
r.size += int64(n)
return n, err
}
// Size returns the number of bytes read so far
func (r *SizeReader) Size() int64 {
return r.size
}

View File

@ -1,641 +0,0 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.
package streams
import (
"context"
"crypto/rand"
"io"
"io/ioutil"
"time"
"github.com/gogo/protobuf/proto"
"github.com/zeebo/errs"
"go.uber.org/zap"
monkit "gopkg.in/spacemonkeygo/monkit.v2"
"storj.io/common/encryption"
"storj.io/common/pb"
"storj.io/common/ranger"
"storj.io/common/storj"
"storj.io/storj/uplink/metainfo"
"storj.io/storj/uplink/storage/segments"
)
var mon = monkit.Package()
// Meta info about a stream
type Meta struct {
Modified time.Time
Expiration time.Time
Size int64
Data []byte
}
// typedStore defines the typed-path interface methods streams must satisfy to be a store
type typedStore interface {
Get(ctx context.Context, path Path, object storj.Object, pathCipher storj.CipherSuite) (ranger.Ranger, error)
Put(ctx context.Context, path Path, pathCipher storj.CipherSuite, data io.Reader, metadata []byte, expiration time.Time) (Meta, error)
Delete(ctx context.Context, path Path, pathCipher storj.CipherSuite) error
}
// streamStore is a store for streams. It implements typedStore as part of an ongoing migration
// to use typed paths. See the shim for the store that the rest of the world interacts with.
type streamStore struct {
metainfo *metainfo.Client
segments segments.Store
segmentSize int64
encStore *encryption.Store
encBlockSize int
cipher storj.CipherSuite
inlineThreshold int
maxEncryptedSegmentSize int64
}
// newTypedStreamStore constructs a typedStore backed by a streamStore.
func newTypedStreamStore(metainfo *metainfo.Client, segments segments.Store, segmentSize int64, encStore *encryption.Store, encBlockSize int, cipher storj.CipherSuite, inlineThreshold int, maxEncryptedSegmentSize int64) (typedStore, error) {
if segmentSize <= 0 {
return nil, errs.New("segment size must be larger than 0")
}
if encBlockSize <= 0 {
return nil, errs.New("encryption block size must be larger than 0")
}
return &streamStore{
metainfo: metainfo,
segments: segments,
segmentSize: segmentSize,
encStore: encStore,
encBlockSize: encBlockSize,
cipher: cipher,
inlineThreshold: inlineThreshold,
maxEncryptedSegmentSize: maxEncryptedSegmentSize,
}, nil
}
// Put breaks up data as it comes in into s.segmentSize-length pieces, then
// stores the first piece at s0/<path>, the second piece at s1/<path>, and the
// *last* piece at l/<path>. It stores the given metadata, along with the
// number of segments, in a new protobuf, in the metadata of l/<path>.
func (s *streamStore) Put(ctx context.Context, path Path, pathCipher storj.CipherSuite, data io.Reader, metadata []byte, expiration time.Time) (m Meta, err error) {
defer mon.Task()(&ctx)(&err)
// was a file previously uploaded at this path?
err = s.Delete(ctx, path, pathCipher)
if err != nil && !storj.ErrObjectNotFound.Has(err) {
// something went wrong while checking for an existing
// file with the same name
return Meta{}, err
}
return s.upload(ctx, path, pathCipher, data, metadata, expiration)
}
// upload registers segments in metainfo and uploads them to storage nodes.
//
// If there is an error, it cleans up any uploaded segment before returning.
func (s *streamStore) upload(ctx context.Context, path Path, pathCipher storj.CipherSuite, data io.Reader, metadata []byte, expiration time.Time) (_ Meta, err error) {
defer mon.Task()(&ctx)(&err)
derivedKey, err := encryption.DeriveContentKey(path.Bucket(), path.UnencryptedPath(), s.encStore)
if err != nil {
return Meta{}, err
}
encPath, err := encryption.EncryptPath(path.Bucket(), path.UnencryptedPath(), pathCipher, s.encStore)
if err != nil {
return Meta{}, err
}
beginObjectReq := &metainfo.BeginObjectParams{
Bucket: []byte(path.Bucket()),
EncryptedPath: []byte(encPath.Raw()),
ExpiresAt: expiration,
}
var (
committedSegments int64
streamID storj.StreamID
)
defer func() {
if err != nil {
s.cancelHandler(context.Background(), streamID, committedSegments, path, pathCipher)
return
}
select {
case <-ctx.Done():
s.cancelHandler(context.Background(), streamID, committedSegments, path, pathCipher)
default:
}
}()
var (
currentSegment int64
contentKey storj.Key
prevSegmentCommitReq *metainfo.CommitSegmentParams
streamSize int64
lastSegmentSize int64
encryptedKey []byte
keyNonce storj.Nonce
)
eofReader := NewEOFReader(data)
for !eofReader.isEOF() && !eofReader.hasError() {
// generate random key for encrypting the segment's content
_, err := rand.Read(contentKey[:])
if err != nil {
return Meta{}, err
}
// Initialize the content nonce with the current segment index incremented
// by 1, because at this moment the next segment has not yet been
// uploaded.
// The increment by 1 is to avoid nonce reuse with the metadata encryption,
// which is encrypted with the zero nonce.
contentNonce := storj.Nonce{}
_, err = encryption.Increment(&contentNonce, currentSegment+1)
if err != nil {
return Meta{}, err
}
// generate random nonce for encrypting the content key
_, err = rand.Read(keyNonce[:])
if err != nil {
return Meta{}, err
}
encryptedKey, err = encryption.EncryptKey(&contentKey, s.cipher, derivedKey, &keyNonce)
if err != nil {
return Meta{}, err
}
sizeReader := NewSizeReader(eofReader)
segmentReader := io.LimitReader(sizeReader, s.segmentSize)
peekReader := segments.NewPeekThresholdReader(segmentReader)
// If the data is larger than the inline threshold, it will be uploaded as a remote segment
isRemote, err := peekReader.IsLargerThan(s.inlineThreshold)
if err != nil {
return Meta{}, err
}
segmentEncryption := storj.SegmentEncryption{}
if s.cipher != storj.EncNull {
segmentEncryption = storj.SegmentEncryption{
EncryptedKey: encryptedKey,
EncryptedKeyNonce: keyNonce,
}
}
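// Each segment is encrypted with its own random content key; unless
// encryption is disabled, only that key, wrapped with the path-derived key
// and the random key nonce, is stored alongside the segment.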
if isRemote {
encrypter, err := encryption.NewEncrypter(s.cipher, &contentKey, &contentNonce, s.encBlockSize)
if err != nil {
return Meta{}, err
}
paddedReader := encryption.PadReader(ioutil.NopCloser(peekReader), encrypter.InBlockSize())
transformedReader := encryption.TransformReader(paddedReader, encrypter, 0)
beginSegment := &metainfo.BeginSegmentParams{
MaxOrderLimit: s.maxEncryptedSegmentSize,
Position: storj.SegmentPosition{
Index: int32(currentSegment),
},
}
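// The first iteration batches BeginObject together with the first
// BeginSegment; every later iteration batches the previous segment's
// CommitSegment with the next BeginSegment, so each loop iteration costs a
// single round trip to the satellite.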
var responses []metainfo.BatchResponse
if currentSegment == 0 {
responses, err = s.metainfo.Batch(ctx, beginObjectReq, beginSegment)
if err != nil {
return Meta{}, err
}
objResponse, err := responses[0].BeginObject()
if err != nil {
return Meta{}, err
}
streamID = objResponse.StreamID
} else {
beginSegment.StreamID = streamID
responses, err = s.metainfo.Batch(ctx, prevSegmentCommitReq, beginSegment)
if len(responses) > 0 {
// The previous segment's CommitSegment (the first request in the batch)
// succeeded, so count it as committed even if the batch returned an error.
committedSegments++
}
if err != nil {
return Meta{}, err
}
}
segResponse, err := responses[1].BeginSegment()
if err != nil {
return Meta{}, err
}
segmentID := segResponse.SegmentID
limits := segResponse.Limits
piecePrivateKey := segResponse.PiecePrivateKey
uploadResults, size, err := s.segments.Put(ctx, transformedReader, expiration, limits, piecePrivateKey)
if err != nil {
return Meta{}, err
}
prevSegmentCommitReq = &metainfo.CommitSegmentParams{
SegmentID: segmentID,
SizeEncryptedData: size,
Encryption: segmentEncryption,
UploadResult: uploadResults,
}
} else {
data, err := ioutil.ReadAll(peekReader)
if err != nil {
return Meta{}, err
}
cipherData, err := encryption.Encrypt(data, s.cipher, &contentKey, &contentNonce)
if err != nil {
return Meta{}, err
}
makeInlineSegment := &metainfo.MakeInlineSegmentParams{
Position: storj.SegmentPosition{
Index: int32(currentSegment),
},
Encryption: segmentEncryption,
EncryptedInlineData: cipherData,
}
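// Inline segments carry the encrypted data in the metainfo request itself,
// so no storage-node order limits or piece uploads are needed; the data is
// presumably kept alongside the object metadata on the satellite.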
if currentSegment == 0 {
responses, err := s.metainfo.Batch(ctx, beginObjectReq, makeInlineSegment)
if err != nil {
return Meta{}, err
}
objResponse, err := responses[0].BeginObject()
if err != nil {
return Meta{}, err
}
streamID = objResponse.StreamID
} else {
makeInlineSegment.StreamID = streamID
err = s.metainfo.MakeInlineSegment(ctx, *makeInlineSegment)
if err != nil {
return Meta{}, err
}
}
committedSegments++
}
lastSegmentSize = sizeReader.Size()
streamSize += lastSegmentSize
currentSegment++
}
totalSegments := currentSegment
if eofReader.hasError() {
return Meta{}, eofReader.err
}
streamInfo, err := proto.Marshal(&pb.StreamInfo{
DeprecatedNumberOfSegments: totalSegments,
SegmentsSize: s.segmentSize,
LastSegmentSize: lastSegmentSize,
Metadata: metadata,
})
if err != nil {
return Meta{}, err
}
// encrypt metadata with the content encryption key and zero nonce
encryptedStreamInfo, err := encryption.Encrypt(streamInfo, s.cipher, &contentKey, &storj.Nonce{})
if err != nil {
return Meta{}, err
}
streamMeta := pb.StreamMeta{
NumberOfSegments: totalSegments,
EncryptedStreamInfo: encryptedStreamInfo,
EncryptionType: int32(s.cipher),
EncryptionBlockSize: int32(s.encBlockSize),
}
if s.cipher != storj.EncNull {
streamMeta.LastSegmentMeta = &pb.SegmentMeta{
EncryptedKey: encryptedKey,
KeyNonce: keyNonce[:],
}
}
objectMetadata, err := proto.Marshal(&streamMeta)
if err != nil {
return Meta{}, err
}
commitObject := metainfo.CommitObjectParams{
StreamID: streamID,
EncryptedMetadata: objectMetadata,
}
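// If the stream ended with a remote segment, its CommitSegment is still
// pending; batch it together with CommitObject so the upload finishes with a
// single final request.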
if prevSegmentCommitReq != nil {
var responses []metainfo.BatchResponse
responses, err = s.metainfo.Batch(ctx, prevSegmentCommitReq, &commitObject)
if len(responses) > 0 {
// The previous segment's CommitSegment (the first request in the batch)
// succeeded, so count it as committed even if the batch returned an error.
committedSegments++
}
} else {
err = s.metainfo.CommitObject(ctx, commitObject)
}
if err != nil {
return Meta{}, err
}
resultMeta := Meta{
Expiration: expiration,
Size: streamSize,
Data: metadata,
}
return resultMeta, nil
}
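// For example, uploading an object that spans three remote segments results
// in the following batched request sequence (derived from the logic above):
//
//	Batch(BeginObject, BeginSegment(0))
//	Batch(CommitSegment(0), BeginSegment(1))
//	Batch(CommitSegment(1), BeginSegment(2))
//	Batch(CommitSegment(2), CommitObject)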
// Get returns a ranger that knows what the overall size is (from l/<path>)
// and then returns the appropriate data from segments s0/<path>, s1/<path>,
// ..., l/<path>.
func (s *streamStore) Get(ctx context.Context, path Path, object storj.Object, pathCipher storj.CipherSuite) (rr ranger.Ranger, err error) {
defer mon.Task()(&ctx)(&err)
info, limits, err := s.metainfo.DownloadSegment(ctx, metainfo.DownloadSegmentParams{
StreamID: object.ID,
Position: storj.SegmentPosition{
Index: -1, // Request the last segment
},
})
if err != nil {
return nil, err
}
lastSegmentRanger, err := s.segments.Ranger(ctx, info, limits, object.RedundancyScheme)
if err != nil {
return nil, err
}
derivedKey, err := encryption.DeriveContentKey(path.Bucket(), path.UnencryptedPath(), s.encStore)
if err != nil {
return nil, err
}
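// Every segment except the last has the same fixed size, so rangers for them
// can be built lazily; their data is only fetched when a requested range
// actually overlaps the segment.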
var rangers []ranger.Ranger
for i := int64(0); i < object.SegmentCount-1; i++ {
var contentNonce storj.Nonce
_, err = encryption.Increment(&contentNonce, i+1)
if err != nil {
return nil, err
}
rangers = append(rangers, &lazySegmentRanger{
metainfo: s.metainfo,
segments: s.segments,
streamID: object.ID,
segmentIndex: int32(i),
rs: object.RedundancyScheme,
size: object.FixedSegmentSize,
derivedKey: derivedKey,
startingNonce: &contentNonce,
encBlockSize: int(object.EncryptionParameters.BlockSize),
cipher: object.CipherSuite,
})
}
var contentNonce storj.Nonce
_, err = encryption.Increment(&contentNonce, object.SegmentCount)
if err != nil {
return nil, err
}
decryptedLastSegmentRanger, err := decryptRanger(
ctx,
lastSegmentRanger,
object.LastSegment.Size,
object.CipherSuite,
derivedKey,
info.SegmentEncryption.EncryptedKey,
&info.SegmentEncryption.EncryptedKeyNonce,
&contentNonce,
int(object.EncryptionParameters.BlockSize),
)
if err != nil {
return nil, err
}
rangers = append(rangers, decryptedLastSegmentRanger)
return ranger.Concat(rangers...), nil
}
// Delete deletes all of the object's segments, with the last one last
func (s *streamStore) Delete(ctx context.Context, path Path, pathCipher storj.CipherSuite) (err error) {
defer mon.Task()(&ctx)(&err)
encPath, err := encryption.EncryptPath(path.Bucket(), path.UnencryptedPath(), pathCipher, s.encStore)
if err != nil {
return err
}
batchItems := []metainfo.BatchItem{
&metainfo.BeginDeleteObjectParams{
Bucket: []byte(path.Bucket()),
EncryptedPath: []byte(encPath.Raw()),
},
&metainfo.ListSegmentsParams{
CursorPosition: storj.SegmentPosition{
Index: 0,
},
},
}
resps, err := s.metainfo.Batch(ctx, batchItems...)
if err != nil {
return err
}
if len(resps) != 2 {
return errs.New(
"metainfo.Batch request returned an unexpected number of responses. Want: 2, got: %d", len(resps),
)
}
delResp, err := resps[0].BeginDeleteObject()
if err != nil {
return err
}
listResp, err := resps[1].ListSegment()
if err != nil {
return err
}
// TODO handle listResp.More
var errlist errs.Group
for _, item := range listResp.Items {
err = s.segments.Delete(ctx, delResp.StreamID, item.Position.Index)
if err != nil {
errlist.Add(err)
continue
}
}
return errlist.Err()
}
// ListItem is a single item in a listing
type ListItem struct {
Path string
Meta Meta
IsPrefix bool
}
type lazySegmentRanger struct {
ranger ranger.Ranger
metainfo *metainfo.Client
segments segments.Store
streamID storj.StreamID
segmentIndex int32
rs storj.RedundancyScheme
size int64
derivedKey *storj.Key
startingNonce *storj.Nonce
encBlockSize int
cipher storj.CipherSuite
}
// Size implements Ranger.Size.
func (lr *lazySegmentRanger) Size() int64 {
return lr.size
}
// Range implements Ranger.Range; the underlying segment is fetched lazily on the first call.
func (lr *lazySegmentRanger) Range(ctx context.Context, offset, length int64) (_ io.ReadCloser, err error) {
defer mon.Task()(&ctx)(&err)
if lr.ranger == nil {
info, limits, err := lr.metainfo.DownloadSegment(ctx, metainfo.DownloadSegmentParams{
StreamID: lr.streamID,
Position: storj.SegmentPosition{
Index: lr.segmentIndex,
},
})
if err != nil {
return nil, err
}
rr, err := lr.segments.Ranger(ctx, info, limits, lr.rs)
if err != nil {
return nil, err
}
encryptedKey, keyNonce := info.SegmentEncryption.EncryptedKey, info.SegmentEncryption.EncryptedKeyNonce
lr.ranger, err = decryptRanger(ctx, rr, lr.size, lr.cipher, lr.derivedKey, encryptedKey, &keyNonce, lr.startingNonce, lr.encBlockSize)
if err != nil {
return nil, err
}
}
return lr.ranger.Range(ctx, offset, length)
}
// decryptRanger returns a decrypted ranger of the given rr ranger.
func decryptRanger(ctx context.Context, rr ranger.Ranger, decryptedSize int64, cipher storj.CipherSuite, derivedKey *storj.Key, encryptedKey storj.EncryptedPrivateKey, encryptedKeyNonce, startingNonce *storj.Nonce, encBlockSize int) (decrypted ranger.Ranger, err error) {
defer mon.Task()(&ctx)(&err)
contentKey, err := encryption.DecryptKey(encryptedKey, cipher, derivedKey, encryptedKeyNonce)
if err != nil {
return nil, err
}
decrypter, err := encryption.NewDecrypter(cipher, contentKey, startingNonce, encBlockSize)
if err != nil {
return nil, err
}
var rd ranger.Ranger
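// If the ciphertext size is not a multiple of the decrypter's block size
// (typically an inline segment), stream transformation is not possible, so
// the whole segment is read into memory and decrypted in one call.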
if rr.Size()%int64(decrypter.InBlockSize()) != 0 {
reader, err := rr.Range(ctx, 0, rr.Size())
if err != nil {
return nil, err
}
defer func() { err = errs.Combine(err, reader.Close()) }()
cipherData, err := ioutil.ReadAll(reader)
if err != nil {
return nil, err
}
data, err := encryption.Decrypt(cipherData, cipher, contentKey, startingNonce)
if err != nil {
return nil, err
}
return ranger.ByteRanger(data), nil
}
rd, err = encryption.Transform(rr, decrypter)
if err != nil {
return nil, err
}
return encryption.Unpad(rd, int(rd.Size()-decryptedSize))
}
// cancelHandler cleans up already-uploaded segments when an upload is canceled (e.g. on CTRL+C) or fails
func (s *streamStore) cancelHandler(ctx context.Context, streamID storj.StreamID, totalSegments int64, path Path, pathCipher storj.CipherSuite) {
defer mon.Task()(&ctx)(nil)
for i := int64(0); i < totalSegments; i++ {
err := s.segments.Delete(ctx, streamID, int32(i))
if err != nil {
zap.L().Warn("Failed deleting segment", zap.Stringer("path", path), zap.Int64("Segment Index", i), zap.Error(err))
continue
}
}
}
func getEncryptedKeyAndNonce(m *pb.SegmentMeta) (storj.EncryptedPrivateKey, *storj.Nonce) {
if m == nil {
return nil, nil
}
var nonce storj.Nonce
copy(nonce[:], m.KeyNonce)
return m.EncryptedKey, &nonce
}
// TypedDecryptStreamInfo decrypts stream info
func TypedDecryptStreamInfo(ctx context.Context, streamMetaBytes []byte, path Path, encStore *encryption.Store) (
_ *pb.StreamInfo, streamMeta pb.StreamMeta, err error) {
defer mon.Task()(&ctx)(&err)
err = proto.Unmarshal(streamMetaBytes, &streamMeta)
if err != nil {
return nil, pb.StreamMeta{}, err
}
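// With encryption bypass enabled, the stream info stays encrypted; only the
// outer stream meta is returned to the caller.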
if encStore.EncryptionBypass {
return nil, streamMeta, nil
}
derivedKey, err := encryption.DeriveContentKey(path.Bucket(), path.UnencryptedPath(), encStore)
if err != nil {
return nil, pb.StreamMeta{}, err
}
cipher := storj.CipherSuite(streamMeta.EncryptionType)
encryptedKey, keyNonce := getEncryptedKeyAndNonce(streamMeta.LastSegmentMeta)
contentKey, err := encryption.DecryptKey(encryptedKey, cipher, derivedKey, keyNonce)
if err != nil {
return nil, pb.StreamMeta{}, err
}
// decrypt metadata with the content encryption key and zero nonce
streamInfo, err := encryption.Decrypt(streamMeta.EncryptedStreamInfo, cipher, contentKey, &storj.Nonce{})
if err != nil {
return nil, pb.StreamMeta{}, err
}
var stream pb.StreamInfo
if err := proto.Unmarshal(streamInfo, &stream); err != nil {
return nil, pb.StreamMeta{}, err
}
return &stream, streamMeta, nil
}

View File

@ -1,11 +0,0 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.
package stream
import (
"github.com/zeebo/errs"
)
// Error is the errs class of stream errors
var Error = errs.Class("stream error")

View File

@ -1,118 +0,0 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.
package stream
import (
"context"
"io"
"storj.io/common/storj"
"storj.io/storj/uplink/metainfo/kvmetainfo"
"storj.io/storj/uplink/storage/streams"
)
// Download implements Reader, Seeker and Closer for reading from a stream.
type Download struct {
ctx context.Context
stream kvmetainfo.ReadOnlyStream
streams streams.Store
reader io.ReadCloser
offset int64
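// limit is the number of bytes still allowed to be read; a negative value
// means the rest of the stream.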
limit int64
closed bool
}
// NewDownload creates a new stream download.
func NewDownload(ctx context.Context, stream kvmetainfo.ReadOnlyStream, streams streams.Store) *Download {
return &Download{
ctx: ctx,
stream: stream,
streams: streams,
limit: -1,
}
}
// NewDownloadRange creates a new stream download for the byte range from offset to offset+limit.
func NewDownloadRange(ctx context.Context, stream kvmetainfo.ReadOnlyStream, streams streams.Store, offset, limit int64) *Download {
return &Download{
ctx: ctx,
stream: stream,
streams: streams,
offset: offset,
limit: limit,
}
}
// Read reads up to len(data) bytes into data.
//
// If this is the first call it will read from the beginning of the stream.
// Use Seek to change the current offset for the next Read call.
//
// See io.Reader for more details.
func (download *Download) Read(data []byte) (n int, err error) {
if download.closed {
return 0, Error.New("already closed")
}
if download.reader == nil {
err = download.resetReader(download.offset)
if err != nil {
return 0, err
}
}
if download.limit == 0 {
return 0, io.EOF
}
if download.limit > 0 && download.limit < int64(len(data)) {
data = data[:download.limit]
}
n, err = download.reader.Read(data)
if download.limit >= 0 {
download.limit -= int64(n)
}
download.offset += int64(n)
return n, err
}
// Close closes the stream and releases the underlying resources.
func (download *Download) Close() error {
if download.closed {
return Error.New("already closed")
}
download.closed = true
if download.reader == nil {
return nil
}
return download.reader.Close()
}
func (download *Download) resetReader(offset int64) error {
if download.reader != nil {
err := download.reader.Close()
if err != nil {
return err
}
}
obj := download.stream.Info()
rr, err := download.streams.Get(download.ctx, storj.JoinPaths(obj.Bucket.Name, obj.Path), obj, obj.Bucket.PathCipher)
if err != nil {
return err
}
download.reader, err = rr.Range(download.ctx, offset, obj.Size-offset)
if err != nil {
return err
}
download.offset = offset
return nil
}

View File

@ -1,87 +0,0 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.
package stream
import (
"context"
"io"
"github.com/gogo/protobuf/proto"
"github.com/zeebo/errs"
"golang.org/x/sync/errgroup"
"storj.io/common/pb"
"storj.io/common/storj"
"storj.io/storj/uplink/metainfo/kvmetainfo"
"storj.io/storj/uplink/storage/streams"
)
// Upload implements Writer and Closer for writing to a stream.
type Upload struct {
ctx context.Context
stream kvmetainfo.MutableStream
streams streams.Store
writer io.WriteCloser
closed bool
errgroup errgroup.Group
}
// NewUpload creates a new stream upload.
func NewUpload(ctx context.Context, stream kvmetainfo.MutableStream, streams streams.Store) *Upload {
reader, writer := io.Pipe()
upload := Upload{
ctx: ctx,
stream: stream,
streams: streams,
writer: writer,
}
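// Writes to the returned Upload go into the pipe's writer; the goroutine
// below drains the reader and streams the data to streams.Put, so Write
// blocks until the upload consumes the data.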
upload.errgroup.Go(func() error {
obj := stream.Info()
serMetaInfo := pb.SerializableMeta{
ContentType: obj.ContentType,
UserDefined: obj.Metadata,
}
metadata, err := proto.Marshal(&serMetaInfo)
if err != nil {
return errs.Combine(err, reader.CloseWithError(err))
}
_, err = streams.Put(ctx, storj.JoinPaths(obj.Bucket.Name, obj.Path), obj.Bucket.PathCipher, reader, metadata, obj.Expires)
if err != nil {
return errs.Combine(err, reader.CloseWithError(err))
}
return nil
})
return &upload
}
// Write writes len(data) bytes from data to the underlying data stream.
//
// See io.Writer for more details.
func (upload *Upload) Write(data []byte) (n int, err error) {
if upload.closed {
return 0, Error.New("already closed")
}
return upload.writer.Write(data)
}
// Close closes the stream and releases the underlying resources.
func (upload *Upload) Close() error {
if upload.closed {
return Error.New("already closed")
}
upload.closed = true
err := upload.writer.Close()
// Wait for streams.Put to commit the upload to the PointerDB
return errs.Combine(err, upload.errgroup.Wait())
}