storj/pkg/piecestore/rpc/client/pieceranger.go
Alexander Leitner 2eb660d4b7 Bandwidth allocation pipeline data (#276)
* Moving retrieve into multiple goroutines

* Make sure we pass nil errors into err channel

* restore tests

* incorporate locks in retrieve.go

* deserialize data only if we have something to deserialize when receiving bandwidth allocation in server store

* Adding logic for retrieve to be more efficient

* Add channel?

* hmm

* implement Throttle concurrency primitive

* using throttle

* Remove unused variables

* Egon comments addressed

* Get bandwidth allocation total correct

* Consume without waiting

* incrementally increase signing size

* Get downloads working with throttle

* Removed logging

* Make sure we handle errors properly

* Fix tests
Co-authored-by: Kaloyan <kaloyan@storj.io>

* Can't Fatalf in goroutine

* Add missing returns to tests

* add capacity to channel, smarter allocations

* rename things and don't use size as limit

* replace things with sync2.Throttle

* fix compilation errors

* add note about security

* fix ordering

* Max length is actually 64 bytes for piece ID

* fix limit

* error comes from pending allocs, so no need to relog

* Optimize throughput

* TODO

* Deleted allocation manager

* Return when someone sends a smaller bandwidth allocation than the previous message

* review comments
2018-09-10 03:18:41 -06:00


// Copyright (C) 2018 Storj Labs, Inc.
// See LICENSE for copying information.

package client

import (
	"bytes"
	"context"
	"io"
	"io/ioutil"

	"github.com/zeebo/errs"

	"storj.io/storj/pkg/ranger"
	pb "storj.io/storj/protos/piecestore"
)

// Error is the error class for pieceRanger
var Error = errs.Class("pieceRanger error")
type pieceRanger struct {
	c      *Client
	id     PieceID
	size   int64
	stream pb.PieceStoreRoutes_RetrieveClient
	pba    *pb.PayerBandwidthAllocation
}
// PieceRanger returns a RangeCloser from a PieceID.
func PieceRanger(ctx context.Context, c *Client, stream pb.PieceStoreRoutes_RetrieveClient, id PieceID, pba *pb.PayerBandwidthAllocation) (ranger.RangeCloser, error) {
	// look up the piece metadata to learn its size
	piece, err := c.Meta(ctx, id)
	if err != nil {
		return nil, err
	}
	return &pieceRanger{c: c, id: id, size: piece.Size, stream: stream, pba: pba}, nil
}
// PieceRangerSize creates a PieceRanger with known size.
// Use it if you already know the piece size. This saves the extra request for
// retrieving the piece size from the piece storage.
func PieceRangerSize(c *Client, stream pb.PieceStoreRoutes_RetrieveClient, id PieceID, size int64, pba *pb.PayerBandwidthAllocation) ranger.RangeCloser {
	return &pieceRanger{c: c, id: id, size: size, stream: stream, pba: pba}
}
// Size implements Ranger.Size
func (r *pieceRanger) Size() int64 {
	return r.size
}
// Close implements io.Closer
func (r *pieceRanger) Close() error {
	return r.c.Close()
}
// Range implements Ranger.Range
func (r *pieceRanger) Range(ctx context.Context, offset, length int64) (io.ReadCloser, error) {
	if offset < 0 {
		return nil, Error.New("negative offset")
	}
	if length < 0 {
		return nil, Error.New("negative length")
	}
	if offset+length > r.size {
		return nil, Error.New("range beyond end")
	}
	if length == 0 {
		// nothing to read; return an empty reader without hitting the network
		return ioutil.NopCloser(bytes.NewReader([]byte{})), nil
	}

	// ask the storage node to stream back the requested byte range of the piece
	if err := r.stream.Send(&pb.PieceRetrieval{PieceData: &pb.PieceRetrieval_PieceData{Id: r.id.String(), Size: length, Offset: offset}}); err != nil {
		return nil, err
	}

	return NewStreamReader(r.c, r.stream, r.pba, r.size), nil
}
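
For context, a minimal sketch of how this ranger might be consumed. It assumes an already-dialed Client, an open Retrieve stream, and a PayerBandwidthAllocation obtained elsewhere (that plumbing is hypothetical and not shown in this file); the helper name readPieceRange and the piece size 4096 are illustrative assumptions. It uses PieceRangerSize to skip the Meta round trip and reads 1 KiB starting at offset 1024.

// Illustrative only; not part of pieceranger.go. Assumes the setup described above.
package client_test

import (
	"context"
	"fmt"
	"io/ioutil"

	client "storj.io/storj/pkg/piecestore/rpc/client"
	pb "storj.io/storj/protos/piecestore"
)

// readPieceRange is a hypothetical helper showing the intended call sequence.
func readPieceRange(ctx context.Context, c *client.Client, stream pb.PieceStoreRoutes_RetrieveClient, id client.PieceID, pba *pb.PayerBandwidthAllocation) error {
	// The size (4096 here) is assumed known, e.g. from segment metadata,
	// so PieceRangerSize can be used instead of the Meta-querying PieceRanger.
	rr := client.PieceRangerSize(c, stream, id, 4096, pba)
	defer func() { _ = rr.Close() }()

	// Read 1 KiB starting at offset 1024.
	rc, err := rr.Range(ctx, 1024, 1024)
	if err != nil {
		return err
	}
	defer func() { _ = rc.Close() }()

	data, err := ioutil.ReadAll(rc)
	if err != nil {
		return err
	}
	fmt.Printf("read %d bytes\n", len(data))
	return nil
}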