storj/pkg/ranger/reader.go

// Copyright (C) 2018 Storj Labs, Inc.
// See LICENSE for copying information.

package ranger

import (
	"bytes"
	"io"
	"io/ioutil"

	"storj.io/storj/internal/pkg/readcloser"
)

// A Ranger is a flexible data stream type that allows for more effective
// pipelining during seeking. A Ranger can return multiple parallel Readers for
// any subranges.
type Ranger interface {
	Size() int64
	Range(offset, length int64) io.ReadCloser
}
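
// A minimal usage sketch, not part of the original file: assuming some Ranger
// value r (for example a ByteRanger) and ioutil.ReadAll to drain the readers,
// two subranges can be consumed through independent ReadClosers, which is what
// makes the parallel pipelining described above possible.
//
//	half := r.Size() / 2
//	first := r.Range(0, half)
//	second := r.Range(half, r.Size()-half)
//	defer first.Close()
//	defer second.Close()
//	a, _ := ioutil.ReadAll(first)  // bytes [0, half)
//	b, _ := ioutil.ReadAll(second) // bytes [half, Size)
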
// A RangeCloser is a Ranger that must be closed when finished
type RangeCloser interface {
	Ranger
	io.Closer
}

// NopCloser turns an existing Ranger into a RangeCloser with a no-op Close().
func NopCloser(r Ranger) RangeCloser {
	return struct {
		Ranger
		io.Closer
	}{
		Ranger: r,
		Closer: ioutil.NopCloser(nil),
	}
}
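
// Sketch of how NopCloser might be used (the ByteRanger value is just an
// assumed example): code that requires a RangeCloser can accept a plain
// Ranger this way, and the added Close is a harmless no-op.
//
//	var rc RangeCloser = NopCloser(ByteRanger([]byte("data")))
//	defer rc.Close() // does nothing
//	r := rc.Range(0, rc.Size())
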
// ByteRanger turns a byte slice into a Ranger
type ByteRanger []byte

// Size implements Ranger.Size
func (b ByteRanger) Size() int64 { return int64(len(b)) }

// Range implements Ranger.Range
func (b ByteRanger) Range(offset, length int64) io.ReadCloser {
	if offset < 0 {
		return readcloser.FatalReadCloser(Error.New("negative offset"))
	}
	if length < 0 {
		return readcloser.FatalReadCloser(Error.New("negative length"))
	}
	if offset+length > int64(len(b)) {
		return readcloser.FatalReadCloser(Error.New("buffer runoff"))
	}
	return ioutil.NopCloser(bytes.NewReader(b[offset : offset+length]))
}
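
// Usage sketch (illustrative only; the literal bytes and offsets are assumed):
// slicing the middle of an in-memory buffer through the Ranger interface.
//
//	br := ByteRanger([]byte("hello world"))
//	rc := br.Range(6, 5) // a reader over "world"
//	defer rc.Close()
//	out, _ := ioutil.ReadAll(rc)
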
// concatReader presents two Rangers back to back as a single Ranger.
type concatReader struct {
	r1 Ranger
	r2 Ranger
}

func (c *concatReader) Size() int64 {
	return c.r1.Size() + c.r2.Size()
}

func (c *concatReader) Range(offset, length int64) io.ReadCloser {
	r1Size := c.r1.Size()
	if offset+length <= r1Size {
		// the requested range lies entirely within the first Ranger
		return c.r1.Range(offset, length)
	}
	if offset >= r1Size {
		// the requested range lies entirely within the second Ranger
		return c.r2.Range(offset-r1Size, length)
	}
	// the range straddles both: read the tail of the first Ranger, then
	// lazily open the head of the second once the first is drained
	return readcloser.MultiReadCloser(
		c.r1.Range(offset, r1Size-offset),
		readcloser.LazyReadCloser(func() io.ReadCloser {
			return c.r2.Range(0, length-(r1Size-offset))
		}))
}

func concat2(r1, r2 Ranger) Ranger {
	return &concatReader{r1: r1, r2: r2}
}

// Concat concatenates Rangers
func Concat(r ...Ranger) Ranger {
	switch len(r) {
	case 0:
		return ByteRanger(nil)
	case 1:
		return r[0]
	case 2:
		return concat2(r[0], r[1])
	default:
		mid := len(r) / 2
		return concat2(Concat(r[:mid]...), Concat(r[mid:]...))
	}
}
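
// Usage sketch (illustrative; the example values are assumed): Concat splits
// the input recursively, building a balanced tree of concatReaders, so a Range
// request that crosses a boundary is stitched together from the pieces.
//
//	combined := Concat(
//		ByteRanger([]byte("hello ")),
//		ByteRanger([]byte("world")),
//	)
//	rc := combined.Range(3, 6) // spans both pieces: "lo wor"
//	defer rc.Close()
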
// subrange is a view onto a fixed window of another Ranger.
type subrange struct {
	r              Ranger
	offset, length int64
}

// Subrange returns a subset of a Ranger.
func Subrange(data Ranger, offset, length int64) (Ranger, error) {
	dSize := data.Size()
	if offset < 0 || offset > dSize {
		return nil, Error.New("invalid offset")
	}
	if length+offset > dSize {
		return nil, Error.New("invalid length")
	}
	return &subrange{r: data, offset: offset, length: length}, nil
}
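
// Usage sketch (illustrative; the values are assumed): Subrange validates its
// bounds up front and then simply shifts every later Range call by the fixed
// offset.
//
//	sub, err := Subrange(ByteRanger([]byte("hello world")), 6, 5)
//	if err != nil {
//		// handle invalid offset/length
//	}
//	rc := sub.Range(0, sub.Size()) // same bytes as the parent's [6, 11)
//	defer rc.Close()
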
func (s *subrange) Size() int64 {
	return s.length
}

func (s *subrange) Range(offset, length int64) io.ReadCloser {
	return s.r.Range(offset+s.offset, length)
}