// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.

package pieces

import (
	"context"
	"encoding/binary"
	"hash"
	"io"

	"github.com/zeebo/errs"
	"go.uber.org/zap"

	"storj.io/common/pb"
	"storj.io/common/pkcrypto"
	"storj.io/common/storj"
	"storj.io/storj/storage"
	"storj.io/storj/storage/filestore"
)

const (
	// V1PieceHeaderReservedArea is the amount of space to be reserved at the beginning of
	// pieces stored with filestore.FormatV1 or greater. Serialized piece headers should be
	// written into that space, and the remaining space afterward should be zeroes.
	// V1PieceHeaderReservedArea includes the size of the framing field
	// (v1PieceHeaderFramingSize). It has a constant size because:
	//
	//  * We do not anticipate needing more than this.
	//  * We will be able to sum up all space used by a satellite (or all satellites) without
	//    opening and reading from each piece file (stat() is faster than open()).
	//  * This simplifies piece file writing (if we needed to know the exact header size
	//    before writing, then we'd need to spool the entire contents of the piece somewhere
	//    before we could calculate the hash and size). This way, we can simply reserve the
	//    header space, write the piece content as it comes in, and then seek back to the
	//    beginning and fill in the header.
	//
	// We put it at the beginning of piece files because:
	//
	//  * If we put it at the end instead, we would have to seek to the end of a file (to find
	//    out the real size while avoiding race conditions with stat()) and then seek backward
	//    again to get the header, and then seek back to the beginning to get the content.
	//    Seeking on spinning-platter hard drives is very slow compared to reading sequential
	//    bytes.
	//  * Putting the header in the middle of piece files might be entertaining, but it would
	//    also be silly.
	//  * If piece files are incorrectly truncated or not completely written, it will be
	//    much easier to identify those cases when the header is intact and findable.
	//
	// If more space than this is needed, we will need to use a new storage format version.
	V1PieceHeaderReservedArea = 512

	// v1PieceHeaderFramingSize is the size of the field used at the beginning of piece
	// files to indicate the size of the marshaled piece header within the reserved header
	// area (because protobufs are not self-delimiting, which is lame).
	v1PieceHeaderFramingSize = 2
)
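
// As a sketch (not normative), the resulting on-disk layout of a FormatV1 piece file is:
//
//	offset 0                          2-byte big-endian framing field holding len(header)
//	offset v1PieceHeaderFramingSize   marshaled pb.PieceHeader, len(header) bytes
//	...                               zero padding up to V1PieceHeaderReservedArea
//	offset V1PieceHeaderReservedArea  piece content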

// BadFormatVersion is returned when a storage format cannot support the requested function.
var BadFormatVersion = errs.Class("Incompatible storage format version")

// Writer implements a piece writer that writes content to the blob store and calculates a hash.
type Writer struct {
	log       *zap.Logger
	hash      hash.Hash
	blob      storage.BlobWriter
	pieceSize int64 // piece size only; i.e., not including piece header

	blobs     storage.Blobs
	satellite storj.NodeID
	closed    bool
}

// NewWriter creates a new writer for storage.BlobWriter.
func NewWriter(log *zap.Logger, blobWriter storage.BlobWriter, blobs storage.Blobs, satellite storj.NodeID) (*Writer, error) {
	w := &Writer{log: log}
	if blobWriter.StorageFormatVersion() >= filestore.FormatV1 {
		// We skip past the reserved header area for now--we want the header to be at the
		// beginning of the file, to make it quick to seek there and also to make it easier
		// to identify situations where a blob file has been truncated incorrectly. And we
		// don't know what exactly is going to be in the header yet--we won't know what the
		// hash or size or timestamp or expiration or signature fields need to be until we
		// have received the whole piece.
		//
		// Once the caller calls Commit() on this writer, we will seek back to the beginning
		// of the file and write the header.
		if _, err := blobWriter.Seek(V1PieceHeaderReservedArea, io.SeekStart); err != nil {
			return nil, Error.Wrap(err)
		}
	}
	w.blob = blobWriter
	w.hash = pkcrypto.NewHash()
	w.blobs = blobs
	w.satellite = satellite
	return w, nil
}
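
// A typical write flow, as a minimal sketch (error handling elided; log, blobWriter,
// blobs, satelliteID, pieceData, and limit are assumed to come from the caller's setup):
//
//	w, _ := NewWriter(log, blobWriter, blobs, satelliteID)
//	_, _ = w.Write(pieceData)
//	header := &pb.PieceHeader{Hash: w.Hash(), OrderLimit: *limit}
//	_ = w.Commit(ctx, header)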

// Write writes data to the blob and calculates the hash.
func (w *Writer) Write(data []byte) (int, error) {
	n, err := w.blob.Write(data)
	w.pieceSize += int64(n)
	_, _ = w.hash.Write(data[:n]) // guaranteed not to return an error
	if err == io.EOF {
		return n, err
	}
	return n, Error.Wrap(err)
}

// Size returns the amount of data written to the piece so far, not including the size of
// the piece header.
func (w *Writer) Size() int64 { return w.pieceSize }

// Hash returns the hash of data written so far.
func (w *Writer) Hash() []byte { return w.hash.Sum(nil) }

// Commit commits the piece to permanent storage.
func (w *Writer) Commit(ctx context.Context, pieceHeader *pb.PieceHeader) (err error) {
	defer mon.Task()(&ctx)(&err)
	if w.closed {
		return Error.New("already closed")
	}

	// point of no return: after this we definitely either commit or cancel
	w.closed = true
	defer func() {
		if err != nil {
			err = Error.Wrap(errs.Combine(err, w.blob.Cancel(ctx)))
		} else {
			err = Error.Wrap(w.blob.Commit(ctx))
		}
	}()

	// if the blob store is a cache, update the cache, but only if we did not
	// encounter an error
	if cache, ok := w.blobs.(*BlobsUsageCache); ok {
		defer func() {
			if err == nil {
				totalSize, sizeErr := w.blob.Size()
				if sizeErr != nil {
					w.log.Error("Failed to calculate piece size, cannot update the cache",
						zap.Error(sizeErr), zap.Stringer("piece ID", pieceHeader.GetOrderLimit().PieceId),
						zap.Stringer("satellite ID", w.satellite))
					return
				}
				cache.Update(ctx, w.satellite, totalSize, w.Size(), 0)
			}
		}()
	}

	formatVer := w.blob.StorageFormatVersion()
	if formatVer == filestore.FormatV0 {
		return nil
	}

	pieceHeader.FormatVersion = pb.PieceHeader_FormatVersion(formatVer)
	headerBytes, err := pb.Marshal(pieceHeader)
	if err != nil {
		return err
	}
	mon.IntVal("storagenode_pieces_pieceheader_size").Observe(int64(len(headerBytes)))
	if len(headerBytes) > (V1PieceHeaderReservedArea - v1PieceHeaderFramingSize) {
		// This should never happen under normal circumstances, and it might deserve a panic(),
		// but I'm not *entirely* sure this case can't be triggered by a malicious uplink. Are
		// google.protobuf.Timestamp fields variable-width?
		mon.Meter("storagenode_pieces_pieceheader_overflow").Mark(len(headerBytes))
		return Error.New("marshaled piece header too big!")
	}
	size, err := w.blob.Size()
	if err != nil {
		return err
	}
	if _, err := w.blob.Seek(0, io.SeekStart); err != nil {
		return err
	}

	// We need to store some "framing" bytes first, because protobufs are not self-delimiting.
	// In cases where the serialized pieceHeader is not exactly V1PieceHeaderReservedArea bytes
	// (probably _all_ cases), without this marker, we wouldn't have any way to take the
	// V1PieceHeaderReservedArea bytes from a piece blob and trim off the right number of zeroes
	// at the end so that the protobuf unmarshals correctly.
	var framingBytes [v1PieceHeaderFramingSize]byte
	binary.BigEndian.PutUint16(framingBytes[:], uint16(len(headerBytes)))
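	// As a worked example (illustrative, not part of the original source): a 211-byte
	// marshaled header is framed as the bytes 0x00 0xd3, so the first 2+211 bytes of the
	// reserved area are meaningful and the remaining 299 bytes stay zeroed.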
	if _, err = w.blob.Write(framingBytes[:]); err != nil {
		return Error.New("failed writing piece framing field at file start: %w", err)
	}

	// Now write the serialized header bytes.
	if _, err = w.blob.Write(headerBytes); err != nil {
		return Error.New("failed writing piece header at file start: %w", err)
	}

	// seek back to the end, as blob.Commit will truncate from the current file position.
	// (don't try to seek(0, io.SeekEnd), because dir.CreateTemporaryFile preallocs space
	// and the actual end of the file might be far past the intended end of the piece.)
	if _, err := w.blob.Seek(size, io.SeekStart); err != nil {
		return err
	}
	return nil
}

// Cancel deletes any temporarily written data.
func (w *Writer) Cancel(ctx context.Context) (err error) {
	defer mon.Task()(&ctx)(&err)
	if w.closed {
		return nil
	}
	w.closed = true
	return Error.Wrap(w.blob.Cancel(ctx))
}

// Reader implements a piece reader that reads content from the blob store.
type Reader struct {
	formatVersion storage.FormatVersion

	blob      storage.BlobReader
	pos       int64 // relative to file start; i.e., it includes piece header
	pieceSize int64 // piece size only; i.e., not including piece header
}

// NewReader creates a new reader for storage.BlobReader.
func NewReader(blob storage.BlobReader) (*Reader, error) {
	size, err := blob.Size()
	if err != nil {
		return nil, Error.Wrap(err)
	}
	formatVersion := blob.StorageFormatVersion()
	if formatVersion >= filestore.FormatV1 {
		if size < V1PieceHeaderReservedArea {
			return nil, Error.New("invalid piece file for storage format version %d: too small for header (%d < %d)", formatVersion, size, V1PieceHeaderReservedArea)
		}
		size -= V1PieceHeaderReservedArea
	}
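
	// At this point size holds the content length only. As an illustrative example
	// (not from the original source), a 1000-byte FormatV1 blob yields a pieceSize
	// of 1000 - 512 = 488.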
	reader := &Reader{
		formatVersion: formatVersion,
		blob:          blob,
		pieceSize:     size,
	}
	return reader, nil
}

// StorageFormatVersion returns the storage format version of the piece being read.
func (r *Reader) StorageFormatVersion() storage.FormatVersion {
	return r.formatVersion
}

// GetPieceHeader reads, unmarshals, and returns the piece header. It may only be called once,
// before any Read() calls. (Retrieving the header at any time could be supported, but for the sake
// of performance we need to understand why and how often that would happen.)
func (r *Reader) GetPieceHeader() (*pb.PieceHeader, error) {
	if r.formatVersion < filestore.FormatV1 {
		return nil, BadFormatVersion.New("Can't get piece header from storage format V0 reader")
	}
	if r.pos != 0 {
		return nil, Error.New("GetPieceHeader called when not at the beginning of the blob stream")
	}
	// We need to read the size of the serialized header protobuf before we read the header
	// itself. The headers aren't a constant size, although V1PieceHeaderReservedArea is
	// constant. Without this marker, we wouldn't have any way to know how much of the
	// reserved header area is supposed to make up the serialized header protobuf.
	var headerBytes [V1PieceHeaderReservedArea]byte
	framingBytes := headerBytes[:v1PieceHeaderFramingSize]
	n, err := io.ReadFull(r.blob, framingBytes)
	if err != nil {
		return nil, Error.Wrap(err)
	}
	if n != v1PieceHeaderFramingSize {
		return nil, Error.New("Could not read whole PieceHeader framing field")
	}
	r.pos += int64(n)
	headerSize := binary.BigEndian.Uint16(framingBytes)
	if headerSize > (V1PieceHeaderReservedArea - v1PieceHeaderFramingSize) {
		return nil, Error.New("PieceHeader framing field claims impossible size of %d bytes", headerSize)
	}

	// Now we can read the actual serialized header.
	pieceHeaderBytes := headerBytes[v1PieceHeaderFramingSize : v1PieceHeaderFramingSize+headerSize]
	n, err = io.ReadFull(r.blob, pieceHeaderBytes)
	if err != nil {
		return nil, Error.Wrap(err)
	}
	r.pos += int64(n)

	// Deserialize and return.
	header := &pb.PieceHeader{}
	if err := pb.Unmarshal(pieceHeaderBytes, header); err != nil {
		return nil, Error.New("piece header: %w", err)
	}
	return header, nil
}
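
// A minimal read-side sketch (illustrative; blobReader and dst are assumed to come
// from the caller): fetch the header first, then stream the piece content.
//
//	r, _ := NewReader(blobReader)
//	header, _ := r.GetPieceHeader() // must come before any Read calls
//	_ = header.GetHash()            // e.g., for later content verification
//	_, _ = io.Copy(dst, r)
//	_ = r.Close()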

// Read reads data from the underlying blob, buffering as necessary.
func (r *Reader) Read(data []byte) (int, error) {
	if r.formatVersion >= filestore.FormatV1 && r.pos < V1PieceHeaderReservedArea {
		// This should only be necessary once per reader, or zero times if GetPieceHeader
		// is used first.
		if _, err := r.blob.Seek(V1PieceHeaderReservedArea, io.SeekStart); err != nil {
			return 0, Error.Wrap(err)
		}
		// record that the blob is now positioned just past the header, so we don't
		// repeat this seek (and re-read the same bytes) on subsequent short reads
		r.pos = V1PieceHeaderReservedArea
	}
	n, err := r.blob.Read(data)
	r.pos += int64(n)
	if err == io.EOF {
		return n, err
	}
	return n, Error.Wrap(err)
}

// Seek seeks to the specified location within the piece content (ignoring the header).
func (r *Reader) Seek(offset int64, whence int) (int64, error) {
	if whence == io.SeekStart && r.formatVersion >= filestore.FormatV1 {
		offset += V1PieceHeaderReservedArea
	}
	if whence == io.SeekStart && r.pos == offset {
		// already at the requested position; translate the blob-relative position back
		// to a content-relative one before returning it
		pos := r.pos
		if r.formatVersion >= filestore.FormatV1 {
			pos -= V1PieceHeaderReservedArea
		}
		return pos, nil
	}

	pos, err := r.blob.Seek(offset, whence)
	r.pos = pos
	if r.formatVersion >= filestore.FormatV1 {
		if pos < V1PieceHeaderReservedArea {
			// any position within the file header should show as 0 here
			pos = 0
		} else {
			pos -= V1PieceHeaderReservedArea
		}
	}
	if err == io.EOF {
		return pos, err
	}
	return pos, Error.Wrap(err)
}
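
// As an illustrative note (not from the original source): on a FormatV1 piece,
// Seek(0, io.SeekStart) moves the underlying blob to byte V1PieceHeaderReservedArea
// (512) and reports the new position as 0, so callers can treat the piece content
// as if it started at the beginning of the stream.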

// ReadAt reads data at the specified offset, which is relative to the piece content,
// not the underlying blob. The piece header is not reachable by this method.
func (r *Reader) ReadAt(data []byte, offset int64) (int, error) {
	if r.formatVersion >= filestore.FormatV1 {
		offset += V1PieceHeaderReservedArea
	}
	n, err := r.blob.ReadAt(data, offset)
	if err == io.EOF {
		return n, err
	}
	return n, Error.Wrap(err)
}

// Size returns the amount of data in the piece.
func (r *Reader) Size() int64 { return r.pieceSize }

// Close closes the reader.
func (r *Reader) Close() error {
	return Error.Wrap(r.blob.Close())
}