Added rpc server (#40)

* Added piecestore

* gofmt

* Added requested changes

* Added cli

* Removed ranger because I wanted something that can stand alone

* Add example of http server using piece store

* Changed piecestore code to make it more optimal for error handling

* Merged with piecestore

* Added missing package

* Forgot io import

* gofmt

* gofmt

* Forgot io

* Make path by hash exported

* updated to simplify again whoops

* Updated server to work real good

* Forgot ampersand

* Updated to match FilePiece

* Merged in cam's delete code

* Remove unused io

* Added RPC code

* Give the download request a reader

* Removed http server stuff; changed receive stream to say io.reader

* Added expiration date to shardInfo

* Change all instances of Shard to Piece; change protobuf name; moved client instance to outside functions

* added ttl info request

* Move scripts to http server pr; added close method for Retrieve api

* added rpc server tests for getting piece meta data and retrieval routes

* Resolved linter errors, moved rpc server to pkg, updated go.mod to use latest protobuf

* Imported cams test

* Bump gometalinter deadline

* WIP adding tests

* added tests for store and delete routes

* Add changes as requested by Kaloyan, also cleaned up some code

* Get the code actually working whoops

* More cleanup

* Separating database calls from api.go

* need to rename expiration

* Added some changes requested by JT

* Fix read size

* Fixed total amount to read

* added tests

* Simplify protobuf, add store tests, edited api to handle invalid stores properly, return errors instead of messages

* Moved rpc client and server to piece store

* Moved piecestore protobuf to the /protos folder

* Cleaned up messages

* Clean up linter errors

* Added missing sqlite import

* Add ability to do iterative reads and writes to pstore

* Incrementally read data

* Fix linter and import errors

* Solve linter Error

* Change return types

* begin test refactor

* refactored to implement only 1 db connection, moved SQLite row checking into separate function and removed defer on rows.Close(), fixed os.tempDir in rpc_test.go

* Cleaning up tests

* Added retrieve tests

* refactored delete tests

* Deleted old tests

* Updated cmd/piecestore to reflect changes to piecestore

* Refactored server tests and server/client store code

* gofmt

* WIP implementing TTL struct

* Read 4k at a time when Retrieving

* implemented ttl struct

* Accidentally removed fpiece dependency?

* Double resolve merge conflict

* Moved client to the examples since it is an example

* Change hash to id in protobuf. TODO: Change client and server to reflect these changes

* Update client and protobuf

* changed hash to id

* Handle eof properly in retrieve

* Id -> ID

* forgot to change import for client after moving

* Moved client and server main to examples

* Made changes requested by JT

* checkEntries is now private, created currentTime variable for checkEntries, added defer rows.Close()

* Print not fatal

* Handle overflow when reading from server

* added const IDLength

* removed types from comments

* Add reader/writer for download data from and uploading data to server

* goimports and comments

* fixed nits, casts, added OK const, DBCleanup now exits program on error

* Add stream reader and writer for server

* Fix errors

* i before e except after c lol

* customizable data dir

* Forgot to rename variable

* customizable data dir

* Handle closing of server stream properly

* linter

* pass on inserting the same data twice

* linter

* linter

* Do the easy things JT asked for

* Handle overflow from reads properly; handle custom db path

* Handle overflow for server stream read; TODO Combine server and client stream reads

* Allow for TTL of 0 to stay forever

* Make Client cleaner for user
This commit is contained in:
Alexander Leitner 2018-06-02 14:14:59 -04:00 committed by GitHub
parent f2407727d4
commit ff8e191a9a
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
17 changed files with 2288 additions and 586 deletions

View File

@ -3,7 +3,7 @@
lint: check-copyrights
@echo "Running ${@}"
@gometalinter \
--deadline=60s \
--deadline=70s \
--disable-all \
--enable=golint \
--enable=goimports \

View File

@ -7,6 +7,7 @@ import (
"context"
"flag"
"fmt"
"io"
"os"
"sort"
@ -63,7 +64,15 @@ func run(ctx context.Context) error {
return argError.New(fmt.Sprintf("Path (%s) is a directory, not a file", c.Args().Get(1)))
}
_, err = pstore.Store(c.Args().Get(0), file, int64(fileInfo.Size()), 0, c.Args().Get(2))
dataFileChunk, err := pstore.StoreWriter(c.Args().Get(0), int64(fileInfo.Size()), 0, c.Args().Get(2))
if err != nil {
return err
}
// Close when finished
defer dataFileChunk.Close()
_, err = io.Copy(dataFileChunk, file)
return err
},
@ -89,13 +98,16 @@ func run(ctx context.Context) error {
return argError.New(fmt.Sprintf("Path (%s) is a file, not a directory", c.Args().Get(1)))
}
_, err = pstore.Retrieve(c.Args().Get(0), os.Stdout, -1, 0, c.Args().Get(1))
dataFileChunk, err := pstore.RetrieveReader(c.Args().Get(0), -1, 0, c.Args().Get(1))
if err != nil {
return err
}
return nil
// Close when finished
defer dataFileChunk.Close()
_, err = io.Copy(os.Stdout, dataFileChunk)
return err
},
},
{

View File

@ -0,0 +1,175 @@
// Copyright (C) 2018 Storj Labs, Inc.
// See LICENSE for copying information.
package main
import (
"fmt"
"io"
"log"
"os"
"path/filepath"
"time"
"github.com/urfave/cli"
"github.com/zeebo/errs"
"golang.org/x/net/context"
"google.golang.org/grpc"
"storj.io/storj/examples/piecestore/rpc/client/utils"
"storj.io/storj/pkg/piecestore/rpc/client"
)
var argError = errs.Class("argError")
// main wires up a small demo CLI (upload / download / delete) that
// exercises the piece store RPC client against a server on :7777.
func main() {
	app := cli.NewApp()

	// Set up connection with rpc server
	var conn *grpc.ClientConn
	conn, err := grpc.Dial(":7777", grpc.WithInsecure())
	if err != nil {
		log.Fatalf("did not connect: %s", err)
	}
	defer conn.Close()

	routeClient := client.New(context.Background(), conn)

	app.Commands = []cli.Command{
		{
			Name:    "upload",
			Aliases: []string{"u"},
			Usage:   "upload data",
			Action: func(c *cli.Context) error {
				if c.Args().Get(0) == "" {
					return argError.New("No input file specified")
				}

				file, err := os.Open(c.Args().Get(0))
				if err != nil {
					return err
				}
				// Close the file when we are done
				defer file.Close()

				fileInfo, err := file.Stat()
				if err != nil {
					return err
				}
				if fileInfo.IsDir() {
					return argError.New(fmt.Sprintf("Path (%s) is a directory, not a file", c.Args().Get(0)))
				}

				var fileOffset, storeOffset int64 = 0, 0
				var length = fileInfo.Size()
				// TTL is one day from now (seconds since epoch).
				var ttl = time.Now().Unix() + 86400

				// The piece id is derived from the content being stored.
				id, err := utils.DetermineID(file, fileOffset, length)
				if err != nil {
					return err
				}

				// Created a section reader so that we can concurrently retrieve the same file.
				dataSection := io.NewSectionReader(file, fileOffset, length)

				writer, err := routeClient.StorePieceRequest(id, fileOffset, length, ttl, storeOffset)
				if err != nil {
					fmt.Printf("Failed to send meta data to server to store file of id: %s\n", id)
					return err
				}

				_, err = io.Copy(writer, dataSection)
				if err != nil {
					fmt.Printf("Failed to store file of id: %s\n", id)
				} else {
					fmt.Printf("successfully stored file of id: %s\n", id)
				}

				// Close flushes the stream; its error is what callers care about.
				return writer.Close()
			},
		},
		{
			Name:    "download",
			Aliases: []string{"d"},
			Usage:   "download data",
			Action: func(c *cli.Context) error {
				if c.Args().Get(0) == "" {
					return argError.New("No id specified")
				}
				id := c.Args().Get(0)

				if c.Args().Get(1) == "" {
					return argError.New("No output file specified")
				}
				// Refuse to clobber an existing output file.
				_, err := os.Stat(c.Args().Get(1))
				if err != nil && !os.IsNotExist(err) {
					return err
				}
				if err == nil {
					return argError.New("File already exists")
				}
				dataPath := c.Args().Get(1)

				if err = os.MkdirAll(filepath.Dir(dataPath), 0700); err != nil {
					return err
				}

				// Create File on file system
				dataFile, err := os.OpenFile(dataPath, os.O_RDWR|os.O_CREATE, 0755)
				if err != nil {
					return err
				}
				defer dataFile.Close()

				// Ask the server how big the piece is so we can request all of it.
				pieceInfo, err := routeClient.PieceMetaRequest(id)
				if err != nil {
					// Remove the partially-created output file on failure.
					os.Remove(dataPath)
					return err
				}

				reader, err := routeClient.RetrievePieceRequest(id, 0, pieceInfo.Size)
				if err != nil {
					fmt.Printf("Failed to retrieve file of id: %s\n", id)
					os.Remove(dataPath)
					return err
				}

				_, err = io.Copy(dataFile, reader)
				if err != nil {
					fmt.Printf("Failed to retrieve file of id: %s\n", id)
					os.Remove(dataPath)
				} else {
					fmt.Printf("Successfully retrieved file of id: %s\n", id)
				}

				return reader.Close()
			},
		},
		{
			Name:    "delete",
			Aliases: []string{"x"},
			Usage:   "delete data",
			Action: func(c *cli.Context) error {
				if c.Args().Get(0) == "" {
					return argError.New("Missing data Id")
				}
				// Return the result directly instead of assigning to the err
				// variable captured from the enclosing main scope.
				return routeClient.DeletePieceRequest(c.Args().Get(0))
			},
		},
	}

	err = app.Run(os.Args)
	if err != nil {
		log.Fatal(err)
	}
}

View File

@ -0,0 +1,23 @@
// Copyright (C) 2018 Storj Labs, Inc.
// See LICENSE for copying information.
package utils
import (
"crypto/sha256"
"fmt"
"io"
"os"
)
// DetermineID -- Get the id for a section of data
func DetermineID(f *os.File, offset int64, length int64) (string, error) {
h := sha256.New()
fSection := io.NewSectionReader(f, offset, length)
if _, err := io.Copy(h, fSection); err != nil {
return "", err
}
return fmt.Sprintf("%x", h.Sum(nil)), nil
}

View File

@ -0,0 +1,86 @@
// Copyright (C) 2018 Storj Labs, Inc.
// See LICENSE for copying information.
package main
import (
"fmt"
"log"
"net"
"os"
"path"
"regexp"
_ "github.com/mattn/go-sqlite3"
"google.golang.org/grpc"
"storj.io/storj/pkg/piecestore/rpc/server"
"storj.io/storj/pkg/piecestore/rpc/server/ttl"
pb "storj.io/storj/protos/piecestore"
)
// main starts a piece store gRPC server. Optional args:
//
//	os.Args[1] — listen port (2-6 digits), default "7777"
//	os.Args[2] — base folder for piece data and the TTL database,
//	             default: current working directory
func main() {
	port := "7777"

	// Override the port only when the argument looks like a port number.
	if len(os.Args) > 1 && os.Args[1] != "" {
		if matched, _ := regexp.MatchString(`^\d{2,6}$`, os.Args[1]); matched {
			port = os.Args[1]
		}
	}

	// Get default folder for storing data and database
	dataFolder, err := os.Getwd()
	if err != nil {
		log.Fatal(err)
	}

	// Check if directory for storing data and database was passed in
	if len(os.Args) > 2 && os.Args[2] != "" {
		dataFolder = os.Args[2]
	}

	fileInfo, err := os.Stat(dataFolder)
	if err != nil {
		// Pass err as a value, not as a format string (go vet: non-constant format).
		log.Fatal(err)
	}
	if !fileInfo.IsDir() {
		log.Fatalf("dataFolder %s is not a directory", dataFolder)
	}

	// Suggestion for whoever implements this: Instead of using port use node id
	dataDir := path.Join(dataFolder, port, "/piece-store-data/")
	dbPath := path.Join(dataFolder, port, "/ttl-data.db")

	ttlDB, err := ttl.NewTTL(dbPath)
	if err != nil {
		// Include the underlying error so the failure is diagnosable.
		log.Fatalf("failed to open DB: %v", err)
	}

	// create a listener on TCP port
	lis, err := net.Listen("tcp", fmt.Sprintf(":%s", port))
	if err != nil {
		log.Fatalf("failed to listen: %v", err)
	}
	defer lis.Close()

	// create a server instance
	s := server.Server{PieceStoreDir: dataDir, DB: ttlDB}

	// create a gRPC server object
	grpcServer := grpc.NewServer()

	// attach the api service to the server
	pb.RegisterPieceStoreRoutesServer(grpcServer, &s)

	// routinely check DB and delete expired entries
	go func() {
		// Only die if cleanup actually reported an error.
		if err := s.DB.DBCleanup(dataDir); err != nil {
			log.Fatalf("Error in DBCleanup: %v", err)
		}
	}()

	// start the server
	if err := grpcServer.Serve(lis); err != nil {
		log.Fatalf("failed to serve: %s", err)
	}
}

3
go.mod
View File

@ -4,9 +4,10 @@ require (
github.com/fsnotify/fsnotify v1.4.7
github.com/go-redis/redis v0.0.0-20180417061816-9ccc23344a52
github.com/gogo/protobuf v1.0.0
github.com/golang/protobuf v1.0.0
github.com/golang/protobuf v1.1.0
github.com/hashicorp/hcl v0.0.0-20180404174102-ef8a98b0bbce
github.com/magiconair/properties v1.7.6
github.com/mattn/go-sqlite3 v1.7.0
github.com/mitchellh/mapstructure v0.0.0-20180220230111-00c29f56e238
github.com/pelletier/go-toml v1.1.0
github.com/spacemonkeygo/errors v0.0.0-20171212215202-9064522e9fd1

View File

@ -4,7 +4,6 @@
package pstore
import (
"io"
"os"
"path"
"path/filepath"
@ -14,95 +13,79 @@ import (
"storj.io/storj/pkg/filepiece"
)
// IDLength -- Minimum ID length
const IDLength = 20
// Errors
var (
ArgError = errs.Class("argError")
FSError = errs.Class("fsError")
)
// PathByHash -- creates datapath from hash and dir
func PathByHash(hash, dir string) (string, error) {
if len(hash) < 20 {
return "", ArgError.New("Invalid hash length")
// PathByID creates datapath from id and dir
func PathByID(id, dir string) (string, error) {
if len(id) < IDLength {
return "", ArgError.New("Invalid id length")
}
if dir == "" {
return "", ArgError.New("No path provided")
}
folder1 := string(hash[0:2])
folder2 := string(hash[2:4])
fileName := string(hash[4:])
folder1 := string(id[0:2])
folder2 := string(id[2:4])
fileName := string(id[4:])
return path.Join(dir, folder1, folder2, fileName), nil
}
// Store -- Store data into piece store
// hash (string) Hash of the data to be stored
// r (io.Reader) File/Stream that contains the contents of the data to be stored
// length (length) Size of the data to be stored
// psFileOffset (offset) Offset of the data that you are writing. Useful for multiple connections to split the data transfer
// dir (string) pstore directory containing all other data stored
// returns (error) error if failed and nil if successful
func Store(hash string, r io.Reader, length int64, psFileOffset int64, dir string) (int64, error) {
// StoreWriter stores data into piece store in multiple writes
// id is the id of the data to be stored
// dir is the pstore directory containing all other data stored
// returns error if failed and nil if successful
func StoreWriter(id string, length int64, psFileOffset int64, dir string) (*fpiece.Chunk, error) {
if psFileOffset < 0 {
return 0, ArgError.New("Offset is less than 0. Must be greater than or equal to 0")
}
if length < 0 {
return 0, ArgError.New("Length is less than 0. Must be greater than or equal to 0")
}
if dir == "" {
return 0, ArgError.New("No path provided")
return nil, ArgError.New("Offset is less than 0. Must be greater than or equal to 0")
}
dataPath, err := PathByHash(hash, dir)
dataPath, err := PathByID(id, dir)
if err != nil {
return 0, err
return nil, err
}
// Create directory path on file system
if err = os.MkdirAll(filepath.Dir(dataPath), 0700); err != nil {
return 0, err
return nil, err
}
// Create File on file system
dataFile, err := os.OpenFile(dataPath, os.O_RDWR|os.O_CREATE, 0755)
if err != nil {
return 0, err
return nil, err
}
dataFileChunk, err := fpiece.NewChunk(dataFile, psFileOffset, length)
if err != nil {
return 0, err
}
// Close when finished
defer dataFile.Close()
return io.CopyN(dataFileChunk, r, length)
return fpiece.NewChunk(dataFile, psFileOffset, length)
}
// Retrieve -- Retrieve data from pstore directory
// hash (string) Hash of the stored data
// w (io.Writer) Stream that recieves the stored data
// length (length) Amount of data to read. Read all data if -1
// readPosOffset (offset) Offset of the data that you are reading. Useful for multiple connections to split the data transfer
// dir (string) pstore directory containing all other data stored
// returns (int64, error) returns err if failed and the number of bytes retrieved if successful
func Retrieve(hash string, w io.Writer, length int64, readPosOffset int64, dir string) (int64, error) {
if dir == "" {
return 0, ArgError.New("No path provided")
}
dataPath, err := PathByHash(hash, dir)
// RetrieveReader retrieves data from pstore directory
// id is the id of the stored data
// length is the amount of data to read. Read all data if -1
// readPosOffset is the offset of the data that you are reading. Useful for multiple connections to split the data transfer
// dir is the pstore directory containing all other data stored
// returns error if failed and nil if successful
func RetrieveReader(id string, length int64, readPosOffset int64, dir string) (*fpiece.Chunk, error) {
dataPath, err := PathByID(id, dir)
if err != nil {
return 0, err
return nil, err
}
fileInfo, err := os.Stat(dataPath)
if err != nil {
return 0, err
return nil, err
}
// If offset is greater than file size return
if readPosOffset >= fileInfo.Size() || readPosOffset < 0 {
return 0, ArgError.New("Invalid offset: %v", readPosOffset)
return nil, ArgError.New("Invalid offset: %v", readPosOffset)
}
// If length less than 0 read the entire file
@ -117,32 +100,19 @@ func Retrieve(hash string, w io.Writer, length int64, readPosOffset int64, dir s
dataFile, err := os.OpenFile(dataPath, os.O_RDONLY, 0755)
if err != nil {
return 0, err
return nil, err
}
// Close when finished
defer dataFile.Close()
// Created a section reader so that we can concurrently retrieve the same file.
dataFileChunk, err := fpiece.NewChunk(dataFile, readPosOffset, length)
if err != nil {
return 0, err
}
total, err := io.CopyN(w, dataFileChunk, length)
return total, err
return fpiece.NewChunk(dataFile, readPosOffset, length)
}
// Delete -- Delete data from farmer
// hash (string) Hash of the data to be stored
// dir (string) pstore directory containing all other data stored
// returns (error) if failed and nil if successful
func Delete(hash string, dir string) error {
if dir == "" {
return ArgError.New("No path provided")
}
dataPath, err := PathByHash(hash, dir)
// Delete deletes data from farmer
// id is the id of the data to be stored
// dir is the pstore directory containing all other data stored
// returns error if failed and nil if successful
func Delete(id string, dir string) error {
dataPath, err := PathByID(id, dir)
if err != nil {
return err
}

View File

@ -4,553 +4,308 @@
package pstore
import (
"fmt"
"io"
"io/ioutil"
"log"
"os"
"path"
"path/filepath"
"testing"
"github.com/stretchr/testify/assert"
)
var tmpfile string
func TestStore(t *testing.T) {
t.Run("it stores data successfully", func(t *testing.T) {
file, err := os.Open(tmpfile)
tests := []struct{
it string
id string
size int64
offset int64
content []byte
expectedContent []byte
err string
} {
{
it: "should successfully store data",
id: "0123456789ABCDEFGHIJ",
size: 5,
offset: 0,
content: []byte("butts"),
expectedContent: []byte("butts"),
err: "",
},
{
it: "should successfully store data by offset",
id: "0123456789ABCDEFGHIJ",
size: 5,
offset: 5,
content: []byte("butts"),
expectedContent: []byte("butts"),
err: "",
},
{
it: "should successfully store data by chunk",
id: "0123456789ABCDEFGHIJ",
size: 2,
offset: 3,
content: []byte("butts"),
expectedContent: []byte("bu"),
err: "",
},
{
it: "should return an error when given an invalid id",
id: "012",
size: 5,
offset: 0,
content: []byte("butts"),
expectedContent: []byte("butts"),
err: "argError: Invalid id length",
},
{
it: "should return an error when given negative offset",
id: "0123456789ABCDEFGHIJ",
size: 5,
offset: -1,
content: []byte("butts"),
expectedContent: []byte(""),
err: "argError: Offset is less than 0. Must be greater than or equal to 0",
},
{
it: "should return an error when given negative length",
id: "0123456789ABCDEFGHIJ",
size: -1,
offset: 0,
content: []byte("butts"),
expectedContent: []byte(""),
err: "Invalid Length",
},
}
if err != nil {
t.Errorf("Error opening tmp file: %s", err.Error())
return
}
for _, tt := range tests {
t.Run(tt.it, func(t *testing.T) {
assert := assert.New(t)
storeFile, err := StoreWriter(tt.id, tt.size, tt.offset, os.TempDir())
if tt.err != "" {
if assert.NotNil(err) {
assert.Equal(tt.err, err.Error())
}
return
} else if err != nil {
t.Errorf("Error: %s", err.Error())
return
}
fi, err := file.Stat()
if err != nil {
t.Errorf("Could not stat test file: %s", err.Error())
return
}
// Write chunk received to disk
_, err = storeFile.Write(tt.content)
assert.Nil(err)
defer file.Close()
storeFile.Close()
hash := "0123456789ABCDEFGHIJ"
Store(hash, file, int64(fi.Size()), 0, os.TempDir())
folder1 := string(tt.id[0:2])
folder2 := string(tt.id[2:4])
fileName := string(tt.id[4:])
folder1 := string(hash[0:2])
folder2 := string(hash[2:4])
fileName := string(hash[4:])
createdFilePath := path.Join(os.TempDir(), folder1, folder2, fileName)
createdFilePath := path.Join(os.TempDir(), folder1, folder2, fileName)
defer os.RemoveAll(path.Join(os.TempDir(), folder1))
_, lStatErr := os.Lstat(createdFilePath)
if lStatErr != nil {
t.Errorf("No file was created from Store(): %s", lStatErr.Error())
return
}
createdFile, err := os.Open(createdFilePath)
if err != nil {
t.Errorf("Error: %s opening created file %s", err.Error(), createdFilePath)
return
}
createdFile, openCreatedError := os.Open(createdFilePath)
if openCreatedError != nil {
t.Errorf("Error: %s opening created file %s", openCreatedError.Error(), createdFilePath)
}
defer createdFile.Close()
buffer := make([]byte, tt.size)
createdFile.Seek(tt.offset, 0)
_, _ = createdFile.Read(buffer)
buffer := make([]byte, 5)
createdFile.Seek(0, 0)
_, _ = createdFile.Read(buffer)
createdFile.Close()
os.RemoveAll(path.Join(os.TempDir(), folder1))
if string(buffer) != "butts" {
t.Errorf("Expected data butts does not equal Actual data %s", string(buffer))
}
})
t.Run("it stores data by offset successfully", func(t *testing.T) {
file, err := os.Open(tmpfile)
if err != nil {
t.Errorf("Error opening tmp file: %s", err.Error())
return
}
fi, err := file.Stat()
if err != nil {
t.Errorf("Could not stat test file: %s", err.Error())
return
}
defer file.Close()
hash := "0123456789ABCDEFGHIJ"
Store(hash, file, int64(fi.Size()), 2, os.TempDir())
folder1 := string(hash[0:2])
folder2 := string(hash[2:4])
fileName := string(hash[4:])
createdFilePath := path.Join(os.TempDir(), folder1, folder2, fileName)
defer os.RemoveAll(path.Join(os.TempDir(), folder1))
_, lStatErr := os.Lstat(createdFilePath)
if lStatErr != nil {
t.Errorf("No file was created from Store(): %s", lStatErr.Error())
return
}
createdFile, openCreatedError := os.Open(createdFilePath)
if openCreatedError != nil {
t.Errorf("Error: %s opening created file %s", openCreatedError.Error(), createdFilePath)
}
defer createdFile.Close()
buffer := make([]byte, 7)
createdFile.Seek(0, 0)
_, _ = createdFile.Read(buffer)
// \0\0butts
expected := []byte{0, 0, 98, 117, 116, 116, 115}
if string(buffer) != string(expected) {
t.Errorf("Expected data (%v) does not equal Actual data (%v)", expected, buffer)
}
})
t.Run("it stores data by chunk successfully", func(t *testing.T) {
file, err := os.Open(tmpfile)
if err != nil {
t.Errorf("Error opening tmp file: %s", err.Error())
return
}
defer file.Close()
hash := "0123456789ABCDEFGHIJ"
Store(hash, file, 4, 0, os.TempDir())
folder1 := string(hash[0:2])
folder2 := string(hash[2:4])
fileName := string(hash[4:])
createdFilePath := path.Join(os.TempDir(), folder1, folder2, fileName)
defer os.RemoveAll(path.Join(os.TempDir(), folder1))
_, lStatErr := os.Lstat(createdFilePath)
if lStatErr != nil {
t.Errorf("No file was created from Store(): %s", lStatErr.Error())
return
}
createdFile, openCreatedError := os.Open(createdFilePath)
if openCreatedError != nil {
t.Errorf("Error: %s opening created file %s", openCreatedError.Error(), createdFilePath)
}
defer createdFile.Close()
buffer := make([]byte, 4)
createdFile.Seek(0, 0)
_, _ = createdFile.Read(buffer)
// butt
expected := []byte{98, 117, 116, 116}
if string(buffer) != string(expected) {
t.Errorf("Expected data %s does not equal Actual data %s", string(expected), string(buffer))
}
})
t.Run("it should return hash err if the hash is too short", func(t *testing.T) {
assert := assert.New(t)
file, err := os.Open(tmpfile)
if err != nil {
t.Errorf("Error opening tmp file: %s", err.Error())
return
}
defer file.Close()
hash := "11111111"
_, err = Store(hash, file, 5, 0, os.TempDir())
assert.NotNil(err)
if err != nil {
assert.Equal(err.Error(), "argError: Invalid hash length", "They should have the same error message")
}
})
// Test passing in negative offset
t.Run("it should return an error when given negative offset", func(t *testing.T) {
assert := assert.New(t)
file, err := os.Open(tmpfile)
if err != nil {
t.Errorf("Error opening tmp file: %s", err.Error())
return
}
fi, err := file.Stat()
if err != nil {
t.Errorf("Could not stat test file: %s", err.Error())
return
}
defer file.Close()
hash := "0123456789ABCDEFGHIJ"
_, err = Store(hash, file, int64(fi.Size()), -12, os.TempDir())
assert.NotNil(err)
if err != nil {
assert.Equal(err.Error(), "argError: Offset is less than 0. Must be greater than or equal to 0", err.Error())
}
})
// Test passing in a negative length
t.Run("it should return an error when given length less than 0", func(t *testing.T) {
assert := assert.New(t)
file, err := os.Open(tmpfile)
if err != nil {
t.Errorf("Error opening tmp file: %s", err.Error())
return
}
_, err = file.Stat()
if err != nil {
t.Errorf("Could not stat test file: %s", err.Error())
return
}
defer file.Close()
hash := "0123456789ABCDEFGHIJ"
_, err = Store(hash, file, -1, 0, os.TempDir())
assert.NotNil(err)
if err != nil {
assert.Equal(err.Error(), "argError: Length is less than 0. Must be greater than or equal to 0", err.Error())
}
})
if string(buffer) != string(tt.expectedContent) {
t.Errorf("Expected data butts does not equal Actual data %s", string(buffer))
return
}
})
}
}
func TestRetrieve(t *testing.T) {
t.Run("it retrieves data successfully", func(t *testing.T) {
file, err := os.Open(tmpfile)
if err != nil {
t.Errorf("Error opening tmp file: %s", err.Error())
return
}
tests := []struct{
it string
id string
size int64
offset int64
content []byte
expectedContent []byte
err string
} {
{
it: "should successfully retrieve data",
id: "0123456789ABCDEFGHIJ",
size: 5,
offset: 0,
content: []byte("butts"),
expectedContent: []byte("butts"),
err: "",
},
{
it: "should successfully retrieve data by offset",
id: "0123456789ABCDEFGHIJ",
size: 5,
offset: 5,
content: []byte("butts"),
expectedContent: []byte("butts"),
err: "",
},
{
it: "should successfully retrieve data by chunk",
id: "0123456789ABCDEFGHIJ",
size: 2,
offset: 5,
content: []byte("bu"),
expectedContent: []byte("bu"),
err: "",
},
{
it: "should return an error when given negative offset",
id: "0123456789ABCDEFGHIJ",
size: 0,
offset: -1337,
content: []byte("butts"),
expectedContent: []byte(""),
err: "argError: Invalid offset: -1337",
},
{
it: "should successfully retrieve data with negative length",
id: "0123456789ABCDEFGHIJ",
size: -1,
offset: 0,
content: []byte("butts"),
expectedContent: []byte("butts"),
err: "",
},
}
fi, err := file.Stat()
if err != nil {
t.Errorf("Could not stat test file: %s", err.Error())
return
}
defer file.Close()
for _, tt := range tests {
t.Run(tt.it, func(t *testing.T) {
assert := assert.New(t)
hash := "0123456789ABCDEFGHIJ"
Store(hash, file, int64(fi.Size()), 0, os.TempDir())
folder1 := string(tt.id[0:2])
folder2 := string(tt.id[2:4])
fileName := string(tt.id[4:])
// Create file for retrieving data into
retrievalFilePath := path.Join(os.TempDir(), "retrieved.txt")
retrievalFile, err := os.OpenFile(retrievalFilePath, os.O_RDWR|os.O_CREATE, 0777)
if err != nil {
t.Errorf("Error creating file: %s", err.Error())
return
}
defer os.RemoveAll(retrievalFilePath)
defer retrievalFile.Close()
createdFilePath := path.Join(os.TempDir(), folder1, folder2, fileName)
_, err = Retrieve(hash, retrievalFile, int64(fi.Size()), 0, os.TempDir())
if err != nil {
if err != io.EOF {
t.Errorf("Retrieve Error: %s", err.Error())
if err := os.MkdirAll(filepath.Dir(createdFilePath), 0700); err != nil {
t.Errorf("Error: %s when creating dir", err.Error())
return
}
}
buffer := make([]byte, 5)
retrievalFile.Seek(0, 0)
_, _ = retrievalFile.Read(buffer)
fmt.Printf("Retrieved data: %s", string(buffer))
if string(buffer) != "butts" {
t.Errorf("Expected data butts does not equal Actual data %s", string(buffer))
}
})
t.Run("it retrieves data by offset successfully", func(t *testing.T) {
file, err := os.Open(tmpfile)
if err != nil {
t.Errorf("Error opening tmp file: %s", err.Error())
return
}
fi, err := file.Stat()
if err != nil {
t.Errorf("Could not stat test file: %s", err.Error())
return
}
defer file.Close()
hash := "0123456789ABCDEFGHIJ"
Store(hash, file, int64(fi.Size()), 0, os.TempDir())
// Create file for retrieving data into
retrievalFilePath := path.Join(os.TempDir(), "retrieved.txt")
retrievalFile, err := os.OpenFile(retrievalFilePath, os.O_RDWR|os.O_CREATE, 0777)
if err != nil {
t.Errorf("Error creating file: %s", err.Error())
return
}
defer os.RemoveAll(retrievalFilePath)
defer retrievalFile.Close()
_, err = Retrieve(hash, retrievalFile, int64(fi.Size()), 2, os.TempDir())
if err != nil {
if err != io.EOF {
t.Errorf("Retrieve Error: %s", err.Error())
createdFile, err := os.OpenFile(createdFilePath, os.O_RDWR|os.O_CREATE, 0755)
if err != nil {
t.Errorf("Error: %s opening created file %s", err.Error(), createdFilePath)
return
}
}
buffer := make([]byte, 3)
retrievalFile.Seek(0, 0)
_, _ = retrievalFile.Read(buffer)
fmt.Printf("Retrieved data: %s", string(buffer))
if string(buffer) != "tts" {
t.Errorf("Expected data (tts) does not equal Actual data (%s)", string(buffer))
}
})
t.Run("it retrieves data by chunk successfully", func(t *testing.T) {
file, err := os.Open(tmpfile)
if err != nil {
t.Errorf("Error opening tmp file: %s", err.Error())
return
}
fi, err := file.Stat()
if err != nil {
t.Errorf("Could not stat test file: %s", err.Error())
return
}
defer file.Close()
hash := "0123456789ABCDEFGHIJ"
Store(hash, file, int64(fi.Size()), 0, os.TempDir())
// Create file for retrieving data into
retrievalFilePath := path.Join(os.TempDir(), "retrieved.txt")
retrievalFile, err := os.OpenFile(retrievalFilePath, os.O_RDWR|os.O_CREATE, 0777)
if err != nil {
t.Errorf("Error creating file: %s", err.Error())
return
}
defer os.RemoveAll(retrievalFilePath)
defer retrievalFile.Close()
_, err = Retrieve(hash, retrievalFile, 3, 0, os.TempDir())
if err != nil {
if err != io.EOF {
t.Errorf("Retrieve Error: %s", err.Error())
createdFile.Seek(tt.offset, 0)
_, err = createdFile.Write(tt.content)
if err != nil {
t.Errorf("Error: %s writing to created file", err.Error())
return
}
}
createdFile.Close()
buffer := make([]byte, 3)
retrievalFile.Seek(0, 0)
_, _ = retrievalFile.Read(buffer)
fmt.Printf("Retrieved data: %s", string(buffer))
if string(buffer) != "but" {
t.Errorf("Expected data (but) does not equal Actual data (%s)", string(buffer))
}
})
// Test passing in negative offset
t.Run("it should return an error when retrieving with offset less 0", func(t *testing.T) {
assert := assert.New(t)
file, err := os.Open(tmpfile)
if err != nil {
t.Errorf("Error opening tmp file: %s", err.Error())
return
}
fi, err := file.Stat()
if err != nil {
t.Errorf("Could not stat test file: %s", err.Error())
return
}
defer file.Close()
hash := "0123456789ABCDEFGHIJ"
Store(hash, file, int64(fi.Size()), 0, os.TempDir())
// Create file for retrieving data into
retrievalFilePath := path.Join(os.TempDir(), "retrieved.txt")
retrievalFile, err := os.OpenFile(retrievalFilePath, os.O_RDWR|os.O_CREATE, 0777)
if err != nil {
t.Errorf("Error creating file: %s", err.Error())
return
}
defer os.RemoveAll(retrievalFilePath)
defer retrievalFile.Close()
_, err = Retrieve(hash, retrievalFile, int64(fi.Size()), -1, os.TempDir())
assert.NotNil(err)
if err != nil {
assert.Equal("argError: Invalid offset: -1", err.Error(), err.Error())
}
})
// Test passing in negative length
t.Run("it should return the entire file successfully when retrieving with negative length", func(t *testing.T) {
file, err := os.Open(tmpfile)
if err != nil {
t.Errorf("Error opening tmp file: %s", err.Error())
return
}
fi, err := file.Stat()
if err != nil {
t.Errorf("Could not stat test file: %s", err.Error())
return
}
defer file.Close()
hash := "0123456789ABCDEFGHIJ"
Store(hash, file, int64(fi.Size()), 0, os.TempDir())
// Create file for retrieving data into
retrievalFilePath := path.Join(os.TempDir(), "retrieved.txt")
retrievalFile, err := os.OpenFile(retrievalFilePath, os.O_RDWR|os.O_CREATE, 0777)
if err != nil {
t.Errorf("Error creating file: %s", err.Error())
return
}
defer os.RemoveAll(retrievalFilePath)
defer retrievalFile.Close()
_, err = Retrieve(hash, retrievalFile, -1, 0, os.TempDir())
if err != nil {
if err != io.EOF {
t.Errorf("Retrieve Error: %s", err.Error())
storeFile, err := RetrieveReader(tt.id, tt.size, tt.offset, os.TempDir())
if tt.err != "" {
if assert.NotNil(err) {
assert.Equal(tt.err, err.Error())
}
return
} else if err != nil {
t.Errorf("Error: %s", err.Error())
return
}
}
buffer := make([]byte, 5)
size := tt.size
if tt.size < 0 {
size = int64(len(tt.content))
}
buffer := make([]byte, size)
storeFile.Read(buffer)
storeFile.Close()
retrievalFile.Seek(0, 0)
_, _ = retrievalFile.Read(buffer)
fmt.Printf("Retrieved data: %s", string(buffer))
if string(buffer) != "butts" {
t.Errorf("Expected data butts does not equal Actual data %s", string(buffer))
}
})
os.RemoveAll(path.Join(os.TempDir(), folder1))
if string(buffer) != string(tt.expectedContent) {
t.Errorf("Expected data butts does not equal Actual data %s", string(buffer))
return
}
})
}
}
func TestDelete(t *testing.T) {
t.Run("it deletes data successfully", func(t *testing.T) {
file, err := os.Open(tmpfile)
if err != nil {
t.Errorf("Error opening tmp file: %s", err.Error())
tests := []struct{
it string
id string
err string
} {
{
it: "should successfully delete data",
id: "11111111111111111111",
err: "",
},
{
it: "should return nil-err with non-existent id",
id: "11111111111111111111",
err: "",
},
{
it: "should err with invalid id length",
id: "111111",
err: "argError: Invalid id length",
},
}
for _, tt := range tests {
t.Run(tt.it, func(t *testing.T) {
assert := assert.New(t)
folder1 := string(tt.id[0:2])
folder2 := string(tt.id[2:4])
fileName := string(tt.id[4:])
createdFilePath := path.Join(os.TempDir(), folder1, folder2, fileName)
if err := os.MkdirAll(filepath.Dir(createdFilePath), 0700); err != nil {
t.Errorf("Error: %s when creating dir", err.Error())
return
}
createdFile, err := os.OpenFile(createdFilePath, os.O_RDWR|os.O_CREATE, 0755)
if err != nil {
t.Errorf("Error: %s opening created file %s", err.Error(), createdFilePath)
return
}
createdFile.Close()
err = Delete(tt.id, os.TempDir())
if tt.err != "" {
if assert.NotNil(err) {
assert.Equal(tt.err, err.Error())
}
return
} else if err != nil {
t.Errorf("Error: %s", err.Error())
return
}
if _, err = os.Stat(createdFilePath); os.IsExist(err) {
t.Errorf("Error deleting file")
return
}
return
}
fi, err := file.Stat()
if err != nil {
t.Errorf("Could not stat test file: %s", err.Error())
return
}
defer file.Close()
hash := "0123456789ABCDEFGHIJ"
Store(hash, file, int64(fi.Size()), 0, os.TempDir())
folder1 := string(hash[0:2])
folder2 := string(hash[2:4])
fileName := string(hash[4:])
if _, err := os.Stat(path.Join(os.TempDir(), folder1, folder2, fileName)); err != nil {
t.Errorf("Failed to Store test file")
return
}
Delete(hash, os.TempDir())
_, err = os.Stat(path.Join(os.TempDir(), folder1, folder2, fileName))
if err == nil {
t.Errorf("Failed to Delete test file")
return
}
})
// Test passing in a hash that doesn't exist
t.Run("it returns an error if hash doesn't exist", func(t *testing.T) {
assert := assert.New(t)
file, err := os.Open(tmpfile)
if err != nil {
t.Errorf("Error opening tmp file: %s", err.Error())
return
}
fi, err := file.Stat()
if err != nil {
t.Errorf("Could not stat test file: %s", err.Error())
return
}
defer file.Close()
hash := "0123456789ABCDEFGHIJ"
Store(hash, file, int64(fi.Size()), 0, os.TempDir())
folder1 := string(hash[0:2])
folder2 := string(hash[2:4])
fileName := string(hash[4:])
if _, err := os.Stat(path.Join(os.TempDir(), folder1, folder2, fileName)); err != nil {
t.Errorf("Failed to Store test file")
return
}
falseHash := ""
err = Delete(falseHash, os.TempDir())
assert.NotNil(err)
if err != nil {
assert.NotEqual(err.Error(), "argError: Hash folder does not exist", "They should be equal")
}
})
})
}
}
// TestMain creates the temporary fixture file shared by the tests in this
// package, runs the suite, and removes the file afterwards.
func TestMain(m *testing.M) {
	tmp, err := ioutil.TempFile("", "api_test")
	if err != nil {
		log.Fatal(err)
	}
	tmpfile = tmp.Name()
	defer os.Remove(tmpfile) // clean up
	if _, err := tmp.Write([]byte("butts")); err != nil {
		log.Fatal(err)
	}
	if err := tmp.Close(); err != nil {
		log.Fatal(err)
	}
	m.Run()
}

View File

@ -0,0 +1,67 @@
// Copyright (C) 2018 Storj Labs, Inc.
// See LICENSE for copying information.
package client
import (
"fmt"
"io"
"log"
"golang.org/x/net/context"
"google.golang.org/grpc"
pb "storj.io/storj/protos/piecestore"
)
// Client bundles the generated piece store gRPC client with the context
// that is applied to every call it makes.
type Client struct {
	ctx   context.Context           // context used for all RPCs issued by this client
	route pb.PieceStoreRoutesClient // generated gRPC stub for the piece store routes
}
// New initializes a Client that issues piece store RPCs over conn,
// using ctx for every call.
func New(ctx context.Context, conn *grpc.ClientConn) *Client {
	route := pb.NewPieceStoreRoutesClient(conn)
	return &Client{ctx: ctx, route: route}
}
// PieceMetaRequest asks the server for meta data about the piece with the
// given id.
func (client *Client) PieceMetaRequest(id string) (*pb.PieceSummary, error) {
	req := &pb.PieceId{Id: id}
	return client.route.Piece(client.ctx, req)
}
// StorePieceRequest begins uploading a piece to the server.
//
// It opens the Store stream, sends a preliminary message carrying the piece
// meta data (id, length, ttl, storeOffset), and returns an io.WriteCloser
// that streams the piece content; closing the writer completes the upload.
//
// NOTE(review): the dataOffset parameter is currently unused — confirm
// whether it should be forwarded in the preliminary message.
func (client *Client) StorePieceRequest(id string, dataOffset int64, length int64, ttl int64, storeOffset int64) (io.WriteCloser, error) {
	stream, err := client.route.Store(client.ctx)
	if err != nil {
		return nil, err
	}
	// Send preliminary data
	if err := stream.Send(&pb.PieceStore{Id: id, Size: length, Ttl: ttl, StoreOffset: storeOffset}); err != nil {
		// Best-effort close; the Send error is the one worth reporting.
		stream.CloseAndRecv()
		return nil, fmt.Errorf("%v.Send() = %v", stream, err)
	}
	// err is known to be nil here; return literal nil rather than the stale
	// variable (the original returned `err`, which only worked by accident).
	return &StreamWriter{stream: stream}, nil
}
// RetrievePieceRequest begins downloading a piece from the server and
// returns an io.ReadCloser over the incoming stream.
func (client *Client) RetrievePieceRequest(id string, offset int64, length int64) (io.ReadCloser, error) {
	req := &pb.PieceRetrieval{Id: id, Size: length, StoreOffset: offset}
	stream, err := client.route.Retrieve(client.ctx, req)
	if err != nil {
		return nil, err
	}
	return &StreamReader{stream: stream}, nil
}
// DeletePieceRequest asks the server to delete the piece with the given id
// and logs the server's summary reply.
func (client *Client) DeletePieceRequest(id string) error {
	req := &pb.PieceDelete{Id: id}
	reply, err := client.route.Delete(client.ctx, req)
	if err != nil {
		return err
	}
	log.Printf("Route summary : %v", reply)
	return nil
}

View File

@ -0,0 +1,75 @@
// Copyright (C) 2018 Storj Labs, Inc.
// See LICENSE for copying information.
package client
import (
"fmt"
"log"
pb "storj.io/storj/protos/piecestore"
)
// StreamWriter adapts the gRPC upload stream to io.Writer so piece content
// can be sent to the server with standard io helpers.
type StreamWriter struct {
	stream pb.PieceStoreRoutes_StoreClient // open upload stream; messages carry Content chunks
}
// Write sends b to the server as a single content-only PieceStore message
// and reports the whole length as written.
func (s *StreamWriter) Write(b []byte) (int, error) {
	msg := &pb.PieceStore{Content: b}
	if err := s.stream.Send(msg); err != nil {
		return 0, fmt.Errorf("%v.Send() = %v", s.stream, err)
	}
	return len(b), nil
}
// Close completes the upload by half-closing the stream and waiting for the
// server's summary, which is logged on success.
func (s *StreamWriter) Close() error {
	summary, closeErr := s.stream.CloseAndRecv()
	if closeErr != nil {
		return closeErr
	}
	log.Printf("Route summary: %v", summary)
	return nil
}
// StreamReader adapts the gRPC download stream to io.Reader.
type StreamReader struct {
	stream       pb.PieceStoreRoutes_RetrieveClient // open download stream
	overflowData []byte                             // bytes received but not yet delivered to the caller
}
// Read implements io.Reader for the piece download stream.
//
// It first drains any previously buffered overflow; otherwise it receives
// the next message from the server, copies as much as fits into b, and
// buffers the unread remainder of the message for the next call.
func (s *StreamReader) Read(b []byte) (int, error) {
	// Use overflow data if we have it
	if len(s.overflowData) > 0 {
		n := copy(b, s.overflowData)        // Copy from overflow into buffer
		s.overflowData = s.overflowData[n:] // Overflow is set to whatever remains
		return n, nil
	}
	// Receive data from server stream
	msg, err := s.stream.Recv()
	if err != nil {
		return 0, err
	}
	// Copy data into buffer
	n := copy(b, msg.Content)
	// If there is leftover data, save it into overflow for the next read.
	// Bug fix: the original saved b[len(b):] — an empty slice of the
	// destination buffer — which silently dropped the unread tail of
	// msg.Content.
	if n < len(msg.Content) {
		s.overflowData = msg.Content[n:]
	}
	return n, nil
}
// Close closes the client's send direction of the download stream.
// NOTE(review): gRPC CloseSend does not tear down the whole stream — confirm
// callers don't rely on a full stream shutdown here.
func (s *StreamReader) Close() error {
	return s.stream.CloseSend()
}

View File

@ -0,0 +1,56 @@
// Copyright (C) 2018 Storj Labs, Inc.
// See LICENSE for copying information.
package server
import (
pb "storj.io/storj/protos/piecestore"
)
// StreamWriter adapts the server's Retrieve stream to io.Writer so piece
// data can be sent to the client with io.Copy.
type StreamWriter struct {
	stream pb.PieceStoreRoutes_RetrieveServer // open download stream toward the client
}
// Write sends b to the client as one PieceRetrievalStream message carrying
// both the chunk length and its content.
func (s *StreamWriter) Write(b []byte) (int, error) {
	msg := &pb.PieceRetrievalStream{Size: int64(len(b)), Content: b}
	if err := s.stream.Send(msg); err != nil {
		return 0, err
	}
	return len(b), nil
}
// StreamReader adapts the server's Store stream to io.Reader so uploaded
// piece content can be consumed with io.Copy.
type StreamReader struct {
	stream       pb.PieceStoreRoutes_StoreServer // open upload stream from the client
	overflowData []byte                          // bytes received but not yet delivered to the caller
}
// Read implements io.Reader for the piece upload stream.
//
// It first drains any previously buffered overflow; otherwise it receives
// the next message from the client, copies as much as fits into b, and
// buffers the unread remainder of the message for the next call.
func (s *StreamReader) Read(b []byte) (int, error) {
	// Use overflow data if we have it
	if len(s.overflowData) > 0 {
		n := copy(b, s.overflowData)        // Copy from overflow into buffer
		s.overflowData = s.overflowData[n:] // Overflow is set to whatever remains
		return n, nil
	}
	// Receive data from server stream
	msg, err := s.stream.Recv()
	if err != nil {
		return 0, err
	}
	// Copy data into buffer
	n := copy(b, msg.Content)
	// If there is leftover data, save it into overflow for the next read.
	// Bug fix: the original saved b[len(b):] — an empty slice of the
	// destination buffer — which silently dropped the unread tail of
	// msg.Content.
	if n < len(msg.Content) {
		s.overflowData = msg.Content[n:]
	}
	return n, nil
}

View File

@ -0,0 +1,142 @@
// Copyright (C) 2018 Storj Labs, Inc.
// See LICENSE for copying information.
package server
import (
"fmt"
"io"
"log"
"os"
"golang.org/x/net/context"
"storj.io/storj/pkg/piecestore"
"storj.io/storj/pkg/piecestore/rpc/server/ttl"
pb "storj.io/storj/protos/piecestore"
)
// OK is the summary message reported for successful operations.
const OK = "OK"
// Server implements the PieceStoreRoutes gRPC service.
type Server struct {
	PieceStoreDir string   // root directory pieces are stored under
	DB            *ttl.TTL // database tracking piece time-to-live entries
}
// Store receives a piece over a client stream and writes it to disk.
//
// The first message on the stream carries the piece meta data (id, size,
// ttl, storeOffset); subsequent messages carry raw content bytes. On
// success it replies with a PieceStoreSummary reporting the byte count.
func (s *Server) Store(stream pb.PieceStoreRoutes_StoreServer) error {
	log.Println("Storing data...")
	// Receive initial meta data about what's being stored
	piece, err := stream.Recv()
	if err != nil {
		return err
	}
	// If we put in the database first then that checks if the data already exists
	if err := s.DB.AddTTLToDB(piece.Id, piece.Ttl); err != nil {
		log.Println(err)
		return err
	}
	// Initialize file for storing data
	storeFile, err := pstore.StoreWriter(piece.Id, piece.Size, piece.StoreOffset, s.PieceStoreDir)
	if err != nil {
		return err
	}
	defer storeFile.Close()
	// Stream the remaining messages straight into the file.
	reader := &StreamReader{stream: stream}
	total, err := io.Copy(storeFile, reader)
	if err != nil {
		return err
	}
	// NOTE(review): on a short write the TTL row added above is left behind;
	// consider removing it when the store fails.
	if total < piece.Size {
		// total is already int64 (io.Copy's return type); the original's
		// int64(total) conversions were redundant.
		return fmt.Errorf("Received %v bytes of total %v bytes", total, piece.Size)
	}
	log.Println("Successfully stored data.")
	return stream.SendAndClose(&pb.PieceStoreSummary{Message: OK, TotalReceived: total})
}
// Retrieve streams stored piece data back to the client, honoring the
// requested offset and size (a negative size means "send the whole file").
func (s *Server) Retrieve(pieceMeta *pb.PieceRetrieval, stream pb.PieceStoreRoutes_RetrieveServer) error {
	log.Println("Retrieving data...")
	// Resolve the on-disk location of the piece.
	piecePath, err := pstore.PathByID(pieceMeta.Id, s.PieceStoreDir)
	if err != nil {
		return err
	}
	fileInfo, err := os.Stat(piecePath)
	if err != nil {
		return err
	}
	// Determine how many bytes to send; a negative request means the
	// entire file.
	totalToRead := pieceMeta.Size
	if totalToRead < 0 {
		totalToRead = fileInfo.Size()
	}
	storeFile, err := pstore.RetrieveReader(pieceMeta.Id, totalToRead, pieceMeta.StoreOffset, s.PieceStoreDir)
	if err != nil {
		return err
	}
	defer storeFile.Close()
	// Copy the file straight onto the gRPC stream.
	if _, err := io.Copy(&StreamWriter{stream: stream}, storeFile); err != nil {
		return err
	}
	log.Println("Successfully retrieved data.")
	return nil
}
// Piece returns meta data (size and TTL expiration) for the stored piece
// with the given id.
func (s *Server) Piece(ctx context.Context, in *pb.PieceId) (*pb.PieceSummary, error) {
	log.Println("Getting Meta data...")
	piecePath, err := pstore.PathByID(in.Id, s.PieceStoreDir)
	if err != nil {
		return nil, err
	}
	fileInfo, err := os.Stat(piecePath)
	if err != nil {
		return nil, err
	}
	// Read database to calculate expiration
	ttl, err := s.DB.GetTTLByID(in.Id)
	if err != nil {
		return nil, err
	}
	log.Println("Meta data retrieved.")
	summary := &pb.PieceSummary{Id: in.Id, Size: fileInfo.Size(), Expiration: ttl}
	return summary, nil
}
// Delete removes the stored piece with the given id from disk and drops its
// TTL bookkeeping row.
func (s *Server) Delete(ctx context.Context, in *pb.PieceDelete) (*pb.PieceDeleteSummary, error) {
	log.Println("Deleting data...")
	// Remove the piece from disk first, then its TTL entry.
	err := pstore.Delete(in.Id, s.PieceStoreDir)
	if err == nil {
		err = s.DB.DeleteTTLByID(in.Id)
	}
	if err != nil {
		return nil, err
	}
	log.Println("Successfully deleted data.")
	return &pb.PieceDeleteSummary{Message: OK}, nil
}

View File

@ -0,0 +1,463 @@
// Copyright (C) 2018 Storj Labs, Inc.
// See LICENSE for copying information.
package server
import (
"bytes"
"database/sql"
"fmt"
"io"
"log"
"net"
"os"
"path"
"testing"
_ "github.com/mattn/go-sqlite3"
"golang.org/x/net/context"
"google.golang.org/grpc"
"storj.io/storj/pkg/piecestore"
"storj.io/storj/pkg/piecestore/rpc/server/ttl"
pb "storj.io/storj/protos/piecestore"
)
// Shared fixtures for the server tests; db, s, and c are wired up in
// TestMain. (Redundant declared types removed — golint: "should omit type
// ... from declaration; it will be inferred".)
var tempDir = path.Join(os.TempDir(), "test-data", "3000")
var tempDBPath = path.Join(os.TempDir(), "test.db")
var db *sql.DB
var s Server
var c pb.PieceStoreRoutesClient
var testId = "11111111111111111111"
var testCreatedDate int64 = 1234567890
var testExpiration int64 = 9999999999
// TestPiece verifies the Piece meta data route for a valid id, an invalid
// id, and a nonexistent piece.
func TestPiece(t *testing.T) {
	// simulate piece stored with farmer
	file, err := pstore.StoreWriter(testId, 5, 0, s.PieceStoreDir)
	if err != nil {
		// Fix: this was a bare `return`, which silently skipped the test on
		// setup failure instead of reporting it.
		t.Errorf("Error: %v\nCould not open store writer", err)
		return
	}
	// Close when finished
	defer file.Close()
	_, err = io.Copy(file, bytes.NewReader([]byte("butts")))
	if err != nil {
		t.Errorf("Error: %v\nCould not create test piece", err)
		return
	}
	defer pstore.Delete(testId, s.PieceStoreDir)
	// set up test cases
	tests := []struct {
		id         string
		size       int64
		expiration int64
		err        string
	}{
		{ // should successfully retrieve piece meta-data
			id:         testId,
			size:       5,
			expiration: testExpiration,
			err:        "",
		},
		{ // server should err with invalid id
			id:         "123",
			size:       5,
			expiration: testExpiration,
			err:        "rpc error: code = Unknown desc = argError: Invalid id length",
		},
		{ // server should err with nonexistent file
			id:         "22222222222222222222",
			size:       5,
			expiration: testExpiration,
			err:        fmt.Sprintf("rpc error: code = Unknown desc = stat %s: no such file or directory", path.Join(os.TempDir(), "/test-data/3000/22/22/2222222222222222")),
		},
	}
	for _, tt := range tests {
		t.Run("should return expected PieceSummary values", func(t *testing.T) {
			// simulate piece TTL entry
			_, err = db.Exec(fmt.Sprintf(`INSERT INTO ttl (id, created, expires) VALUES ("%s", "%d", "%d")`, tt.id, testCreatedDate, testExpiration))
			if err != nil {
				t.Errorf("Error: %v\nCould not make TTL entry", err)
				return
			}
			defer db.Exec(fmt.Sprintf(`DELETE FROM ttl WHERE id="%s"`, tt.id))
			req := &pb.PieceId{Id: tt.id}
			resp, err := c.Piece(context.Background(), req)
			if len(tt.err) > 0 {
				// An error is expected: pass only if it matches exactly.
				if err != nil && err.Error() == tt.err {
					return
				}
				t.Errorf("\nExpected: %s\nGot: %v\n", tt.err, err)
				return
			}
			if err != nil {
				t.Errorf("\nExpected: %s\nGot: %v\n", tt.err, err)
				return
			}
			if resp.Id != tt.id || resp.Size != tt.size || resp.Expiration != tt.expiration {
				t.Errorf("Expected: %v, %v, %v\nGot: %v, %v, %v\n", tt.id, tt.size, tt.expiration, resp.Id, resp.Size, resp.Expiration)
				return
			}
			// clean up DB entry
			// NOTE(review): this deletes by testId while the deferred cleanup
			// above already deletes tt.id — confirm the extra row is intended.
			_, err = db.Exec(fmt.Sprintf(`DELETE FROM ttl WHERE id="%s"`, testId))
			if err != nil {
				t.Errorf("Error cleaning test DB entry")
				return
			}
		})
	}
}
// TestRetrieve verifies the Retrieve streaming route, including offset and
// truncated-length reads, an invalid id, and a missing file.
func TestRetrieve(t *testing.T) {
	// simulate piece stored with farmer
	file, err := pstore.StoreWriter(testId, 5, 0, s.PieceStoreDir)
	if err != nil {
		// Fix: this was a bare `return`, which silently skipped the test on
		// setup failure instead of reporting it.
		t.Errorf("Error: %v\nCould not open store writer", err)
		return
	}
	// Close when finished
	defer file.Close()
	_, err = io.Copy(file, bytes.NewReader([]byte("butts")))
	if err != nil {
		t.Errorf("Error: %v\nCould not create test piece", err)
		return
	}
	defer pstore.Delete(testId, s.PieceStoreDir)
	// set up test cases
	tests := []struct {
		id       string
		reqSize  int64
		respSize int64
		offset   int64
		content  []byte
		err      string
	}{
		{ // should successfully retrieve data
			id:       testId,
			reqSize:  5,
			respSize: 5,
			offset:   0,
			content:  []byte("butts"),
			err:      "",
		},
		{ // server should err with invalid id
			id:       "123",
			reqSize:  5,
			respSize: 5,
			offset:   0,
			content:  []byte("butts"),
			err:      "rpc error: code = Unknown desc = argError: Invalid id length",
		},
		{ // server should err with nonexistent file
			id:       "22222222222222222222",
			reqSize:  5,
			respSize: 5,
			offset:   0,
			content:  []byte("butts"),
			err:      fmt.Sprintf("rpc error: code = Unknown desc = stat %s: no such file or directory", path.Join(os.TempDir(), "/test-data/3000/22/22/2222222222222222")),
		},
		{ // server should return expected content and respSize with offset and excess reqSize
			id:       testId,
			reqSize:  5,
			respSize: 4,
			offset:   1,
			content:  []byte("utts"),
			err:      "",
		},
		{ // server should return expected content with reduced reqSize
			id:       testId,
			reqSize:  4,
			respSize: 4,
			offset:   0,
			content:  []byte("butt"),
			err:      "",
		},
	}
	for _, tt := range tests {
		t.Run("should return expected PieceRetrievalStream values", func(t *testing.T) {
			req := &pb.PieceRetrieval{Id: tt.id, Size: tt.reqSize, StoreOffset: tt.offset}
			stream, err := c.Retrieve(context.Background(), req)
			if err != nil {
				t.Errorf("Unexpected error: %v\n", err)
				return
			}
			// Server-side failures surface on the first Recv, not on Retrieve.
			resp, err := stream.Recv()
			if len(tt.err) > 0 {
				// An error is expected: pass only if it matches exactly.
				if err != nil && err.Error() == tt.err {
					return
				}
				t.Errorf("\nExpected: %s\nGot: %v\n", tt.err, err)
				return
			}
			if err != nil {
				t.Errorf("\nExpected: %s\nGot: %v\n", tt.err, err)
				return
			}
			// Idiom fix: `bytes.Equal(...) != true` -> `!bytes.Equal(...)`.
			if resp.Size != tt.respSize || !bytes.Equal(resp.Content, tt.content) {
				t.Errorf("Expected: %v, %v\nGot: %v, %v\n", tt.respSize, tt.content, resp.Size, resp.Content)
				return
			}
		})
	}
}
// TestStore exercises the Store streaming route: a meta data message is
// sent first, then a content message, then CloseAndRecv yields the
// server's PieceStoreSummary (or error).
func TestStore(t *testing.T) {
	tests := []struct {
		id            string
		size          int64
		ttl           int64
		offset        int64
		content       []byte
		message       string
		totalReceived int64
		err           string
	}{
		{ // should successfully store data
			id:            testId,
			size:          5,
			ttl:           testExpiration,
			offset:        0,
			content:       []byte("butts"),
			message:       "OK",
			totalReceived: 5,
			err:           "",
		},
		{ // should err with invalid id length
			id:            "butts",
			size:          5,
			ttl:           testExpiration,
			offset:        0,
			content:       []byte("butts"),
			message:       "",
			totalReceived: 0,
			err:           "rpc error: code = Unknown desc = argError: Invalid id length",
		},
		{ // should err with excessive size
			id:            "ABCDEFGHIJKLMNOPQRST",
			size:          10,
			ttl:           testExpiration,
			offset:        0,
			content:       []byte("butts"),
			message:       "",
			totalReceived: 5,
			err:           "rpc error: code = Unknown desc = Received 5 bytes of total 10 bytes",
		},
		{ // should successfully store data by offset
			id:            testId,
			size:          5,
			ttl:           testExpiration,
			offset:        10,
			content:       []byte("butts"),
			message:       "OK",
			totalReceived: 5,
			err:           "",
		},
		{ // should err with incorrect size
			id:            testId,
			size:          5,
			ttl:           testExpiration,
			offset:        0,
			content:       []byte(""),
			message:       "",
			totalReceived: 0,
			err:           "rpc error: code = Unknown desc = Received 0 bytes of total 5 bytes",
		},
	}
	for _, tt := range tests {
		t.Run("should return expected PieceStoreSummary values", func(t *testing.T) {
			stream, err := c.Store(context.Background())
			if err != nil {
				t.Errorf("Unexpected error: %v\n", err)
				return
			}
			// First message carries only the piece meta data (no content).
			if err := stream.Send(&pb.PieceStore{Id: tt.id, Size: tt.size, Ttl: tt.ttl, StoreOffset: tt.offset}); err != nil {
				t.Errorf("Unexpected error: %v\n", err)
				return
			}
			// Second message carries the piece content bytes.
			if err := stream.Send(&pb.PieceStore{Content: tt.content}); err != nil {
				t.Errorf("Unexpected error: %v\n", err)
				return
			}
			resp, err := stream.CloseAndRecv()
			// Remove the TTL row the server inserted, whether or not the
			// store succeeded (this defer fires at the end of each subtest).
			defer db.Exec(fmt.Sprintf(`DELETE FROM ttl WHERE id="%s"`, tt.id))
			if len(tt.err) > 0 {
				if err != nil {
					if err.Error() == tt.err {
						return
					}
				}
				t.Errorf("\nExpected: %s\nGot: %v\n", tt.err, err)
				return
			}
			if err != nil && tt.err == "" {
				t.Errorf("\nExpected: %s\nGot: %v\n", tt.err, err)
				return
			}
			if resp.Message != tt.message || resp.TotalReceived != tt.totalReceived {
				t.Errorf("Expected: %v, %v\nGot: %v, %v\n", tt.message, tt.totalReceived, resp.Message, resp.TotalReceived)
			}
		})
	}
}
// TestDelete exercises the Delete route for a stored piece, an invalid id,
// and a nonexistent piece.
func TestDelete(t *testing.T) {
	// set up test cases
	tests := []struct {
		id      string
		message string
		err     string
	}{
		{ // should successfully delete data
			id:      testId,
			message: "OK",
			err:     "",
		},
		{ // should err with invalid id length
			id:      "123",
			message: "rpc error: code = Unknown desc = argError: Invalid id length",
			err:     "rpc error: code = Unknown desc = argError: Invalid id length",
		},
		{ // should return OK with nonexistent file
			id:      "22222222222222222223",
			message: "OK",
			err:     "",
		},
	}
	for _, tt := range tests {
		t.Run("should return expected PieceDeleteSummary values", func(t *testing.T) {
			// simulate piece stored with farmer
			// NOTE(review): the piece is always created under testId even when
			// tt.id differs — confirm that is intended.
			file, err := pstore.StoreWriter(testId, 5, 0, s.PieceStoreDir)
			if err != nil {
				// NOTE(review): silent return on setup failure hides errors;
				// consider t.Errorf here.
				return
			}
			// Close when finished
			defer file.Close()
			_, err = io.Copy(file, bytes.NewReader([]byte("butts")))
			if err != nil {
				t.Errorf("Error: %v\nCould not create test piece", err)
				return
			}
			// simulate piece TTL entry
			_, err = db.Exec(fmt.Sprintf(`INSERT INTO ttl (id, created, expires) VALUES ("%s", "%d", "%d")`, tt.id, testCreatedDate, testCreatedDate))
			if err != nil {
				t.Errorf("Error: %v\nCould not make TTL entry", err)
				return
			}
			defer db.Exec(fmt.Sprintf(`DELETE FROM ttl WHERE id="%s"`, tt.id))
			defer pstore.Delete(testId, s.PieceStoreDir)
			req := &pb.PieceDelete{Id: tt.id}
			resp, err := c.Delete(context.Background(), req)
			if len(tt.err) > 0 {
				if err != nil {
					if err.Error() == tt.err {
						return
					}
				}
				t.Errorf("\nExpected: %s\nGot: %v\n", tt.err, err)
				return
			}
			if err != nil && tt.err == "" {
				t.Errorf("\nExpected: %s\nGot: %v\n", tt.err, err)
				return
			}
			if resp.Message != tt.message {
				t.Errorf("Expected: %v\nGot: %v\n", tt.message, resp.Message)
				return
			}
			// if test passes, check if file was indeed deleted
			// NOTE(review): the error from PathByID is ignored and then shadowed
			// by os.Stat — confirm nonexistent paths are handled as intended.
			filePath, err := pstore.PathByID(tt.id, s.PieceStoreDir)
			if _, err = os.Stat(filePath); os.IsNotExist(err) != true {
				t.Errorf("File not deleted")
				return
			}
		})
	}
}
// StartServer listens on :3000 and serves the piece store gRPC routes using
// the package-level Server fixture. It blocks for the life of the process.
func StartServer() {
	listener, err := net.Listen("tcp", ":3000")
	if err != nil {
		log.Fatalf("failed to listen: %v", err)
	}
	server := grpc.NewServer()
	pb.RegisterPieceStoreRoutesServer(server, &s)
	if err := server.Serve(listener); err != nil {
		log.Fatalf("failed to serve: %v", err)
	}
}
// TestMain starts the gRPC server, dials a client connection, and wires up
// the shared package-level fixtures (c, s, db) before running the tests.
func TestMain(m *testing.M) {
	go StartServer()
	// Set up a connection to the Server.
	// NOTE(review): the server goroutine may not be listening yet; grpc.Dial
	// is lazy by default, so the first RPC retries the connection — confirm.
	const address = "localhost:3000"
	conn, err := grpc.Dial(address, grpc.WithInsecure())
	if err != nil {
		fmt.Printf("did not connect: %v", err)
		return
	}
	defer conn.Close()
	c = pb.NewPieceStoreRoutesClient(conn)
	ttlDB, err := ttl.NewTTL(tempDBPath)
	if err != nil {
		log.Fatal(err)
	}
	s = Server{tempDir, ttlDB}
	db = ttlDB.DB
	// clean up temp files
	// (these deferred cleanups run because m.Run()'s result is not passed
	// to os.Exit)
	defer os.RemoveAll(path.Join(tempDir, "/test-data"))
	defer os.Remove(tempDBPath)
	defer db.Close()
	m.Run()
}

View File

@ -0,0 +1,129 @@
// Copyright (C) 2018 Storj Labs, Inc.
// See LICENSE for copying information.
package ttl
import (
"database/sql"
"fmt"
"log"
"os"
"path/filepath"
"time"
_ "github.com/mattn/go-sqlite3" // sqlite is weird and needs underscore
"storj.io/storj/pkg/piecestore"
)
// TTL wraps the sqlite database that tracks piece time-to-live entries.
type TTL struct {
	DB *sql.DB // open handle to the ttl database
}
// NewTTL opens (creating if necessary) the sqlite ttl database at DBPath,
// ensures the ttl table exists, and returns a handle to it.
func NewTTL(DBPath string) (*TTL, error) {
	// Make sure the parent directory exists before sqlite opens the file.
	if err := os.MkdirAll(filepath.Dir(DBPath), 0700); err != nil {
		return nil, err
	}
	db, err := sql.Open("sqlite3", DBPath)
	if err != nil {
		return nil, err
	}
	if _, err := db.Exec("CREATE TABLE IF NOT EXISTS `ttl` (`id` TEXT UNIQUE, `created` INT(10), `expires` INT(10));"); err != nil {
		return nil, err
	}
	return &TTL{DB: db}, nil
}
// checkEntries walks the expired rows in rows and deletes each entry's
// backing file from dir.
func checkEntries(dir string, rows *sql.Rows) error {
	for rows.Next() {
		var (
			expID   string
			expires int64
		)
		if err := rows.Scan(&expID, &expires); err != nil {
			return err
		}
		// delete file on local machine
		if err := pstore.Delete(expID, dir); err != nil {
			return err
		}
		log.Printf("Deleted file: %s\n", expID)
		if err := rows.Err(); err != nil {
			return err
		}
	}
	return nil
}
// DBCleanup periodically (every 5 seconds) removes expired TTL entries and
// their backing files under dir. It blocks until a database or deletion
// error occurs, so callers typically run it in its own goroutine.
func (ttl *TTL) DBCleanup(dir string) error {
	tickChan := time.NewTicker(time.Second * 5).C
	for range tickChan {
		if err := ttl.expireEntries(dir, time.Now().Unix()); err != nil {
			return err
		}
	}
	return nil
}
// expireEntries deletes the files and rows for every entry expired as of now.
func (ttl *TTL) expireEntries(dir string, now int64) error {
	// Parameterized query: the previous fmt.Sprintf interpolation invited
	// SQL injection and rebuilt the query string on every tick.
	rows, err := ttl.DB.Query("SELECT id, expires FROM ttl WHERE expires < ? AND expires > 0", now)
	if err != nil {
		return err
	}
	// Close explicitly per tick. The original deferred rows.Close() inside
	// an infinite loop, so the defers accumulated and never ran (leak).
	if err := checkEntries(dir, rows); err != nil {
		rows.Close()
		return err
	}
	if err := rows.Close(); err != nil {
		return err
	}
	_, err = ttl.DB.Exec("DELETE FROM ttl WHERE expires < ? AND expires > 0", now)
	return err
}
// AddTTLToDB inserts (or replaces) the TTL row for id, stamping the current
// time as the creation time.
func (ttl *TTL) AddTTLToDB(id string, expiration int64) error {
	// Parameterized query: the previous fmt.Sprintf interpolation of id was
	// open to SQL injection.
	_, err := ttl.DB.Exec(`INSERT or REPLACE INTO ttl (id, created, expires) VALUES (?, ?, ?)`, id, time.Now().Unix(), expiration)
	return err
}
// GetTTLByID looks up the expiration time for id. As before, a missing id
// yields (0, nil) rather than an error.
func (ttl *TTL) GetTTLByID(id string) (expiration int64, err error) {
	// Parameterized query: the previous fmt.Sprintf interpolation of id was
	// open to SQL injection.
	rows, err := ttl.DB.Query(`SELECT expires FROM ttl WHERE id=?`, id)
	if err != nil {
		return 0, err
	}
	defer rows.Close()
	for rows.Next() {
		if err := rows.Scan(&expiration); err != nil {
			return 0, err
		}
	}
	// Surface any iteration error the loop swallowed (the original
	// unconditionally returned nil here).
	return expiration, rows.Err()
}
// DeleteTTLByID removes the TTL row for id; deleting a nonexistent id is
// not an error.
func (ttl *TTL) DeleteTTLByID(id string) error {
	// Parameterized query: the previous fmt.Sprintf interpolation of id was
	// open to SQL injection.
	_, err := ttl.DB.Exec(`DELETE FROM ttl WHERE id=?`, id)
	return err
}

6
protos/piecestore/gen.go Normal file
View File

@ -0,0 +1,6 @@
// Copyright (C) 2018 Storj Labs, Inc.
// See LICENSE for copying information.
package piecestoreroutes
//go:generate protoc -I ./ piece_store.proto --go_out=plugins=grpc:.

View File

@ -0,0 +1,682 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: piece_store.proto
package piecestoreroutes
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
import (
context "golang.org/x/net/context"
grpc "google.golang.org/grpc"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
type PieceStore struct {
Id string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"`
Size int64 `protobuf:"varint,2,opt,name=size" json:"size,omitempty"`
Ttl int64 `protobuf:"varint,3,opt,name=ttl" json:"ttl,omitempty"`
StoreOffset int64 `protobuf:"varint,4,opt,name=storeOffset" json:"storeOffset,omitempty"`
Content []byte `protobuf:"bytes,5,opt,name=content,proto3" json:"content,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *PieceStore) Reset() { *m = PieceStore{} }
func (m *PieceStore) String() string { return proto.CompactTextString(m) }
func (*PieceStore) ProtoMessage() {}
func (*PieceStore) Descriptor() ([]byte, []int) {
return fileDescriptor_piece_store_abc1e1c59d558922, []int{0}
}
func (m *PieceStore) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_PieceStore.Unmarshal(m, b)
}
func (m *PieceStore) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_PieceStore.Marshal(b, m, deterministic)
}
func (dst *PieceStore) XXX_Merge(src proto.Message) {
xxx_messageInfo_PieceStore.Merge(dst, src)
}
func (m *PieceStore) XXX_Size() int {
return xxx_messageInfo_PieceStore.Size(m)
}
func (m *PieceStore) XXX_DiscardUnknown() {
xxx_messageInfo_PieceStore.DiscardUnknown(m)
}
var xxx_messageInfo_PieceStore proto.InternalMessageInfo
func (m *PieceStore) GetId() string {
if m != nil {
return m.Id
}
return ""
}
func (m *PieceStore) GetSize() int64 {
if m != nil {
return m.Size
}
return 0
}
func (m *PieceStore) GetTtl() int64 {
if m != nil {
return m.Ttl
}
return 0
}
func (m *PieceStore) GetStoreOffset() int64 {
if m != nil {
return m.StoreOffset
}
return 0
}
func (m *PieceStore) GetContent() []byte {
if m != nil {
return m.Content
}
return nil
}
type PieceId struct {
Id string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *PieceId) Reset() { *m = PieceId{} }
func (m *PieceId) String() string { return proto.CompactTextString(m) }
func (*PieceId) ProtoMessage() {}
func (*PieceId) Descriptor() ([]byte, []int) {
return fileDescriptor_piece_store_abc1e1c59d558922, []int{1}
}
func (m *PieceId) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_PieceId.Unmarshal(m, b)
}
func (m *PieceId) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_PieceId.Marshal(b, m, deterministic)
}
func (dst *PieceId) XXX_Merge(src proto.Message) {
xxx_messageInfo_PieceId.Merge(dst, src)
}
func (m *PieceId) XXX_Size() int {
return xxx_messageInfo_PieceId.Size(m)
}
func (m *PieceId) XXX_DiscardUnknown() {
xxx_messageInfo_PieceId.DiscardUnknown(m)
}
var xxx_messageInfo_PieceId proto.InternalMessageInfo
func (m *PieceId) GetId() string {
if m != nil {
return m.Id
}
return ""
}
type PieceSummary struct {
Id string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"`
Size int64 `protobuf:"varint,2,opt,name=size" json:"size,omitempty"`
Expiration int64 `protobuf:"varint,3,opt,name=expiration" json:"expiration,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *PieceSummary) Reset() { *m = PieceSummary{} }
func (m *PieceSummary) String() string { return proto.CompactTextString(m) }
func (*PieceSummary) ProtoMessage() {}
func (*PieceSummary) Descriptor() ([]byte, []int) {
return fileDescriptor_piece_store_abc1e1c59d558922, []int{2}
}
func (m *PieceSummary) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_PieceSummary.Unmarshal(m, b)
}
func (m *PieceSummary) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_PieceSummary.Marshal(b, m, deterministic)
}
func (dst *PieceSummary) XXX_Merge(src proto.Message) {
xxx_messageInfo_PieceSummary.Merge(dst, src)
}
func (m *PieceSummary) XXX_Size() int {
return xxx_messageInfo_PieceSummary.Size(m)
}
func (m *PieceSummary) XXX_DiscardUnknown() {
xxx_messageInfo_PieceSummary.DiscardUnknown(m)
}
var xxx_messageInfo_PieceSummary proto.InternalMessageInfo
func (m *PieceSummary) GetId() string {
if m != nil {
return m.Id
}
return ""
}
func (m *PieceSummary) GetSize() int64 {
if m != nil {
return m.Size
}
return 0
}
func (m *PieceSummary) GetExpiration() int64 {
if m != nil {
return m.Expiration
}
return 0
}
type PieceRetrieval struct {
Id string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"`
Size int64 `protobuf:"varint,2,opt,name=size" json:"size,omitempty"`
StoreOffset int64 `protobuf:"varint,3,opt,name=storeOffset" json:"storeOffset,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *PieceRetrieval) Reset() { *m = PieceRetrieval{} }
func (m *PieceRetrieval) String() string { return proto.CompactTextString(m) }
func (*PieceRetrieval) ProtoMessage() {}
func (*PieceRetrieval) Descriptor() ([]byte, []int) {
return fileDescriptor_piece_store_abc1e1c59d558922, []int{3}
}
func (m *PieceRetrieval) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_PieceRetrieval.Unmarshal(m, b)
}
func (m *PieceRetrieval) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_PieceRetrieval.Marshal(b, m, deterministic)
}
func (dst *PieceRetrieval) XXX_Merge(src proto.Message) {
xxx_messageInfo_PieceRetrieval.Merge(dst, src)
}
func (m *PieceRetrieval) XXX_Size() int {
return xxx_messageInfo_PieceRetrieval.Size(m)
}
func (m *PieceRetrieval) XXX_DiscardUnknown() {
xxx_messageInfo_PieceRetrieval.DiscardUnknown(m)
}
var xxx_messageInfo_PieceRetrieval proto.InternalMessageInfo
func (m *PieceRetrieval) GetId() string {
if m != nil {
return m.Id
}
return ""
}
func (m *PieceRetrieval) GetSize() int64 {
if m != nil {
return m.Size
}
return 0
}
func (m *PieceRetrieval) GetStoreOffset() int64 {
if m != nil {
return m.StoreOffset
}
return 0
}
// PieceRetrievalStream is one message in the server stream returned by
// Retrieve: a chunk of piece content plus the number of bytes it carries.
type PieceRetrievalStream struct {
Size int64 `protobuf:"varint,1,opt,name=size" json:"size,omitempty"`
Content []byte `protobuf:"bytes,2,opt,name=content,proto3" json:"content,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
// Reset restores m to its zero value.
func (m *PieceRetrievalStream) Reset() { *m = PieceRetrievalStream{} }
// String renders m in the proto compact text format.
func (m *PieceRetrievalStream) String() string { return proto.CompactTextString(m) }
func (*PieceRetrievalStream) ProtoMessage() {}
// Descriptor returns the gzipped file descriptor and this message's index (4) within it.
func (*PieceRetrievalStream) Descriptor() ([]byte, []int) {
return fileDescriptor_piece_store_abc1e1c59d558922, []int{4}
}
// XXX_Unmarshal parses the wire-format bytes in b into m.
func (m *PieceRetrievalStream) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_PieceRetrievalStream.Unmarshal(m, b)
}
// XXX_Marshal serializes m, appending to b.
func (m *PieceRetrievalStream) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_PieceRetrievalStream.Marshal(b, m, deterministic)
}
// XXX_Merge merges the fields of src into dst.
func (dst *PieceRetrievalStream) XXX_Merge(src proto.Message) {
xxx_messageInfo_PieceRetrievalStream.Merge(dst, src)
}
// XXX_Size reports the encoded size of m in bytes.
func (m *PieceRetrievalStream) XXX_Size() int {
return xxx_messageInfo_PieceRetrievalStream.Size(m)
}
// XXX_DiscardUnknown drops any unrecognized fields retained from unmarshaling.
func (m *PieceRetrievalStream) XXX_DiscardUnknown() {
xxx_messageInfo_PieceRetrievalStream.DiscardUnknown(m)
}
// xxx_messageInfo_PieceRetrievalStream caches reflection metadata for this type.
var xxx_messageInfo_PieceRetrievalStream proto.InternalMessageInfo
// GetSize returns the chunk's byte count; returns 0 on a nil receiver.
func (m *PieceRetrievalStream) GetSize() int64 {
if m != nil {
return m.Size
}
return 0
}
// GetContent returns the chunk's raw bytes; returns nil on a nil receiver.
func (m *PieceRetrievalStream) GetContent() []byte {
if m != nil {
return m.Content
}
return nil
}
// PieceDelete is the request for the Delete RPC: the id of the piece to remove.
type PieceDelete struct {
Id string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
// Reset restores m to its zero value.
func (m *PieceDelete) Reset() { *m = PieceDelete{} }
// String renders m in the proto compact text format.
func (m *PieceDelete) String() string { return proto.CompactTextString(m) }
func (*PieceDelete) ProtoMessage() {}
// Descriptor returns the gzipped file descriptor and this message's index (5) within it.
func (*PieceDelete) Descriptor() ([]byte, []int) {
return fileDescriptor_piece_store_abc1e1c59d558922, []int{5}
}
// XXX_Unmarshal parses the wire-format bytes in b into m.
func (m *PieceDelete) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_PieceDelete.Unmarshal(m, b)
}
// XXX_Marshal serializes m, appending to b.
func (m *PieceDelete) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_PieceDelete.Marshal(b, m, deterministic)
}
// XXX_Merge merges the fields of src into dst.
func (dst *PieceDelete) XXX_Merge(src proto.Message) {
xxx_messageInfo_PieceDelete.Merge(dst, src)
}
// XXX_Size reports the encoded size of m in bytes.
func (m *PieceDelete) XXX_Size() int {
return xxx_messageInfo_PieceDelete.Size(m)
}
// XXX_DiscardUnknown drops any unrecognized fields retained from unmarshaling.
func (m *PieceDelete) XXX_DiscardUnknown() {
xxx_messageInfo_PieceDelete.DiscardUnknown(m)
}
// xxx_messageInfo_PieceDelete caches reflection metadata for this type.
var xxx_messageInfo_PieceDelete proto.InternalMessageInfo
// GetId returns the piece id; returns "" on a nil receiver.
func (m *PieceDelete) GetId() string {
if m != nil {
return m.Id
}
return ""
}
// PieceDeleteSummary is the response of the Delete RPC: a human-readable
// status message.
type PieceDeleteSummary struct {
Message string `protobuf:"bytes,1,opt,name=message" json:"message,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
// Reset restores m to its zero value.
func (m *PieceDeleteSummary) Reset() { *m = PieceDeleteSummary{} }
// String renders m in the proto compact text format.
func (m *PieceDeleteSummary) String() string { return proto.CompactTextString(m) }
func (*PieceDeleteSummary) ProtoMessage() {}
// Descriptor returns the gzipped file descriptor and this message's index (6) within it.
func (*PieceDeleteSummary) Descriptor() ([]byte, []int) {
return fileDescriptor_piece_store_abc1e1c59d558922, []int{6}
}
// XXX_Unmarshal parses the wire-format bytes in b into m.
func (m *PieceDeleteSummary) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_PieceDeleteSummary.Unmarshal(m, b)
}
// XXX_Marshal serializes m, appending to b.
func (m *PieceDeleteSummary) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_PieceDeleteSummary.Marshal(b, m, deterministic)
}
// XXX_Merge merges the fields of src into dst.
func (dst *PieceDeleteSummary) XXX_Merge(src proto.Message) {
xxx_messageInfo_PieceDeleteSummary.Merge(dst, src)
}
// XXX_Size reports the encoded size of m in bytes.
func (m *PieceDeleteSummary) XXX_Size() int {
return xxx_messageInfo_PieceDeleteSummary.Size(m)
}
// XXX_DiscardUnknown drops any unrecognized fields retained from unmarshaling.
func (m *PieceDeleteSummary) XXX_DiscardUnknown() {
xxx_messageInfo_PieceDeleteSummary.DiscardUnknown(m)
}
// xxx_messageInfo_PieceDeleteSummary caches reflection metadata for this type.
var xxx_messageInfo_PieceDeleteSummary proto.InternalMessageInfo
// GetMessage returns the status message; returns "" on a nil receiver.
func (m *PieceDeleteSummary) GetMessage() string {
if m != nil {
return m.Message
}
return ""
}
// PieceStoreSummary is the response of the Store RPC: a human-readable
// status message and the total number of bytes the server received.
type PieceStoreSummary struct {
Message string `protobuf:"bytes,1,opt,name=message" json:"message,omitempty"`
TotalReceived int64 `protobuf:"varint,2,opt,name=totalReceived" json:"totalReceived,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
// Reset restores m to its zero value.
func (m *PieceStoreSummary) Reset() { *m = PieceStoreSummary{} }
// String renders m in the proto compact text format.
func (m *PieceStoreSummary) String() string { return proto.CompactTextString(m) }
func (*PieceStoreSummary) ProtoMessage() {}
// Descriptor returns the gzipped file descriptor and this message's index (7) within it.
func (*PieceStoreSummary) Descriptor() ([]byte, []int) {
return fileDescriptor_piece_store_abc1e1c59d558922, []int{7}
}
// XXX_Unmarshal parses the wire-format bytes in b into m.
func (m *PieceStoreSummary) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_PieceStoreSummary.Unmarshal(m, b)
}
// XXX_Marshal serializes m, appending to b.
func (m *PieceStoreSummary) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_PieceStoreSummary.Marshal(b, m, deterministic)
}
// XXX_Merge merges the fields of src into dst.
func (dst *PieceStoreSummary) XXX_Merge(src proto.Message) {
xxx_messageInfo_PieceStoreSummary.Merge(dst, src)
}
// XXX_Size reports the encoded size of m in bytes.
func (m *PieceStoreSummary) XXX_Size() int {
return xxx_messageInfo_PieceStoreSummary.Size(m)
}
// XXX_DiscardUnknown drops any unrecognized fields retained from unmarshaling.
func (m *PieceStoreSummary) XXX_DiscardUnknown() {
xxx_messageInfo_PieceStoreSummary.DiscardUnknown(m)
}
// xxx_messageInfo_PieceStoreSummary caches reflection metadata for this type.
var xxx_messageInfo_PieceStoreSummary proto.InternalMessageInfo
// GetMessage returns the status message; returns "" on a nil receiver.
func (m *PieceStoreSummary) GetMessage() string {
if m != nil {
return m.Message
}
return ""
}
// GetTotalReceived returns the byte total the server reported; returns 0 on a nil receiver.
func (m *PieceStoreSummary) GetTotalReceived() int64 {
if m != nil {
return m.TotalReceived
}
return 0
}
// init registers every generated message type with the proto runtime under
// its fully-qualified "piecestoreroutes.*" name so it can be looked up by
// name (e.g. for Any resolution and text/JSON marshaling).
func init() {
proto.RegisterType((*PieceStore)(nil), "piecestoreroutes.PieceStore")
proto.RegisterType((*PieceId)(nil), "piecestoreroutes.PieceId")
proto.RegisterType((*PieceSummary)(nil), "piecestoreroutes.PieceSummary")
proto.RegisterType((*PieceRetrieval)(nil), "piecestoreroutes.PieceRetrieval")
proto.RegisterType((*PieceRetrievalStream)(nil), "piecestoreroutes.PieceRetrievalStream")
proto.RegisterType((*PieceDelete)(nil), "piecestoreroutes.PieceDelete")
proto.RegisterType((*PieceDeleteSummary)(nil), "piecestoreroutes.PieceDeleteSummary")
proto.RegisterType((*PieceStoreSummary)(nil), "piecestoreroutes.PieceStoreSummary")
}
// Reference imports to suppress errors if they are not otherwise used.
var _ context.Context
var _ grpc.ClientConn
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
const _ = grpc.SupportPackageIsVersion4
// Client API for PieceStoreRoutes service
//
// PieceStoreRoutesClient is the client-side view of the PieceStoreRoutes
// service: piece metadata lookup, server-streamed retrieval, client-streamed
// upload, and deletion.
type PieceStoreRoutesClient interface {
// Piece fetches metadata for a stored piece.
Piece(ctx context.Context, in *PieceId, opts ...grpc.CallOption) (*PieceSummary, error)
// Retrieve opens a server stream of PieceRetrievalStream chunks.
Retrieve(ctx context.Context, in *PieceRetrieval, opts ...grpc.CallOption) (PieceStoreRoutes_RetrieveClient, error)
// Store opens a client stream for uploading PieceStore chunks.
Store(ctx context.Context, opts ...grpc.CallOption) (PieceStoreRoutes_StoreClient, error)
// Delete removes a stored piece by id.
Delete(ctx context.Context, in *PieceDelete, opts ...grpc.CallOption) (*PieceDeleteSummary, error)
}
// pieceStoreRoutesClient is the concrete client backed by a gRPC connection.
type pieceStoreRoutesClient struct {
cc *grpc.ClientConn
}
// NewPieceStoreRoutesClient returns a PieceStoreRoutesClient that issues
// RPCs over cc.
func NewPieceStoreRoutesClient(cc *grpc.ClientConn) PieceStoreRoutesClient {
return &pieceStoreRoutesClient{cc}
}
// Piece performs the unary Piece RPC.
func (c *pieceStoreRoutesClient) Piece(ctx context.Context, in *PieceId, opts ...grpc.CallOption) (*PieceSummary, error) {
out := new(PieceSummary)
err := grpc.Invoke(ctx, "/piecestoreroutes.PieceStoreRoutes/Piece", in, out, c.cc, opts...)
if err != nil {
return nil, err
}
return out, nil
}
// Retrieve sends the request, half-closes the send side, and returns a
// stream from which the caller receives PieceRetrievalStream chunks.
func (c *pieceStoreRoutesClient) Retrieve(ctx context.Context, in *PieceRetrieval, opts ...grpc.CallOption) (PieceStoreRoutes_RetrieveClient, error) {
stream, err := grpc.NewClientStream(ctx, &_PieceStoreRoutes_serviceDesc.Streams[0], c.cc, "/piecestoreroutes.PieceStoreRoutes/Retrieve", opts...)
if err != nil {
return nil, err
}
x := &pieceStoreRoutesRetrieveClient{stream}
if err := x.ClientStream.SendMsg(in); err != nil {
return nil, err
}
if err := x.ClientStream.CloseSend(); err != nil {
return nil, err
}
return x, nil
}
// PieceStoreRoutes_RetrieveClient is the receive side of the Retrieve stream.
type PieceStoreRoutes_RetrieveClient interface {
Recv() (*PieceRetrievalStream, error)
grpc.ClientStream
}
type pieceStoreRoutesRetrieveClient struct {
grpc.ClientStream
}
// Recv blocks for the next chunk from the server.
func (x *pieceStoreRoutesRetrieveClient) Recv() (*PieceRetrievalStream, error) {
m := new(PieceRetrievalStream)
if err := x.ClientStream.RecvMsg(m); err != nil {
return nil, err
}
return m, nil
}
// Store opens the client-streaming Store RPC; the caller then calls Send
// repeatedly and finishes with CloseAndRecv.
func (c *pieceStoreRoutesClient) Store(ctx context.Context, opts ...grpc.CallOption) (PieceStoreRoutes_StoreClient, error) {
stream, err := grpc.NewClientStream(ctx, &_PieceStoreRoutes_serviceDesc.Streams[1], c.cc, "/piecestoreroutes.PieceStoreRoutes/Store", opts...)
if err != nil {
return nil, err
}
x := &pieceStoreRoutesStoreClient{stream}
return x, nil
}
// PieceStoreRoutes_StoreClient is the send side of the Store stream.
type PieceStoreRoutes_StoreClient interface {
Send(*PieceStore) error
CloseAndRecv() (*PieceStoreSummary, error)
grpc.ClientStream
}
type pieceStoreRoutesStoreClient struct {
grpc.ClientStream
}
// Send transmits one upload chunk to the server.
func (x *pieceStoreRoutesStoreClient) Send(m *PieceStore) error {
return x.ClientStream.SendMsg(m)
}
// CloseAndRecv half-closes the send side and waits for the server's summary.
func (x *pieceStoreRoutesStoreClient) CloseAndRecv() (*PieceStoreSummary, error) {
if err := x.ClientStream.CloseSend(); err != nil {
return nil, err
}
m := new(PieceStoreSummary)
if err := x.ClientStream.RecvMsg(m); err != nil {
return nil, err
}
return m, nil
}
// Delete performs the unary Delete RPC.
func (c *pieceStoreRoutesClient) Delete(ctx context.Context, in *PieceDelete, opts ...grpc.CallOption) (*PieceDeleteSummary, error) {
out := new(PieceDeleteSummary)
err := grpc.Invoke(ctx, "/piecestoreroutes.PieceStoreRoutes/Delete", in, out, c.cc, opts...)
if err != nil {
return nil, err
}
return out, nil
}
// Server API for PieceStoreRoutes service
//
// PieceStoreRoutesServer is the interface a service implementation must
// satisfy; it mirrors the client API with stream wrappers instead of
// client stubs.
type PieceStoreRoutesServer interface {
Piece(context.Context, *PieceId) (*PieceSummary, error)
Retrieve(*PieceRetrieval, PieceStoreRoutes_RetrieveServer) error
Store(PieceStoreRoutes_StoreServer) error
Delete(context.Context, *PieceDelete) (*PieceDeleteSummary, error)
}
// RegisterPieceStoreRoutesServer attaches srv's implementation to the gRPC
// server s under the service descriptor defined below.
func RegisterPieceStoreRoutesServer(s *grpc.Server, srv PieceStoreRoutesServer) {
s.RegisterService(&_PieceStoreRoutes_serviceDesc, srv)
}
// _PieceStoreRoutes_Piece_Handler decodes the request, then dispatches to
// srv.Piece either directly or through the configured unary interceptor.
func _PieceStoreRoutes_Piece_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(PieceId)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(PieceStoreRoutesServer).Piece(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/piecestoreroutes.PieceStoreRoutes/Piece",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(PieceStoreRoutesServer).Piece(ctx, req.(*PieceId))
}
return interceptor(ctx, in, info, handler)
}
// _PieceStoreRoutes_Retrieve_Handler reads the single request message and
// hands the wrapped server stream to srv.Retrieve for chunked responses.
func _PieceStoreRoutes_Retrieve_Handler(srv interface{}, stream grpc.ServerStream) error {
m := new(PieceRetrieval)
if err := stream.RecvMsg(m); err != nil {
return err
}
return srv.(PieceStoreRoutesServer).Retrieve(m, &pieceStoreRoutesRetrieveServer{stream})
}
// PieceStoreRoutes_RetrieveServer is the send side of the Retrieve stream
// as seen by the server implementation.
type PieceStoreRoutes_RetrieveServer interface {
Send(*PieceRetrievalStream) error
grpc.ServerStream
}
type pieceStoreRoutesRetrieveServer struct {
grpc.ServerStream
}
// Send transmits one content chunk to the client.
func (x *pieceStoreRoutesRetrieveServer) Send(m *PieceRetrievalStream) error {
return x.ServerStream.SendMsg(m)
}
// _PieceStoreRoutes_Store_Handler hands the wrapped client stream to
// srv.Store, which receives upload chunks and replies with a summary.
func _PieceStoreRoutes_Store_Handler(srv interface{}, stream grpc.ServerStream) error {
return srv.(PieceStoreRoutesServer).Store(&pieceStoreRoutesStoreServer{stream})
}
// PieceStoreRoutes_StoreServer is the receive side of the Store stream as
// seen by the server implementation.
type PieceStoreRoutes_StoreServer interface {
SendAndClose(*PieceStoreSummary) error
Recv() (*PieceStore, error)
grpc.ServerStream
}
type pieceStoreRoutesStoreServer struct {
grpc.ServerStream
}
// SendAndClose sends the final summary and ends the RPC.
func (x *pieceStoreRoutesStoreServer) SendAndClose(m *PieceStoreSummary) error {
return x.ServerStream.SendMsg(m)
}
// Recv blocks for the next upload chunk from the client.
func (x *pieceStoreRoutesStoreServer) Recv() (*PieceStore, error) {
m := new(PieceStore)
if err := x.ServerStream.RecvMsg(m); err != nil {
return nil, err
}
return m, nil
}
// _PieceStoreRoutes_Delete_Handler decodes the request, then dispatches to
// srv.Delete either directly or through the configured unary interceptor.
func _PieceStoreRoutes_Delete_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(PieceDelete)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(PieceStoreRoutesServer).Delete(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/piecestoreroutes.PieceStoreRoutes/Delete",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(PieceStoreRoutesServer).Delete(ctx, req.(*PieceDelete))
}
return interceptor(ctx, in, info, handler)
}
// _PieceStoreRoutes_serviceDesc maps method and stream names to their
// handler functions; RegisterPieceStoreRoutesServer passes it to
// s.RegisterService. Note: the client stubs index Streams[0] (Retrieve)
// and Streams[1] (Store), so the ordering here is load-bearing.
var _PieceStoreRoutes_serviceDesc = grpc.ServiceDesc{
ServiceName: "piecestoreroutes.PieceStoreRoutes",
HandlerType: (*PieceStoreRoutesServer)(nil),
Methods: []grpc.MethodDesc{
{
MethodName: "Piece",
Handler: _PieceStoreRoutes_Piece_Handler,
},
{
MethodName: "Delete",
Handler: _PieceStoreRoutes_Delete_Handler,
},
},
Streams: []grpc.StreamDesc{
{
StreamName: "Retrieve",
Handler: _PieceStoreRoutes_Retrieve_Handler,
ServerStreams: true,
},
{
StreamName: "Store",
Handler: _PieceStoreRoutes_Store_Handler,
ClientStreams: true,
},
},
Metadata: "piece_store.proto",
}
// init registers the raw descriptor below under its .proto filename so
// tools can recover the original schema at runtime.
func init() { proto.RegisterFile("piece_store.proto", fileDescriptor_piece_store_abc1e1c59d558922) }
// fileDescriptor_piece_store_abc1e1c59d558922 is the gzipped
// FileDescriptorProto for piece_store.proto; generated data, do not edit.
var fileDescriptor_piece_store_abc1e1c59d558922 = []byte{
// 379 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x53, 0x4d, 0x6f, 0xaa, 0x40,
0x14, 0x15, 0xf0, 0xe3, 0xbd, 0xab, 0xcf, 0xe8, 0xcd, 0x5b, 0x20, 0xa9, 0x86, 0x4c, 0x4d, 0xc3,
0x8a, 0x34, 0xed, 0x5f, 0x70, 0xd3, 0x4d, 0x6d, 0x30, 0x69, 0xba, 0x6b, 0xa8, 0x5c, 0x9b, 0x49,
0x40, 0x0c, 0x8c, 0xa6, 0xed, 0xa2, 0x7f, 0xb2, 0x7f, 0xa8, 0x61, 0x18, 0x11, 0x35, 0xa8, 0xbb,
0x99, 0x73, 0x2e, 0x67, 0xce, 0x9c, 0x33, 0x40, 0x7f, 0xc5, 0x69, 0x4e, 0xaf, 0xa9, 0x88, 0x13,
0x72, 0x57, 0x49, 0x2c, 0x62, 0xec, 0x49, 0x48, 0x22, 0x49, 0xbc, 0x16, 0x94, 0xb2, 0x6f, 0x80,
0xa7, 0x0c, 0x9b, 0x65, 0x18, 0x76, 0x41, 0xe7, 0x81, 0xa9, 0xd9, 0x9a, 0xf3, 0xd7, 0xd3, 0x79,
0x80, 0x08, 0xf5, 0x94, 0x7f, 0x91, 0xa9, 0xdb, 0x9a, 0x63, 0x78, 0x72, 0x8d, 0x3d, 0x30, 0x84,
0x08, 0x4d, 0x43, 0x42, 0xd9, 0x12, 0x6d, 0x68, 0x4b, 0xc9, 0xe9, 0x62, 0x91, 0x92, 0x30, 0xeb,
0x92, 0x29, 0x43, 0x68, 0x42, 0x6b, 0x1e, 0x2f, 0x05, 0x2d, 0x85, 0xd9, 0xb0, 0x35, 0xa7, 0xe3,
0x6d, 0xb7, 0x6c, 0x00, 0x2d, 0x79, 0xfe, 0x43, 0x70, 0x78, 0x38, 0xf3, 0xa0, 0x93, 0x5b, 0x5b,
0x47, 0x91, 0x9f, 0x7c, 0x5e, 0x64, 0x6e, 0x04, 0x40, 0x1f, 0x2b, 0x9e, 0xf8, 0x82, 0xc7, 0x4b,
0xe5, 0xb1, 0x84, 0xb0, 0x67, 0xe8, 0x4a, 0x4d, 0x8f, 0x44, 0xc2, 0x69, 0xe3, 0x87, 0x17, 0xa9,
0x1e, 0x5c, 0xd0, 0x38, 0xba, 0x20, 0x9b, 0xc0, 0xff, 0x7d, 0xdd, 0x99, 0x48, 0xc8, 0x8f, 0x0a,
0x35, 0xad, 0xa4, 0x56, 0x0a, 0x43, 0xdf, 0x0f, 0x63, 0x08, 0x6d, 0xa9, 0x32, 0xa1, 0x90, 0xc4,
0x51, 0x1b, 0xcc, 0x05, 0x2c, 0xd1, 0xdb, 0x58, 0x4c, 0x68, 0x45, 0x94, 0xa6, 0xfe, 0x3b, 0xa9,
0xd1, 0xed, 0x96, 0xcd, 0xa0, 0xbf, 0xeb, 0xf6, 0xec, 0x38, 0x8e, 0xe1, 0x9f, 0x88, 0x85, 0x1f,
0x7a, 0x34, 0x27, 0xbe, 0xa1, 0x40, 0x45, 0xb0, 0x0f, 0xde, 0xfd, 0xe8, 0xd0, 0xdb, 0xa9, 0x7a,
0xf2, 0x15, 0xe1, 0x04, 0x1a, 0x12, 0xc3, 0x81, 0x7b, 0xf8, 0xc2, 0x5c, 0x55, 0xaf, 0x35, 0xaa,
0xa0, 0x94, 0x31, 0x56, 0xc3, 0x17, 0xf8, 0xa3, 0xf2, 0x23, 0xb4, 0x2b, 0xa6, 0x8b, 0x80, 0xad,
0x9b, 0x73, 0x13, 0x79, 0x05, 0xac, 0x76, 0xab, 0xe1, 0x23, 0x34, 0xf2, 0x07, 0x7e, 0x55, 0x65,
0x22, 0x03, 0xac, 0xeb, 0x53, 0x6c, 0xe1, 0xd3, 0xd1, 0x70, 0x0a, 0x4d, 0xd5, 0xd1, 0xb0, 0xe2,
0x93, 0x9c, 0xb6, 0xc6, 0x27, 0xe9, 0x42, 0xf2, 0xad, 0x29, 0xff, 0xcf, 0xfb, 0xdf, 0x00, 0x00,
0x00, 0xff, 0xff, 0x0d, 0xe5, 0xb1, 0x8d, 0xb4, 0x03, 0x00, 0x00,
}

// --- Diff-viewer residue removed for readability. The text below is the
// --- new file piece_store.proto (60 lines added), the schema from which
// --- the generated Go code above was produced.
// Copyright (C) 2018 Storj Labs, Inc.
// See LICENSE for copying information.
syntax = "proto3";
package piecestoreroutes;
// PieceStoreRoutes exposes the piece store over gRPC: metadata lookup,
// server-streamed retrieval, client-streamed upload, and deletion.
service PieceStoreRoutes {
// Piece returns metadata (size, expiration) for a stored piece.
rpc Piece(PieceId) returns (PieceSummary) {}
// Retrieve streams a stored piece's content back in chunks.
rpc Retrieve(PieceRetrieval) returns (stream PieceRetrievalStream) {}
// Store uploads piece content as a stream of chunks; the server replies
// with a summary once the stream closes.
rpc Store(stream PieceStore) returns (PieceStoreSummary) {}
// Delete removes a stored piece by id.
rpc Delete(PieceDelete) returns (PieceDeleteSummary) {}
}
// PieceStore is one chunk of an upload stream.
// NOTE(review): field names use camelCase (storeOffset, totalReceived);
// proto style prefers lower_snake_case, but renaming now would change the
// generated Go accessors and JSON names — confirm before touching.
message PieceStore {
string id = 1;
int64 size = 2;
int64 ttl = 3; // time-to-live; units not specified here — presumably seconds or a unix timestamp, confirm against server
int64 storeOffset = 4; // byte offset within the stored piece to write at
bytes content = 5; // raw chunk bytes
}
// PieceId identifies a stored piece for metadata lookup.
message PieceId {
string id = 1;
}
// PieceSummary is the metadata returned by the Piece RPC.
message PieceSummary {
string id = 1;
int64 size = 2;
int64 expiration = 3; // presumably a unix timestamp — confirm against server
}
// PieceRetrieval requests size bytes of a piece starting at storeOffset.
message PieceRetrieval {
string id = 1;
int64 size = 2;
int64 storeOffset = 3;
}
// PieceRetrievalStream is one chunk of retrieved content.
message PieceRetrievalStream {
int64 size = 1; // number of bytes carried in content
bytes content = 2;
}
// PieceDelete identifies a piece to remove.
message PieceDelete {
string id = 1;
}
// PieceDeleteSummary reports the outcome of a Delete.
message PieceDeleteSummary {
string message = 1;
}
// PieceStoreSummary reports the outcome of a Store, including the total
// number of bytes the server received.
message PieceStoreSummary {
string message = 1;
int64 totalReceived = 2;
}