2020-06-02 16:29:46 +01:00
|
|
|
// Copyright (C) 2020 Storj Labs, Inc.
|
|
|
|
// See LICENSE for copying information.
|
|
|
|
|
|
|
|
package orders
|
|
|
|
|
|
|
|
import (
|
|
|
|
"encoding/binary"
|
2020-07-14 23:31:22 +01:00
|
|
|
"fmt"
|
2020-06-02 16:29:46 +01:00
|
|
|
"io"
|
|
|
|
"os"
|
|
|
|
"path/filepath"
|
|
|
|
"strconv"
|
2020-06-03 20:21:59 +01:00
|
|
|
"strings"
|
2020-06-02 16:29:46 +01:00
|
|
|
"sync"
|
|
|
|
"time"
|
|
|
|
|
|
|
|
"github.com/zeebo/errs"
|
|
|
|
|
|
|
|
"storj.io/common/pb"
|
|
|
|
"storj.io/common/storj"
|
2020-07-14 23:31:22 +01:00
|
|
|
"storj.io/storj/private/date"
|
2020-06-02 16:29:46 +01:00
|
|
|
)
|
|
|
|
|
|
|
|
const (
	// unsentFilePrefix names order files awaiting settlement; the full file
	// name is "unsent-orders-<satelliteID>-<createdAtHour>".
	unsentFilePrefix = "unsent-orders-"
	// archiveFilePrefix names settled order files; the full file name is
	// "archived-orders-<satelliteID>-<createdAtHour>-<archivedTime>-<status>".
	archiveFilePrefix = "archived-orders-"
)
|
|
|
|
|
|
|
|
// FileStore implements the orders.Store interface by appending orders to flat files.
type FileStore struct {
	// ordersDir is the root directory; unsentDir and archiveDir are its
	// "unsent" and "archive" subdirectories (see NewFileStore).
	ordersDir  string
	unsentDir  string
	archiveDir string
	// mutex for unsent directory
	unsentMu sync.Mutex
	// mutex for archive directory
	archiveMu sync.Mutex

	// how long after OrderLimit creation date are OrderLimits no longer accepted (piecestore Config)
	orderLimitGracePeriod time.Duration
	// how long after the grace period passes to start submitting orders
	maxInFlightTime time.Duration
}
|
|
|
|
|
2020-07-14 23:31:22 +01:00
|
|
|
// NewFileStore creates a new orders file store, and the directories necessary for its use.
|
|
|
|
func NewFileStore(ordersDir string, orderLimitGracePeriod, maxInFlightTime time.Duration) (*FileStore, error) {
|
|
|
|
fs := &FileStore{
|
|
|
|
ordersDir: ordersDir,
|
|
|
|
unsentDir: filepath.Join(ordersDir, "unsent"),
|
|
|
|
archiveDir: filepath.Join(ordersDir, "archive"),
|
2020-06-03 20:21:59 +01:00
|
|
|
orderLimitGracePeriod: orderLimitGracePeriod,
|
2020-07-14 23:31:22 +01:00
|
|
|
maxInFlightTime: maxInFlightTime,
|
|
|
|
}
|
|
|
|
|
|
|
|
err := fs.ensureDirectories()
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
2020-06-02 16:29:46 +01:00
|
|
|
}
|
2020-07-14 23:31:22 +01:00
|
|
|
|
|
|
|
return fs, nil
|
2020-06-02 16:29:46 +01:00
|
|
|
}
|
|
|
|
|
2020-06-03 20:21:59 +01:00
|
|
|
// Enqueue inserts order to be sent at the end of the unsent file for a particular creation hour.
|
|
|
|
// It assumes the order is not being queued after the order limit grace period.
|
2020-06-02 16:29:46 +01:00
|
|
|
func (store *FileStore) Enqueue(info *Info) (err error) {
|
2020-06-03 20:21:59 +01:00
|
|
|
store.unsentMu.Lock()
|
|
|
|
defer store.unsentMu.Unlock()
|
2020-06-02 16:29:46 +01:00
|
|
|
|
2020-07-14 23:31:22 +01:00
|
|
|
// if the settle buffer period has already passed, do not enqueue this order
|
|
|
|
if store.settleBufferPassed(info.Limit.OrderCreation.Truncate(time.Hour)) {
|
2020-06-03 20:21:59 +01:00
|
|
|
return OrderError.New("grace period passed for order limit")
|
|
|
|
}
|
|
|
|
|
|
|
|
f, err := store.getUnsentFile(info.Limit.SatelliteId, info.Limit.OrderCreation)
|
2020-06-02 16:29:46 +01:00
|
|
|
if err != nil {
|
|
|
|
return OrderError.Wrap(err)
|
|
|
|
}
|
|
|
|
defer func() {
|
2020-06-03 20:21:59 +01:00
|
|
|
err = errs.Combine(err, OrderError.Wrap(f.Close()))
|
2020-06-02 16:29:46 +01:00
|
|
|
}()
|
|
|
|
|
|
|
|
err = writeLimit(f, info.Limit)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
err = writeOrder(f, info.Order)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2020-06-03 20:21:59 +01:00
|
|
|
// UnsentInfo is a struct containing a window of orders for a satellite and order creation hour.
type UnsentInfo struct {
	// CreatedAtHour is the hour in which the orders' limits were created.
	CreatedAtHour time.Time
	// InfoList holds the orders (with their limits) read from the unsent file.
	InfoList []*Info
}
|
|
|
|
|
|
|
|
// ListUnsentBySatellite returns one window of orders that haven't been sent yet, grouped by satellite.
|
|
|
|
// It only reads files where the order limit grace period has passed, meaning no new orders will be appended.
|
|
|
|
// There is a separate window for each created at hour, so if a satellite has 2 windows, `ListUnsentBySatellite`
|
2020-07-14 23:31:22 +01:00
|
|
|
// needs to be called twice, with calls to `Archive` in between each call, to see all unsent orders.
|
2020-06-03 20:21:59 +01:00
|
|
|
func (store *FileStore) ListUnsentBySatellite() (infoMap map[storj.NodeID]UnsentInfo, err error) {
|
|
|
|
store.unsentMu.Lock()
|
|
|
|
defer store.unsentMu.Unlock()
|
2020-06-02 16:29:46 +01:00
|
|
|
|
|
|
|
var errList error
|
2020-06-03 20:21:59 +01:00
|
|
|
infoMap = make(map[storj.NodeID]UnsentInfo)
|
2020-06-02 16:29:46 +01:00
|
|
|
|
|
|
|
err = filepath.Walk(store.unsentDir, func(path string, info os.FileInfo, err error) error {
|
|
|
|
if err != nil {
|
2020-06-03 20:21:59 +01:00
|
|
|
errList = errs.Combine(errList, OrderError.Wrap(err))
|
2020-06-02 16:29:46 +01:00
|
|
|
return nil
|
|
|
|
}
|
|
|
|
if info.IsDir() {
|
|
|
|
return nil
|
|
|
|
}
|
2020-06-03 20:21:59 +01:00
|
|
|
satelliteID, createdAtHour, err := getUnsentFileInfo(info.Name())
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
// if we already have orders for this satellite, ignore the file
|
|
|
|
if _, ok := infoMap[satelliteID]; ok {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
// if orders can still be added to file, ignore it.
|
2020-07-14 23:31:22 +01:00
|
|
|
if !store.settleBufferPassed(createdAtHour) {
|
2020-06-02 16:29:46 +01:00
|
|
|
return nil
|
|
|
|
}
|
2020-06-03 20:21:59 +01:00
|
|
|
newUnsentInfo := UnsentInfo{
|
|
|
|
CreatedAtHour: createdAtHour,
|
|
|
|
}
|
2020-06-02 16:29:46 +01:00
|
|
|
|
|
|
|
f, err := os.Open(path)
|
|
|
|
if err != nil {
|
|
|
|
return OrderError.Wrap(err)
|
|
|
|
}
|
|
|
|
defer func() {
|
2020-06-03 20:21:59 +01:00
|
|
|
err = errs.Combine(err, OrderError.Wrap(f.Close()))
|
2020-06-02 16:29:46 +01:00
|
|
|
}()
|
|
|
|
|
|
|
|
for {
|
|
|
|
limit, err := readLimit(f)
|
|
|
|
if err != nil {
|
|
|
|
if errs.Is(err, io.EOF) {
|
|
|
|
break
|
|
|
|
}
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
order, err := readOrder(f)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
newInfo := &Info{
|
|
|
|
Limit: limit,
|
|
|
|
Order: order,
|
|
|
|
}
|
2020-06-03 20:21:59 +01:00
|
|
|
newUnsentInfo.InfoList = append(newUnsentInfo.InfoList, newInfo)
|
2020-06-02 16:29:46 +01:00
|
|
|
}
|
2020-06-03 20:21:59 +01:00
|
|
|
|
|
|
|
infoMap[satelliteID] = newUnsentInfo
|
2020-06-02 16:29:46 +01:00
|
|
|
return nil
|
|
|
|
})
|
|
|
|
if err != nil {
|
|
|
|
errList = errs.Combine(errList, err)
|
|
|
|
}
|
|
|
|
|
|
|
|
return infoMap, errList
|
|
|
|
}
|
|
|
|
|
2020-07-14 23:31:22 +01:00
|
|
|
// Archive moves a file from "unsent" to "archive". The filename/path changes from
|
|
|
|
// unsent/unsent-orders-<satelliteID>-<createdAtHour>
|
|
|
|
// to
|
2020-07-16 15:18:02 +01:00
|
|
|
// archive/archived-orders-<satelliteID>-<createdAtHour>-<archivedTime>-<ACCEPTED/REJECTED>.
|
2020-07-14 23:31:22 +01:00
|
|
|
func (store *FileStore) Archive(satelliteID storj.NodeID, createdAtHour, archivedAt time.Time, status pb.SettlementWithWindowResponse_Status) error {
|
2020-06-03 20:21:59 +01:00
|
|
|
store.unsentMu.Lock()
|
|
|
|
defer store.unsentMu.Unlock()
|
|
|
|
store.archiveMu.Lock()
|
|
|
|
defer store.archiveMu.Unlock()
|
|
|
|
|
2020-07-14 23:31:22 +01:00
|
|
|
oldFileName := unsentFilePrefix + satelliteID.String() + "-" + getCreationHourString(createdAtHour)
|
|
|
|
oldFilePath := filepath.Join(store.unsentDir, oldFileName)
|
2020-06-03 20:21:59 +01:00
|
|
|
|
2020-07-14 23:31:22 +01:00
|
|
|
newFileName := fmt.Sprintf("%s%s-%s-%s-%s",
|
|
|
|
archiveFilePrefix,
|
|
|
|
satelliteID.String(),
|
|
|
|
getCreationHourString(createdAtHour),
|
|
|
|
strconv.FormatInt(archivedAt.UnixNano(), 10),
|
|
|
|
pb.SettlementWithWindowResponse_Status_name[int32(status)],
|
|
|
|
)
|
|
|
|
newFilePath := filepath.Join(store.archiveDir, newFileName)
|
|
|
|
|
|
|
|
return OrderError.Wrap(os.Rename(oldFilePath, newFilePath))
|
2020-06-03 20:21:59 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
// ListArchived returns orders that have been sent.
|
|
|
|
func (store *FileStore) ListArchived() ([]*ArchivedInfo, error) {
|
|
|
|
store.archiveMu.Lock()
|
|
|
|
defer store.archiveMu.Unlock()
|
|
|
|
|
2020-06-02 16:29:46 +01:00
|
|
|
var errList error
|
2020-06-03 20:21:59 +01:00
|
|
|
archivedList := []*ArchivedInfo{}
|
|
|
|
err := filepath.Walk(store.archiveDir, func(path string, info os.FileInfo, err error) error {
|
2020-06-02 16:29:46 +01:00
|
|
|
if err != nil {
|
|
|
|
errList = errs.Combine(errList, OrderError.Wrap(err))
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
if info.IsDir() {
|
|
|
|
return nil
|
|
|
|
}
|
2020-07-14 23:31:22 +01:00
|
|
|
_, _, archivedAt, statusText, err := getArchivedFileInfo(info.Name())
|
2020-06-03 20:21:59 +01:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
2020-07-14 23:31:22 +01:00
|
|
|
status := StatusUnsent
|
|
|
|
switch statusText {
|
|
|
|
case pb.SettlementWithWindowResponse_ACCEPTED.String():
|
|
|
|
status = StatusAccepted
|
|
|
|
case pb.SettlementWithWindowResponse_REJECTED.String():
|
|
|
|
status = StatusRejected
|
|
|
|
}
|
|
|
|
|
2020-06-03 20:21:59 +01:00
|
|
|
f, err := os.Open(path)
|
|
|
|
if err != nil {
|
|
|
|
return OrderError.Wrap(err)
|
|
|
|
}
|
|
|
|
defer func() {
|
|
|
|
err = errs.Combine(err, OrderError.Wrap(f.Close()))
|
|
|
|
}()
|
|
|
|
|
|
|
|
for {
|
2020-07-14 23:31:22 +01:00
|
|
|
limit, err := readLimit(f)
|
2020-06-03 20:21:59 +01:00
|
|
|
if err != nil {
|
|
|
|
if errs.Is(err, io.EOF) {
|
|
|
|
break
|
|
|
|
}
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
order, err := readOrder(f)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
newInfo := &ArchivedInfo{
|
|
|
|
Limit: limit,
|
|
|
|
Order: order,
|
|
|
|
Status: status,
|
|
|
|
ArchivedAt: archivedAt,
|
|
|
|
}
|
|
|
|
archivedList = append(archivedList, newInfo)
|
2020-06-02 16:29:46 +01:00
|
|
|
}
|
2020-06-03 20:21:59 +01:00
|
|
|
return nil
|
2020-06-02 16:29:46 +01:00
|
|
|
})
|
|
|
|
if err != nil {
|
|
|
|
errList = errs.Combine(errList, err)
|
|
|
|
}
|
|
|
|
|
2020-06-03 20:21:59 +01:00
|
|
|
return archivedList, errList
|
2020-06-02 16:29:46 +01:00
|
|
|
}
|
|
|
|
|
2020-06-03 20:21:59 +01:00
|
|
|
// CleanArchive deletes all entries archvied before the provided time.
|
|
|
|
func (store *FileStore) CleanArchive(deleteBefore time.Time) error {
|
|
|
|
store.archiveMu.Lock()
|
|
|
|
defer store.archiveMu.Unlock()
|
2020-06-02 16:29:46 +01:00
|
|
|
|
2020-06-03 20:21:59 +01:00
|
|
|
// we want to delete everything older than ttl
|
|
|
|
var errList error
|
|
|
|
err := filepath.Walk(store.archiveDir, func(path string, info os.FileInfo, err error) error {
|
|
|
|
if err != nil {
|
|
|
|
errList = errs.Combine(errList, OrderError.Wrap(err))
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
if info.IsDir() {
|
|
|
|
return nil
|
|
|
|
}
|
2020-07-14 23:31:22 +01:00
|
|
|
_, _, archivedAt, _, err := getArchivedFileInfo(info.Name())
|
2020-06-03 20:21:59 +01:00
|
|
|
if err != nil {
|
|
|
|
errList = errs.Combine(errList, err)
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
if archivedAt.Before(deleteBefore) {
|
|
|
|
return OrderError.Wrap(os.Remove(path))
|
|
|
|
}
|
2020-06-02 16:29:46 +01:00
|
|
|
return nil
|
2020-06-03 20:21:59 +01:00
|
|
|
})
|
|
|
|
return errs.Combine(errList, err)
|
2020-06-02 16:29:46 +01:00
|
|
|
}
|
|
|
|
|
2020-07-14 23:31:22 +01:00
|
|
|
// TestSetSettleBuffer is a function that allows us to modify order limit grace period and max inflight time for testing purposes.
|
|
|
|
func (store *FileStore) TestSetSettleBuffer(orderLimitGracePeriod, maxInFlightTime time.Duration) {
|
|
|
|
store.unsentMu.Lock()
|
|
|
|
defer store.unsentMu.Unlock()
|
|
|
|
|
|
|
|
store.orderLimitGracePeriod = orderLimitGracePeriod
|
|
|
|
store.maxInFlightTime = maxInFlightTime
|
|
|
|
}
|
|
|
|
|
|
|
|
// ensureDirectories checks for the existence of the unsent and archived directories, and creates them if they do not exist.
|
|
|
|
func (store *FileStore) ensureDirectories() error {
|
2020-06-02 16:29:46 +01:00
|
|
|
if _, err := os.Stat(store.unsentDir); os.IsNotExist(err) {
|
2020-07-14 23:31:22 +01:00
|
|
|
err = os.MkdirAll(store.unsentDir, 0700)
|
2020-06-02 16:29:46 +01:00
|
|
|
if err != nil {
|
2020-07-14 23:31:22 +01:00
|
|
|
return OrderError.Wrap(err)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if _, err := os.Stat(store.archiveDir); os.IsNotExist(err) {
|
|
|
|
err = os.MkdirAll(store.archiveDir, 0700)
|
|
|
|
if err != nil {
|
|
|
|
return OrderError.Wrap(err)
|
2020-06-02 16:29:46 +01:00
|
|
|
}
|
|
|
|
}
|
2020-07-14 23:31:22 +01:00
|
|
|
return nil
|
|
|
|
}
|
2020-06-02 16:29:46 +01:00
|
|
|
|
2020-07-14 23:31:22 +01:00
|
|
|
// getUnsentFile creates or gets the order limit file for appending unsent orders to.
|
|
|
|
// There is a different file for each satellite and creation hour.
|
|
|
|
// It expects the caller to lock the store's mutex before calling, and to handle closing the returned file.
|
|
|
|
func (store *FileStore) getUnsentFile(satelliteID storj.NodeID, creationTime time.Time) (*os.File, error) {
|
2020-06-03 20:21:59 +01:00
|
|
|
fileName := unsentFilePrefix + satelliteID.String() + "-" + getCreationHourString(creationTime)
|
2020-06-02 16:29:46 +01:00
|
|
|
filePath := filepath.Join(store.unsentDir, fileName)
|
|
|
|
// create file if not exists or append
|
|
|
|
f, err := os.OpenFile(filePath, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
|
|
|
|
if err != nil {
|
|
|
|
return nil, OrderError.Wrap(err)
|
|
|
|
}
|
|
|
|
return f, nil
|
|
|
|
}
|
|
|
|
|
2020-06-03 20:21:59 +01:00
|
|
|
func getCreationHourString(t time.Time) string {
|
2020-07-14 23:31:22 +01:00
|
|
|
creationHour := date.TruncateToHourInNano(t)
|
|
|
|
timeStr := strconv.FormatInt(creationHour, 10)
|
2020-06-03 20:21:59 +01:00
|
|
|
return timeStr
|
|
|
|
}
|
|
|
|
|
2020-07-14 23:31:22 +01:00
|
|
|
// settleBufferPassed determines whether enough time has passed that no new orders will be added to a file.
|
|
|
|
func (store *FileStore) settleBufferPassed(createdHour time.Time) bool {
|
|
|
|
// wait until the gracePeriod+maxInFlightTime has passed, to ensure in-flight actions have completed
|
|
|
|
canSendCutoff := time.Now().Add(-store.orderLimitGracePeriod).Add(-store.maxInFlightTime)
|
2020-06-03 20:21:59 +01:00
|
|
|
// add one hour to include order limits in file added at end of createdHour
|
|
|
|
return createdHour.Add(time.Hour).Before(canSendCutoff)
|
|
|
|
}
|
|
|
|
|
|
|
|
// getUnsentFileInfo gets the satellite ID and created hour from a filename.
|
2020-07-16 15:18:02 +01:00
|
|
|
// it expects the file name to be in the format "unsent-orders-<satelliteID>-<createdAtHour>".
|
2020-06-03 20:21:59 +01:00
|
|
|
func getUnsentFileInfo(name string) (satellite storj.NodeID, createdHour time.Time, err error) {
|
|
|
|
if !strings.HasPrefix(name, unsentFilePrefix) {
|
|
|
|
return storj.NodeID{}, time.Time{}, OrderError.New("Not a valid unsent order file name: %s", name)
|
|
|
|
}
|
|
|
|
// chop off prefix to get satellite ID and created hours
|
|
|
|
infoStr := name[len(unsentFilePrefix):]
|
|
|
|
infoSlice := strings.Split(infoStr, "-")
|
|
|
|
if len(infoSlice) != 2 {
|
|
|
|
return storj.NodeID{}, time.Time{}, OrderError.New("Not a valid unsent order file name: %s", name)
|
|
|
|
}
|
|
|
|
|
|
|
|
satelliteIDStr := infoSlice[0]
|
|
|
|
satelliteID, err := storj.NodeIDFromString(satelliteIDStr)
|
|
|
|
if err != nil {
|
|
|
|
return storj.NodeID{}, time.Time{}, OrderError.New("Not a valid unsent order file name: %s", name)
|
|
|
|
}
|
|
|
|
|
|
|
|
timeStr := infoSlice[1]
|
|
|
|
createdHourUnixNano, err := strconv.ParseInt(timeStr, 10, 64)
|
|
|
|
if err != nil {
|
|
|
|
return satelliteID, time.Time{}, OrderError.Wrap(err)
|
|
|
|
}
|
|
|
|
createdAtHour := time.Unix(0, createdHourUnixNano)
|
|
|
|
|
|
|
|
return satelliteID, createdAtHour, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// getArchivedFileInfo gets the archived at time from an archive file name.
|
2020-07-16 15:18:02 +01:00
|
|
|
// it expects the file name to be in the format "archived-orders-<satelliteID>-<createdAtHour>-<archviedAtTime>-<status>".
|
2020-07-14 23:31:22 +01:00
|
|
|
func getArchivedFileInfo(name string) (satelliteID storj.NodeID, createdAtHour, archivedAt time.Time, status string, err error) {
|
2020-06-03 20:21:59 +01:00
|
|
|
if !strings.HasPrefix(name, archiveFilePrefix) {
|
2020-07-14 23:31:22 +01:00
|
|
|
return storj.NodeID{}, time.Time{}, time.Time{}, "", OrderError.New("Not a valid archived order file name: %s", name)
|
|
|
|
}
|
|
|
|
// chop off prefix to get satellite ID, created hour, archive time, and status
|
|
|
|
infoStr := name[len(archiveFilePrefix):]
|
|
|
|
infoSlice := strings.Split(infoStr, "-")
|
|
|
|
if len(infoSlice) != 4 {
|
|
|
|
return storj.NodeID{}, time.Time{}, time.Time{}, "", OrderError.New("Not a valid archived order file name: %s", name)
|
2020-06-03 20:21:59 +01:00
|
|
|
}
|
2020-07-14 23:31:22 +01:00
|
|
|
|
|
|
|
satelliteIDStr := infoSlice[0]
|
|
|
|
satelliteID, err = storj.NodeIDFromString(satelliteIDStr)
|
|
|
|
if err != nil {
|
|
|
|
return storj.NodeID{}, time.Time{}, time.Time{}, "", OrderError.New("Not a valid archived order file name: %s", name)
|
|
|
|
}
|
|
|
|
|
|
|
|
createdAtStr := infoSlice[1]
|
|
|
|
createdHourUnixNano, err := strconv.ParseInt(createdAtStr, 10, 64)
|
|
|
|
if err != nil {
|
|
|
|
return satelliteID, time.Time{}, time.Time{}, "", OrderError.New("Not a valid archived order file name: %s", name)
|
|
|
|
}
|
|
|
|
createdAtHour = time.Unix(0, createdHourUnixNano)
|
|
|
|
|
|
|
|
archivedAtStr := infoSlice[2]
|
2020-06-03 20:21:59 +01:00
|
|
|
archivedAtUnixNano, err := strconv.ParseInt(archivedAtStr, 10, 64)
|
|
|
|
if err != nil {
|
2020-07-14 23:31:22 +01:00
|
|
|
return satelliteID, createdAtHour, time.Time{}, "", OrderError.New("Not a valid archived order file name: %s", name)
|
2020-06-03 20:21:59 +01:00
|
|
|
}
|
2020-07-14 23:31:22 +01:00
|
|
|
archivedAt = time.Unix(0, archivedAtUnixNano)
|
|
|
|
|
|
|
|
status = infoSlice[3]
|
|
|
|
|
|
|
|
return satelliteID, createdAtHour, archivedAt, status, nil
|
2020-06-03 20:21:59 +01:00
|
|
|
}
|
|
|
|
|
2020-06-02 16:29:46 +01:00
|
|
|
// writeLimit writes the size of the order limit bytes, followed by the order limit bytes.
|
|
|
|
// it expects the caller to have locked the mutex.
|
|
|
|
func writeLimit(f io.Writer, limit *pb.OrderLimit) error {
|
|
|
|
limitSerialized, err := pb.Marshal(limit)
|
|
|
|
if err != nil {
|
|
|
|
return OrderError.Wrap(err)
|
|
|
|
}
|
|
|
|
|
|
|
|
sizeBytes := [4]byte{}
|
|
|
|
binary.LittleEndian.PutUint32(sizeBytes[:], uint32(len(limitSerialized)))
|
|
|
|
if _, err = f.Write(sizeBytes[:]); err != nil {
|
|
|
|
return OrderError.New("Error writing serialized limit size: %w", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
if _, err = f.Write(limitSerialized); err != nil {
|
|
|
|
return OrderError.New("Error writing serialized limit: %w", err)
|
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// readLimit reads the size of the limit followed by the serialized limit, and returns the unmarshalled limit.
|
|
|
|
func readLimit(f io.Reader) (*pb.OrderLimit, error) {
|
|
|
|
sizeBytes := [4]byte{}
|
|
|
|
_, err := io.ReadFull(f, sizeBytes[:])
|
|
|
|
if err != nil {
|
|
|
|
return nil, OrderError.Wrap(err)
|
|
|
|
}
|
|
|
|
limitSize := binary.LittleEndian.Uint32(sizeBytes[:])
|
|
|
|
limitSerialized := make([]byte, limitSize)
|
|
|
|
_, err = io.ReadFull(f, limitSerialized)
|
|
|
|
if err != nil {
|
|
|
|
return nil, OrderError.Wrap(err)
|
|
|
|
}
|
|
|
|
limit := &pb.OrderLimit{}
|
|
|
|
err = pb.Unmarshal(limitSerialized, limit)
|
|
|
|
if err != nil {
|
|
|
|
return nil, OrderError.Wrap(err)
|
|
|
|
}
|
|
|
|
return limit, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// writeOrder writes the size of the order bytes, followed by the order bytes.
|
|
|
|
// it expects the caller to have locked the mutex.
|
|
|
|
func writeOrder(f io.Writer, order *pb.Order) error {
|
|
|
|
orderSerialized, err := pb.Marshal(order)
|
|
|
|
if err != nil {
|
|
|
|
return OrderError.Wrap(err)
|
|
|
|
}
|
|
|
|
|
|
|
|
sizeBytes := [4]byte{}
|
|
|
|
binary.LittleEndian.PutUint32(sizeBytes[:], uint32(len(orderSerialized)))
|
|
|
|
if _, err = f.Write(sizeBytes[:]); err != nil {
|
|
|
|
return OrderError.New("Error writing serialized order size: %w", err)
|
|
|
|
}
|
|
|
|
if _, err = f.Write(orderSerialized); err != nil {
|
|
|
|
return OrderError.New("Error writing serialized order: %w", err)
|
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// readOrder reads the size of the order followed by the serialized order, and returns the unmarshalled order.
|
|
|
|
func readOrder(f io.Reader) (*pb.Order, error) {
|
|
|
|
sizeBytes := [4]byte{}
|
|
|
|
_, err := io.ReadFull(f, sizeBytes[:])
|
|
|
|
if err != nil {
|
|
|
|
return nil, OrderError.Wrap(err)
|
|
|
|
}
|
|
|
|
orderSize := binary.LittleEndian.Uint32(sizeBytes[:])
|
|
|
|
orderSerialized := make([]byte, orderSize)
|
|
|
|
_, err = io.ReadFull(f, orderSerialized)
|
|
|
|
if err != nil {
|
|
|
|
return nil, OrderError.Wrap(err)
|
|
|
|
}
|
|
|
|
order := &pb.Order{}
|
|
|
|
err = pb.Unmarshal(orderSerialized, order)
|
|
|
|
if err != nil {
|
|
|
|
return nil, OrderError.Wrap(err)
|
|
|
|
}
|
|
|
|
return order, nil
|
|
|
|
}
|