// Copyright (C) 2020 Storj Labs, Inc.
// See LICENSE for copying information.

package orders

import (
	"context"
	"io"
	"os"
	"path/filepath"
	"sync"
	"time"

	"github.com/zeebo/errs"
	"go.uber.org/zap"

	"storj.io/common/pb"
	"storj.io/common/storj"
	"storj.io/storj/private/date"
	"storj.io/storj/storagenode/orders/ordersfile"
)

// activeWindow represents a window with active operations that are waiting to
// finish enqueueing their orders.
type activeWindow struct {
	satelliteID storj.NodeID
	timestamp   int64
}
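
// The timestamp field holds the window's start time in unix nanoseconds: the
// order creation time truncated to its hour (date.TruncateToHourInNano). Every
// order a satellite creates within the same hour therefore maps to the same
// activeWindow key; with illustrative times only, orders created at 13:05 and
// 13:59 share one key, while an order created at 14:00 opens a new window.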

// FileStore implements the orders.Store interface by appending orders to flat files.
type FileStore struct {
	log *zap.Logger

	ordersDir  string
	unsentDir  string
	archiveDir string

	// always acquire the activeMu after the unsentMu to avoid deadlocks: if one
	// goroutine takes activeMu before unsentMu while another holds unsentMu and
	// waits for activeMu, the two wait for each other forever.

	// mutex for the active map
	activeMu sync.Mutex
	active   map[activeWindow]int

	// mutex for unsent directory
	unsentMu sync.Mutex
	// mutex for archive directory
	archiveMu sync.Mutex

	// how long after an OrderLimit's creation date it is no longer accepted (piecestore Config)
	orderLimitGracePeriod time.Duration
}

// NewFileStore creates a new orders file store, and the directories necessary for its use.
func NewFileStore(log *zap.Logger, ordersDir string, orderLimitGracePeriod time.Duration) (*FileStore, error) {
	fs := &FileStore{
		log:                   log,
		ordersDir:             ordersDir,
		unsentDir:             filepath.Join(ordersDir, "unsent"),
		archiveDir:            filepath.Join(ordersDir, "archive"),
		active:                make(map[activeWindow]int),
		orderLimitGracePeriod: orderLimitGracePeriod,
	}

	err := fs.ensureDirectories()
	if err != nil {
		return nil, err
	}

	return fs, nil
}
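
// A minimal construction sketch (hypothetical caller code, for illustration only;
// the directory path and grace period are arbitrary example values, and the real
// wiring happens in the storagenode peer setup):
//
//	log := zap.NewNop()
//	store, err := NewFileStore(log, "/mnt/storagenode/orders", time.Hour)
//	if err != nil {
//		// handle: the unsent/ or archive/ subdirectory could not be created
//	}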

// BeginEnqueue returns a function that can be called to enqueue the passed in Info. If the Info
// is too old to be enqueued, then an error is returned.
func (store *FileStore) BeginEnqueue(satelliteID storj.NodeID, createdAt time.Time) (commit func(*ordersfile.Info) error, err error) {
	store.unsentMu.Lock()
	defer store.unsentMu.Unlock()
	store.activeMu.Lock()
	defer store.activeMu.Unlock()

	// if the order is older than the grace period, reject it. We check the order creation
	// time rather than the window it would go into to keep the calculation predictable:
	// an order is accepted if and only if it is no older than the grace period.
	if time.Since(createdAt) > store.orderLimitGracePeriod {
		return nil, OrderError.New("grace period passed for order limit")
	}

	// record that there is an operation in flight for this window
	store.enqueueStartedLocked(satelliteID, createdAt)

	// the commit callback names its error return so that the deferred Close below
	// can fold any close error into the returned error.
	return func(info *ordersfile.Info) (err error) {
		// always acquire the activeMu after the unsentMu to avoid deadlocks
		store.unsentMu.Lock()
		defer store.unsentMu.Unlock()
		store.activeMu.Lock()
		defer store.activeMu.Unlock()

		// always remove the in flight operation
		defer store.enqueueFinishedLocked(satelliteID, createdAt)

		// caller wants to abort; free the file for sending and return with no error
		if info == nil {
			return nil
		}

		// check that the info matches what the enqueue was begun with
		if info.Limit.SatelliteId != satelliteID || !info.Limit.OrderCreation.Equal(createdAt) {
			return OrderError.New("invalid info passed in to enqueue commit")
		}

		// write out the data
		of, err := ordersfile.OpenWritableUnsent(store.unsentDir, info.Limit.SatelliteId, info.Limit.OrderCreation)
		if err != nil {
			return OrderError.Wrap(err)
		}
		defer func() {
			err = errs.Combine(err, OrderError.Wrap(of.Close()))
		}()

		err = of.Append(info)
		if err != nil {
			return OrderError.Wrap(err)
		}

		return nil
	}, nil
}
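
// A typical call pattern (hypothetical caller code, for illustration; the limit
// and order values come from a piecestore request, and passing nil to commit
// aborts the enqueue without error):
//
//	commit, err := store.BeginEnqueue(limit.SatelliteId, limit.OrderCreation)
//	if err != nil {
//		return err // the order limit is already past the grace period
//	}
//	// ... serve the upload or download ...
//	return commit(&ordersfile.Info{Limit: limit, Order: order})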

// enqueueStartedLocked records that there is an order pending to be written to the window.
func (store *FileStore) enqueueStartedLocked(satelliteID storj.NodeID, createdAt time.Time) {
	store.active[activeWindow{
		satelliteID: satelliteID,
		timestamp:   date.TruncateToHourInNano(createdAt),
	}]++
}

// enqueueFinishedLocked informs that there is no longer an order pending to be written to the
// window.
func (store *FileStore) enqueueFinishedLocked(satelliteID storj.NodeID, createdAt time.Time) {
	window := activeWindow{
		satelliteID: satelliteID,
		timestamp:   date.TruncateToHourInNano(createdAt),
	}

	store.active[window]--
	if store.active[window] <= 0 {
		delete(store.active, window)
	}
}

// hasActiveEnqueue returns true if there are active orders enqueued for the requested window.
func (store *FileStore) hasActiveEnqueue(satelliteID storj.NodeID, createdAt time.Time) bool {
	store.activeMu.Lock()
	defer store.activeMu.Unlock()

	return store.active[activeWindow{
		satelliteID: satelliteID,
		timestamp:   date.TruncateToHourInNano(createdAt),
	}] > 0
}

// Enqueue inserts an order, to be sent later, at the end of the unsent file for the
// order's creation hour. It ensures the order is not being queued after the order
// limit grace period.
func (store *FileStore) Enqueue(info *ordersfile.Info) (err error) {
	commit, err := store.BeginEnqueue(info.Limit.SatelliteId, info.Limit.OrderCreation)
	if err != nil {
		return err
	}
	return commit(info)
}

// UnsentInfo is a struct containing a window of orders for a satellite and order creation hour.
type UnsentInfo struct {
	CreatedAtHour time.Time
	Version       ordersfile.Version
	InfoList      []*ordersfile.Info
}

// ListUnsentBySatellite returns one window of orders that haven't been sent yet, grouped by satellite.
// It only reads files where the order limit grace period has passed, meaning no new orders will be appended.
// There is a separate window for each created at hour, so if a satellite has 2 windows, `ListUnsentBySatellite`
// needs to be called twice, with calls to `Archive` in between each call, to see all unsent orders.
func (store *FileStore) ListUnsentBySatellite(ctx context.Context, now time.Time) (infoMap map[storj.NodeID]UnsentInfo, err error) {
	defer mon.Task()(&ctx)(&err)
	// shouldn't be necessary, but acquire archiveMu to ensure we do not attempt to archive files during list
	store.archiveMu.Lock()
	defer store.archiveMu.Unlock()

	var errList error
	infoMap = make(map[storj.NodeID]UnsentInfo)

	// the walk callback names its error return so the deferred Close below can fold
	// any close error into the returned error; walkErr reports problems from Walk itself.
	err = filepath.Walk(store.unsentDir, func(path string, info os.FileInfo, walkErr error) (err error) {
		if walkErr != nil {
			errList = errs.Combine(errList, OrderError.Wrap(walkErr))
			return nil
		}
		if info.IsDir() {
			return nil
		}
		fileInfo, err := ordersfile.GetUnsentInfo(info)
		if err != nil {
			errList = errs.Combine(errList, OrderError.Wrap(err))
			return nil
		}

		// if we already have orders for this satellite, ignore the file
		if _, ok := infoMap[fileInfo.SatelliteID]; ok {
			return nil
		}

		// if orders can still be added to the file, ignore it. We add an hour because that
		// is the newest order that could be added to the window: for example, the 13:00
		// window accepts orders created until 14:00, so it is only safe to read once now
		// is more than a grace period past 14:00.
		if now.Sub(fileInfo.CreatedAtHour.Add(time.Hour)) <= store.orderLimitGracePeriod {
			return nil
		}

		// if there are still active orders for the time, ignore it.
		if store.hasActiveEnqueue(fileInfo.SatelliteID, fileInfo.CreatedAtHour) {
			return nil
		}

		newUnsentInfo := UnsentInfo{
			CreatedAtHour: fileInfo.CreatedAtHour,
			Version:       fileInfo.Version,
		}

		of, err := ordersfile.OpenReadable(path, fileInfo.Version)
		if err != nil {
			return OrderError.Wrap(err)
		}
		defer func() {
			err = errs.Combine(err, OrderError.Wrap(of.Close()))
		}()

		for {
			// if at any point we see an unexpected EOF error, return what orders we could read successfully with no error.
			// this behavior ensures that we will attempt to archive corrupted files instead of continually failing to read them.
			newInfo, err := of.ReadOne()
			if err != nil {
				if errs.Is(err, io.EOF) {
					break
				}
				// if the last entry read is corrupt, skip it and attempt to read the next one
				if ordersfile.ErrEntryCorrupt.Has(err) {
					store.log.Warn("Corrupted order detected in orders file", zap.Error(err))
					mon.Meter("orders_unsent_file_corrupted").Mark64(1)
					continue
				}
				return err
			}

			newUnsentInfo.InfoList = append(newUnsentInfo.InfoList, newInfo)
		}

		infoMap[fileInfo.SatelliteID] = newUnsentInfo
		return nil
	})
	if err != nil {
		errList = errs.Combine(errList, err)
	}

	return infoMap, errList
}
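
// A sketch of the expected send cycle (hypothetical caller code, for illustration;
// the real loop lives in the orders sender service, and settleWindow stands in for
// the satellite settlement RPC):
//
//	unsent, _ := store.ListUnsentBySatellite(ctx, time.Now())
//	for satelliteID, window := range unsent {
//		status := settleWindow(ctx, satelliteID, window.InfoList)
//		_ = store.Archive(satelliteID, window, time.Now(), status)
//	}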

// Archive moves a file from "unsent" to "archive".
func (store *FileStore) Archive(satelliteID storj.NodeID, unsentInfo UnsentInfo, archivedAt time.Time, status pb.SettlementWithWindowResponse_Status) error {
	store.unsentMu.Lock()
	defer store.unsentMu.Unlock()
	store.archiveMu.Lock()
	defer store.archiveMu.Unlock()

	return OrderError.Wrap(ordersfile.MoveUnsent(
		store.unsentDir,
		store.archiveDir,
		satelliteID,
		unsentInfo.CreatedAtHour,
		archivedAt,
		status,
		unsentInfo.Version,
	))
}

// ListArchived returns orders that have been sent.
func (store *FileStore) ListArchived() ([]*ArchivedInfo, error) {
	store.archiveMu.Lock()
	defer store.archiveMu.Unlock()

	var errList error
	archivedList := []*ArchivedInfo{}
	// as in ListUnsentBySatellite, the walk callback names its error return so the
	// deferred Close can fold any close error into the returned error.
	err := filepath.Walk(store.archiveDir, func(path string, info os.FileInfo, walkErr error) (err error) {
		if walkErr != nil {
			errList = errs.Combine(errList, OrderError.Wrap(walkErr))
			return nil
		}
		if info.IsDir() {
			return nil
		}

		fileInfo, err := ordersfile.GetArchivedInfo(info)
		if err != nil {
			return OrderError.Wrap(err)
		}
		of, err := ordersfile.OpenReadable(path, fileInfo.Version)
		if err != nil {
			return OrderError.Wrap(err)
		}
		defer func() {
			err = errs.Combine(err, OrderError.Wrap(of.Close()))
		}()

		status := StatusUnsent
		switch fileInfo.StatusText {
		case pb.SettlementWithWindowResponse_ACCEPTED.String():
			status = StatusAccepted
		case pb.SettlementWithWindowResponse_REJECTED.String():
			status = StatusRejected
		}

		for {
			info, err := of.ReadOne()
			if err != nil {
				if errs.Is(err, io.EOF) {
					break
				}
				// if the last entry read is corrupt, skip it and attempt to read the next one
				if ordersfile.ErrEntryCorrupt.Has(err) {
					store.log.Warn("Corrupted order detected in orders file", zap.Error(err))
					mon.Meter("orders_archive_file_corrupted").Mark64(1)
					continue
				}
				return err
			}

			newInfo := &ArchivedInfo{
				Limit:      info.Limit,
				Order:      info.Order,
				Status:     status,
				ArchivedAt: fileInfo.ArchivedAt,
			}
			archivedList = append(archivedList, newInfo)
		}
		return nil
	})
	if err != nil {
		errList = errs.Combine(errList, err)
	}

	return archivedList, errList
}

// CleanArchive deletes all entries archived before the provided time.
func (store *FileStore) CleanArchive(deleteBefore time.Time) error {
	store.archiveMu.Lock()
	defer store.archiveMu.Unlock()

	// we want to delete everything archived before deleteBefore
	var errList error
	err := filepath.Walk(store.archiveDir, func(path string, info os.FileInfo, err error) error {
		if err != nil {
			errList = errs.Combine(errList, OrderError.Wrap(err))
			return nil
		}
		if info.IsDir() {
			return nil
		}
		fileInfo, err := ordersfile.GetArchivedInfo(info)
		if err != nil {
			errList = errs.Combine(errList, err)
			return nil
		}
		if fileInfo.ArchivedAt.Before(deleteBefore) {
			return OrderError.Wrap(os.Remove(path))
		}
		return nil
	})
	return errs.Combine(errList, err)
}
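
// A minimal cleanup sketch (hypothetical caller code; the one-week retention used
// here is an arbitrary example value, not a project default):
//
//	err := store.CleanArchive(time.Now().Add(-7 * 24 * time.Hour))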

// ensureDirectories checks for the existence of the unsent and archive directories, and creates them if they do not exist.
func (store *FileStore) ensureDirectories() error {
	if _, err := os.Stat(store.unsentDir); os.IsNotExist(err) {
		err = os.MkdirAll(store.unsentDir, 0700)
		if err != nil {
			return OrderError.Wrap(err)
		}
	}
	if _, err := os.Stat(store.archiveDir); os.IsNotExist(err) {
		err = os.MkdirAll(store.archiveDir, 0700)
		if err != nil {
			return OrderError.Wrap(err)
		}
	}
	return nil
}