2019-06-05 17:41:02 +01:00
|
|
|
// Copyright (C) 2019 Storj Labs, Inc.
|
|
|
|
// See LICENSE for copying information.
|
|
|
|
|
|
|
|
package metainfo
|
|
|
|
|
|
|
|
import (
|
|
|
|
"bytes"
|
|
|
|
"context"
|
2019-06-24 10:52:25 +01:00
|
|
|
"regexp"
|
2019-06-05 17:41:02 +01:00
|
|
|
"sync"
|
|
|
|
"time"
|
|
|
|
|
|
|
|
"github.com/gogo/protobuf/proto"
|
2019-07-03 17:14:37 +01:00
|
|
|
"github.com/zeebo/errs"
|
2019-06-05 17:41:02 +01:00
|
|
|
"go.uber.org/zap"
|
2020-01-17 15:01:36 +00:00
|
|
|
"golang.org/x/time/rate"
|
2019-06-05 17:41:02 +01:00
|
|
|
|
2019-12-27 11:48:47 +00:00
|
|
|
"storj.io/common/encryption"
|
|
|
|
"storj.io/common/macaroon"
|
|
|
|
"storj.io/common/pb"
|
|
|
|
"storj.io/common/rpc/rpcstatus"
|
|
|
|
"storj.io/common/storj"
|
2020-03-30 10:08:50 +01:00
|
|
|
"storj.io/common/uuid"
|
2019-06-05 17:41:02 +01:00
|
|
|
"storj.io/storj/pkg/auth"
|
|
|
|
"storj.io/storj/satellite/console"
|
|
|
|
)
|
|
|
|
|
2019-06-24 10:52:25 +01:00
|
|
|
const (
	// requestTTL defines how long a pending create request stays valid
	// before it expires and becomes eligible for cleanup.
	requestTTL = time.Hour * 4
)
|
|
|
|
|
|
|
|
var (
	// ipRegexp matches strings formatted as dotted-quad IPv4 addresses
	// (each octet 0-255); bucket names matching it are rejected.
	// Compiled once at package scope so validation stays cheap.
	ipRegexp = regexp.MustCompile(`^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$`)
)
|
2019-06-05 17:41:02 +01:00
|
|
|
|
|
|
|
// TTLItem keeps the association between a serial number and its expiration
// time (ttl).
type TTLItem struct {
	// serialNumber identifies the create request this expiration entry tracks.
	serialNumber storj.SerialNumber
	// ttl is the point in time after which the request counts as expired.
	ttl time.Time
}
|
|
|
|
|
|
|
|
// createRequest holds the parameters of a pending segment create request
// that must match the later commit.
type createRequest struct {
	// Expiration is the expiration date requested for the pointer.
	Expiration time.Time
	// Redundancy is the redundancy scheme requested at creation time.
	Redundancy *pb.RedundancyScheme

	// ttl is when this request itself expires; set by createRequests.Put.
	ttl time.Time
}
|
|
|
|
|
|
|
|
// createRequests tracks in-flight create requests keyed by order limit
// serial number, along with the expiration bookkeeping used to purge them.
type createRequests struct {
	// mu guards entries.
	mu sync.RWMutex
	// orders limit serial number used because with CreateSegment we don't have path yet
	entries map[storj.SerialNumber]*createRequest

	// muTTL guards entriesTTL.
	muTTL sync.Mutex
	// entriesTTL holds expiration entries; cleanup scans it front to back.
	entriesTTL []*TTLItem
}
|
|
|
|
|
|
|
|
func newCreateRequests() *createRequests {
|
|
|
|
return &createRequests{
|
|
|
|
entries: make(map[storj.SerialNumber]*createRequest),
|
|
|
|
entriesTTL: make([]*TTLItem, 0),
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
func (requests *createRequests) Put(serialNumber storj.SerialNumber, createRequest *createRequest) {
|
|
|
|
ttl := time.Now().Add(requestTTL)
|
|
|
|
|
|
|
|
go func() {
|
|
|
|
requests.muTTL.Lock()
|
|
|
|
requests.entriesTTL = append(requests.entriesTTL, &TTLItem{
|
|
|
|
serialNumber: serialNumber,
|
|
|
|
ttl: ttl,
|
|
|
|
})
|
|
|
|
requests.muTTL.Unlock()
|
|
|
|
}()
|
|
|
|
|
|
|
|
createRequest.ttl = ttl
|
|
|
|
requests.mu.Lock()
|
|
|
|
requests.entries[serialNumber] = createRequest
|
|
|
|
requests.mu.Unlock()
|
|
|
|
|
|
|
|
go requests.cleanup()
|
|
|
|
}
|
|
|
|
|
|
|
|
func (requests *createRequests) Load(serialNumber storj.SerialNumber) (*createRequest, bool) {
|
|
|
|
requests.mu.RLock()
|
|
|
|
request, found := requests.entries[serialNumber]
|
|
|
|
if request != nil && request.ttl.Before(time.Now()) {
|
|
|
|
request = nil
|
|
|
|
found = false
|
|
|
|
}
|
|
|
|
requests.mu.RUnlock()
|
|
|
|
|
|
|
|
return request, found
|
|
|
|
}
|
|
|
|
|
|
|
|
func (requests *createRequests) Remove(serialNumber storj.SerialNumber) {
|
|
|
|
requests.mu.Lock()
|
|
|
|
delete(requests.entries, serialNumber)
|
|
|
|
requests.mu.Unlock()
|
|
|
|
}
|
|
|
|
|
|
|
|
func (requests *createRequests) cleanup() {
|
|
|
|
requests.muTTL.Lock()
|
|
|
|
now := time.Now()
|
|
|
|
remove := make([]storj.SerialNumber, 0)
|
|
|
|
newStart := 0
|
|
|
|
for i, item := range requests.entriesTTL {
|
|
|
|
if item.ttl.Before(now) {
|
|
|
|
remove = append(remove, item.serialNumber)
|
|
|
|
newStart = i + 1
|
|
|
|
} else {
|
|
|
|
break
|
|
|
|
}
|
|
|
|
}
|
|
|
|
requests.entriesTTL = requests.entriesTTL[newStart:]
|
|
|
|
requests.muTTL.Unlock()
|
|
|
|
|
|
|
|
for _, serialNumber := range remove {
|
|
|
|
requests.Remove(serialNumber)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-09-19 17:19:29 +01:00
|
|
|
func getAPIKey(ctx context.Context, header *pb.RequestHeader) (key *macaroon.APIKey, err error) {
|
2019-06-05 17:41:02 +01:00
|
|
|
defer mon.Task()(&ctx)(&err)
|
2019-09-19 17:19:29 +01:00
|
|
|
if header != nil {
|
|
|
|
return macaroon.ParseRawAPIKey(header.ApiKey)
|
|
|
|
}
|
|
|
|
|
2019-06-05 17:41:02 +01:00
|
|
|
keyData, ok := auth.GetAPIKey(ctx)
|
|
|
|
if !ok {
|
2019-09-19 17:19:29 +01:00
|
|
|
return nil, errs.New("missing credentials")
|
2019-06-05 17:41:02 +01:00
|
|
|
}
|
|
|
|
|
2019-09-19 17:19:29 +01:00
|
|
|
return macaroon.ParseAPIKey(string(keyData))
|
|
|
|
}
|
|
|
|
|
2020-03-10 09:58:14 +00:00
|
|
|
// validateAuth validates things like API key, user permissions and rate limit and always returns valid rpc error.
// On success it returns the APIKeyInfo record for the authenticated key.
// Internal error details are logged at debug level; callers receive only
// generic rpc status errors so that no sensitive detail leaks to clients.
func (endpoint *Endpoint) validateAuth(ctx context.Context, header *pb.RequestHeader, action macaroon.Action) (_ *console.APIKeyInfo, err error) {
	defer mon.Task()(&ctx)(&err)

	// Resolve the key from the header or the context.
	key, err := getAPIKey(ctx, header)
	if err != nil {
		endpoint.log.Debug("invalid request", zap.Error(err))
		return nil, rpcstatus.Error(rpcstatus.InvalidArgument, "Invalid API credentials")
	}

	// Look up the stored key info by the macaroon head.
	keyInfo, err := endpoint.apiKeys.GetByHead(ctx, key.Head())
	if err != nil {
		endpoint.log.Debug("unauthorized request", zap.Error(err))
		return nil, rpcstatus.Error(rpcstatus.PermissionDenied, "Unauthorized API credentials")
	}

	// Enforce the per-project rate limit before doing any further work.
	// checkRate already returns a valid rpc error, so pass it through as-is.
	if err = endpoint.checkRate(ctx, keyInfo.ProjectID); err != nil {
		endpoint.log.Debug("rate check failed", zap.Error(err))
		return nil, err
	}

	// Revocations are currently handled by just deleting the key.
	err = key.Check(ctx, keyInfo.Secret, action, nil)
	if err != nil {
		endpoint.log.Debug("unauthorized request", zap.Error(err))
		return nil, rpcstatus.Error(rpcstatus.PermissionDenied, "Unauthorized API credentials")
	}

	return keyInfo, nil
}
|
|
|
|
|
2020-01-30 17:43:37 +00:00
|
|
|
func (endpoint *Endpoint) checkRate(ctx context.Context, projectID uuid.UUID) (err error) {
|
|
|
|
defer mon.Task()(&ctx)(&err)
|
2020-01-17 15:01:36 +00:00
|
|
|
if !endpoint.limiterConfig.Enabled {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
limiter, err := endpoint.limiterCache.Get(projectID.String(), func() (interface{}, error) {
|
|
|
|
limit := rate.Limit(endpoint.limiterConfig.Rate)
|
|
|
|
|
|
|
|
project, err := endpoint.projects.Get(ctx, projectID)
|
|
|
|
if err != nil {
|
|
|
|
return false, err
|
|
|
|
}
|
|
|
|
if project.RateLimit != nil && *project.RateLimit > 0 {
|
|
|
|
limit = rate.Limit(*project.RateLimit)
|
|
|
|
}
|
|
|
|
|
|
|
|
// initialize the limiter with limit and burst the same so that we don't limit how quickly calls
|
|
|
|
// are made within the second.
|
|
|
|
return rate.NewLimiter(limit, int(limit)), nil
|
|
|
|
})
|
|
|
|
if err != nil {
|
|
|
|
return rpcstatus.Error(rpcstatus.Unavailable, err.Error())
|
|
|
|
}
|
|
|
|
|
|
|
|
if !limiter.(*rate.Limiter).Allow() {
|
2020-01-30 17:43:37 +00:00
|
|
|
endpoint.log.Warn("too many requests for project",
|
|
|
|
zap.Stringer("projectID", projectID),
|
|
|
|
zap.Float64("limit", float64(limiter.(*rate.Limiter).Limit())))
|
|
|
|
|
|
|
|
mon.Event("metainfo_rate_limit_exceeded") //locked
|
|
|
|
|
2020-01-29 14:12:19 +00:00
|
|
|
return rpcstatus.Error(rpcstatus.ResourceExhausted, "Too Many Requests")
|
2020-01-17 15:01:36 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2019-07-08 14:33:15 +01:00
|
|
|
func (endpoint *Endpoint) validateCommitSegment(ctx context.Context, req *pb.SegmentCommitRequestOld) (err error) {
|
2019-06-05 17:41:02 +01:00
|
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
|
|
|
|
err = endpoint.validateBucket(ctx, req.Bucket)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
2019-07-24 12:33:23 +01:00
|
|
|
err = endpoint.validatePointer(ctx, req.Pointer, req.OriginalLimits)
|
2019-06-05 17:41:02 +01:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
if len(req.OriginalLimits) > 0 {
|
|
|
|
createRequest, found := endpoint.createRequests.Load(req.OriginalLimits[0].SerialNumber)
|
|
|
|
|
|
|
|
switch {
|
|
|
|
case !found:
|
|
|
|
return Error.New("missing create request or request expired")
|
2019-07-09 22:54:00 +01:00
|
|
|
case !createRequest.Expiration.Equal(req.Pointer.ExpirationDate):
|
2019-06-05 17:41:02 +01:00
|
|
|
return Error.New("pointer expiration date does not match requested one")
|
|
|
|
case !proto.Equal(createRequest.Redundancy, req.Pointer.Remote.Redundancy):
|
|
|
|
return Error.New("pointer redundancy scheme date does not match requested one")
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
func (endpoint *Endpoint) validateBucket(ctx context.Context, bucket []byte) (err error) {
|
|
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
|
|
|
|
if len(bucket) == 0 {
|
2020-03-16 08:55:52 +00:00
|
|
|
return Error.Wrap(storj.ErrNoBucket.New(""))
|
2019-06-05 17:41:02 +01:00
|
|
|
}
|
2019-06-24 10:52:25 +01:00
|
|
|
|
|
|
|
if len(bucket) < 3 || len(bucket) > 63 {
|
|
|
|
return Error.New("bucket name must be at least 3 and no more than 63 characters long")
|
2019-06-05 17:41:02 +01:00
|
|
|
}
|
2019-06-24 10:52:25 +01:00
|
|
|
|
|
|
|
// Regexp not used because benchmark shows it will be slower for valid bucket names
|
|
|
|
// https://gist.github.com/mniewrzal/49de3af95f36e63e88fac24f565e444c
|
|
|
|
labels := bytes.Split(bucket, []byte("."))
|
|
|
|
for _, label := range labels {
|
|
|
|
err = validateBucketLabel(label)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if ipRegexp.MatchString(string(bucket)) {
|
|
|
|
return Error.New("bucket name cannot be formatted as an IP address")
|
|
|
|
}
|
|
|
|
|
2019-06-05 17:41:02 +01:00
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2019-06-24 10:52:25 +01:00
|
|
|
func validateBucketLabel(label []byte) error {
|
|
|
|
if len(label) == 0 {
|
|
|
|
return Error.New("bucket label cannot be empty")
|
|
|
|
}
|
|
|
|
|
|
|
|
if !isLowerLetter(label[0]) && !isDigit(label[0]) {
|
|
|
|
return Error.New("bucket label must start with a lowercase letter or number")
|
|
|
|
}
|
|
|
|
|
|
|
|
if label[0] == '-' || label[len(label)-1] == '-' {
|
|
|
|
return Error.New("bucket label cannot start or end with a hyphen")
|
|
|
|
}
|
|
|
|
|
|
|
|
for i := 1; i < len(label)-1; i++ {
|
|
|
|
if !isLowerLetter(label[i]) && !isDigit(label[i]) && (label[i] != '-') && (label[i] != '.') {
|
|
|
|
return Error.New("bucket name must contain only lowercase letters, numbers or hyphens")
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// isLowerLetter reports whether r is an ASCII lowercase letter ('a'..'z').
func isLowerLetter(r byte) bool {
	return 'a' <= r && r <= 'z'
}
|
|
|
|
|
|
|
|
// isDigit reports whether r is an ASCII decimal digit ('0'..'9').
func isDigit(r byte) bool {
	return '0' <= r && r <= '9'
}
|
|
|
|
|
2019-07-24 12:33:23 +01:00
|
|
|
// validatePointer checks the internal consistency of an uploaded pointer and,
// for remote segments, verifies every piece against its original order limit:
// signature, commit-interval freshness, derived piece id, node id, and
// piece/node uniqueness. It returns a package Error describing the first
// violation found, or nil when the pointer is acceptable.
func (endpoint *Endpoint) validatePointer(ctx context.Context, pointer *pb.Pointer, originalLimits []*pb.OrderLimit) (err error) {
	defer mon.Task()(&ctx)(&err)

	if pointer == nil {
		return Error.New("no pointer specified")
	}

	// An inline segment must not carry remote-segment data.
	if pointer.Type == pb.Pointer_INLINE && pointer.Remote != nil {
		return Error.New("pointer type is INLINE but remote segment is set")
	}

	if pointer.Type == pb.Pointer_REMOTE {
		// Required remote fields, checked most-general first.
		switch {
		case pointer.Remote == nil:
			return Error.New("no remote segment specified")
		case pointer.Remote.RemotePieces == nil:
			return Error.New("no remote segment pieces specified")
		case pointer.Remote.Redundancy == nil:
			return Error.New("no redundancy scheme specified")
		}

		remote := pointer.Remote

		// Exactly one order limit per erasure share is expected.
		if len(originalLimits) == 0 {
			return Error.New("no order limits")
		}
		if int32(len(originalLimits)) != remote.Redundancy.Total {
			return Error.New("invalid no order limit for piece")
		}

		// Bound the claimed segment size by the worst-case encrypted size of
		// a maximum-size segment.
		maxAllowed, err := encryption.CalcEncryptedSize(endpoint.requiredRSConfig.MaxSegmentSize.Int64(), storj.EncryptionParameters{
			CipherSuite: storj.EncAESGCM,
			BlockSize:   128, // intentionally low block size to allow maximum possible encryption overhead
		})
		if err != nil {
			return err
		}

		if pointer.SegmentSize > maxAllowed || pointer.SegmentSize < 0 {
			return Error.New("segment size %v is out of range, maximum allowed is %v", pointer.SegmentSize, maxAllowed)
		}

		// Track seen piece numbers and node ids to reject duplicates.
		pieceNums := make(map[int32]struct{})
		nodeIds := make(map[storj.NodeID]struct{})
		for _, piece := range remote.RemotePieces {
			// Piece number doubles as the index into originalLimits.
			if piece.PieceNum >= int32(len(originalLimits)) {
				return Error.New("invalid piece number")
			}

			limit := originalLimits[piece.PieceNum]

			if limit == nil {
				return Error.New("empty order limit for piece")
			}

			// The limit must have been signed by the satellite.
			err := endpoint.orders.VerifyOrderLimitSignature(ctx, limit)
			if err != nil {
				return err
			}

			// expect that too much time has not passed between order limit creation and now
			if time.Since(limit.OrderCreation) > endpoint.maxCommitInterval {
				return Error.New("Segment not committed before max commit interval of %f minutes.", endpoint.maxCommitInterval.Minutes())
			}

			// The piece id in the limit must derive from the root piece id,
			// the storing node and the piece number.
			derivedPieceID := remote.RootPieceId.Derive(piece.NodeId, piece.PieceNum)
			if limit.PieceId.IsZero() || limit.PieceId != derivedPieceID {
				return Error.New("invalid order limit piece id")
			}
			if piece.NodeId != limit.StorageNodeId {
				return Error.New("piece NodeID != order limit NodeID")
			}

			if _, ok := pieceNums[piece.PieceNum]; ok {
				return Error.New("piece num %d is duplicated", piece.PieceNum)
			}

			if _, ok := nodeIds[piece.NodeId]; ok {
				return Error.New("node id %s for piece num %d is duplicated", piece.NodeId.String(), piece.PieceNum)
			}

			pieceNums[piece.PieceNum] = struct{}{}
			nodeIds[piece.NodeId] = struct{}{}
		}
	}

	return nil
}
|
|
|
|
|
|
|
|
// validateRedundancy checks the client-requested redundancy scheme against
// the satellite's required RS configuration. When validation is disabled in
// the configuration, any scheme is accepted.
func (endpoint *Endpoint) validateRedundancy(ctx context.Context, redundancy *pb.RedundancyScheme) (err error) {
	defer mon.Task()(&ctx)(&err)

	if endpoint.requiredRSConfig.Validate {
		// Total may fall anywhere in [MinTotalThreshold, MaxTotalThreshold];
		// every other parameter must match the configuration exactly.
		if endpoint.requiredRSConfig.ErasureShareSize.Int32() != redundancy.ErasureShareSize ||
			endpoint.requiredRSConfig.MinTotalThreshold > int(redundancy.Total) ||
			endpoint.requiredRSConfig.MaxTotalThreshold < int(redundancy.Total) ||
			endpoint.requiredRSConfig.MinThreshold != int(redundancy.MinReq) ||
			endpoint.requiredRSConfig.RepairThreshold != int(redundancy.RepairThreshold) ||
			endpoint.requiredRSConfig.SuccessThreshold != int(redundancy.SuccessThreshold) {
			// "want" lists the configured values (with the total range as
			// min-max); "got" lists what the client supplied.
			return Error.New("provided redundancy scheme parameters not allowed: want [%d, %d, %d, %d-%d, %d] got [%d, %d, %d, %d, %d]",
				endpoint.requiredRSConfig.MinThreshold,
				endpoint.requiredRSConfig.RepairThreshold,
				endpoint.requiredRSConfig.SuccessThreshold,
				endpoint.requiredRSConfig.MinTotalThreshold,
				endpoint.requiredRSConfig.MaxTotalThreshold,
				endpoint.requiredRSConfig.ErasureShareSize.Int32(),

				redundancy.MinReq,
				redundancy.RepairThreshold,
				redundancy.SuccessThreshold,
				redundancy.Total,
				redundancy.ErasureShareSize,
			)
		}
	}

	return nil
}
|