// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.

package metainfo

import (
	"bytes"
	"context"
	"crypto/subtle"
	"regexp"
	"strconv"
	"time"

	"github.com/jtolio/eventkit"
	"github.com/zeebo/errs"
	"go.uber.org/zap"
	"golang.org/x/time/rate"

	"storj.io/common/encryption"
	"storj.io/common/errs2"
	"storj.io/common/macaroon"
	"storj.io/common/memory"
	"storj.io/common/pb"
	"storj.io/common/rpc/rpcstatus"
	"storj.io/common/storj"
	"storj.io/common/uuid"
	"storj.io/storj/satellite/accounting"
	"storj.io/storj/satellite/console"
	"storj.io/storj/satellite/console/consoleauth"
	"storj.io/storj/satellite/metabase"
)

const encryptedKeySize = 48

var (
	ipRegexp = regexp.MustCompile(`^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$`)
)

var ek = eventkit.Package()
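// getAPIKey parses the API key from the request header when one is present,
// otherwise it falls back to the key attached to the request context.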
func getAPIKey(ctx context.Context, header *pb.RequestHeader) (key *macaroon.APIKey, err error) {
	defer mon.Task()(&ctx)(&err)
	if header != nil {
		return macaroon.ParseRawAPIKey(header.ApiKey)
	}

	keyData, ok := consoleauth.GetAPIKey(ctx)
	if !ok {
		return nil, errs.New("missing credentials")
	}

	return macaroon.ParseAPIKey(string(keyData))
}

// validateAuth validates the API key, user permissions, and rate limit, and always returns a valid RPC error.
func (endpoint *Endpoint) validateAuth(ctx context.Context, header *pb.RequestHeader, action macaroon.Action) (_ *console.APIKeyInfo, err error) {
	defer mon.Task()(&ctx)(&err)

	key, keyInfo, err := endpoint.validateBasic(ctx, header)
	if err != nil {
		return nil, err
	}

	err = key.Check(ctx, keyInfo.Secret, action, endpoint.revocations)
	if err != nil {
		endpoint.log.Debug("unauthorized request", zap.Error(err))
		return nil, rpcstatus.Error(rpcstatus.PermissionDenied, "Unauthorized API credentials")
	}

	return keyInfo, nil
}
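// verifyPermission is a single macaroon action to check during validateAuthN.
// When actionPermitted is non-nil it receives the result of the check; a
// failed check only rejects the request when the permission is not optional.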
type verifyPermission struct {
	action          macaroon.Action
	actionPermitted *bool
	optional        bool
}

// validateAuthN validates the API key, rate limit, and user permissions for
// each entry in permissions. It returns an error for the first required
// (non-optional) permission whose check fails. There must be at least one
// required permission; if all permissions are optional, it returns an error.
// It always returns valid RPC errors.
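//
// A hypothetical caller might, for example, require a read permission and
// optionally check a list permission (illustrative only, not an actual call
// from this package):
//
//	var canList bool
//	keyInfo, err := endpoint.validateAuthN(ctx, header,
//		verifyPermission{action: macaroon.Action{Op: macaroon.ActionRead, Time: time.Now()}},
//		verifyPermission{action: macaroon.Action{Op: macaroon.ActionList, Time: time.Now()}, actionPermitted: &canList, optional: true},
//	)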
func (endpoint *Endpoint) validateAuthN(ctx context.Context, header *pb.RequestHeader, permissions ...verifyPermission) (_ *console.APIKeyInfo, err error) {
	defer mon.Task()(&ctx)(&err)

	allOptional := true
	for _, p := range permissions {
		if !p.optional {
			allOptional = false
			break
		}
	}
	if allOptional {
		return nil, rpcstatus.Error(rpcstatus.Internal, "All permissions are optional")
	}

	key, keyInfo, err := endpoint.validateBasic(ctx, header)
	if err != nil {
		return nil, err
	}

	for _, p := range permissions {
		err = key.Check(ctx, keyInfo.Secret, p.action, endpoint.revocations)
		if p.actionPermitted != nil {
			*p.actionPermitted = err == nil
		}
		if err != nil && !p.optional {
			endpoint.log.Debug("unauthorized request", zap.Error(err))
			return nil, rpcstatus.Error(rpcstatus.PermissionDenied, "Unauthorized API credentials")
		}
	}

	return keyInfo, nil
}

// validateAuthAny validates the API key, rate limit, and user permissions.
// At least one of the given actions must be permitted for the call to succeed.
// It always returns valid RPC errors.
func (endpoint *Endpoint) validateAuthAny(ctx context.Context, header *pb.RequestHeader, actions ...macaroon.Action) (_ *console.APIKeyInfo, err error) {
	defer mon.Task()(&ctx)(&err)

	key, keyInfo, err := endpoint.validateBasic(ctx, header)
	if err != nil {
		return nil, err
	}

	if len(actions) == 0 {
		return nil, rpcstatus.Error(rpcstatus.Internal, "No action to validate")
	}

	var combinedErrs error
	for _, action := range actions {
		err = key.Check(ctx, keyInfo.Secret, action, endpoint.revocations)
		if err == nil {
			return keyInfo, nil
		}
		combinedErrs = errs.Combine(combinedErrs, err)
	}

	endpoint.log.Debug("unauthorized request", zap.Error(combinedErrs))
	return nil, rpcstatus.Error(rpcstatus.PermissionDenied, "Unauthorized API credentials")
}
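// validateBasic parses and looks up the API key, emits an auth event for the
// project, and enforces the project's rate limit. It is the shared first step
// of the validateAuth* helpers.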
func (endpoint *Endpoint) validateBasic(ctx context.Context, header *pb.RequestHeader) (_ *macaroon.APIKey, _ *console.APIKeyInfo, err error) {
	defer mon.Task()(&ctx)(&err)

	key, err := getAPIKey(ctx, header)
	if err != nil {
		endpoint.log.Debug("invalid request", zap.Error(err))
		return nil, nil, rpcstatus.Error(rpcstatus.InvalidArgument, "Invalid API credentials")
	}

	keyInfo, err := endpoint.apiKeys.GetByHead(ctx, key.Head())
	if err != nil {
		endpoint.log.Debug("unauthorized request", zap.Error(err))
		return nil, nil, rpcstatus.Error(rpcstatus.PermissionDenied, "Unauthorized API credentials")
	}

	userAgent := ""
	if keyInfo.UserAgent != nil {
		userAgent = string(keyInfo.UserAgent)
	}
	ek.Event("auth",
		eventkit.String("user-agent", userAgent),
		eventkit.String("project", keyInfo.ProjectID.String()),
		eventkit.String("partner", string(keyInfo.UserAgent)),
	)

	if err = endpoint.checkRate(ctx, keyInfo.ProjectID); err != nil {
		endpoint.log.Debug("rate check failed", zap.Error(err))
		return nil, nil, err
	}

	return key, keyInfo, nil
}
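
// validateRevoke checks that the caller is allowed to revoke macToRevoke: the
// macaroon must validate against the API key's secret, and the key's tail must
// appear among the macaroon's parent tails (a macaroon cannot revoke itself).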
func (endpoint *Endpoint) validateRevoke(ctx context.Context, header *pb.RequestHeader, macToRevoke *macaroon.Macaroon) (_ *console.APIKeyInfo, err error) {
	defer mon.Task()(&ctx)(&err)

	key, keyInfo, err := endpoint.validateBasic(ctx, header)
	if err != nil {
		return nil, err
	}

	// The macaroon to revoke must be valid with the same secret as the key.
	if !macToRevoke.Validate(keyInfo.Secret) {
		return nil, rpcstatus.Error(rpcstatus.InvalidArgument, "Macaroon to revoke invalid")
	}

	keyTail := key.Tail()
	tails := macToRevoke.Tails(keyInfo.Secret)

	// A macaroon cannot revoke itself, so we only check the first len(tails)-1
	// tails, skipping the final one. To be valid, the final tail of the auth
	// key must be contained within the checked tails of the macaroon we want
	// to revoke.
	for i := 0; i < len(tails)-1; i++ {
		if subtle.ConstantTimeCompare(tails[i], keyTail) == 1 {
			return keyInfo, nil
		}
	}
	return nil, rpcstatus.Error(rpcstatus.PermissionDenied, "Unauthorized attempt to revoke macaroon")
}
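// checkRate enforces the per-project request rate limit using a cached
// rate.Limiter per project ID. It returns ResourceExhausted when the limit is
// exceeded and nil when the rate limiter is disabled.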
func (endpoint *Endpoint) checkRate(ctx context.Context, projectID uuid.UUID) (err error) {
	defer mon.Task()(&ctx)(&err)
	if !endpoint.config.RateLimiter.Enabled {
		return nil
	}

	limiter, err := endpoint.limiterCache.Get(projectID.String(), func() (interface{}, error) {
		rateLimit := rate.Limit(endpoint.config.RateLimiter.Rate)
		burstLimit := int(endpoint.config.RateLimiter.Rate)

		limits, err := endpoint.projectLimits.GetLimits(ctx, projectID)
		if err != nil {
			return false, err
		}
		if limits.RateLimit != nil {
			rateLimit = rate.Limit(*limits.RateLimit)
			burstLimit = *limits.RateLimit
		}
		// use the explicitly set burst value if it's defined
		if limits.BurstLimit != nil {
			burstLimit = *limits.BurstLimit
		}

		return rate.NewLimiter(rateLimit, burstLimit), nil
	})
	if err != nil {
		return rpcstatus.Error(rpcstatus.Unavailable, err.Error())
	}

	if !limiter.(*rate.Limiter).Allow() {
		endpoint.log.Warn("too many requests for project",
			zap.Stringer("projectID", projectID),
			zap.Float64("rate limit", float64(limiter.(*rate.Limiter).Limit())),
			zap.Float64("burst limit", float64(limiter.(*rate.Limiter).Burst())))

		mon.Event("metainfo_rate_limit_exceeded") //mon:locked

		return rpcstatus.Error(rpcstatus.ResourceExhausted, "Too Many Requests")
	}

	return nil
}
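// validateBucket enforces S3-style bucket naming: 3-63 characters, made of
// dot-separated labels of lowercase letters, digits and hyphens, and not
// formatted like an IP address. For example, "my-bucket" and "logs.2023" are
// accepted, while "MyBucket", "ab" and "192.168.1.1" are rejected.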
func (endpoint *Endpoint) validateBucket(ctx context.Context, bucket []byte) (err error) {
	defer mon.Task()(&ctx)(&err)

	if len(bucket) == 0 {
		return Error.Wrap(storj.ErrNoBucket.New(""))
	}

	if len(bucket) < 3 || len(bucket) > 63 {
		return Error.New("bucket name must be at least 3 and no more than 63 characters long")
	}

	// Regexp not used because benchmark shows it will be slower for valid bucket names
	// https://gist.github.com/mniewrzal/49de3af95f36e63e88fac24f565e444c
	labels := bytes.Split(bucket, []byte("."))
	for _, label := range labels {
		err = validateBucketLabel(label)
		if err != nil {
			return err
		}
	}

	if ipRegexp.MatchString(string(bucket)) {
		return Error.New("bucket name cannot be formatted as an IP address")
	}

	return nil
}
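// validateBucketLabel checks a single dot-separated component of a bucket name.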
func validateBucketLabel(label []byte) error {
	if len(label) == 0 {
		return Error.New("bucket label cannot be empty")
	}

	if !isLowerLetter(label[0]) && !isDigit(label[0]) {
		return Error.New("bucket label must start with a lowercase letter or number")
	}

	if label[0] == '-' || label[len(label)-1] == '-' {
		return Error.New("bucket label cannot start or end with a hyphen")
	}

	for i := 1; i < len(label)-1; i++ {
		if !isLowerLetter(label[i]) && !isDigit(label[i]) && (label[i] != '-') && (label[i] != '.') {
			return Error.New("bucket name must contain only lowercase letters, numbers or hyphens")
		}
	}

	return nil
}

func isLowerLetter(r byte) bool {
	return r >= 'a' && r <= 'z'
}

func isDigit(r byte) bool {
	return r >= '0' && r <= '9'
}
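// validateRemoteSegment verifies a CommitSegment request against the order
// limits originally issued for it: the number of limits, encrypted and plain
// sizes, order limit signatures and age, derived piece IDs, matching node IDs,
// and the absence of duplicate piece numbers or storage nodes.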
func (endpoint *Endpoint) validateRemoteSegment(ctx context.Context, commitRequest metabase.CommitSegment, originalLimits []*pb.OrderLimit) (err error) {
	defer mon.Task()(&ctx)(&err)

	if len(originalLimits) == 0 {
		return Error.New("no order limits")
	}
	if len(originalLimits) != int(commitRequest.Redundancy.TotalShares) {
		return Error.New("invalid number of order limits")
	}

	maxAllowed, err := encryption.CalcEncryptedSize(endpoint.config.MaxSegmentSize.Int64(), storj.EncryptionParameters{
		CipherSuite: storj.EncAESGCM,
		BlockSize:   128, // intentionally low block size to allow maximum possible encryption overhead
	})
	if err != nil {
		return err
	}

	if int64(commitRequest.EncryptedSize) > maxAllowed || commitRequest.EncryptedSize < 0 {
		return Error.New("encrypted segment size %v is out of range, maximum allowed is %v", commitRequest.EncryptedSize, maxAllowed)
	}

	// TODO more validation for plain size and plain offset
	if commitRequest.PlainSize > commitRequest.EncryptedSize {
		return Error.New("plain segment size %v is out of range, maximum allowed is %v", commitRequest.PlainSize, commitRequest.EncryptedSize)
	}

	pieceNums := make(map[uint16]struct{})
	nodeIds := make(map[storj.NodeID]struct{})
	deriver := commitRequest.RootPieceID.Deriver()
	for _, piece := range commitRequest.Pieces {
		if int(piece.Number) >= len(originalLimits) {
			return Error.New("invalid piece number")
		}

		limit := originalLimits[piece.Number]
		if limit == nil {
			return Error.New("empty order limit for piece")
		}

		err := endpoint.orders.VerifyOrderLimitSignature(ctx, limit)
		if err != nil {
			return err
		}

		// verify that not too much time has passed between order limit creation and now
		if time.Since(limit.OrderCreation) > endpoint.config.MaxCommitInterval {
			return Error.New("Segment not committed before max commit interval of %f minutes.", endpoint.config.MaxCommitInterval.Minutes())
		}

		derivedPieceID := deriver.Derive(piece.StorageNode, int32(piece.Number))
		if limit.PieceId.IsZero() || limit.PieceId != derivedPieceID {
			return Error.New("invalid order limit piece id")
		}
		if piece.StorageNode != limit.StorageNodeId {
			return Error.New("piece NodeID != order limit NodeID")
		}

		if _, ok := pieceNums[piece.Number]; ok {
			return Error.New("piece num %d is duplicated", piece.Number)
		}
		if _, ok := nodeIds[piece.StorageNode]; ok {
			return Error.New("node id %s for piece num %d is duplicated", piece.StorageNode.String(), piece.Number)
		}

		pieceNums[piece.Number] = struct{}{}
		nodeIds[piece.StorageNode] = struct{}{}
	}

	return nil
}
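// checkUploadLimits verifies that the project can accept at least one more
// minimal object (one byte, one segment) within its upload limits.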
func (endpoint *Endpoint) checkUploadLimits(ctx context.Context, projectID uuid.UUID) error {
	return endpoint.checkUploadLimitsForNewObject(ctx, projectID, 1, 1)
}
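
// checkUploadLimitsForNewObject verifies that the project's segment and
// storage limits allow an upload of the given size and segment count. If the
// limits cannot be retrieved, the failure is logged and the upload is allowed.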
func (endpoint *Endpoint) checkUploadLimitsForNewObject(
	ctx context.Context, projectID uuid.UUID, newObjectSize int64, newObjectSegmentCount int64,
) error {
	if limit, err := endpoint.projectUsage.ExceedsUploadLimits(ctx, projectID, newObjectSize, newObjectSegmentCount); err != nil {
		if errs2.IsCanceled(err) {
			return rpcstatus.Wrap(rpcstatus.Canceled, err)
		}

		endpoint.log.Error(
			"Retrieving project upload limit failed; limit won't be enforced",
			zap.Stringer("Project ID", projectID),
			zap.Error(err),
		)
	} else {
		if limit.ExceedsSegments {
			endpoint.log.Warn("Segment limit exceeded",
				zap.String("Limit", strconv.Itoa(int(limit.SegmentsLimit))),
				zap.Stringer("Project ID", projectID),
			)
			return rpcstatus.Error(rpcstatus.ResourceExhausted, "Exceeded Segments Limit")
		}

		if limit.ExceedsStorage {
			endpoint.log.Warn("Storage limit exceeded",
				zap.String("Limit", strconv.Itoa(limit.StorageLimit.Int())),
				zap.Stringer("Project ID", projectID),
			)
			return rpcstatus.Error(rpcstatus.ResourceExhausted, "Exceeded Storage Limit")
		}
	}

	return nil
}
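// addSegmentToUploadLimits records a single uploaded segment of the given size
// against the project's usage.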
func (endpoint *Endpoint) addSegmentToUploadLimits(ctx context.Context, projectID uuid.UUID, segmentSize int64) error {
	return endpoint.addToUploadLimits(ctx, projectID, segmentSize, 1)
}
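
// addToUploadLimits adds the given storage size and segment count to the
// project's tracked usage. Tracking failures are logged and ignored unless the
// context was canceled.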
func (endpoint *Endpoint) addToUploadLimits(ctx context.Context, projectID uuid.UUID, size int64, segmentCount int64) error {
	if err := endpoint.projectUsage.AddProjectStorageUsage(ctx, projectID, size); err != nil {
		if errs2.IsCanceled(err) {
			return rpcstatus.Wrap(rpcstatus.Canceled, err)
		}

		// log it and continue. it's most likely our own fault that we couldn't
		// track it, and the only thing that will be affected is our per-project
		// bandwidth and storage limits.
		endpoint.log.Error("Could not track new project's storage usage",
			zap.Stringer("Project ID", projectID),
			zap.Error(err),
		)
	}

	err := endpoint.projectUsage.UpdateProjectSegmentUsage(ctx, projectID, segmentCount)
	if err != nil {
		if errs2.IsCanceled(err) {
			return rpcstatus.Wrap(rpcstatus.Canceled, err)
		}

		// log it and continue. it's most likely our own fault that we couldn't
		// track it, and the only thing that will be affected is our per-project
		// segment limits.
		endpoint.log.Error(
			"Could not track the new project's segment usage when committing",
			zap.Stringer("Project ID", projectID),
			zap.Error(err),
		)
	}

	return nil
}
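
// addStorageUsageUpToLimit adds storage and segment usage only if the project
// limits allow it, returning ResourceExhausted when a limit would be exceeded.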
func (endpoint *Endpoint) addStorageUsageUpToLimit(ctx context.Context, projectID uuid.UUID, storage int64, segments int64) (err error) {
	err = endpoint.projectUsage.AddProjectUsageUpToLimit(ctx, projectID, storage, segments)
	if err != nil {
		if accounting.ErrProjectLimitExceeded.Has(err) {
			endpoint.log.Warn("Upload limit exceeded",
				zap.Stringer("Project ID", projectID),
				zap.Error(err),
			)
			return rpcstatus.Error(rpcstatus.ResourceExhausted, err.Error())
		}

		if errs2.IsCanceled(err) {
			return rpcstatus.Wrap(rpcstatus.Canceled, err)
		}

		endpoint.log.Error(
			"Updating project upload limits failed; limits won't be enforced",
			zap.Stringer("Project ID", projectID),
			zap.Error(err),
		)
	}

	return nil
}

// checkEncryptedMetadataSize checks the encrypted metadata and its encrypted key sizes. The metadata's
// encrypted key nonce is serialized to storj.Nonce automatically.
func (endpoint *Endpoint) checkEncryptedMetadataSize(encryptedMetadata, encryptedKey []byte) error {
	metadataSize := memory.Size(len(encryptedMetadata))
	if metadataSize > endpoint.config.MaxMetadataSize {
		return rpcstatus.Errorf(rpcstatus.InvalidArgument, "Encrypted metadata is too large, got %v, maximum allowed is %v", metadataSize, endpoint.config.MaxMetadataSize)
	}

	// verify key only if any metadata was set
	if metadataSize > 0 && len(encryptedKey) != encryptedKeySize {
		return rpcstatus.Errorf(rpcstatus.InvalidArgument, "Encrypted metadata key size is invalid, got %v, expected %v", len(encryptedKey), encryptedKeySize)
	}
	return nil
}