2020-03-30 15:19:36 +01:00
|
|
|
// Copyright (C) 2019 Storj Labs, Inc.
|
|
|
|
// See LICENSE for copying information.
|
|
|
|
|
|
|
|
package metainfo
|
|
|
|
|
|
|
|
import (
|
2023-06-28 22:17:01 +01:00
|
|
|
"bytes"
|
2020-03-30 15:19:36 +01:00
|
|
|
"context"
|
2020-04-14 12:50:50 +01:00
|
|
|
"sync"
|
2020-04-14 09:27:43 +01:00
|
|
|
|
|
|
|
"go.uber.org/zap"
|
2020-03-30 15:19:36 +01:00
|
|
|
|
2020-04-14 12:50:50 +01:00
|
|
|
"storj.io/common/errs2"
|
2020-03-30 15:19:36 +01:00
|
|
|
"storj.io/common/pb"
|
|
|
|
"storj.io/common/rpc/rpcstatus"
|
|
|
|
"storj.io/common/useragent"
|
2020-03-30 10:08:50 +01:00
|
|
|
"storj.io/common/uuid"
|
2020-04-14 12:50:50 +01:00
|
|
|
"storj.io/drpc/drpccache"
|
2020-04-14 09:27:43 +01:00
|
|
|
"storj.io/storj/satellite/attribution"
|
2023-04-13 13:04:07 +01:00
|
|
|
"storj.io/storj/satellite/buckets"
|
2020-07-24 10:40:17 +01:00
|
|
|
"storj.io/storj/satellite/console"
|
2020-03-30 15:19:36 +01:00
|
|
|
)
|
|
|
|
|
2021-10-22 22:28:03 +01:00
|
|
|
// MaxUserAgentLength is the maximum allowable length, in bytes, of the User Agent.
const MaxUserAgentLength = 500
|
|
|
|
|
2023-04-14 17:39:11 +01:00
|
|
|
// ensureAttribution ensures that the bucketName has the partner information specified by project-level user agent, or header user agent.
|
2023-06-28 22:17:01 +01:00
|
|
|
// If `forceBucketUpdate` is true, then the buckets table will be updated if necessary (needed for bucket creation). Otherwise, it is sufficient
|
|
|
|
// to only ensure the attribution exists in the value attributions db.
|
2020-04-14 12:50:50 +01:00
|
|
|
//
|
|
|
|
// Assumes that the user has permissions sufficient for authenticating.
|
2023-06-28 22:17:01 +01:00
|
|
|
func (endpoint *Endpoint) ensureAttribution(ctx context.Context, header *pb.RequestHeader, keyInfo *console.APIKeyInfo, bucketName, projectUserAgent []byte, forceBucketUpdate bool) (err error) {
|
2022-04-01 12:57:02 +01:00
|
|
|
defer mon.Task()(&ctx)(&err)
|
|
|
|
|
2020-04-14 12:50:50 +01:00
|
|
|
if header == nil {
|
|
|
|
return rpcstatus.Error(rpcstatus.InvalidArgument, "header is nil")
|
|
|
|
}
|
2023-01-27 21:07:32 +00:00
|
|
|
if len(header.UserAgent) == 0 && len(keyInfo.UserAgent) == 0 && len(projectUserAgent) == 0 {
|
2020-04-14 12:50:50 +01:00
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2023-06-28 22:17:01 +01:00
|
|
|
if !forceBucketUpdate {
|
|
|
|
if conncache := drpccache.FromContext(ctx); conncache != nil {
|
|
|
|
cache := conncache.LoadOrCreate(attributionCheckCacheKey{},
|
|
|
|
func() interface{} {
|
|
|
|
return &attributionCheckCache{}
|
|
|
|
}).(*attributionCheckCache)
|
|
|
|
if !cache.needsCheck(string(bucketName)) {
|
|
|
|
return nil
|
|
|
|
}
|
2020-04-14 12:50:50 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-09-23 00:38:18 +01:00
|
|
|
userAgent := keyInfo.UserAgent
|
2022-06-02 15:07:32 +01:00
|
|
|
if len(projectUserAgent) > 0 {
|
|
|
|
userAgent = projectUserAgent
|
|
|
|
}
|
|
|
|
|
2021-09-23 00:38:18 +01:00
|
|
|
// first check keyInfo (user) attribution
|
2023-01-27 21:07:32 +00:00
|
|
|
if userAgent == nil {
|
2021-09-23 00:38:18 +01:00
|
|
|
// otherwise, use header (partner tool) as attribution
|
|
|
|
userAgent = header.UserAgent
|
2020-04-14 12:50:50 +01:00
|
|
|
}
|
|
|
|
|
2022-04-01 12:57:02 +01:00
|
|
|
userAgent, err = TrimUserAgent(userAgent)
|
2021-10-22 22:28:03 +01:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
2023-06-28 22:17:01 +01:00
|
|
|
err = endpoint.tryUpdateBucketAttribution(ctx, header, keyInfo.ProjectID, bucketName, userAgent, forceBucketUpdate)
|
2020-04-14 12:50:50 +01:00
|
|
|
if errs2.IsRPC(err, rpcstatus.NotFound) || errs2.IsRPC(err, rpcstatus.AlreadyExists) {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
2021-10-22 22:28:03 +01:00
|
|
|
// TrimUserAgent returns userAgentBytes that consist of only the product portion of the user agent, and is bounded by
|
|
|
|
// the maxUserAgentLength.
|
|
|
|
func TrimUserAgent(userAgent []byte) ([]byte, error) {
|
2021-12-02 17:30:33 +00:00
|
|
|
if len(userAgent) == 0 {
|
|
|
|
return userAgent, nil
|
|
|
|
}
|
2021-10-22 22:28:03 +01:00
|
|
|
userAgentEntries, err := useragent.ParseEntries(userAgent)
|
2020-03-30 15:19:36 +01:00
|
|
|
if err != nil {
|
2021-10-22 22:28:03 +01:00
|
|
|
return userAgent, Error.New("error while parsing user agent: %w", err)
|
2020-03-30 15:19:36 +01:00
|
|
|
}
|
2021-10-22 22:28:03 +01:00
|
|
|
// strip comments, libraries, and empty products from the user agent
|
|
|
|
newEntries := userAgentEntries[:0]
|
|
|
|
for _, e := range userAgentEntries {
|
|
|
|
switch product := e.Product; product {
|
2021-12-11 20:23:04 +00:00
|
|
|
case "uplink", "common", "drpc", "Gateway-ST", "":
|
2021-10-22 22:28:03 +01:00
|
|
|
default:
|
|
|
|
e.Comment = ""
|
|
|
|
newEntries = append(newEntries, e)
|
2020-03-30 15:19:36 +01:00
|
|
|
}
|
|
|
|
}
|
2021-10-22 22:28:03 +01:00
|
|
|
userAgent, err = useragent.EncodeEntries(newEntries)
|
|
|
|
if err != nil {
|
|
|
|
return userAgent, Error.New("error while encoding user agent entries: %w", err)
|
|
|
|
}
|
2020-03-30 15:19:36 +01:00
|
|
|
|
2021-10-22 22:28:03 +01:00
|
|
|
// bound the user agent length
|
|
|
|
if len(userAgent) > MaxUserAgentLength && len(newEntries) > 0 {
|
|
|
|
// try to preserve the first entry
|
|
|
|
if (len(newEntries[0].Product) + len(newEntries[0].Version)) <= MaxUserAgentLength {
|
|
|
|
userAgent, err = useragent.EncodeEntries(newEntries[:1])
|
|
|
|
if err != nil {
|
|
|
|
return userAgent, Error.New("error while encoding first user agent entry: %w", err)
|
2020-03-30 15:19:36 +01:00
|
|
|
}
|
2021-10-22 22:28:03 +01:00
|
|
|
} else {
|
|
|
|
// first entry is too large, truncate
|
|
|
|
userAgent = userAgent[:MaxUserAgentLength]
|
2020-03-30 15:19:36 +01:00
|
|
|
}
|
|
|
|
}
|
2021-10-22 22:28:03 +01:00
|
|
|
return userAgent, nil
|
2020-03-30 15:19:36 +01:00
|
|
|
}
|
2020-04-14 09:27:43 +01:00
|
|
|
|
2023-06-28 22:17:01 +01:00
|
|
|
// tryUpdateBucketAttribution records the user agent attribution for a bucket in the
// value attributions table and, when needed, on the bucket record itself.
//
// When forceBucketUpdate is false, an existing value-attribution entry short-circuits
// the call. When true (bucket creation path), the bucket row is also brought in sync
// with the stored attribution. Attribution is only assigned while the bucket is empty.
//
// Errors are returned as rpcstatus errors: NotFound when the bucket does not exist,
// AlreadyExists when the bucket is non-empty, Internal for DB failures.
func (endpoint *Endpoint) tryUpdateBucketAttribution(ctx context.Context, header *pb.RequestHeader, projectID uuid.UUID, bucketName []byte, userAgent []byte, forceBucketUpdate bool) (err error) {
	defer mon.Task()(&ctx)(&err)

	if header == nil {
		return rpcstatus.Error(rpcstatus.InvalidArgument, "header is nil")
	}

	// check if attribution is set for given bucket
	attrInfo, err := endpoint.attributions.Get(ctx, projectID, bucketName)
	if err == nil {
		if !forceBucketUpdate {
			// bucket has already an attribution, no need to update
			return nil
		}
	} else if !attribution.ErrBucketNotAttributed.Has(err) {
		// an unexpected DB error (anything other than "not attributed yet")
		endpoint.log.Error("error while getting attribution from DB", zap.Error(err))
		return rpcstatus.Error(rpcstatus.Internal, err.Error())
	}

	// checks if bucket exists before updates it or makes a new entry
	bucket, err := endpoint.buckets.GetBucket(ctx, bucketName, projectID)
	if err != nil {
		if buckets.ErrBucketNotFound.Has(err) {
			return rpcstatus.Errorf(rpcstatus.NotFound, "bucket %q does not exist", bucketName)
		}
		endpoint.log.Error("error while getting bucket", zap.ByteString("bucketName", bucketName), zap.Error(err))
		return rpcstatus.Error(rpcstatus.Internal, "unable to set bucket attribution")
	}

	if attrInfo != nil {
		// bucket user agent and value attributions user agent already set
		if bytes.Equal(bucket.UserAgent, attrInfo.UserAgent) {
			return nil
		}
		// make sure bucket user_agent matches value_attribution
		userAgent = attrInfo.UserAgent
	}

	// attribution may only be (re)assigned while the bucket holds no objects
	empty, err := endpoint.isBucketEmpty(ctx, projectID, bucketName)
	if err != nil {
		endpoint.log.Error("internal", zap.Error(err))
		return rpcstatus.Error(rpcstatus.Internal, Error.Wrap(err).Error())
	}
	if !empty {
		return rpcstatus.Errorf(rpcstatus.AlreadyExists, "bucket %q is not empty, Partner %q cannot be attributed", bucketName, userAgent)
	}

	if attrInfo == nil {
		// update attribution table
		_, err = endpoint.attributions.Insert(ctx, &attribution.Info{
			ProjectID:  projectID,
			BucketName: bucketName,
			UserAgent:  userAgent,
		})
		if err != nil {
			endpoint.log.Error("error while inserting attribution to DB", zap.Error(err))
			return rpcstatus.Error(rpcstatus.Internal, err.Error())
		}
	}

	// update bucket information
	bucket.UserAgent = userAgent
	_, err = endpoint.buckets.UpdateBucket(ctx, bucket)
	if err != nil {
		endpoint.log.Error("error while updating bucket", zap.ByteString("bucketName", bucketName), zap.Error(err))
		return rpcstatus.Error(rpcstatus.Internal, "unable to set bucket attribution")
	}

	return nil
}
|
2020-04-14 12:50:50 +01:00
|
|
|
|
|
|
|
// maxAttributionCacheSize determines how many buckets attributionCheckCache remembers.
const maxAttributionCacheSize = 10

// attributionCheckCacheKey is used as a key for the per-connection (drpccache) cache.
type attributionCheckCacheKey struct{}
|
|
|
|
|
|
|
|
// attributionCheckCache implements a basic lru cache, with a constant size.
type attributionCheckCache struct {
	mu      sync.Mutex // guards pos and buckets
	pos     int        // ring position of the most recently stored bucket
	buckets []string   // recently checked bucket names; grows up to maxAttributionCacheSize
}
|
|
|
|
|
|
|
|
// needsCheck returns true when the bucket should be tested for setting the useragent.
|
|
|
|
func (cache *attributionCheckCache) needsCheck(bucket string) bool {
|
|
|
|
cache.mu.Lock()
|
|
|
|
defer cache.mu.Unlock()
|
|
|
|
|
|
|
|
for _, b := range cache.buckets {
|
|
|
|
if b == bucket {
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if len(cache.buckets) >= maxAttributionCacheSize {
|
|
|
|
cache.pos = (cache.pos + 1) % len(cache.buckets)
|
|
|
|
cache.buckets[cache.pos] = bucket
|
|
|
|
} else {
|
|
|
|
cache.pos = len(cache.buckets)
|
|
|
|
cache.buckets = append(cache.buckets, bucket)
|
|
|
|
}
|
|
|
|
|
|
|
|
return true
|
|
|
|
}
|