storj/satellite/metainfo/attribution_test.go

// Copyright (C) 2020 Storj Labs, Inc.
// See LICENSE for copying information.

package metainfo_test

import (
	"bytes"
	"fmt"
	"io/ioutil"
	"strconv"
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"

	"storj.io/common/memory"
	"storj.io/common/testcontext"
	"storj.io/common/testrand"
	"storj.io/common/uuid"
	"storj.io/storj/private/testplanet"
	"storj.io/storj/satellite/attribution"
	"storj.io/storj/satellite/console"
	"storj.io/storj/satellite/metainfo"
	"storj.io/uplink"
)
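
// TestTrimUserAgent checks that metainfo.TrimUserAgent keeps the product entries of a
// user agent, drops the uplink library entry and parenthesized comments, and truncates
// oversize results to metainfo.MaxUserAgentLength.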
func TestTrimUserAgent(t *testing.T) {
	oversizeProduct := testrand.RandAlphaNumeric(metainfo.MaxUserAgentLength)
	oversizeVersion := testrand.RandNumeric(metainfo.MaxUserAgentLength)
	for _, tt := range []struct {
		userAgent         []byte
		strippedUserAgent []byte
	}{
		{userAgent: nil, strippedUserAgent: nil},
		{userAgent: []byte(""), strippedUserAgent: []byte("")},
		{userAgent: []byte("not-a-partner"), strippedUserAgent: []byte("not-a-partner")},
		{userAgent: []byte("Zenko"), strippedUserAgent: []byte("Zenko")},
		{userAgent: []byte("Zenko uplink/v1.0.0"), strippedUserAgent: []byte("Zenko")},
		{userAgent: []byte("Zenko uplink/v1.0.0 (drpc/v0.10.0 common/v0.0.0-00010101000000-000000000000)"), strippedUserAgent: []byte("Zenko")},
		{userAgent: []byte("Zenko uplink/v1.0.0 (drpc/v0.10.0) (common/v0.0.0-00010101000000-000000000000)"), strippedUserAgent: []byte("Zenko")},
		{userAgent: []byte("uplink/v1.0.0 (drpc/v0.10.0 common/v0.0.0-00010101000000-000000000000)"), strippedUserAgent: []byte("")},
		{userAgent: []byte("uplink/v1.0.0"), strippedUserAgent: []byte("")},
		{userAgent: []byte("uplink/v1.0.0 Zenko/v3"), strippedUserAgent: []byte("Zenko/v3")},
		// oversize alphanumeric as 2nd entry product should use just the first entry
		{userAgent: append([]byte("Zenko/v3 "), oversizeProduct...), strippedUserAgent: []byte("Zenko/v3")},
		// all comments (small or oversize) should be completely removed
		{userAgent: append([]byte("Zenko ("), append(oversizeVersion, []byte(")")...)...), strippedUserAgent: []byte("Zenko")},
		// oversize version should truncate
		{userAgent: append([]byte("Zenko/v"), oversizeVersion...), strippedUserAgent: []byte("Zenko/v" + string(oversizeVersion[:len(oversizeVersion)-len("Zenko/v")]))},
		// oversize product names should truncate
		{userAgent: append([]byte("Zenko"), oversizeProduct...), strippedUserAgent: []byte("Zenko" + string(oversizeProduct[:len(oversizeProduct)-len("Zenko")]))},
	} {
		userAgent, err := metainfo.TrimUserAgent(tt.userAgent)
		require.NoError(t, err)
		assert.Equal(t, tt.strippedUserAgent, userAgent)
	}
}
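
// TestBucketAttribution verifies that a new bucket is attributed to the project owner's
// signup partner when one exists, and otherwise to the user agent of the uplink that
// creates the bucket, regardless of which project member creates it.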
func TestBucketAttribution(t *testing.T) {
	testplanet.Run(t, testplanet.Config{
		SatelliteCount:   1,
		StorageNodeCount: 1,
		UplinkCount:      1,
	}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
		for i, tt := range []struct {
			signupPartner       []byte
			userAgent           []byte
			expectedAttribution []byte
		}{
			{signupPartner: nil, userAgent: nil, expectedAttribution: nil},
			{signupPartner: []byte(""), userAgent: []byte(""), expectedAttribution: nil},
			{signupPartner: []byte("Minio"), userAgent: nil, expectedAttribution: []byte("Minio")},
			{signupPartner: []byte("Minio"), userAgent: []byte("Minio"), expectedAttribution: []byte("Minio")},
			{signupPartner: []byte("Minio"), userAgent: []byte("Zenko"), expectedAttribution: []byte("Minio")},
			{signupPartner: nil, userAgent: []byte("rclone/1.0 uplink/v1.6.1-0.20211005203254-bb2eda8c28d3"), expectedAttribution: []byte("rclone/1.0")},
			{signupPartner: nil, userAgent: []byte("Zenko"), expectedAttribution: []byte("Zenko")},
		} {
			errTag := fmt.Sprintf("%d. %+v", i, tt)

			satellite := planet.Satellites[0]

			user1, err := satellite.AddUser(ctx, console.CreateUser{
				FullName:  "Test User " + strconv.Itoa(i),
				Email:     "user@test" + strconv.Itoa(i),
				PartnerID: "",
				UserAgent: tt.signupPartner,
			}, 1)
			require.NoError(t, err, errTag)

			satProject, err := satellite.AddProject(ctx, user1.ID, "test"+strconv.Itoa(i))
			require.NoError(t, err, errTag)

			// add a second user to the project, and create the api key with the new user to ensure that
			// the project owner's attribution is used for a new bucket, even if someone else creates it.
			user2, err := satellite.AddUser(ctx, console.CreateUser{
				FullName:  "Test User 2" + strconv.Itoa(i),
				Email:     "user2@test" + strconv.Itoa(i),
				UserAgent: tt.signupPartner,
			}, 1)
			require.NoError(t, err, errTag)
			_, err = satellite.DB.Console().ProjectMembers().Insert(ctx, user2.ID, satProject.ID)
			require.NoError(t, err)

			createBucketAndCheckAttribution := func(userID uuid.UUID, apiKeyName, bucketName string) {
				userCtx, err := satellite.UserContext(ctx, userID)
				require.NoError(t, err, errTag)

				_, apiKeyInfo, err := satellite.API.Console.Service.CreateAPIKey(userCtx, satProject.ID, apiKeyName)
				require.NoError(t, err, errTag)

				config := uplink.Config{
					UserAgent: string(tt.userAgent),
				}
				access, err := config.RequestAccessWithPassphrase(ctx, satellite.NodeURL().String(), apiKeyInfo.Serialize(), "mypassphrase")
				require.NoError(t, err, errTag)

				project, err := config.OpenProject(ctx, access)
				require.NoError(t, err, errTag)

				_, err = project.CreateBucket(ctx, bucketName)
				require.NoError(t, err, errTag)

				bucketInfo, err := satellite.API.Buckets.Service.GetBucket(ctx, []byte(bucketName), satProject.ID)
				require.NoError(t, err, errTag)
				assert.Equal(t, tt.expectedAttribution, bucketInfo.UserAgent, errTag)

				attributionInfo, err := planet.Satellites[0].DB.Attribution().Get(ctx, satProject.ID, []byte(bucketName))
				if tt.expectedAttribution == nil {
					assert.True(t, attribution.ErrBucketNotAttributed.Has(err), errTag)
				} else {
					require.NoError(t, err, errTag)
					assert.Equal(t, tt.expectedAttribution, attributionInfo.UserAgent, errTag)
				}
			}

			createBucketAndCheckAttribution(user1.ID, "apikey1", "bucket1")
			createBucketAndCheckAttribution(user2.ID, "apikey2", "bucket2")
		}
	})
}
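
// TestQueryAttribution uploads and downloads data in a bucket attributed to the "Minio"
// user agent, flushes orders and tally, and checks that QueryAttribution and
// QueryAllAttribution report the resulting byte-hours and egress.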
func TestQueryAttribution(t *testing.T) {
	testplanet.Run(t, testplanet.Config{
		SatelliteCount: 1, StorageNodeCount: 4, UplinkCount: 0,
		Reconfigure: testplanet.Reconfigure{
			Satellite: testplanet.ReconfigureRS(2, 3, 4, 4),
		},
	}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
		const (
			bucketName = "test"
			objectKey  = "test-key"
		)

		satellite := planet.Satellites[0]
		now := time.Now()
		tomorrow := now.Add(24 * time.Hour)

		userAgent := "Minio"

		user, err := satellite.AddUser(ctx, console.CreateUser{
			FullName:  "user@test",
			Email:     "user@test",
			PartnerID: "",
			UserAgent: []byte(userAgent),
		}, 1)
		require.NoError(t, err)

		satProject, err := satellite.AddProject(ctx, user.ID, "test")
		require.NoError(t, err)

		userCtx, err := satellite.UserContext(ctx, user.ID)
		require.NoError(t, err)

		_, apiKeyInfo, err := satellite.API.Console.Service.CreateAPIKey(userCtx, satProject.ID, "root")
		require.NoError(t, err)

		access, err := uplink.RequestAccessWithPassphrase(ctx, satellite.NodeURL().String(), apiKeyInfo.Serialize(), "mypassphrase")
		require.NoError(t, err)

		project, err := uplink.OpenProject(ctx, access)
		require.NoError(t, err)

		_, err = project.CreateBucket(ctx, bucketName)
		require.NoError(t, err)

		{ // upload and download should be accounted for Minio
			upload, err := project.UploadObject(ctx, bucketName, objectKey, nil)
			require.NoError(t, err)

			_, err = upload.Write(testrand.Bytes(5 * memory.KiB))
			require.NoError(t, err)

			err = upload.Commit()
			require.NoError(t, err)

			download, err := project.DownloadObject(ctx, bucketName, objectKey, nil)
			require.NoError(t, err)

			_, err = ioutil.ReadAll(download)
			require.NoError(t, err)

			err = download.Close()
			require.NoError(t, err)
		}

		// Wait for the storage nodes to be done processing the download
		require.NoError(t, planet.WaitForStorageNodeEndpoints(ctx))

		{ // Flush all the pending information through the system.
			// Calculate the usage used for upload
			for _, sn := range planet.StorageNodes {
				sn.Storage2.Orders.SendOrders(ctx, tomorrow)
			}

			// The orders chore writes bucket bandwidth rollup changes to satellitedb
			planet.Satellites[0].Orders.Chore.Loop.TriggerWait()

			// Trigger tally so it gets all set up and can return a storage usage
			planet.Satellites[0].Accounting.Tally.Loop.TriggerWait()
		}

		{
			before := now.Add(-time.Hour)
			after := before.Add(2 * time.Hour)

			usage, err := planet.Satellites[0].DB.ProjectAccounting().GetProjectTotal(ctx, satProject.ID, before, after)
			require.NoError(t, err)
			require.NotZero(t, usage.Egress)

			partner, _ := planet.Satellites[0].API.Marketing.PartnersService.ByName(ctx, "")
			userAgent := []byte("Minio")
			require.NoError(t, err)

			rows, err := planet.Satellites[0].DB.Attribution().QueryAttribution(ctx, partner.UUID, userAgent, before, after)
			require.NoError(t, err)
			require.NotZero(t, rows[0].ByteHours)
			require.Equal(t, rows[0].EgressData, usage.Egress)

			// also test QueryAllAttribution
			rows, err = planet.Satellites[0].DB.Attribution().QueryAllAttribution(ctx, before, after)
			require.NoError(t, err)
			require.Equal(t, rows[0].UserAgent, userAgent)
			require.NotZero(t, rows[0].ByteHours)
			require.Equal(t, rows[0].EgressData, usage.Egress)
		}
	})
}
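
// TestAttributionReport uploads and downloads through the same bucket with "Zenko/1.0"
// and "Minio/1.0" user agents and checks that only the bucket creator (Zenko) receives
// attribution in QueryAttribution and QueryAllAttribution.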
func TestAttributionReport(t *testing.T) {
	testplanet.Run(t, testplanet.Config{
		SatelliteCount: 1, StorageNodeCount: 4, UplinkCount: 1,
		Reconfigure: testplanet.Reconfigure{
			Satellite: testplanet.ReconfigureRS(2, 3, 4, 4),
		},
	}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
		const (
			bucketName = "test"
			filePath   = "path"
		)
		now := time.Now()
		tomorrow := now.Add(24 * time.Hour)

		up := planet.Uplinks[0]
		zenkoStr := "Zenko/1.0"
		up.Config.UserAgent = zenkoStr

		err := up.CreateBucket(ctx, planet.Satellites[0], bucketName)
		require.NoError(t, err)

		{ // upload and download as Zenko
			err = up.Upload(ctx, planet.Satellites[0], bucketName, filePath, testrand.Bytes(5*memory.KiB))
			require.NoError(t, err)

			_, err = up.Download(ctx, planet.Satellites[0], bucketName, filePath)
			require.NoError(t, err)
		}

		minioStr := "Minio/1.0"
		up.Config.UserAgent = minioStr

		{ // upload and download as Minio
			err = up.Upload(ctx, planet.Satellites[0], bucketName, filePath, testrand.Bytes(5*memory.KiB))
			require.NoError(t, err)

			_, err = up.Download(ctx, planet.Satellites[0], bucketName, filePath)
			require.NoError(t, err)
		}

		// Wait for the storage nodes to be done processing the download
		require.NoError(t, planet.WaitForStorageNodeEndpoints(ctx))

		{ // Flush all the pending information through the system.
			// Calculate the usage used for upload
			for _, sn := range planet.StorageNodes {
				sn.Storage2.Orders.SendOrders(ctx, tomorrow)
			}

			// The orders chore writes bucket bandwidth rollup changes to satellitedb
			planet.Satellites[0].Orders.Chore.Loop.TriggerWait()

			// Trigger tally so it gets all set up and can return a storage usage
			planet.Satellites[0].Accounting.Tally.Loop.TriggerWait()
		}

		{
			before := now.Add(-time.Hour)
			after := before.Add(2 * time.Hour)

			projectID := up.Projects[0].ID

			usage, err := planet.Satellites[0].DB.ProjectAccounting().GetProjectTotal(ctx, projectID, before, after)
			require.NoError(t, err)
			require.NotZero(t, usage.Egress)

			partner, _ := planet.Satellites[0].API.Marketing.PartnersService.ByUserAgent(ctx, "")

			rows, err := planet.Satellites[0].DB.Attribution().QueryAttribution(ctx, partner.UUID, []byte(zenkoStr), before, after)
			require.NoError(t, err)
			require.NotZero(t, rows[0].ByteHours)
			require.Equal(t, rows[0].EgressData, usage.Egress)

			// Minio should have no attribution because bucket was created by Zenko
			partner, _ = planet.Satellites[0].API.Marketing.PartnersService.ByUserAgent(ctx, "")
			rows, err = planet.Satellites[0].DB.Attribution().QueryAttribution(ctx, partner.UUID, []byte(minioStr), before, after)
			require.NoError(t, err)
			require.Empty(t, rows)

			// also test QueryAllAttribution
			rows, err = planet.Satellites[0].DB.Attribution().QueryAllAttribution(ctx, before, after)
			require.NoError(t, err)

			var zenkoFound, minioFound bool
			for _, r := range rows {
				if bytes.Equal(r.UserAgent, []byte(zenkoStr)) {
					// check the matched row, not just the first one
					require.NotZero(t, r.ByteHours)
					require.Equal(t, r.EgressData, usage.Egress)
					zenkoFound = true
				} else if bytes.Equal(r.UserAgent, []byte(minioStr)) {
					minioFound = true
				}
			}
			require.True(t, zenkoFound)

			// Minio should have no attribution because bucket was created by Zenko
			require.False(t, minioFound)
		}
	})
}
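
// TestBucketAttributionConcurrentUpload makes three concurrent uploads with a "Minio"
// user agent into the same bucket and verifies that the bucket ends up attributed to
// that user agent.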
func TestBucketAttributionConcurrentUpload(t *testing.T) {
	testplanet.Run(t, testplanet.Config{
		SatelliteCount:   1,
		StorageNodeCount: 0,
		UplinkCount:      1,
	}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
		satellite := planet.Satellites[0]

		err := planet.Uplinks[0].CreateBucket(ctx, satellite, "attr-bucket")
		require.NoError(t, err)

		config := uplink.Config{
			UserAgent: "Minio",
		}
		project, err := config.OpenProject(ctx, planet.Uplinks[0].Access[satellite.ID()])
		require.NoError(t, err)

		for i := 0; i < 3; i++ {
			i := i
			ctx.Go(func() error {
				upload, err := project.UploadObject(ctx, "attr-bucket", "key"+strconv.Itoa(i), nil)
				require.NoError(t, err)

				_, err = upload.Write([]byte("content"))
				require.NoError(t, err)

				err = upload.Commit()
				require.NoError(t, err)
				return nil
			})
		}
		ctx.Wait()

		attributionInfo, err := planet.Satellites[0].DB.Attribution().Get(ctx, planet.Uplinks[0].Projects[0].ID, []byte("attr-bucket"))
		require.NoError(t, err)
		require.Equal(t, []byte(config.UserAgent), attributionInfo.UserAgent)
	})
}