cmd/s3-benchmark: move to storj.io/benchmark

Change-Id: Idca2b836bdf876ca28eb5cabc9bfae1d576e4a3e
This commit is contained in:
Egon Elbre 2020-03-23 18:59:49 +02:00
parent d7558db5ed
commit 6a7571f73e
12 changed files with 0 additions and 1437 deletions

View File

@ -1,359 +0,0 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.
package main
import (
"bytes"
"flag"
"fmt"
"io"
"log"
"os"
"strconv"
"text/tabwriter"
"time"
"github.com/loov/hrtime"
"storj.io/common/memory"
"storj.io/storj/private/s3client"
)
func main() {
	// Parse CLI flags describing how to reach the storage backend under test.
	var conf s3client.Config
	flag.StringVar(&conf.S3Gateway, "s3-gateway", "127.0.0.1:7777", "s3 gateway address")
	flag.StringVar(&conf.Satellite, "satellite", "127.0.0.1:7778", "satellite address")
	flag.StringVar(&conf.AccessKey, "accesskey", "insecure-dev-access-key", "access key")
	flag.StringVar(&conf.SecretKey, "secretkey", "insecure-dev-secret-key", "secret key")
	flag.StringVar(&conf.APIKey, "apikey", "abc123", "api key")
	flag.StringVar(&conf.EncryptionKey, "encryptionkey", "abc123", "encryption key")
	flag.BoolVar(&conf.NoSSL, "no-ssl", false, "disable ssl")
	flag.StringVar(&conf.ConfigDir, "config-dir", "", "path of config dir to use. If empty, a config will be created.")
	clientName := flag.String("client", "minio", "client to use for requests (supported: minio, aws-cli, uplink)")
	location := flag.String("location", "", "bucket location")
	count := flag.Int("count", 50, "benchmark count")
	duration := flag.Duration("time", 2*time.Minute, "maximum benchmark time per filesize")
	// suffix makes bucket and plot names unique per run (timestamp-based).
	suffix := time.Now().Format("-2006-01-02-150405")
	plotname := flag.String("plot", "plot"+suffix+".svg", "plot results")
	// Default set of object sizes to benchmark; overridable via -filesize.
	filesizes := &memory.Sizes{
		Default: []memory.Size{
			1 * memory.KiB,
			256 * memory.KiB,
			1 * memory.MiB,
			32 * memory.MiB,
			64 * memory.MiB,
			256 * memory.MiB,
		},
	}
	flag.Var(filesizes, "filesize", "filesizes to test with")
	listsize := flag.Int("listsize", 1000, "listsize to test with")
	flag.Parse()
	// Select the client implementation. Unknown names fall through to the
	// default (minio), which is currently disabled — see TODO below.
	var client s3client.Client
	var err error
	switch *clientName {
	default:
		log.Println("unknown client name ", *clientName, " defaulting to minio")
		fallthrough
	// TODO: reenable
	// case "minio":
	// 	client, err = s3client.NewMinio(conf)
	case "aws-cli":
		client, err = s3client.NewAWSCLI(conf)
	case "uplink":
		client, err = s3client.NewUplink(conf)
	}
	if err != nil {
		log.Fatal(err)
	}
	bucket := "benchmark" + suffix
	log.Println("Creating bucket", bucket)
	// 1 bucket for file up and downloads
	err = client.MakeBucket(bucket, *location)
	if err != nil {
		log.Fatalf("failed to create bucket %q: %+v\n", bucket, err)
	}
	// Fixture data for the list benchmark: 1-byte objects.
	data := make([]byte, 1)
	log.Println("Creating files", bucket)
	// n files in one folder
	for k := 0; k < *listsize; k++ {
		err := client.Upload(bucket, "folder/data"+strconv.Itoa(k), data)
		if err != nil {
			log.Fatalf("failed to create file %q: %+v\n", "folder/data"+strconv.Itoa(k), err)
		}
	}
	log.Println("Creating folders", bucket)
	// n - 1 (one folder already exists) folders with one file in each folder
	for k := 0; k < *listsize-1; k++ {
		err := client.Upload(bucket, "folder"+strconv.Itoa(k)+"/data", data)
		if err != nil {
			log.Fatalf("failed to create folder %q: %+v\n", "folder"+strconv.Itoa(k)+"/data", err)
		}
	}
	// Cleanup runs after all benchmarks complete (note: log.Fatal in the
	// benchmark path would skip this defer — pre-existing behavior).
	defer func() {
		log.Println("Removing files")
		for k := 0; k < *listsize; k++ {
			err := client.Delete(bucket, "folder/data"+strconv.Itoa(k))
			if err != nil {
				log.Fatalf("failed to delete file %q: %+v\n", "folder/data"+strconv.Itoa(k), err)
			}
		}
		log.Println("Removing folders")
		for k := 0; k < *listsize-1; k++ {
			err := client.Delete(bucket, "folder"+strconv.Itoa(k)+"/data")
			if err != nil {
				log.Fatalf("failed to delete folder %q: %+v\n", "folder"+strconv.Itoa(k)+"/data", err)
			}
		}
		log.Println("Removing bucket")
		err := client.RemoveBucket(bucket)
		if err != nil {
			log.Fatalf("failed to remove bucket %q", bucket)
		}
	}()
	// Run the list benchmark first, then per-filesize file benchmarks.
	measurements := []Measurement{}
	measurement, err := ListBenchmark(client, bucket, *listsize, *count, *duration)
	if err != nil {
		log.Fatal(err)
	}
	measurements = append(measurements, measurement)
	for _, filesize := range filesizes.Sizes() {
		measurement, err := FileBenchmark(client, bucket, filesize, *count, *duration)
		if err != nil {
			log.Fatal(err)
		}
		measurements = append(measurements, measurement)
	}
	// Emit an aligned summary table: each stat column has a seconds and a
	// MB/s sub-column (second header row carries the units).
	fmt.Print("\n\n")
	w := tabwriter.NewWriter(os.Stdout, 0, 0, 4, ' ', 0)
	fmt.Fprintf(w, "%v\t%v\t%v\t%v\t%v\t%v\t%v\t%v\t%v\t%v\t%v\t%v\n",
		"Size", "",
		"Avg", "",
		"Max", "",
		"P50", "", "P90", "", "P99", "",
	)
	fmt.Fprintf(w, "%v\t%v\t%v\t%v\t%v\t%v\t%v\t%v\t%v\t%v\t%v\t%v\n",
		"", "",
		"s", "MB/s",
		"s", "MB/s",
		"s", "MB/s", "s", "MB/s", "s", "MB/s",
	)
	for _, m := range measurements {
		m.PrintStats(w)
	}
	_ = w.Flush()
	// Optionally render an SVG plot of the measurement densities.
	if *plotname != "" {
		err := Plot(*plotname, measurements)
		if err != nil {
			log.Fatal(err)
		}
	}
}
// Measurement contains measurements for different requests
// performed against objects of a single Size.
type Measurement struct {
	Size    memory.Size // object size used for this batch of results
	Results []*Result   // one entry per operation name (Upload, Download, ...)
}
// Result contains durations for specific tests
type Result struct {
	Name      string          // operation name, e.g. "Upload"
	WithSpeed bool            // whether durations can be expressed as MB/s
	Durations []time.Duration // raw per-iteration timings
}
// Result returns the result registered under name, creating and
// appending a new empty one when it does not exist yet.
func (m *Measurement) Result(name string) *Result {
	for _, existing := range m.Results {
		if existing.Name == name {
			return existing
		}
	}
	created := &Result{Name: name}
	m.Results = append(m.Results, created)
	return created
}
// Record appends duration to the named result and marks the result
// as not convertible to a transfer speed.
func (m *Measurement) Record(name string, duration time.Duration) {
	res := m.Result(name)
	res.Durations = append(res.Durations, duration)
	res.WithSpeed = false
}
// RecordSpeed appends duration to the named result and marks the
// result as convertible to a transfer speed (MB/s).
func (m *Measurement) RecordSpeed(name string, duration time.Duration) {
	res := m.Result(name)
	res.Durations = append(res.Durations, duration)
	res.WithSpeed = true
}
// PrintStats prints important values about the measurement as one
// tab-separated row per result: Avg, Max, P50, P90, P99, each as
// seconds plus (for speed-capable results) MB/s.
func (m *Measurement) PrintStats(w io.Writer) {
	const binCount = 10
	// Hist pairs a result with its duration histogram so both the
	// metadata (Name, WithSpeed) and the stats are at hand below.
	type Hist struct {
		*Result
		*hrtime.Histogram
	}
	hists := []Hist{}
	for _, result := range m.Results {
		hists = append(hists, Hist{
			Result:    result,
			Histogram: hrtime.NewDurationHistogram(result.Durations, binCount),
		})
	}
	// sec formats nanoseconds as seconds with two decimals.
	sec := func(ns float64) string {
		return fmt.Sprintf("%.2f", ns/1e9)
	}
	// speed converts a duration in nanoseconds to MB/s for this
	// measurement's object size.
	speed := func(ns float64) string {
		return fmt.Sprintf("%.2f", m.Size.MB()/(ns/1e9))
	}
	for _, hist := range hists {
		// Results without speed semantics leave the MB/s columns blank.
		if !hist.WithSpeed {
			fmt.Fprintf(w, "%v\t%v\t%v\t%v\t%v\t%v\t%v\t%v\t%v\t%v\t%v\t%v\n",
				m.Size, hist.Name,
				sec(hist.Average), "",
				sec(hist.Maximum), "",
				sec(hist.P50), "",
				sec(hist.P90), "",
				sec(hist.P99), "",
			)
			continue
		}
		fmt.Fprintf(w, "%v\t%v\t%v\t%v\t%v\t%v\t%v\t%v\t%v\t%v\t%v\t%v\n",
			m.Size, hist.Name,
			sec(hist.Average), speed(hist.Average),
			sec(hist.Maximum), speed(hist.Maximum),
			sec(hist.P50), speed(hist.P50),
			sec(hist.P90), speed(hist.P90),
			sec(hist.P99), speed(hist.P99),
		)
	}
}
// FileBenchmark runs file upload, download and delete benchmarks on bucket with given filesize.
// It performs up to count iterations but stops early once the wall-clock
// time budget (duration) is exhausted. Returns the collected Measurement;
// on error the partially-filled Measurement is returned alongside it.
func FileBenchmark(client s3client.Client, bucket string, filesize memory.Size, count int, duration time.Duration) (Measurement, error) {
	log.Print("Benchmarking file size ", filesize.String(), " ")
	data := make([]byte, filesize.Int())
	result := make([]byte, filesize.Int())
	defer fmt.Println()
	measurement := Measurement{}
	measurement.Size = filesize
	start := time.Now()
	for k := 0; k < count; k++ {
		// Stop early when the per-filesize time budget is spent.
		if time.Since(start) > duration {
			break
		}
		fmt.Print(".")
		// Deterministic payload instead of random bytes; keeps runs
		// comparable and avoids charging RNG cost to the benchmark.
		// rand.Read(data[:])
		for i := range data {
			data[i] = 'a' + byte(i%26)
		}
		{ // uploading
			start := hrtime.Now()
			err := client.Upload(bucket, "data", data)
			finish := hrtime.Now()
			if err != nil {
				return measurement, fmt.Errorf("upload failed: %+v", err)
			}
			measurement.RecordSpeed("Upload", finish-start)
		}
		{ // downloading
			start := hrtime.Now()
			var err error
			result, err = client.Download(bucket, "data", result)
			if err != nil {
				return measurement, fmt.Errorf("get object failed: %+v", err)
			}
			finish := hrtime.Now()
			// Verify round-trip integrity before recording the timing.
			if !bytes.Equal(data, result) {
				return measurement, fmt.Errorf("upload/download do not match: lengths %d and %d", len(data), len(result))
			}
			measurement.RecordSpeed("Download", finish-start)
		}
		{ // deleting
			start := hrtime.Now()
			err := client.Delete(bucket, "data")
			if err != nil {
				return measurement, fmt.Errorf("delete failed: %+v", err)
			}
			finish := hrtime.Now()
			measurement.Record("Delete", finish-start)
		}
	}
	return measurement, nil
}
// ListBenchmark runs list buckets, folders and files benchmarks on bucket.
// The bucket is expected to already contain listsize top-level folders and
// listsize files under "folder/" (created by main). It performs up to count
// iterations, stopping early when the time budget (duration) is exhausted —
// previously the duration parameter was accepted but silently ignored,
// unlike FileBenchmark which honors it.
func ListBenchmark(client s3client.Client, bucket string, listsize int, count int, duration time.Duration) (Measurement, error) {
	log.Print("Benchmarking list")
	defer fmt.Println()
	measurement := Measurement{}
	//measurement.Size = listsize
	begin := time.Now()
	for k := 0; k < count; k++ {
		// Honor the per-benchmark time budget, same as FileBenchmark.
		if time.Since(begin) > duration {
			break
		}
		{ // list folders
			start := hrtime.Now()
			result, err := client.ListObjects(bucket, "")
			if err != nil {
				return measurement, fmt.Errorf("list folders failed: %+v", err)
			}
			finish := hrtime.Now()
			if len(result) != listsize {
				return measurement, fmt.Errorf("list folders result wrong: %+v", len(result))
			}
			measurement.Record("List Folders", finish-start)
		}
		{ // list files
			start := hrtime.Now()
			result, err := client.ListObjects(bucket, "folder")
			if err != nil {
				return measurement, fmt.Errorf("list files failed: %+v", err)
			}
			finish := hrtime.Now()
			if len(result) != listsize {
				// was "result to low" — fixed typo; count can be off either way
				return measurement, fmt.Errorf("list files result wrong: %+v", len(result))
			}
			measurement.Record("List Files", finish-start)
		}
	}
	return measurement, nil
}

View File

@ -1,112 +0,0 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.
package main
import (
"image/color"
"io/ioutil"
"time"
"github.com/loov/plot"
)
// palette holds the stroke colors cycled through for density plots
// (green, blue, red); indexed modulo its length per result.
var palette = []color.Color{
	color.NRGBA{0, 200, 0, 255},
	color.NRGBA{0, 0, 200, 255},
	color.NRGBA{200, 0, 0, 255},
}
// Plot plots measurements into filename as an svg. Each measurement gets a
// row with a time-density subplot and (for speed-capable results) a
// speed-density subplot.
//
// Fixes: the speed density was constructed but never appended to the plot
// group, so the speed subplot rendered empty; the local also shadowed the
// `speed` axis group. The output file is now written 0644 (it is data, not
// an executable).
func Plot(filename string, measurements []Measurement) error {
	p := plot.New()
	p.X.Min = 0
	p.X.Max = 10
	p.X.MajorTicks = 10
	p.X.MinorTicks = 10
	// Shared axis configuration for all speed subplots.
	speed := plot.NewAxisGroup()
	speed.Y.Min = 0
	speed.Y.Max = 1
	speed.X.Min = 0
	speed.X.Max = 30
	speed.X.MajorTicks = 10
	speed.X.MinorTicks = 10
	rows := plot.NewVStack()
	rows.Margin = plot.R(5, 5, 5, 5)
	p.Add(rows)
	for _, m := range measurements {
		row := plot.NewHFlex()
		rows.Add(row)
		row.Add(35, plot.NewTextbox(m.Size.String()))
		plots := plot.NewVStack()
		row.Add(0, plots)
		{ // time plotting
			group := []plot.Element{plot.NewGrid()}
			for i, result := range m.Results {
				time := plot.NewDensity("s", asSeconds(result.Durations))
				time.Stroke = palette[i%len(palette)]
				group = append(group, time)
			}
			group = append(group, plot.NewTickLabels())
			flexTime := plot.NewHFlex()
			plots.Add(flexTime)
			flexTime.Add(70, plot.NewTextbox("time (s)"))
			flexTime.AddGroup(0, group...)
		}
		{ // speed plotting
			group := []plot.Element{plot.NewGrid()}
			for i, result := range m.Results {
				if !result.WithSpeed {
					continue
				}
				density := plot.NewDensity("MB/s", asSpeed(result.Durations, m.Size.Int64()))
				density.Stroke = palette[i%len(palette)]
				// was missing: without this append the density was discarded
				group = append(group, density)
			}
			group = append(group, plot.NewTickLabels())
			flexSpeed := plot.NewHFlex()
			plots.Add(flexSpeed)
			speedGroup := plot.NewAxisGroup()
			speedGroup.X, speedGroup.Y = speed.X, speed.Y
			speedGroup.AddGroup(group...)
			flexSpeed.Add(70, plot.NewTextbox("speed (MB/s)"))
			flexSpeed.AddGroup(0, speedGroup)
		}
	}
	svgCanvas := plot.NewSVG(1500, 150*float64(len(measurements)))
	p.Draw(svgCanvas)
	return ioutil.WriteFile(filename, svgCanvas.Bytes(), 0644)
}
func asSeconds(durations []time.Duration) []float64 {
xs := make([]float64, 0, len(durations))
for _, dur := range durations {
xs = append(xs, dur.Seconds())
}
return xs
}
func asSpeed(durations []time.Duration, size int64) []float64 {
const MB = 1 << 20
xs := make([]float64, 0, len(durations))
for _, dur := range durations {
xs = append(xs, (float64(size)/MB)/dur.Seconds())
}
return xs
}

View File

@ -1,254 +0,0 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.
package cmd_test
import (
"encoding/hex"
"errors"
"log"
"os"
"sync"
"testing"
"storj.io/common/memory"
"storj.io/common/testrand"
"storj.io/storj/private/s3client"
)
var benchmarkCases = []struct {
name string
objectsize memory.Size
}{
{"100B", 100 * memory.B},
{"1MB", 1 * memory.MiB},
{"10MB", 10 * memory.MiB},
{"100MB", 100 * memory.MiB},
{"1G", 1 * memory.GiB},
}
// testobjectData caches the generated test payloads; the embedded
// sync.Once guarantees they are generated exactly once per process.
var testobjectData struct {
	sync.Once
	objects map[string][]byte
}
// testObjects returns test objects (i.e. slice of bytes) that
// will be used as the objects to upload/download tests.
// Lazily generates one random payload per benchmark case on first call.
func testObjects() map[string][]byte {
	testobjectData.Do(func() {
		objects := make(map[string][]byte)
		for _, bm := range benchmarkCases {
			objects[bm.name] = testrand.Bytes(bm.objectsize)
		}
		testobjectData.objects = objects
	})
	return testobjectData.objects
}
// uplinkSetup builds an uplink-backed s3client and creates the given
// bucket, aborting the process on any failure.
func uplinkSetup(bucket string) s3client.Client {
	conf, err := setupConfig()
	if err != nil {
		log.Fatalf("failed to setup s3client config: %+v\n", err)
	}
	client, err := s3client.NewUplink(conf)
	if err != nil {
		log.Fatalf("failed to create s3client NewUplink: %+v\n", err)
	}
	if err = client.MakeBucket(bucket, ""); err != nil {
		log.Fatalf("failed to create bucket with s3client %q: %+v\n", bucket, err)
	}
	return client
}
// setupConfig assembles an uplink client config from the environment,
// requiring an API key via $GATEWAY_0_API_KEY or $apiKey.
func setupConfig() (s3client.Config, error) {
	const (
		uplinkEncryptionKey  = "supersecretkey"
		defaultSatelliteAddr = "127.0.0.1:10000"
	)
	conf := s3client.Config{
		EncryptionKey: uplinkEncryptionKey,
		Satellite:     getEnvOrDefault("SATELLITE_0_ADDR", defaultSatelliteAddr),
		APIKey:        getEnvOrDefault("GATEWAY_0_API_KEY", os.Getenv("apiKey")),
	}
	if conf.APIKey == "" {
		return conf, errors.New("no api key provided. Expecting an env var $GATEWAY_0_API_KEY or $apiKey")
	}
	return conf, nil
}
func getEnvOrDefault(key, fallback string) string {
if value, exists := os.LookupEnv(key); exists {
return value
}
return fallback
}
// BenchmarkUpload_Uplink measures upload throughput via the uplink client
// and removes everything it created afterwards.
func BenchmarkUpload_Uplink(b *testing.B) {
	const bucket = "testbucket"
	client := uplinkSetup(bucket)
	// Track every uploaded object path per case so teardown can delete them.
	uploaded := benchmarkUpload(b, client, bucket, map[string][]string{})
	teardown(client, bucket, uploaded)
}
// teardown deletes all recorded objects (logging, not aborting, on
// per-object failure) and then removes the bucket itself.
func teardown(client s3client.Client, bucket string, uploadedObjects map[string][]string) {
	for _, bm := range benchmarkCases {
		for _, objectPath := range uploadedObjects[bm.name] {
			if err := client.Delete(bucket, objectPath); err != nil {
				log.Printf("failed to delete object %q: %+v\n", objectPath, err)
			}
		}
	}
	if err := client.RemoveBucket(bucket); err != nil {
		log.Fatalf("failed to remove bucket %q: %+v\n", bucket, err)
	}
}
// BenchmarkDownload_Uplink measures download throughput via the uplink
// client against pre-uploaded fixture objects.
func BenchmarkDownload_Uplink(b *testing.B) {
	const bucket = "testbucket"
	client := uplinkSetup(bucket)
	// Seed the bucket so there is something to download.
	uploadTestObjects(client, bucket)
	benchmarkDownload(b, bucket, client)
	teardownTestObjects(client, bucket)
}
// uploadTestObjects uploads one fixture object per benchmark case under
// "folder/data_<name>", aborting the process on failure.
func uploadTestObjects(client s3client.Client, bucket string) {
	for name, data := range testObjects() {
		path := "folder/data_" + name
		if err := client.Upload(bucket, path, data); err != nil {
			log.Fatalf("failed to upload object %q: %+v\n", path, err)
		}
	}
}
// teardownTestObjects deletes the fixture objects created by
// uploadTestObjects and then removes the bucket.
func teardownTestObjects(client s3client.Client, bucket string) {
	for name := range testObjects() {
		path := "folder/data_" + name
		if err := client.Delete(bucket, path); err != nil {
			log.Fatalf("failed to delete object %q: %+v\n", path, err)
		}
	}
	if err := client.RemoveBucket(bucket); err != nil {
		log.Fatalf("failed to remove bucket %q: %+v\n", bucket, err)
	}
}
// benchmarkUpload runs one upload sub-benchmark per case, uploading the
// cached test payload under a unique random path each iteration. Every
// uploaded path is appended to uploadedObjects (keyed by case name) so the
// caller can clean up; the updated map is returned.
func benchmarkUpload(b *testing.B, client s3client.Client, bucket string, uploadedObjects map[string][]string) map[string][]string {
	for _, bm := range benchmarkCases {
		benchmark := bm // capture range variable for the closure below
		b.Run(benchmark.name, func(b *testing.B) {
			// Report throughput in bytes/op based on the object size.
			b.SetBytes(benchmark.objectsize.Int64())
			b.ResetTimer()
			for i := 0; i < b.N; i++ {
				// make some random bytes so the objectPath is unique
				randomBytes := testrand.Bytes(16)
				uniquePathPart := hex.EncodeToString(randomBytes)
				objectPath := "folder/data" + uniquePathPart + "_" + benchmark.name
				err := client.Upload(bucket, objectPath, testObjects()[benchmark.name])
				if err != nil {
					log.Fatalf("failed to upload object %q: %+v\n", objectPath, err)
				}
				if uploadedObjects[benchmark.name] == nil {
					uploadedObjects[benchmark.name] = []string{}
				}
				uploadedObjects[benchmark.name] = append(uploadedObjects[benchmark.name], objectPath)
			}
		})
	}
	return uploadedObjects
}
// benchmarkDownload runs one download sub-benchmark per case, repeatedly
// downloading the fixture object "folder/data_<name>" into a reusable
// buffer and verifying the downloaded length matches the expected size.
func benchmarkDownload(b *testing.B, bucket string, client s3client.Client) {
	for _, bm := range benchmarkCases {
		benchmark := bm // capture range variable for the closure below
		b.Run(benchmark.name, func(b *testing.B) {
			// was make([]byte, benchmark.objectsize): relied on an implicit
			// memory.Size->length conversion; use .Int() like the rest of
			// the file for consistency and clarity.
			buf := make([]byte, benchmark.objectsize.Int())
			b.SetBytes(benchmark.objectsize.Int64())
			b.ResetTimer()
			for i := 0; i < b.N; i++ {
				objectName := "folder/data_" + benchmark.name
				out, err := client.Download(bucket, objectName, buf)
				if err != nil {
					log.Fatalf("failed to download object %q: %+v\n", objectName, err)
				}
				expectedBytes := benchmark.objectsize.Int()
				actualBytes := len(out)
				if actualBytes != expectedBytes {
					log.Fatalf("err downloading object %q: Expected %d bytes, but got actual bytes: %d\n",
						objectName, expectedBytes, actualBytes,
					)
				}
			}
		})
	}
}
// s3ClientSetup builds an aws-cli-backed s3client pointed at AWS S3 and
// creates the given bucket in us-east-1, aborting the process on failure.
func s3ClientSetup(bucket string) s3client.Client {
	conf, err := s3ClientConfigSetup()
	if err != nil {
		log.Fatalf("failed to setup s3client config: %+v\n", err)
	}
	client, err := s3client.NewAWSCLI(conf)
	if err != nil {
		// was "failed to create s3client NewUplink" — copy-paste error;
		// this path constructs the AWS CLI client, not the uplink one.
		log.Fatalf("failed to create s3client NewAWSCLI: %+v\n", err)
	}
	const region = "us-east-1"
	err = client.MakeBucket(bucket, region)
	if err != nil {
		log.Fatalf("failed to create bucket with s3client %q: %+v\n", bucket, err)
	}
	return client
}
// s3ClientConfigSetup builds an aws-cli config targeting AWS S3 using
// credentials from the standard AWS environment variables.
func s3ClientConfigSetup() (s3client.Config, error) {
	conf := s3client.Config{
		S3Gateway: "https://s3.amazonaws.com/",
		AccessKey: os.Getenv("AWS_ACCESS_KEY_ID"),
		SecretKey: os.Getenv("AWS_SECRET_ACCESS_KEY"),
	}
	if conf.AccessKey == "" || conf.SecretKey == "" {
		return conf, errors.New("expecting environment variables $AWS_ACCESS_KEY_ID and $AWS_SECRET_ACCESS_KEY to be set")
	}
	return conf, nil
}
// BenchmarkUpload_S3 measures upload throughput against AWS S3 via the
// aws-cli client and cleans up everything it created.
func BenchmarkUpload_S3(b *testing.B) {
	const bucket = "testbucket3bgdp2xbkkflxc2tallstvh6pb824r"
	client := s3ClientSetup(bucket)
	// Track uploads per case so teardown can delete them all.
	uploaded := benchmarkUpload(b, client, bucket, map[string][]string{})
	teardown(client, bucket, uploaded)
}
// BenchmarkDownload_S3 measures download throughput against AWS S3 via the
// aws-cli client, using pre-uploaded fixture objects.
func BenchmarkDownload_S3(b *testing.B) {
	const bucket = "testbucket3bgdp2xbkkflxc2tallstvh6pb826a"
	client := s3ClientSetup(bucket)
	// Seed the bucket so there is something to download.
	uploadTestObjects(client, bucket)
	benchmarkDownload(b, bucket, client)
	teardownTestObjects(client, bucket)
}

2
go.mod
View File

@ -27,8 +27,6 @@ require (
github.com/jtolds/monkit-hw/v2 v2.0.0-20191108235325-141a0da276b3
github.com/jtolds/tracetagger/v2 v2.0.0-rc5
github.com/lib/pq v1.3.0
github.com/loov/hrtime v0.0.0-20181214195526-37a208e8344e
github.com/loov/plot v0.0.0-20180510142208-e59891ae1271
github.com/mattn/go-isatty v0.0.9 // indirect
github.com/mattn/go-sqlite3 v2.0.2+incompatible
github.com/nsf/jsondiff v0.0.0-20160203110537-7de28ed2b6e3

8
go.sum
View File

@ -240,10 +240,6 @@ github.com/kshvakov/clickhouse v1.3.5/go.mod h1:DMzX7FxRymoNkVgizH0DWAL8Cur7wHLg
github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
github.com/lib/pq v1.3.0 h1:/qkRGz8zljWiDcFvgpwUpwIAPu3r07TDvs3Rws+o/pU=
github.com/lib/pq v1.3.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
github.com/loov/hrtime v0.0.0-20181214195526-37a208e8344e h1:UC+nLCm+w3WL+ibAW/wsWbQC3KAz7LLawR2hgX0eR9s=
github.com/loov/hrtime v0.0.0-20181214195526-37a208e8344e/go.mod h1:2871C3urfEJnq/bpTYjFdMOdgxVd8otLLEL6vMNy/Iw=
github.com/loov/plot v0.0.0-20180510142208-e59891ae1271 h1:51ToN6N0TDtCruf681gufYuEhO9qFHQzM3RFTS/n6XE=
github.com/loov/plot v0.0.0-20180510142208-e59891ae1271/go.mod h1:3yy5HBPbe5e1UmEffbO0n0g6A8h6ChHaCTeundr6H60=
github.com/magiconair/properties v1.8.0 h1:LLgXmsheXeRoUOBOjtwPQCWIYqM/LU1ayDtDePerRcY=
github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
github.com/mattn/go-colorable v0.1.2 h1:/bC9yWikZXAL9uJdulbSfyVNIR3n3trXl+v8+1sx8mU=
@ -583,14 +579,10 @@ honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
storj.io/common v0.0.0-20200318103328-b7e942ff9304 h1:KQ1ITzbT32bAyBx5gPYQFkMs86iUGA1Sl+QvsMQuJZc=
storj.io/common v0.0.0-20200318103328-b7e942ff9304/go.mod h1:I0QTs7z1rI+ZEN95GGY2LKMzP5OZqu0Udga3WhyQfO0=
storj.io/common v0.0.0-20200319165559-1fc2508a7284 h1:Bym7jVTXPdZ5cHEH3tey/RR5ps3vG1Ru9ucDVlJWIxs=
storj.io/common v0.0.0-20200319165559-1fc2508a7284/go.mod h1:I0QTs7z1rI+ZEN95GGY2LKMzP5OZqu0Udga3WhyQfO0=
storj.io/common v0.0.0-20200320083002-07faa8a64ad8 h1:DgWXFCldvyu1drZKKL0D3vlHSkjmBsvBqr8nXg59SFs=
storj.io/common v0.0.0-20200320083002-07faa8a64ad8/go.mod h1:I0QTs7z1rI+ZEN95GGY2LKMzP5OZqu0Udga3WhyQfO0=
storj.io/drpc v0.0.7-0.20191115031725-2171c57838d2/go.mod h1:/ascUDbzNAv0A3Jj7wUIKFBH2JdJ2uJIBO/b9+2yHgQ=
storj.io/drpc v0.0.8 h1:wu68cMmtoT0vSWIAZz29RpJkWdi4o0S8BIrLslpH5FQ=
storj.io/drpc v0.0.8/go.mod h1:v39uWro/EbXXk+gNnrM9FQuVVS2zUBWBfeduydgeXUA=
storj.io/uplink v1.0.0-rc.5.0.20200318180255-45744be46610 h1:Bu6pS7+LDHpc+EyujFCp+O5p7b2HIH080p0Cic5ELFo=
storj.io/uplink v1.0.0-rc.5.0.20200318180255-45744be46610/go.mod h1:2VBdNnx7zZoCx91ETUVM56mDRIFDToRyKBWxH/Usrek=
storj.io/uplink v1.0.0 h1:2tj09e88/2CuGMguS9qyVIEPX2NDE4W/1vMf4bdUCyA=
storj.io/uplink v1.0.0/go.mod h1:2VBdNnx7zZoCx91ETUVM56mDRIFDToRyKBWxH/Usrek=

View File

@ -1,205 +0,0 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.
package s3client
import (
	"bytes"
	"encoding/json"
	"errors"
	"fmt"
	"os"
	"os/exec"
	"strings"

	"github.com/zeebo/errs"
)
// AWSCLIError is class for minio errors
// (note: wraps failures coming from the external `aws` binary).
var AWSCLIError = errs.Class("aws-cli error")
// AWSCLI implements basic S3 Client with aws-cli
// by shelling out to the `aws` command for every operation.
type AWSCLI struct {
	conf Config // endpoint and credential configuration passed to each invocation
}
// NewAWSCLI creates a Client backed by the external aws-cli binary,
// defaulting the gateway scheme to http:// when none is given.
func NewAWSCLI(conf Config) (Client, error) {
	hasScheme := strings.HasPrefix(conf.S3Gateway, "https://") ||
		strings.HasPrefix(conf.S3Gateway, "http://")
	if !hasScheme {
		conf.S3Gateway = "http://" + conf.S3Gateway
	}
	return &AWSCLI{conf}, nil
}
// cmd builds an `aws` invocation pointed at the configured endpoint, with
// credentials injected through the environment rather than flags.
func (client *AWSCLI) cmd(subargs ...string) *exec.Cmd {
	args := []string{"--endpoint", client.conf.S3Gateway}
	if client.conf.NoSSL {
		args = append(args, "--no-verify-ssl")
	}
	cmd := exec.Command("aws", append(args, subargs...)...)
	cmd.Env = append(os.Environ(),
		"AWS_ACCESS_KEY_ID="+client.conf.AccessKey,
		"AWS_SECRET_ACCESS_KEY="+client.conf.SecretKey,
	)
	return cmd
}
// MakeBucket creates bucket in the given region via `aws s3 mb`.
func (client *AWSCLI) MakeBucket(bucket, location string) error {
	if _, err := client.cmd("s3", "mb", "s3://"+bucket, "--region", location).Output(); err != nil {
		return AWSCLIError.Wrap(fullExitError(err))
	}
	return nil
}
// RemoveBucket deletes bucket via `aws s3 rb`.
func (client *AWSCLI) RemoveBucket(bucket string) error {
	if _, err := client.cmd("s3", "rb", "s3://"+bucket).Output(); err != nil {
		return AWSCLIError.Wrap(fullExitError(err))
	}
	return nil
}
// ListBuckets returns the names of all buckets by parsing the JSON output
// of `aws s3api list-buckets`.
func (client *AWSCLI) ListBuckets() ([]string, error) {
	jsondata, err := client.cmd("s3api", "list-buckets", "--output", "json").Output()
	if err != nil {
		return nil, AWSCLIError.Wrap(fullExitError(err))
	}
	var response struct {
		Buckets []struct {
			Name string `json:"Name"`
		} `json:"Buckets"`
	}
	if err := json.Unmarshal(jsondata, &response); err != nil {
		return nil, AWSCLIError.Wrap(fullExitError(err))
	}
	names := make([]string, 0, len(response.Buckets))
	for _, bucket := range response.Buckets {
		names = append(names, bucket.Name)
	}
	return names, nil
}
// Upload streams data to bucket/objectName through `aws s3 cp -`.
func (client *AWSCLI) Upload(bucket, objectName string, data []byte) error {
	// TODO: add upload threshold
	cmd := client.cmd("s3", "cp", "-", "s3://"+bucket+"/"+objectName)
	cmd.Stdin = bytes.NewReader(data)
	if _, err := cmd.Output(); err != nil {
		return AWSCLIError.Wrap(fullExitError(err))
	}
	return nil
}
// UploadMultipart uses multipart uploads, has hardcoded threshold.
// NOTE(review): the threshold parameter is currently ignored — this body is
// identical to Upload and relies on the aws-cli's own multipart defaults;
// the TODO below tracks wiring the threshold through.
func (client *AWSCLI) UploadMultipart(bucket, objectName string, data []byte, threshold int) error {
	// TODO: add upload threshold
	cmd := client.cmd("s3", "cp", "-", "s3://"+bucket+"/"+objectName)
	cmd.Stdin = bytes.NewReader(data)
	_, err := cmd.Output()
	if err != nil {
		return AWSCLIError.Wrap(fullExitError(err))
	}
	return nil
}
// Download fetches bucket/objectName via `aws s3 cp ... -`, collecting
// stdout into the supplied buffer's storage (reset to length zero first).
func (client *AWSCLI) Download(bucket, objectName string, buffer []byte) ([]byte, error) {
	sink := &bufferWriter{buffer[:0]}
	cmd := client.cmd("s3", "cp", "s3://"+bucket+"/"+objectName, "-")
	cmd.Stdout = sink
	cmd.Stderr = os.Stderr
	if err := cmd.Run(); err != nil {
		return nil, AWSCLIError.Wrap(fullExitError(err))
	}
	return sink.data, nil
}
// bufferWriter is a minimal io.Writer that accumulates everything
// written into data.
type bufferWriter struct {
	data []byte
}

// Write appends p to the accumulated data; it never fails.
func (b *bufferWriter) Write(p []byte) (int, error) {
	b.data = append(b.data, p...)
	return len(p), nil
}
// Delete removes bucket/objectName via `aws s3 rm`.
func (client *AWSCLI) Delete(bucket, objectName string) error {
	if _, err := client.cmd("s3", "rm", "s3://"+bucket+"/"+objectName).Output(); err != nil {
		return AWSCLIError.Wrap(fullExitError(err))
	}
	return nil
}
// ListObjects lists objects and common prefixes ("folders") under prefix in
// bucket, one level deep (delimiter "/"), by parsing `aws s3api list-objects`
// JSON output. Returned names mix object keys and prefix strings.
func (client *AWSCLI) ListObjects(bucket, prefix string) ([]string, error) {
	cmd := client.cmd("s3api", "list-objects",
		"--output", "json",
		"--bucket", bucket,
		"--prefix", prefix,
		"--delimiter", "/")
	jsondata, err := cmd.Output()
	if err != nil {
		return nil, AWSCLIError.Wrap(fullExitError(err))
	}
	// Contents holds direct objects; CommonPrefixes holds the "folders"
	// collapsed by the delimiter.
	var response struct {
		Contents []struct {
			Key string `json:"Key"`
		} `json:"Contents"`
		CommonPrefixes []struct {
			Key string `json:"Prefix"`
		} `json:"CommonPrefixes"`
	}
	err = json.Unmarshal(jsondata, &response)
	if err != nil {
		return nil, AWSCLIError.Wrap(fullExitError(err))
	}
	names := []string{}
	for _, object := range response.Contents {
		names = append(names, object.Key)
	}
	for _, object := range response.CommonPrefixes {
		names = append(names, object.Key)
	}
	return names, nil
}
// fullExitError returns error string with the Stderr output
func fullExitError(err error) error {
if err == nil {
return nil
}
if exitErr, ok := err.(*exec.ExitError); ok {
return fmt.Errorf("%v\n%v", exitErr.Error(), string(exitErr.Stderr))
}
return err
}

View File

@ -1,28 +0,0 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.
package s3client
// Config is the setup for a particular client
type Config struct {
	S3Gateway     string // address (or URL) of the s3-compatible gateway
	Satellite     string // satellite address (uplink client)
	AccessKey     string // s3 access key (aws-cli/minio clients)
	SecretKey     string // s3 secret key (aws-cli/minio clients)
	APIKey        string // API key (uplink client)
	EncryptionKey string // encryption key (uplink client)
	NoSSL         bool   // disable TLS / certificate verification
	ConfigDir     string // existing config dir to reuse; empty means create one
}
// Client is the common interface for different implementations
// (aws-cli, minio, uplink) used by the benchmarks.
type Client interface {
	// MakeBucket creates a bucket in the given location/region.
	MakeBucket(bucket, location string) error
	// RemoveBucket deletes a bucket.
	RemoveBucket(bucket string) error
	// ListBuckets returns all bucket names.
	ListBuckets() ([]string, error)
	// Upload writes data to bucket/objectName.
	Upload(bucket, objectName string, data []byte) error
	// Download reads bucket/objectName; implementations may reuse buffer's storage.
	Download(bucket, objectName string, buffer []byte) ([]byte, error)
	// Delete removes bucket/objectName.
	Delete(bucket, objectName string) error
	// ListObjects returns object names under prefix in bucket.
	ListObjects(bucket, prefix string) ([]string, error)
}

View File

@ -1,138 +0,0 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.
// This is disabled for the time-being to allow removing minio dependencies from
// storj.io/storj package.
// +build ignore
package s3client
import (
"bytes"
"io"
"io/ioutil"
minio "github.com/minio/minio-go"
"github.com/zeebo/errs"
)
// MinioError is class for minio errors
var MinioError = errs.Class("minio error")
// Minio implements basic S3 Client with minio
// using the minio-go SDK directly (no external process).
type Minio struct {
	api *minio.Client // underlying minio SDK client
}
// NewMinio creates a Client backed by the minio SDK; TLS is enabled
// unless conf.NoSSL is set.
func NewMinio(conf Config) (Client, error) {
	useSSL := !conf.NoSSL
	api, err := minio.New(conf.S3Gateway, conf.AccessKey, conf.SecretKey, useSSL)
	if err != nil {
		return nil, MinioError.Wrap(err)
	}
	return &Minio{api: api}, nil
}
// MakeBucket creates bucket in the given location.
func (client *Minio) MakeBucket(bucket, location string) error {
	if err := client.api.MakeBucket(bucket, location); err != nil {
		return MinioError.Wrap(err)
	}
	return nil
}
// RemoveBucket deletes bucket.
func (client *Minio) RemoveBucket(bucket string) error {
	if err := client.api.RemoveBucket(bucket); err != nil {
		return MinioError.Wrap(err)
	}
	return nil
}
// ListBuckets returns the names of all buckets.
func (client *Minio) ListBuckets() ([]string, error) {
	buckets, err := client.api.ListBuckets()
	if err != nil {
		return nil, MinioError.Wrap(err)
	}
	names := make([]string, 0, len(buckets))
	for _, bucket := range buckets {
		names = append(names, bucket.Name)
	}
	return names, nil
}
// Upload writes data to bucket/objectName with a known content length.
func (client *Minio) Upload(bucket, objectName string, data []byte) error {
	opts := minio.PutObjectOptions{ContentType: "application/octet-stream"}
	if _, err := client.api.PutObject(
		bucket, objectName,
		bytes.NewReader(data), int64(len(data)),
		opts); err != nil {
		return MinioError.Wrap(err)
	}
	return nil
}
// UploadMultipart uses multipart uploads, has hardcoded threshold.
// Passing -1 as the size makes the SDK stream the reader, which lets its
// internal multipart logic apply. NOTE(review): the threshold parameter is
// currently ignored — behavior depends on the SDK's defaults.
func (client *Minio) UploadMultipart(bucket, objectName string, data []byte, threshold int) error {
	_, err := client.api.PutObject(
		bucket, objectName,
		bytes.NewReader(data), -1,
		minio.PutObjectOptions{ContentType: "application/octet-stream"})
	if err != nil {
		return MinioError.Wrap(err)
	}
	return nil
}
// Download reads bucket/objectName, reusing buffer's backing storage when
// large enough and growing it otherwise. Returns the downloaded bytes.
//
// Fixes a buffer-stitching bug: the first Read fills buffer[:cap(buffer)]
// and returns n, but the old code appended the remainder at the incoming
// len(buffer) rather than at n, corrupting the result whenever the caller's
// slice length differed from the bytes actually read.
func (client *Minio) Download(bucket, objectName string, buffer []byte) ([]byte, error) {
	reader, err := client.api.GetObject(bucket, objectName, minio.GetObjectOptions{})
	if err != nil {
		return nil, MinioError.Wrap(err)
	}
	defer func() { _ = reader.Close() }()
	n, err := reader.Read(buffer[:cap(buffer)])
	// Trim to exactly the bytes the first read produced before appending.
	buffer = buffer[:n]
	if err != io.EOF {
		// Either more data remains or a non-EOF error occurred; ReadAll
		// drains the rest (and surfaces any real error).
		rest, err := ioutil.ReadAll(reader)
		if err == io.EOF {
			err = nil
		}
		if err != nil {
			return nil, MinioError.Wrap(err)
		}
		buffer = append(buffer, rest...)
	}
	return buffer, nil
}
// Delete removes bucket/objectName.
func (client *Minio) Delete(bucket, objectName string) error {
	if err := client.api.RemoveObject(bucket, objectName); err != nil {
		return MinioError.Wrap(err)
	}
	return nil
}
// ListObjects returns the keys under prefix in bucket (non-recursive).
// The done channel stops the SDK's listing goroutine when we return.
func (client *Minio) ListObjects(bucket, prefix string) ([]string, error) {
	done := make(chan struct{})
	defer close(done)
	names := []string{}
	for info := range client.api.ListObjects(bucket, prefix, false, done) {
		names = append(names, info.Key)
	}
	return names, nil
}

View File

@ -1,145 +0,0 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.
package s3client
import (
"bytes"
"fmt"
"os"
"os/exec"
"strings"
"github.com/zeebo/errs"
"storj.io/common/fpath"
)
// UplinkError is class for minio errors
// (note: wraps failures coming from the external `uplink` binary).
var UplinkError = errs.Class("uplink error")
// Uplink implements basic S3 Client with uplink
// by shelling out to the `uplink` command for every operation.
type Uplink struct {
	conf Config // config dir and credentials used by each invocation
}
// NewUplink creates new Client backed by the external `uplink` binary.
// When conf.ConfigDir is set, that existing config is reused as-is;
// otherwise a fresh config dir is created (deleting any previous one at the
// default application path) and `uplink setup` is run non-interactively.
func NewUplink(conf Config) (Client, error) {
	client := &Uplink{conf}
	if client.conf.ConfigDir != "" {
		fmt.Printf("Using existing uplink config at %s\n", client.conf.ConfigDir)
		return client, nil
	}
	client.conf.ConfigDir = fpath.ApplicationDir("storj", "s3-client", "uplink")
	// remove existing s3client uplink config so that
	// we can create a new one with up-to-date settings
	err := os.RemoveAll(client.conf.ConfigDir)
	if err != nil {
		return nil, UplinkError.Wrap(fullExitError(err))
	}
	// Initialize the config dir via `uplink setup` with the provided
	// credentials; any stderr from the binary is folded into the error.
	cmd := client.cmd("--config-dir", client.conf.ConfigDir,
		"setup",
		"--non-interactive", "true",
		"--api-key", client.conf.APIKey,
		"--enc.encryption-key", client.conf.EncryptionKey,
		"--satellite-addr", client.conf.Satellite,
	)
	_, err = cmd.Output()
	if err != nil {
		return nil, UplinkError.Wrap(fullExitError(err))
	}
	return client, nil
}
// cmd builds an `uplink` invocation that always targets the client's
// config dir, followed by the given subcommand arguments.
func (client *Uplink) cmd(subargs ...string) *exec.Cmd {
	args := append([]string{"--config-dir", client.conf.ConfigDir}, subargs...)
	return exec.Command("uplink", args...)
}
// MakeBucket creates bucket via `uplink mb`; location is unused by the
// uplink binary and therefore ignored.
func (client *Uplink) MakeBucket(bucket, location string) error {
	if _, err := client.cmd("mb", "s3://"+bucket).Output(); err != nil {
		return UplinkError.Wrap(fullExitError(err))
	}
	return nil
}
// RemoveBucket deletes bucket via `uplink rb`.
func (client *Uplink) RemoveBucket(bucket string) error {
	if _, err := client.cmd("rb", "s3://"+bucket).Output(); err != nil {
		return UplinkError.Wrap(fullExitError(err))
	}
	return nil
}
// ListBuckets lists all buckets by parsing the line-oriented output of `uplink ls`.
func (client *Uplink) ListBuckets() ([]string, error) {
	cmd := client.cmd("ls")
	data, err := cmd.Output()
	if err != nil {
		return nil, UplinkError.Wrap(fullExitError(err))
	}
	trimmed := strings.TrimRight(string(data), "\n")
	if trimmed == "" {
		// No output means no buckets; without this check strings.Split
		// would return a single empty bucket name.
		return []string{}, nil
	}
	return strings.Split(trimmed, "\n"), nil
}
// Upload streams data to the object path by feeding it to `uplink put` on stdin.
func (client *Uplink) Upload(bucket, objectName string, data []byte) error {
	// TODO: add upload threshold
	put := client.cmd("put", "s3://"+bucket+"/"+objectName)
	put.Stdin = bytes.NewReader(data)
	if _, err := put.Output(); err != nil {
		return UplinkError.Wrap(fullExitError(err))
	}
	return nil
}
// Download fetches the object's contents via `uplink cat` and returns them.
// NOTE(review): the buffer argument is never used; the CLI output is returned
// as a freshly allocated slice — confirm callers expect that.
func (client *Uplink) Download(bucket, objectName string, buffer []byte) ([]byte, error) {
	out, err := client.cmd("cat", "s3://"+bucket+"/"+objectName).Output()
	if err != nil {
		return nil, UplinkError.Wrap(fullExitError(err))
	}
	return out, nil
}
// Delete removes the object via `uplink rm`.
func (client *Uplink) Delete(bucket, objectName string) error {
	if _, err := client.cmd("rm", "s3://"+bucket+"/"+objectName).Output(); err != nil {
		return UplinkError.Wrap(fullExitError(err))
	}
	return nil
}
// ListObjects lists objects under the prefix by parsing the line-oriented
// output of `uplink ls`.
func (client *Uplink) ListObjects(bucket, prefix string) ([]string, error) {
	cmd := client.cmd("ls", "s3://"+bucket+"/"+prefix)
	data, err := cmd.Output()
	if err != nil {
		return nil, UplinkError.Wrap(fullExitError(err))
	}
	trimmed := strings.TrimRight(string(data), "\n")
	if trimmed == "" {
		// No output means no matching objects; without this check
		// strings.Split would return a single empty object name.
		return []string{}, nil
	}
	return strings.Split(trimmed, "\n"), nil
}

View File

@ -1,136 +0,0 @@
#!/bin/bash
# Copyright (C) 2019 Storj Labs, Inc.
# See LICENSE for copying information.

# Benchmarks upload/download/delete latency against aws s3 or storj by timing
# each operation with `/usr/bin/time -p` and reporting latency percentiles.

usage="Usage: $(basename "$0") <service> <command> <file_name> <bucket> <operations>"

# All five positional arguments are required (the original only checked the
# first three, letting an empty bucket/operation count fail later and cryptically).
for arg in "$1" "$2" "$3" "$4" "$5"; do
	if [ "$arg" == "" ]; then
		echo "$usage" >&2
		exit 1
	fi
done

# SETTINGS
SERVICE=$1
COMMAND=$2
FILE=$3
BUCKET=$4
OPERATIONS=$5
FILENAME="$(basename -- "$FILE")"
DOWNLOAD_DIR="/tmp/"
DOWNLOAD_FILE="$DOWNLOAD_DIR$FILENAME"
EXEC_COMMAND=""
UPLOAD_COMMAND=""
DOWNLOAD_COMMAND=""
DELETE_COMMAND=""
LOG_FILE=$SERVICE"_"$COMMAND".log"
RESULTS_FILE=$SERVICE"_"$COMMAND"_results.log"
FAILURES=0

# Timed operation helpers. Each uses `/usr/bin/time -p` so its output contains
# a "real <seconds>" line, which the benchmark loop extracts with awk.
# (The original omitted -p on the aws upload/delete helpers, so no timing was
# ever captured for them.)
function exec_aws_upload() {
	/usr/bin/time -p aws s3 cp "$FILE" "s3://$BUCKET" 2>&1 | tail -n 3
}
function exec_aws_download() {
	/usr/bin/time -p aws s3 cp "s3://$BUCKET/$FILENAME" "$DOWNLOAD_FILE" 2>&1 | tail -n 3
}
function exec_aws_delete() {
	# re-upload first so there is always an object to delete
	aws s3 cp "$FILE" "s3://$BUCKET" 2>&1
	# `aws s3 rm` takes only the s3 url of the object; the original passed the
	# local file as an extra argument, which is invalid.
	/usr/bin/time -p aws s3 rm "s3://$BUCKET/$FILENAME" 2>&1 | tail -n 3
}
function exec_storj_create_bucket() {
	/usr/bin/time -p uplink --log.level error --log.output /tmp/storj.log mb "sj://$BUCKET" 2>&1
	return
}
function exec_storj_upload() {
	/usr/bin/time -p uplink --log.level error --log.output /tmp/storj.log cp "$FILE" "sj://$BUCKET/$FILENAME" 2>&1 | tail -n 3
}
function exec_storj_download() {
	/usr/bin/time -p uplink --log.level error --log.output /tmp/storj.log cp "sj://$BUCKET/$FILENAME" "$DOWNLOAD_FILE" 2>&1 | tail -n 3
}
function exec_storj_delete() {
	# re-upload first so there is always an object to delete.
	# (The original referenced $UPLINK_COMMAND, which was never defined.)
	uplink --log.level error --log.output /tmp/storj.log cp "$FILE" "sj://$BUCKET/$FILENAME" 2>&1
	/usr/bin/time -p uplink --log.level error --log.output /tmp/storj.log rm "sj://$BUCKET/$FILENAME" 2>&1 | tail -n 3
}

# Pick the per-service helpers.
if [ "$SERVICE" == "aws" ]; then
	UPLOAD_COMMAND="exec_aws_upload"
	DOWNLOAD_COMMAND="exec_aws_download"
	DELETE_COMMAND="exec_aws_delete"
elif [ "$SERVICE" == "storj" ]; then
	UPLOAD_COMMAND="exec_storj_upload"
	DOWNLOAD_COMMAND="exec_storj_download"
	DELETE_COMMAND="exec_storj_delete"
fi

# Pick the benchmarked command. (The delete helpers existed in the original
# but EXEC_COMMAND was never wired up for "delete", so it silently ran nothing.)
if [ "$COMMAND" == "upload" ]; then
	EXEC_COMMAND=$UPLOAD_COMMAND
elif [ "$COMMAND" == "download" ]; then
	EXEC_COMMAND=$DOWNLOAD_COMMAND
elif [ "$COMMAND" == "delete" ]; then
	EXEC_COMMAND=$DELETE_COMMAND
fi
if [ "$EXEC_COMMAND" == "" ]; then
	echo "unknown service/command combination: $SERVICE $COMMAND" >&2
	echo "$usage" >&2
	exit 1
fi

echo "========================================"
echo "Environment"
echo "========================================"
echo "UPLOAD CMD:" $UPLOAD_COMMAND
echo "DOWNLOAD CMD:" $DOWNLOAD_COMMAND
echo "EXEC CMD:" $EXEC_COMMAND
echo "BUCKET:" $BUCKET
echo "FILE:" $FILE
echo "OPERATIONS:" $OPERATIONS
echo "LOG FILE:" $LOG_FILE
echo "RESULTS FILE:" $RESULTS_FILE
echo ""
echo "========================================"
echo "Benchmark"
echo "========================================"

# Prepare state the benchmarked command depends on. Bucket creation only
# applies to storj; the original called exec_storj_create_bucket even for aws.
if [ "$COMMAND" == "download" ]; then
	echo "Uploading file for download benchmark..."
	if [ "$SERVICE" == "storj" ]; then
		exec_storj_create_bucket
	fi
	$UPLOAD_COMMAND
elif [ "$COMMAND" == "delete" ]; then
	if [ "$SERVICE" == "storj" ]; then
		exec_storj_create_bucket
	fi
fi

# Benchmark.
echo "Benchmarking $SERVICE $COMMAND..."
rm -rf "$LOG_FILE"
rm -rf "$RESULTS_FILE"
for (( i=1; i<=OPERATIONS; i++ ))
do
	response="$(${EXEC_COMMAND})"
	if [ $? == 0 ]; then
		# keep only the wall-clock seconds from the "real <n>" line
		response_time=$(echo "$response" | awk '/real/{print $2}')
		echo "$response_time" >> "$LOG_FILE"
	else
		echo "Failed to $COMMAND file"
		echo "$response"
		let "FAILURES++"
	fi
done

# percentile <fraction>: nearest-rank percentile of the sorted timings.
function percentile() {
	sort -n "$LOG_FILE" | awk -v p="$1" 'BEGIN{i=0} {s[i]=$1; i++;} END{print s[int(NR*p-0.5)]}'
}

cat >"$RESULTS_FILE" <<EOL
50%: $(percentile 0.50)s
75%: $(percentile 0.75)s
90%: $(percentile 0.90)s
95%: $(percentile 0.95)s
99%: $(percentile 0.99)s
EOL
echo "Failures: $FAILURES" >> "$RESULTS_FILE"
cat "$RESULTS_FILE"

View File

@ -1,24 +0,0 @@
#!/bin/bash
set -ueo pipefail

# Purpose: executes upload and download benchmark tests against aws s3 to
#          compare with storj performance.
# Setup:   assumes the awscli is installed and that the $AWS_ACCESS_KEY_ID and
#          $AWS_SECRET_ACCESS_KEY environment variables hold valid aws
#          credentials with permission to read/write aws s3.
# Usage:   from the root of the storj repo, run
#          $ ./scripts/test-aws-benchmark.sh

# Configure the aws CLI from the environment.
aws configure set aws_access_key_id "$AWS_ACCESS_KEY_ID"
aws configure set aws_secret_access_key "$AWS_SECRET_ACCESS_KEY"
aws configure set default.region us-east-1

# Run the Go aws s3 benchmark suite.
printf '\nExecuting upload/download benchmark tests for aws s3...\n'
go test -bench=S3 -benchmem ./cmd/uplink/cmd/

# Run s3-benchmark with the aws-cli client.
printf '\nExecuting s3-benchmark tests with aws s3 client...\n'
s3-benchmark --client=aws-cli --accesskey="$AWS_ACCESS_KEY_ID" --secretkey="$AWS_SECRET_ACCESS_KEY" --location="us-east-1" --s3-gateway="https://s3.amazonaws.com/"
View File

@ -1,26 +0,0 @@
#!/bin/bash
set -ueo pipefail

# Purpose: executes uplink upload and download benchmark tests against storj-sim.
# Setup:   remove any existing uplink configs.
# Usage:   from the root of the storj repo, run
#          $ storj-sim network test bash ./scripts/test-sim-benchmark.sh
#          To run and filter out storj-sim logs, run:
#          $ storj-sim -x network test bash ./scripts/test-sim-benchmark.sh | grep -i "test.out"

SATELLITE_0_ADDR=${SATELLITE_0_ADDR:-127.0.0.1:10000}

# Query storj-sim for the gateway api key exactly once and export it.
# (The original invoked `storj-sim network env` twice: a plain assignment
# immediately followed by an `export` re-assignment of the same value.)
apiKey=$(storj-sim network env GATEWAY_0_API_KEY)
export apiKey
echo "apiKey:"
echo "$apiKey"

# run benchmark tests
echo
echo "Executing benchmark tests with uplink client against storj-sim..."
go test -bench=Uplink -benchmem ./cmd/uplink/cmd/

# run s3-benchmark with uplink
echo
echo "Executing s3-benchmark tests with uplink client against storj-sim..."
s3-benchmark --client=uplink --satellite="$SATELLITE_0_ADDR" --apikey="$apiKey"