s3-benchmark: add aws-cli support (#386)

Egon Elbre 2018-09-28 15:40:08 +03:00 committed by GitHub
parent c9cabaf73a
commit e83502f33d
9 changed files with 844 additions and 272 deletions

cmd/s3-benchmark/awscli.go (new file, 183 lines)

@@ -0,0 +1,183 @@
package main
import (
"bytes"
"encoding/json"
"os"
"os/exec"
"strings"
"github.com/zeebo/errs"
)
// AWSCLIError is the error class for aws-cli errors
var AWSCLIError = errs.Class("aws-cli error")
// AWSCLI implements basic S3 Client with aws-cli
type AWSCLI struct {
conf Config
}
// NewAWSCLI creates new Client
func NewAWSCLI(conf Config) (Client, error) {
if !strings.HasPrefix(conf.Endpoint, "https://") &&
!strings.HasPrefix(conf.Endpoint, "http://") {
conf.Endpoint = "http://" + conf.Endpoint
}
return &AWSCLI{conf}, nil
}
func (client *AWSCLI) cmd(subargs ...string) *exec.Cmd {
args := []string{
"--endpoint", client.conf.Endpoint,
}
if client.conf.NoSSL {
args = append(args, "--no-verify-ssl")
}
args = append(args, subargs...)
cmd := exec.Command("aws", args...)
cmd.Env = append(os.Environ(),
"AWS_ACCESS_KEY_ID="+client.conf.AccessKey,
"AWS_SECRET_ACCESS_KEY="+client.conf.SecretKey,
)
return cmd
}
// MakeBucket makes a new bucket
func (client *AWSCLI) MakeBucket(bucket, location string) error {
cmd := client.cmd("s3", "mb", "s3://"+bucket, "--region", location)
_, err := cmd.Output()
if err != nil {
return AWSCLIError.Wrap(err)
}
return nil
}
// RemoveBucket removes a bucket
func (client *AWSCLI) RemoveBucket(bucket string) error {
cmd := client.cmd("s3", "rb", "s3://"+bucket)
_, err := cmd.Output()
if err != nil {
return AWSCLIError.Wrap(err)
}
return nil
}
// ListBuckets lists all buckets
func (client *AWSCLI) ListBuckets() ([]string, error) {
cmd := client.cmd("s3api", "list-buckets", "--output", "json")
jsondata, err := cmd.Output()
if err != nil {
return nil, AWSCLIError.Wrap(err)
}
var response struct {
Buckets []struct {
Name string `json:"Name"`
} `json:"Buckets"`
}
err = json.Unmarshal(jsondata, &response)
if err != nil {
return nil, AWSCLIError.Wrap(err)
}
names := []string{}
for _, bucket := range response.Buckets {
names = append(names, bucket.Name)
}
return names, nil
}
// Upload uploads object data to the specified path
func (client *AWSCLI) Upload(bucket, objectName string, data []byte) error {
// TODO: add upload threshold
cmd := client.cmd("s3", "cp", "-", "s3://"+bucket+"/"+objectName)
cmd.Stdin = bytes.NewReader(data)
_, err := cmd.Output()
if err != nil {
return AWSCLIError.Wrap(err)
}
return nil
}
// UploadMultipart uses multipart uploads, has hardcoded threshold
func (client *AWSCLI) UploadMultipart(bucket, objectName string, data []byte, threshold int) error {
// TODO: add upload threshold
cmd := client.cmd("s3", "cp", "-", "s3://"+bucket+"/"+objectName)
cmd.Stdin = bytes.NewReader(data)
_, err := cmd.Output()
if err != nil {
return AWSCLIError.Wrap(err)
}
return nil
}
// Download downloads object data
func (client *AWSCLI) Download(bucket, objectName string, buffer []byte) ([]byte, error) {
cmd := client.cmd("s3", "cp", "s3://"+bucket+"/"+objectName, "-")
buf := &bufferWriter{buffer[:0]}
cmd.Stdout = buf
cmd.Stderr = os.Stderr
err := cmd.Run()
if err != nil {
return nil, AWSCLIError.Wrap(err)
}
return buf.data, nil
}
type bufferWriter struct {
data []byte
}
func (b *bufferWriter) Write(data []byte) (n int, err error) {
b.data = append(b.data, data...)
return len(data), nil
}
// Delete deletes object
func (client *AWSCLI) Delete(bucket, objectName string) error {
cmd := client.cmd("s3", "rm", "s3://"+bucket+"/"+objectName)
_, err := cmd.Output()
if err != nil {
return AWSCLIError.Wrap(err)
}
return nil
}
// ListObjects lists objects
func (client *AWSCLI) ListObjects(bucket, prefix string) ([]string, error) {
cmd := client.cmd("s3api", "list-objects",
"--output", "json",
"--bucket", bucket,
"--prefix", prefix)
jsondata, err := cmd.Output()
if err != nil {
return nil, AWSCLIError.Wrap(err)
}
var response struct {
Contents []struct {
Key string `json:"Key"`
} `json:"Contents"`
}
err = json.Unmarshal(jsondata, &response)
if err != nil {
return nil, AWSCLIError.Wrap(err)
}
names := []string{}
for _, object := range response.Contents {
names = append(names, object.Key)
}
return names, nil
}
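
As a hedged usage sketch (not part of this commit), the aws-cli backend can be driven directly through the same methods the benchmark uses. The function name, endpoint, credentials, region, bucket and object names below are placeholders, and the snippet assumes it sits in the same package as awscli.go with the fmt package imported:

func exampleAWSCLI() error {
    client, err := NewAWSCLI(Config{
        Endpoint:  "127.0.0.1:7777", // NewAWSCLI prefixes this with "http://"
        AccessKey: "insecure-dev-access-key",
        SecretKey: "insecure-dev-secret-key",
        NoSSL:     true,
    })
    if err != nil {
        return err
    }
    if err := client.MakeBucket("example-bucket", "us-east-1"); err != nil {
        return err
    }
    // Upload runs `aws --endpoint http://127.0.0.1:7777 --no-verify-ssl s3 cp - s3://example-bucket/hello`,
    // streaming the payload over stdin; credentials are passed via AWS_* environment variables.
    if err := client.Upload("example-bucket", "hello", []byte("hello world")); err != nil {
        return err
    }
    data, err := client.Download("example-bucket", "hello", nil)
    if err != nil {
        return err
    }
    if string(data) != "hello world" {
        return fmt.Errorf("round-trip mismatch: %q", data)
    }
    return client.Delete("example-bucket", "hello")
}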


@@ -7,26 +7,27 @@ import (
"bytes"
"flag"
"fmt"
"image/color"
"io"
"io/ioutil"
"log"
"math/rand"
"os"
"text/tabwriter"
"time"
"github.com/loov/hrtime"
"github.com/loov/plot"
minio "github.com/minio/minio-go"
"storj.io/storj/internal/memory"
)
func main() {
endpoint := flag.String("endpoint", "127.0.0.1:7777", "endpoint address")
accesskey := flag.String("accesskey", "insecure-dev-access-key", "access key")
secretkey := flag.String("secretkey", "insecure-dev-secret-key", "secret key")
useSSL := flag.Bool("use-ssl", true, "use ssl")
var conf Config
flag.StringVar(&conf.Endpoint, "endpoint", "127.0.0.1:7777", "endpoint address")
flag.StringVar(&conf.AccessKey, "accesskey", "insecure-dev-access-key", "access key")
flag.StringVar(&conf.SecretKey, "secretkey", "insecure-dev-secret-key", "secret key")
flag.BoolVar(&conf.NoSSL, "no-ssl", false, "disable ssl")
clientName := flag.String("client", "minio", "client to use for requests (supported: minio, aws-cli)")
location := flag.String("location", "", "bucket location")
count := flag.Int("count", 50, "benchmark count")
duration := flag.Duration("time", 2*time.Minute, "maximum benchmark time per size")
@@ -35,14 +36,32 @@ func main() {
plotname := flag.String("plot", "plot"+suffix+".svg", "plot results")
sizes := &Sizes{
Default: []Size{{1 * KB}, {256 * KB}, {1 * MB}, {32 * MB}, {64 * MB}, {256 * MB}},
sizes := &memory.Sizes{
Default: []memory.Size{
1 * memory.KB,
256 * memory.KB,
1 * memory.MB,
32 * memory.MB,
64 * memory.MB,
256 * memory.MB,
},
}
flag.Var(sizes, "size", "sizes to test with")
flag.Parse()
client, err := minio.New(*endpoint, *accesskey, *secretkey, *useSSL)
var client Client
var err error
switch *clientName {
default:
log.Println("unknown client name ", *clientName, " defaulting to minio")
fallthrough
case "minio":
client, err = NewMinio(conf)
case "aws-cli":
client, err = NewAWSCLI(conf)
}
if err != nil {
log.Fatal(err)
}
@@ -51,14 +70,14 @@ func main() {
log.Println("Creating bucket", bucket)
err = client.MakeBucket(bucket, *location)
if err != nil {
log.Fatal("failed to create bucket: ", bucket, ": ", err)
log.Fatalf("failed to create bucket %q: %+v\n", bucket, err)
}
defer func() {
log.Println("Removing bucket")
err := client.RemoveBucket(bucket)
if err != nil {
log.Fatal("failed to remove bucket: ", bucket)
log.Fatalf("failed to remove bucket %q", bucket)
}
}()
@@ -91,163 +110,107 @@ func main() {
_ = w.Flush()
if *plotname != "" {
p := plot.New()
p.X.Min = 0
p.X.Max = 10
p.X.MajorTicks = 10
p.X.MinorTicks = 10
speed := plot.NewAxisGroup()
speed.Y.Min = 0
speed.Y.Max = 1
speed.X.Min = 0
speed.X.Max = 30
speed.X.MajorTicks = 10
speed.X.MinorTicks = 10
rows := plot.NewVStack()
rows.Margin = plot.R(5, 5, 5, 5)
p.Add(rows)
for _, m := range measurements {
row := plot.NewHFlex()
rows.Add(row)
row.Add(35, plot.NewTextbox(m.Size.String()))
plots := plot.NewVStack()
row.Add(0, plots)
{ // time plotting
uploadTime := plot.NewDensity("s", asSeconds(m.Upload))
uploadTime.Stroke = color.NRGBA{0, 200, 0, 255}
downloadTime := plot.NewDensity("s", asSeconds(m.Download))
downloadTime.Stroke = color.NRGBA{0, 0, 200, 255}
deleteTime := plot.NewDensity("s", asSeconds(m.Delete))
deleteTime.Stroke = color.NRGBA{200, 0, 0, 255}
flexTime := plot.NewHFlex()
plots.Add(flexTime)
flexTime.Add(70, plot.NewTextbox("time (s)"))
flexTime.AddGroup(0,
plot.NewGrid(),
uploadTime,
downloadTime,
deleteTime,
plot.NewTickLabels(),
)
}
{ // speed plotting
uploadSpeed := plot.NewDensity("MB/s", asSpeed(m.Upload, m.Size.bytes))
uploadSpeed.Stroke = color.NRGBA{0, 200, 0, 255}
downloadSpeed := plot.NewDensity("MB/s", asSpeed(m.Download, m.Size.bytes))
downloadSpeed.Stroke = color.NRGBA{0, 0, 200, 255}
flexSpeed := plot.NewHFlex()
plots.Add(flexSpeed)
speedGroup := plot.NewAxisGroup()
speedGroup.X, speedGroup.Y = speed.X, speed.Y
speedGroup.AddGroup(
plot.NewGrid(),
uploadSpeed,
downloadSpeed,
plot.NewTickLabels(),
)
flexSpeed.Add(70, plot.NewTextbox("speed (MB/s)"))
flexSpeed.AddGroup(0, speedGroup)
}
}
svgCanvas := plot.NewSVG(1500, 150*float64(len(measurements)))
p.Draw(svgCanvas)
err := ioutil.WriteFile(*plotname, svgCanvas.Bytes(), 0755)
err := Plot(*plotname, measurements)
if err != nil {
log.Fatal(err)
}
}
}
func asSeconds(durations []time.Duration) []float64 {
xs := make([]float64, 0, len(durations))
for _, dur := range durations {
xs = append(xs, dur.Seconds())
}
return xs
}
func asSpeed(durations []time.Duration, size int64) []float64 {
const MB = 1 << 20
xs := make([]float64, 0, len(durations))
for _, dur := range durations {
xs = append(xs, (float64(size)/MB)/dur.Seconds())
}
return xs
}
// Measurement contains measurements for different requests
type Measurement struct {
Size Size
Upload []time.Duration
Download []time.Duration
Delete []time.Duration
Size memory.Size
Results []*Result
}
// Result contains durations for specific tests
type Result struct {
Name string
WithSpeed bool
Durations []time.Duration
}
// Result finds or creates a result with the specified name
func (m *Measurement) Result(name string) *Result {
for _, x := range m.Results {
if x.Name == name {
return x
}
}
r := &Result{}
r.Name = name
m.Results = append(m.Results, r)
return r
}
// Record records a time measurement
func (m *Measurement) Record(name string, duration time.Duration) {
r := m.Result(name)
r.WithSpeed = false
r.Durations = append(r.Durations, duration)
}
// RecordSpeed records a time measurement that can be expressed in speed
func (m *Measurement) RecordSpeed(name string, duration time.Duration) {
r := m.Result(name)
r.WithSpeed = true
r.Durations = append(r.Durations, duration)
}
// PrintStats prints important values about the measurement
func (m *Measurement) PrintStats(w io.Writer) {
const binCount = 10
upload := hrtime.NewDurationHistogram(m.Upload, binCount)
download := hrtime.NewDurationHistogram(m.Download, binCount)
delete := hrtime.NewDurationHistogram(m.Delete, binCount)
type Hist struct {
*Result
*hrtime.Histogram
}
hists := []struct {
L string
H *hrtime.Histogram
}{
{"Upload", upload},
{"Download", download},
{"Delete", delete},
hists := []Hist{}
for _, result := range m.Results {
hists = append(hists, Hist{
Result: result,
Histogram: hrtime.NewDurationHistogram(result.Durations, binCount),
})
}
sec := func(ns float64) string {
return fmt.Sprintf("%.2f", ns/1e9)
}
speed := func(ns float64) string {
return fmt.Sprintf("%.2f", (float64(m.Size.bytes)/(1<<20))/(ns/1e9))
return fmt.Sprintf("%.2f", m.Size.MB()/(ns/1e9))
}
for _, hist := range hists {
if hist.L == "Delete" {
if !hist.WithSpeed {
fmt.Fprintf(w, "%v\t%v\t%v\t%v\t%v\t%v\t%v\t%v\t%v\t%v\t%v\t%v\n",
m.Size, hist.L,
sec(hist.H.Average), "",
sec(hist.H.Maximum), "",
sec(hist.H.P50), "",
sec(hist.H.P90), "",
sec(hist.H.P99), "",
m.Size, hist.Name,
sec(hist.Average), "",
sec(hist.Maximum), "",
sec(hist.P50), "",
sec(hist.P90), "",
sec(hist.P99), "",
)
continue
}
fmt.Fprintf(w, "%v\t%v\t%v\t%v\t%v\t%v\t%v\t%v\t%v\t%v\t%v\t%v\n",
m.Size, hist.L,
sec(hist.H.Average), speed(hist.H.Average),
sec(hist.H.Maximum), speed(hist.H.Maximum),
sec(hist.H.P50), speed(hist.H.P50),
sec(hist.H.P90), speed(hist.H.P90),
sec(hist.H.P99), speed(hist.H.P99),
m.Size, hist.Name,
sec(hist.Average), speed(hist.Average),
sec(hist.Maximum), speed(hist.Maximum),
sec(hist.P50), speed(hist.P50),
sec(hist.P90), speed(hist.P90),
sec(hist.P99), speed(hist.P99),
)
}
}
// Benchmark runs benchmarks on bucket with given size
func Benchmark(client *minio.Client, bucket string, size Size, count int, duration time.Duration) (Measurement, error) {
func Benchmark(client Client, bucket string, size memory.Size, count int, duration time.Duration) (Measurement, error) {
log.Print("Benchmarking size ", size.String(), " ")
data := make([]byte, size.bytes)
result := make([]byte, size.bytes)
data := make([]byte, size.Int())
result := make([]byte, size.Int())
defer fmt.Println()
@@ -260,50 +223,70 @@ func Benchmark(client *minio.Client, bucket string, size Size, count int, durati
}
fmt.Print(".")
rand.Read(data[:])
// rand.Read(data[:])
for i := range data {
data[i] = 'a' + byte(i%26)
}
{ // uploading
start := hrtime.Now()
_, err := client.PutObject(bucket, "data", bytes.NewReader(data), int64(len(data)), minio.PutObjectOptions{
ContentType: "application/octet-stream",
})
err := client.Upload(bucket, "data", data)
finish := hrtime.Now()
if err != nil {
return measurement, fmt.Errorf("upload failed: %v", err)
return measurement, fmt.Errorf("upload failed: %+v", err)
}
measurement.Upload = append(measurement.Upload, (finish - start))
measurement.RecordSpeed("Upload", finish-start)
}
{ // downloading
start := hrtime.Now()
reader, err := client.GetObject(bucket, "data", minio.GetObjectOptions{})
var err error
result, err = client.Download(bucket, "data", result)
if err != nil {
return measurement, fmt.Errorf("get object failed: %v", err)
}
var n int
n, err = reader.Read(result)
if err != nil && err != io.EOF {
return measurement, fmt.Errorf("download failed: %v", err)
return measurement, fmt.Errorf("get object failed: %+v", err)
}
finish := hrtime.Now()
if !bytes.Equal(data, result[:n]) {
return measurement, fmt.Errorf("upload/download do not match: length %d != %d", len(data), n)
if !bytes.Equal(data, result) {
return measurement, fmt.Errorf("upload/download do not match: lengths %d and %d", len(data), len(result))
}
measurement.Download = append(measurement.Download, (finish - start))
measurement.RecordSpeed("Download", finish-start)
}
{ // deleting
start := hrtime.Now()
err := client.RemoveObject(bucket, "data")
err := client.Delete(bucket, "data")
if err != nil {
return measurement, fmt.Errorf("delete failed: %v", err)
return measurement, fmt.Errorf("delete failed: %+v", err)
}
finish := hrtime.Now()
measurement.Delete = append(measurement.Delete, (finish - start))
measurement.Record("Delete", finish-start)
}
}
return measurement, nil
}
// Config is the setup for a particular client
type Config struct {
Endpoint string
AccessKey string
SecretKey string
NoSSL bool
}
// Client is the common interface for different implementations
type Client interface {
MakeBucket(bucket, location string) error
RemoveBucket(bucket string) error
ListBuckets() ([]string, error)
Upload(bucket, objectName string, data []byte) error
UploadMultipart(bucket, objectName string, data []byte, multipartThreshold int) error
Download(bucket, objectName string, buffer []byte) ([]byte, error)
Delete(bucket, objectName string) error
ListObjects(bucket, prefix string) ([]string, error)
}
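
The Client interface is what lets Benchmark run unchanged against either backend. A compile-time assertion and a small round-trip helper (both hypothetical, not part of the commit; the helper assumes the bytes and errors imports and uses placeholder object names) illustrate how any implementation can be exercised the way Benchmark does per size:

// Compile-time checks that both implementations satisfy Client.
var (
    _ Client = (*Minio)(nil)
    _ Client = (*AWSCLI)(nil)
)

// roundTrip uploads, downloads and deletes a small object through the Client interface.
func roundTrip(client Client, bucket string) error {
    payload := []byte("probe-data")
    if err := client.Upload(bucket, "probe", payload); err != nil {
        return err
    }
    data, err := client.Download(bucket, "probe", make([]byte, 0, len(payload)))
    if err != nil {
        return err
    }
    if !bytes.Equal(data, payload) {
        return errors.New("upload/download mismatch")
    }
    return client.Delete(bucket, "probe")
}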

cmd/s3-benchmark/minio.go (new file, 130 lines)

@@ -0,0 +1,130 @@
package main
import (
"bytes"
"io"
"io/ioutil"
minio "github.com/minio/minio-go"
"github.com/zeebo/errs"
)
// MinioError is the error class for minio errors
var MinioError = errs.Class("minio error")
// Minio implements basic S3 Client with minio
type Minio struct {
api *minio.Client
}
// NewMinio creates new Client
func NewMinio(conf Config) (Client, error) {
api, err := minio.New(conf.Endpoint, conf.AccessKey, conf.SecretKey, !conf.NoSSL)
if err != nil {
return nil, MinioError.Wrap(err)
}
return &Minio{api}, nil
}
// MakeBucket makes a new bucket
func (client *Minio) MakeBucket(bucket, location string) error {
err := client.api.MakeBucket(bucket, location)
if err != nil {
return MinioError.Wrap(err)
}
return nil
}
// RemoveBucket removes a bucket
func (client *Minio) RemoveBucket(bucket string) error {
err := client.api.RemoveBucket(bucket)
if err != nil {
return MinioError.Wrap(err)
}
return nil
}
// ListBuckets lists all buckets
func (client *Minio) ListBuckets() ([]string, error) {
buckets, err := client.api.ListBuckets()
if err != nil {
return nil, MinioError.Wrap(err)
}
names := []string{}
for _, bucket := range buckets {
names = append(names, bucket.Name)
}
return names, nil
}
// Upload uploads object data to the specified path
func (client *Minio) Upload(bucket, objectName string, data []byte) error {
_, err := client.api.PutObject(
bucket, objectName,
bytes.NewReader(data), int64(len(data)),
minio.PutObjectOptions{ContentType: "application/octet-stream"})
if err != nil {
return MinioError.Wrap(err)
}
return nil
}
// UploadMultipart uses multipart uploads, has hardcoded threshold
func (client *Minio) UploadMultipart(bucket, objectName string, data []byte, threshold int) error {
_, err := client.api.PutObject(
bucket, objectName,
bytes.NewReader(data), -1,
minio.PutObjectOptions{ContentType: "application/octet-stream"})
if err != nil {
return MinioError.Wrap(err)
}
return nil
}
// Download downloads object data
func (client *Minio) Download(bucket, objectName string, buffer []byte) ([]byte, error) {
reader, err := client.api.GetObject(bucket, objectName, minio.GetObjectOptions{})
if err != nil {
return nil, MinioError.Wrap(err)
}
defer func() { _ = reader.Close() }()
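// GetObject returns a lazy reader: a single Read may fill only part of the buffer,
// so when it does not hit EOF immediately the remainder is drained with ReadAll below.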
n, err := reader.Read(buffer[:cap(buffer)])
if err != io.EOF {
rest, err := ioutil.ReadAll(reader)
if err == io.EOF {
err = nil
}
if err != nil {
return nil, MinioError.Wrap(err)
}
buffer = append(buffer, rest...)
n = len(buffer)
}
buffer = buffer[:n]
return buffer, nil
}
// Delete deletes object
func (client *Minio) Delete(bucket, objectName string) error {
err := client.api.RemoveObject(bucket, objectName)
if err != nil {
return MinioError.Wrap(err)
}
return nil
}
// ListObjects lists objects
func (client *Minio) ListObjects(bucket, prefix string) ([]string, error) {
doneCh := make(chan struct{})
defer close(doneCh)
names := []string{}
for message := range client.api.ListObjects(bucket, prefix, true, doneCh) {
names = append(names, message.Key)
}
return names, nil
}

cmd/s3-benchmark/plot.go (new file, 109 lines)

@@ -0,0 +1,109 @@
package main
import (
"image/color"
"io/ioutil"
"time"
"github.com/loov/plot"
)
var palette = []color.Color{
color.NRGBA{0, 200, 0, 255},
color.NRGBA{0, 0, 200, 255},
color.NRGBA{200, 0, 0, 255},
}
// Plot plots measurements into filename as an svg
func Plot(filename string, measurements []Measurement) error {
p := plot.New()
p.X.Min = 0
p.X.Max = 10
p.X.MajorTicks = 10
p.X.MinorTicks = 10
speed := plot.NewAxisGroup()
speed.Y.Min = 0
speed.Y.Max = 1
speed.X.Min = 0
speed.X.Max = 30
speed.X.MajorTicks = 10
speed.X.MinorTicks = 10
rows := plot.NewVStack()
rows.Margin = plot.R(5, 5, 5, 5)
p.Add(rows)
for _, m := range measurements {
row := plot.NewHFlex()
rows.Add(row)
row.Add(35, plot.NewTextbox(m.Size.String()))
plots := plot.NewVStack()
row.Add(0, plots)
{ // time plotting
group := []plot.Element{plot.NewGrid()}
for i, result := range m.Results {
time := plot.NewDensity("s", asSeconds(result.Durations))
time.Stroke = palette[i%len(palette)]
group = append(group, time)
}
group = append(group, plot.NewTickLabels())
flexTime := plot.NewHFlex()
plots.Add(flexTime)
flexTime.Add(70, plot.NewTextbox("time (s)"))
flexTime.AddGroup(0, group...)
}
{ // speed plotting
group := []plot.Element{plot.NewGrid()}
for i, result := range m.Results {
if !result.WithSpeed {
continue
}
speed := plot.NewDensity("MB/s", asSpeed(result.Durations, m.Size.Int64()))
speed.Stroke = palette[i%len(palette)]
group = append(group, speed)
}
group = append(group, plot.NewTickLabels())
flexSpeed := plot.NewHFlex()
plots.Add(flexSpeed)
speedGroup := plot.NewAxisGroup()
speedGroup.X, speedGroup.Y = speed.X, speed.Y
speedGroup.AddGroup(group...)
flexSpeed.Add(70, plot.NewTextbox("speed (MB/s)"))
flexSpeed.AddGroup(0, speedGroup)
}
}
svgCanvas := plot.NewSVG(1500, 150*float64(len(measurements)))
p.Draw(svgCanvas)
return ioutil.WriteFile(filename, svgCanvas.Bytes(), 0755)
}
func asSeconds(durations []time.Duration) []float64 {
xs := make([]float64, 0, len(durations))
for _, dur := range durations {
xs = append(xs, dur.Seconds())
}
return xs
}
func asSpeed(durations []time.Duration, size int64) []float64 {
const MB = 1 << 20
xs := make([]float64, 0, len(durations))
for _, dur := range durations {
xs = append(xs, (float64(size)/MB)/dur.Seconds())
}
return xs
}


@@ -1,117 +0,0 @@
// Copyright (C) 2018 Storj Labs, Inc.
// See LICENSE for copying information.
package main
import (
"errors"
"strconv"
"strings"
)
// Sizes implements flag.Value for collecting byte counts
type Sizes struct {
Default []Size
Custom []Size
}
// Sizes returns the loaded values
func (sizes Sizes) Sizes() []Size {
if len(sizes.Custom) > 0 {
return sizes.Custom
}
return sizes.Default
}
// String converts values to a string
func (sizes Sizes) String() string {
sz := sizes.Sizes()
xs := make([]string, len(sz))
for i, size := range sz {
xs[i] = size.String()
}
return strings.Join(xs, " ")
}
// Set adds values from byte values
func (sizes *Sizes) Set(s string) error {
for _, x := range strings.Fields(s) {
var size Size
if err := size.Set(x); err != nil {
return err
}
sizes.Custom = append(sizes.Custom, size)
}
return nil
}
// Size represents a value of bytes
type Size struct {
bytes int64
}
type unit struct {
suffix string
scale float64
}
// different byte-size suffixes
const (
TB = 1 << 40
GB = 1 << 30
MB = 1 << 20
KB = 1 << 10
B = 1
)
var units = []unit{
{"T", TB},
{"G", GB},
{"M", MB},
{"K", KB},
{"B", B},
{"", 0},
}
// String converts size to a string
func (size Size) String() string {
if size.bytes <= 0 {
return "0"
}
v := float64(size.bytes)
for _, unit := range units {
if v >= unit.scale {
r := strconv.FormatFloat(v/unit.scale, 'f', 1, 64)
r = strings.TrimSuffix(r, "0")
r = strings.TrimSuffix(r, ".")
return r + unit.suffix
}
}
return strconv.Itoa(int(size.bytes)) + "B"
}
// Set updates value from string
func (size *Size) Set(s string) error {
if s == "" {
return errors.New("empty size")
}
value, suffix := s[:len(s)-1], s[len(s)-1]
if '0' <= suffix && suffix <= '9' {
suffix = 'B'
value = s
}
for _, unit := range units {
if unit.suffix == string(suffix) {
v, err := strconv.ParseFloat(value, 64)
if err != nil {
return err
}
size.bytes = int64(v * unit.scale)
return nil
}
}
return errors.New("unknown suffix " + string(suffix))
}

internal/memory/size.go (new file, 128 lines)

@@ -0,0 +1,128 @@
// Copyright (C) 2018 Storj Labs, Inc.
// See LICENSE for copying information.
package memory
import (
"errors"
"fmt"
"strconv"
"strings"
)
// different sizes
const (
B Size = 1 << (10 * iota)
KB
MB
GB
TB
PB
EB
)
// Size implements flag.Value for collecting memory size in bytes
type Size int64
// Int returns bytes size as int
func (size Size) Int() int { return int(size) }
// Int64 returns bytes size as int64
func (size Size) Int64() int64 { return int64(size) }
// Float64 returns bytes size as float64
func (size Size) Float64() float64 { return float64(size) }
// KB returns size in kilobytes
func (size Size) KB() float64 { return size.Float64() / KB.Float64() }
// MB returns size in megabytes
func (size Size) MB() float64 { return size.Float64() / MB.Float64() }
// GB returns size in gigabytes
func (size Size) GB() float64 { return size.Float64() / GB.Float64() }
// TB returns size in terabytes
func (size Size) TB() float64 { return size.Float64() / TB.Float64() }
// PB returns size in petabytes
func (size Size) PB() float64 { return size.Float64() / PB.Float64() }
// EB returns size in exabytes
func (size Size) EB() float64 { return size.Float64() / EB.Float64() }
// String converts size to a string
func (size Size) String() string {
if size == 0 {
return "0"
}
switch {
case size >= EB*2/3:
return fmt.Sprintf("%.1f EB", size.EB())
case size >= PB*2/3:
return fmt.Sprintf("%.1f PB", size.PB())
case size >= TB*2/3:
return fmt.Sprintf("%.1f TB", size.TB())
case size >= GB*2/3:
return fmt.Sprintf("%.1f GB", size.GB())
case size >= MB*2/3:
return fmt.Sprintf("%.1f MB", size.MB())
case size >= KB*2/3:
return fmt.Sprintf("%.1f KB", size.KB())
}
return strconv.Itoa(size.Int()) + " B"
}
func isLetter(b byte) bool {
return ('a' <= b && b <= 'z') || ('A' <= b && b <= 'Z')
}
// Set updates value from string
func (size *Size) Set(s string) error {
if s == "" {
return errors.New("empty size")
}
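// strip at most two trailing letters as the unit suffix (e.g. "MB", "M", or no suffix at all)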
p := len(s)
if isLetter(s[len(s)-1]) {
p--
if len(s)-2 >= 0 && isLetter(s[len(s)-2]) {
p--
}
}
value, suffix := s[:p], s[p:]
suffix = strings.ToUpper(suffix)
if suffix == "" || suffix[len(suffix)-1] != 'B' {
suffix += "B"
}
value = strings.TrimSpace(value)
v, err := strconv.ParseFloat(value, 64)
if err != nil {
return err
}
switch suffix {
case "EB":
*size = Size(v * EB.Float64())
case "PB":
*size = Size(v * PB.Float64())
case "TB":
*size = Size(v * TB.Float64())
case "GB":
*size = Size(v * GB.Float64())
case "MB":
*size = Size(v * MB.Float64())
case "KB":
*size = Size(v * KB.Float64())
case "B", "":
*size = Size(v)
default:
return fmt.Errorf("unknown suffix %q", suffix)
}
return nil
}
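
A brief, hypothetical illustration of how Size parses and formats (the function name and values are for illustration only; it assumes the fmt import and the storj.io/storj/internal/memory import path used by the tests below):

func exampleSize() {
    var size memory.Size
    _ = size.Set("256MB")          // suffix handling is case-insensitive; "256M" parses the same
    fmt.Println(size.Int64())      // 268435456 (256 << 20)
    fmt.Println(size)              // 256.0 MB
    fmt.Println(memory.Size(1500)) // 1.5 KB, since 1500/1024 is printed with one decimal
}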


@@ -0,0 +1,104 @@
package memory_test
import (
"testing"
"storj.io/storj/internal/memory"
)
const (
tb = 1 << 40
gb = 1 << 30
mb = 1 << 20
kb = 1 << 10
)
func TestSize(t *testing.T) {
var tests = []struct {
size memory.Size
text string
}{
// basics
{1 * tb, "1.0 TB"},
{1 * gb, "1.0 GB"},
{1 * mb, "1.0 MB"},
{1 * kb, "1.0 KB"},
{1, "1 B"},
// complicated
{68 * tb, "68.0 TB"},
{256 * mb, "256.0 MB"},
{500, "500 B"},
{5, "5 B"},
{1, "1 B"},
{0, "0"},
}
for i, test := range tests {
if test.size.String() != test.text {
t.Errorf("%d. invalid text got %v expected %v", i, test.size.String(), test.text)
}
var size memory.Size
err := size.Set(test.text)
if err != nil {
t.Errorf("%d. got error %v", i, err)
}
if test.size != size {
t.Errorf("%d. invalid size got %d expected %d", i, size, test.size)
}
}
}
func TestParse(t *testing.T) {
var tests = []struct {
size memory.Size
text string
}{
// case insensitivity
{1 * tb, "1.00TB"},
{1 * gb, "1.00gB"},
{1 * mb, "1.00Mb"},
{1 * kb, "1.00kb"},
{1, "1.00"},
{1 * tb, "1.0 TB"},
{1 * gb, "1.0 gB"},
{1 * mb, "1.0 Mb"},
{1 * kb, "1.0 kb"},
{1, "1.00"},
// without B suffix
{1 * tb, "1.00T"},
{1 * gb, "1.00g"},
{1 * mb, "1.00M"},
{1 * kb, "1.00k"},
}
for i, test := range tests {
var size memory.Size
err := size.Set(test.text)
if err != nil {
t.Errorf("%d. got error %v", i, err)
}
if test.size != size {
t.Errorf("%d. invalid size got %d expected %d", i, size, test.size)
}
}
}
func TestInvalidParse(t *testing.T) {
var tests = []string{
"1.0Q",
"1.0QB",
"z1.0KB",
"z1.0Q",
"1.0zQ",
"1.0zQB",
}
for i, test := range tests {
var size memory.Size
err := size.Set(test)
if err == nil {
t.Errorf("%d. didn't get error", i)
}
}
}

internal/memory/sizes.go (new file, 39 lines)

@@ -0,0 +1,39 @@
package memory
import "strings"
// Sizes implements flag.Value for collecting memory size
type Sizes struct {
Default []Size
Custom []Size
}
// Sizes returns the loaded values
func (sizes Sizes) Sizes() []Size {
if len(sizes.Custom) > 0 {
return sizes.Custom
}
return sizes.Default
}
// String converts values to a string
func (sizes Sizes) String() string {
sz := sizes.Sizes()
xs := make([]string, len(sz))
for i, size := range sz {
xs[i] = size.String()
}
return strings.Join(xs, " ")
}
// Set adds values from byte values
func (sizes *Sizes) Set(s string) error {
for _, x := range strings.Fields(s) {
var size Size
if err := size.Set(x); err != nil {
return err
}
sizes.Custom = append(sizes.Custom, size)
}
return nil
}
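
As in main.go's -size flag, Sizes can be registered as a flag.Value. A hypothetical sketch (the flag set name and values are placeholders; assumes the flag, fmt and memory imports):

func exampleSizesFlag() {
    sizes := &memory.Sizes{Default: []memory.Size{1 * memory.MB}}
    fs := flag.NewFlagSet("bench", flag.ContinueOnError)
    fs.Var(sizes, "size", "sizes to test with")
    _ = fs.Parse([]string{"-size", "1KB 32MB"}) // space-separated values override the default
    fmt.Println(sizes.Sizes())                  // [1.0 KB 32.0 MB]
}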

internal/memory/string.go (new file, 13 lines)

@@ -0,0 +1,13 @@
package memory
// FormatBytes converts number of bytes to appropriately sized string
func FormatBytes(bytes int64) string {
return Size(bytes).String()
}
// ParseString converts string to number of bytes
func ParseString(s string) (int64, error) {
var size Size
err := size.Set(s)
return size.Int64(), err
}
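
A short, hypothetical use of these convenience wrappers (values chosen for illustration; assumes the fmt import):

func exampleHelpers() {
    fmt.Println(memory.FormatBytes(64 << 20)) // 64.0 MB
    n, err := memory.ParseString("1.0 GB")    // n == 1 << 30, err == nil
    fmt.Println(n, err)
}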