cmd/uplinkng: test framework and ls tests
This adds a test framework with fake implementations of a filesystem so that unit tests can be written asserting the output of different command invocations as well as the effects they have on a hypothetical filesystem and storj network. It also implements the mb command.

Change-Id: I134c7ea6bf34f46192956c274a96cb5df7632ac0
This commit is contained in:
Parent: f1a9b45599
Commit: b24ea2ead5
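As an orientation before the diff itself, here is a minimal sketch of how the new framework is meant to be used. The test name, bucket, and key below are made up, and the column spacing in the expected output is illustrative; Setup, WithFile, Succeed, and RequireStdout are all defined in cmd/uplinkng/filesystem_test.go further down.

func TestLsSketch(t *testing.T) {
    // Seed the fake filesystem with one committed object. Creation times come
    // from a monotonic counter, so the first object is created at unix time 1.
    state := Setup(t,
        WithFile("sj://bucket/some/key"),
    )

    // Run "ls" against the fake and assert on its stdout.
    state.Succeed(t, "ls", "sj://bucket", "--recursive", "--utc").RequireStdout(t, `
        KIND CREATED SIZE KEY
        OBJ 1970-01-01 00:00:01 0 some/key
    `)
}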
cmdLs (ls command):

@@ -16,6 +16,7 @@ type cmdLs struct {
 	recursive bool
 	encrypted bool
 	pending   bool
+	utc       bool
 
 	prefix *Location
 }
@@ -33,6 +34,9 @@ func (c *cmdLs) Setup(a clingy.Arguments, f clingy.Flags) {
 	c.pending = f.New("pending", "List pending object uploads instead", false,
 		clingy.Transform(strconv.ParseBool),
 	).(bool)
+	c.utc = f.New("utc", "Show all timestamps in UTC instead of local time", false,
+		clingy.Transform(strconv.ParseBool),
+	).(bool)
 
 	c.prefix = a.New("prefix", "Prefix to list (sj://BUCKET[/KEY])", clingy.Optional,
 		clingy.Transform(parseLocation),
@@ -59,7 +63,7 @@ func (c *cmdLs) listBuckets(ctx clingy.Context) error {
 	iter := project.ListBuckets(ctx, nil)
 	for iter.Next() {
 		item := iter.Item()
-		tw.WriteLine(formatTime(item.Created), item.Name)
+		tw.WriteLine(formatTime(c.utc, item.Created), item.Name)
 	}
 	return iter.Err()
 }
@@ -91,12 +95,17 @@ func (c *cmdLs) listLocation(ctx clingy.Context, prefix Location) error {
 		if obj.IsPrefix {
 			tw.WriteLine("PRE", "", "", obj.Loc.Key())
 		} else {
-			tw.WriteLine("OBJ", formatTime(obj.Created), obj.ContentLength, obj.Loc.Key())
+			tw.WriteLine("OBJ", formatTime(c.utc, obj.Created), obj.ContentLength, obj.Loc.Key())
 		}
 	}
 	return iter.Err()
 }
 
-func formatTime(x time.Time) string {
-	return x.Local().Format("2006-01-02 15:04:05")
+func formatTime(utc bool, x time.Time) string {
+	if utc {
+		x = x.UTC()
+	} else {
+		x = x.Local()
+	}
+	return x.Format("2006-01-02 15:04:05")
 }
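An illustrative call of the reworked helper (not part of the diff; the timestamp is made up):

created := time.Date(2021, 5, 1, 12, 0, 0, 0, time.UTC) // hypothetical value
_ = formatTime(true, created)  // "2021-05-01 12:00:00", rendered in UTC
_ = formatTime(false, created) // the same instant, rendered in local time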
cmd/uplinkng/cmd_ls_test.go (new file, 281 lines):

@@ -0,0 +1,281 @@
// Copyright (C) 2021 Storj Labs, Inc.
// See LICENSE for copying information.

package main

import (
    "testing"
)

func TestLsErrors(t *testing.T) {
    state := Setup(t)

    // empty bucket name is a parse error
    state.Fail(t, "ls", "sj:///jeff")
}

func TestLsRemote(t *testing.T) {
    state := Setup(t,
        WithFile("sj://jeff/deep/aaa/bbb/1"),
        WithFile("sj://jeff/deep/aaa/bbb/2"),
        WithFile("sj://jeff/deep/aaa/bbb/3"),
        WithFile("sj://jeff/foobar"),
        WithFile("sj://jeff/foobar/"),
        WithFile("sj://jeff/foobar/1"),
        WithFile("sj://jeff/foobar/2"),
        WithFile("sj://jeff/foobar/3"),
        WithFile("sj://jeff/foobaz/1"),

        WithPendingFile("sj://jeff/invisible"),
    )

    t.Run("Recursive", func(t *testing.T) {
        state.Succeed(t, "ls", "sj://jeff", "--recursive", "--utc").RequireStdout(t, `
            KIND CREATED SIZE KEY
            OBJ 1970-01-01 00:00:01 0 deep/aaa/bbb/1
            OBJ 1970-01-01 00:00:02 0 deep/aaa/bbb/2
            OBJ 1970-01-01 00:00:03 0 deep/aaa/bbb/3
            OBJ 1970-01-01 00:00:04 0 foobar
            OBJ 1970-01-01 00:00:05 0 foobar/
            OBJ 1970-01-01 00:00:06 0 foobar/1
            OBJ 1970-01-01 00:00:07 0 foobar/2
            OBJ 1970-01-01 00:00:08 0 foobar/3
            OBJ 1970-01-01 00:00:09 0 foobaz/1
        `)
    })

    t.Run("Basic", func(t *testing.T) {
        state.Succeed(t, "ls", "sj://jeff/fo", "--utc").RequireStdout(t, `
            KIND CREATED SIZE KEY
            OBJ 1970-01-01 00:00:04 0 foobar
            PRE foobar/
            PRE foobaz/
        `)
    })

    t.Run("ExactPrefix", func(t *testing.T) {
        state.Succeed(t, "ls", "sj://jeff/foobar", "--utc").RequireStdout(t, `
            KIND CREATED SIZE KEY
            OBJ 1970-01-01 00:00:04 0 foobar
            PRE foobar/
        `)
    })

    t.Run("ExactPrefixWithSlash", func(t *testing.T) {
        state.Succeed(t, "ls", "sj://jeff/foobar/", "--utc").RequireStdout(t, `
            KIND CREATED SIZE KEY
            OBJ 1970-01-01 00:00:05 0
            OBJ 1970-01-01 00:00:06 0 1
            OBJ 1970-01-01 00:00:07 0 2
            OBJ 1970-01-01 00:00:08 0 3
        `)
    })

    t.Run("MultipleLayers", func(t *testing.T) {
        state.Succeed(t, "ls", "sj://jeff/deep/").RequireStdout(t, `
            KIND CREATED SIZE KEY
            PRE aaa/
        `)

        state.Succeed(t, "ls", "sj://jeff/deep/aaa/").RequireStdout(t, `
            KIND CREATED SIZE KEY
            PRE bbb/
        `)

        state.Succeed(t, "ls", "sj://jeff/deep/aaa/bbb/", "--utc").RequireStdout(t, `
            KIND CREATED SIZE KEY
            OBJ 1970-01-01 00:00:01 0 1
            OBJ 1970-01-01 00:00:02 0 2
            OBJ 1970-01-01 00:00:03 0 3
        `)
    })
}

func TestLsPending(t *testing.T) {
    state := Setup(t,
        WithPendingFile("sj://jeff/deep/aaa/bbb/1"),
        WithPendingFile("sj://jeff/deep/aaa/bbb/2"),
        WithPendingFile("sj://jeff/deep/aaa/bbb/3"),
        WithPendingFile("sj://jeff/foobar"),
        WithPendingFile("sj://jeff/foobar/"),
        WithPendingFile("sj://jeff/foobar/1"),
        WithPendingFile("sj://jeff/foobar/2"),
        WithPendingFile("sj://jeff/foobar/3"),
        WithPendingFile("sj://jeff/foobaz/1"),

        WithFile("sj://jeff/invisible"),
    )

    t.Run("Recursive", func(t *testing.T) {
        state.Succeed(t, "ls", "sj://jeff", "--recursive", "--pending", "--utc").RequireStdout(t, `
            KIND CREATED SIZE KEY
            OBJ 1970-01-01 00:00:01 0 deep/aaa/bbb/1
            OBJ 1970-01-01 00:00:02 0 deep/aaa/bbb/2
            OBJ 1970-01-01 00:00:03 0 deep/aaa/bbb/3
            OBJ 1970-01-01 00:00:04 0 foobar
            OBJ 1970-01-01 00:00:05 0 foobar/
            OBJ 1970-01-01 00:00:06 0 foobar/1
            OBJ 1970-01-01 00:00:07 0 foobar/2
            OBJ 1970-01-01 00:00:08 0 foobar/3
            OBJ 1970-01-01 00:00:09 0 foobaz/1
        `)
    })

    t.Run("Basic", func(t *testing.T) {
        state.Succeed(t, "ls", "sj://jeff/fo", "--pending", "--utc").RequireStdout(t, `
            KIND CREATED SIZE KEY
            OBJ 1970-01-01 00:00:04 0 foobar
            PRE foobar/
            PRE foobaz/
        `)
    })

    t.Run("ExactPrefix", func(t *testing.T) {
        state.Succeed(t, "ls", "sj://jeff/foobar", "--pending", "--utc").RequireStdout(t, `
            KIND CREATED SIZE KEY
            OBJ 1970-01-01 00:00:04 0 foobar
            PRE foobar/
        `)
    })

    t.Run("ExactPrefixWithSlash", func(t *testing.T) {
        state.Succeed(t, "ls", "sj://jeff/foobar/", "--pending", "--utc").RequireStdout(t, `
            KIND CREATED SIZE KEY
            OBJ 1970-01-01 00:00:05 0
            OBJ 1970-01-01 00:00:06 0 1
            OBJ 1970-01-01 00:00:07 0 2
            OBJ 1970-01-01 00:00:08 0 3
        `)
    })

    t.Run("MultipleLayers", func(t *testing.T) {
        state.Succeed(t, "ls", "sj://jeff/deep/", "--pending").RequireStdout(t, `
            KIND CREATED SIZE KEY
            PRE aaa/
        `)

        state.Succeed(t, "ls", "sj://jeff/deep/aaa/", "--pending").RequireStdout(t, `
            KIND CREATED SIZE KEY
            PRE bbb/
        `)

        state.Succeed(t, "ls", "sj://jeff/deep/aaa/bbb/", "--pending", "--utc").RequireStdout(t, `
            KIND CREATED SIZE KEY
            OBJ 1970-01-01 00:00:01 0 1
            OBJ 1970-01-01 00:00:02 0 2
            OBJ 1970-01-01 00:00:03 0 3
        `)
    })
}

func TestLsDifficult(t *testing.T) {
    state := Setup(t,
        WithFile("sj://jeff//"),
        WithFile("sj://jeff///"),
        WithFile("sj://jeff////"),

        WithFile("sj://jeff//starts-slash"),

        WithFile("sj://jeff/ends-slash"),
        WithFile("sj://jeff/ends-slash/"),
        WithFile("sj://jeff/ends-slash//"),

        WithFile("sj://jeff/mid-slash"),
        WithFile("sj://jeff/mid-slash//2"),
        WithFile("sj://jeff/mid-slash/1"),
    )

    t.Run("Recursive", func(t *testing.T) {
        state.Succeed(t, "ls", "sj://jeff", "--recursive", "--utc").RequireStdout(t, `
            KIND CREATED SIZE KEY
            OBJ 1970-01-01 00:00:01 0 /
            OBJ 1970-01-01 00:00:02 0 //
            OBJ 1970-01-01 00:00:03 0 ///
            OBJ 1970-01-01 00:00:04 0 /starts-slash
            OBJ 1970-01-01 00:00:05 0 ends-slash
            OBJ 1970-01-01 00:00:06 0 ends-slash/
            OBJ 1970-01-01 00:00:07 0 ends-slash//
            OBJ 1970-01-01 00:00:08 0 mid-slash
            OBJ 1970-01-01 00:00:09 0 mid-slash//2
            OBJ 1970-01-01 00:00:10 0 mid-slash/1
        `)
    })

    t.Run("Basic", func(t *testing.T) {
        state.Succeed(t, "ls", "sj://jeff", "--utc").RequireStdout(t, `
            KIND CREATED SIZE KEY
            PRE /
            OBJ 1970-01-01 00:00:05 0 ends-slash
            PRE ends-slash/
            OBJ 1970-01-01 00:00:08 0 mid-slash
            PRE mid-slash/
        `)

        state.Succeed(t, "ls", "sj://jeff/", "--utc").RequireStdout(t, `
            KIND CREATED SIZE KEY
            PRE /
            OBJ 1970-01-01 00:00:05 0 ends-slash
            PRE ends-slash/
            OBJ 1970-01-01 00:00:08 0 mid-slash
            PRE mid-slash/
        `)
    })

    t.Run("OnlySlash", func(t *testing.T) {
        state.Succeed(t, "ls", "sj://jeff//", "--utc").RequireStdout(t, `
            KIND CREATED SIZE KEY
            OBJ 1970-01-01 00:00:01 0
            PRE /
            OBJ 1970-01-01 00:00:04 0 starts-slash
        `)

        state.Succeed(t, "ls", "sj://jeff///", "--utc").RequireStdout(t, `
            KIND CREATED SIZE KEY
            OBJ 1970-01-01 00:00:02 0
            PRE /
        `)

        state.Succeed(t, "ls", "sj://jeff////", "--utc").RequireStdout(t, `
            KIND CREATED SIZE KEY
            OBJ 1970-01-01 00:00:03 0
        `)
    })

    t.Run("EndsSlash", func(t *testing.T) {
        state.Succeed(t, "ls", "sj://jeff/ends-slash", "--utc").RequireStdout(t, `
            KIND CREATED SIZE KEY
            OBJ 1970-01-01 00:00:05 0 ends-slash
            PRE ends-slash/
        `)

        state.Succeed(t, "ls", "sj://jeff/ends-slash/", "--utc").RequireStdout(t, `
            KIND CREATED SIZE KEY
            OBJ 1970-01-01 00:00:06 0
            PRE /
        `)

        state.Succeed(t, "ls", "sj://jeff/ends-slash//", "--utc").RequireStdout(t, `
            KIND CREATED SIZE KEY
            OBJ 1970-01-01 00:00:07 0
        `)
    })

    t.Run("MidSlash", func(t *testing.T) {
        state.Succeed(t, "ls", "sj://jeff/mid-slash", "--utc").RequireStdout(t, `
            KIND CREATED SIZE KEY
            OBJ 1970-01-01 00:00:08 0 mid-slash
            PRE mid-slash/
        `)

        state.Succeed(t, "ls", "sj://jeff/mid-slash/", "--utc").RequireStdout(t, `
            KIND CREATED SIZE KEY
            PRE /
            OBJ 1970-01-01 00:00:10 0 1
        `)

        state.Succeed(t, "ls", "sj://jeff/mid-slash//", "--utc").RequireStdout(t, `
            KIND CREATED SIZE KEY
            OBJ 1970-01-01 00:00:09 0 2
        `)
    })
}
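A note on how the expected outputs above are matched: RequireStdout (defined in cmd/uplinkng/filesystem_test.go below) runs both the backtick literal and the captured stdout through trimNewlineSpaces, which drops blank lines and strips leading and trailing whitespace from every line, so the indentation of the literals is not significant. A small sketch of that normalization (the value here is illustrative):

s := trimNewlineSpaces(`
    KIND CREATED SIZE KEY
    PRE foo/
`)
// s == "KIND CREATED SIZE KEY\nPRE foo/"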
cmdMb (mb command):

@@ -5,6 +5,7 @@ package main
 
 import (
 	"github.com/zeebo/clingy"
+	"github.com/zeebo/errs"
 )
 
 type cmdMb struct {
@@ -20,5 +21,12 @@ func (c *cmdMb) Setup(a clingy.Arguments, f clingy.Flags) {
 }
 
 func (c *cmdMb) Execute(ctx clingy.Context) error {
-	return nil
+	project, err := c.OpenProject(ctx)
+	if err != nil {
+		return errs.Wrap(err)
+	}
+	defer func() { _ = project.Close() }()
+
+	_, err = project.CreateBucket(ctx, c.name)
+	return err
 }
cmdMetaGet (meta get command):

@@ -10,14 +10,16 @@ import (
 type cmdMetaGet struct {
 	projectProvider
 
-	location string
+	location Location
 	entry    *string
 }
 
 func (c *cmdMetaGet) Setup(a clingy.Arguments, f clingy.Flags) {
 	c.projectProvider.Setup(a, f)
 
-	c.location = a.New("location", "Location of object (sj://BUCKET/KEY)").(string)
+	c.location = a.New("location", "Location of object (sj://BUCKET/KEY)",
+		clingy.Transform(parseLocation),
+	).(Location)
 	c.entry = a.New("entry", "Metadata entry to get", clingy.Optional).(*string)
 }
cmdRm (rm command):

@@ -7,7 +7,6 @@ import (
 	"strconv"
 
 	"github.com/zeebo/clingy"
-	"github.com/zeebo/errs"
 )
 
 type cmdRm struct {
@@ -16,7 +15,7 @@ type cmdRm struct {
 	recursive bool
 	encrypted bool
 
-	location string
+	location Location
 }
 
 func (c *cmdRm) Setup(a clingy.Arguments, f clingy.Flags) {
@@ -30,26 +29,21 @@ func (c *cmdRm) Setup(a clingy.Arguments, f clingy.Flags) {
 		clingy.Transform(strconv.ParseBool),
 	).(bool)
 
-	c.location = a.New("location", "Location to remove (sj://BUCKET[/KEY])").(string)
+	c.location = a.New("location", "Location to remove (sj://BUCKET[/KEY])",
+		clingy.Transform(parseLocation),
+	).(Location)
 }
 
 func (c *cmdRm) Execute(ctx clingy.Context) error {
-	project, err := c.OpenProject(ctx, bypassEncryption(c.encrypted))
+	fs, err := c.OpenFilesystem(ctx, bypassEncryption(c.encrypted))
 	if err != nil {
 		return err
 	}
-	defer func() { _ = project.Close() }()
+	defer func() { _ = fs.Close() }()
 
-	// TODO: use the filesystem interface
 	// TODO: recursive remove
 
-	p, err := parseLocation(c.location)
-	if err != nil {
-		return err
-	} else if !p.remote {
-		return errs.New("can only delete remote objects")
-	}
-
-	_, err = project.DeleteObject(ctx, p.bucket, p.key)
-	return err
+	// return fs.Delete(ctx, c.location)
+	return nil
 }
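The rm command now opens the filesystem abstraction, but as the TODO and the commented-out line show, the actual delete has not been ported yet. Once the filesystem interface grows a Delete method, Execute would presumably collapse to something like the sketch below (hypothetical; Delete is not part of the interface in this diff):

func (c *cmdRm) Execute(ctx clingy.Context) error {
    fs, err := c.OpenFilesystem(ctx, bypassEncryption(c.encrypted))
    if err != nil {
        return err
    }
    defer func() { _ = fs.Close() }()

    // Hypothetical: delegate removal to the filesystem abstraction so the
    // fake used in tests can observe it as a recorded operation.
    return fs.Delete(ctx, c.location)
}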
cmd/uplinkng/filesystem_test.go (new file, 476 lines):

@@ -0,0 +1,476 @@
// Copyright (C) 2021 Storj Labs, Inc.
// See LICENSE for copying information.

package main

import (
    "bytes"
    "context"
    "sort"
    "strings"
    "testing"
    "time"

    "github.com/stretchr/testify/require"
    "github.com/zeebo/clingy"
    "github.com/zeebo/errs"
)

//
// helpers to execute commands for tests
//

func Setup(t *testing.T, opts ...ExecuteOption) State {
    return State{
        opts: opts,
    }
}

type State struct {
    opts []ExecuteOption
}

// Succeed is the same as Run followed by result.RequireSuccess.
func (st State) Succeed(t *testing.T, args ...string) Result {
    result := st.Run(t, args...)
    result.RequireSuccess(t)
    return result
}

// Fail is the same as Run followed by result.RequireFailure.
func (st State) Fail(t *testing.T, args ...string) Result {
    result := st.Run(t, args...)
    result.RequireFailure(t)
    return result
}

func (st State) Run(t *testing.T, args ...string) Result {
    var stdout bytes.Buffer
    var stderr bytes.Buffer
    var stdin bytes.Buffer
    var ops []Operation
    var ran bool

    ok, err := clingy.Environment{
        Name: "uplink-test",
        Args: args,

        Stdin:  &stdin,
        Stdout: &stdout,
        Stderr: &stderr,

        Wrap: func(ctx clingy.Context, cmd clingy.Cmd) error {
            tfs := newTestFilesystem()
            for _, opt := range st.opts {
                if err := opt(ctx, tfs); err != nil {
                    return errs.Wrap(err)
                }
            }
            tfs.ops = nil

            if len(tfs.stdin) > 0 {
                _, _ = stdin.WriteString(tfs.stdin)
            }

            if setter, ok := cmd.(interface {
                setTestFilesystem(filesystem)
            }); ok {
                setter.setTestFilesystem(tfs)
            }

            ran = true
            err := cmd.Execute(ctx)
            ops = tfs.ops
            return err
        },
    }.Run(context.Background(), commands)

    if ok && err == nil {
        require.True(t, ran, "no command was executed: %q", args)
    }
    return Result{
        Stdout:     stdout.String(),
        Stderr:     stderr.String(),
        Ok:         ok,
        Err:        err,
        Operations: ops,
    }
}

type ExecuteOption func(ctx clingy.Context, tfs *testFilesystem) error

func WithFile(location string) ExecuteOption {
    return func(ctx clingy.Context, tfs *testFilesystem) error {
        loc, err := parseLocation(location)
        if err != nil {
            return err
        }
        if loc.Remote() {
            tfs.ensureBucket(loc.bucket)
        }
        wh, err := tfs.Create(ctx, loc)
        if err != nil {
            return err
        }
        return wh.Commit()
    }
}

func WithPendingFile(location string) ExecuteOption {
    return func(ctx clingy.Context, tfs *testFilesystem) error {
        loc, err := parseLocation(location)
        if err != nil {
            return err
        }
        if loc.Remote() {
            tfs.ensureBucket(loc.bucket)
        }
        _, err = tfs.Create(ctx, loc)
        return err
    }
}

//
// execution results
//

type Result struct {
    Stdout     string
    Stderr     string
    Ok         bool
    Err        error
    Operations []Operation
}

func (r Result) RequireSuccess(t *testing.T) {
    if !r.Ok {
        errs := parseErrors(r.Stdout)
        require.True(t, r.Ok, "test did not run successfully. errors:\n%s",
            strings.Join(errs, "\n"))
    }
    require.NoError(t, r.Err)
}

func (r Result) RequireFailure(t *testing.T) {
    require.False(t, r.Ok && r.Err == nil, "command ran with no error")
}

func (r Result) RequireStdout(t *testing.T, stdout string) {
    require.Equal(t, trimNewlineSpaces(stdout), trimNewlineSpaces(r.Stdout))
}

func (r Result) RequireStderr(t *testing.T, stderr string) {
    require.Equal(t, trimNewlineSpaces(stderr), trimNewlineSpaces(r.Stderr))
}

func parseErrors(s string) []string {
    lines := strings.Split(s, "\n")
    start := 0
    for i, line := range lines {
        if line == "Errors:" {
            start = i + 1
        } else if len(line) > 0 && line[0] != ' ' {
            return lines[start:i]
        }
    }
    return nil
}

func trimNewlineSpaces(s string) string {
    lines := strings.Split(s, "\n")

    j := 0
    for _, line := range lines {
        if trimmed := strings.TrimSpace(line); len(trimmed) > 0 {
            lines[j] = trimmed
            j++
        }
    }
    return strings.Join(lines[:j], "\n")
}

type Operation struct {
    Kind  string
    Loc   string
    Error bool
}

func newOp(kind string, loc Location, err error) Operation {
    return Operation{
        Kind:  kind,
        Loc:   loc.String(),
        Error: err != nil,
    }
}

//
// filesystem
//

type testFilesystem struct {
    stdin   string
    ops     []Operation
    created int64
    files   map[Location]byteFileData
    pending map[Location][]*byteWriteHandle
    buckets map[string]struct{}
}

func newTestFilesystem() *testFilesystem {
    return &testFilesystem{
        files:   make(map[Location]byteFileData),
        pending: make(map[Location][]*byteWriteHandle),
        buckets: make(map[string]struct{}),
    }
}

type byteFileData struct {
    data    []byte
    created int64
}

func (tfs *testFilesystem) ensureBucket(name string) {
    tfs.buckets[name] = struct{}{}
}

func (tfs *testFilesystem) Close() error {
    return nil
}

func (tfs *testFilesystem) Open(ctx clingy.Context, loc Location) (_ readHandle, err error) {
    defer func() { tfs.ops = append(tfs.ops, newOp("open", loc, err)) }()

    bf, ok := tfs.files[loc]
    if !ok {
        return nil, errs.New("file does not exist")
    }
    return &byteReadHandle{Buffer: bytes.NewBuffer(bf.data)}, nil
}

func (tfs *testFilesystem) Create(ctx clingy.Context, loc Location) (_ writeHandle, err error) {
    defer func() { tfs.ops = append(tfs.ops, newOp("create", loc, err)) }()

    if loc.Remote() {
        if _, ok := tfs.buckets[loc.bucket]; !ok {
            return nil, errs.New("bucket %q does not exist", loc.bucket)
        }
    }

    tfs.created++
    wh := &byteWriteHandle{
        buf: bytes.NewBuffer(nil),
        loc: loc,
        tfs: tfs,
        cre: tfs.created,
    }

    tfs.pending[loc] = append(tfs.pending[loc], wh)

    return wh, nil
}

func (tfs *testFilesystem) ListObjects(ctx context.Context, prefix Location, recursive bool) (objectIterator, error) {
    var infos []objectInfo
    for loc, bf := range tfs.files {
        if loc.HasPrefix(prefix) {
            infos = append(infos, objectInfo{
                Loc:     loc,
                Created: time.Unix(bf.created, 0),
            })
        }
    }

    sort.Sort(objectInfos(infos))

    if !recursive {
        infos = collapseObjectInfos(prefix, infos)
    }

    return &objectInfoIterator{infos: infos}, nil
}

func (tfs *testFilesystem) ListUploads(ctx context.Context, prefix Location, recursive bool) (objectIterator, error) {
    var infos []objectInfo
    for loc, whs := range tfs.pending {
        if loc.HasPrefix(prefix) {
            for _, wh := range whs {
                infos = append(infos, objectInfo{
                    Loc:     loc,
                    Created: time.Unix(wh.cre, 0),
                })
            }
        }
    }

    sort.Sort(objectInfos(infos))

    if !recursive {
        infos = collapseObjectInfos(prefix, infos)
    }

    return &objectInfoIterator{infos: infos}, nil
}

func (tfs *testFilesystem) IsLocalDir(ctx context.Context, loc Location) bool {
    // TODO: implement this

    return false
}

//
// readHandle
//

type byteReadHandle struct {
    *bytes.Buffer
}

func (b *byteReadHandle) Close() error     { return nil }
func (b *byteReadHandle) Info() objectInfo { return objectInfo{} }

//
// writeHandle
//

type byteWriteHandle struct {
    buf  *bytes.Buffer
    loc  Location
    tfs  *testFilesystem
    cre  int64
    done bool
}

func (b *byteWriteHandle) Write(p []byte) (int, error) {
    return b.buf.Write(p)
}

func (b *byteWriteHandle) Commit() error {
    if err := b.close(); err != nil {
        return err
    }

    b.tfs.ops = append(b.tfs.ops, newOp("commit", b.loc, nil))
    b.tfs.files[b.loc] = byteFileData{
        data:    b.buf.Bytes(),
        created: b.cre,
    }
    return nil
}

func (b *byteWriteHandle) Abort() error {
    if err := b.close(); err != nil {
        return err
    }

    b.tfs.ops = append(b.tfs.ops, newOp("append", b.loc, nil))
    return nil
}

func (b *byteWriteHandle) close() error {
    if b.done {
        return errs.New("already done")
    }
    b.done = true

    handles := b.tfs.pending[b.loc]
    for i, v := range handles {
        if v == b {
            handles = append(handles[:i], handles[i+1:]...)
            break
        }
    }

    if len(handles) > 0 {
        b.tfs.pending[b.loc] = handles
    } else {
        delete(b.tfs.pending, b.loc)
    }

    return nil
}

//
// objectIterator
//

type objectInfoIterator struct {
    infos   []objectInfo
    current objectInfo
}

func (li *objectInfoIterator) Next() bool {
    if len(li.infos) == 0 {
        return false
    }
    li.current, li.infos = li.infos[0], li.infos[1:]
    return true
}

func (li *objectInfoIterator) Err() error {
    return nil
}

func (li *objectInfoIterator) Item() objectInfo {
    return li.current
}

type objectInfos []objectInfo

func (ois objectInfos) Len() int          { return len(ois) }
func (ois objectInfos) Swap(i int, j int) { ois[i], ois[j] = ois[j], ois[i] }
func (ois objectInfos) Less(i int, j int) bool {
    li, lj := ois[i].Loc, ois[j].Loc

    if !li.remote && lj.remote {
        return true
    } else if !lj.remote && li.remote {
        return false
    }

    if li.bucket < lj.bucket {
        return true
    } else if lj.bucket < li.bucket {
        return false
    }

    if li.key < lj.key {
        return true
    } else if lj.key < li.key {
        return false
    }

    if li.path < lj.path {
        return true
    } else if lj.path < li.path {
        return false
    }

    return false
}

func collapseObjectInfos(prefix Location, infos []objectInfo) []objectInfo {
    collapsing := false
    current := ""
    j := 0

    for _, oi := range infos {
        first, ok := oi.Loc.ListKeyName(prefix)
        if ok {
            if collapsing && first == current {
                continue
            }

            collapsing = true
            current = first

            oi.IsPrefix = true
        }

        oi.Loc = oi.Loc.SetKey(first)

        infos[j] = oi
        j++
    }

    return infos[:j]
}
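Besides stdout and stderr, Run also records every filesystem call the command made (the ops slice filled in by newOp above), so tests can assert on side effects rather than only on output. A sketch of how that could look (the test and the expected operations are hypothetical; only Result.Operations and the Operation fields come from the code above):

func TestCpRecordsOperations(t *testing.T) {
    state := Setup(t, WithFile("sj://bucket/src"))

    result := state.Succeed(t, "cp", "sj://bucket/src", "sj://bucket/dst")

    // Hypothetical expectation: a copy should read the source and
    // create+commit the destination, in that order.
    require.Equal(t, []Operation{
        {Kind: "open", Loc: "sj://bucket/src"},
        {Kind: "create", Loc: "sj://bucket/dst"},
        {Kind: "commit", Loc: "sj://bucket/dst"},
    }, result.Operations)
}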
Location helpers:

@@ -87,6 +87,32 @@ func (p Location) Key() string {
 	return p.path
 }
 
+// SetKey sets the key portion of the location.
+func (p Location) SetKey(s string) Location {
+	if p.remote {
+		p.key = s
+	} else {
+		p.path = s
+	}
+	return p
+}
+
+// Parent returns the section of the key up to and including the final slash.
+func (p Location) Parent() string {
+	if p.Std() {
+		return ""
+	} else if p.remote {
+		if idx := strings.LastIndexByte(p.key, '/'); idx >= 0 {
+			return p.key[:idx+1]
+		}
+		return ""
+	}
+	if idx := strings.LastIndexByte(p.path, filepath.Separator); idx >= 0 {
+		return p.path[:idx+1]
+	}
+	return ""
+}
+
 // Base returns the last base component of the key.
 func (p Location) Base() (string, bool) {
 	if p.Std() {
@@ -149,3 +175,27 @@ func (p Location) AppendKey(key string) Location {
 	p.path = filepath.Join(p.path, key)
 	return p
 }
+
+// HasPrefix returns true if the passed in loc is a prefix.
+func (p Location) HasPrefix(loc Location) bool {
+	if p.Std() {
+		return loc.Std()
+	} else if p.remote != loc.remote {
+		return false
+	} else if !p.remote {
+		return strings.HasPrefix(p.path, loc.path)
+	} else if p.bucket != loc.bucket {
+		return false
+	}
+	return strings.HasPrefix(p.key, loc.key)
+}
+
+// ListKeyName returns the full first component of the key after the provided
+// prefix and a boolean indicating if the component is itself a prefix.
+func (p Location) ListKeyName(prefix Location) (string, bool) {
+	rem := p.Key()[len(prefix.Parent()):]
+	if idx := strings.IndexByte(rem, '/'); idx >= 0 {
+		return rem[:idx+1], true
+	}
+	return rem, false
+}
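These two helpers drive the collapsed (non-recursive) listings asserted in cmd_ls_test.go: HasPrefix filters candidate objects and ListKeyName produces either a terminal key or a directory-style component ending in a slash. An illustrative walk-through, with values chosen to match the tests above:

prefix, _ := parseLocation("sj://jeff/deep/")
loc, _ := parseLocation("sj://jeff/deep/aaa/bbb/1")

_ = loc.HasPrefix(prefix) // true: same bucket, key starts with "deep/"

name, isPrefix := loc.ListKeyName(prefix)
_ = name     // "aaa/": the first component of the key after prefix.Parent()
_ = isPrefix // true, so ls renders it as a PRE entry rather than an OBJ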
main (command registration):

@@ -15,38 +15,19 @@ import (
 var gf = newGlobalFlags()
 
 func main() {
-	env := clingy.Environment{
+	ok, err := clingy.Environment{
 		Name: "uplink",
 		Args: os.Args[1:],
 
 		Dynamic: gf.Dynamic,
 		Wrap:    gf.Wrap,
-	}
-
-	ok, err := env.Run(context.Background(), func(c clingy.Commands, f clingy.Flags) {
+	}.Run(context.Background(), func(c clingy.Commands, f clingy.Flags) {
 		// setup the dynamic global flags first so that they may be consulted
 		// by the stdlib flags during their definition.
 		gf.Setup(f)
 		newStdlibFlags(flag.CommandLine).Setup(f)
 
-		c.Group("access", "Access related commands", func() {
-			c.New("save", "Save an existing access", new(cmdAccessSave))
-			c.New("create", "Create an access from a setup token", new(cmdAccessCreate))
-			c.New("delete", "Delete an access from local store", new(cmdAccessDelete))
-			c.New("list", "List saved accesses", new(cmdAccessList))
-			c.New("use", "Set default access to use", new(cmdAccessUse))
-			c.New("revoke", "Revoke an access", new(cmdAccessRevoke))
-		})
-		c.New("share", "Shares restricted accesses to objects", new(cmdShare))
-		c.New("mb", "Create a new bucket", new(cmdMb))
-		c.New("rb", "Remove a bucket bucket", new(cmdRb))
-		c.New("cp", "Copies files or objects into or out of tardigrade", new(cmdCp))
-		c.New("ls", "Lists buckets, prefixes, or objects", new(cmdLs))
-		c.New("rm", "Remove an object", new(cmdRm))
-		c.Group("meta", "Object metadata related commands", func() {
-			c.New("get", "Get an object's metadata", new(cmdMetaGet))
-		})
-		c.New("version", "Prints version information", new(cmdVersion))
+		commands(c, f)
 	})
 	if err != nil {
 		fmt.Fprintf(os.Stderr, "%+v\n", err)
@@ -55,3 +36,24 @@ func main() {
 		os.Exit(1)
 	}
 }
+
+func commands(c clingy.Commands, f clingy.Flags) {
+	c.Group("access", "Access related commands", func() {
+		c.New("save", "Save an existing access", new(cmdAccessSave))
+		c.New("create", "Create an access from a setup token", new(cmdAccessCreate))
+		c.New("delete", "Delete an access from local store", new(cmdAccessDelete))
+		c.New("list", "List saved accesses", new(cmdAccessList))
+		c.New("use", "Set default access to use", new(cmdAccessUse))
+		c.New("revoke", "Revoke an access", new(cmdAccessRevoke))
+	})
+	c.New("share", "Shares restricted accesses to objects", new(cmdShare))
+	c.New("mb", "Create a new bucket", new(cmdMb))
+	c.New("rb", "Remove a bucket bucket", new(cmdRb))
+	c.New("cp", "Copies files or objects into or out of tardigrade", new(cmdCp))
+	c.New("ls", "Lists buckets, prefixes, or objects", new(cmdLs))
+	c.New("rm", "Remove an object", new(cmdRm))
+	c.Group("meta", "Object metadata related commands", func() {
+		c.New("get", "Get an object's metadata", new(cmdMetaGet))
+	})
+	c.New("version", "Prints version information", new(cmdVersion))
+}
projectProvider:

@@ -13,15 +13,23 @@ import (
 )
 
 type projectProvider struct {
-	access      string
-	openProject func(ctx context.Context) (*uplink.Project, error)
+	access string
+
+	testProject    *uplink.Project
+	testFilesystem filesystem
 }
 
 func (pp *projectProvider) Setup(a clingy.Arguments, f clingy.Flags) {
 	pp.access = f.New("access", "Which access to use", "").(string)
 }
 
+func (pp *projectProvider) setTestFilesystem(fs filesystem) { pp.testFilesystem = fs }
+
 func (pp *projectProvider) OpenFilesystem(ctx context.Context, options ...projectOption) (filesystem, error) {
+	if pp.testFilesystem != nil {
+		return pp.testFilesystem, nil
+	}
+
 	project, err := pp.OpenProject(ctx, options...)
 	if err != nil {
 		return nil, err
@@ -35,8 +43,8 @@ func (pp *projectProvider) OpenFilesystem(ctx context.Context, options ...projectOption) (filesystem, error) {
 }
 
 func (pp *projectProvider) OpenProject(ctx context.Context, options ...projectOption) (*uplink.Project, error) {
-	if pp.openProject != nil {
-		return pp.openProject(ctx)
+	if pp.testProject != nil {
+		return pp.testProject, nil
 	}
 
 	var opts projectOptions
@@ -57,6 +65,8 @@ func (pp *projectProvider) OpenProject(ctx context.Context, options ...projectOption) (*uplink.Project, error) {
 		access, err = uplink.ParseAccess(data)
 	} else {
 		access, err = uplink.ParseAccess(accessDefault)
+		// TODO: if this errors then it's probably a name so don't report an error
+		// that says "it failed to parse"
 	}
 	if err != nil {
 		return nil, err
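The provider is the seam the test framework hooks into: setTestFilesystem is called by the Wrap function in filesystem_test.go, and any command that goes through OpenFilesystem automatically gets the fake. A sketch of the intended call pattern inside a command (cmdExample and its body are hypothetical; OpenFilesystem, Close, ListObjects, and the iterator methods are from this diff):

func (c *cmdExample) Execute(ctx clingy.Context) error {
    // In production this opens a real project-backed filesystem; under the
    // test framework it returns the injected *testFilesystem instead.
    fs, err := c.OpenFilesystem(ctx)
    if err != nil {
        return err
    }
    defer func() { _ = fs.Close() }()

    iter, err := fs.ListObjects(ctx, c.prefix, true) // c.prefix is a hypothetical field
    if err != nil {
        return err
    }
    for iter.Next() {
        // ... render iter.Item() ...
    }
    return iter.Err()
}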