cmd/uplinkng: become cmd/uplink

Change-Id: If426c32219d32044d715ab6dfa9718807f32cb9f
This commit is contained in:
Jeff Wendling 2022-01-06 14:55:46 -05:00 committed by Michał Niewrzał
parent 9d52112af8
commit 9061dd309f
91 changed files with 343 additions and 4021 deletions

View File

@ -167,15 +167,15 @@ pipeline {
steps {
// verify most of the commands, we cannot check everything since some of them
// have a C dependency and we don't have cross-compilation in storj/ci image
sh 'GOOS=linux GOARCH=386 go vet ./cmd/uplink ./cmd/satellite ./cmd/uplinkng ./cmd/storagenode-updater ./cmd/storj-sim'
sh 'GOOS=linux GOARCH=amd64 go vet ./cmd/uplink ./cmd/satellite ./cmd/uplinkng ./cmd/storagenode-updater ./cmd/storj-sim'
sh 'GOOS=linux GOARCH=arm go vet ./cmd/uplink ./cmd/satellite ./cmd/uplinkng ./cmd/storagenode-updater ./cmd/storj-sim'
sh 'GOOS=linux GOARCH=arm64 go vet ./cmd/uplink ./cmd/satellite ./cmd/uplinkng ./cmd/storagenode-updater ./cmd/storj-sim'
sh 'GOOS=windows GOARCH=386 go vet ./cmd/uplink ./cmd/satellite ./cmd/uplinkng ./cmd/storagenode-updater ./cmd/storj-sim'
sh 'GOOS=windows GOARCH=amd64 go vet ./cmd/uplink ./cmd/satellite ./cmd/uplinkng ./cmd/storagenode-updater ./cmd/storj-sim'
sh 'GOOS=windows GOARCH=arm64 go vet ./cmd/uplink ./cmd/satellite ./cmd/uplinkng ./cmd/storagenode-updater ./cmd/storj-sim'
sh 'GOOS=darwin GOARCH=amd64 go vet ./cmd/uplink ./cmd/satellite ./cmd/uplinkng ./cmd/storagenode-updater ./cmd/storj-sim'
sh 'GOOS=darwin GOARCH=arm64 go vet ./cmd/uplink ./cmd/satellite ./cmd/uplinkng ./cmd/storagenode-updater ./cmd/storj-sim'
sh 'GOOS=linux GOARCH=386 go vet ./cmd/uplink ./cmd/satellite ./cmd/storagenode-updater ./cmd/storj-sim'
sh 'GOOS=linux GOARCH=amd64 go vet ./cmd/uplink ./cmd/satellite ./cmd/storagenode-updater ./cmd/storj-sim'
sh 'GOOS=linux GOARCH=arm go vet ./cmd/uplink ./cmd/satellite ./cmd/storagenode-updater ./cmd/storj-sim'
sh 'GOOS=linux GOARCH=arm64 go vet ./cmd/uplink ./cmd/satellite ./cmd/storagenode-updater ./cmd/storj-sim'
sh 'GOOS=windows GOARCH=386 go vet ./cmd/uplink ./cmd/satellite ./cmd/storagenode-updater ./cmd/storj-sim'
sh 'GOOS=windows GOARCH=amd64 go vet ./cmd/uplink ./cmd/satellite ./cmd/storagenode-updater ./cmd/storj-sim'
sh 'GOOS=windows GOARCH=arm64 go vet ./cmd/uplink ./cmd/satellite ./cmd/storagenode-updater ./cmd/storj-sim'
sh 'GOOS=darwin GOARCH=amd64 go vet ./cmd/uplink ./cmd/satellite ./cmd/storagenode-updater ./cmd/storj-sim'
sh 'GOOS=darwin GOARCH=arm64 go vet ./cmd/uplink ./cmd/satellite ./cmd/storagenode-updater ./cmd/storj-sim'
}
}
stage('Tests') {

View File

@ -319,12 +319,9 @@ versioncontrol_%:
.PHONY: multinode_%
multinode_%: multinode-console
$(MAKE) binary-check COMPONENT=multinode GOARCH=$(word 3, $(subst _, ,$@)) GOOS=$(word 2, $(subst _, ,$@))
.PHONY: uplinkng_%
uplinkng_%:
$(MAKE) binary-check COMPONENT=uplinkng GOARCH=$(word 3, $(subst _, ,$@)) GOOS=$(word 2, $(subst _, ,$@))
COMPONENTLIST := certificates identity inspector satellite storagenode storagenode-updater uplink versioncontrol multinode uplinkng
COMPONENTLIST := certificates identity inspector satellite storagenode storagenode-updater uplink versioncontrol multinode
OSARCHLIST := linux_amd64 linux_arm linux_arm64 windows_amd64 freebsd_amd64
BINARIES := $(foreach C,$(COMPONENTLIST),$(foreach O,$(OSARCHLIST),$C_$O))
.PHONY: binaries

View File

@ -1,20 +0,0 @@
# Build args: DOCKER_ARCH picks the base image architecture; TAG and GOARCH
# pick which prebuilt uplink binary is copied into the image.
ARG DOCKER_ARCH
# Fetch ca-certificates file for arch independent builds below
FROM alpine as ca-cert
RUN apk -U add ca-certificates
FROM ${DOCKER_ARCH:-amd64}/alpine
ARG TAG
ARG GOARCH
ENV GOARCH ${GOARCH}
# CONF_PATH is where uplink reads its configuration; API_KEY and
# SATELLITE_ADDR are intentionally empty and expected at `docker run` time.
ENV CONF_PATH=/root/.local/storj/uplink \
API_KEY= \
SATELLITE_ADDR=
WORKDIR /app
VOLUME /root/.local/storj/uplink
COPY --from=ca-cert /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/ca-certificates.crt
# The binary must already exist under release/${TAG}/ before `docker build`.
COPY release/${TAG}/uplink_linux_${GOARCH:-amd64} /app/uplink
COPY cmd/uplink/entrypoint /entrypoint
ENTRYPOINT ["/entrypoint"]

View File

@ -1,17 +0,0 @@
# Uplink
Documentation for developing and building the uplink service.
Usage:
Then setup the uplink:
```
go install storj.io/storj/cmd/uplink
uplink setup
```
You can edit `~/.local/share/storj/uplink/config.yaml` to your liking. Then run it!
```
uplink ls
```

View File

@ -10,7 +10,7 @@ import (
"github.com/zeebo/clingy"
"github.com/zeebo/errs"
"storj.io/storj/cmd/uplinkng/ulext"
"storj.io/storj/cmd/uplink/ulext"
"storj.io/uplink"
)

View File

@ -10,7 +10,7 @@ import (
"github.com/zeebo/clingy"
"github.com/zeebo/errs"
"storj.io/storj/cmd/uplinkng/ulloc"
"storj.io/storj/cmd/uplink/ulloc"
"storj.io/uplink"
)

View File

@ -1,306 +0,0 @@
// Copyright (C) 2020 Storj Labs, Inc.
// See LICENSE for copying information.
package cmd
import (
"context"
"encoding/base64"
"encoding/json"
"fmt"
"os"
"strings"
"github.com/spf13/cobra"
"github.com/zeebo/errs"
"storj.io/common/base58"
"storj.io/common/macaroon"
"storj.io/common/pb"
"storj.io/private/cfgstruct"
"storj.io/private/process"
"storj.io/uplink"
"storj.io/uplink/edge"
)
// registerConfig holds the flags specific to `uplink access register`,
// in addition to the access-selection flags embedded via AccessConfig.
type registerConfig struct {
	AuthService string `help:"the address to the service you wish to register your access with" default:"" basic-help:"true"`
	CACert      string `help:"path to a file in PEM format with certificate(s) or certificate chain(s) to validate the auth service against" default:""`
	Public      bool   `help:"if the access should be public" default:"false" basic-help:"true"`
	Format      string `help:"format of credentials, use 'env' or 'aws' for using in scripts" default:""`
	AWSProfile  string `help:"if using --format=aws, output the --profile tag using this profile" default:""`
	AccessConfig
}
// Per-subcommand configuration; bound to the cobra commands in init below.
var (
	inspectCfg  AccessConfig   // flags for `access inspect`
	listCfg     AccessConfig   // flags for `access list`
	registerCfg registerConfig // flags for `access register`
)
// init wires the `access` command group (inspect, list, register) onto
// RootCmd and binds each subcommand's configuration struct.
func init() {
	// We skip the use of addCmd here because we only want the configuration options listed
	// above, and addCmd adds a whole lot more than we want.
	accessCmd := &cobra.Command{
		Use:   "access",
		Short: "Set of commands to manage access.",
	}
	inspectCmd := &cobra.Command{
		Use:   "inspect [ACCESS]",
		Short: "Inspect allows you to explode a serialized access into its constituent parts.",
		RunE:  accessInspect,
		Args:  cobra.MaximumNArgs(1),
	}
	listCmd := &cobra.Command{
		Use:   "list",
		Short: "Prints name and associated satellite of all available accesses.",
		RunE:  accessList,
		Args:  cobra.NoArgs,
	}
	registerCmd := &cobra.Command{
		Use:   "register [ACCESS]",
		Short: "Register your access for use with a hosted S3 compatible gateway and linksharing.",
		RunE:  accessRegister,
		Args:  cobra.MaximumNArgs(1),
	}
	RootCmd.AddCommand(accessCmd)
	accessCmd.AddCommand(inspectCmd)
	accessCmd.AddCommand(listCmd)
	accessCmd.AddCommand(registerCmd)
	// Bind flags/config-file values into the package-level config structs.
	process.Bind(inspectCmd, &inspectCfg, defaults, cfgstruct.ConfDir(getConfDir()))
	process.Bind(listCmd, &listCfg, defaults, cfgstruct.ConfDir(getConfDir()))
	process.Bind(registerCmd, &registerCfg, defaults, cfgstruct.ConfDir(getConfDir()))
}
// accessList prints every configured access name together with the
// satellite address embedded in its serialized grant.
func accessList(cmd *cobra.Command, args []string) (err error) {
	fmt.Println("=========== ACCESSES LIST: name / satellite ================================")
	for name, serialized := range listCfg.Accesses {
		satelliteAddr, _, _, parseErr := parseAccess(serialized)
		if parseErr != nil {
			return parseErr
		}
		fmt.Println(name, "/", satelliteAddr)
	}
	return nil
}
// base64url is a byte slice that JSON-encodes as a quoted URL-safe
// base64 string instead of the default std-encoding.
type base64url []byte

// MarshalJSON implements json.Marshaler.
func (b base64url) MarshalJSON() ([]byte, error) {
	encoded := base64.URLEncoding.EncodeToString(b)
	out := make([]byte, 0, len(encoded)+2)
	out = append(out, '"')
	out = append(out, encoded...)
	out = append(out, '"')
	return out, nil
}
// accessInfo is the JSON shape printed by `access inspect`: the decoded
// parts of a serialized access grant.
type accessInfo struct {
	SatelliteAddr    string               `json:"satellite_addr"`
	EncryptionAccess *pb.EncryptionAccess `json:"encryption_access"`
	APIKey           string               `json:"api_key"`
	Macaroon         accessInfoMacaroon   `json:"macaroon"`
}

// accessInfoMacaroon is the decoded macaroon portion of accessInfo.
type accessInfoMacaroon struct {
	Head    base64url         `json:"head"`
	Caveats []macaroon.Caveat `json:"caveats"`
	Tail    base64url         `json:"tail"`
}
// accessInspect implements `access inspect [ACCESS]`: it loads the access
// from arg zero or the configured access, decodes it, and prints its
// satellite address, encryption access, API key, and macaroon as JSON.
func accessInspect(cmd *cobra.Command, args []string) (err error) {
	// FIXME: This is inefficient. We end up parsing, serializing, parsing
	// again. It can get particularly bad with large access grants.
	access, err := getAccessFromArgZeroOrConfig(inspectCfg, args)
	if err != nil {
		return errs.New("no access specified: %w", err)
	}
	serializedAccess, err := access.Serialize()
	if err != nil {
		return err
	}
	p, err := parseAccessRaw(serializedAccess)
	if err != nil {
		return err
	}
	// The embedded API key is parsed twice: as a macaroon (to expose
	// head/tail/caveats) and as an API key (for its canonical form).
	m, err := macaroon.ParseMacaroon(p.ApiKey)
	if err != nil {
		return err
	}
	// TODO: this could be better
	apiKey, err := macaroon.ParseRawAPIKey(p.ApiKey)
	if err != nil {
		return err
	}
	ai := accessInfo{
		SatelliteAddr:    p.SatelliteAddr,
		EncryptionAccess: p.EncryptionAccess,
		APIKey:           apiKey.Serialize(),
		Macaroon: accessInfoMacaroon{
			Head:    m.Head(),
			Caveats: []macaroon.Caveat{},
			Tail:    m.Tail(),
		},
	}
	// Decode each raw caveat into its protobuf form for display.
	for _, cb := range m.Caveats() {
		var c macaroon.Caveat
		err := pb.Unmarshal(cb, &c)
		if err != nil {
			return err
		}
		ai.Macaroon.Caveats = append(ai.Macaroon.Caveats, c)
	}
	bs, err := json.MarshalIndent(ai, "", "  ")
	if err != nil {
		return err
	}
	fmt.Println(string(bs))
	return nil
}
// parseAccessRaw decodes a serialized access grant string into its
// protobuf Scope without interpreting its contents.
func parseAccessRaw(access string) (_ *pb.Scope, err error) {
	data, version, err := base58.CheckDecode(access)
	if err != nil || version != 0 {
		return nil, errs.New("invalid access grant format: %w", err)
	}
	var scope pb.Scope
	if err := pb.Unmarshal(data, &scope); err != nil {
		return nil, err
	}
	return &scope, nil
}
// parseAccess splits a serialized access grant into its satellite address,
// base58-encoded API key, and base58-encoded encryption access.
func parseAccess(access string) (sa string, apiKey string, ea string, err error) {
	scope, err := parseAccessRaw(access)
	if err != nil {
		return "", "", "", err
	}
	encAccess, err := pb.Marshal(scope.EncryptionAccess)
	if err != nil {
		return "", "", "", errs.New("unable to marshal encryption access: %w", err)
	}
	apiKey = base58.CheckEncode(scope.ApiKey, 0)
	ea = base58.CheckEncode(encAccess, 0)
	return scope.SatelliteAddr, apiKey, ea, nil
}
// accessRegister implements `access register [ACCESS]`: it registers the
// access with the configured auth service and prints the resulting
// gateway credentials in the requested format.
func accessRegister(cmd *cobra.Command, args []string) (err error) {
	ctx, _ := withTelemetry(cmd)

	access, err := getAccessFromArgZeroOrConfig(registerCfg.AccessConfig, args)
	if err != nil {
		return errs.New("no access specified: %w", err)
	}
	creds, err := RegisterAccess(ctx, access, registerCfg.AuthService, registerCfg.Public, registerCfg.CACert)
	if err != nil {
		return err
	}
	return DisplayGatewayCredentials(creds, registerCfg.Format, registerCfg.AWSProfile)
}
// getAccessFromArgZeroOrConfig resolves an access from args[0] when given
// (first as a configured access name, then as a serialized grant),
// otherwise from the configuration.
func getAccessFromArgZeroOrConfig(config AccessConfig, args []string) (access *uplink.Access, err error) {
	if len(args) == 0 {
		return config.GetAccess()
	}
	access, err = config.GetNamedAccess(args[0])
	switch {
	case err != nil:
		return nil, err
	case access != nil:
		return access, nil
	default:
		// Not a known name; treat the argument as a serialized grant.
		return uplink.ParseAccess(args[0])
	}
}
// DisplayGatewayCredentials formats and writes credentials to stdout.
//
// format selects the output: "env" prints shell-exportable variables,
// "aws" prints `aws configure` commands (optionally scoped to awsProfile),
// and anything else prints a human-readable block.
func DisplayGatewayCredentials(credentials *edge.Credentials, format, awsProfile string) (err error) {
	switch format {
	case "env": // export / set compatible format
		// note that AWS_ENDPOINT configuration is not natively utilized by the AWS CLI
		_, err = fmt.Printf("AWS_ACCESS_KEY_ID=%s\n"+
			"AWS_SECRET_ACCESS_KEY=%s\n"+
			"AWS_ENDPOINT=%s\n",
			credentials.AccessKeyID,
			credentials.SecretKey,
			credentials.Endpoint)
		if err != nil {
			return err
		}
	case "aws": // aws configuration commands
		profile := ""
		if awsProfile != "" {
			profile = " --profile " + awsProfile
			_, err = fmt.Printf("aws configure %s\n", profile)
			if err != nil {
				return err
			}
		}
		// note that the endpoint_url configuration is not natively utilized by the AWS CLI
		_, err = fmt.Printf("aws configure %s set aws_access_key_id %s\n"+
			"aws configure %s set aws_secret_access_key %s\n"+
			"aws configure %s set s3.endpoint_url %s\n",
			profile, credentials.AccessKeyID,
			profile, credentials.SecretKey,
			profile, credentials.Endpoint)
		if err != nil {
			return err
		}
	default: // plain text
		_, err = fmt.Printf("========== CREDENTIALS ===================================================================\n"+
			"Access Key ID: %s\n"+
			"Secret Key   : %s\n"+
			"Endpoint     : %s\n",
			credentials.AccessKeyID, credentials.SecretKey, credentials.Endpoint)
		if err != nil {
			return err
		}
	}
	return nil
}
// RegisterAccess registers an access grant with a Gateway Authorization Service.
//
// authService may be given with or without a scheme; an "https://" prefix and
// trailing "/" are stripped, and port 7777 is assumed when none is given.
// certificateFile, when non-empty, supplies PEM certificates used to
// validate the auth service.
func RegisterAccess(ctx context.Context, access *uplink.Access, authService string, public bool, certificateFile string) (credentials *edge.Credentials, err error) {
	if authService == "" {
		return nil, errs.New("no auth service address provided")
	}
	// preserve compatibility with previous https service
	authService = strings.TrimPrefix(authService, "https://")
	authService = strings.TrimSuffix(authService, "/")
	if !strings.Contains(authService, ":") {
		authService += ":7777"
	}
	var certificatePEM []byte
	if certificateFile != "" {
		certificatePEM, err = os.ReadFile(certificateFile)
		if err != nil {
			return nil, errs.New("can't read certificate file: %w", err)
		}
	}
	edgeConfig := edge.Config{
		AuthServiceAddress: authService,
		CertificatePEM:     certificatePEM,
	}
	return edgeConfig.RegisterAccess(ctx, access, &edge.RegisterAccessOptions{Public: public})
}

View File

@ -1,152 +0,0 @@
// Copyright (C) 2020 Storj Labs, Inc.
// See LICENSE for copying information
package cmd_test
import (
"context"
"crypto/ecdsa"
"crypto/elliptic"
"crypto/rand"
"crypto/tls"
"crypto/x509"
"crypto/x509/pkix"
"encoding/pem"
"math/big"
"net"
"os"
"os/exec"
"strconv"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"storj.io/common/pb"
"storj.io/common/testcontext"
"storj.io/drpc/drpcmux"
"storj.io/drpc/drpcserver"
"storj.io/storj/cmd/uplink/cmd"
"storj.io/uplink"
"storj.io/uplink/edge"
)
// testAccess is a pre-serialized access grant used only as parse input.
const testAccess = "12edqrJX1V243n5fWtUrwpMQXL8gKdY2wbyqRPSG3rsA1tzmZiQjtCyF896egifN2C2qdY6g5S1t6e8iDhMUon9Pb7HdecBFheAcvmN8652mqu8hRx5zcTUaRTWfFCKS2S6DHmTeqPUHJLEp6cJGXNHcdqegcKfeahVZGP4rTagHvFGEraXjYRJ3knAcWDGW6BxACqogEWez6r274JiUBfs4yRSbRNRqUEURd28CwDXMSHLRKKA7TEDKEdQ"

// TestRegisterAccess spins up a TLS DRPC mock of the edge auth service and
// verifies cmd.RegisterAccess returns the credentials the mock serves.
func TestRegisterAccess(t *testing.T) {
	ctx := testcontext.NewWithTimeout(t, 5*time.Second)
	defer ctx.Cleanup()
	server := DRPCServerMock{}
	cancelCtx, authCancel := context.WithCancel(ctx)
	defer authCancel()
	port, certificatePEM := startMockAuthService(cancelCtx, ctx, t, &server)
	// Write the self-signed cert so RegisterAccess can validate the mock.
	caFile := ctx.File("cert.pem")
	err := os.WriteFile(caFile, certificatePEM, os.FileMode(0600))
	require.NoError(t, err)
	url := "https://localhost:" + strconv.Itoa(port)
	// make sure we get back things
	access, err := uplink.ParseAccess(testAccess)
	require.NoError(t, err)
	credentials, err := cmd.RegisterAccess(ctx, access, url, true, caFile)
	require.NoError(t, err)
	assert.Equal(t,
		&edge.Credentials{
			AccessKeyID: "l5pucy3dmvzxgs3fpfewix27l5pq",
			SecretKey:   "l5pvgzldojsxis3fpfpv6x27l5pv6x27l5pv6x27l5pv6",
			Endpoint:    "https://gateway.example",
		},
		credentials)
}
// TestAccessImport compiles the uplink binary and checks that
// `uplink import <serialized access>` succeeds against a fresh config dir.
func TestAccessImport(t *testing.T) {
	ctx := testcontext.New(t)
	defer ctx.Cleanup()
	const testAccess = "12edqwjdy4fmoHasYrxLzmu8Ubv8Hsateq1LPYne6Jzd64qCsYgET53eJzhB4L2pWDKBpqMowxt8vqLCbYxu8Qz7BJVH1CvvptRt9omm24k5GAq1R99mgGjtmc6yFLqdEFgdevuQwH5yzXCEEtbuBYYgES8Stb1TnuSiU3sa62bd2G88RRgbTCtwYrB8HZ7CLjYWiWUphw7RNa3NfD1TW6aUJ6E5D1F9AM6sP58X3D4H7tokohs2rqCkwRT"
	uplinkExe := ctx.Compile("storj.io/storj/cmd/uplink")
	output, err := exec.Command(uplinkExe, "--config-dir", ctx.Dir("uplink"), "import", testAccess).CombinedOutput()
	t.Log(string(output))
	require.NoError(t, err)
}
// DRPCServerMock implements pb.DRPCEdgeAuthServer, answering every
// RegisterAccess call with fixed credentials.
type DRPCServerMock struct {
	pb.DRPCEdgeAuthServer
}

// RegisterAccess returns canned credentials regardless of the request.
func (g *DRPCServerMock) RegisterAccess(context.Context, *pb.EdgeRegisterAccessRequest) (*pb.EdgeRegisterAccessResponse, error) {
	return &pb.EdgeRegisterAccessResponse{
		AccessKeyId: "l5pucy3dmvzxgs3fpfewix27l5pq",
		SecretKey:   "l5pvgzldojsxis3fpfpv6x27l5pv6x27l5pv6x27l5pv6",
		Endpoint:    "https://gateway.example",
	}, nil
}
// startMockAuthService serves srv over TLS DRPC on an ephemeral localhost
// port and returns the port plus the self-signed certificate (PEM) that
// clients must trust. The server stops when cancelCtx is canceled.
func startMockAuthService(cancelCtx context.Context, testCtx *testcontext.Context, t *testing.T, srv pb.DRPCEdgeAuthServer) (port int, certificatePEM []byte) {
	certificatePEM, privateKeyPEM := createSelfSignedCertificate(t, "localhost")
	certificate, err := tls.X509KeyPair(certificatePEM, privateKeyPEM)
	require.NoError(t, err)
	serverTLSConfig := &tls.Config{
		Certificates: []tls.Certificate{certificate},
	}
	// Port 0 lets the OS choose a free port; recover it from the listener.
	drpcListener, err := tls.Listen("tcp", "127.0.0.1:0", serverTLSConfig)
	require.NoError(t, err)
	port = drpcListener.Addr().(*net.TCPAddr).Port
	mux := drpcmux.New()
	err = pb.DRPCRegisterEdgeAuth(mux, srv)
	require.NoError(t, err)
	server := drpcserver.New(mux)
	// Serve in the background; testCtx waits for it during cleanup.
	testCtx.Go(func() error {
		return server.Serve(cancelCtx, drpcListener)
	})
	return port, certificatePEM
}
// createSelfSignedCertificate builds a short-lived (1 minute) self-signed
// ECDSA P-256 certificate for hostname and returns it with its private key,
// both PEM-encoded.
func createSelfSignedCertificate(t *testing.T, hostname string) (certificatePEM []byte, privateKeyPEM []byte) {
	notAfter := time.Now().Add(1 * time.Minute)
	// first create a server certificate
	template := x509.Certificate{
		Subject: pkix.Name{
			CommonName: hostname,
		},
		DNSNames:              []string{hostname},
		SerialNumber:          big.NewInt(1337),
		BasicConstraintsValid: false,
		IsCA:                  true,
		NotAfter:              notAfter,
	}
	privateKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	require.NoError(t, err)
	// Self-signed: the template is both subject and issuer.
	certificateDERBytes, err := x509.CreateCertificate(
		rand.Reader,
		&template,
		&template,
		&privateKey.PublicKey,
		privateKey,
	)
	require.NoError(t, err)
	certificatePEM = pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: certificateDERBytes})
	privateKeyBytes, err := x509.MarshalPKCS8PrivateKey(privateKey)
	require.NoError(t, err)
	privateKeyPEM = pem.EncodeToMemory(&pem.Block{Type: "PRIVATE KEY", Bytes: privateKeyBytes})
	return certificatePEM, privateKeyPEM
}

View File

@ -1,46 +0,0 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.
package cmd
import (
"fmt"
"github.com/spf13/cobra"
"storj.io/common/fpath"
)
// init registers the `cat` command on RootCmd via addCmd, which also wires
// the shared configuration.
func init() {
	addCmd(&cobra.Command{
		Use:   "cat sj://BUCKET/KEY",
		Short: "Copies a Storj object to standard out",
		RunE:  catMain,
		Args:  cobra.ExactArgs(1),
	}, RootCmd)
}
// catMain is the function executed when catCmd is called. It streams the
// named Storj object to stdout by downloading to the special "-" path.
func catMain(cmd *cobra.Command, args []string) (err error) {
	if len(args) == 0 {
		return fmt.Errorf("no object specified for copy")
	}
	ctx, _ := withTelemetry(cmd)

	source, err := fpath.New(args[0])
	if err != nil {
		return err
	}
	if source.IsLocal() {
		return fmt.Errorf("no bucket specified, use format sj://bucket/")
	}
	// "-" is the conventional stdout destination understood by download.
	stdout, err := fpath.New("-")
	if err != nil {
		return err
	}
	return download(ctx, source, stdout, false)
}

View File

@ -1,37 +0,0 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.
package cmd
import (
"context"
"github.com/spf13/cobra"
"storj.io/common/fpath"
"storj.io/private/cfgstruct"
"storj.io/private/process"
"storj.io/uplink/telemetry"
)
// getConfDir returns the uplink configuration directory: an explicit
// --config-dir parameter when present, else the platform default.
func getConfDir() string {
	param := cfgstruct.FindConfigDirParam()
	if param != "" {
		return param
	}
	return fpath.ApplicationDir("storj", "uplink")
}
// withTelemetry returns the command's context, enabling telemetry when a
// metrics address is configured.
//
// NOTE(review): when no metrics.addr flag is set, the returned CancelFunc
// is nil — callers must not invoke it unconditionally.
func withTelemetry(cmd *cobra.Command) (context.Context, context.CancelFunc) {
	ctx, _ := process.Ctx(cmd)
	addr := cmd.Flag("metrics.addr")
	if addr == nil || addr.Value.String() == "" {
		return ctx, nil
	}
	return telemetry.Enable(ctx)
}
// enableTracing turns tracing on in the given config map, sampling
// every trace.
func enableTracing(config map[string]interface{}) {
	settings := map[string]interface{}{
		"tracing.enabled": true,
		"tracing.sample":  1,
	}
	for key, value := range settings {
		config[key] = value
	}
}

View File

@ -1,94 +0,0 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.
package cmd
import (
"time"
"github.com/spacemonkeygo/monkit/v3"
"storj.io/uplink"
)
// mon is the package-level monkit scope used for task instrumentation.
var mon = monkit.Package()

// ClientConfig is a configuration struct for the uplink that controls how
// to talk to the rest of the network.
type ClientConfig struct {
	UserAgent   string        `help:"User-Agent used for connecting to the satellite" default:""`
	DialTimeout time.Duration `help:"timeout for dials" default:"0h2m00s"`
	EnableQUIC  bool          `help:"Use QUIC as the transport protocol when it's available, otherwise, fallback to TCP" default:"false"`
}

// Config uplink configuration.
type Config struct {
	AccessConfig
	Client ClientConfig
}

// AccessConfig holds information about which accesses exist and are selected.
type AccessConfig struct {
	Accesses map[string]string `internal:"true"`
	Access   string            `help:"the serialized access, or name of the access to use" default:"" basic-help:"true"`

	// used for backward compatibility
	Scopes map[string]string `internal:"true"` // deprecated
	Scope  string            `internal:"true"` // deprecated
}
// normalize looks for usage of deprecated config values and sets the respective
// non-deprecated config values accordingly and returns them in a copy of the config.
func (a AccessConfig) normalize() (_ AccessConfig) {
	if a.Access == "" {
		// deprecated Scope stands in when no access is configured
		a.Access = a.Scope
	}
	if a.Accesses == nil {
		a.Accesses = map[string]string{}
	}
	if len(a.Accesses) == 0 {
		// deprecated Scopes stand in when no accesses are configured
		for key, value := range a.Scopes {
			a.Accesses[key] = value
		}
	}
	return a
}
// GetAccess returns the appropriate access for the config: the configured
// access name is resolved first, then treated as a serialized grant.
func (a AccessConfig) GetAccess() (_ *uplink.Access, err error) {
	defer mon.Task()(nil)(&err)
	// Fold deprecated Scope/Scopes fields into Access/Accesses first.
	a = a.normalize()
	access, err := a.GetNamedAccess(a.Access)
	if err != nil {
		return nil, err
	}
	if access != nil {
		return access, nil
	}
	// Otherwise, try to load the access name as a serialized access.
	return uplink.ParseAccess(a.Access)
}
// GetNamedAccess returns named access if exists.
// A nil access with nil error means no access is configured under name.
func (a AccessConfig) GetNamedAccess(name string) (_ *uplink.Access, err error) {
	serialized, found := a.Accesses[name]
	if !found {
		return nil, nil
	}
	return uplink.ParseAccess(serialized)
}
// IsSerializedAccess returns whether the passed access is a serialized
// access string or not.
func IsSerializedAccess(access string) bool {
	if _, err := uplink.ParseAccess(access); err != nil {
		return false
	}
	return true
}

View File

@ -1,516 +0,0 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.
package cmd
import (
"context"
"encoding/json"
"errors"
"fmt"
"io"
"os"
"strings"
"sync"
"time"
progressbar "github.com/cheggaaa/pb/v3"
"github.com/spf13/cobra"
"github.com/zeebo/errs"
"storj.io/common/fpath"
"storj.io/common/memory"
"storj.io/common/ranger/httpranger"
"storj.io/common/sync2"
"storj.io/uplink"
"storj.io/uplink/private/object"
)
// Flag storage for the cp command; populated in init below and read by
// upload/download/copyObject.
var (
	progress     *bool
	expires      *string
	metadata     *string
	parallelism  *int
	byteRangeStr *string
)

// init registers the `cp` command and its flags on RootCmd.
func init() {
	cpCmd := addCmd(&cobra.Command{
		Use:   "cp SOURCE DESTINATION",
		Short: "Copies a local file or Storj object to another location locally or in Storj",
		RunE:  copyMain,
		Args:  cobra.ExactArgs(2),
	}, RootCmd)

	progress = cpCmd.Flags().Bool("progress", true, "if true, show progress")
	expires = cpCmd.Flags().String("expires", "", "optional expiration date of an object. Please use format (yyyy-mm-ddThh:mm:ssZhh:mm)")
	metadata = cpCmd.Flags().String("metadata", "", "optional metadata for the object. Please use a single level JSON object of string to string only")
	parallelism = cpCmd.Flags().Int("parallelism", 1, "controls how many parallel uploads/downloads of a single object will be performed")
	byteRangeStr = cpCmd.Flags().String("range", "", "Downloads the specified range bytes of an object. For more information about the HTTP Range header, see https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35")

	setBasicFlags(cpCmd.Flags(), "progress", "expires", "metadata", "parallelism", "range")
}
// upload transfers src from local machine to s3 compatible object dst.
//
// expiration and metadata (a flat JSON string-to-string object) are applied
// to the created object. With the --parallelism flag > 1 the file is
// uploaded as a multipart upload in 64 MiB parts; otherwise it is streamed
// in one piece. A progress bar is shown when showProgress is true.
func upload(ctx context.Context, src fpath.FPath, dst fpath.FPath, expiration time.Time, metadata []byte, showProgress bool) (err error) {
	if !src.IsLocal() {
		return fmt.Errorf("source must be local path: %s", src)
	}
	if dst.IsLocal() {
		return fmt.Errorf("destination must be Storj URL: %s", dst)
	}
	// if object name not specified, default to filename
	if strings.HasSuffix(dst.String(), "/") || dst.Path() == "" {
		dst = dst.Join(src.Base())
	}
	// "-" means read the object bytes from stdin.
	var file *os.File
	if src.Base() == "-" {
		file = os.Stdin
	} else {
		file, err = os.Open(src.Path())
		if err != nil {
			return err
		}
		defer func() { err = errs.Combine(err, file.Close()) }()
	}
	fileInfo, err := file.Stat()
	if err != nil {
		return err
	}
	if fileInfo.IsDir() {
		return fmt.Errorf("source cannot be a directory: %s", src)
	}
	project, err := cfg.getProject(ctx, false)
	if err != nil {
		return err
	}
	defer closeProject(project)
	if *parallelism < 1 {
		return fmt.Errorf("parallelism must be at least 1")
	}
	var customMetadata uplink.CustomMetadata
	if len(metadata) > 0 {
		err := json.Unmarshal(metadata, &customMetadata)
		if err != nil {
			return err
		}
		if err := customMetadata.Verify(); err != nil {
			return err
		}
	}
	var bar *progressbar.ProgressBar
	if *parallelism <= 1 {
		// Single-stream upload path.
		reader := io.Reader(file)
		if showProgress {
			bar = progressbar.New64(fileInfo.Size())
			reader = bar.NewProxyReader(reader)
			bar.Start()
		}
		upload, err := project.UploadObject(ctx, dst.Bucket(), dst.Path(), &uplink.UploadOptions{
			Expires: expiration,
		})
		if err != nil {
			return err
		}
		err = upload.SetCustomMetadata(ctx, customMetadata)
		if err != nil {
			// Abort so the partial upload doesn't linger.
			abortErr := upload.Abort()
			err = errs.Combine(err, abortErr)
			return err
		}
		_, err = io.Copy(upload, reader)
		if err != nil {
			abortErr := upload.Abort()
			err = errs.Combine(err, abortErr)
			return err
		}
		if err := upload.Commit(); err != nil {
			return err
		}
	} else {
		// Multipart upload path with *parallelism concurrent part uploads.
		// NOTE(review): this path section-reads the file at offsets, so it
		// presumably does not support stdin ("-") input — confirm.
		err = func() (err error) {
			if showProgress {
				bar = progressbar.New64(fileInfo.Size())
				bar.Start()
			}
			info, err := project.BeginUpload(ctx, dst.Bucket(), dst.Path(), &uplink.UploadOptions{
				Expires: expiration,
			})
			if err != nil {
				return err
			}
			// Abort the multipart upload if anything below fails.
			defer func() {
				if err != nil {
					err = errs.Combine(err, project.AbortUpload(ctx, dst.Bucket(), dst.Path(), info.UploadID))
				}
			}()
			var (
				limiter = sync2.NewLimiter(*parallelism)
				es      errs.Group
				mu      sync.Mutex
			)
			cancelCtx, cancel := context.WithCancel(ctx)
			defer cancel()
			// addError records a part failure and cancels remaining parts.
			addError := func(err error) {
				mu.Lock()
				defer mu.Unlock()
				es.Add(err)
				cancel()
			}
			objectSize := fileInfo.Size()
			partSize := 64 * memory.MiB.Int64() // TODO make it configurable
			numberOfParts := (objectSize + partSize - 1) / partSize
			for i := uint32(0); i < uint32(numberOfParts); i++ {
				partNumber := i + 1
				offset := int64(i) * partSize
				length := partSize
				// The final part may be shorter than partSize.
				if offset+length > objectSize {
					length = objectSize - offset
				}
				var reader io.Reader
				reader = io.NewSectionReader(file, offset, length)
				if showProgress {
					reader = bar.NewProxyReader(reader)
				}
				ok := limiter.Go(cancelCtx, func() {
					err := uploadPart(cancelCtx, project, dst, info.UploadID, partNumber, reader)
					if err != nil {
						addError(err)
						return
					}
				})
				if !ok {
					// Context canceled; stop scheduling parts.
					break
				}
			}
			limiter.Wait()
			if err := es.Err(); err != nil {
				return err
			}
			_, err = project.CommitUpload(ctx, dst.Bucket(), dst.Path(), info.UploadID, &uplink.CommitUploadOptions{
				CustomMetadata: customMetadata,
			})
			return err
		}()
		if err != nil {
			return err
		}
	}
	if bar != nil {
		bar.Finish()
	}
	fmt.Printf("Created %s\n", dst.String())
	return nil
}
// uploadPart uploads one part of a multipart upload, streaming its bytes
// from reader and committing the part on success.
func uploadPart(ctx context.Context, project *uplink.Project, dst fpath.FPath, uploadID string, partNumber uint32, reader io.Reader) error {
	// Bail out early if the upload was already canceled.
	if err := ctx.Err(); err != nil {
		return err
	}
	part, err := project.UploadPart(ctx, dst.Bucket(), dst.Path(), uploadID, partNumber)
	if err != nil {
		return err
	}
	if _, err := sync2.Copy(ctx, part, reader); err != nil {
		return err
	}
	return part.Commit()
}
// WriterAt wraps writer and progress bar to display progress correctly.
type WriterAt struct {
	object.WriterAt
	// bar receives byte counts as writes land.
	bar *progressbar.ProgressBar
}

// WriteAt writes bytes to wrapped writer and add amount of bytes to progress bar.
func (w *WriterAt) WriteAt(p []byte, off int64) (n int, err error) {
	n, err = w.WriterAt.WriteAt(p, off)
	w.bar.Add(n)
	return
}

// Truncate truncates writer to specific size.
// It also updates the progress bar total to match.
func (w *WriterAt) Truncate(size int64) error {
	w.bar.SetTotal(size)
	return w.WriterAt.Truncate(size)
}
// download transfers s3 compatible object src to dst on local machine.
//
// The --parallelism flag selects between a single-stream download and a
// concurrent object.DownloadObjectAt; --range optionally restricts a
// single-stream download to one HTTP byte range (the two flags are
// mutually exclusive). dst of "-" writes to stdout. A progress bar is
// shown when showProgress is true.
func download(ctx context.Context, src fpath.FPath, dst fpath.FPath, showProgress bool) (err error) {
	if src.IsLocal() {
		return fmt.Errorf("source must be Storj URL: %s", src)
	}
	if !dst.IsLocal() {
		return fmt.Errorf("destination must be local path: %s", dst)
	}
	if *parallelism < 1 {
		return fmt.Errorf("parallelism must be at least 1")
	}
	if *parallelism > 1 && *byteRangeStr != "" {
		// fixed typo: was "--parellelism"
		return fmt.Errorf("--parallelism and --range flags are mutually exclusive")
	}
	project, err := cfg.getProject(ctx, false)
	if err != nil {
		return err
	}
	defer closeProject(project)
	// If the destination is an existing directory, keep the source name.
	if fileInfo, err := os.Stat(dst.Path()); err == nil && fileInfo.IsDir() {
		dst = dst.Join(src.Base())
	}
	// "-" means write the object bytes to stdout.
	var file *os.File
	if dst.Base() == "-" {
		file = os.Stdout
	} else {
		file, err = os.Create(dst.Path())
		if err != nil {
			return err
		}
		defer func() {
			if err := file.Close(); err != nil {
				fmt.Printf("error closing file: %+v\n", err)
			}
		}()
	}
	var bar *progressbar.ProgressBar
	var contentLength int64
	if *parallelism <= 1 {
		// Single-stream download path, optionally byte-ranged.
		var downloadOpts *uplink.DownloadOptions
		if *byteRangeStr != "" {
			// TODO: if range option will be frequently used we may think about avoiding this call
			statObject, err := project.StatObject(ctx, src.Bucket(), src.Path())
			if err != nil {
				return err
			}
			bRange, err := httpranger.ParseRange(*byteRangeStr, statObject.System.ContentLength)
			// NOTE(review): parse errors are only fatal when no ranges were
			// returned — presumably deliberate for ParseRange's partial
			// results; confirm.
			if err != nil && bRange == nil {
				return fmt.Errorf("error parsing range: %w", err)
			}
			if len(bRange) == 0 {
				return fmt.Errorf("invalid range")
			}
			if len(bRange) > 1 {
				return fmt.Errorf("retrieval of multiple byte ranges of data not supported: %d provided", len(bRange))
			}
			downloadOpts = &uplink.DownloadOptions{
				Offset: bRange[0].Start,
				Length: bRange[0].Length,
			}
			contentLength = bRange[0].Length
		}
		download, err := project.DownloadObject(ctx, src.Bucket(), src.Path(), downloadOpts)
		if err != nil {
			return err
		}
		defer func() { err = errs.Combine(err, download.Close()) }()
		var reader io.ReadCloser
		if showProgress {
			if contentLength <= 0 {
				info := download.Info()
				contentLength = info.System.ContentLength
			}
			bar = progressbar.New64(contentLength)
			reader = bar.NewProxyReader(download)
			bar.Start()
		} else {
			reader = download
		}
		_, err = io.Copy(file, reader)
	} else {
		// Concurrent download path; the WriterAt wrapper feeds the bar.
		var writer object.WriterAt
		if showProgress {
			bar = progressbar.New64(0)
			bar.Set(progressbar.Bytes, true)
			writer = &WriterAt{file, bar}
			bar.Start()
		} else {
			writer = file
		}
		// final DownloadObjectAt method signature is under design so we can still have some
		// inconsistency between naming e.g. concurrency - parallelism.
		err = object.DownloadObjectAt(ctx, project, src.Bucket(), src.Path(), writer, &object.DownloadObjectAtOptions{
			Concurrency: *parallelism,
		})
	}
	if bar != nil {
		bar.Finish()
	}
	if err != nil {
		return err
	}
	if dst.Base() != "-" {
		fmt.Printf("Downloaded %s to %s\n", src.String(), dst.String())
	}
	return nil
}
// copyObject copies s3 compatible object src to s3 compatible object dst,
// preserving the expiration and custom metadata of the source. Progress is
// shown when the --progress flag is set.
func copyObject(ctx context.Context, src fpath.FPath, dst fpath.FPath) (err error) {
	if src.IsLocal() {
		return fmt.Errorf("source must be Storj URL: %s", src)
	}
	if dst.IsLocal() {
		return fmt.Errorf("destination must be Storj URL: %s", dst)
	}
	project, err := cfg.getProject(ctx, false)
	if err != nil {
		return err
	}
	defer closeProject(project)
	download, err := project.DownloadObject(ctx, src.Bucket(), src.Path(), nil)
	if err != nil {
		return err
	}
	defer func() { err = errs.Combine(err, download.Close()) }()
	downloadInfo := download.Info()
	var bar *progressbar.ProgressBar
	var reader io.Reader
	if *progress {
		bar = progressbar.New64(downloadInfo.System.ContentLength)
		reader = bar.NewProxyReader(download)
		bar.Start()
	} else {
		reader = download
	}
	// if destination object name not specified, default to source object name
	if strings.HasSuffix(dst.Path(), "/") {
		dst = dst.Join(src.Base())
	}
	upload, err := project.UploadObject(ctx, dst.Bucket(), dst.Path(), &uplink.UploadOptions{
		Expires: downloadInfo.System.Expires,
	})
	// BUG FIX: the error from UploadObject was previously ignored, so a
	// failed UploadObject led to using a nil upload below.
	if err != nil {
		return err
	}
	_, err = io.Copy(upload, reader)
	if err != nil {
		abortErr := upload.Abort()
		return errs.Combine(err, abortErr)
	}
	err = upload.SetCustomMetadata(ctx, downloadInfo.Custom)
	if err != nil {
		abortErr := upload.Abort()
		return errs.Combine(err, abortErr)
	}
	if err = upload.Commit(); err != nil {
		return err
	}
	if bar != nil {
		bar.Finish()
	}
	fmt.Printf("%s copied to %s\n", src.String(), dst.String())
	return nil
}
// copyMain is the function executed when cpCmd is called. It dispatches to
// upload, download, or remote-to-remote copy based on which of the two
// paths are local.
func copyMain(cmd *cobra.Command, args []string) (err error) {
	switch len(args) {
	case 0:
		return fmt.Errorf("no object specified for copy")
	case 1:
		return fmt.Errorf("no destination specified")
	}
	ctx, _ := withTelemetry(cmd)

	source, err := fpath.New(args[0])
	if err != nil {
		return err
	}
	destination, err := fpath.New(args[1])
	if err != nil {
		return err
	}

	switch {
	case source.IsLocal() && destination.IsLocal():
		// both sides local: nothing for uplink to do
		return errors.New("at least one of the source or the destination must be a Storj URL")
	case source.IsLocal():
		// uploading: validate the optional expiration first
		var expiration time.Time
		if *expires != "" {
			expiration, err = time.Parse(time.RFC3339, *expires)
			if err != nil {
				return err
			}
			if expiration.Before(time.Now()) {
				return fmt.Errorf("invalid expiration date: (%s) has already passed", *expires)
			}
		}
		return upload(ctx, source, destination, expiration, []byte(*metadata), *progress)
	case destination.IsLocal():
		// downloading
		return download(ctx, source, destination, *progress)
	default:
		// copying from one remote location to another
		return copyObject(ctx, source, destination)
	}
}

View File

@ -0,0 +1,47 @@
// Copyright (C) 2021 Storj Labs, Inc.
// See LICENSE for copying information.
// N.B. this file exists to ease a migration from cmd/uplinkng to
// cmd/uplink. There is a test that imports and uses this function
// and cmd/uplinkng does not yet use it the same way.
package cmd
import (
"context"
"os"
"strings"
"github.com/zeebo/errs"
"storj.io/uplink"
"storj.io/uplink/edge"
)
// RegisterAccess registers an access grant with a Gateway Authorization Service.
//
// The authService address may carry an "https://" prefix and/or trailing "/"
// for backwards compatibility; a missing port defaults to 7777. When
// certificateFile is non-empty, its PEM contents are used to verify the
// service's certificate.
func RegisterAccess(ctx context.Context, access *uplink.Access, authService string, public bool, certificateFile string) (credentials *edge.Credentials, err error) {
	if authService == "" {
		return nil, errs.New("no auth service address provided")
	}

	// preserve compatibility with previous https service
	address := strings.TrimSuffix(strings.TrimPrefix(authService, "https://"), "/")
	if !strings.Contains(address, ":") {
		address += ":7777"
	}

	var certPEM []byte
	if certificateFile != "" {
		if certPEM, err = os.ReadFile(certificateFile); err != nil {
			return nil, errs.New("can't read certificate file: %w", err)
		}
	}

	config := edge.Config{
		AuthServiceAddress: address,
		CertificatePEM:     certPEM,
	}
	return config.RegisterAccess(ctx, access, &edge.RegisterAccessOptions{Public: public})
}

View File

@ -1,246 +0,0 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.
package cmd
import (
"bufio"
"errors"
"fmt"
"os"
"path/filepath"
"strings"
"github.com/spf13/cobra"
"github.com/zeebo/errs"
"storj.io/common/base58"
"storj.io/common/pb"
"storj.io/private/cfgstruct"
"storj.io/private/process"
)
var importCfg struct {
Overwrite bool `default:"false" help:"if true, allows an access to be overwritten" source:"flag"`
SatelliteAddress string `default:"" help:"updates satellite address in imported Access" hidden:"true"`
UplinkFlags
}
func init() {
importCmd := &cobra.Command{
Use: "import [NAME] (ACCESS | FILE)",
Short: "Imports an access into configuration. Configuration will be created if doesn't exists.",
Args: cobra.RangeArgs(1, 2),
RunE: importMain,
Annotations: map[string]string{"type": "setup"},
}
RootCmd.AddCommand(importCmd)
// We don't really want all of the uplink flags on this command but
// otherwise, there is difficulty getting the config to load right since
// configuration/flag code assumes it needs to load/persist everything from
// flags.
// TODO: revisit after the configuration/flag code is refactored.
process.Bind(importCmd, &importCfg, defaults, cfgstruct.ConfDir(confDir))
// NB: access is not supported by `setup` or `import`
cfgstruct.SetBoolAnnotation(importCmd.Flags(), "access", cfgstruct.BasicHelpAnnotationName, false)
}
// importMain is the function executed when importCmd is called.
// With a single argument it imports an access (serialized grant or file
// path) into the default 'access' field; with two arguments it imports it
// under the given NAME into the 'accesses' map. Existing entries are only
// overwritten when --overwrite is set.
func importMain(cmd *cobra.Command, args []string) (err error) {
	if cmd.Flag("access").Changed {
		return ErrAccessFlag
	}

	// saveConfig persists the (possibly freshly created) config file with the
	// given override applied, dropping deprecated entries along the way.
	saveConfig := func(saveConfigOption process.SaveConfigOption) error {
		path := filepath.Join(confDir, process.DefaultCfgFilename)

		exists, err := fileExists(path)
		if err != nil {
			return Error.Wrap(err)
		}

		if !exists {
			if err := createConfigFile(path); err != nil {
				return err
			}
		}

		return process.SaveConfig(cmd, path,
			saveConfigOption,
			process.SaveConfigRemovingDeprecated())
	}

	// one argument means we are importing into main 'access' field without name
	if len(args) == 1 {
		overwritten := false
		if importCfg.Access != "" {
			if !importCfg.Overwrite {
				return Error.New("%s", "default access already exists")
			}
			overwritten = true
		}

		accessData, err := findAccess(args[0])
		if err != nil {
			return Error.Wrap(err)
		}

		// --satellite-address (hidden) rewrites the satellite address baked
		// into the grant before saving
		if importCfg.SatelliteAddress != "" {
			newAccessData, err := updateSatelliteAddress(importCfg.SatelliteAddress, accessData)
			if err != nil {
				return Error.Wrap(err)
			}
			accessData = newAccessData
		}

		if err := saveConfig(process.SaveConfigWithOverride("access", accessData)); err != nil {
			return err
		}

		if overwritten {
			fmt.Printf("default access overwritten.\n")
		} else {
			fmt.Printf("default access imported.\n")
		}
	} else {
		name := args[0]

		// This is a little hacky but viper deserializes accesses into a map[string]interface{}
		// and complains if we try and override with map[string]string{}.
		accesses := convertAccessesForViper(importCfg.Accesses)

		overwritten := false
		if _, ok := accesses[name]; ok {
			if !importCfg.Overwrite {
				return fmt.Errorf("access %q already exists", name)
			}
			overwritten = true
		}

		accessData, err := findAccess(args[1])
		if err != nil {
			return Error.Wrap(err)
		}

		if importCfg.SatelliteAddress != "" {
			newAccessData, err := updateSatelliteAddress(importCfg.SatelliteAddress, accessData)
			if err != nil {
				return Error.Wrap(err)
			}
			accessData = newAccessData
		}

		// There is no easy way currently to save off a "hidden" configurable into
		// the config file without a larger refactoring. For now, just do a manual
		// override of the access.
		// TODO: revisit when the configuration/flag code makes it easy
		accessKey := "accesses." + name
		if err := saveConfig(process.SaveConfigWithOverride(accessKey, accessData)); err != nil {
			return err
		}

		if overwritten {
			fmt.Printf("access %q overwritten.\n", name)
		} else {
			fmt.Printf("access %q imported.\n", name)
		}
	}

	return nil
}
// findAccess interprets input either as a serialized access grant itself or
// as a path to a file whose first uncommented line contains one, and returns
// the serialized access.
func findAccess(input string) (access string, err error) {
	// check if parameter is a valid access, otherwise try to read it from file
	if IsSerializedAccess(input) {
		return input, nil
	}

	path := input
	access, err = readFirstUncommentedLine(path)
	if err != nil {
		return "", err
	}

	// Parse the access data to ensure it is well formed.
	// BUG FIX: this previously returned a nil error for malformed data, so
	// callers silently proceeded with an empty access string.
	if !IsSerializedAccess(access) {
		return "", Error.New("invalid access grant in file %q", path)
	}
	return access, nil
}
// readFirstUncommentedLine returns the first non-empty line of the file at
// path that does not begin with '#'. It returns an error when the file is
// unreadable or contains no such line.
func readFirstUncommentedLine(path string) (_ string, err error) {
	f, err := os.Open(path)
	if err != nil {
		return "", Error.Wrap(err)
	}
	defer func() { err = errs.Combine(err, f.Close()) }()

	scanner := bufio.NewScanner(f)
	for scanner.Scan() {
		line := strings.TrimSpace(scanner.Text())
		switch {
		case line == "":
			// skip blank lines
		case strings.HasPrefix(line, "#"):
			// skip comments
		default:
			return line, nil
		}
	}

	if err := scanner.Err(); err != nil {
		return "", Error.Wrap(err)
	}
	return "", Error.New("no data found")
}
// createConfigFile makes sure the configuration directory exists and then
// creates an empty configuration file at path.
func createConfigFile(path string) error {
	setupDir, err := filepath.Abs(confDir)
	if err != nil {
		return err
	}
	if err := os.MkdirAll(setupDir, 0700); err != nil {
		return err
	}

	f, err := os.Create(path)
	if err != nil {
		return err
	}
	return f.Close()
}
func fileExists(path string) (bool, error) {
stat, err := os.Stat(path)
if err != nil {
if os.IsNotExist(err) {
return false, nil
}
return false, err
}
return !stat.IsDir(), nil
}
// updateSatelliteAddress rewrites the satellite address embedded in a
// serialized access grant and returns the re-serialized grant.
func updateSatelliteAddress(satelliteAddr string, serializedAccess string) (string, error) {
	data, version, err := base58.CheckDecode(serializedAccess)
	if err != nil || version != 0 {
		return "", errors.New("invalid access grant format")
	}

	scope := new(pb.Scope)
	if err := pb.Unmarshal(data, scope); err != nil {
		return "", err
	}
	scope.SatelliteAddr = satelliteAddr

	encoded, err := pb.Marshal(scope)
	if err != nil {
		return "", err
	}
	return base58.CheckEncode(encoded, 0), nil
}

View File

@ -1,275 +0,0 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.
package cmd
import (
"context"
"errors"
"fmt"
"strings"
"time"
"github.com/spf13/cobra"
"storj.io/common/fpath"
"storj.io/uplink"
)
var (
lsRecursiveFlag *bool
lsEncryptedFlag *bool
lsPendingFlag *bool
lsExpandedFlag *bool
)
func init() {
lsCmd := addCmd(&cobra.Command{
Use: "ls [sj://BUCKET[/PREFIX]]",
Short: "List objects and prefixes or all buckets",
RunE: list,
Args: cobra.MaximumNArgs(1),
}, RootCmd)
lsRecursiveFlag = lsCmd.Flags().Bool("recursive", false, "if true, list recursively")
lsEncryptedFlag = lsCmd.Flags().Bool("encrypted", false, "if true, show paths as base64-encoded encrypted paths")
lsPendingFlag = lsCmd.Flags().Bool("pending", false, "if true, list pending objects")
lsExpandedFlag = lsCmd.Flags().BoolP("expanded", "x", false, "if true, use an expanded output, showing object expiration times and whether there is custom metadata attached")
setBasicFlags(lsCmd.Flags(), "recursive", "encrypted", "pending", "expanded")
}
// list implements the `ls` command: with an sj:// argument it lists the
// objects (or a single named object) under that bucket/prefix; without
// arguments it lists all buckets, recursing into each when --recursive is
// set.
func list(cmd *cobra.Command, args []string) error {
	ctx, _ := withTelemetry(cmd)

	project, err := cfg.getProject(ctx, *lsEncryptedFlag)
	if err != nil {
		return err
	}
	defer closeProject(project)

	if *lsExpandedFlag {
		printExpandedHeader()
	}

	// list objects
	if len(args) > 0 {
		src, err := fpath.New(args[0])
		if err != nil {
			return err
		}

		if src.IsLocal() {
			return fmt.Errorf("no bucket specified, use format sj://bucket/")
		}

		// an argument without a trailing slash may name a single object:
		// try to print it first (ignoring "not found"), then list the prefix
		if !strings.HasSuffix(args[0], "/") && src.Path() != "" {
			err = listObject(ctx, project, src.Bucket(), src.Path())
			if err != nil && !errors.Is(err, uplink.ErrObjectNotFound) {
				return convertError(err, src)
			}
		}
		err = listObjects(ctx, project, src.Bucket(), src.Path(), false)
		return convertError(err, src)
	}

	noBuckets := true

	buckets := project.ListBuckets(ctx, nil)
	for buckets.Next() {
		bucket := buckets.Item()
		// with --pending the bucket lines themselves are suppressed
		if !*lsPendingFlag {
			printBucket(bucket, *lsExpandedFlag)
		}
		if *lsRecursiveFlag {
			if err := listObjectsFromBucket(ctx, project, bucket.Name); err != nil {
				return err
			}
		}
		noBuckets = false
	}
	if buckets.Err() != nil {
		return buckets.Err()
	}

	if noBuckets {
		fmt.Println("No buckets")
	}

	return nil
}
// listObjectsFromBucket recursively lists every object in bucket, prefixing
// each printed key with the bucket name.
func listObjectsFromBucket(ctx context.Context, project *uplink.Project, bucket string) error {
	return listObjects(ctx, project, bucket, "", true)
}
// listObject prints details for the single object at bucket/path; with
// --pending it lists the pending uploads for that key instead.
func listObject(ctx context.Context, project *uplink.Project, bucket, path string) error {
	if *lsPendingFlag {
		return listPendingObject(ctx, project, bucket, path)
	}
	object, err := project.StatObject(ctx, bucket, path)
	if err != nil {
		return err
	}
	printObject(path, object.System, *lsExpandedFlag, object.Custom)
	return nil
}
// listObjects lists committed objects under bucket/prefix, honoring the
// --recursive, --pending and --expanded flags. When prependBucket is true,
// printed keys are prefixed with "bucket/".
func listObjects(ctx context.Context, project *uplink.Project, bucket, prefix string, prependBucket bool) error {
	// TODO force adding slash at the end because fpath is removing it,
	// most probably should be fixed in storj/common
	if prefix != "" && !strings.HasSuffix(prefix, "/") {
		prefix += "/"
	}

	var objects *uplink.ObjectIterator
	if *lsPendingFlag {
		return listPendingObjects(ctx, project, bucket, prefix, prependBucket)
	}

	objects = project.ListObjects(ctx, bucket, &uplink.ListObjectsOptions{
		Prefix:    prefix,
		Recursive: *lsRecursiveFlag,
		System:    true,
		Custom:    *lsExpandedFlag,
	})
	for objects.Next() {
		object := objects.Item()
		path := object.Key
		if prependBucket {
			path = fmt.Sprintf("%s/%s", bucket, path)
		}
		if object.IsPrefix {
			printPrefix(path, *lsExpandedFlag)
		} else {
			printObject(path, object.System, *lsExpandedFlag, object.Custom)
		}
	}
	if objects.Err() != nil {
		return objects.Err()
	}
	return nil
}
// listPendingObject prints every pending (uncommitted) upload whose key
// starts with path.
func listPendingObject(ctx context.Context, project *uplink.Project, bucket, path string) error {
	uploads := project.ListUploads(ctx, bucket, &uplink.ListUploadsOptions{
		Prefix: path,
		System: true,
		Custom: true,
	})
	for uploads.Next() {
		object := uploads.Item()
		path := object.Key
		printObject(path, object.System, *lsExpandedFlag, object.Custom)
	}
	return uploads.Err()
}
// listPendingObjects lists pending (uncommitted) uploads under bucket/prefix,
// honoring --recursive; when prependBucket is true, printed keys are
// prefixed with "bucket/".
func listPendingObjects(ctx context.Context, project *uplink.Project, bucket, prefix string, prependBucket bool) error {
	// TODO force adding slash at the end because fpath is removing it,
	// most probably should be fixed in storj/common
	if prefix != "" && !strings.HasSuffix(prefix, "/") {
		prefix += "/"
	}

	objects := project.ListUploads(ctx, bucket, &uplink.ListUploadsOptions{
		Prefix:    prefix,
		Cursor:    "",
		Recursive: *lsRecursiveFlag,
		System:    true,
		Custom:    true,
	})
	for objects.Next() {
		object := objects.Item()
		path := object.Key
		if prependBucket {
			path = fmt.Sprintf("%s/%s", bucket, path)
		}
		if object.IsPrefix {
			printPrefix(path, *lsExpandedFlag)
		} else {
			printObject(path, object.System, *lsExpandedFlag, object.Custom)
		}
	}
	return objects.Err()
}
var (
objTypeFieldWidth = 3
creationTimeFieldWidth = 19
expirationTimeFieldWidth = 19
sizeFieldWidth = 12
metadataSizeFieldWidth = 8
)
// printExpandedHeader prints the column header row for the --expanded
// listing format; the widths match those used by printObject/printBucket.
func printExpandedHeader() {
	fmt.Printf("%*s %-*s %-*s %*s %*s %s\n",
		objTypeFieldWidth, "",
		creationTimeFieldWidth, "CREATE-TIME",
		expirationTimeFieldWidth, "EXPIRE-TIME",
		sizeFieldWidth, "SIZE",
		metadataSizeFieldWidth, "META",
		"PATH")
}
// printObject prints one object row: in expanded format a fixed-width row
// including expiration time and total custom-metadata size, otherwise a
// short "OBJ <created> <size> <path>" line.
func printObject(path string, system uplink.SystemMetadata, expandedFormat bool, custom uplink.CustomMetadata) {
	if expandedFormat {
		// placeholder shown when the object has no expiration set
		expiryTime := "---------- --------"
		if !system.Expires.IsZero() {
			expiryTime = formatTime(system.Expires)
		}
		fmt.Printf("%*s %*s %*s %*d %*d %s\n",
			objTypeFieldWidth, "OBJ",
			creationTimeFieldWidth, formatTime(system.Created),
			expirationTimeFieldWidth, expiryTime,
			sizeFieldWidth, system.ContentLength,
			metadataSizeFieldWidth, sumMetadataSize(custom),
			path)
	} else {
		fmt.Printf("%v %v %12v %v\n", "OBJ", formatTime(system.Created), system.ContentLength, path)
	}
}
// printBucket prints one bucket row, either as a padded expanded-format row
// (with blank expiration/size/metadata columns) or a short "BKT" line.
func printBucket(bucket *uplink.Bucket, expandedFormat bool) {
	if expandedFormat {
		fmt.Printf("%*s %*s %*s %*s %*s %s\n",
			objTypeFieldWidth, "BKT",
			creationTimeFieldWidth, formatTime(bucket.Created),
			expirationTimeFieldWidth, "",
			sizeFieldWidth, "",
			metadataSizeFieldWidth, "",
			bucket.Name)
	} else {
		fmt.Println("BKT", formatTime(bucket.Created), bucket.Name)
	}
}
// printPrefix prints one prefix ("directory") entry, either as a padded
// expanded-format row with blank columns or as a short "PRE <path>" line.
func printPrefix(path string, expandedFormat bool) {
	if !expandedFormat {
		fmt.Println("PRE", path)
		return
	}
	fmt.Printf("%*s %*s %*s %*s %*s %s\n",
		objTypeFieldWidth, "PRE",
		creationTimeFieldWidth, "",
		expirationTimeFieldWidth, "",
		sizeFieldWidth, "",
		metadataSizeFieldWidth, "",
		path)
}
// sumMetadataSize returns the total byte count of all keys and values in the
// custom metadata map.
func sumMetadataSize(md uplink.CustomMetadata) int {
	total := 0
	for key, value := range md {
		total += len(key) + len(value)
	}
	return total
}
func formatTime(t time.Time) string {
return t.Local().Format("2006-01-02 15:04:05")
}

View File

@ -1,196 +0,0 @@
// Copyright (C) 2021 Storj Labs, Inc.
// See LICENSE for copying information.
package cmd_test
import (
"os/exec"
"strings"
"testing"
"github.com/stretchr/testify/require"
"storj.io/common/memory"
"storj.io/common/testcontext"
"storj.io/common/testrand"
"storj.io/storj/private/testplanet"
)
// TestLsPending exercises `uplink ls --pending` end-to-end against a test
// planet: it verifies pending (uncommitted) uploads are listed correctly
// with and without --recursive, when scoped to a bucket, a prefix, and an
// exact object key.
func TestLsPending(t *testing.T) {
	testplanet.Run(t, testplanet.Config{
		SatelliteCount:   1,
		StorageNodeCount: 4,
		UplinkCount:      1,
	}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
		uplinkExe := ctx.Compile("storj.io/storj/cmd/uplink")

		// Configure uplink.
		{
			access := planet.Uplinks[0].Access[planet.Satellites[0].ID()]
			accessString, err := access.Serialize()
			require.NoError(t, err)
			output, err := exec.Command(uplinkExe,
				"--config-dir", ctx.Dir("uplink"),
				"import",
				accessString,
			).CombinedOutput()
			t.Log(string(output))
			require.NoError(t, err)
		}

		// Create bucket.
		bucketName := "testbucket"

		err := planet.Uplinks[0].CreateBucket(ctx, planet.Satellites[0], "testbucket")
		require.NoError(t, err)

		// Create pending objects and committed objects.
		{
			uplinkPeer := planet.Uplinks[0]
			satellite := planet.Satellites[0]

			project, err := uplinkPeer.GetProject(ctx, satellite)
			require.NoError(t, err)
			defer ctx.Check(project.Close)

			_, err = project.BeginUpload(ctx, bucketName, "pending-object", nil)
			require.NoError(t, err)

			_, err = project.BeginUpload(ctx, bucketName, "prefixed/pending-object", nil)
			require.NoError(t, err)

			err = uplinkPeer.Upload(ctx, satellite, "testbucket", "committed-object", testrand.Bytes(5*memory.KiB))
			require.NoError(t, err)

			err = uplinkPeer.Upload(ctx, satellite, "testbucket", "prefixed/committed-object", testrand.Bytes(5*memory.KiB))
			require.NoError(t, err)
		}

		// List pending objects non-recursively.
		// NOTE(review): no object keys are passed here, so this asserts the
		// command succeeds and produces no object lines at the top level.
		{
			cmd := exec.Command(uplinkExe,
				"--config-dir", ctx.Dir("uplink"),
				"ls",
				"--pending",
			)
			t.Log(cmd)
			output, err := cmd.Output()
			require.NoError(t, err)
			checkOutput(t, output)
		}

		// List pending objects recursively.
		{
			cmd := exec.Command(uplinkExe,
				"--config-dir", ctx.Dir("uplink"),
				"ls",
				"--pending",
				"--recursive",
			)
			t.Log(cmd)
			output, err := cmd.Output()
			require.NoError(t, err)
			checkOutput(t, output,
				bucketName,
				"prefixed/pending-object",
				"pending-object",
			)
		}

		// List pending objects from bucket non-recursively.
		{
			cmd := exec.Command(uplinkExe,
				"--config-dir", ctx.Dir("uplink"),
				"ls",
				"--pending",
				"sj://"+bucketName,
			)
			t.Log(cmd)
			output, err := cmd.Output()
			require.NoError(t, err)
			checkOutput(t, output,
				"prefixed",
				"pending-object",
			)
		}

		// List pending object from bucket recursively.
		{
			cmd := exec.Command(uplinkExe,
				"--config-dir", ctx.Dir("uplink"),
				"ls",
				"--pending",
				"--recursive",
				"sj://"+bucketName,
			)
			t.Log(cmd)
			output, err := cmd.Output()
			require.NoError(t, err)
			checkOutput(t, output,
				"prefixed/pending-object",
				"pending-object",
			)
		}

		// List pending objects with prefix.
		{
			cmd := exec.Command(uplinkExe,
				"--config-dir", ctx.Dir("uplink"),
				"ls",
				"--pending",
				"sj://"+bucketName+"/prefixed",
			)
			t.Log(cmd)
			output, err := cmd.Output()
			require.NoError(t, err)
			checkOutput(t, output,
				"prefixed/pending-object",
			)
		}

		// List pending object by specifying object key.
		{
			cmd := exec.Command(uplinkExe,
				"--config-dir", ctx.Dir("uplink"),
				"ls",
				"--pending",
				"sj://"+bucketName+"/prefixed/pending-object",
			)
			t.Log(cmd)
			output, err := cmd.Output()
			require.NoError(t, err)
			checkOutput(t, output,
				"prefixed/pending-object",
			)
		}
	})
}
// checkOutput asserts that every non-empty line of output mentions at least
// one of objectKeys, and that every key in objectKeys appears on some line.
func checkOutput(t *testing.T, output []byte, objectKeys ...string) {
	lines := strings.Split(string(output), "\n")
	foundObjectKeys := make(map[string]bool, len(objectKeys))
	for _, line := range lines {
		if line == "" {
			continue
		}
		// BUG FIX: the "found" flag was previously declared outside the
		// loop, so once any line matched, later unexpected lines could
		// never fail the assertion. Reset it per line.
		objectKeyFound := false
		for _, objectKey := range objectKeys {
			if strings.Contains(line, objectKey) {
				objectKeyFound = true
				foundObjectKeys[objectKey] = true
			}
		}
		require.True(t, objectKeyFound, line, " Object should not be listed.")
	}
	require.Len(t, foundObjectKeys, len(objectKeys))
}

View File

@ -1,56 +0,0 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.
package cmd
import (
"fmt"
"github.com/spf13/cobra"
"storj.io/common/fpath"
)
func init() {
addCmd(&cobra.Command{
Use: "mb sj://BUCKET",
Short: "Create a new bucket",
RunE: makeBucket,
Args: cobra.ExactArgs(1),
}, RootCmd)
}
// makeBucket creates the bucket named by the sj:// URL in args[0].
func makeBucket(cmd *cobra.Command, args []string) error {
	ctx, _ := withTelemetry(cmd)

	if len(args) == 0 {
		return fmt.Errorf("no bucket specified for creation")
	}

	dst, err := fpath.New(args[0])
	if err != nil {
		return err
	}
	switch {
	case dst.IsLocal():
		return fmt.Errorf("no bucket specified, use format sj://bucket/")
	case dst.Path() != "":
		return fmt.Errorf("nested buckets not supported, use format sj://bucket/")
	}

	project, err := cfg.getProject(ctx, false)
	if err != nil {
		return err
	}
	defer closeProject(project)

	if _, err := project.CreateBucket(ctx, dst.Bucket()); err != nil {
		return err
	}

	fmt.Printf("Bucket %s created\n", dst.Bucket())
	return nil
}

View File

@ -1,17 +0,0 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.
package cmd
import (
"github.com/spf13/cobra"
)
var metaCmd *cobra.Command
func init() {
metaCmd = addCmd(&cobra.Command{
Use: "meta",
Short: "Metadata related commands",
}, RootCmd)
}

View File

@ -1,98 +0,0 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.
package cmd
import (
"encoding/json"
"fmt"
"github.com/spf13/cobra"
"storj.io/common/fpath"
)
func init() {
addCmd(&cobra.Command{
Use: "get [KEY] PATH",
Short: "Get a Storj object's metadata",
RunE: metaGetMain,
Args: cobra.RangeArgs(1, 2),
}, metaCmd)
}
// metaGetMain is the function executed when metaGetCmd is called.
// With one argument (PATH) it prints all custom metadata of the object as
// JSON; with two arguments (KEY PATH) it prints only the value for KEY.
// Keys and values cross the command line JSON-encoded.
func metaGetMain(cmd *cobra.Command, args []string) (err error) {
	var key *string
	var path string

	switch len(args) {
	case 0:
		return fmt.Errorf("no object specified")
	case 1:
		path = args[0]
	case 2:
		key = &args[0]
		path = args[1]
	default:
		return fmt.Errorf("too many arguments")
	}

	ctx, _ := withTelemetry(cmd)

	src, err := fpath.New(path)
	if err != nil {
		return err
	}
	if src.IsLocal() {
		return fmt.Errorf("the source destination must be a Storj URL")
	}

	project, err := cfg.getProject(ctx, false)
	if err != nil {
		return err
	}
	defer closeProject(project)

	object, err := project.StatObject(ctx, src.Bucket(), src.Path())
	if err != nil {
		return err
	}

	if key != nil {
		// the key arrives JSON-encoded on the command line; normalize it
		var keyNorm string
		err := json.Unmarshal([]byte("\""+*key+"\""), &keyNorm)
		if err != nil {
			return err
		}

		value, ok := object.Custom[keyNorm]
		if !ok {
			return fmt.Errorf("key does not exist")
		}

		// print the value JSON-encoded, without the surrounding quotes
		str, err := json.Marshal(value)
		if err != nil {
			return err
		}

		fmt.Printf("%s\n", str[1:len(str)-1])
		return nil
	}

	if object.Custom != nil {
		str, err := json.MarshalIndent(object.Custom, "", " ")
		if err != nil {
			return err
		}

		fmt.Printf("%s\n", string(str))
		return nil
	}

	fmt.Printf("{}\n")
	return nil
}

View File

@ -1,171 +0,0 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.
package cmd_test
import (
"encoding/json"
"errors"
"fmt"
"os/exec"
"strings"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"storj.io/common/testcontext"
"storj.io/common/testrand"
"storj.io/storj/private/testplanet"
)
// min returns the smaller of x and y.
func min(x, y int) int {
	if y < x {
		return y
	}
	return x
}
// TestSetGetMeta uploads an object with random custom metadata via
// `uplink cp --metadata` and verifies `uplink meta get` returns the same
// metadata, both as a whole JSON document and key by key.
func TestSetGetMeta(t *testing.T) {
	testplanet.Run(t, testplanet.Config{
		SatelliteCount:   1,
		StorageNodeCount: 4,
		UplinkCount:      1,
	}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
		uplinkExe := ctx.Compile("storj.io/storj/cmd/uplink")

		// Configure uplink.
		{
			access := planet.Uplinks[0].Access[planet.Satellites[0].ID()]
			accessString, err := access.Serialize()
			require.NoError(t, err)
			output, err := exec.Command(uplinkExe,
				"--config-dir", ctx.Dir("uplink"),
				"import",
				accessString,
			).CombinedOutput()
			t.Log(string(output))
			require.NoError(t, err)
		}

		// Create bucket.
		bucketName := testrand.BucketName()
		{
			output, err := exec.Command(uplinkExe,
				"--config-dir", ctx.Dir("uplink"),
				"mb",
				"sj://"+bucketName,
			).CombinedOutput()
			t.Log(string(output))
			require.NoError(t, err)
		}

		// Upload file with metadata.
		metadata := testrand.Metadata()

		// TODO fix this in storj/common
		// drop keys/values containing NUL bytes, which cannot cross the CLI
		for k, v := range metadata {
			if strings.IndexByte(k, 0) >= 0 || strings.IndexByte(v, 0) >= 0 {
				delete(metadata, k)
			}
		}

		metadataBs, err := json.Marshal(metadata)
		require.NoError(t, err)
		metadataStr := string(metadataBs)

		var metadataNorm map[string]string
		err = json.Unmarshal(metadataBs, &metadataNorm)
		require.NoError(t, err)

		path := testrand.URLPathNonFolder()
		uri := "sj://" + bucketName + "/" + path
		{
			output, err := exec.Command(uplinkExe,
				"--config-dir", ctx.Dir("uplink"),
				"cp",
				"--metadata", metadataStr,
				"-", uri,
			).CombinedOutput()
			t.Log(string(output))
			require.NoError(t, err)
		}

		// Get all metadata.
		{
			cmd := exec.Command(uplinkExe,
				"--config-dir", ctx.Dir("uplink"),
				"meta", "get", uri,
			)
			t.Log(cmd)
			output, err := cmd.Output()
			t.Log(string(output))
			if !assert.NoError(t, err) {
				var ee *exec.ExitError
				if errors.As(err, &ee) {
					t.Log(ee)
					t.Log(string(ee.Stderr))
				}
				return
			}

			var md map[string]string
			err = json.Unmarshal(output, &md)
			require.NoError(t, err)
			assert.Equal(t, metadataNorm, md)
		}

		// Get specific metadata.
		//
		// NOTE: The CLI expects JSON encoded strings for input and
		// output. The key and value returned from the CLI have to be
		// converted from the JSON encoded string into the Go native
		// string for comparison.
		for key, value := range metadataNorm {
			key, value := key, value
			t.Run(fmt.Sprintf("Fetching key %q", key[:min(len(key), 8)]), func(t *testing.T) {
				keyNorm, err := json.Marshal(key)
				require.NoError(t, err)

				cmd := exec.Command(uplinkExe,
					"--config-dir", ctx.Dir("uplink"),
					"meta", "get", "--", string(keyNorm[1:len(keyNorm)-1]), uri,
				)
				t.Log(cmd)
				output, err := cmd.Output()
				assert.NoError(t, err)
				if err != nil {
					var ee *exec.ExitError
					if errors.As(err, &ee) {
						t.Log(ee)
						t.Log(string(ee.Stderr))
					}
					return
				}

				// Remove trailing newline.
				if len(output) > 0 && string(output[len(output)-1]) == "\n" {
					output = output[:len(output)-1]
				}

				var outputNorm string
				err = json.Unmarshal([]byte("\""+string(output)+"\""), &outputNorm)
				require.NoError(t, err)
				assert.Equal(t, value, outputNorm)
			})
		}
	})
}

View File

@ -1,103 +0,0 @@
// Copyright (C) 2021 Storj Labs, Inc.
// See LICENSE for copying information.
package cmd
import (
"context"
"errors"
"fmt"
"strings"
"github.com/spf13/cobra"
"github.com/zeebo/errs"
"storj.io/common/fpath"
"storj.io/uplink"
)
func init() {
addCmd(&cobra.Command{
Use: "mv SOURCE DESTINATION",
Short: "Moves a Storj object to another location in Storj",
RunE: move,
Args: cobra.ExactArgs(2),
}, RootCmd)
}
// move implements `mv`: it moves a single object, or — when both the source
// and the destination end in "/" — every object under the source prefix.
func move(cmd *cobra.Command, args []string) (err error) {
	ctx, _ := withTelemetry(cmd)

	src, err := fpath.New(args[0])
	if err != nil {
		return err
	}

	dst, err := fpath.New(args[1])
	if err != nil {
		return err
	}

	if src.IsLocal() || dst.IsLocal() {
		return errors.New("the source and the destination must be a Storj URL")
	}

	project, err := cfg.getProject(ctx, false)
	if err != nil {
		return err
	}
	defer closeProject(project)

	// prefix moves require BOTH sides to be prefixes (trailing "/")
	sourceIsPrefix := strings.HasSuffix(src.String(), "/")
	destinationIsPrefix := strings.HasSuffix(dst.String(), "/")

	if destinationIsPrefix != sourceIsPrefix {
		return errs.New("both source and destination should be a prefixes")
	}

	if destinationIsPrefix && sourceIsPrefix {
		return moveObjects(ctx, project, src.Bucket(), src.Path(), dst.Bucket(), dst.Path())
	}

	return moveObject(ctx, project, src.Bucket(), src.Path(), dst.Bucket(), dst.Path())
}
// moveObject performs a server-side move of a single object and reports the
// result on stdout.
func moveObject(ctx context.Context, project *uplink.Project, oldbucket, oldkey, newbucket, newkey string) error {
	if err := project.MoveObject(ctx, oldbucket, oldkey, newbucket, newkey, nil); err != nil {
		return err
	}
	fmt.Printf("sj://%s/%s moved to sj://%s/%s\n", oldbucket, oldkey, newbucket, newkey)
	return nil
}
// moveObjects moves every object directly under oldbucket/oldkey (treated
// as a prefix) to newbucket/newkey, preserving each key's suffix. The
// listing is non-recursive, so nested sub-prefixes are skipped.
func moveObjects(ctx context.Context, project *uplink.Project, oldbucket, oldkey, newbucket, newkey string) error {
	oldPrefix := oldkey
	if oldPrefix != "" && !strings.HasSuffix(oldPrefix, "/") {
		oldPrefix += "/"
	}

	objectsIterator := project.ListObjects(ctx, oldbucket, &uplink.ListObjectsOptions{
		Prefix: oldPrefix,
	})
	for objectsIterator.Next() {
		object := objectsIterator.Item()
		if object.IsPrefix {
			continue
		}

		// re-root the key under the destination prefix
		objectKeyWithNewPrefix := strings.TrimPrefix(object.Key, oldPrefix)
		if newkey != "" {
			objectKeyWithNewPrefix = newkey + "/" + objectKeyWithNewPrefix
		}

		err := moveObject(ctx, project, oldbucket, object.Key, newbucket, objectKeyWithNewPrefix)
		if err != nil {
			return err
		}
	}

	return objectsIterator.Err()
}

View File

@ -1,70 +0,0 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.
package cmd
import (
"fmt"
"time"
"github.com/spf13/cobra"
"storj.io/common/fpath"
)
var (
putProgress *bool
putExpires *string
putMetadata *string
)
func init() {
putCmd := addCmd(&cobra.Command{
Use: "put sj://BUCKET/KEY",
Short: "Copies data from standard in to a Storj object",
RunE: putMain,
Args: cobra.ExactArgs(1),
}, RootCmd)
putProgress = putCmd.Flags().Bool("progress", false, "if true, show upload progress")
putExpires = putCmd.Flags().String("expires", "", "optional expiration date of the new object. Please use format (yyyy-mm-ddThh:mm:ssZhh:mm)")
putMetadata = putCmd.Flags().String("metadata", "", "optional metadata for the object. Please use a single level JSON object of string to string only")
setBasicFlags(putCmd.Flags(), "progress", "expires", "metadata")
}
// putMain is the function executed when putCmd is called.
// It copies data from standard input to the object at sj://BUCKET/KEY,
// honoring the --expires, --metadata and --progress flags.
func putMain(cmd *cobra.Command, args []string) (err error) {
	if len(args) == 0 {
		return fmt.Errorf("no object specified for copy")
	}
	ctx, _ := withTelemetry(cmd)

	dst, err := fpath.New(args[0])
	if err != nil {
		return err
	}

	if dst.IsLocal() {
		return fmt.Errorf("no bucket specified, use format sj://bucket/")
	}

	// "-" is the conventional fpath spelling for standard input
	src, err := fpath.New("-")
	if err != nil {
		return err
	}

	var expiration time.Time
	if *putExpires != "" {
		expiration, err = time.Parse(time.RFC3339, *putExpires)
		if err != nil {
			return err
		}
		// reject expiration times that are already in the past
		if expiration.Before(time.Now()) {
			return fmt.Errorf("invalid expiration date: (%s) has already passed", *putExpires)
		}
	}

	return upload(ctx, src, dst, expiration, []byte(*putMetadata), *putProgress)
}

View File

@ -1,77 +0,0 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.
package cmd
import (
"fmt"
"github.com/spf13/cobra"
"storj.io/common/fpath"
)
var (
rbForceFlag *bool
)
func init() {
rbCmd := addCmd(&cobra.Command{
Use: "rb sj://BUCKET",
Short: "Remove an empty bucket",
RunE: deleteBucket,
Args: cobra.ExactArgs(1),
}, RootCmd)
rbForceFlag = rbCmd.Flags().Bool("force", false, "if true, empties the bucket of objects first")
setBasicFlags(rbCmd.Flags(), "force")
}
// deleteBucket implements `rb`: it removes the bucket named by args[0],
// first emptying it of objects when --force is given. The deferred function
// always prints a final status line based on the named return value.
func deleteBucket(cmd *cobra.Command, args []string) (err error) {
	ctx, _ := withTelemetry(cmd)

	if len(args) == 0 {
		return fmt.Errorf("no bucket specified for deletion")
	}

	dst, err := fpath.New(args[0])
	if err != nil {
		return err
	}

	if dst.IsLocal() {
		return fmt.Errorf("no bucket specified, use format sj://bucket/")
	}

	if dst.Path() != "" {
		return fmt.Errorf("nested buckets not supported, use format sj://bucket/")
	}

	project, err := cfg.getProject(ctx, true)
	if err != nil {
		return convertError(err, dst)
	}
	defer closeProject(project)

	// report success or failure based on the named return value err
	defer func() {
		if err != nil {
			fmt.Printf("Bucket %s has NOT been deleted\n %+v", dst.Bucket(), err.Error())
		} else {
			fmt.Printf("Bucket %s has been deleted\n", dst.Bucket())
		}
	}()

	if *rbForceFlag {
		// TODO: Do we need to have retry here?
		if _, err := project.DeleteBucketWithObjects(ctx, dst.Bucket()); err != nil {
			return convertError(err, dst)
		}
		return nil
	}

	if _, err := project.DeleteBucket(ctx, dst.Bucket()); err != nil {
		return convertError(err, dst)
	}
	return nil
}

View File

@ -1,51 +0,0 @@
// Copyright (C) 2020 Storj Labs, Inc.
// See LICENSE for copying information.
package cmd
import (
"errors"
"fmt"
"github.com/spf13/cobra"
"storj.io/uplink"
)
func init() {
addCmd(&cobra.Command{
Use: "revoke access_here",
Short: "Revoke an access",
RunE: revokeAccess,
Args: cobra.ExactArgs(1),
}, RootCmd)
}
// revokeAccess is the RunE for the `revoke` command: it parses the access
// grant given as the only argument and revokes it on the satellite.
func revokeAccess(cmd *cobra.Command, args []string) error {
	ctx, _ := withTelemetry(cmd)

	if len(args) == 0 {
		return fmt.Errorf("no access specified for revocation")
	}

	accessRaw := args[0]
	access, err := uplink.ParseAccess(accessRaw)
	if err != nil {
		return errors.New("invalid access provided")
	}

	project, err := cfg.getProject(ctx, false)
	if err != nil {
		return err
	}
	defer closeProject(project)

	if err = project.RevokeAccess(ctx, access); err != nil {
		return err
	}

	// revocation is processed asynchronously by the satellite
	fmt.Println("=========== SUCCESSFULLY REVOKED =========================================================")
	fmt.Println("NOTE: It may take the satellite several minutes to process the revocation request,")
	fmt.Println(" depending on its caching policies.")
	return nil
}

View File

@ -1,77 +0,0 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.
package cmd
import (
"fmt"
"github.com/spf13/cobra"
"storj.io/common/fpath"
"storj.io/uplink"
)
var (
rmEncryptedFlag *bool
rmPendingFlag *bool
)
func init() {
rmCmd := addCmd(&cobra.Command{
Use: "rm sj://BUCKET/KEY",
Short: "Delete an object",
RunE: deleteObject,
Args: cobra.ExactArgs(1),
}, RootCmd)
rmEncryptedFlag = rmCmd.Flags().Bool("encrypted", false, "if true, treat paths as base64-encoded encrypted paths")
rmPendingFlag = rmCmd.Flags().Bool("pending", false, "if true, delete a pending object")
setBasicFlags(rmCmd.Flags(), "pending")
setBasicFlags(rmCmd.Flags(), "encrypted")
}
// deleteObject implements `rm`: it deletes the object at sj://BUCKET/KEY.
// With --pending it aborts the first matching pending upload instead; with
// --encrypted the path is treated as a base64-encoded encrypted path.
func deleteObject(cmd *cobra.Command, args []string) (err error) {
	ctx, _ := withTelemetry(cmd)

	if len(args) == 0 {
		return fmt.Errorf("no object specified for deletion")
	}

	dst, err := fpath.New(args[0])
	if err != nil {
		return err
	}

	if dst.IsLocal() {
		return fmt.Errorf("no bucket specified, use format sj://bucket/")
	}

	project, err := cfg.getProject(ctx, *rmEncryptedFlag)
	if err != nil {
		return err
	}
	defer closeProject(project)

	if *rmPendingFlag {
		// TODO we may need a dedicated endpoint for deleting pending object streams
		list := project.ListUploads(ctx, dst.Bucket(), &uplink.ListUploadsOptions{
			Prefix: dst.Path(),
		})
		// TODO modify when we can have several pending objects for the same object key
		if list.Next() {
			err = project.AbortUpload(ctx, dst.Bucket(), dst.Path(), list.Item().UploadID)
			if err != nil {
				return convertError(err, dst)
			}
		} else if err := list.Err(); err != nil {
			return convertError(err, dst)
		}
	} else if _, err = project.DeleteObject(ctx, dst.Bucket(), dst.Path()); err != nil {
		return convertError(err, dst)
	}

	fmt.Printf("Deleted %s\n", dst)
	return nil
}

View File

@ -1,170 +0,0 @@
// Copyright (C) 2021 Storj Labs, Inc.
// See LICENSE for copying information.
package cmd_test
import (
"context"
"os/exec"
"strings"
"testing"
"github.com/stretchr/testify/require"
"storj.io/common/memory"
"storj.io/common/testcontext"
"storj.io/common/testrand"
"storj.io/storj/private/testplanet"
"storj.io/uplink"
)
// TestRmPending compiles the uplink binary and exercises `uplink rm
// --pending` end-to-end against a testplanet network: deleting a missing
// key, a pending object without --pending, a committed object with
// --pending (must survive), and finally pending objects with and without a
// key prefix (must be removed).
func TestRmPending(t *testing.T) {
	testplanet.Run(t, testplanet.Config{
		SatelliteCount:   1,
		StorageNodeCount: 4,
		UplinkCount:      1,
	}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
		uplinkExe := ctx.Compile("storj.io/storj/cmd/uplink")
		uplinkPeer := planet.Uplinks[0]
		satellite := planet.Satellites[0]
		project, err := uplinkPeer.GetProject(ctx, satellite)
		require.NoError(t, err)
		defer ctx.Check(project.Close)
		// Configure uplink.
		{
			access := planet.Uplinks[0].Access[planet.Satellites[0].ID()]
			accessString, err := access.Serialize()
			require.NoError(t, err)
			output, err := exec.Command(uplinkExe,
				"--config-dir", ctx.Dir("uplink"),
				"import",
				accessString,
			).CombinedOutput()
			t.Log(string(output))
			require.NoError(t, err)
		}
		// Create bucket.
		bucketName := "testbucket"
		err = uplinkPeer.CreateBucket(ctx, satellite, "testbucket")
		require.NoError(t, err)
		// Create pending objects and one committed object.
		{
			_, err = project.BeginUpload(ctx, bucketName, "pending-object", nil)
			require.NoError(t, err)
			_, err = project.BeginUpload(ctx, bucketName, "prefixed/pending-object", nil)
			require.NoError(t, err)
			err = uplinkPeer.Upload(ctx, satellite, "testbucket", "committed-object", testrand.Bytes(5*memory.KiB))
			require.NoError(t, err)
		}
		// Ensure all of the objects exist
		{
			require.True(t, pendingObjectExists(ctx, satellite, project, bucketName, "pending-object"))
			require.True(t, pendingObjectExists(ctx, satellite, project, bucketName, "prefixed/pending-object"))
		}
		// Try to delete a non-existing object.
		// NOTE(review): the command still prints "Deleted" when nothing matched.
		{
			cmd := exec.Command(uplinkExe,
				"--config-dir", ctx.Dir("uplink"),
				"rm",
				"--pending",
				"sj://"+bucketName+"/does-not-exist",
			)
			t.Log(cmd)
			output, err := cmd.Output()
			require.NoError(t, err)
			require.True(t, strings.HasPrefix(string(output), "Deleted sj://"+bucketName+"/does-not-exist"))
		}
		// Try to delete a pending object without specifying --pending.
		// The pending upload must survive.
		{
			cmd := exec.Command(uplinkExe,
				"--config-dir", ctx.Dir("uplink"),
				"rm",
				"sj://"+bucketName+"/pending-object",
			)
			t.Log(cmd)
			output, err := cmd.Output()
			require.NoError(t, err)
			require.True(t, strings.HasPrefix(string(output), "Deleted sj://"+bucketName+"/pending-object"))
			require.True(t, pendingObjectExists(ctx, satellite, project, bucketName, "pending-object"))
		}
		// Try to delete a committed object. --pending must not touch it.
		{
			cmd := exec.Command(uplinkExe,
				"--config-dir", ctx.Dir("uplink"),
				"rm",
				"--pending",
				"sj://"+bucketName+"/committed-object",
			)
			t.Log(cmd)
			output, err := cmd.Output()
			require.NoError(t, err)
			require.True(t, strings.HasPrefix(string(output), "Deleted sj://"+bucketName+"/committed-object"))
			require.True(t, committedObjectExists(ctx, satellite, project, bucketName, "committed-object"))
		}
		// Delete pending object without prefix.
		{
			cmd := exec.Command(uplinkExe,
				"--config-dir", ctx.Dir("uplink"),
				"rm",
				"--pending",
				"sj://"+bucketName+"/pending-object",
			)
			t.Log(cmd)
			output, err := cmd.Output()
			require.NoError(t, err)
			require.True(t, strings.HasPrefix(string(output), "Deleted sj://"+bucketName+"/pending-object"))
			require.False(t, pendingObjectExists(ctx, satellite, project, bucketName, "pending-object"))
		}
		// Delete pending object with prefix.
		{
			cmd := exec.Command(uplinkExe,
				"--config-dir", ctx.Dir("uplink"),
				"rm",
				"--pending",
				"sj://"+bucketName+"/prefixed/pending-object",
			)
			t.Log(cmd)
			output, err := cmd.Output()
			require.NoError(t, err)
			require.True(t, strings.HasPrefix(string(output), "Deleted sj://"+bucketName+"/prefixed/pending-object"))
			require.False(t, pendingObjectExists(ctx, satellite, project, bucketName, "prefixed/pending-object"))
		}
	})
}
// pendingObjectExists reports whether at least one pending (uncommitted)
// upload exists whose key starts with objectKey (ListUploads treats it as a
// prefix). The satellite parameter is unused.
func pendingObjectExists(ctx context.Context, satellite *testplanet.Satellite, project *uplink.Project, bucketName string, objectKey string) bool {
	iterator := project.ListUploads(ctx, bucketName, &uplink.ListUploadsOptions{
		Prefix: objectKey,
	})
	return iterator.Next()
}
// committedObjectExists reports whether a committed object with the given
// key can be statted in the bucket. The satellite parameter is unused.
func committedObjectExists(ctx context.Context, satellite *testplanet.Satellite, project *uplink.Project, bucketName string, objectKey string) bool {
	if _, statErr := project.StatObject(ctx, bucketName, objectKey); statErr != nil {
		return false
	}
	return true
}

View File

@ -1,302 +0,0 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.
package cmd
import (
"bufio"
"bytes"
"context"
"flag"
"fmt"
"os"
"runtime"
"runtime/pprof"
"strings"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
"github.com/zeebo/errs"
"go.uber.org/zap"
"go.uber.org/zap/zapcore"
"storj.io/common/fpath"
"storj.io/common/rpc"
"storj.io/common/storj"
"storj.io/private/cfgstruct"
"storj.io/private/process"
"storj.io/storj/private/version/checker"
"storj.io/uplink"
privateAccess "storj.io/uplink/private/access"
"storj.io/uplink/private/transport"
)
const (
	// advancedFlagName is the flag that opts the user into advanced help output.
	advancedFlagName = "advanced"
	// uplinkCLIUserAgent identifies this CLI in requests to the satellite.
	uplinkCLIUserAgent = "uplink-cli"
)

// UplinkFlags configuration flags.
type UplinkFlags struct {
	Config
	// Version configures the version checker.
	Version checker.Config
	PBKDFConcurrency int `help:"Unfortunately, up until v0.26.2, keys generated from passphrases depended on the number of cores the local CPU had. If you entered a passphrase with v0.26.2 earlier, you'll want to set this number to the number of CPU cores your computer had at the time. This flag may go away in the future. For new installations the default value is highly recommended." default:"0"`
}

var (
	// cfg holds the flag values bound to every subcommand via addCmd.
	cfg UplinkFlags
	// confDir is the directory holding the uplink configuration file.
	confDir string
	// defaults supplies the defaults set selected on the command line.
	defaults = cfgstruct.DefaultsFlag(RootCmd)
	// Error is the class of errors returned by this package.
	Error = errs.Class("uplink")
	// ErrAccessFlag is used where the `--access` flag is registered but not supported.
	ErrAccessFlag = Error.New("--access flag not supported with `setup` and `import` subcommands")
)
// init wires the global --config-dir and --advanced flags onto the root
// command and installs the usage function that hides advanced flags.
func init() {
	defaultConfDir := fpath.ApplicationDir("storj", "uplink")
	cfgstruct.SetupFlag(zap.L(), RootCmd, &confDir, "config-dir", defaultConfDir, "main directory for uplink configuration")
	// NB: more-help flag is always retrieved using `findBoolFlagEarly()`
	RootCmd.PersistentFlags().BoolVar(new(bool), advancedFlagName, false, "if used in with -h, print advanced flags help")
	setBasicFlags(RootCmd.PersistentFlags(), "config-dir", advancedFlagName)
	setUsageFunc(RootCmd)
}
// Profiling output destinations; an empty value disables that profile.
var cpuProfile = flag.String("profile.cpu", "", "file path of the cpu profile to be created")
var memoryProfile = flag.String("profile.mem", "", "file path of the memory profile to be created")

// RootCmd represents the base CLI command when called without any subcommands.
var RootCmd = &cobra.Command{
	Use: "uplink",
	Short: "The Storj client-side CLI",
	Args: cobra.OnlyValidArgs,
	// Profiling brackets every subcommand run.
	PersistentPreRunE: combineCobraFuncs(startCPUProfile, modifyFlagDefaults),
	PersistentPostRunE: stopAndWriteProfile,
}
// addCmd registers cmd under root and binds the shared UplinkFlags
// configuration to it, returning cmd for further flag setup.
func addCmd(cmd *cobra.Command, root *cobra.Command) *cobra.Command {
	root.AddCommand(cmd)
	process.Bind(cmd, &cfg, defaults, cfgstruct.ConfDir(getConfDir()))
	return cmd
}
// getProject opens an uplink project using the configured access grant.
// When encryptionBypass is true, path encryption is bypassed on the access
// so raw (encrypted) paths can be listed and deleted.
func (cliCfg *UplinkFlags) getProject(ctx context.Context, encryptionBypass bool) (_ *uplink.Project, err error) {
	// NOTE(review): reads the package-level cfg rather than the cliCfg
	// receiver — presumably always the same value; confirm intentional.
	access, err := cfg.GetAccess()
	if err != nil {
		return nil, err
	}
	uplinkCfg := uplink.Config{}
	uplinkCfg.UserAgent = uplinkCLIUserAgent
	if cliCfg.Client.UserAgent != "" {
		// Prepend the user-configured agent to the CLI's own identifier.
		uplinkCfg.UserAgent = cliCfg.Client.UserAgent + " " + uplinkCfg.UserAgent
	}
	uplinkCfg.DialTimeout = cliCfg.Client.DialTimeout
	if cliCfg.Client.EnableQUIC {
		transport.SetConnector(&uplinkCfg, rpc.NewHybridConnector())
	}
	if encryptionBypass {
		err = privateAccess.EnablePathEncryptionBypass(access)
		if err != nil {
			return nil, Error.Wrap(err)
		}
	}
	project, err := uplinkCfg.OpenProject(ctx, access)
	if err != nil {
		return nil, Error.Wrap(err)
	}
	return project, nil
}
// closeProject closes the project and reports (but does not propagate) any
// close error on stdout; intended for use with defer.
func closeProject(project *uplink.Project) {
	closeErr := project.Close()
	if closeErr == nil {
		return
	}
	fmt.Printf("error closing project: %+v\n", closeErr)
}
// convertError rewrites well-known storj errors into friendlier messages
// that include the offending bucket or object path; any other error is
// returned unchanged.
func convertError(err error, path fpath.FPath) error {
	switch {
	case storj.ErrBucketNotFound.Has(err):
		return fmt.Errorf("bucket not found: %s", path.Bucket())
	case storj.ErrObjectNotFound.Has(err):
		return fmt.Errorf("object not found: %s", path.String())
	default:
		return err
	}
}
// startCPUProfile begins CPU profiling when --profile.cpu was given,
// creating (or truncating) the target file. The profile is stopped by
// stopAndWriteProfile in PersistentPostRunE.
func startCPUProfile(cmd *cobra.Command, args []string) error {
	if *cpuProfile == "" {
		return nil
	}
	f, err := os.Create(*cpuProfile)
	if err != nil {
		return err
	}
	if err := pprof.StartCPUProfile(f); err != nil {
		// Don't leak the file handle when profiling could not be started.
		_ = f.Close()
		return err
	}
	return nil
}
// stopAndWriteProfile finalizes any CPU profile started by startCPUProfile
// and, when --profile.mem was given, writes a heap profile.
func stopAndWriteProfile(cmd *cobra.Command, args []string) error {
	if *cpuProfile != "" {
		pprof.StopCPUProfile()
	}
	if *memoryProfile == "" {
		return nil
	}
	return writeMemoryProfile()
}
// writeMemoryProfile forces a GC and writes the heap profile to the file
// named by --profile.mem.
func writeMemoryProfile() error {
	f, err := os.Create(*memoryProfile)
	if err != nil {
		return err
	}
	runtime.GC()
	if err := pprof.WriteHeapProfile(f); err != nil {
		// Close the handle even when writing the profile fails.
		_ = f.Close()
		return err
	}
	return f.Close()
}
// convertAccessesForViper converts map[string]string to map[string]interface{}.
//
// This is a little hacky but viper deserializes accesses into a map[string]interface{}
// and complains if we try and override with map[string]string{}.
func convertAccessesForViper(from map[string]string) map[string]interface{} {
	converted := make(map[string]interface{}, len(from))
	for name, serialized := range from {
		converted[name] = serialized
	}
	return converted
}
// modifyFlagDefaults quiets default logging to warn-level unless the user
// explicitly set --log.level themselves.
func modifyFlagDefaults(cmd *cobra.Command, args []string) (err error) {
	levelFlag := cmd.Flag("log.level")
	if levelFlag == nil || levelFlag.Changed {
		return nil
	}
	if setErr := flag.Set("log.level", zapcore.WarnLevel.String()); setErr != nil {
		return Error.Wrap(errs.Combine(errs.New("unable to set log level flag"), setErr))
	}
	return nil
}
// combineCobraFuncs chains several cobra run functions into one that calls
// them in order, stopping at the first error.
func combineCobraFuncs(funcs ...func(*cobra.Command, []string) error) func(*cobra.Command, []string) error {
	return func(cmd *cobra.Command, args []string) error {
		for _, next := range funcs {
			if runErr := next(cmd, args); runErr != nil {
				return runErr
			}
		}
		return nil
	}
}
/* `setUsageFunc` is a bit unconventional but cobra didn't leave much room for
extensibility here. `cmd.SetUsageTemplate` is fairly useless for our case without
the ability to add to the template's function map (see: https://golang.org/pkg/text/template/#hdr-Functions).
Because we can't alter what `cmd.Usage` generates, we have to edit it afterwards.
In order to hook this function *and* get the usage string, we have to juggle the
`cmd.usageFunc` between our hook and `nil`, so that we can get the usage string
from the default usage func.
*/
func setUsageFunc(cmd *cobra.Command) {
	// With --advanced, keep cobra's default (full) usage output.
	if findBoolFlagEarly(advancedFlagName) {
		return
	}
	// reset temporarily restores cobra's default usage func and returns a
	// function that re-installs our hook afterwards.
	reset := func() (set func()) {
		original := cmd.UsageFunc()
		cmd.SetUsageFunc(nil)
		return func() {
			cmd.SetUsageFunc(original)
		}
	}
	cmd.SetUsageFunc(func(cmd *cobra.Command) error {
		set := reset()
		usageStr := cmd.UsageString()
		defer set()
		usageScanner := bufio.NewScanner(bytes.NewBufferString(usageStr))
		// Collect names of flags annotated as "basic help".
		var basicFlags []string
		cmd.Flags().VisitAll(func(flag *pflag.Flag) {
			basic, ok := flag.Annotations[cfgstruct.BasicHelpAnnotationName]
			if ok && len(basic) == 1 && basic[0] == "true" {
				basicFlags = append(basicFlags, flag.Name)
			}
		})
		// Re-print the usage output line by line, dropping flag lines that
		// are not in the basic set.
		for usageScanner.Scan() {
			line := usageScanner.Text()
			trimmedLine := strings.TrimSpace(line)
			var flagName string
			if _, err := fmt.Sscanf(trimmedLine, "--%s", &flagName); err != nil {
				// Not a flag line; keep as-is.
				fmt.Println(line)
				continue
			}
			// TODO: properly filter flags with short names
			if !strings.HasPrefix(trimmedLine, "--") {
				fmt.Println(line)
			}
			for _, basicFlag := range basicFlags {
				if basicFlag == flagName {
					fmt.Println(line)
				}
			}
		}
		return nil
	})
}
func findBoolFlagEarly(flagName string) bool {
for i, arg := range os.Args {
arg := arg
argHasPrefix := func(format string, args ...interface{}) bool {
return strings.HasPrefix(arg, fmt.Sprintf(format, args...))
}
if !argHasPrefix("--%s", flagName) {
continue
}
// NB: covers `--<flagName> false` usage
if i+1 != len(os.Args) {
next := os.Args[i+1]
if next == "false" {
return false
}
}
if !argHasPrefix("--%s=false", flagName) {
return true
}
}
return false
}
// setBasicFlags marks each named flag in flagset as "basic help" so it
// survives the advanced-flag filtering done by setUsageFunc.
func setBasicFlags(flagset interface{}, flagNames ...string) {
	for _, flagName := range flagNames {
		cfgstruct.SetBoolAnnotation(flagset, flagName, cfgstruct.BasicHelpAnnotationName, true)
	}
}

View File

@ -1,159 +0,0 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.
package cmd
import (
"fmt"
"os"
"path/filepath"
"github.com/spf13/cobra"
"github.com/zeebo/errs"
"storj.io/common/grant"
"storj.io/private/cfgstruct"
"storj.io/private/process"
"storj.io/storj/cmd/internal/wizard"
"storj.io/uplink"
"storj.io/uplink/backcomp"
)
var (
	// setupCmd creates the initial uplink configuration file via an
	// interactive wizard.
	setupCmd = &cobra.Command{
		Use: "setup",
		Short: "Create an uplink config file",
		RunE: cmdSetup,
		Annotations: map[string]string{"type": "setup"},
		Args: cobra.NoArgs,
	}
	// setupCfg receives the flag values bound to the setup command.
	setupCfg UplinkFlags
)

// init registers the setup command on the root command.
func init() {
	RootCmd.AddCommand(setupCmd)
	process.Bind(setupCmd, &setupCfg, defaults, cfgstruct.ConfDir(confDir), cfgstruct.SetupMode())
	// NB: access is not supported by `setup` or `import`
	cfgstruct.SetBoolAnnotation(setupCmd.Flags(), "access", cfgstruct.BasicHelpAnnotationName, false)
}
// cmdSetup interactively creates the uplink configuration file: it prompts
// for satellite address, access name, API key, passphrase, and telemetry
// preference, requests an access grant, and persists everything to
// config.yaml in the configuration directory.
func cmdSetup(cmd *cobra.Command, args []string) (err error) {
	if cmd.Flag("access").Changed {
		return ErrAccessFlag
	}
	setupDir, err := filepath.Abs(confDir)
	if err != nil {
		return err
	}
	satelliteAddress, err := wizard.PromptForSatellite(cmd)
	if err != nil {
		return Error.Wrap(err)
	}
	var (
		accessName string
		defaultSerializedAccessExists bool
	)
	setupCfg.AccessConfig = setupCfg.AccessConfig.normalize()
	defaultSerializedAccessExists = IsSerializedAccess(setupCfg.Access)
	accessName, err = wizard.PromptForAccessName()
	if err != nil {
		return Error.Wrap(err)
	}
	// Refuse to silently overwrite an existing access.
	if accessName == "default" && defaultSerializedAccessExists {
		return Error.New("a default access already exists")
	}
	if access, err := setupCfg.GetNamedAccess(accessName); err == nil && access != nil {
		return Error.New("an access with the name %q already exists", accessName)
	}
	apiKeyString, err := wizard.PromptForAPIKey()
	if err != nil {
		return Error.Wrap(err)
	}
	passphrase, err := wizard.PromptForEncryptionPassphrase()
	if err != nil {
		return Error.Wrap(err)
	}
	uplinkConfig := uplink.Config{
		UserAgent: setupCfg.Client.UserAgent,
		DialTimeout: setupCfg.Client.DialTimeout,
	}
	overrides := make(map[string]interface{})
	analyticEnabled, err := wizard.PromptForTracing()
	if err != nil {
		return Error.Wrap(err)
	}
	if analyticEnabled {
		enableTracing(overrides)
	} else {
		// set metrics address to empty string so we can disable it on each operation
		overrides["metrics.addr"] = ""
	}
	ctx, _ := withTelemetry(cmd)
	var access *uplink.Access
	// PBKDFConcurrency exists for pre-v0.26.2 compatibility; zero selects
	// the modern key derivation.
	if setupCfg.PBKDFConcurrency == 0 {
		access, err = uplinkConfig.RequestAccessWithPassphrase(ctx, satelliteAddress, apiKeyString, passphrase)
	} else {
		access, err = backcomp.RequestAccessWithPassphraseAndConcurrency(ctx, uplinkConfig, satelliteAddress, apiKeyString, passphrase, uint8(setupCfg.PBKDFConcurrency))
	}
	if err != nil {
		// Common mistake: pasting an access grant where an API key belongs.
		_, err2 := grant.ParseAccess(apiKeyString)
		if err2 == nil {
			err2 = Error.New("API key appears to be an access grant: try running `uplink import` instead")
		}
		return errs.Combine(err, err2)
	}
	accessData, err := access.Serialize()
	if err != nil {
		return Error.Wrap(err)
	}
	// NB: accesses should always be `map[string]interface{}` for "conventional"
	// config serialization/flattening.
	accesses := convertAccessesForViper(setupCfg.Accesses)
	accesses[accessName] = accessData
	overrides["accesses"] = accesses
	saveCfgOpts := []process.SaveConfigOption{
		process.SaveConfigWithOverrides(overrides),
		process.SaveConfigRemovingDeprecated(),
	}
	if setupCfg.Access == "" {
		saveCfgOpts = append(saveCfgOpts, process.SaveConfigWithOverride("access", accessName))
	}
	err = os.MkdirAll(setupDir, 0700)
	if err != nil {
		return err
	}
	configPath := filepath.Join(setupDir, process.DefaultCfgFilename)
	err = process.SaveConfig(cmd, configPath, saveCfgOpts...)
	if err != nil {
		return Error.Wrap(err)
	}
	// if there is an error with this we cannot do that much and the setup process
	// has ended OK, so we ignore it.
	fmt.Println(`
Your Uplink CLI is configured and ready to use!
* See https://docs.storj.io/api-reference/uplink-cli for some example commands`)
	return nil
}

View File

@ -1,328 +0,0 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.
package cmd
import (
"fmt"
"io/ioutil"
"net/url"
"os"
"path/filepath"
"strings"
"text/tabwriter"
"time"
"github.com/spf13/cobra"
"github.com/zeebo/errs"
"storj.io/common/fpath"
"storj.io/private/cfgstruct"
"storj.io/private/process"
"storj.io/uplink"
)
// shareCfg holds the flags that control how the `share` command restricts,
// registers, exports, and publishes an access grant.
var shareCfg struct {
	DisallowReads bool `default:"false" help:"if true, disallow reads" basic-help:"true"`
	DisallowWrites bool `default:"false" help:"if true, disallow writes. see also --readonly" basic-help:"true"`
	DisallowLists bool `default:"false" help:"if true, disallow lists" basic-help:"true"`
	DisallowDeletes bool `default:"false" help:"if true, disallow deletes. see also --readonly" basic-help:"true"`
	Readonly bool `default:"true" help:"implies --disallow-writes and --disallow-deletes. you must specify --readonly=false if you don't want this" basic-help:"true"`
	Writeonly bool `default:"false" help:"implies --disallow-reads and --disallow-lists" basic-help:"true"`
	NotBefore string `help:"disallow access before this time (e.g. '+2h', '2020-01-02T15:01:01-01:00')" basic-help:"true"`
	NotAfter string `help:"disallow access after this time (e.g. '+2h', '2020-01-02T15:01:01-01:00')" basic-help:"true"`
	AllowedPathPrefix []string `help:"whitelist of path prefixes to require, overrides the [allowed-path-prefix] arguments"`
	ExportTo string `default:"" help:"path to export the shared access to" basic-help:"true"`
	BaseURL string `default:"https://link.us1.storjshare.io" help:"the base url for link sharing" basic-help:"true"`
	Register bool `default:"false" help:"if true, creates and registers access grant" basic-help:"true"`
	URL bool `default:"false" help:"if true, returns a url for the shared path. implies --register and --public" basic-help:"true"`
	DNS string `default:"" help:"specify your custom hostname. if set, returns dns settings for web hosting. implies --register and --public" basic-help:"true"`
	AuthService string `default:"https://auth.us1.storjshare.io" help:"url for shared auth service" basic-help:"true"`
	CACert string `help:"path to a file in PEM format with certificate(s) or certificate chain(s) to validate the auth service against" default:""`
	Public bool `default:"false" help:"if true, the access will be public. --dns and --url override this" basic-help:"true"`
	// Share requires information about the current access
	AccessConfig
}
// init registers the share command directly on RootCmd.
func init() {
	// We skip the use of addCmd here because we only want the configuration options listed
	// above, and addCmd adds a whole lot more than we want.
	shareCmd := &cobra.Command{
		Use: "share [ALLOWED_PATH_PREFIX]...",
		Short: "Shares restricted access to objects.",
		RunE: shareMain,
	}
	RootCmd.AddCommand(shareCmd)
	process.Bind(shareCmd, &shareCfg, defaults, cfgstruct.ConfDir(getConfDir()))
}
// shareMain creates a restricted access grant from the current access and,
// depending on flags, registers it with the auth service, prints a link
// sharing URL or DNS records, and/or exports the serialized grant to a file.
func shareMain(cmd *cobra.Command, args []string) (err error) {
	ctx, _ := withTelemetry(cmd)
	// --url and --dns imply a public share.
	isPublic := shareCfg.Public || shareCfg.URL || shareCfg.DNS != ""
	if isPublic {
		// Require an explicit opt-out (--not-after=none) before creating a
		// public share with no expiration.
		if shareCfg.NotAfter == "" {
			fmt.Println("It's not recommended to create a shared Access without an expiration date.")
			fmt.Println("If you wish to do so anyway, please run this command with --not-after=none.")
			return
		}
		if shareCfg.NotAfter == "none" {
			shareCfg.NotAfter = ""
		}
	}
	newAccess, newAccessData, sharePrefixes, permission, err := createAccessGrant(args)
	if err != nil {
		return err
	}
	if shareCfg.Register || shareCfg.URL || shareCfg.DNS != "" {
		credentials, err := RegisterAccess(ctx, newAccess, shareCfg.AuthService, isPublic, shareCfg.CACert)
		if err != nil {
			return err
		}
		err = DisplayGatewayCredentials(credentials, "", "")
		if err != nil {
			return err
		}
		_, err = fmt.Println("Public Access: ", isPublic)
		if err != nil {
			return err
		}
		// URL/DNS output is only emitted for a single, read-only prefix.
		if len(shareCfg.AllowedPathPrefix) == 1 && !permission.AllowUpload && !permission.AllowDelete {
			if shareCfg.URL {
				if err = createURL(credentials.AccessKeyID, sharePrefixes); err != nil {
					return err
				}
			}
			if shareCfg.DNS != "" {
				if err = createDNS(credentials.AccessKeyID); err != nil {
					return err
				}
			}
		}
	}
	if shareCfg.ExportTo != "" {
		// convert to an absolute path, mostly for output purposes.
		exportTo, err := filepath.Abs(shareCfg.ExportTo)
		if err != nil {
			return Error.Wrap(err)
		}
		if err := ioutil.WriteFile(exportTo, []byte(newAccessData+"\n"), 0600); err != nil {
			return Error.Wrap(err)
		}
		fmt.Println("Exported to:", exportTo)
	}
	return nil
}
// createAccessGrant derives a restricted access grant (and its serialized
// form) for the allowed path prefixes, applying the permission and time
// restriction flags, and prints a summary of the applied restrictions.
func createAccessGrant(args []string) (newAccess *uplink.Access, newAccessData string, sharePrefixes []sharePrefixExtension, permission uplink.Permission, err error) {
	now := time.Now()
	notBefore, err := parseHumanDate(shareCfg.NotBefore, now)
	if err != nil {
		return newAccess, newAccessData, sharePrefixes, permission, err
	}
	notAfter, err := parseHumanDate(shareCfg.NotAfter, now)
	if err != nil {
		return newAccess, newAccessData, sharePrefixes, permission, err
	}
	if len(shareCfg.AllowedPathPrefix) == 0 {
		// if the --allowed-path-prefix flag is not set,
		// use any arguments as allowed path prefixes
		for _, arg := range args {
			shareCfg.AllowedPathPrefix = append(shareCfg.AllowedPathPrefix, strings.Split(arg, ",")...)
		}
	}
	var uplinkSharePrefixes []uplink.SharePrefix
	for _, path := range shareCfg.AllowedPathPrefix {
		p, err := fpath.New(path)
		if err != nil {
			return newAccess, newAccessData, sharePrefixes, permission, err
		}
		if p.IsLocal() {
			return newAccess, newAccessData, sharePrefixes, permission, errs.New("required path must be remote: %q", path)
		}
		uplinkSharePrefix := uplink.SharePrefix{
			Bucket: p.Bucket(),
			Prefix: p.Path(),
		}
		// The trailing slash is tracked separately; it affects URL generation.
		sharePrefixes = append(sharePrefixes, sharePrefixExtension{
			uplinkSharePrefix: uplinkSharePrefix,
			hasTrailingSlash: strings.HasSuffix(path, "/"),
		})
		uplinkSharePrefixes = append(uplinkSharePrefixes, uplinkSharePrefix)
	}
	access, err := shareCfg.GetAccess()
	if err != nil {
		return newAccess, newAccessData, sharePrefixes, permission, err
	}
	// --readonly / --writeonly combine with the individual disallow flags.
	permission = uplink.Permission{}
	permission.AllowDelete = !shareCfg.DisallowDeletes && !shareCfg.Readonly
	permission.AllowList = !shareCfg.DisallowLists && !shareCfg.Writeonly
	permission.AllowDownload = !shareCfg.DisallowReads && !shareCfg.Writeonly
	permission.AllowUpload = !shareCfg.DisallowWrites && !shareCfg.Readonly
	permission.NotBefore = notBefore
	permission.NotAfter = notAfter
	newAccess, err = access.Share(permission, uplinkSharePrefixes...)
	if err != nil {
		return newAccess, newAccessData, sharePrefixes, permission, err
	}
	newAccessData, err = newAccess.Serialize()
	if err != nil {
		return newAccess, newAccessData, sharePrefixes, permission, err
	}
	satelliteAddr, _, _, err := parseAccess(newAccessData)
	if err != nil {
		return newAccess, newAccessData, sharePrefixes, permission, err
	}
	fmt.Println("Sharing access to satellite", satelliteAddr)
	fmt.Println("=========== ACCESS RESTRICTIONS ==========================================================")
	fmt.Println("Download :", formatPermission(permission.AllowDownload))
	fmt.Println("Upload :", formatPermission(permission.AllowUpload))
	fmt.Println("Lists :", formatPermission(permission.AllowList))
	fmt.Println("Deletes :", formatPermission(permission.AllowDelete))
	fmt.Println("NotBefore :", formatTimeRestriction(permission.NotBefore))
	fmt.Println("NotAfter :", formatTimeRestriction(permission.NotAfter))
	fmt.Println("Paths :", formatPaths(sharePrefixes))
	fmt.Println("=========== SERIALIZED ACCESS WITH THE ABOVE RESTRICTIONS TO SHARE WITH OTHERS ===========")
	fmt.Println("Access :", newAccessData)
	return newAccess, newAccessData, sharePrefixes, permission, nil
}
// createURL prints a linksharing browser URL for the first (and only)
// allowed path prefix.
//
// NOTE(review): callers pass the registered AccessKeyID as newAccessData
// (see shareMain), not the serialized access — the parameter name is
// misleading.
func createURL(newAccessData string, sharePrefixes []sharePrefixExtension) (err error) {
	p, err := fpath.New(shareCfg.AllowedPathPrefix[0])
	if err != nil {
		return err
	}
	fmt.Println("=========== BROWSER URL ==================================================================")
	fmt.Println("REMINDER : Object key must end in '/' when trying to share recursively")
	path := p.Path()
	// If we're not sharing the entire bucket (the path is empty)
	// and the requested share prefix has a trailing slash, then
	// make sure to append a trailing slash to the URL.
	if path != "" && sharePrefixes[0].hasTrailingSlash {
		path += "/"
	}
	fmt.Printf("URL : %s/s/%s/%s/%s\n", shareCfg.BaseURL, url.PathEscape(newAccessData), p.Bucket(), path)
	return nil
}
// createDNS prints the DNS records (a CNAME plus storj-root and
// storj-access TXT records) needed to host the first allowed path prefix on
// the custom hostname given via --dns.
func createDNS(accessKey string) (err error) {
	p, err := fpath.New(shareCfg.AllowedPathPrefix[0])
	if err != nil {
		return err
	}
	CNAME, err := url.Parse(shareCfg.BaseURL)
	if err != nil {
		return err
	}
	minWidth := len(shareCfg.DNS) + 5 // add 5 spaces to account for "txt-"
	w := new(tabwriter.Writer)
	w.Init(os.Stdout, minWidth, minWidth, 0, '\t', 0)
	// The named return lets the deferred Flush error be reported to callers.
	defer func() {
		err = errs.Combine(err, w.Flush())
	}()
	var printStorjRoot string
	if p.Path() == "" {
		printStorjRoot = fmt.Sprintf("txt-%s\tIN\tTXT \tstorj-root:%s", shareCfg.DNS, p.Bucket())
	} else {
		printStorjRoot = fmt.Sprintf("txt-%s\tIN\tTXT \tstorj-root:%s/%s", shareCfg.DNS, p.Bucket(), p.Path())
	}
	fmt.Println("=========== DNS INFO =====================================================================")
	fmt.Println("Remember to update the $ORIGIN with your domain name. You may also change the $TTL.")
	fmt.Fprintln(w, "$ORIGIN example.com.")
	fmt.Fprintln(w, "$TTL 3600")
	fmt.Fprintf(w, "%s \tIN\tCNAME\t%s.\n", shareCfg.DNS, CNAME.Host)
	fmt.Fprintln(w, printStorjRoot)
	fmt.Fprintf(w, "txt-%s\tIN\tTXT \tstorj-access:%s\n", shareCfg.DNS, accessKey)
	return nil
}
// parseHumanDate converts a user supplied time spec into a time.Time. It
// accepts "" (the zero time), "now", relative offsets such as "+2h" or
// "-30m" applied to now, and absolute RFC3339 timestamps.
func parseHumanDate(date string, now time.Time) (time.Time, error) {
	if date == "" {
		return time.Time{}, nil
	}
	if date == "now" {
		return now, nil
	}
	switch date[0] {
	case '+':
		offset, err := time.ParseDuration(date[1:])
		return now.Add(offset), errs.Wrap(err)
	case '-':
		offset, err := time.ParseDuration(date[1:])
		return now.Add(-offset), errs.Wrap(err)
	}
	parsed, err := time.Parse(time.RFC3339, date)
	return parsed, errs.Wrap(err)
}
// sharePrefixExtension is a temporary struct type. We might want to add hasTrailingSlash bool to `uplink.SharePrefix` directly.
type sharePrefixExtension struct {
	// uplinkSharePrefix is the bucket/prefix pair handed to access.Share.
	uplinkSharePrefix uplink.SharePrefix
	// hasTrailingSlash records whether the user wrote the prefix with a
	// trailing slash; createURL uses it when building the share URL.
	hasTrailingSlash bool
}
// formatPermission renders a permission bit as human readable text.
func formatPermission(allowed bool) string {
	text := "Disallowed"
	if allowed {
		text = "Allowed"
	}
	return text
}
// formatTimeRestriction renders a time bound, treating the zero time as
// meaning no restriction.
func formatTimeRestriction(t time.Time) string {
	if !t.IsZero() {
		return formatTime(t)
	}
	return "No restriction"
}
// formatPaths renders the shared prefixes for display, warning loudly when
// no prefix restriction is present at all.
func formatPaths(sharePrefixes []sharePrefixExtension) string {
	if len(sharePrefixes) == 0 {
		return "WARNING! The entire project is shared!"
	}
	paths := make([]string, 0, len(sharePrefixes))
	for _, extension := range sharePrefixes {
		prefix := extension.uplinkSharePrefix
		path := "sj://" + prefix.Bucket
		switch {
		case len(prefix.Prefix) == 0:
			path += "/ (entire bucket)"
		case extension.hasTrailingSlash:
			path += "/" + prefix.Prefix + "/"
		default:
			path += "/" + prefix.Prefix
		}
		paths = append(paths, path)
	}
	return strings.Join(paths, "\n ")
}

View File

@ -12,7 +12,7 @@ import (
"github.com/zeebo/clingy"
"github.com/zeebo/errs"
"storj.io/storj/cmd/uplinkng/ulext"
"storj.io/storj/cmd/uplink/ulext"
)
type cmdAccessCreate struct {

View File

@ -7,7 +7,7 @@ import (
"github.com/zeebo/clingy"
"github.com/zeebo/errs"
"storj.io/storj/cmd/uplinkng/ulext"
"storj.io/storj/cmd/uplink/ulext"
)
type cmdAccessExport struct {

View File

@ -10,7 +10,7 @@ import (
"github.com/zeebo/clingy"
"github.com/zeebo/errs"
"storj.io/storj/cmd/uplinkng/ulext"
"storj.io/storj/cmd/uplink/ulext"
"storj.io/uplink"
)

View File

@ -14,7 +14,7 @@ import (
"storj.io/common/base58"
"storj.io/common/macaroon"
"storj.io/common/pb"
"storj.io/storj/cmd/uplinkng/ulext"
"storj.io/storj/cmd/uplink/ulext"
)
// ensures that cmdAccessInspect implements clingy.Command.

View File

@ -6,7 +6,7 @@ package main
import (
"testing"
"storj.io/storj/cmd/uplinkng/ultest"
"storj.io/storj/cmd/uplink/ultest"
)
func TestAccessInspect(t *testing.T) {

View File

@ -10,7 +10,7 @@ import (
"github.com/zeebo/clingy"
"storj.io/storj/cmd/uplinkng/ulext"
"storj.io/storj/cmd/uplink/ulext"
"storj.io/uplink"
)

View File

@ -8,7 +8,7 @@ import (
"github.com/zeebo/clingy"
"storj.io/storj/cmd/uplinkng/ulext"
"storj.io/storj/cmd/uplink/ulext"
)
type cmdAccessRegister struct {

View File

@ -9,7 +9,7 @@ import (
"github.com/zeebo/clingy"
"github.com/zeebo/errs"
"storj.io/storj/cmd/uplinkng/ulext"
"storj.io/storj/cmd/uplink/ulext"
)
type cmdAccessRemove struct {

View File

@ -9,7 +9,7 @@ import (
"github.com/zeebo/clingy"
"github.com/zeebo/errs"
"storj.io/storj/cmd/uplinkng/ulext"
"storj.io/storj/cmd/uplink/ulext"
)
type cmdAccessRestrict struct {

View File

@ -8,7 +8,7 @@ import (
"github.com/zeebo/clingy"
"storj.io/storj/cmd/uplinkng/ulext"
"storj.io/storj/cmd/uplink/ulext"
)
type cmdAccessRevoke struct {

View File

@ -10,7 +10,7 @@ import (
"github.com/zeebo/clingy"
"github.com/zeebo/errs"
"storj.io/storj/cmd/uplinkng/ulext"
"storj.io/storj/cmd/uplink/ulext"
"storj.io/uplink"
)

View File

@ -9,7 +9,7 @@ import (
"github.com/zeebo/clingy"
"github.com/zeebo/errs"
"storj.io/storj/cmd/uplinkng/ulext"
"storj.io/storj/cmd/uplink/ulext"
)
type cmdAccessUse struct {

View File

@ -18,9 +18,9 @@ import (
"storj.io/common/memory"
"storj.io/common/sync2"
"storj.io/storj/cmd/uplinkng/ulext"
"storj.io/storj/cmd/uplinkng/ulfs"
"storj.io/storj/cmd/uplinkng/ulloc"
"storj.io/storj/cmd/uplink/ulext"
"storj.io/storj/cmd/uplink/ulfs"
"storj.io/storj/cmd/uplink/ulloc"
)
type cmdCp struct {

View File

@ -6,7 +6,7 @@ package main
import (
"testing"
"storj.io/storj/cmd/uplinkng/ultest"
"storj.io/storj/cmd/uplink/ultest"
)
func TestCpDownload(t *testing.T) {

View File

@ -9,9 +9,9 @@ import (
"github.com/zeebo/clingy"
"storj.io/storj/cmd/uplinkng/ulext"
"storj.io/storj/cmd/uplinkng/ulfs"
"storj.io/storj/cmd/uplinkng/ulloc"
"storj.io/storj/cmd/uplink/ulext"
"storj.io/storj/cmd/uplink/ulfs"
"storj.io/storj/cmd/uplink/ulloc"
"storj.io/uplink"
)

View File

@ -6,7 +6,7 @@ package main
import (
"testing"
"storj.io/storj/cmd/uplinkng/ultest"
"storj.io/storj/cmd/uplink/ultest"
)
func TestLsErrors(t *testing.T) {

View File

@ -7,8 +7,8 @@ import (
"github.com/zeebo/clingy"
"github.com/zeebo/errs"
"storj.io/storj/cmd/uplinkng/ulext"
"storj.io/storj/cmd/uplinkng/ulloc"
"storj.io/storj/cmd/uplink/ulext"
"storj.io/storj/cmd/uplink/ulloc"
)
type cmdMb struct {

View File

@ -11,8 +11,8 @@ import (
"github.com/zeebo/clingy"
"github.com/zeebo/errs"
"storj.io/storj/cmd/uplinkng/ulext"
"storj.io/storj/cmd/uplinkng/ulloc"
"storj.io/storj/cmd/uplink/ulext"
"storj.io/storj/cmd/uplink/ulloc"
)
type cmdMetaGet struct {

View File

@ -13,9 +13,9 @@ import (
"github.com/zeebo/errs"
"storj.io/common/sync2"
"storj.io/storj/cmd/uplinkng/ulext"
"storj.io/storj/cmd/uplinkng/ulfs"
"storj.io/storj/cmd/uplinkng/ulloc"
"storj.io/storj/cmd/uplink/ulext"
"storj.io/storj/cmd/uplink/ulfs"
"storj.io/storj/cmd/uplink/ulloc"
)
type cmdMv struct {

View File

@ -6,7 +6,7 @@ package main
import (
"testing"
"storj.io/storj/cmd/uplinkng/ultest"
"storj.io/storj/cmd/uplink/ultest"
)
func TestMv(t *testing.T) {

View File

@ -10,8 +10,8 @@ import (
"github.com/zeebo/clingy"
"github.com/zeebo/errs"
"storj.io/storj/cmd/uplinkng/ulext"
"storj.io/storj/cmd/uplinkng/ulloc"
"storj.io/storj/cmd/uplink/ulext"
"storj.io/storj/cmd/uplink/ulloc"
)
type cmdRb struct {

View File

@ -13,9 +13,9 @@ import (
"github.com/zeebo/errs"
"storj.io/common/sync2"
"storj.io/storj/cmd/uplinkng/ulext"
"storj.io/storj/cmd/uplinkng/ulfs"
"storj.io/storj/cmd/uplinkng/ulloc"
"storj.io/storj/cmd/uplink/ulext"
"storj.io/storj/cmd/uplink/ulfs"
"storj.io/storj/cmd/uplink/ulloc"
)
type cmdRm struct {

View File

@ -6,7 +6,7 @@ package main
import (
"testing"
"storj.io/storj/cmd/uplinkng/ultest"
"storj.io/storj/cmd/uplink/ultest"
)
func TestRmRemote(t *testing.T) {

View File

@ -19,8 +19,8 @@ import (
"github.com/zeebo/clingy"
"github.com/zeebo/errs"
"storj.io/storj/cmd/uplinkng/ulext"
"storj.io/storj/cmd/uplinkng/ulloc"
"storj.io/storj/cmd/uplink/ulext"
"storj.io/storj/cmd/uplink/ulloc"
"storj.io/uplink"
)

View File

@ -8,7 +8,7 @@ import (
"github.com/stretchr/testify/require"
"storj.io/storj/cmd/uplinkng/ultest"
"storj.io/storj/cmd/uplink/ultest"
)
func TestShare(t *testing.T) {

View File

@ -1,19 +0,0 @@
#!/bin/sh
set -euo pipefail
if [[ ! -f "${CONF_PATH}/config.yaml" ]]; then
./uplink setup
fi
RUN_PARAMS="${RUN_PARAMS:-} --config ${CONF_PATH}"
if [[ -n "${API_KEY:-}" ]]; then
RUN_PARAMS="${RUN_PARAMS} --api-key ${API_KEY}"
fi
if [ -n "${SATELLITE_ADDR:-}" ]; then
RUN_PARAMS="${RUN_PARAMS} --overlay-addr $SATELLITE_ADDR"
RUN_PARAMS="${RUN_PARAMS} --pointer-db-addr $SATELLITE_ADDR"
fi
exec ./uplink run $RUN_PARAMS "$@"

View File

@ -7,8 +7,8 @@ import (
"context"
"storj.io/common/rpc"
"storj.io/storj/cmd/uplinkng/ulext"
"storj.io/storj/cmd/uplinkng/ulfs"
"storj.io/storj/cmd/uplink/ulext"
"storj.io/storj/cmd/uplink/ulfs"
"storj.io/uplink"
privateAccess "storj.io/uplink/private/access"
"storj.io/uplink/private/transport"

View File

@ -9,7 +9,7 @@ import (
"github.com/zeebo/clingy"
"github.com/zeebo/errs"
"storj.io/storj/cmd/uplinkng/ulext"
"storj.io/storj/cmd/uplink/ulext"
)
func saveInitialConfig(ctx clingy.Context, ex ulext.External) error {

View File

@ -1,27 +1,64 @@
// Copyright (C) 2019 Storj Labs, Inc.
// Copyright (C) 2021 Storj Labs, Inc.
// See LICENSE for copying information.
package main
import (
"github.com/spf13/cobra"
"github.com/spf13/viper"
"context"
"flag"
"fmt"
"os"
_ "storj.io/common/rpc/quic" // This enables quic connector
"storj.io/private/process"
"storj.io/storj/cmd/uplink/cmd"
_ "storj.io/storj/private/version" // This attaches version information during release builds.
"github.com/zeebo/clingy"
_ "storj.io/common/rpc/quic" // include quic connector
"storj.io/storj/cmd/uplink/ulext"
)
func main() {
process.ExecWithCustomConfig(cmd.RootCmd, true, func(cmd *cobra.Command, vip *viper.Viper) error {
accessFlag := cmd.Flags().Lookup("access")
// try to load configuration because we may still need 'accesses' (for named access)
// field but error only if 'access' flag is not set
err := process.LoadConfig(cmd, vip)
if err != nil && (accessFlag == nil || accessFlag.Value.String() == "") {
return err
}
return nil
ex := newExternal()
ok, err := clingy.Environment{
Name: "uplink",
Args: os.Args[1:],
Dynamic: ex.Dynamic,
Wrap: ex.Wrap,
}.Run(context.Background(), func(cmds clingy.Commands) {
ex.Setup(cmds) // setup ex first so that stdlib flags can consult config
newStdlibFlags(flag.CommandLine).Setup(cmds)
commands(cmds, ex)
})
if err != nil {
fmt.Fprintf(os.Stderr, "%+v\n", err)
}
if !ok || err != nil {
os.Exit(1)
}
}
func commands(cmds clingy.Commands, ex ulext.External) {
cmds.Group("access", "Access related commands", func() {
cmds.New("create", "Create an access from the satellite UI", newCmdAccessCreate(ex))
cmds.New("export", "Export an access to a file", newCmdAccessExport(ex))
cmds.New("import", "Import an existing access", newCmdAccessImport(ex))
cmds.New("inspect", "Inspect shows verbose details about an access", newCmdAccessInspect(ex))
cmds.New("list", "List saved accesses", newCmdAccessList(ex))
cmds.New("register", "Register an access grant for use with a hosted S3 compatible gateway and linksharing", newCmdAccessRegister(ex))
cmds.New("remove", "Removes an access from local store", newCmdAccessRemove(ex))
cmds.New("restrict", "Restrict an access", newCmdAccessRestrict(ex))
cmds.New("revoke", "Revoke an access", newCmdAccessRevoke(ex))
cmds.New("setup", "Wizard for setting up uplink from satellite UI", newCmdAccessSetup(ex))
cmds.New("use", "Set default access to use", newCmdAccessUse(ex))
})
cmds.New("setup", "Wizard for setting up uplink from satellite UI", newCmdAccessSetup(ex))
cmds.New("mb", "Create a new bucket", newCmdMb(ex))
cmds.New("rb", "Remove a bucket bucket", newCmdRb(ex))
cmds.New("cp", "Copies files or objects into or out of storj", newCmdCp(ex))
cmds.New("mv", "Moves files or objects", newCmdMv(ex))
cmds.New("ls", "Lists buckets, prefixes, or objects", newCmdLs(ex))
cmds.New("rm", "Remove an object", newCmdRm(ex))
cmds.Group("meta", "Object metadata related commands", func() {
cmds.New("get", "Get an object's metadata", newCmdMetaGet(ex))
})
cmds.New("share", "Shares restricted accesses to objects", newCmdShare(ex))
cmds.New("version", "Prints version information", newCmdVersion())
}

View File

@ -5,11 +5,18 @@
package ulext
import (
"bytes"
"context"
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"time"
"github.com/zeebo/clingy"
"github.com/zeebo/errs"
"storj.io/storj/cmd/uplinkng/ulfs"
"storj.io/storj/cmd/uplink/ulfs"
"storj.io/uplink"
)
@ -55,3 +62,57 @@ type Option struct {
func BypassEncryption(bypass bool) Option {
return Option{apply: func(opt *Options) { opt.EncryptionBypass = bypass }}
}
// RegisterAccess registers an access grant with a Gateway Authorization Service.
func RegisterAccess(ctx context.Context, access *uplink.Access, authService string, public bool, timeout time.Duration) (accessKey, secretKey, endpoint string, err error) {
if authService == "" {
return "", "", "", errs.New("no auth service address provided")
}
accessSerialized, err := access.Serialize()
if err != nil {
return "", "", "", errs.Wrap(err)
}
postData, err := json.Marshal(map[string]interface{}{
"access_grant": accessSerialized,
"public": public,
})
if err != nil {
return accessKey, "", "", errs.Wrap(err)
}
client := &http.Client{
Timeout: timeout,
}
req, err := http.NewRequestWithContext(ctx, http.MethodPost, fmt.Sprintf("%s/v1/access", authService), bytes.NewReader(postData))
if err != nil {
return "", "", "", err
}
req.Header.Set("Content-Type", "application/json")
resp, err := client.Do(req)
if err != nil {
return "", "", "", err
}
defer func() { err = errs.Combine(err, resp.Body.Close()) }()
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
return "", "", "", err
}
respBody := make(map[string]string)
if err := json.Unmarshal(body, &respBody); err != nil {
return "", "", "", errs.New("unexpected response from auth service: %s", string(body))
}
accessKey, ok := respBody["access_key_id"]
if !ok {
return "", "", "", errs.New("access_key_id missing in response")
}
secretKey, ok = respBody["secret_key"]
if !ok {
return "", "", "", errs.New("secret_key missing in response")
}
return accessKey, secretKey, respBody["endpoint"], nil
}

View File

@ -10,7 +10,7 @@ import (
"github.com/zeebo/clingy"
"storj.io/storj/cmd/uplinkng/ulloc"
"storj.io/storj/cmd/uplink/ulloc"
"storj.io/uplink"
)

View File

@ -8,7 +8,7 @@ import (
"github.com/zeebo/errs"
"storj.io/storj/cmd/uplinkng/ulloc"
"storj.io/storj/cmd/uplink/ulloc"
)
//

View File

@ -3,7 +3,7 @@
package ulfs
import "storj.io/storj/cmd/uplinkng/ulloc"
import "storj.io/storj/cmd/uplink/ulloc"
// filteredObjectIterator removes any iteration entries that do not begin with the filter.
// all entries must begin with the trim string which is removed before checking for the

View File

@ -13,7 +13,7 @@ import (
"github.com/zeebo/errs"
"storj.io/storj/cmd/uplinkng/ulloc"
"storj.io/storj/cmd/uplink/ulloc"
)
// Local implements something close to a filesystem but backed by the local disk.

View File

@ -9,7 +9,7 @@ import (
"github.com/zeebo/clingy"
"github.com/zeebo/errs"
"storj.io/storj/cmd/uplinkng/ulloc"
"storj.io/storj/cmd/uplink/ulloc"
)
// Mixed dispatches to either the local or remote filesystem depending on the location.

View File

@ -9,7 +9,7 @@ import (
"github.com/zeebo/errs"
"storj.io/storj/cmd/uplinkng/ulloc"
"storj.io/storj/cmd/uplink/ulloc"
"storj.io/uplink"
)

View File

@ -6,8 +6,8 @@ package ultest
import (
"context"
"storj.io/storj/cmd/uplinkng/ulext"
"storj.io/storj/cmd/uplinkng/ulfs"
"storj.io/storj/cmd/uplink/ulext"
"storj.io/storj/cmd/uplink/ulfs"
"storj.io/uplink"
)

View File

@ -16,8 +16,8 @@ import (
"github.com/zeebo/clingy"
"github.com/zeebo/errs"
"storj.io/storj/cmd/uplinkng/ulfs"
"storj.io/storj/cmd/uplinkng/ulloc"
"storj.io/storj/cmd/uplink/ulfs"
"storj.io/storj/cmd/uplink/ulloc"
)
//

View File

@ -10,7 +10,7 @@ import (
"github.com/stretchr/testify/require"
"storj.io/storj/cmd/uplinkng/ulloc"
"storj.io/storj/cmd/uplink/ulloc"
)
// Result captures all the output of running a command for inspection.

View File

@ -11,9 +11,9 @@ import (
"github.com/stretchr/testify/require"
"github.com/zeebo/clingy"
"storj.io/storj/cmd/uplinkng/ulext"
"storj.io/storj/cmd/uplinkng/ulfs"
"storj.io/storj/cmd/uplinkng/ulloc"
"storj.io/storj/cmd/uplink/ulext"
"storj.io/storj/cmd/uplink/ulfs"
"storj.io/storj/cmd/uplink/ulloc"
)
// Commands is an alias to refer to a function that builds clingy commands.

View File

@ -1,64 +0,0 @@
// Copyright (C) 2021 Storj Labs, Inc.
// See LICENSE for copying information.
package main
import (
"context"
"flag"
"fmt"
"os"
"github.com/zeebo/clingy"
_ "storj.io/common/rpc/quic" // include quic connector
"storj.io/storj/cmd/uplinkng/ulext"
)
func main() {
ex := newExternal()
ok, err := clingy.Environment{
Name: "uplink",
Args: os.Args[1:],
Dynamic: ex.Dynamic,
Wrap: ex.Wrap,
}.Run(context.Background(), func(cmds clingy.Commands) {
ex.Setup(cmds) // setup ex first so that stdlib flags can consult config
newStdlibFlags(flag.CommandLine).Setup(cmds)
commands(cmds, ex)
})
if err != nil {
fmt.Fprintf(os.Stderr, "%+v\n", err)
}
if !ok || err != nil {
os.Exit(1)
}
}
func commands(cmds clingy.Commands, ex ulext.External) {
cmds.Group("access", "Access related commands", func() {
cmds.New("create", "Create an access from the satellite UI", newCmdAccessCreate(ex))
cmds.New("export", "Export an access to a file", newCmdAccessExport(ex))
cmds.New("import", "Import an existing access", newCmdAccessImport(ex))
cmds.New("inspect", "Inspect shows verbose details about an access", newCmdAccessInspect(ex))
cmds.New("list", "List saved accesses", newCmdAccessList(ex))
cmds.New("register", "Register an access grant for use with a hosted S3 compatible gateway and linksharing", newCmdAccessRegister(ex))
cmds.New("remove", "Removes an access from local store", newCmdAccessRemove(ex))
cmds.New("restrict", "Restrict an access", newCmdAccessRestrict(ex))
cmds.New("revoke", "Revoke an access", newCmdAccessRevoke(ex))
cmds.New("setup", "Wizard for setting up uplink from satellite UI", newCmdAccessSetup(ex))
cmds.New("use", "Set default access to use", newCmdAccessUse(ex))
})
cmds.New("setup", "Wizard for setting up uplink from satellite UI", newCmdAccessSetup(ex))
cmds.New("mb", "Create a new bucket", newCmdMb(ex))
cmds.New("rb", "Remove a bucket bucket", newCmdRb(ex))
cmds.New("cp", "Copies files or objects into or out of storj", newCmdCp(ex))
cmds.New("mv", "Moves files or objects", newCmdMv(ex))
cmds.New("ls", "Lists buckets, prefixes, or objects", newCmdLs(ex))
cmds.New("rm", "Remove an object", newCmdRm(ex))
cmds.Group("meta", "Object metadata related commands", func() {
cmds.New("get", "Get an object's metadata", newCmdMetaGet(ex))
})
cmds.New("share", "Shares restricted accesses to objects", newCmdShare(ex))
cmds.New("version", "Prints version information", newCmdVersion())
}

View File

@ -3,6 +3,14 @@ set -ueo pipefail
SCRIPTDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
source $SCRIPTDIR/utils.sh
TMPDIR=$(mktemp -d -t tmp.XXXXXXXXXX)
cleanup(){
rm -rf "$TMPDIR"
echo "cleaned up test successfully"
}
trap cleanup EXIT
trap 'failure ${LINENO} "$BASH_COMMAND"' ERR
: "${STORJ_NETWORK_DIR?Environment variable STORJ_NETWORK_DIR needs to be set}"
@ -24,7 +32,16 @@ DOWNLOAD_FILES_DIR="$STORJ_NETWORK_DIR/download/$BUCKET"
# override configured access with access where address is node ID + satellite address
STORJ_ACCESS=$(go run "$SCRIPTDIR"/update-access.go "$SATELLITE_0_DIR" "$GATEWAY_0_ACCESS")
UPLINK_ACCESS="$STORJ_ACCESS"
export STORJ_ACCESS
export UPLINK_ACCESS
# workaround for issues with automatic accepting monitoring question
# with first run we need to accept question y/n about monitoring
export UPLINK_CONFIG_DIR=$TMPDIR/uplink
mkdir -p "$UPLINK_CONFIG_DIR"
touch "$UPLINK_CONFIG_DIR/config.ini"
set -x
@ -37,17 +54,17 @@ if [[ "$1" == "upload" ]]; then
# sometimes we overwrite files in the same bucket. allow the mb to fail because of an existing
# bucket. if it fails for any other reason, the following cp will get it anyway.
uplink --config-dir "$GATEWAY_0_DIR" mb "sj://$BUCKET/" || true
uplink mb "sj://$BUCKET/" || true
uplink --config-dir "$GATEWAY_0_DIR" cp --progress=false "$PRISTINE_FILES_DIR/small-upload-testfile" "sj://$BUCKET/"
uplink --config-dir "$GATEWAY_0_DIR" cp --progress=false "$PRISTINE_FILES_DIR/big-upload-testfile" "sj://$BUCKET/"
uplink --config-dir "$GATEWAY_0_DIR" cp --progress=false "$PRISTINE_FILES_DIR/multisegment-upload-testfile" "sj://$BUCKET/"
uplink cp --progress=false "$PRISTINE_FILES_DIR/small-upload-testfile" "sj://$BUCKET/"
uplink cp --progress=false "$PRISTINE_FILES_DIR/big-upload-testfile" "sj://$BUCKET/"
uplink cp --progress=false "$PRISTINE_FILES_DIR/multisegment-upload-testfile" "sj://$BUCKET/"
fi
if [[ "$1" == "download" ]]; then
uplink --config-dir "$GATEWAY_0_DIR" cp --progress=false "sj://$BUCKET/small-upload-testfile" "$DOWNLOAD_FILES_DIR"
uplink --config-dir "$GATEWAY_0_DIR" cp --progress=false "sj://$BUCKET/big-upload-testfile" "$DOWNLOAD_FILES_DIR"
uplink --config-dir "$GATEWAY_0_DIR" cp --progress=false "sj://$BUCKET/multisegment-upload-testfile" "$DOWNLOAD_FILES_DIR"
uplink cp --progress=false "sj://$BUCKET/small-upload-testfile" "$DOWNLOAD_FILES_DIR"
uplink cp --progress=false "sj://$BUCKET/big-upload-testfile" "$DOWNLOAD_FILES_DIR"
uplink cp --progress=false "sj://$BUCKET/multisegment-upload-testfile" "$DOWNLOAD_FILES_DIR"
compare_files "$PRISTINE_FILES_DIR/small-upload-testfile" "$DOWNLOAD_FILES_DIR/small-upload-testfile"
compare_files "$PRISTINE_FILES_DIR/big-upload-testfile" "$DOWNLOAD_FILES_DIR/big-upload-testfile"
@ -61,9 +78,9 @@ fi
if [[ "$1" == "cleanup" ]]; then
for BUCKET_DIR in "$STORJ_NETWORK_DIR"/pristine/*; do
BUCKET="$(basename "$BUCKET_DIR")"
uplink --config-dir "$GATEWAY_0_DIR" rm "sj://$BUCKET/small-upload-testfile"
uplink --config-dir "$GATEWAY_0_DIR" rm "sj://$BUCKET/big-upload-testfile"
uplink --config-dir "$GATEWAY_0_DIR" rm "sj://$BUCKET/multisegment-upload-testfile"
uplink --config-dir "$GATEWAY_0_DIR" rb "sj://$BUCKET"
uplink rm "sj://$BUCKET/small-upload-testfile"
uplink rm "sj://$BUCKET/big-upload-testfile"
uplink rm "sj://$BUCKET/multisegment-upload-testfile"
uplink rb "sj://$BUCKET"
done
fi

View File

@ -16,7 +16,6 @@ make -C "$SCRIPTDIR"/.. install-sim
echo "Overriding default max segment size to 6MiB"
GOBIN=$TMP go install -v -ldflags "-X 'storj.io/uplink.maxSegmentSize=6MiB'" storj.io/storj/cmd/uplink
GOBIN=$TMP go install -v -ldflags "-X 'storj.io/uplink.maxSegmentSize=6MiB'" storj.io/storj/cmd/uplinkng
# use modified version of uplink
export PATH=$TMP:$PATH
@ -36,7 +35,6 @@ fi
# run tests
storj-sim -x --satellites 1 --host $STORJ_NETWORK_HOST4 network test bash "$SCRIPTDIR"/test-uplink.sh
storj-sim -x --satellites 1 --host $STORJ_NETWORK_HOST4 network test bash "$SCRIPTDIR"/test-uplinkng.sh
storj-sim -x --satellites 1 --host $STORJ_NETWORK_HOST4 network test bash "$SCRIPTDIR"/test-uplink-share.sh
storj-sim -x --satellites 1 --host $STORJ_NETWORK_HOST4 network test bash "$SCRIPTDIR"/test-billing.sh

View File

@ -13,6 +13,7 @@ UPLINK_DEBUG_ADDR=""
readonly UPLINK_DEBUG_ADDR
export STORJ_ACCESS="${GATEWAY_0_ACCESS}"
export UPLINK_ACCESS="${STORJ_ACCESS}"
export STORJ_DEBUG_ADDR="${UPLINK_DEBUG_ADDR}"
# Vars
@ -36,7 +37,10 @@ uplink_test() {
local dst_dir="${temp_dir}/dst"
mkdir -p "${src_dir}" "${dst_dir}"
local uplink_dir="${temp_dir}/uplink"
local UPLINK_CONFIG_DIR="${temp_dir}/uplink"
export UPLINK_CONFIG_DIR
mkdir -p "$UPLINK_CONFIG_DIR"
touch "$UPLINK_CONFIG_DIR/config.ini"
random_bytes_file "2KiB" "${src_dir}/small-upload-testfile" # create 2KiB file of random bytes (inline)
random_bytes_file "5MiB" "${src_dir}/big-upload-testfile" # create 5MiB file of random bytes (remote)
@ -52,13 +56,12 @@ uplink_test() {
uplink cp "${src_dir}/multisegment-upload-testfile" "sj://$BUCKET/" --progress=false
uplink cp "${src_dir}/diff-size-segments" "sj://$BUCKET/" --progress=false
uplink <"${src_dir}/put-file" put "sj://$BUCKET/put-file"
uplink <"${src_dir}/put-file" cp - "sj://$BUCKET/put-file"
uplink --config-dir "${uplink_dir}" import named-access "${STORJ_ACCESS}"
uplink access import -f named-access "${STORJ_ACCESS}"
local files
files=$(STORJ_ACCESS='' uplink --config-dir "${uplink_dir}" --access named-access \
ls "sj://${BUCKET}" | tee "${temp_dir}/list" | wc -l)
files=$(uplink ls "sj://${BUCKET}" --access named-access | grep -v '^KIND' | tee "${temp_dir}/list" | wc -l)
local expected_files="5"
if [ "${files}" == "${expected_files}" ]; then
echo "listing returns ${files} files"
@ -68,7 +71,7 @@ uplink_test() {
fi
local size_check
size_check=$(awk <"${temp_dir}/list" '{if($4 == "0") print "invalid size";}')
size_check=$(awk <"${temp_dir}/list" '{if($3 == "0") print "invalid size";}')
if [ "${size_check}" != "" ]; then
echo "listing returns invalid size for one of the objects:"
cat "${temp_dir}/list"
@ -82,7 +85,7 @@ uplink_test() {
uplink cp "sj://$BUCKET/multisegment-upload-testfile" "${dst_dir}" --progress=false
uplink cp "sj://$BUCKET/diff-size-segments" "${dst_dir}" --progress=false
uplink cp "sj://$BUCKET/put-file" "${dst_dir}" --progress=false
uplink cat "sj://$BUCKET/put-file" >>"${dst_dir}/put-file-from-cat"
uplink cp "sj://$BUCKET/put-file" - >>"${dst_dir}/put-file-from-cat"
uplink rm "sj://$BUCKET/small-upload-testfile"
uplink rm "sj://$BUCKET/big-upload-testfile"
@ -110,7 +113,7 @@ uplink_test() {
uplink rb "sj://$BUCKET" --force
if [ "$(uplink ls | grep -c "No buckets")" = "0" ]; then
if [ "$(uplink ls | grep -c '^CREATED')" != "0" ]; then
echo "uplink didn't remove the entire bucket with the 'force' flag"
exit 1
fi

View File

@ -10,16 +10,18 @@ cleanup(){
trap cleanup EXIT
# workaround for issues with automatic accepting monitoring question
# with first run we need to accept question y/n about monitoring
export UPLINK_CONFIG_DIR=$TMPDIR/uplink
mkdir -p "$UPLINK_CONFIG_DIR"
touch "$UPLINK_CONFIG_DIR/config.ini"
uplink access import -f test-access "$GATEWAY_0_ACCESS" --use
BUCKET=bucket-for-rs-change
SRC_DIR=$TMPDIR/source
DST_DIR=$TMPDIR/dst
UPLINK_DIR=$TMPDIR/uplink
mkdir -p "$SRC_DIR" "$DST_DIR"
UPLINK_DEBUG_ADDR=""
export STORJ_ACCESS=$GATEWAY_0_ACCESS
export STORJ_DEBUG_ADDR=$UPLINK_DEBUG_ADDR
uplink cp "sj://$BUCKET/big-upload-testfile" "$DST_DIR" --progress=false

View File

@ -7,28 +7,29 @@ cleanup(){
rm -rf "$TMPDIR"
echo "cleaned up test successfully"
}
trap cleanup EXIT
# workaround for issues with automatic accepting monitoring question
# with first run we need to accept question y/n about monitoring
export UPLINK_CONFIG_DIR=$TMPDIR/uplink
mkdir -p "$UPLINK_CONFIG_DIR"
touch "$UPLINK_CONFIG_DIR/config.ini"
uplink access import -f test-access "$GATEWAY_0_ACCESS" --use
BUCKET=bucket-for-rs-change
SRC_DIR=$TMPDIR/source
DST_DIR=$TMPDIR/dst
UPLINK_DIR=$TMPDIR/uplink
mkdir -p "$SRC_DIR" "$DST_DIR"
random_bytes_file () {
size=$1
output=$2
head -c $size </dev/urandom > $output
head -c "$size" </dev/urandom > "$output"
}
random_bytes_file "1MiB" "$SRC_DIR/big-upload-testfile"
UPLINK_DEBUG_ADDR=""
export STORJ_ACCESS=$GATEWAY_0_ACCESS
export STORJ_DEBUG_ADDR=$UPLINK_DEBUG_ADDR
uplink mb "sj://$BUCKET/"
uplink cp "$SRC_DIR/big-upload-testfile" "sj://$BUCKET/" --progress=false

View File

@ -7,16 +7,24 @@ TMPDIR=$(mktemp -d -t tmp.XXXXXXXXXX)
cleanup(){
rm -rf "$TMPDIR"
uplink --access "$GATEWAY_0_ACCESS" rm "sj://$BUCKET_WITH_ACCESS/$FOLDER_TO_SHARE_FILE/testfile"
uplink --access "$GATEWAY_0_ACCESS" rm "sj://$BUCKET_WITH_ACCESS/another-testfile"
uplink --access "$GATEWAY_0_ACCESS" rm "sj://$BUCKET_WITHOUT_ACCESS/another-testfile"
uplink --access "$GATEWAY_0_ACCESS" rb "sj://$BUCKET_WITHOUT_ACCESS"
uplink --access "$GATEWAY_0_ACCESS" rb "sj://$BUCKET_WITH_ACCESS"
uplink rm "sj://$BUCKET_WITH_ACCESS/$FOLDER_TO_SHARE_FILE/testfile"
uplink rm "sj://$BUCKET_WITH_ACCESS/another-testfile"
uplink rm "sj://$BUCKET_WITHOUT_ACCESS/another-testfile"
uplink rb "sj://$BUCKET_WITHOUT_ACCESS"
uplink rb "sj://$BUCKET_WITH_ACCESS"
echo "cleaned up test successfully"
}
trap cleanup EXIT
trap 'failure ${LINENO} "$BASH_COMMAND"' ERR
# workaround for issues with automatic accepting monitoring question
# with first run we need to accept question y/n about monitoring
export UPLINK_CONFIG_DIR=$TMPDIR/uplink
mkdir -p "$UPLINK_CONFIG_DIR"
touch "$UPLINK_CONFIG_DIR/config.ini"
uplink access import -f test-access "$GATEWAY_0_ACCESS" --use
BUCKET_WITHOUT_ACCESS=bucket1
BUCKET_WITH_ACCESS=bucket2
@ -30,26 +38,27 @@ mkdir -p "$SRC_DIR" "$DST_DIR"
random_bytes_file "2KiB" "$SRC_DIR/another-testfile" # create 2KiB file of random bytes (inline)
random_bytes_file "5KiB" "$SRC_DIR/testfile" # create 5KiB file of random bytes (remote)
uplink --access "$GATEWAY_0_ACCESS" mb "sj://$BUCKET_WITHOUT_ACCESS/"
uplink --access "$GATEWAY_0_ACCESS" mb "sj://$BUCKET_WITH_ACCESS/"
uplink mb "sj://$BUCKET_WITHOUT_ACCESS/"
uplink mb "sj://$BUCKET_WITH_ACCESS/"
uplink --access "$GATEWAY_0_ACCESS" cp "$SRC_DIR/testfile" "sj://$BUCKET_WITH_ACCESS/$FOLDER_TO_SHARE_FILE/"
uplink --access "$GATEWAY_0_ACCESS" cp "$SRC_DIR/another-testfile" "sj://$BUCKET_WITH_ACCESS/"
uplink --access "$GATEWAY_0_ACCESS" cp "$SRC_DIR/another-testfile" "sj://$BUCKET_WITHOUT_ACCESS/"
uplink cp "$SRC_DIR/testfile" "sj://$BUCKET_WITH_ACCESS/$FOLDER_TO_SHARE_FILE/" --progress=false
uplink cp "$SRC_DIR/another-testfile" "sj://$BUCKET_WITH_ACCESS/" --progress=false
uplink cp "$SRC_DIR/another-testfile" "sj://$BUCKET_WITHOUT_ACCESS/" --progress=false
# Make access with readonly rights
SHARED_ACCESS=$(uplink --access "$GATEWAY_0_ACCESS" share --allowed-path-prefix sj://$BUCKET_WITH_ACCESS/$FOLDER_TO_SHARE_FILE/ --readonly | grep Access | cut -d: -f2)
SHARED_ACCESS=$(uplink share "sj://$BUCKET_WITH_ACCESS/$FOLDER_TO_SHARE_FILE/" --readonly | grep Access | awk '{print $3}')
echo "Shared access: $SHARED_ACCESS"
uplink cp "$SRC_DIR/another-testfile" "sj://$BUCKET_WITH_ACCESS/$FOLDER_TO_SHARE_FILE/" --access $SHARED_ACCESS
uplink cp "$SRC_DIR/another-testfile" "sj://$BUCKET_WITH_ACCESS/$FOLDER_TO_SHARE_FILE/" --access $SHARED_ACCESS --progress=false
require_error_exit_code $?
uplink cp "$SRC_DIR/testfile" "sj://$BUCKET_WITHOUT_ACCESS/" --access $SHARED_ACCESS
uplink cp "$SRC_DIR/testfile" "sj://$BUCKET_WITHOUT_ACCESS/" --access $SHARED_ACCESS --progress=false
require_error_exit_code $?
uplink cp "sj://$BUCKET_WITHOUT_ACCESS/another-testfile" "$SRC_DIR/" --access $SHARED_ACCESS
uplink cp "sj://$BUCKET_WITHOUT_ACCESS/another-testfile" "$SRC_DIR/" --access $SHARED_ACCESS --progress=false
require_error_exit_code $?
NUMBER_OF_BUCKETS=$(uplink ls --access $SHARED_ACCESS | wc -l)
NUMBER_OF_BUCKETS=$(uplink ls --access $SHARED_ACCESS | grep -v '^CREATED' | wc -l)
# We share one bucket, so we expect to see only one bucket in the output of ls command
if [ $NUMBER_OF_BUCKETS -eq 1 ]; then
@ -59,6 +68,6 @@ else
exit 1
fi
uplink cp "sj://$BUCKET_WITH_ACCESS/$FOLDER_TO_SHARE_FILE/testfile" "$DST_DIR" --access $SHARED_ACCESS
uplink cp "sj://$BUCKET_WITH_ACCESS/$FOLDER_TO_SHARE_FILE/testfile" "$DST_DIR" --access $SHARED_ACCESS --progress=false
compare_files "$SRC_DIR/testfile" "$DST_DIR/testfile"

View File

@ -16,7 +16,8 @@ trap 'failure ${LINENO} "$BASH_COMMAND"' ERR
BUCKET=bucket-123
SRC_DIR=$TMPDIR/source
DST_DIR=$TMPDIR/dst
UPLINK_DIR=$TMPDIR/uplink
export UPLINK_CONFIG_DIR=$TMPDIR/uplink
mkdir -p "$SRC_DIR" "$DST_DIR"
@ -28,10 +29,12 @@ random_bytes_file "13MiB" "$SRC_DIR/diff-size-segments" # create 1
random_bytes_file "100KiB" "$SRC_DIR/put-file" # create 100KiB file of random bytes (remote)
UPLINK_DEBUG_ADDR=""
# workaround for issues with automatic accepting monitoring question
# with first run we need to accept question y/n about monitoring
mkdir -p "$UPLINK_CONFIG_DIR"
touch "$UPLINK_CONFIG_DIR/config.ini"
export STORJ_ACCESS=$GATEWAY_0_ACCESS
export STORJ_DEBUG_ADDR=$UPLINK_DEBUG_ADDR
uplink access import -f test-access "$GATEWAY_0_ACCESS" --use
uplink mb "sj://$BUCKET/"
@ -44,12 +47,11 @@ uplink cp "$SRC_DIR/diff-size-segments" "sj://$BUCKET/" --progress=fal
# TODO change hardcoded part size from 64MiB to 6MiB
uplink cp "$SRC_DIR/diff-size-segments" "sj://$BUCKET/diff-size-segments_upl_p2" --progress=false --parallelism 2
cat "$SRC_DIR/put-file" | uplink put "sj://$BUCKET/put-file"
uplink --config-dir "$UPLINK_DIR" import named-access $STORJ_ACCESS
FILES=$(STORJ_ACCESS= uplink --config-dir "$UPLINK_DIR" --access named-access ls "sj://$BUCKET" | tee $TMPDIR/list | wc -l)
EXPECTED_FILES="6"
if [ "$FILES" == $EXPECTED_FILES ]
# check named access
uplink access import -f named-access "$GATEWAY_0_ACCESS"
FILES=$(uplink ls "sj://$BUCKET" --access named-access | tee "$TMPDIR/list" | wc -l)
EXPECTED_FILES="6" # 5 objects + one line more for headers
if [ "$FILES" == "$EXPECTED_FILES" ]
then
echo "listing returns $FILES files"
else
@ -67,12 +69,10 @@ fi
uplink ls "sj://$BUCKET/non-existing-prefix"
uplink cp "sj://$BUCKET/small-upload-testfile" "$DST_DIR" --progress=false
uplink cp "sj://$BUCKET/big-upload-testfile" "$DST_DIR" --progress=false
uplink cp "sj://$BUCKET/multisegment-upload-testfile" "$DST_DIR" --progress=false
uplink cp "sj://$BUCKET/diff-size-segments" "$DST_DIR" --progress=false
uplink cp "sj://$BUCKET/put-file" "$DST_DIR" --progress=false
uplink cat "sj://$BUCKET/put-file" >> "$DST_DIR/put-file-from-cat"
uplink cp "sj://$BUCKET/small-upload-testfile" "$DST_DIR" --progress=false
uplink cp "sj://$BUCKET/big-upload-testfile" "$DST_DIR" --progress=false
uplink cp "sj://$BUCKET/multisegment-upload-testfile" "$DST_DIR" --progress=false
uplink cp "sj://$BUCKET/diff-size-segments" "$DST_DIR" --progress=false
# test parallelism of single object
uplink cp "sj://$BUCKET/multisegment-upload-testfile" "$DST_DIR/multisegment-upload-testfile_p2" --parallelism 2 --progress=false
@ -82,67 +82,42 @@ uplink cp "sj://$BUCKET/diff-size-segments_upl_p2" "$DST_DIR/diff-size-segmen
uplink ls "sj://$BUCKET/small-upload-testfile" | grep "small-upload-testfile"
# test ranged download of object
uplink cp "sj://$BUCKET/put-file" "$DST_DIR/put-file-from-cp-range" --range bytes=0-5 --progress=false
uplink cp "sj://$BUCKET/small-upload-testfile" "$DST_DIR/file-from-cp-range" --progress=false --range bytes=0-5
EXPECTED_FILE_SIZE="6"
ACTUAL_FILE_SIZE=$(get_file_size "$DST_DIR/put-file-from-cp-range")
ACTUAL_FILE_SIZE=$(get_file_size "$DST_DIR/file-from-cp-range")
if [ "$EXPECTED_FILE_SIZE" != "$ACTUAL_FILE_SIZE" ]
then
echo "expected downloaded file size to be equal to $EXPECTED_FILE_SIZE, got $ACTUAL_FILE_SIZE"
exit 1
fi
# test ranged download with multiple byte range
set +e
EXPECTED_ERROR="retrieval of multiple byte ranges of data not supported: 2 provided"
ERROR=$(uplink cp "sj://$BUCKET/put-file" "$DST_DIR/put-file-from-cp-range" --range bytes=0-5,6-10)
if [ $ERROR != $EXPECTED_ERROR ]
then
echo EXPECTED_ERROR
exit 1
fi
set -e
# test server-side move operation
uplink mv "sj://$BUCKET/big-upload-testfile" "sj://$BUCKET/moved-big-upload-testfile"
uplink ls "sj://$BUCKET/moved-big-upload-testfile" | grep "moved-big-upload-testfile"
uplink mv "sj://$BUCKET/moved-big-upload-testfile" "sj://$BUCKET/big-upload-testfile"
# test server-side move operation between different prefixes.
# destination and source should both be prefixes.
set +e
EXPECTED_ERROR="both source and destination should be a prefixes"
ERROR=$(uplink mv "sj://$BUCKET/" "sj://$BUCKET/new-prefix/file")
if [ $ERROR != $EXPECTED_ERROR ]
# move prefix
uplink mv "sj://$BUCKET/" "sj://$BUCKET/my-prefix/" --recursive
FILES=$(uplink ls "sj://$BUCKET/my-prefix/" | tee "$TMPDIR/list" | wc -l)
EXPECTED_FILES="6" # 5 objects + one line more for headers
if [ "$FILES" == "$EXPECTED_FILES" ]
then
echo EXPECTED_ERROR
exit 1
fi
set -e
# checking if all files are moved from bucket to bucket/prefix.
EXPECTED_FILES=$(uplink ls "sj://$BUCKET/" | wc -l)
uplink mv "sj://$BUCKET/" "sj://$BUCKET/new-prefix/"
FILES=$(uplink ls "sj://$BUCKET/new-prefix/" | wc -l)
if [ "$FILES" == $EXPECTED_FILES ]
then
echo "listing returns $FILES files as expected"
echo "listing after move returns $FILES files"
else
echo "listing returns $FILES files but want $EXPECTED_FILES"
echo "listing after move returns $FILES files but want $EXPECTED_FILES"
cat "$TMPDIR/list"
exit 1
fi
# moving files back.
uplink mv "sj://$BUCKET/new-prefix/" "sj://$BUCKET/"
uplink ls "sj://$BUCKET/"
uplink mv "sj://$BUCKET/my-prefix/" "sj://$BUCKET/" --recursive
uplink rm "sj://$BUCKET/small-upload-testfile"
uplink rm "sj://$BUCKET/big-upload-testfile"
uplink rm "sj://$BUCKET/multisegment-upload-testfile"
uplink rm "sj://$BUCKET/diff-size-segments"
uplink rm "sj://$BUCKET/diff-size-segments_upl_p2"
uplink rm "sj://$BUCKET/put-file"
uplink ls "sj://$BUCKET"
uplink ls -x "sj://$BUCKET"
uplink rb "sj://$BUCKET"
@ -150,8 +125,6 @@ compare_files "$SRC_DIR/small-upload-testfile" "$DST_DIR/small-upload-tes
compare_files "$SRC_DIR/big-upload-testfile" "$DST_DIR/big-upload-testfile"
compare_files "$SRC_DIR/multisegment-upload-testfile" "$DST_DIR/multisegment-upload-testfile"
compare_files "$SRC_DIR/diff-size-segments" "$DST_DIR/diff-size-segments"
compare_files "$SRC_DIR/put-file" "$DST_DIR/put-file"
compare_files "$SRC_DIR/put-file" "$DST_DIR/put-file-from-cat"
# test parallelism of single object
compare_files "$SRC_DIR/multisegment-upload-testfile" "$DST_DIR/multisegment-upload-testfile_p2"
@ -167,7 +140,7 @@ done
uplink rb "sj://$BUCKET" --force
if [ "$(uplink ls | grep "No buckets" | wc -l)" = "0" ]; then
if [ "$(uplink ls | wc -l)" != "0" ]; then
echo "an integration test did not clean up after itself entirely"
exit 1
fi

View File

@ -1,132 +0,0 @@
#!/usr/bin/env bash
# Integration test for the uplinkng CLI. Exercises, in order: access
# import/use, bucket create, uploads, listing (count + size sanity),
# downloads (including a ranged download), server-side move of a single
# object and of a whole prefix, deletes, and forced bucket removal.
# Requires $GATEWAY_0_ACCESS in the environment and the helpers from
# utils.sh (random_bytes_file, get_file_size, compare_files, failure).
set -ueo pipefail
SCRIPTDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
source $SCRIPTDIR/utils.sh
TMPDIR=$(mktemp -d -t tmp.XXXXXXXXXX)
# Remove all scratch files (sources, downloads, uplink config) on exit.
cleanup(){
rm -rf "$TMPDIR"
echo "cleaned up test successfully"
}
trap cleanup EXIT
# On any failing command (set -e), report the line and command before the
# EXIT trap cleans up.
trap 'failure ${LINENO} "$BASH_COMMAND"' ERR
BUCKET=bucket-123
SRC_DIR=$TMPDIR/source
DST_DIR=$TMPDIR/dst
# Point uplinkng at an isolated, throwaway config directory.
export UPLINK_CONFIG_DIR=$TMPDIR/uplink
mkdir -p "$SRC_DIR" "$DST_DIR"
random_bytes_file "2KiB"    "$SRC_DIR/small-upload-testfile"          # create 2KiB file of random bytes (inline)
random_bytes_file "5MiB"    "$SRC_DIR/big-upload-testfile"            # create 5MiB file of random bytes (remote)
# this is special case where we need to test at least one remote segment and inline segment of exact size 0
random_bytes_file "12MiB"   "$SRC_DIR/multisegment-upload-testfile"   # create 12MiB file of random bytes (1 remote segments + inline)
random_bytes_file "13MiB"   "$SRC_DIR/diff-size-segments"             # create 13MiB file of random bytes (2 remote segments)
random_bytes_file "100KiB"  "$SRC_DIR/put-file"                       # create 100KiB file of random bytes (remote)
# workaround for issues with automatic accepting monitoring question
# with first run we need to accept question y/n about monitoring
mkdir -p "$UPLINK_CONFIG_DIR"
touch "$UPLINK_CONFIG_DIR/config.ini"
# -f overwrites any access left over from a previous run.
uplinkng access import -f test-access "$GATEWAY_0_ACCESS"
uplinkng access use test-access
uplinkng mb "sj://$BUCKET/"
uplinkng cp "$SRC_DIR/small-upload-testfile"        "sj://$BUCKET/" --progress=false
uplinkng cp "$SRC_DIR/big-upload-testfile"          "sj://$BUCKET/" --progress=false
uplinkng cp "$SRC_DIR/multisegment-upload-testfile" "sj://$BUCKET/" --progress=false
uplinkng cp "$SRC_DIR/diff-size-segments"           "sj://$BUCKET/" --progress=false
# check named access
uplinkng access import -f named-access "$GATEWAY_0_ACCESS"
# Count the listing lines via --access (tee keeps a copy for diagnostics).
FILES=$(uplinkng ls "sj://$BUCKET" --access named-access | tee "$TMPDIR/list" | wc -l)
# 4 uploaded objects + one header line from `uplinkng ls`.
EXPECTED_FILES="5"
if [ "$FILES" == "$EXPECTED_FILES" ]
then
echo "listing returns $FILES files"
else
echo "listing returns $FILES files but want $EXPECTED_FILES"
exit 1
fi

# The 4th column of `ls` output is the object size; none may be zero.
SIZE_CHECK=$(cat "$TMPDIR/list" | awk '{if($4 == "0") print "invalid size";}')
if [ "$SIZE_CHECK" != "" ]
then
echo "listing returns invalid size for one of the objects:"
cat "$TMPDIR/list"
exit 1
fi

# Listing a non-existent prefix must succeed (empty result, exit 0).
uplinkng ls "sj://$BUCKET/non-existing-prefix"

uplinkng cp "sj://$BUCKET/small-upload-testfile"        "$DST_DIR" --progress=false
uplinkng cp "sj://$BUCKET/big-upload-testfile"          "$DST_DIR" --progress=false
uplinkng cp "sj://$BUCKET/multisegment-upload-testfile" "$DST_DIR" --progress=false
uplinkng cp "sj://$BUCKET/diff-size-segments"           "$DST_DIR" --progress=false

# Exact-key listing should print the object (grep fails the test otherwise).
uplinkng ls "sj://$BUCKET/small-upload-testfile" | grep "small-upload-testfile"

# test ranged download of object
uplinkng cp "sj://$BUCKET/small-upload-testfile" "$DST_DIR/file-from-cp-range" --progress=false --range bytes=0-5
# bytes=0-5 is an inclusive range, so 6 bytes are expected.
EXPECTED_FILE_SIZE="6"
ACTUAL_FILE_SIZE=$(get_file_size "$DST_DIR/file-from-cp-range")
if [ "$EXPECTED_FILE_SIZE" != "$ACTUAL_FILE_SIZE" ]
then
echo "expected downloaded file size to be equal to $EXPECTED_FILE_SIZE, got $ACTUAL_FILE_SIZE"
exit 1
fi

# test server-side move operation
uplinkng mv "sj://$BUCKET/big-upload-testfile" "sj://$BUCKET/moved-big-upload-testfile"
uplinkng ls "sj://$BUCKET/moved-big-upload-testfile" | grep "moved-big-upload-testfile"
# Move it back so later per-name deletes still find it.
uplinkng mv "sj://$BUCKET/moved-big-upload-testfile" "sj://$BUCKET/big-upload-testfile"

# move prefix
uplinkng mv "sj://$BUCKET/" "sj://$BUCKET/my-prefix/" --recursive
FILES=$(uplinkng ls "sj://$BUCKET/my-prefix/" | tee "$TMPDIR/list" | wc -l)
EXPECTED_FILES="5" # 4 objects + one line more for headers
if [ "$FILES" == "$EXPECTED_FILES" ]
then
echo "listing after move returns $FILES files"
else
echo "listing after move returns $FILES files but want $EXPECTED_FILES"
cat "$TMPDIR/list"
exit 1
fi
# Move everything back to the bucket root before cleanup.
uplinkng mv "sj://$BUCKET/my-prefix/" "sj://$BUCKET/" --recursive

uplinkng rm "sj://$BUCKET/small-upload-testfile"
uplinkng rm "sj://$BUCKET/big-upload-testfile"
uplinkng rm "sj://$BUCKET/multisegment-upload-testfile"
uplinkng rm "sj://$BUCKET/diff-size-segments"
uplinkng ls "sj://$BUCKET"
uplinkng ls -x "sj://$BUCKET"
# Bucket is now empty, so plain rb (no --force) must succeed.
uplinkng rb "sj://$BUCKET"

# Downloaded copies must match the uploaded sources byte-for-byte.
compare_files "$SRC_DIR/small-upload-testfile"        "$DST_DIR/small-upload-testfile"
compare_files "$SRC_DIR/big-upload-testfile"          "$DST_DIR/big-upload-testfile"
compare_files "$SRC_DIR/multisegment-upload-testfile" "$DST_DIR/multisegment-upload-testfile"
compare_files "$SRC_DIR/diff-size-segments"           "$DST_DIR/diff-size-segments"

# test deleting non empty bucket with --force flag
uplinkng mb "sj://$BUCKET/"
for i in $(seq -w 1 16); do
uplinkng cp "$SRC_DIR/small-upload-testfile" "sj://$BUCKET/small-file-$i" --progress=false
done
uplinkng rb "sj://$BUCKET" --force

# After the forced removal no buckets may remain; `ls` output must be empty.
if [ "$(uplinkng ls | wc -l)" != "0" ]; then
echo "an integration test did not clean up after itself entirely"
exit 1
fi

View File

@ -22,7 +22,6 @@ require (
github.com/Azure/azure-storage-blob-go v0.10.0 // indirect
github.com/Shopify/sarama v1.27.2 // indirect
github.com/StackExchange/wmi v0.0.0-20190523213315-cbe66965904d // indirect
github.com/VividCortex/ewma v1.1.1 // indirect
github.com/alecthomas/participle v0.2.1 // indirect
github.com/alicebob/gopher-json v0.0.0-20200520072559-a9ecdc9d1d3a // indirect
github.com/alicebob/miniredis/v2 v2.13.3 // indirect
@ -36,7 +35,6 @@ require (
github.com/cespare/xxhash/v2 v2.1.1 // indirect
github.com/cheekybits/genny v1.0.0 // indirect
github.com/cheggaaa/pb v1.0.29 // indirect
github.com/cheggaaa/pb/v3 v3.0.5 // indirect
github.com/cloudfoundry/gosigar v1.1.0 // indirect
github.com/coredns/coredns v1.4.0 // indirect
github.com/coreos/go-semver v0.3.0 // indirect
@ -106,7 +104,6 @@ require (
github.com/klauspost/reedsolomon v1.9.9 // indirect
github.com/lib/pq v1.10.2 // indirect
github.com/lucas-clemente/quic-go v0.25.0 // indirect
github.com/magiconair/properties v1.8.5 // indirect
github.com/mailru/easyjson v0.7.6 // indirect
github.com/marten-seemann/qtls-go1-16 v0.1.4 // indirect
github.com/marten-seemann/qtls-go1-17 v0.1.0 // indirect
@ -143,7 +140,6 @@ require (
github.com/olivere/elastic/v7 v7.0.22 // indirect
github.com/onsi/ginkgo v1.16.4 // indirect
github.com/oschwald/maxminddb-golang v1.8.0 // indirect
github.com/pelletier/go-toml v1.9.0 // indirect
github.com/philhofer/fwd v1.1.1 // indirect
github.com/pierrec/lz4 v2.5.2+incompatible // indirect
github.com/pkg/errors v0.9.1 // indirect
@ -164,14 +160,9 @@ require (
github.com/shopspring/decimal v1.2.0 // indirect
github.com/spacemonkeygo/spacelog v0.0.0-20180420211403-2296661a0572 // indirect
github.com/spaolacci/murmur3 v1.1.0 // indirect
github.com/spf13/afero v1.6.0 // indirect
github.com/spf13/cast v1.3.1 // indirect
github.com/spf13/cobra v1.1.3 // indirect
github.com/spf13/jwalterweatherman v1.1.0 // indirect
github.com/spf13/viper v1.7.1 // indirect
github.com/streadway/amqp v1.0.0 // indirect
github.com/stripe/stripe-go/v72 v72.51.0 // indirect
github.com/subosito/gotenv v1.2.0 // indirect
github.com/tidwall/gjson v1.3.5 // indirect
github.com/tidwall/match v1.0.1 // indirect
github.com/tidwall/pretty v1.0.0 // indirect
@ -188,11 +179,9 @@ require (
github.com/ysmood/gson v0.6.4 // indirect
github.com/ysmood/leakless v0.7.0 // indirect
github.com/yuin/gopher-lua v0.0.0-20191220021717-ab39c6098bdb // indirect
github.com/zeebo/admission/v3 v3.0.3 // indirect
github.com/zeebo/clingy v0.0.0-20220125233608-1457d47c8d8d // indirect
github.com/zeebo/errs v1.2.2 // indirect
github.com/zeebo/float16 v0.1.0 // indirect
github.com/zeebo/incenc v0.0.0-20180505221441-0d92902eec54 // indirect
github.com/zeebo/structs v1.0.2 // indirect
github.com/zeebo/errs/v2 v2.0.3 // indirect
go.etcd.io/bbolt v1.3.5 // indirect
go.etcd.io/etcd v0.0.0-20201125193152-8a03d2e9614b // indirect
go.opentelemetry.io/otel v0.18.0 // indirect
@ -205,7 +194,6 @@ require (
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2 // indirect
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c // indirect
golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27 // indirect
golang.org/x/term v0.0.0-20201210144234-2321bbc49cbf // indirect
golang.org/x/text v0.3.6 // indirect
golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e // indirect
golang.org/x/tools v0.1.1 // indirect

View File

@ -60,7 +60,6 @@ github.com/Shopify/toxiproxy v2.1.4+incompatible h1:TKdv8HiTLgE5wdJuEML90aBgNWso
github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI=
github.com/StackExchange/wmi v0.0.0-20190523213315-cbe66965904d h1:G0m3OIz70MZUWq3EgK3CesDbo8upS2Vm9/P3FtgI+Jk=
github.com/StackExchange/wmi v0.0.0-20190523213315-cbe66965904d/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg=
github.com/VividCortex/ewma v1.1.1 h1:MnEK4VOv6n0RSY4vtRe3h11qjxL3+t0B8yOL8iMXdcM=
github.com/VividCortex/ewma v1.1.1/go.mod h1:2Tkkvm3sRDVXaiyucHiACn4cqf7DpdyLvmxzcbUokwA=
github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g=
github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII=
@ -135,7 +134,6 @@ github.com/cheekybits/genny v1.0.0 h1:uGGa4nei+j20rOSeDeP5Of12XVm7TGUd4dJA9RDitf
github.com/cheekybits/genny v1.0.0/go.mod h1:+tQajlRqAUrPI7DOSpB0XAqZYtQakVtB7wXkRAgjxjQ=
github.com/cheggaaa/pb v1.0.29 h1:FckUN5ngEk2LpvuG0fw1GEFx6LtyY2pWI/Z2QgCnEYo=
github.com/cheggaaa/pb v1.0.29/go.mod h1:W40334L7FMC5JKWldsTWbdGjLo0RxUKK73K+TuPxX30=
github.com/cheggaaa/pb/v3 v3.0.5 h1:lmZOti7CraK9RSjzExsY53+WWfub9Qv13B5m4ptEoPE=
github.com/cheggaaa/pb/v3 v3.0.5/go.mod h1:X1L61/+36nz9bjIsrDU52qHKOQukUQe2Ge+YvGuquCw=
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
@ -548,7 +546,6 @@ github.com/magefile/mage v1.10.0/go.mod h1:z5UZb/iS3GoOSn0JgWuiw7dxlurVYTu+/jHXq
github.com/magefile/mage v1.11.0 h1:C/55Ywp9BpgVVclD3lRnSYCwXTYxmSppIgLeDYlNuls=
github.com/magefile/mage v1.11.0/go.mod h1:z5UZb/iS3GoOSn0JgWuiw7dxlurVYTu+/jHXqQg881A=
github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
github.com/magiconair/properties v1.8.5 h1:b6kJs+EmPFMYGkow9GiUyCyOvIwYetYJ3fSaWak/Gls=
github.com/magiconair/properties v1.8.5/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60=
github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mailru/easyjson v0.7.6 h1:8yTIVnZgCoiM1TgqoeTl+LfU5Jg6/xL3QhGQnimLYnA=
@ -713,7 +710,6 @@ github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144T
github.com/pborman/getopt v0.0.0-20180729010549-6fdd0a2c7117/go.mod h1:85jBQOZwpVEaDAr341tbn15RS4fCAsIst0qp7i8ex1o=
github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k=
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
github.com/pelletier/go-toml v1.9.0 h1:NOd0BRdOKpPf0SxkL3HxSQOG7rNh+4kl6PHcBPFs7Q0=
github.com/pelletier/go-toml v1.9.0/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c=
github.com/performancecopilot/speed v3.0.0+incompatible/go.mod h1:/CLtqpZ5gBg1M9iaPbIdPPGyKcA8hKdoy6hAWba7Yac=
github.com/philhofer/fwd v1.1.1 h1:GdGcTjf5RNAxwS4QLsiMzJYj5KEvPJD3Abr261yRQXQ=
@ -864,23 +860,19 @@ github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasO
github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI=
github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
github.com/spf13/afero v1.6.0 h1:xoax2sJ2DT8S8xA2paPFjDCScCNeWsg75VG0DLRreiY=
github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I=
github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
github.com/spf13/cast v1.3.1 h1:nFm6S0SMdyzrzcmThSipiEubIDy8WEXKNZ0UOgiRpng=
github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
github.com/spf13/cobra v1.1.3 h1:xghbfqPkxzxP3C/f3n5DdpAbdKLj4ZE4BWQI362l53M=
github.com/spf13/cobra v1.1.3/go.mod h1:pGADOWyqRD/YMrPZigI/zbliZ2wVD/23d+is3pSWzOo=
github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk=
github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo=
github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg=
github.com/spf13/viper v1.7.1 h1:pM5oEahlgWv/WnHXpgbKz7iLIxRf65tye2Ci+XFK5sk=
github.com/spf13/viper v1.7.1/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg=
github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw=
github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw=
@ -899,7 +891,6 @@ github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5Cc
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stripe/stripe-go/v72 v72.51.0 h1:scXELorHW1SnAfARThO1QayscOsfEIoIAUy0yxoTqxY=
github.com/stripe/stripe-go/v72 v72.51.0/go.mod h1:QwqJQtduHubZht9mek5sds9CtQcKFdsykV9ZepRWwo0=
github.com/subosito/gotenv v1.2.0 h1:Slr1R9HxAlEKefgq5jn9U+DnETlIUa6HfgEzj0g5d7s=
github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA=
github.com/tidwall/gjson v1.3.5 h1:2oW9FBNu8qt9jy5URgrzsVx/T/KSn3qn/smJQ0crlDQ=
@ -954,23 +945,21 @@ github.com/yuin/gopher-lua v0.0.0-20191220021717-ab39c6098bdb/go.mod h1:gqRgreBU
github.com/zeebo/admission/v2 v2.0.0/go.mod h1:gSeHGelDHW7Vq6UyJo2boeSt/6Dsnqpisv0i4YZSOyM=
github.com/zeebo/admission/v3 v3.0.1/go.mod h1:BP3isIv9qa2A7ugEratNq1dnl2oZRXaQUGdU7WXKtbw=
github.com/zeebo/admission/v3 v3.0.2/go.mod h1:BP3isIv9qa2A7ugEratNq1dnl2oZRXaQUGdU7WXKtbw=
github.com/zeebo/admission/v3 v3.0.3 h1:mwP/Y9EE8zRXOK8ma7CpEJfpiaKv4D4JWIOU4E8FPOw=
github.com/zeebo/admission/v3 v3.0.3/go.mod h1:2OWyAS5yo0Xvj2AEUosOjTUHxaY0oIIiCrXGKCYzWpo=
github.com/zeebo/assert v0.0.0-20181109011804-10f827ce2ed6/go.mod h1:yssERNPivllc1yU3BvpjYI5BUW+zglcz6QWqeVRL5t0=
github.com/zeebo/assert v1.1.0/go.mod h1:Pq9JiuJQpG8JLJdtkwrJESF0Foym2/D9XMU5ciN/wJ0=
github.com/zeebo/assert v1.3.0 h1:g7C04CbJuIDKNPFHmsk4hwZDO5O+kntRxzaUoNXj+IQ=
github.com/zeebo/assert v1.3.0/go.mod h1:Pq9JiuJQpG8JLJdtkwrJESF0Foym2/D9XMU5ciN/wJ0=
github.com/zeebo/clingy v0.0.0-20220125233608-1457d47c8d8d h1:UaVtVecb8eS/OUZAy9T4PW4RGWA4LpgWxWRrpgFZ7V0=
github.com/zeebo/clingy v0.0.0-20220125233608-1457d47c8d8d/go.mod h1:MHEhXvEfewflU7SSVKHI7nkdU+fpyxZ5XPPzj+5gYNw=
github.com/zeebo/errs v1.1.1/go.mod h1:Yj8dHrUQwls1bF3dr/vcSIu+qf4mI7idnTcHfoACc6I=
github.com/zeebo/errs v1.2.2 h1:5NFypMTuSdoySVTqlNs1dEoU21QVamMQJxW/Fii5O7g=
github.com/zeebo/errs v1.2.2/go.mod h1:sgbWHsvVuTPHcqJJGQ1WhI5KbWlHYz+2+2C/LSEtCw4=
github.com/zeebo/errs/v2 v2.0.3 h1:WwqAmopgot4ZC+CgIveP+H91Nf78NDEGWjtAXen45Hw=
github.com/zeebo/errs/v2 v2.0.3/go.mod h1:OKmvVZt4UqpyJrYFykDKm168ZquJ55pbbIVUICNmLN0=
github.com/zeebo/float16 v0.1.0 h1:kRqxv5og6z1emEyz5FpW0/BVHe5VfxEAw6b1ljCZlUc=
github.com/zeebo/float16 v0.1.0/go.mod h1:fssGvvXu+XS8MH57cKmyrLB/cqioYeYX/2mXCN3a5wo=
github.com/zeebo/incenc v0.0.0-20180505221441-0d92902eec54 h1:+cwNE5KJ3pika4HuzmDHkDlK5myo0G9Sv+eO7WWxnUQ=
github.com/zeebo/incenc v0.0.0-20180505221441-0d92902eec54/go.mod h1:EI8LcOBDlSL3POyqwC1eJhOYlMBMidES+613EtmmT5w=
github.com/zeebo/ini v0.0.0-20210331155437-86af75b4f524/go.mod h1:oiTrvEJ3c6v+Kpfz1tun0BO+EuR3eKdH4tF+WvEbjw8=
github.com/zeebo/structs v1.0.2 h1:kvcd7s2LqXuO9cdV5LqrGHCOAfCBXaZpKCA3jD9SJIc=
github.com/zeebo/structs v1.0.2/go.mod h1:LphfpprlqJQcbCq+eA3iIK/NsejMwk9mlfH/tM1XuKQ=
github.com/zenazn/goji v0.9.0/go.mod h1:7S9M489iMyHBNxwZnk9/EHS098H4/F6TATF2mIxtB1Q=
go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
@ -1205,7 +1194,6 @@ golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27 h1:XDXtA5hveEEV8JB2l7nhMTp3t
golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20201210144234-2321bbc49cbf h1:MZ2shdL+ZM/XzY3ZGOnh4Nlpnxz5GSOhOmtHo3iPU6M=
golang.org/x/term v0.0.0-20201210144234-2321bbc49cbf/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=