cmd/uplink: use DeleteBucketWithObjects for bucket force deletion
This PR updates the `uplink rb --force` command to use the new libuplink API `DeleteBucketWithObjects`. It also updates the `DeleteBucket` endpoint to return a specific error message when a bucket receives concurrent writes while it is being deleted.

Change-Id: Ic9593d55b0c27b26cd8966dd1bc8cd1e02a6666e
parent 36d752e92d
commit af773ec8a6
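For context, here is a minimal sketch of the libuplink call the force path now relies on. It is not part of the change itself; the access grant and bucket name are placeholders.

package main

import (
    "context"
    "fmt"
    "log"

    "storj.io/uplink"
)

func main() {
    ctx := context.Background()

    // Placeholder access grant; in the CLI this comes from the uplink config.
    access, err := uplink.ParseAccess("<access-grant>")
    if err != nil {
        log.Fatal(err)
    }

    project, err := uplink.OpenProject(ctx, access)
    if err != nil {
        log.Fatal(err)
    }
    defer project.Close()

    // DeleteBucketWithObjects removes the bucket together with all of its
    // objects on the satellite side, replacing the client-side
    // list-and-delete loop shown in the diff below.
    bucket, err := project.DeleteBucketWithObjects(ctx, "my-bucket")
    if err != nil {
        log.Fatal(err)
    }
    fmt.Printf("deleted bucket %q\n", bucket.Name)
}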
@@ -9,7 +9,6 @@ import (
     "github.com/spf13/cobra"

     "storj.io/common/fpath"
-    "storj.io/uplink"
 )

 var (
@@ -53,51 +52,25 @@ func deleteBucket(cmd *cobra.Command, args []string) (err error) {
     }
     defer closeProject(project)

-    successes := 0
-    failures := 0
     defer func() {
-        if successes > 0 {
-            fmt.Printf("(%d) files from bucket %s have been deleted\n", successes, dst.Bucket())
-        }
-        if failures > 0 {
-            fmt.Printf("(%d) files from bucket %s have NOT been deleted\n", failures, dst.Bucket())
-        }
-        if err == nil && failures == 0 {
-            fmt.Printf("Bucket %s have been deleted\n", dst.Bucket())
+        if err != nil {
+            fmt.Printf("Bucket %s has NOT been deleted\n %+v", dst.Bucket(), err.Error())
         } else {
-            fmt.Printf("Bucket %s have NOT been deleted\n", dst.Bucket())
+            fmt.Printf("Bucket %s has been deleted\n", dst.Bucket())
         }
     }()

     if *rbForceFlag {
-        // TODO add retry in case of failures
-        objects := project.ListObjects(ctx, dst.Bucket(), &uplink.ListObjectsOptions{
-            Recursive: true,
-        })
-
-        for objects.Next() {
-            object := objects.Item()
-            path := object.Key
-            _, err := project.DeleteObject(ctx, dst.Bucket(), path)
-            if err != nil {
-                fmt.Printf("failed to delete encrypted object, cannot empty bucket %q: %+v\n", dst.Bucket(), err)
-                failures++
-                continue
-            }
-            successes++
-            if successes%10 == 0 {
-                fmt.Printf("(%d) files from bucket %s have been deleted\n", successes, dst.Bucket())
-            }
-        }
-        if err := objects.Err(); err != nil {
-            return err
-        }
-    }
-
-    if failures == 0 {
-        if _, err := project.DeleteBucket(ctx, dst.Bucket()); err != nil {
+        //TODO: Do we need to have retry here?
+        if _, err := project.DeleteBucketWithObjects(ctx, dst.Bucket()); err != nil {
             return convertError(err, dst)
         }
+
+        return nil
     }

+    if _, err := project.DeleteBucket(ctx, dst.Bucket()); err != nil {
+        return convertError(err, dst)
+    }
+
     return nil
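With this change the force path surfaces whatever error the satellite reports instead of counting per-object failures. Below is a hedged sketch of how a caller of libuplink might distinguish the common outcomes using the exported `uplink.ErrBucketNotFound` sentinel; the package and helper name are illustrative, not part of this change.

package example

import (
    "context"
    "errors"
    "fmt"

    "storj.io/uplink"
)

// forceDelete is a hypothetical helper mirroring what `rb --force` does now:
// a single server-side call instead of listing and deleting objects one by one.
func forceDelete(ctx context.Context, project *uplink.Project, bucket string) error {
    if _, err := project.DeleteBucketWithObjects(ctx, bucket); err != nil {
        if errors.Is(err, uplink.ErrBucketNotFound) {
            // Already gone; treat as success for idempotent cleanup.
            return nil
        }
        return fmt.Errorf("force deleting bucket %q: %w", bucket, err)
    }
    return nil
}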
go.mod
@@ -46,5 +46,5 @@ require (
     storj.io/drpc v0.0.14
     storj.io/monkit-jaeger v0.0.0-20200518165323-80778fc3f91b
     storj.io/private v0.0.0-20200818170340-c2963305092f
-    storj.io/uplink v1.2.1-0.20200819222810-c04778f35988
+    storj.io/uplink v1.2.1-0.20200827173845-d4d04d4dd802
 )
go.sum
@@ -742,5 +742,5 @@ storj.io/monkit-jaeger v0.0.0-20200518165323-80778fc3f91b h1:Bbg9JCtY6l3HrDxs3BX
 storj.io/monkit-jaeger v0.0.0-20200518165323-80778fc3f91b/go.mod h1:gj4vuCeyCRjRmH8LIrgoyU9Dc9uR6H+/GcDUXmTbf80=
 storj.io/private v0.0.0-20200818170340-c2963305092f h1:1EC5IuctBVgxs3SQI2N2f6u5QdwTaX8IM9QAkFKGbMQ=
 storj.io/private v0.0.0-20200818170340-c2963305092f/go.mod h1:3BB0H9SmnJDfgk55uZli6DLHmhLiOdKiDY58ZI2e+pk=
-storj.io/uplink v1.2.1-0.20200819222810-c04778f35988 h1:skEsBbHzfky/DZMLIYcDWe+VWFO4DizqHBvN2CKJp6Y=
-storj.io/uplink v1.2.1-0.20200819222810-c04778f35988/go.mod h1:8GC+v/MBNdW8I++lroYSMvVPsruufHODY6FnnG5p3xw=
+storj.io/uplink v1.2.1-0.20200827173845-d4d04d4dd802 h1:sRbCv3PTF2QDfagrq+p1FPE1dkyN7EhEK+7aCO45Qdk=
+storj.io/uplink v1.2.1-0.20200827173845-d4d04d4dd802/go.mod h1:8GC+v/MBNdW8I++lroYSMvVPsruufHODY6FnnG5p3xw=
@@ -455,18 +455,18 @@ func (endpoint *Endpoint) deleteBucketNotEmpty(ctx context.Context, projectID uu
     // Delete all zombie objects that have first segment.
     _, err = endpoint.deleteByPrefix(ctx, projectID, bucketName, firstSegment)
     if err != nil {
-        return nil, 0, rpcstatus.Error(rpcstatus.Internal, err.Error())
+        return nil, deletedCount, rpcstatus.Error(rpcstatus.Internal, err.Error())
     }

     err = endpoint.metainfo.DeleteBucket(ctx, bucketName, projectID)
     if err != nil {
         if ErrBucketNotEmpty.Has(err) {
-            return nil, 0, rpcstatus.Error(rpcstatus.FailedPrecondition, err.Error())
+            return nil, deletedCount, rpcstatus.Error(rpcstatus.FailedPrecondition, "cannot delete the bucket because it's being used by another process")
         }
         if storj.ErrBucketNotFound.Has(err) {
             return bucketName, 0, nil
         }
-        return nil, 0, rpcstatus.Error(rpcstatus.Internal, err.Error())
+        return nil, deletedCount, rpcstatus.Error(rpcstatus.Internal, err.Error())
     }

     return bucketName, deletedCount, nil
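The endpoint now reports the number of objects it already deleted even when the bucket deletion itself fails, and it replaces the raw ErrBucketNotEmpty text with a clearer message for the concurrent-write case. A small sketch of how such a status error is built and recognized; it assumes `rpcstatus.Code` is available in storj.io/common/rpc/rpcstatus for the reverse mapping, which is not shown in this diff.

package main

import (
    "fmt"

    "storj.io/common/rpc/rpcstatus"
)

func main() {
    // Build the same kind of status error the endpoint returns when the
    // bucket is still receiving writes while being deleted.
    err := rpcstatus.Error(rpcstatus.FailedPrecondition,
        "cannot delete the bucket because it's being used by another process")

    // Assumed helper: rpcstatus.Code extracts the status code from the error.
    if rpcstatus.Code(err) == rpcstatus.FailedPrecondition {
        fmt.Println("bucket delete refused:", err)
    }
}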
@@ -104,6 +104,13 @@ compare_files "$SRC_DIR/put-file" "$DST_DIR/put-file-from-ca
 # test deleting non empty bucket with --force flag
 uplink mb "sj://$BUCKET/"
-
-uplink cp "$SRC_DIR/small-upload-testfile" "sj://$BUCKET/" --progress=false
+for i in $(seq -w 1 16); do
+    uplink cp "$SRC_DIR/small-upload-testfile" "sj://$BUCKET/small-file-$i" --progress=false
+done
+
 uplink rb "sj://$BUCKET" --force
+uplink rb "sj://$BUCKET" --force

+if [ "$(uplink ls | grep "No buckets" | wc -l)" = "0" ]; then
+    echo "an integration test did not clean up after itself entirely"
+    exit 1
+fi