2020-06-29 21:31:23 +01:00
|
|
|
// Copyright (C) 2020 Storj Labs, Inc.
|
|
|
|
// See LICENSE for copying information.
|
|
|
|
|
|
|
|
package objectdeletion
|
|
|
|
|
|
|
|
import (
|
|
|
|
"context"
|
|
|
|
|
2020-08-11 14:00:57 +01:00
|
|
|
"github.com/zeebo/errs"
|
2020-06-29 21:31:23 +01:00
|
|
|
"go.uber.org/zap"
|
2020-07-27 21:12:14 +01:00
|
|
|
|
|
|
|
"storj.io/common/pb"
|
2020-09-03 14:54:56 +01:00
|
|
|
"storj.io/storj/satellite/metainfo/metabase"
|
2020-06-29 21:31:23 +01:00
|
|
|
)
|
|
|
|
|
|
|
|
// Report represents the deletion status report.
type Report struct {
	// Deleted lists the object states for which deletion succeeded.
	Deleted []*ObjectState
	// Failed lists the object states for which deletion did not succeed.
	Failed []*ObjectState
}
|
|
|
|
|
|
|
|
// DeletedPointers returns all deleted pointers in a report.
|
|
|
|
func (r Report) DeletedPointers() []*pb.Pointer {
|
|
|
|
pointers := make([]*pb.Pointer, 0, len(r.Deleted))
|
|
|
|
for _, d := range r.Deleted {
|
|
|
|
pointers = append(pointers, d.LastSegment)
|
|
|
|
pointers = append(pointers, d.OtherSegments...)
|
|
|
|
}
|
|
|
|
|
|
|
|
return pointers
|
2020-06-29 21:31:23 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
// HasFailures returns wether a delete operation has failures.
|
|
|
|
func (r Report) HasFailures() bool {
|
|
|
|
return len(r.Failed) > 0
|
|
|
|
}
|
|
|
|
|
2020-07-27 21:12:14 +01:00
|
|
|
// DeletedObjects returns successfully deleted objects information.
|
2020-08-11 14:00:57 +01:00
|
|
|
func (r Report) DeletedObjects() ([]*pb.Object, error) {
|
|
|
|
var errlist errs.Group
|
2020-07-27 21:12:14 +01:00
|
|
|
objects := make([]*pb.Object, 0, len(r.Deleted))
|
|
|
|
for _, d := range r.Deleted {
|
|
|
|
if d.LastSegment == nil {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
streamMeta := &pb.StreamMeta{}
|
|
|
|
err := pb.Unmarshal(d.LastSegment.Metadata, streamMeta)
|
|
|
|
if err != nil {
|
2020-08-11 14:00:57 +01:00
|
|
|
errlist.Add(err)
|
2020-07-27 21:12:14 +01:00
|
|
|
continue
|
|
|
|
}
|
|
|
|
object := &pb.Object{
|
|
|
|
Bucket: d.Bucket,
|
|
|
|
EncryptedPath: d.EncryptedPath,
|
|
|
|
Version: -1,
|
|
|
|
ExpiresAt: d.LastSegment.ExpirationDate,
|
|
|
|
CreatedAt: d.LastSegment.CreationDate,
|
|
|
|
EncryptedMetadata: d.LastSegment.Metadata,
|
|
|
|
EncryptionParameters: &pb.EncryptionParameters{
|
|
|
|
CipherSuite: pb.CipherSuite(streamMeta.EncryptionType),
|
|
|
|
BlockSize: int64(streamMeta.EncryptionBlockSize),
|
|
|
|
},
|
|
|
|
}
|
|
|
|
|
|
|
|
if d.LastSegment.Remote != nil {
|
|
|
|
object.RedundancyScheme = d.LastSegment.Remote.Redundancy
|
|
|
|
|
|
|
|
} else if streamMeta.NumberOfSegments == 0 || streamMeta.NumberOfSegments > 1 {
|
|
|
|
// workaround
|
|
|
|
// NumberOfSegments == 0 - pointer with encrypted num of segments
|
|
|
|
// NumberOfSegments > 1 - pointer with unencrypted num of segments and multiple segments
|
|
|
|
// The new metainfo API redundancy scheme is on object level (not per segment).
|
|
|
|
// Because of that, RS is always taken from the last segment.
|
|
|
|
// The old implementation saves RS per segment, and in some cases
|
|
|
|
// when the remote file's last segment is an inline segment, we end up
|
|
|
|
// missing an RS scheme. This loop will search for RS in segments other than the last one.
|
|
|
|
|
|
|
|
for _, pointer := range d.OtherSegments {
|
|
|
|
if pointer.Remote != nil {
|
|
|
|
object.RedundancyScheme = pointer.Remote.Redundancy
|
|
|
|
break
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
objects = append(objects, object)
|
|
|
|
}
|
|
|
|
|
2020-08-11 14:00:57 +01:00
|
|
|
return objects, errlist.Err()
|
2020-07-27 21:12:14 +01:00
|
|
|
}
|
|
|
|
|
2020-06-29 21:31:23 +01:00
|
|
|
// GenerateReport returns the result of a delete, success, or failure.
|
2020-09-03 14:54:56 +01:00
|
|
|
func GenerateReport(ctx context.Context, log *zap.Logger, requests []*ObjectIdentifier, deletedPaths []metabase.SegmentKey, pointers []*pb.Pointer) Report {
|
2020-06-29 21:31:23 +01:00
|
|
|
defer mon.Task()(&ctx)(nil)
|
|
|
|
|
2020-08-11 14:00:57 +01:00
|
|
|
report := Report{}
|
2020-07-27 21:12:14 +01:00
|
|
|
deletedObjects := make(map[string]*ObjectState)
|
|
|
|
for i, path := range deletedPaths {
|
2020-06-29 21:31:23 +01:00
|
|
|
if path == nil {
|
|
|
|
continue
|
|
|
|
}
|
2020-07-27 21:12:14 +01:00
|
|
|
id, segmentIdx, err := ParseSegmentPath(path)
|
2020-06-29 21:31:23 +01:00
|
|
|
if err != nil {
|
|
|
|
log.Debug("failed to parse deleted segmnt path for report",
|
|
|
|
zap.String("Raw Segment Path", string(path)),
|
|
|
|
)
|
|
|
|
continue
|
|
|
|
}
|
2020-07-27 21:12:14 +01:00
|
|
|
|
2020-06-29 21:31:23 +01:00
|
|
|
if _, ok := deletedObjects[id.Key()]; !ok {
|
2020-07-27 21:12:14 +01:00
|
|
|
deletedObjects[id.Key()] = &ObjectState{
|
|
|
|
OtherSegments: []*pb.Pointer{},
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
switch segmentIdx {
|
|
|
|
case lastSegmentIndex:
|
|
|
|
deletedObjects[id.Key()].LastSegment = pointers[i]
|
|
|
|
default:
|
|
|
|
deletedObjects[id.Key()].OtherSegments = append(deletedObjects[id.Key()].OtherSegments, pointers[i])
|
2020-06-29 21:31:23 +01:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// populate report with failed and deleted objects
|
|
|
|
for _, req := range requests {
|
2020-07-27 21:12:14 +01:00
|
|
|
state, ok := deletedObjects[req.Key()]
|
|
|
|
if !ok {
|
|
|
|
report.Failed = append(report.Failed, &ObjectState{
|
|
|
|
ObjectIdentifier: *req,
|
|
|
|
})
|
|
|
|
continue
|
2020-06-29 21:31:23 +01:00
|
|
|
}
|
2020-07-27 21:12:14 +01:00
|
|
|
|
|
|
|
state.ObjectIdentifier = *req
|
|
|
|
report.Deleted = append(report.Deleted, state)
|
2020-06-29 21:31:23 +01:00
|
|
|
}
|
|
|
|
return report
|
|
|
|
}
|