storagenode/gracefulexit: Implement storage node graceful exit worker - part 1 (#3322)
parent 3eec4e9070
commit 3e0d12354a
@ -29,19 +29,22 @@ type TransferFailed_Error int32
 const (
 	TransferFailed_NOT_FOUND                TransferFailed_Error = 0
 	TransferFailed_STORAGE_NODE_UNAVAILABLE TransferFailed_Error = 1
-	TransferFailed_UNKNOWN                  TransferFailed_Error = 2
+	TransferFailed_HASH_VERIFICATION        TransferFailed_Error = 2
+	TransferFailed_UNKNOWN                  TransferFailed_Error = 10
 )
 
 var TransferFailed_Error_name = map[int32]string{
-	0: "NOT_FOUND",
-	1: "STORAGE_NODE_UNAVAILABLE",
-	2: "UNKNOWN",
+	0:  "NOT_FOUND",
+	1:  "STORAGE_NODE_UNAVAILABLE",
+	2:  "HASH_VERIFICATION",
+	10: "UNKNOWN",
 }
 
 var TransferFailed_Error_value = map[string]int32{
 	"NOT_FOUND":                0,
 	"STORAGE_NODE_UNAVAILABLE": 1,
-	"UNKNOWN":                  2,
+	"HASH_VERIFICATION":        2,
+	"UNKNOWN":                  10,
 }
 
 func (x TransferFailed_Error) String() string {
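The substantive change in this hunk is the renumbering: HASH_VERIFICATION takes over value 2, which previously meant UNKNOWN, and UNKNOWN moves to 10. Renumbering a protobuf enum is only safe while no deployed peer still sends the old value; a standalone sketch of what a version skew would look like (the maps mirror the generated tables above, but this is an illustration, not the pb package):

    package main

    import "fmt"

    // Old and new value-to-name tables for TransferFailed.Error, mirroring
    // the generated maps in the hunk above.
    var oldNames = map[int32]string{0: "NOT_FOUND", 1: "STORAGE_NODE_UNAVAILABLE", 2: "UNKNOWN"}
    var newNames = map[int32]string{0: "NOT_FOUND", 1: "STORAGE_NODE_UNAVAILABLE", 2: "HASH_VERIFICATION", 10: "UNKNOWN"}

    func main() {
    	// A peer built before this commit encodes UNKNOWN as 2 on the wire;
    	// a peer built after it decodes 2 as HASH_VERIFICATION. This is why
    	// renumbering is only safe while both sides ship together.
    	const wire int32 = 2
    	fmt.Printf("sender meant %s, receiver sees %s\n", oldNames[wire], newNames[wire])
    }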
@ -345,13 +348,13 @@ func (m *ExitProgress) GetSuccessful() bool {
 }
 
 type TransferSucceeded struct {
-	AddressedOrderLimit  *AddressedOrderLimit `protobuf:"bytes,1,opt,name=addressed_order_limit,json=addressedOrderLimit,proto3" json:"addressed_order_limit,omitempty"`
-	OriginalPieceHash    *PieceHash           `protobuf:"bytes,2,opt,name=original_piece_hash,json=originalPieceHash,proto3" json:"original_piece_hash,omitempty"`
-	ReplacementPieceHash *PieceHash           `protobuf:"bytes,3,opt,name=replacement_piece_hash,json=replacementPieceHash,proto3" json:"replacement_piece_hash,omitempty"`
-	OriginalPieceId      PieceID              `protobuf:"bytes,4,opt,name=original_piece_id,json=originalPieceId,proto3,customtype=PieceID" json:"original_piece_id"`
-	XXX_NoUnkeyedLiteral struct{}             `json:"-"`
-	XXX_unrecognized     []byte               `json:"-"`
-	XXX_sizecache        int32                `json:"-"`
+	OriginalOrderLimit   *OrderLimit `protobuf:"bytes,1,opt,name=original_order_limit,json=originalOrderLimit,proto3" json:"original_order_limit,omitempty"`
+	OriginalPieceHash    *PieceHash  `protobuf:"bytes,2,opt,name=original_piece_hash,json=originalPieceHash,proto3" json:"original_piece_hash,omitempty"`
+	ReplacementPieceHash *PieceHash  `protobuf:"bytes,3,opt,name=replacement_piece_hash,json=replacementPieceHash,proto3" json:"replacement_piece_hash,omitempty"`
+	OriginalPieceId      PieceID     `protobuf:"bytes,4,opt,name=original_piece_id,json=originalPieceId,proto3,customtype=PieceID" json:"original_piece_id"`
+	XXX_NoUnkeyedLiteral struct{}    `json:"-"`
+	XXX_unrecognized     []byte      `json:"-"`
+	XXX_sizecache        int32       `json:"-"`
 }
 
 func (m *TransferSucceeded) Reset() { *m = TransferSucceeded{} }
@ -378,9 +381,9 @@ func (m *TransferSucceeded) XXX_DiscardUnknown() {
 
 var xxx_messageInfo_TransferSucceeded proto.InternalMessageInfo
 
-func (m *TransferSucceeded) GetAddressedOrderLimit() *AddressedOrderLimit {
+func (m *TransferSucceeded) GetOriginalOrderLimit() *OrderLimit {
 	if m != nil {
-		return m.AddressedOrderLimit
+		return m.OriginalOrderLimit
 	}
 	return nil
 }
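The getter shape above is also why the satellite endpoint later in this commit can validate with GetOriginalOrderLimit() == nil without first checking the message pointer: generated getters are nil-safe. A self-contained sketch of the same pattern (local stand-in types, not the real pb package):

    package main

    import "fmt"

    type OrderLimit struct{ PieceId string }

    type TransferSucceeded struct {
    	OriginalOrderLimit *OrderLimit
    }

    // GetOriginalOrderLimit mirrors the generated getter above: it is safe
    // to call on a nil receiver, so callers can validate fields directly.
    func (m *TransferSucceeded) GetOriginalOrderLimit() *OrderLimit {
    	if m != nil {
    		return m.OriginalOrderLimit
    	}
    	return nil
    }

    func main() {
    	var m *TransferSucceeded // nil message, e.g. an absent field
    	fmt.Println(m.GetOriginalOrderLimit() == nil) // true, and no panic
    }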
@ -1029,73 +1032,75 @@ func init() {
 
 func init() { proto.RegisterFile("gracefulexit.proto", fileDescriptor_8f0acbf2ce5fa631) }
 
 var fileDescriptor_8f0acbf2ce5fa631 = []byte{
-	// 1053 bytes of a gzipped FileDescriptorProto
-	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x56, 0xe1, 0x6e, 0x1b, 0x45,
-	0x10, 0xf6, 0x39, 0xad, 0x13, 0x8f, 0x1d, 0xc7, 0xd9, 0xb4, 0x89, 0x71, 0xdb, 0xd8, 0x3d, 0x81,
-	0x70, 0x25, 0x88, 0x90, 0x81, 0x82, 0x54, 0x09, 0xe4, 0xd4, 0x8e, 0x6d, 0x1a, 0xce, 0xe9, 0x26,
-	0x29, 0x12, 0x12, 0x9c, 0xb6, 0xbe, 0x89, 0x73, 0xc2, 0xbe, 0x3b, 0x76, 0xd7, 0x55, 0xf3, 0x87,
-	0xe7, 0xe0, 0x17, 0x7f, 0x78, 0x00, 0x5e, 0x03, 0x78, 0x03, 0x90, 0xe8, 0xb3, 0xa0, 0xbd, 0x5b,
-	0x5f, 0xee, 0x6c, 0x27, 0x44, 0xf4, 0xdf, 0xdd, 0xcc, 0x37, 0xb3, 0xb3, 0xdf, 0x7c, 0x3b, 0xbb,
-	0x40, 0x46, 0x9c, 0x0d, 0xf1, 0x6c, 0x3a, 0xc6, 0xd7, 0xae, 0xdc, 0x0b, 0xb8, 0x2f, 0x7d, 0x52,
-	0x4c, 0xda, 0xaa, 0x30, 0xf2, 0x47, 0x7e, 0xe4, 0xa9, 0x96, 0x26, 0x28, 0x99, 0xeb, 0x9d, 0xcd,
-	0xfe, 0x8b, 0x3e, 0x77, 0x90, 0x8b, 0xe8, 0xcf, 0xac, 0xc3, 0x6e, 0x17, 0xa5, 0xe5, 0x7b, 0x9d,
-	0xd7, 0xae, 0x74, 0xbd, 0xd1, 0x31, 0x93, 0x38, 0x1e, 0xbb, 0x12, 0x05, 0xc5, 0x1f, 0xa7, 0x28,
-	0xa4, 0x79, 0x00, 0xf7, 0xfa, 0x9e, 0x2b, 0x5d, 0x26, 0xb1, 0xab, 0xd7, 0x50, 0x58, 0xed, 0x26,
-	0xef, 0xc3, 0xaa, 0xe7, 0x3b, 0x68, 0xbb, 0x4e, 0xc5, 0xa8, 0x1b, 0x8d, 0xe2, 0x7e, 0xe9, 0xf7,
-	0x37, 0xb5, 0xcc, 0xdf, 0x6f, 0x6a, 0x39, 0xcb, 0x77, 0xb0, 0xdf, 0xa6, 0x39, 0xe5, 0xee, 0x3b,
-	0xe6, 0x4f, 0xb0, 0xb5, 0x64, 0x99, 0x1b, 0xc7, 0x93, 0x1a, 0x14, 0x1c, 0x7f, 0xc2, 0x5c, 0xcf,
-	0xf6, 0xd8, 0x04, 0x2b, 0xd9, 0xba, 0xd1, 0xc8, 0x53, 0x88, 0x4c, 0x16, 0x9b, 0x20, 0x79, 0x00,
-	0x20, 0x02, 0x36, 0x44, 0x7b, 0x2a, 0xd0, 0xa9, 0xac, 0xd4, 0x8d, 0x86, 0x41, 0xf3, 0xa1, 0xe5,
-	0x54, 0xa0, 0x63, 0x3a, 0x50, 0xbb, 0x72, 0xa7, 0x22, 0xf0, 0x3d, 0x81, 0xa4, 0x05, 0x20, 0x62,
-	0x6b, 0xc5, 0xa8, 0xaf, 0x34, 0x0a, 0xcd, 0x87, 0x7b, 0x29, 0xb6, 0x97, 0xc4, 0xd3, 0x44, 0x90,
-	0x59, 0x81, 0xed, 0x2e, 0x4a, 0x05, 0x39, 0xe2, 0xfe, 0x88, 0xa3, 0x88, 0x79, 0x7c, 0x0e, 0x3b,
-	0x0b, 0x1e, 0xbd, 0xee, 0x63, 0x58, 0x0b, 0xb4, 0x4d, 0xaf, 0x5a, 0x4d, 0xaf, 0x9a, 0x8a, 0x8a,
-	0xb1, 0xe6, 0xaf, 0x06, 0x14, 0x93, 0xae, 0x79, 0x8e, 0x8c, 0x05, 0x8e, 0x12, 0x6c, 0x67, 0xaf,
-	0x65, 0xfb, 0x11, 0x94, 0x03, 0xe4, 0x43, 0xf4, 0xa4, 0x3d, 0xf4, 0x27, 0xc1, 0x18, 0x25, 0x86,
-	0x94, 0x66, 0xe9, 0x86, 0xb6, 0x3f, 0xd5, 0x66, 0xb2, 0x0b, 0x20, 0xa6, 0xc3, 0x21, 0x0a, 0x71,
-	0x36, 0x1d, 0x57, 0x6e, 0xd5, 0x8d, 0xc6, 0x1a, 0x4d, 0x58, 0xcc, 0xdf, 0xb2, 0xb0, 0x79, 0xc2,
-	0x99, 0x27, 0xce, 0x90, 0x1f, 0x2b, 0x33, 0x3a, 0xe8, 0x90, 0xe7, 0x70, 0x97, 0x39, 0x8e, 0xaa,
-	0x1a, 0x1d, 0x3b, 0x94, 0xa4, 0x3d, 0x76, 0x27, 0xae, 0x0c, 0x8b, 0x2e, 0x34, 0x1f, 0xec, 0xc5,
-	0xb2, 0x6d, 0xcd, 0x60, 0x03, 0x85, 0x3a, 0x54, 0x20, 0xba, 0xc5, 0x16, 0x8d, 0xa4, 0x05, 0x5b,
-	0x3e, 0x77, 0x47, 0xae, 0xc7, 0xc6, 0x76, 0xe0, 0xe2, 0x10, 0xed, 0x73, 0x26, 0xce, 0xc3, 0x8d,
-	0x16, 0x9a, 0x9b, 0x7b, 0x5a, 0xf7, 0x47, 0xca, 0xd3, 0x63, 0xe2, 0x9c, 0x6e, 0xce, 0xd0, 0xb1,
-	0x89, 0x74, 0x61, 0x9b, 0x63, 0x30, 0x66, 0x43, 0x9c, 0xa8, 0xad, 0x27, 0xb2, 0xac, 0x5c, 0x95,
-	0xe5, 0x4e, 0x22, 0xe0, 0x32, 0xd1, 0x13, 0xd8, 0x9c, 0xab, 0xc5, 0x75, 0x42, 0x6e, 0x8a, 0xfb,
-	0x1b, 0x9a, 0xf2, 0xd5, 0x10, 0xdd, 0x6f, 0xd3, 0x8d, 0x54, 0x1d, 0x7d, 0xc7, 0xfc, 0xd3, 0x80,
-	0xd2, 0x8c, 0xb1, 0x03, 0xe6, 0x8e, 0xd1, 0x59, 0x9e, 0xcf, 0xb8, 0x59, 0x3e, 0xf2, 0x39, 0xdc,
-	0x46, 0xce, 0x7d, 0x1e, 0x52, 0x51, 0x6a, 0x9a, 0x69, 0x71, 0xa5, 0x57, 0xda, 0xeb, 0x28, 0x24,
-	0x8d, 0x02, 0xcc, 0x16, 0xdc, 0x0e, 0xff, 0xc9, 0x3a, 0xe4, 0xad, 0xc1, 0x89, 0x7d, 0x30, 0x38,
-	0xb5, 0xda, 0xe5, 0x0c, 0xb9, 0x0f, 0x95, 0xe3, 0x93, 0x01, 0x6d, 0x75, 0x3b, 0xb6, 0x35, 0x68,
-	0x77, 0xec, 0x53, 0xab, 0xf5, 0xa2, 0xd5, 0x3f, 0x6c, 0xed, 0x1f, 0x76, 0xca, 0x06, 0x29, 0xc0,
-	0xea, 0xa9, 0xf5, 0xcc, 0x1a, 0x7c, 0x63, 0x95, 0xb3, 0xe6, 0xcf, 0x06, 0x90, 0x63, 0xe9, 0x73,
-	0x36, 0x42, 0xa5, 0xb1, 0xaf, 0x51, 0x08, 0x36, 0x42, 0xf2, 0x25, 0xe4, 0xc5, 0x4c, 0x0c, 0xba,
-	0xe7, 0xb5, 0xe5, 0x75, 0xc5, 0x9a, 0xe9, 0x65, 0xe8, 0x65, 0x0c, 0x79, 0x0c, 0xb9, 0xb3, 0xb0,
-	0x62, 0xdd, 0xe0, 0xfb, 0xd7, 0xed, 0xaa, 0x97, 0xa1, 0x1a, 0xbd, 0x9f, 0x87, 0x55, 0x5d, 0x83,
-	0x09, 0xb0, 0x66, 0xf9, 0x92, 0x22, 0x73, 0x2e, 0xcc, 0xbf, 0x0c, 0x58, 0x9f, 0xc5, 0x84, 0xbc,
-	0xbd, 0x2d, 0xe5, 0x85, 0x80, 0xbb, 0xaf, 0x98, 0x44, 0xfb, 0x07, 0xbc, 0xd0, 0x87, 0x6d, 0x47,
-	0x87, 0x6d, 0x84, 0xa8, 0xa3, 0xc8, 0xff, 0x0c, 0x2f, 0x28, 0x04, 0xf1, 0xf7, 0xd5, 0x07, 0x63,
-	0xe5, 0xff, 0x1e, 0x0c, 0xf3, 0x2b, 0x28, 0xb4, 0x51, 0x9d, 0xd5, 0xb7, 0xdf, 0x98, 0xd9, 0x85,
-	0x75, 0x35, 0x72, 0x66, 0xa7, 0x5f, 0xf5, 0x61, 0x47, 0x11, 0x1e, 0x8f, 0x09, 0x5b, 0xb8, 0x23,
-	0x8f, 0xc9, 0x29, 0x8f, 0xe6, 0x4f, 0x91, 0xde, 0xc5, 0x04, 0xfe, 0x78, 0xe6, 0x34, 0x7f, 0x31,
-	0x00, 0x54, 0x26, 0x2d, 0xf0, 0x4f, 0x60, 0x3b, 0x4c, 0xa3, 0xba, 0x34, 0xe5, 0x8b, 0x59, 0xee,
-	0xa0, 0xc6, 0x4e, 0xf9, 0x65, 0x12, 0xf2, 0x19, 0xe4, 0x38, 0x32, 0xe1, 0x7b, 0x5a, 0xda, 0xb5,
-	0xc5, 0xb9, 0xa9, 0x65, 0x4d, 0x43, 0x18, 0xd5, 0x70, 0xf3, 0x21, 0xe4, 0x22, 0x0b, 0xd9, 0x81,
-	0xad, 0x17, 0x1d, 0xda, 0x3f, 0xe8, 0x3f, 0x6d, 0x9d, 0xf4, 0x07, 0x96, 0x7d, 0xd0, 0xea, 0x1f,
-	0x76, 0xda, 0xe5, 0x8c, 0xf9, 0x4f, 0x16, 0xca, 0xf1, 0x90, 0x9f, 0xc9, 0xf6, 0x53, 0xc8, 0x7b,
-	0xbe, 0xb4, 0xb9, 0xd2, 0x8c, 0x96, 0xed, 0xf6, 0xfc, 0x0d, 0x11, 0x29, 0xaa, 0x97, 0xa1, 0x6b,
-	0x9e, 0xfe, 0x26, 0x6d, 0x28, 0x49, 0x2d, 0xae, 0x88, 0x72, 0x2d, 0xda, 0x7b, 0xcb, 0x45, 0x1b,
-	0xcd, 0x91, 0x0c, 0x5d, 0x97, 0x29, 0x45, 0x7e, 0x01, 0x45, 0x27, 0xec, 0xa3, 0xce, 0x11, 0x29,
-	0xe2, 0x9d, 0x74, 0x8e, 0x44, 0xa7, 0x7b, 0x19, 0x5a, 0x70, 0x12, 0x8d, 0x6f, 0x43, 0x29, 0xd5,
-	0xaa, 0x68, 0x22, 0x2d, 0x54, 0x91, 0xea, 0xaf, 0xaa, 0x02, 0x53, 0x0d, 0x7f, 0x02, 0x85, 0xb8,
-	0x53, 0xe8, 0x54, 0x6e, 0x87, 0x29, 0x2a, 0x57, 0x11, 0xdf, 0xcb, 0x50, 0xc0, 0xf8, 0x2f, 0x71,
-	0xfa, 0x9a, 0x7f, 0x64, 0xa1, 0xac, 0x26, 0x42, 0xf2, 0x55, 0x41, 0x5e, 0x85, 0xb7, 0xe4, 0xb2,
-	0x5b, 0x9a, 0x7c, 0x90, 0x5e, 0xe2, 0xfa, 0x67, 0x4b, 0xf5, 0xc3, 0x1b, 0xa2, 0xf5, 0x15, 0xfc,
-	0x1d, 0xdc, 0x59, 0xf6, 0xca, 0x21, 0x8f, 0xd2, 0x69, 0xae, 0x79, 0x09, 0x55, 0xaf, 0xb9, 0xb3,
-	0xc9, 0xf7, 0xb0, 0x31, 0x77, 0xf9, 0x93, 0x77, 0x17, 0x0a, 0x5c, 0xf2, 0x6a, 0xa8, 0xbe, 0xf7,
-	0x1f, 0xa8, 0xa8, 0xfc, 0xe6, 0x39, 0xdc, 0x8d, 0x37, 0x95, 0xaa, 0x7f, 0x00, 0xab, 0x47, 0xdc,
-	0x57, 0x57, 0x31, 0xa9, 0xa7, 0x53, 0x2d, 0xce, 0xe4, 0xea, 0xee, 0x1c, 0x62, 0x4e, 0xfc, 0x0d,
-	0xe3, 0x23, 0x63, 0xff, 0xd6, 0xb7, 0xd9, 0xe0, 0xe5, 0xcb, 0x5c, 0xf8, 0x7a, 0xfc, 0xf8, 0xdf,
-	0x00, 0x00, 0x00, 0xff, 0xff, 0xb3, 0xe2, 0xc3, 0xb2, 0x8b, 0x0a, 0x00, 0x00,
+	// 1076 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x56, 0xef, 0x6e, 0x1b, 0x45,
+	0x10, 0xf7, 0xb9, 0x8d, 0x13, 0x8f, 0x1d, 0xc7, 0xd9, 0xfc, 0x33, 0x6e, 0x1b, 0xbb, 0x27, 0x10,
+	0xae, 0x04, 0x11, 0x32, 0x50, 0x90, 0x2a, 0x81, 0x9c, 0xda, 0xb1, 0x4d, 0xc3, 0x39, 0x5d, 0x27,
+	0x05, 0x21, 0xc1, 0x69, 0xeb, 0x9b, 0x38, 0x27, 0xec, 0xbb, 0x63, 0x6f, 0x5d, 0x35, 0x5f, 0x78,
+	0x0e, 0x3e, 0xf1, 0x01, 0x5e, 0x06, 0x1e, 0x01, 0x24, 0x2a, 0xf1, 0x26, 0x68, 0xef, 0xf6, 0x2e,
+	0x77, 0xb6, 0x13, 0x22, 0xf5, 0x93, 0x7d, 0x33, 0xbf, 0xf9, 0xed, 0xec, 0xcc, 0x6f, 0x67, 0x17,
+	0xc8, 0x98, 0xb3, 0x11, 0x9e, 0xcf, 0x26, 0xf8, 0xda, 0x16, 0x07, 0x1e, 0x77, 0x85, 0x4b, 0x8a,
+	0x49, 0x5b, 0x15, 0xc6, 0xee, 0xd8, 0x0d, 0x3d, 0xd5, 0xd2, 0x14, 0x05, 0xb3, 0x9d, 0xf3, 0xe8,
+	0xbb, 0xe8, 0x72, 0x0b, 0xb9, 0x1f, 0x7e, 0xe9, 0x75, 0xd8, 0xef, 0xa2, 0x30, 0x5c, 0xa7, 0xf3,
+	0xda, 0x16, 0xb6, 0x33, 0x1e, 0x32, 0x81, 0x93, 0x89, 0x2d, 0xd0, 0xa7, 0xf8, 0xd3, 0x0c, 0x7d,
+	0xa1, 0x1f, 0xc1, 0xbd, 0xbe, 0x63, 0x0b, 0x9b, 0x09, 0xec, 0xaa, 0x35, 0x24, 0x56, 0xb9, 0xc9,
+	0xfb, 0xb0, 0xea, 0xb8, 0x16, 0x9a, 0xb6, 0x55, 0xd1, 0xea, 0x5a, 0xa3, 0x78, 0x58, 0xfa, 0xe3,
+	0x4d, 0x2d, 0xf3, 0xf7, 0x9b, 0x5a, 0xce, 0x70, 0x2d, 0xec, 0xb7, 0x69, 0x4e, 0xba, 0xfb, 0x96,
+	0xfe, 0x33, 0x6c, 0x2d, 0x59, 0xe6, 0xd6, 0xf1, 0xa4, 0x06, 0x05, 0xcb, 0x9d, 0x32, 0xdb, 0x31,
+	0x1d, 0x36, 0xc5, 0x4a, 0xb6, 0xae, 0x35, 0xf2, 0x14, 0x42, 0x93, 0xc1, 0xa6, 0x48, 0x1e, 0x00,
+	0xf8, 0x1e, 0x1b, 0xa1, 0x39, 0xf3, 0xd1, 0xaa, 0xdc, 0xa9, 0x6b, 0x0d, 0x8d, 0xe6, 0x03, 0xcb,
+	0x99, 0x8f, 0x96, 0x6e, 0x41, 0xed, 0xda, 0x9d, 0xfa, 0x9e, 0xeb, 0xf8, 0x48, 0x5a, 0x00, 0x7e,
+	0x6c, 0xad, 0x68, 0xf5, 0x3b, 0x8d, 0x42, 0xf3, 0xe1, 0x41, 0xaa, 0xda, 0x4b, 0xe2, 0x69, 0x22,
+	0x48, 0xaf, 0xc0, 0x6e, 0x17, 0x85, 0x84, 0x9c, 0x70, 0x77, 0xcc, 0xd1, 0x8f, 0xeb, 0xf8, 0x1c,
+	0xf6, 0x16, 0x3c, 0x6a, 0xdd, 0xc7, 0xb0, 0xe6, 0x29, 0x9b, 0x5a, 0xb5, 0x9a, 0x5e, 0x35, 0x15,
+	0x15, 0x63, 0xf5, 0xdf, 0x35, 0x28, 0x26, 0x5d, 0xf3, 0x35, 0xd2, 0x16, 0x6a, 0x94, 0xa8, 0x76,
+	0xf6, 0xc6, 0x6a, 0x3f, 0x82, 0xb2, 0x87, 0x7c, 0x84, 0x8e, 0x30, 0x47, 0xee, 0xd4, 0x9b, 0xa0,
+	0xc0, 0xa0, 0xa4, 0x59, 0xba, 0xa1, 0xec, 0x4f, 0x95, 0x99, 0xec, 0x03, 0xf8, 0xb3, 0xd1, 0x08,
+	0x7d, 0xff, 0x7c, 0x36, 0xa9, 0xdc, 0xad, 0x6b, 0x8d, 0x35, 0x9a, 0xb0, 0xe8, 0xbf, 0x65, 0x61,
+	0xf3, 0x94, 0x33, 0xc7, 0x3f, 0x47, 0x3e, 0x94, 0x66, 0xb4, 0xd0, 0x22, 0x6d, 0xd8, 0x76, 0xb9,
+	0x3d, 0xb6, 0x1d, 0x36, 0x31, 0x03, 0x45, 0x9a, 0x13, 0x7b, 0x6a, 0x8b, 0x20, 0xe7, 0x42, 0x93,
+	0x1c, 0x28, 0x95, 0x0e, 0xe4, 0xcf, 0xb1, 0xf4, 0x50, 0x12, 0xe1, 0xaf, 0x6c, 0xa4, 0x05, 0x5b,
+	0x31, 0x8b, 0x67, 0xe3, 0x08, 0xcd, 0x0b, 0xe6, 0x5f, 0x04, 0x7b, 0x2b, 0x34, 0x37, 0x23, 0x92,
+	0x13, 0xe9, 0xe9, 0x31, 0xff, 0x82, 0x6e, 0x46, 0xe8, 0xd8, 0x44, 0xba, 0xb0, 0xcb, 0xd1, 0x9b,
+	0xb0, 0x11, 0x4e, 0xe5, 0x6e, 0x13, 0x2c, 0x77, 0xae, 0x63, 0xd9, 0x4e, 0x04, 0x5c, 0x11, 0x3d,
+	0x81, 0xcd, 0xb9, 0x5c, 0x6c, 0x2b, 0x28, 0x47, 0xf1, 0x70, 0x43, 0x55, 0x79, 0x35, 0x40, 0xf7,
+	0xdb, 0x74, 0x23, 0x95, 0x47, 0xdf, 0xd2, 0xff, 0xd5, 0xa0, 0x14, 0x15, 0xe9, 0x88, 0xd9, 0x13,
+	0xb4, 0x96, 0xf3, 0x69, 0xb7, 0xe3, 0x23, 0x9f, 0xc3, 0x0a, 0x72, 0xee, 0xf2, 0xa0, 0x14, 0xa5,
+	0xa6, 0x9e, 0xd6, 0x53, 0x7a, 0xa5, 0x83, 0x8e, 0x44, 0xd2, 0x30, 0x40, 0xff, 0x16, 0x56, 0x82,
+	0x6f, 0xb2, 0x0e, 0x79, 0x63, 0x70, 0x6a, 0x1e, 0x0d, 0xce, 0x8c, 0x76, 0x39, 0x43, 0xee, 0x43,
+	0x65, 0x78, 0x3a, 0xa0, 0xad, 0x6e, 0xc7, 0x34, 0x06, 0xed, 0x8e, 0x79, 0x66, 0xb4, 0x5e, 0xb4,
+	0xfa, 0xc7, 0xad, 0xc3, 0xe3, 0x4e, 0x59, 0x23, 0x3b, 0xb0, 0xd9, 0x6b, 0x0d, 0x7b, 0xe6, 0x8b,
+	0x0e, 0xed, 0x1f, 0xf5, 0x9f, 0xb6, 0x4e, 0xfb, 0x03, 0xa3, 0x9c, 0x25, 0x05, 0x58, 0x3d, 0x33,
+	0x9e, 0x19, 0x83, 0x6f, 0x8c, 0x32, 0xe8, 0xbf, 0x68, 0x40, 0x86, 0xc2, 0xe5, 0x6c, 0x8c, 0x52,
+	0x6d, 0x5f, 0xa3, 0xef, 0xb3, 0x31, 0x92, 0x2f, 0x21, 0xef, 0x47, 0xb2, 0x50, 0xed, 0xaf, 0x2d,
+	0x4f, 0x37, 0x56, 0x4f, 0x2f, 0x43, 0xaf, 0x62, 0xc8, 0x63, 0xc8, 0x9d, 0x07, 0x1b, 0x51, 0x7d,
+	0xbf, 0x7f, 0xd3, 0x66, 0x7b, 0x19, 0xaa, 0xd0, 0x87, 0x79, 0x58, 0x55, 0x39, 0xe8, 0x00, 0x6b,
+	0x86, 0x2b, 0x28, 0x32, 0xeb, 0x52, 0xff, 0x4b, 0x83, 0xf5, 0x28, 0x26, 0x28, 0xe7, 0xdb, 0x76,
+	0xa2, 0xe0, 0x71, 0xfb, 0x15, 0x13, 0x68, 0xfe, 0x88, 0x97, 0xea, 0xd8, 0xed, 0xa9, 0xb0, 0x8d,
+	0x00, 0x75, 0x12, 0xfa, 0x9f, 0xe1, 0x25, 0x05, 0x2f, 0xfe, 0x4f, 0x9e, 0xc3, 0x0e, 0xb3, 0x2c,
+	0x79, 0xb0, 0xd1, 0x4a, 0x9d, 0x91, 0x50, 0x98, 0x0f, 0x0e, 0xe2, 0xc9, 0xde, 0x8a, 0x60, 0x89,
+	0xe3, 0xb2, 0xc5, 0x16, 0x8d, 0xfa, 0x57, 0x50, 0x68, 0xa3, 0x3c, 0xb5, 0x6f, 0xbf, 0x31, 0xbd,
+	0x0b, 0xeb, 0x72, 0xf8, 0x44, 0x73, 0x40, 0xf6, 0x61, 0x4f, 0x16, 0x3c, 0x1e, 0x18, 0xa6, 0x6f,
+	0x8f, 0x1d, 0x26, 0x66, 0x3c, 0x9c, 0x44, 0x45, 0xba, 0x83, 0x09, 0xfc, 0x30, 0x72, 0xea, 0xbf,
+	0x6a, 0x00, 0x92, 0x49, 0xe9, 0xfe, 0x13, 0xd8, 0x0d, 0x68, 0x64, 0x97, 0x66, 0x7c, 0x91, 0x65,
+	0x1b, 0x15, 0x76, 0xc6, 0xaf, 0x48, 0xc8, 0x67, 0x90, 0xe3, 0xc8, 0x7c, 0xd7, 0x51, 0x8a, 0xaf,
+	0x2d, 0x4e, 0x50, 0xa5, 0x76, 0x1a, 0xc0, 0xa8, 0x82, 0xeb, 0x0f, 0x21, 0x17, 0x5a, 0xc8, 0x1e,
+	0x6c, 0x25, 0xe5, 0x6b, 0x1e, 0xb5, 0xfa, 0xc7, 0x9d, 0x76, 0x39, 0xa3, 0xff, 0x93, 0x85, 0x72,
+	0x3c, 0xee, 0x23, 0xd9, 0x7e, 0x0a, 0x79, 0xc7, 0x15, 0x26, 0x97, 0x9a, 0x51, 0xb2, 0xdd, 0x9d,
+	0xbf, 0x2b, 0x42, 0x45, 0xf5, 0x32, 0x74, 0xcd, 0x51, 0xff, 0x49, 0x1b, 0x4a, 0x42, 0x89, 0x2b,
+	0x2c, 0xb9, 0x12, 0xed, 0xbd, 0xe5, 0xa2, 0x0d, 0xc7, 0x4b, 0x86, 0xae, 0x8b, 0x94, 0x22, 0xbf,
+	0x80, 0xa2, 0x15, 0xf4, 0x51, 0x71, 0x84, 0x8a, 0x78, 0x27, 0xcd, 0x91, 0xe8, 0x74, 0x2f, 0x43,
+	0x0b, 0x56, 0xa2, 0xf1, 0x6d, 0x28, 0xa5, 0x5a, 0x15, 0x0e, 0xaa, 0x85, 0x2c, 0x52, 0xfd, 0x95,
+	0x59, 0x60, 0xaa, 0xe1, 0x4f, 0xa0, 0x10, 0x77, 0x0a, 0xad, 0xca, 0x4a, 0x40, 0x51, 0xb9, 0xae,
+	0xf0, 0xbd, 0x0c, 0x05, 0x8c, 0xbf, 0x12, 0xa7, 0xaf, 0xf9, 0x67, 0x16, 0xca, 0x72, 0x22, 0x24,
+	0xdf, 0x17, 0xe4, 0x55, 0x70, 0x5f, 0x2e, 0xbb, 0xaf, 0xc9, 0x07, 0xe9, 0x25, 0x6e, 0x7e, 0xc0,
+	0x54, 0x3f, 0xbc, 0x25, 0x5a, 0x5d, 0xc6, 0xdf, 0xc3, 0xf6, 0xb2, 0xf7, 0x0e, 0x79, 0x94, 0xa6,
+	0xb9, 0xe1, 0x4d, 0x54, 0xbd, 0xe1, 0xf6, 0x26, 0x3f, 0xc0, 0xc6, 0xdc, 0x33, 0x80, 0xbc, 0xbb,
+	0x90, 0xe0, 0x92, 0xf7, 0x43, 0xf5, 0xbd, 0xff, 0x41, 0x85, 0xe9, 0x37, 0x2f, 0x60, 0x27, 0xde,
+	0x54, 0x2a, 0xff, 0x01, 0xac, 0x9e, 0x70, 0x57, 0x5e, 0xca, 0xa4, 0x9e, 0xa6, 0x5a, 0x9c, 0xc9,
+	0xd5, 0xfd, 0x39, 0xc4, 0x9c, 0xf8, 0x1b, 0xda, 0x47, 0xda, 0xe1, 0xdd, 0xef, 0xb2, 0xde, 0xcb,
+	0x97, 0xb9, 0xe0, 0x1d, 0xf9, 0xf1, 0x7f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x9f, 0x3c, 0xc5, 0xf1,
+	0x95, 0x0a, 0x00, 0x00,
 }
 
 type DRPCNodeGracefulExitClient interface {
@ -56,7 +56,7 @@ service SatelliteGracefulExit {
 }
 
 message TransferSucceeded {
-    metainfo.AddressedOrderLimit addressed_order_limit = 1;
+    orders.OrderLimit original_order_limit = 1;
     orders.PieceHash original_piece_hash = 2;
     orders.PieceHash replacement_piece_hash = 3;
     bytes original_piece_id = 4 [(gogoproto.customtype) = "PieceID", (gogoproto.nullable) = false];
@ -67,7 +67,9 @@ message TransferFailed {
     enum Error {
         NOT_FOUND = 0;
         STORAGE_NODE_UNAVAILABLE = 1;
-        UNKNOWN = 2;
+        HASH_VERIFICATION = 2;
+
+        UNKNOWN = 10;
     }
     Error error = 2;
 }
proto.lock
@ -862,8 +862,12 @@
           "integer": 1
         },
         {
-          "name": "UNKNOWN",
+          "name": "HASH_VERIFICATION",
           "integer": 2
+        },
+        {
+          "name": "UNKNOWN",
+          "integer": 10
         }
       ]
     },
@ -995,8 +999,8 @@
       "fields": [
         {
           "id": 1,
-          "name": "addressed_order_limit",
-          "type": "metainfo.AddressedOrderLimit"
+          "name": "original_order_limit",
+          "type": "orders.OrderLimit"
         },
         {
           "id": 2,
@ -182,6 +182,12 @@ func (endpoint *Endpoint) doProcess(stream processStream) (err error) {
 	pending := newPendingMap()
 
 	var morePiecesFlag int32 = 1
+	errChan := make(chan error, 1)
+	handleError := func(err error) error {
+		errChan <- err
+		close(errChan)
+		return Error.Wrap(err)
+	}
 
 	var group errgroup.Group
 	group.Go(func() error {
@ -192,13 +198,13 @@ func (endpoint *Endpoint) doProcess(stream processStream) (err error) {
 		if pending.length() == 0 {
 			incomplete, err := endpoint.db.GetIncompleteNotFailed(ctx, nodeID, endpoint.config.EndpointBatchSize, 0)
 			if err != nil {
-				return Error.Wrap(err)
+				return handleError(err)
 			}
 
 			if len(incomplete) == 0 {
 				incomplete, err = endpoint.db.GetIncompleteFailed(ctx, nodeID, endpoint.config.EndpointMaxFailures, endpoint.config.EndpointBatchSize, 0)
 				if err != nil {
-					return Error.Wrap(err)
+					return handleError(err)
 				}
 			}
 
@ -211,7 +217,7 @@ func (endpoint *Endpoint) doProcess(stream processStream) (err error) {
 			for _, inc := range incomplete {
 				err = endpoint.processIncomplete(ctx, stream, pending, inc)
 				if err != nil {
-					return Error.Wrap(err)
+					return handleError(err)
 				}
 			}
 		}
@ -220,6 +226,12 @@ func (endpoint *Endpoint) doProcess(stream processStream) (err error) {
 	})
 
 	for {
+		select {
+		case <-errChan:
+			return group.Wait()
+		default:
+		}
+
 		pendingCount := pending.length()
 		// if there are no more transfers and the pending queue is empty, send complete
 		if atomic.LoadInt32(&morePiecesFlag) == 0 && pendingCount == 0 {
@ -252,7 +264,17 @@ func (endpoint *Endpoint) doProcess(stream processStream) (err error) {
 			if err != nil {
 				return Error.Wrap(err)
 			}
+
+			deleteMsg := &pb.SatelliteMessage{
+				Message: &pb.SatelliteMessage_DeletePiece{
+					DeletePiece: &pb.DeletePiece{
+						OriginalPieceId: m.Succeeded.OriginalPieceId,
+					},
+				},
+			}
+			err = stream.Send(deleteMsg)
+			if err != nil {
+				return Error.Wrap(err)
+			}
 		case *pb.StorageNodeMessage_Failed:
 			err = endpoint.handleFailed(ctx, pending, nodeID, m)
 			if err != nil {
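The errChan/handleError additions above form a small one-shot failure signal: the queue-filling goroutine records its error and closes the channel, and the main send loop polls it with a non-blocking select, returning group.Wait() so the caller sees the goroutine's wrapped error. A self-contained sketch of the same shape (standard library plus errgroup only; the names are mine, not the endpoint's):

    package main

    import (
    	"errors"
    	"fmt"
    	"time"

    	"golang.org/x/sync/errgroup"
    )

    func main() {
    	errChan := make(chan error, 1)
    	handleError := func(err error) error {
    		errChan <- err // buffered, so the producer never blocks here
    		close(errChan)
    		return err
    	}

    	var group errgroup.Group
    	group.Go(func() error {
    		// Stand-in for the queue-filling goroutine hitting a DB error.
    		return handleError(errors.New("db unavailable"))
    	})

    	for {
    		select {
    		case <-errChan: // sent or closed: the producer has failed
    			// group.Wait returns the error from the goroutine.
    			fmt.Println("stopping:", group.Wait())
    			return
    		default:
    			time.Sleep(time.Millisecond) // stand-in for the receive loop's work
    		}
    	}
    }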
@ -349,7 +371,7 @@ func (endpoint *Endpoint) processIncomplete(ctx context.Context, stream processS
 	}
 
 	bucketID := []byte(storj.JoinPaths(parts[0], parts[1]))
-	limit, privateKey, err := endpoint.orders.CreateGracefulExitPutOrderLimit(ctx, bucketID, newNode.Id, incomplete.PieceNum, remote.RootPieceId, remote.Redundancy.GetErasureShareSize())
+	limit, privateKey, err := endpoint.orders.CreateGracefulExitPutOrderLimit(ctx, bucketID, newNode.Id, incomplete.PieceNum, remote.RootPieceId, int32(pieceSize))
 	if err != nil {
 		return Error.Wrap(err)
 	}
@ -378,11 +400,11 @@ func (endpoint *Endpoint) processIncomplete(ctx context.Context, stream processS
 
 func (endpoint *Endpoint) handleSucceeded(ctx context.Context, pending *pendingMap, nodeID storj.NodeID, message *pb.StorageNodeMessage_Succeeded) (err error) {
 	defer mon.Task()(&ctx)(&err)
-	if message.Succeeded.GetAddressedOrderLimit() == nil {
-		return Error.New("Addressed order limit cannot be nil.")
+	if message.Succeeded.GetOriginalOrderLimit() == nil {
+		return Error.New("original order limit cannot be nil.")
 	}
 	if message.Succeeded.GetOriginalPieceHash() == nil {
-		return Error.New("Original piece hash cannot be nil.")
+		return Error.New("original piece hash cannot be nil.")
 	}
 
 	pieceID := message.Succeeded.OriginalPieceId
@ -57,10 +57,8 @@ func TestSuccess(t *testing.T) {
 				Succeeded: &pb.TransferSucceeded{
 					OriginalPieceId:   m.TransferPiece.OriginalPieceId,
 					OriginalPieceHash: &pb.PieceHash{PieceId: m.TransferPiece.OriginalPieceId},
-					AddressedOrderLimit: &pb.AddressedOrderLimit{
-						Limit: &pb.OrderLimit{
-							PieceId: m.TransferPiece.AddressedOrderLimit.Limit.PieceId,
-						},
+					OriginalOrderLimit: &pb.OrderLimit{
+						PieceId: m.TransferPiece.AddressedOrderLimit.Limit.PieceId,
 					},
 				},
@ -84,7 +82,7 @@ func TestSuccess(t *testing.T) {
 			// TODO test completed signature stuff
 			break
 		default:
-			t.FailNow()
+			// TODO finish other message types above so this shouldn't happen
 		}
 	}
@ -743,7 +743,7 @@ func (service *Service) CreateGracefulExitPutOrderLimit(ctx context.Context, buc
 		UplinkPublicKey: piecePublicKey,
 		StorageNodeId:   nodeID,
 		PieceId:         rootPieceID.Derive(nodeID, pieceNum),
-		Action:          pb.PieceAction_PUT_GRACEFUL_EXIT,
+		Action:          pb.PieceAction_PUT,
 		Limit:           int64(shareSize),
 		OrderCreation:   time.Now().UTC(),
 		OrderExpiration: orderExpiration,
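Note that the order limit above reuses the segment's root piece ID and derives the per-node piece ID with rootPieceID.Derive(nodeID, pieceNum), so the replacement node receives the same ID it would have had in a normal upload. A generic sketch of that kind of deterministic derivation (illustrative only; this HMAC construction is an assumption and not necessarily storj's exact Derive implementation):

    package main

    import (
    	"crypto/hmac"
    	"crypto/sha512"
    	"encoding/binary"
    	"fmt"
    )

    // deriveID sketches deterministic per-node piece ID derivation: the same
    // (root, nodeID, pieceNum) inputs always yield the same 32-byte ID.
    func deriveID(root, nodeID []byte, pieceNum int32) [32]byte {
    	mac := hmac.New(sha512.New, root)
    	mac.Write(nodeID)
    	var num [4]byte
    	binary.BigEndian.PutUint32(num[:], uint32(pieceNum))
    	mac.Write(num[:])
    	var out [32]byte
    	copy(out[:], mac.Sum(nil))
    	return out
    }

    func main() {
    	root := []byte("root-piece-id")
    	node := []byte("storage-node-id")
    	a := deriveID(root, node, 7)
    	b := deriveID(root, node, 7)
    	fmt.Println(a == b) // true: derivation is deterministic
    }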
@ -12,7 +12,10 @@ import (
 	"gopkg.in/spacemonkeygo/monkit.v2"
 
 	"storj.io/storj/internal/sync2"
+	"storj.io/storj/pkg/rpc"
+	"storj.io/storj/storagenode/pieces"
 	"storj.io/storj/storagenode/satellites"
+	"storj.io/storj/storagenode/trust"
 )
 
 var mon = monkit.Package()
@ -22,7 +25,10 @@ var mon = monkit.Package()
 // architecture: Chore
 type Chore struct {
 	log         *zap.Logger
+	store       *pieces.Store
 	satelliteDB satellites.DB
+	trust       *trust.Pool
+	dialer      rpc.Dialer
 
 	config Config
@ -38,10 +44,13 @@ type Config struct {
 }
 
 // NewChore instantiates Chore.
-func NewChore(log *zap.Logger, config Config, satelliteDB satellites.DB) *Chore {
+func NewChore(log *zap.Logger, config Config, store *pieces.Store, trust *trust.Pool, dialer rpc.Dialer, satelliteDB satellites.DB) *Chore {
 	return &Chore{
 		log:         log,
+		store:       store,
 		satelliteDB: satelliteDB,
+		trust:       trust,
+		dialer:      dialer,
 		config:      config,
 		Loop:        *sync2.NewCycle(config.ChoreInterval),
 		limiter:     *sync2.NewLimiter(config.NumWorkers),
@ -70,20 +79,26 @@ func (chore *Chore) Run(ctx context.Context) (err error) {
 
 	for _, satellite := range satellites {
 		satelliteID := satellite.SatelliteID
-		worker := NewWorker(chore.log, chore.satelliteDB, satelliteID)
+		addr, err := chore.trust.GetAddress(ctx, satelliteID)
+		if err != nil {
+			chore.log.Error("failed to get satellite address.", zap.Error(err))
+			continue
+		}
+
+		worker := NewWorker(chore.log, chore.store, chore.satelliteDB, chore.dialer, satelliteID, addr)
 		if _, ok := chore.exitingMap.LoadOrStore(satelliteID, worker); ok {
 			// already running a worker for this satellite
-			chore.log.Debug("skipping graceful exit for satellite. worker already exists.", zap.String("satellite ID", satelliteID.String()))
+			chore.log.Debug("skipping graceful exit for satellite. worker already exists.", zap.Stringer("satellite ID", satelliteID))
 			continue
 		}
 
 		chore.limiter.Go(ctx, func() {
-			err := worker.Run(ctx, satelliteID, func() {
-				chore.log.Debug("finished graceful exit for satellite.", zap.String("satellite ID", satelliteID.String()))
+			err := worker.Run(ctx, func() {
+				chore.log.Debug("finished graceful exit for satellite.", zap.Stringer("satellite ID", satelliteID))
 				chore.exitingMap.Delete(satelliteID)
 			})
 			if err != nil {
-				worker.log.Error("worker failed.", zap.Error(err))
+				chore.log.Error("worker failed.", zap.Error(err))
 			}
 		})
 	}
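The exitingMap.LoadOrStore call above is what makes worker startup idempotent across chore iterations: the "store it" and the "is one already running" check happen atomically, so two loop passes cannot both launch a worker for the same satellite. A standalone sketch of the pattern with sync.Map:

    package main

    import (
    	"fmt"
    	"sync"
    )

    func main() {
    	var exiting sync.Map

    	start := func(satelliteID string) {
    		if _, loaded := exiting.LoadOrStore(satelliteID, struct{}{}); loaded {
    			fmt.Println("skipping", satelliteID, "- worker already exists")
    			return
    		}
    		fmt.Println("starting worker for", satelliteID)
    		// A real worker would call exiting.Delete(satelliteID) in its
    		// done callback, as the chore above does.
    	}

    	start("sat-1")
    	start("sat-1") // deduplicated: LoadOrStore is atomic
    }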
@ -4,52 +4,206 @@
 package gracefulexit_test
 
 import (
+	"context"
 	"testing"
 	"time"
 
 	"github.com/stretchr/testify/require"
+	"github.com/zeebo/errs"
 
+	"storj.io/storj/internal/memory"
 	"storj.io/storj/internal/testcontext"
 	"storj.io/storj/internal/testplanet"
+	"storj.io/storj/internal/testrand"
+	"storj.io/storj/pkg/storj"
 	"storj.io/storj/satellite/overlay"
+	"storj.io/storj/storage"
+	"storj.io/storj/storagenode"
+	"storj.io/storj/uplink"
 )
 
 func TestChore(t *testing.T) {
 	testplanet.Run(t, testplanet.Config{
 		SatelliteCount:   1,
-		StorageNodeCount: 8,
+		StorageNodeCount: 9,
 		UplinkCount:      1,
 	}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
 		satellite1 := planet.Satellites[0]
-		exitingNode := planet.StorageNodes[0]
+		uplinkPeer := planet.Uplinks[0]
 
 		satellite1.GracefulExit.Chore.Loop.Pause()
-		exitingNode.GracefulExit.Chore.Loop.Pause()
-
-		exitStatus := overlay.ExitStatusRequest{
-			NodeID:          exitingNode.ID(),
-			ExitInitiatedAt: time.Now(),
+		rs := &uplink.RSConfig{
+			MinThreshold:     4,
+			RepairThreshold:  6,
+			SuccessThreshold: 8,
+			MaxThreshold:     8,
 		}
 
-		_, err := satellite1.Overlay.DB.UpdateExitStatus(ctx, &exitStatus)
+		err := uplinkPeer.UploadWithConfig(ctx, satellite1, rs, "testbucket", "test/path1", testrand.Bytes(5*memory.KiB))
 		require.NoError(t, err)
 
-		err = exitingNode.DB.Satellites().InitiateGracefulExit(ctx, satellite1.ID(), time.Now(), 10000)
+		exitingNode, err := findNodeToExit(ctx, planet, 1)
 		require.NoError(t, err)
 
-		exitProgress, err := exitingNode.DB.Satellites().ListGracefulExits(ctx)
-		require.NoError(t, err)
-		require.Len(t, exitProgress, 1)
-
-		exitingNode.GracefulExit.Chore.Loop.TriggerWait()
-
-		exitProgress, err = exitingNode.DB.Satellites().ListGracefulExits(ctx)
+		nodePieceCounts, err := getNodePieceCounts(ctx, planet)
 		require.NoError(t, err)
 
-		for _, progress := range exitProgress {
-			if progress.SatelliteID == satellite1.ID() {
-				require.NotNil(t, progress.FinishedAt)
+		exitSatellite(ctx, t, planet, exitingNode)
+
+		newNodePieceCounts, err := getNodePieceCounts(ctx, planet)
+		require.NoError(t, err)
+		var newExitingNodeID storj.NodeID
+		for k, v := range newNodePieceCounts {
+			if v > nodePieceCounts[k] {
+				newExitingNodeID = k
 			}
 		}
+		require.NotNil(t, newExitingNodeID)
+		require.NotEqual(t, exitingNode.ID(), newExitingNodeID)
+
+		var newExitingNode *storagenode.Peer
+		for _, node := range planet.StorageNodes {
+			if node.ID() == newExitingNodeID {
+				newExitingNode = node
+			}
+		}
+		require.NotNil(t, newExitingNode)
+
+		// TODO enable this after the satellite endpoint starts updating graceful exit status tables
+		// otherwise this fails because the original exiting node information is still returned in several queries
+		//exitSatellite(ctx, t, planet, newExitingNode)
 	})
 }
+
+func exitSatellite(ctx context.Context, t *testing.T, planet *testplanet.Planet, exitingNode *storagenode.Peer) {
+	satellite1 := planet.Satellites[0]
+	exitingNode.GracefulExit.Chore.Loop.Pause()
+
+	exitStatus := overlay.ExitStatusRequest{
+		NodeID:          exitingNode.ID(),
+		ExitInitiatedAt: time.Now(),
+	}
+
+	_, err := satellite1.Overlay.DB.UpdateExitStatus(ctx, &exitStatus)
+	require.NoError(t, err)
+
+	err = exitingNode.DB.Satellites().InitiateGracefulExit(ctx, satellite1.ID(), time.Now(), 10000)
+	require.NoError(t, err)
+
+	// check that the storage node is exiting
+	exitProgress, err := exitingNode.DB.Satellites().ListGracefulExits(ctx)
+	require.NoError(t, err)
+	require.Len(t, exitProgress, 1)
+
+	// initiate graceful exit on satellite side by running the SN chore.
+	exitingNode.GracefulExit.Chore.Loop.TriggerWait()
+
+	// run the satellite chore to build the transfer queue.
+	satellite1.GracefulExit.Chore.Loop.TriggerWait()
+
+	// check that the satellite knows the storage node is exiting.
+	exitingNodes, err := satellite1.DB.OverlayCache().GetExitingNodes(ctx)
+	require.NoError(t, err)
+	require.Len(t, exitingNodes, 1)
+	require.Equal(t, exitingNode.ID(), exitingNodes[0])
+
+	queueItems, err := satellite1.DB.GracefulExit().GetIncomplete(ctx, exitStatus.NodeID, 10, 0)
+	require.NoError(t, err)
+	require.Len(t, queueItems, 1)
+
+	// run the SN chore again to start processing transfers.
+	exitingNode.GracefulExit.Chore.Loop.TriggerWait()
+
+	// check that there are no more items to process
+	queueItems, err = satellite1.DB.GracefulExit().GetIncomplete(ctx, exitStatus.NodeID, 10, 0)
+	require.NoError(t, err)
+	require.Len(t, queueItems, 0)
+
+	exitProgress, err = exitingNode.DB.Satellites().ListGracefulExits(ctx)
+	require.NoError(t, err)
+	for _, progress := range exitProgress {
+		if progress.SatelliteID == satellite1.ID() {
+			require.NotNil(t, progress.FinishedAt)
+		}
+	}
+
+	// make sure there are no more pieces on the node.
+	namespaces, err := exitingNode.DB.Pieces().ListNamespaces(ctx)
+	require.NoError(t, err)
+	for _, ns := range namespaces {
+		err = exitingNode.DB.Pieces().WalkNamespace(ctx, ns, func(blobInfo storage.BlobInfo) error {
+			return errs.New("found a piece on the node. this shouldn't happen.")
+		})
+		require.NoError(t, err)
+	}
+}
+
+// getNodePieceCounts tallies all the pieces per node.
+func getNodePieceCounts(ctx context.Context, planet *testplanet.Planet) (_ map[storj.NodeID]int, err error) {
+	nodePieceCounts := make(map[storj.NodeID]int)
+	for _, n := range planet.StorageNodes {
+		node := n
+		namespaces, err := node.DB.Pieces().ListNamespaces(ctx)
+		if err != nil {
+			return nil, err
+		}
+		for _, ns := range namespaces {
+			err = node.DB.Pieces().WalkNamespace(ctx, ns, func(blobInfo storage.BlobInfo) error {
+				nodePieceCounts[node.ID()]++
+				return nil
+			})
+			if err != nil {
+				return nil, err
+			}
+		}
+	}
+	return nodePieceCounts, err
+}
+
+// findNodeToExit selects the node storing the most pieces as the node to graceful exit.
+func findNodeToExit(ctx context.Context, planet *testplanet.Planet, objects int) (*storagenode.Peer, error) {
+	satellite := planet.Satellites[0]
+	keys, err := satellite.Metainfo.Database.List(ctx, nil, objects)
+	if err != nil {
+		return nil, err
+	}
+
+	pieceCountMap := make(map[storj.NodeID]int, len(planet.StorageNodes))
+	for _, sn := range planet.StorageNodes {
+		pieceCountMap[sn.ID()] = 0
+	}
+
+	for _, key := range keys {
+		pointer, err := satellite.Metainfo.Service.Get(ctx, string(key))
+		if err != nil {
+			return nil, err
+		}
+		pieces := pointer.GetRemote().GetRemotePieces()
+		for _, piece := range pieces {
+			pieceCountMap[piece.NodeId]++
+		}
+	}
+
+	var exitingNodeID storj.NodeID
+	maxCount := 0
+	for k, v := range pieceCountMap {
+		if exitingNodeID.IsZero() {
+			exitingNodeID = k
+			maxCount = v
+			continue
+		}
+		if v > maxCount {
+			exitingNodeID = k
+			maxCount = v
+		}
+	}
+
+	for _, sn := range planet.StorageNodes {
+		if sn.ID() == exitingNodeID {
+			return sn, nil
+		}
+	}
+
+	return nil, nil
+}
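One detail worth noticing in getNodePieceCounts above is the `node := n` line: it shadows the loop variable before the WalkNamespace closure captures it. Before Go 1.22 changed loop-variable scoping, every closure in a range loop shared one variable, so omitting the shadow could make late-running callbacks all see the last node. A minimal demonstration:

    package main

    import "fmt"

    func main() {
    	var printers []func()
    	for _, n := range []int{1, 2, 3} {
    		n := n // shadow the loop variable so each closure captures its own copy
    		printers = append(printers, func() { fmt.Println(n) })
    	}
    	for _, p := range printers {
    		p() // prints 1 2 3; without the shadow, pre-Go 1.22 builds print 3 3 3
    	}
    }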
@ -68,13 +68,13 @@ func (e *Endpoint) GetNonExitingSatellites(ctx context.Context, req *pb.GetNonEx
 		// get domain name
 		domain, err := e.trust.GetAddress(ctx, trusted)
 		if err != nil {
-			e.log.Debug("graceful exit: get satellite domian name", zap.String("satelliteID", trusted.String()), zap.Error(err))
+			e.log.Debug("graceful exit: get satellite domian name", zap.Stringer("satelliteID", trusted), zap.Error(err))
 			continue
 		}
 		// get space usage by satellites
 		spaceUsed, err := e.usageCache.SpaceUsedBySatellite(ctx, trusted)
 		if err != nil {
-			e.log.Debug("graceful exit: get space used by satellite", zap.String("satelliteID", trusted.String()), zap.Error(err))
+			e.log.Debug("graceful exit: get space used by satellite", zap.Stringer("satelliteID", trusted), zap.Error(err))
 			continue
 		}
 		availableSatellites = append(availableSatellites, &pb.NonExitingSatellite{
@ -91,7 +91,7 @@ func (e *Endpoint) GetNonExitingSatellites(ctx context.Context, req *pb.GetNonEx
 
 // InitiateGracefulExit updates one or more satellites in the storagenode's database to be gracefully exiting.
 func (e *Endpoint) InitiateGracefulExit(ctx context.Context, req *pb.InitiateGracefulExitRequest) (*pb.ExitProgress, error) {
-	e.log.Debug("initialize graceful exit: start", zap.String("satellite ID", req.NodeId.String()))
+	e.log.Debug("initialize graceful exit: start", zap.Stringer("satellite ID", req.NodeId))
 
 	domain, err := e.trust.GetAddress(ctx, req.NodeId)
 	if err != nil {
@ -102,13 +102,13 @@ func (e *Endpoint) InitiateGracefulExit(ctx context.Context, req *pb.InitiateGra
 	// get space usage by satellites
 	spaceUsed, err := e.usageCache.SpaceUsedBySatellite(ctx, req.NodeId)
 	if err != nil {
-		e.log.Debug("initialize graceful exit: retrieve space used", zap.String("Satellite ID", req.NodeId.String()), zap.Error(err))
+		e.log.Debug("initialize graceful exit: retrieve space used", zap.Stringer("Satellite ID", req.NodeId), zap.Error(err))
 		return nil, rpcstatus.Error(rpcstatus.Internal, err.Error())
 	}
 
 	err = e.satellites.InitiateGracefulExit(ctx, req.NodeId, time.Now().UTC(), spaceUsed)
 	if err != nil {
-		e.log.Debug("initialize graceful exit: save info into satellites table", zap.String("Satellite ID", req.NodeId.String()), zap.Error(err))
+		e.log.Debug("initialize graceful exit: save info into satellites table", zap.Stringer("Satellite ID", req.NodeId), zap.Error(err))
 		return nil, rpcstatus.Error(rpcstatus.Internal, err.Error())
 	}
 
@ -132,7 +132,7 @@ func (e *Endpoint) GetExitProgress(ctx context.Context, req *pb.GetExitProgressR
 	for _, progress := range exitProgress {
 		domain, err := e.trust.GetAddress(ctx, progress.SatelliteID)
 		if err != nil {
-			e.log.Debug("graceful exit: get satellite domain name", zap.String("satelliteID", progress.SatelliteID.String()), zap.Error(err))
+			e.log.Debug("graceful exit: get satellite domain name", zap.Stringer("satelliteID", progress.SatelliteID), zap.Error(err))
 			continue
 		}
 
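The zap.String-to-zap.Stringer changes in this file are more than style: zap.Stringer stores the fmt.Stringer and only calls String() when the entry is actually encoded, so a disabled debug log no longer pays for NodeID formatting. A small sketch:

    package main

    import (
    	"fmt"

    	"go.uber.org/zap"
    )

    type id struct{}

    func (id) String() string {
    	fmt.Println("String() called")
    	return "node-id"
    }

    func main() {
    	logger := zap.NewNop() // all levels disabled

    	// zap.String formats eagerly: String() runs even though nothing is logged.
    	logger.Debug("eager", zap.String("satellite ID", id{}.String()))

    	// zap.Stringer defers formatting to encode time, which never happens here.
    	logger.Debug("lazy", zap.Stringer("satellite ID", id{}))
    }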
@ -5,47 +5,220 @@ package gracefulexit
 
 import (
 	"context"
+	"io"
+	"os"
 	"time"
 
 	"github.com/zeebo/errs"
 	"go.uber.org/zap"
 
+	"storj.io/storj/pkg/pb"
+	"storj.io/storj/pkg/rpc"
 	"storj.io/storj/pkg/storj"
+	"storj.io/storj/storagenode/pieces"
+	"storj.io/storj/storagenode/piecestore"
 	"storj.io/storj/storagenode/satellites"
+	"storj.io/storj/uplink/ecclient"
 )
 
 // Worker is responsible for completing the graceful exit for a given satellite.
 type Worker struct {
-	log         *zap.Logger
-	satelliteID storj.NodeID
-	satelliteDB satellites.DB
+	log           *zap.Logger
+	store         *pieces.Store
+	satelliteDB   satellites.DB
+	dialer        rpc.Dialer
+	satelliteID   storj.NodeID
+	satelliteAddr string
+	ecclient      ecclient.Client
 }
 
 // NewWorker instantiates Worker.
-func NewWorker(log *zap.Logger, satelliteDB satellites.DB, satelliteID storj.NodeID) *Worker {
+func NewWorker(log *zap.Logger, store *pieces.Store, satelliteDB satellites.DB, dialer rpc.Dialer, satelliteID storj.NodeID, satelliteAddr string) *Worker {
 	return &Worker{
-		log:         log,
-		satelliteID: satelliteID,
-		satelliteDB: satelliteDB,
+		log:           log,
+		store:         store,
+		satelliteDB:   satelliteDB,
+		dialer:        dialer,
+		satelliteID:   satelliteID,
+		satelliteAddr: satelliteAddr,
+		ecclient:      ecclient.NewClient(log, dialer, 0),
 	}
 }
 
 // Run calls the satellite endpoint, transfers pieces, validates, and responds with success or failure.
 // It also marks the satellite finished once all the pieces have been transferred
-func (worker *Worker) Run(ctx context.Context, satelliteID storj.NodeID, done func()) (err error) {
+// TODO handle transfers in parallel
+func (worker *Worker) Run(ctx context.Context, done func()) (err error) {
 	defer mon.Task()(&ctx)(&err)
 	defer done()
 
 	worker.log.Debug("running worker")
 
-	// TODO actually process the order limits
-	// https://storjlabs.atlassian.net/browse/V3-2613
+	conn, err := worker.dialer.DialAddressID(ctx, worker.satelliteAddr, worker.satelliteID)
+	if err != nil {
+		return errs.Wrap(err)
+	}
+	defer func() {
+		err = errs.Combine(err, conn.Close())
+	}()
+
+	client := conn.SatelliteGracefulExitClient()
+
+	c, err := client.Process(ctx)
+	if err != nil {
+		return errs.Wrap(err)
+	}
+
+	for {
+		response, err := c.Recv()
+		if errs.Is(err, io.EOF) {
+			// Done
+			break
+		}
+		if err != nil {
+			// TODO what happened
+			return errs.Wrap(err)
+		}
+
+		switch msg := response.GetMessage().(type) {
+		case *pb.SatelliteMessage_NotReady:
+			break // wait until next worker execution
+		case *pb.SatelliteMessage_TransferPiece:
+			pieceID := msg.TransferPiece.OriginalPieceId
+			reader, err := worker.store.Reader(ctx, worker.satelliteID, pieceID)
+			if err != nil {
+				transferErr := pb.TransferFailed_UNKNOWN
+				if errs.Is(err, os.ErrNotExist) {
+					transferErr = pb.TransferFailed_NOT_FOUND
+				}
+				worker.log.Error("failed to get piece reader.", zap.Stringer("satellite ID", worker.satelliteID), zap.Stringer("piece ID", pieceID), zap.Error(errs.Wrap(err)))
+				worker.handleFailure(ctx, transferErr, pieceID, c.Send)
+				continue
+			}
+
+			addrLimit := msg.TransferPiece.GetAddressedOrderLimit()
+			pk := msg.TransferPiece.PrivateKey
+
+			originalHash, originalOrderLimit, err := worker.getHashAndLimit(ctx, reader, addrLimit.GetLimit())
+			if err != nil {
+				worker.log.Error("failed to get piece hash and order limit.", zap.Stringer("satellite ID", worker.satelliteID), zap.Stringer("piece ID", pieceID), zap.Error(errs.Wrap(err)))
+				worker.handleFailure(ctx, pb.TransferFailed_UNKNOWN, pieceID, c.Send)
+				continue
+			}
+
+			putCtx, cancel := context.WithCancel(ctx)
+			defer cancel()
+
+			// TODO what's the typical expiration setting?
+			pieceHash, err := worker.ecclient.PutPiece(putCtx, ctx, addrLimit, pk, reader, time.Now().Add(time.Second*600))
+			if err != nil {
+				if piecestore.ErrVerifyUntrusted.Has(err) {
+					worker.log.Error("failed hash verification.", zap.Stringer("satellite ID", worker.satelliteID), zap.Stringer("piece ID", pieceID), zap.Error(errs.Wrap(err)))
+					worker.handleFailure(ctx, pb.TransferFailed_HASH_VERIFICATION, pieceID, c.Send)
+				} else {
+					worker.log.Error("failed to put piece.", zap.Stringer("satellite ID", worker.satelliteID), zap.Stringer("piece ID", pieceID), zap.Error(errs.Wrap(err)))
+					// TODO look at error type to decide on the transfer error
+					worker.handleFailure(ctx, pb.TransferFailed_STORAGE_NODE_UNAVAILABLE, pieceID, c.Send)
+				}
+				continue
+			}
+
+			success := &pb.StorageNodeMessage{
+				Message: &pb.StorageNodeMessage_Succeeded{
+					Succeeded: &pb.TransferSucceeded{
+						OriginalPieceId:      msg.TransferPiece.OriginalPieceId,
+						OriginalPieceHash:    originalHash,
+						OriginalOrderLimit:   originalOrderLimit,
+						ReplacementPieceHash: pieceHash,
+					},
+				},
+			}
+			err = c.Send(success)
+			if err != nil {
+				return errs.Wrap(err)
+			}
+		case *pb.SatelliteMessage_DeletePiece:
+			pieceID := msg.DeletePiece.OriginalPieceId
+			err := worker.store.Delete(ctx, worker.satelliteID, pieceID)
+			if err != nil {
+				worker.log.Error("failed to delete piece.", zap.Stringer("satellite ID", worker.satelliteID), zap.Stringer("piece ID", pieceID), zap.Error(errs.Wrap(err)))
+			}
+		case *pb.SatelliteMessage_ExitFailed:
+			worker.log.Error("graceful exit failed.", zap.Stringer("satellite ID", worker.satelliteID), zap.Stringer("reason", msg.ExitFailed.Reason))
+
+			err = worker.satelliteDB.CompleteGracefulExit(ctx, worker.satelliteID, time.Now(), satellites.ExitFailed, msg.ExitFailed.GetExitFailureSignature())
+			if err != nil {
+				return errs.Wrap(err)
+			}
+			break
+		case *pb.SatelliteMessage_ExitCompleted:
+			worker.log.Info("graceful exit completed.", zap.Stringer("satellite ID", worker.satelliteID))
+
+			err = worker.satelliteDB.CompleteGracefulExit(ctx, worker.satelliteID, time.Now(), satellites.ExitSucceeded, msg.ExitCompleted.GetExitCompleteSignature())
+			if err != nil {
+				return errs.Wrap(err)
+			}
+			break
+		default:
+			// TODO handle err
+			worker.log.Error("unknown graceful exit message.", zap.Stringer("satellite ID", worker.satelliteID))
+		}
+
+	}
 
-	err = worker.satelliteDB.CompleteGracefulExit(ctx, satelliteID, time.Now(), satellites.ExitSucceeded, []byte{})
 	return errs.Wrap(err)
 }
 
+func (worker *Worker) handleFailure(ctx context.Context, transferError pb.TransferFailed_Error, pieceID pb.PieceID, send func(*pb.StorageNodeMessage) error) {
+	failure := &pb.StorageNodeMessage{
+		Message: &pb.StorageNodeMessage_Failed{
+			Failed: &pb.TransferFailed{
+				OriginalPieceId: pieceID,
+				Error:           transferError,
+			},
+		},
+	}
+
+	sendErr := send(failure)
+	if sendErr != nil {
+		worker.log.Error("unable to send failure.", zap.Stringer("satellite ID", worker.satelliteID))
+	}
+}
+
 // Close halts the worker.
 func (worker *Worker) Close() error {
 	// TODO not sure this is needed yet.
 	return nil
 }
+
+// TODO This comes from piecestore.Endpoint. It should probably be an exported method so I don't have to duplicate it here.
+func (worker *Worker) getHashAndLimit(ctx context.Context, pieceReader *pieces.Reader, limit *pb.OrderLimit) (pieceHash *pb.PieceHash, orderLimit *pb.OrderLimit, err error) {
+
+	if pieceReader.StorageFormatVersion() == 0 {
+		// v0 stores this information in SQL
+		info, err := worker.store.GetV0PieceInfoDB().Get(ctx, limit.SatelliteId, limit.PieceId)
+		if err != nil {
+			worker.log.Error("error getting piece from v0 pieceinfo db", zap.Error(err))
+			return nil, nil, err
+		}
+		orderLimit = info.OrderLimit
+		pieceHash = info.UplinkPieceHash
+	} else {
+		//v1+ stores this information in the file
+		header, err := pieceReader.GetPieceHeader()
+		if err != nil {
+			worker.log.Error("error getting header from piecereader", zap.Error(err))
+			return nil, nil, err
+		}
+		orderLimit = &header.OrderLimit
+		pieceHash = &pb.PieceHash{
+			PieceId:   orderLimit.PieceId,
+			Hash:      header.GetHash(),
+			PieceSize: pieceReader.Size(),
+			Timestamp: header.GetCreationTime(),
+			Signature: header.GetSignature(),
+		}
+	}
+
+	return
+}
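A Go subtlety in the worker's receive loop above: the bare `break` statements inside the switch cases (NotReady, ExitFailed, ExitCompleted) exit only the switch, not the surrounding for; the loop actually ends when Recv returns io.EOF. If a case did need to leave the loop directly, a labeled break would be required:

    package main

    import "fmt"

    func main() {
    recv:
    	for i := 0; i < 5; i++ {
    		switch i {
    		case 1:
    			break // exits only this switch; the for loop keeps going
    		case 3:
    			break recv // a labeled break is needed to leave the loop itself
    		}
    		fmt.Println("handled", i)
    	}
    	fmt.Println("done")
    }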
@ -397,6 +397,9 @@ func New(log *zap.Logger, full *identity.FullIdentity, db DB, revocationDB exten
 		peer.GracefulExit.Chore = gracefulexit.NewChore(
 			peer.Log.Named("gracefulexit:chore"),
 			config.GracefulExit,
+			peer.Storage2.Store,
+			peer.Storage2.Trust,
+			peer.Dialer,
 			peer.DB.Satellites(),
 		)
 	}
@ -33,6 +33,8 @@ type Client interface {
 	Get(ctx context.Context, limits []*pb.AddressedOrderLimit, privateKey storj.PiecePrivateKey, es eestream.ErasureScheme, size int64) (ranger.Ranger, error)
 	Delete(ctx context.Context, limits []*pb.AddressedOrderLimit, privateKey storj.PiecePrivateKey) error
 	WithForceErrorDetection(force bool) Client
+	// PutPiece is not intended to be used by normal uplinks directly, but is exported to support storagenode graceful exit transfers.
+	PutPiece(ctx, parent context.Context, limit *pb.AddressedOrderLimit, privateKey storj.PiecePrivateKey, data io.ReadCloser, expiration time.Time) (hash *pb.PieceHash, err error)
 }
 
 type dialPiecestoreFunc func(context.Context, *pb.Node) (*piecestore.Client, error)
@ -105,7 +107,7 @@ func (ec *ecClient) Put(ctx context.Context, limits []*pb.AddressedOrderLimit, p
 
 	for i, addressedLimit := range limits {
 		go func(i int, addressedLimit *pb.AddressedOrderLimit) {
-			hash, err := ec.putPiece(psCtx, ctx, addressedLimit, privateKey, readers[i], expiration)
+			hash, err := ec.PutPiece(psCtx, ctx, addressedLimit, privateKey, readers[i], expiration)
 			infos <- info{i: i, err: err, hash: hash}
 		}(i, addressedLimit)
 	}
@ -175,7 +177,7 @@ func (ec *ecClient) Put(ctx context.Context, limits []*pb.AddressedOrderLimit, p
 	return successfulNodes, successfulHashes, nil
 }
 
-func (ec *ecClient) putPiece(ctx, parent context.Context, limit *pb.AddressedOrderLimit, privateKey storj.PiecePrivateKey, data io.ReadCloser, expiration time.Time) (hash *pb.PieceHash, err error) {
+func (ec *ecClient) PutPiece(ctx, parent context.Context, limit *pb.AddressedOrderLimit, privateKey storj.PiecePrivateKey, data io.ReadCloser, expiration time.Time) (hash *pb.PieceHash, err error) {
 	nodeName := "nil"
 	if limit != nil {
 		nodeName = limit.GetLimit().StorageNodeId.String()[0:8]
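PutPiece's unusual (ctx, parent) parameter pair mirrors how Put uses it: the first context can be canceled to drop slow "long tail" uploads while the parent context keeps the overall operation alive. A standalone sketch of that shape (the names are mine, not the ecclient API):

    package main

    import (
    	"context"
    	"fmt"
    	"time"
    )

    // putPiece mimics the (ctx, parent) pair: ctx cancels just this upload,
    // parent represents the request as a whole.
    func putPiece(ctx, parent context.Context, d time.Duration) error {
    	select {
    	case <-time.After(d):
    		return nil
    	case <-ctx.Done():
    		return fmt.Errorf("upload canceled: %w", ctx.Err())
    	case <-parent.Done():
    		return fmt.Errorf("request canceled: %w", parent.Err())
    	}
    }

    func main() {
    	parent := context.Background()
    	psCtx, cancel := context.WithCancel(parent)

    	fast := make(chan error, 1)
    	go func() { fast <- putPiece(psCtx, parent, 10*time.Millisecond) }()
    	slow := make(chan error, 1)
    	go func() { slow <- putPiece(psCtx, parent, time.Second) }()

    	fmt.Println("fast:", <-fast) // nil: finished before cancellation
    	cancel()                     // long-tail cancellation once enough pieces succeed
    	fmt.Println("slow:", <-slow) // canceled, but parent is untouched
    }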