Add timestamp and piece size to piece hash (#2198)

This commit is contained in:
Michal Niewrzal 2019-07-03 18:14:37 +02:00 committed by GitHub
parent 2f6e193c32
commit 61dfa61e3a
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
8 changed files with 257 additions and 107 deletions

2
go.mod
View File

@ -115,7 +115,7 @@ require (
go.uber.org/multierr v1.1.0 // indirect
go.uber.org/zap v1.10.0
golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8
golang.org/x/net v0.0.0-20190628185345-da137c7871d7 // indirect
golang.org/x/net v0.0.0-20190628185345-da137c7871d7
golang.org/x/sync v0.0.0-20190423024810-112230192c58
golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb
golang.org/x/text v0.3.2 // indirect

View File

@ -3,15 +3,17 @@
package pb
import proto "github.com/gogo/protobuf/proto"
import fmt "fmt"
import math "math"
import _ "github.com/gogo/protobuf/gogoproto"
import timestamp "github.com/golang/protobuf/ptypes/timestamp"
import time "time"
import (
context "context"
fmt "fmt"
_ "github.com/gogo/protobuf/gogoproto"
proto "github.com/gogo/protobuf/proto"
timestamp "github.com/golang/protobuf/ptypes/timestamp"
context "golang.org/x/net/context"
grpc "google.golang.org/grpc"
math "math"
time "time"
)
// Reference imports to suppress errors if they are not otherwise used.
@ -48,7 +50,6 @@ var PieceAction_name = map[int32]string{
5: "PUT_REPAIR",
6: "DELETE",
}
var PieceAction_value = map[string]int32{
"INVALID": 0,
"PUT": 1,
@ -62,9 +63,8 @@ var PieceAction_value = map[string]int32{
// String returns the registered proto enum name for the action (e.g. "GET", "PUT_REPAIR").
func (x PieceAction) String() string {
return proto.EnumName(PieceAction_name, int32(x))
}
func (PieceAction) EnumDescriptor() ([]byte, []int) {
return fileDescriptor_e0f5d4cf0fc9e41b, []int{0}
return fileDescriptor_orders_3cfba8bd45227bd2, []int{0}
}
type SettlementResponse_Status int32
@ -80,7 +80,6 @@ var SettlementResponse_Status_name = map[int32]string{
1: "ACCEPTED",
2: "REJECTED",
}
var SettlementResponse_Status_value = map[string]int32{
"INVALID": 0,
"ACCEPTED": 1,
@ -90,9 +89,8 @@ var SettlementResponse_Status_value = map[string]int32{
// String returns the registered proto enum name for the status (e.g. "ACCEPTED", "REJECTED").
func (x SettlementResponse_Status) String() string {
return proto.EnumName(SettlementResponse_Status_name, int32(x))
}
func (SettlementResponse_Status) EnumDescriptor() ([]byte, []int) {
return fileDescriptor_e0f5d4cf0fc9e41b, []int{4, 0}
return fileDescriptor_orders_3cfba8bd45227bd2, []int{4, 0}
}
// OrderLimit is provided by satellite to execute specific action on storage node within some limits
@ -126,7 +124,7 @@ func (m *OrderLimit) Reset() { *m = OrderLimit{} }
func (m *OrderLimit) String() string { return proto.CompactTextString(m) }
func (*OrderLimit) ProtoMessage() {}
func (*OrderLimit) Descriptor() ([]byte, []int) {
return fileDescriptor_e0f5d4cf0fc9e41b, []int{0}
return fileDescriptor_orders_3cfba8bd45227bd2, []int{0}
}
func (m *OrderLimit) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_OrderLimit.Unmarshal(m, b)
@ -134,8 +132,8 @@ func (m *OrderLimit) XXX_Unmarshal(b []byte) error {
func (m *OrderLimit) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_OrderLimit.Marshal(b, m, deterministic)
}
func (m *OrderLimit) XXX_Merge(src proto.Message) {
xxx_messageInfo_OrderLimit.Merge(m, src)
func (dst *OrderLimit) XXX_Merge(src proto.Message) {
xxx_messageInfo_OrderLimit.Merge(dst, src)
}
func (m *OrderLimit) XXX_Size() int {
return xxx_messageInfo_OrderLimit.Size(m)
@ -212,7 +210,7 @@ func (m *Order) Reset() { *m = Order{} }
func (m *Order) String() string { return proto.CompactTextString(m) }
func (*Order) ProtoMessage() {}
func (*Order) Descriptor() ([]byte, []int) {
return fileDescriptor_e0f5d4cf0fc9e41b, []int{1}
return fileDescriptor_orders_3cfba8bd45227bd2, []int{1}
}
func (m *Order) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_Order.Unmarshal(m, b)
@ -220,8 +218,8 @@ func (m *Order) XXX_Unmarshal(b []byte) error {
func (m *Order) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_Order.Marshal(b, m, deterministic)
}
func (m *Order) XXX_Merge(src proto.Message) {
xxx_messageInfo_Order.Merge(m, src)
func (dst *Order) XXX_Merge(src proto.Message) {
xxx_messageInfo_Order.Merge(dst, src)
}
func (m *Order) XXX_Size() int {
return xxx_messageInfo_Order.Size(m)
@ -252,17 +250,21 @@ type PieceHash struct {
// hash of the piece that was/is uploaded
Hash []byte `protobuf:"bytes,2,opt,name=hash,proto3" json:"hash,omitempty"`
// signature either satellite or storage node
Signature []byte `protobuf:"bytes,3,opt,name=signature,proto3" json:"signature,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
Signature []byte `protobuf:"bytes,3,opt,name=signature,proto3" json:"signature,omitempty"`
// size of uploaded piece
PieceSize int64 `protobuf:"varint,4,opt,name=piece_size,json=pieceSize,proto3" json:"piece_size,omitempty"`
// timestamp when the upload occurred
Timestamp time.Time `protobuf:"bytes,5,opt,name=timestamp,proto3,stdtime" json:"timestamp"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
// Reset clears the message to its zero value.
func (m *PieceHash) Reset() { *m = PieceHash{} }
// String renders the message in the proto compact text format.
func (m *PieceHash) String() string { return proto.CompactTextString(m) }
// ProtoMessage marks *PieceHash as implementing proto.Message.
func (*PieceHash) ProtoMessage() {}
func (*PieceHash) Descriptor() ([]byte, []int) {
return fileDescriptor_e0f5d4cf0fc9e41b, []int{2}
return fileDescriptor_orders_3cfba8bd45227bd2, []int{2}
}
func (m *PieceHash) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_PieceHash.Unmarshal(m, b)
@ -270,8 +272,8 @@ func (m *PieceHash) XXX_Unmarshal(b []byte) error {
func (m *PieceHash) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_PieceHash.Marshal(b, m, deterministic)
}
func (m *PieceHash) XXX_Merge(src proto.Message) {
xxx_messageInfo_PieceHash.Merge(m, src)
func (dst *PieceHash) XXX_Merge(src proto.Message) {
xxx_messageInfo_PieceHash.Merge(dst, src)
}
func (m *PieceHash) XXX_Size() int {
return xxx_messageInfo_PieceHash.Size(m)
@ -296,6 +298,20 @@ func (m *PieceHash) GetSignature() []byte {
return nil
}
// GetPieceSize returns the size of the uploaded piece in bytes,
// or 0 when the receiver is nil.
func (m *PieceHash) GetPieceSize() int64 {
	if m == nil {
		return 0
	}
	return m.PieceSize
}
// GetTimestamp returns the upload timestamp recorded in the hash,
// or the zero time.Time when the receiver is nil.
func (m *PieceHash) GetTimestamp() time.Time {
	if m == nil {
		return time.Time{}
	}
	return m.Timestamp
}
type SettlementRequest struct {
Limit *OrderLimit `protobuf:"bytes,1,opt,name=limit,proto3" json:"limit,omitempty"`
Order *Order `protobuf:"bytes,2,opt,name=order,proto3" json:"order,omitempty"`
@ -308,7 +324,7 @@ func (m *SettlementRequest) Reset() { *m = SettlementRequest{} }
func (m *SettlementRequest) String() string { return proto.CompactTextString(m) }
func (*SettlementRequest) ProtoMessage() {}
func (*SettlementRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_e0f5d4cf0fc9e41b, []int{3}
return fileDescriptor_orders_3cfba8bd45227bd2, []int{3}
}
func (m *SettlementRequest) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_SettlementRequest.Unmarshal(m, b)
@ -316,8 +332,8 @@ func (m *SettlementRequest) XXX_Unmarshal(b []byte) error {
func (m *SettlementRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_SettlementRequest.Marshal(b, m, deterministic)
}
func (m *SettlementRequest) XXX_Merge(src proto.Message) {
xxx_messageInfo_SettlementRequest.Merge(m, src)
func (dst *SettlementRequest) XXX_Merge(src proto.Message) {
xxx_messageInfo_SettlementRequest.Merge(dst, src)
}
func (m *SettlementRequest) XXX_Size() int {
return xxx_messageInfo_SettlementRequest.Size(m)
@ -354,7 +370,7 @@ func (m *SettlementResponse) Reset() { *m = SettlementResponse{} }
func (m *SettlementResponse) String() string { return proto.CompactTextString(m) }
func (*SettlementResponse) ProtoMessage() {}
func (*SettlementResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_e0f5d4cf0fc9e41b, []int{4}
return fileDescriptor_orders_3cfba8bd45227bd2, []int{4}
}
func (m *SettlementResponse) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_SettlementResponse.Unmarshal(m, b)
@ -362,8 +378,8 @@ func (m *SettlementResponse) XXX_Unmarshal(b []byte) error {
func (m *SettlementResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_SettlementResponse.Marshal(b, m, deterministic)
}
func (m *SettlementResponse) XXX_Merge(src proto.Message) {
xxx_messageInfo_SettlementResponse.Merge(m, src)
func (dst *SettlementResponse) XXX_Merge(src proto.Message) {
xxx_messageInfo_SettlementResponse.Merge(dst, src)
}
func (m *SettlementResponse) XXX_Size() int {
return xxx_messageInfo_SettlementResponse.Size(m)
@ -382,63 +398,13 @@ func (m *SettlementResponse) GetStatus() SettlementResponse_Status {
}
// init registers the orders enums and message types with the gogo/protobuf
// type registry so they can be resolved by fully-qualified name at runtime.
func init() {
proto.RegisterEnum("orders.PieceAction", PieceAction_name, PieceAction_value)
proto.RegisterEnum("orders.SettlementResponse_Status", SettlementResponse_Status_name, SettlementResponse_Status_value)
proto.RegisterType((*OrderLimit)(nil), "orders.OrderLimit")
proto.RegisterType((*Order)(nil), "orders.Order")
proto.RegisterType((*PieceHash)(nil), "orders.PieceHash")
proto.RegisterType((*SettlementRequest)(nil), "orders.SettlementRequest")
proto.RegisterType((*SettlementResponse)(nil), "orders.SettlementResponse")
}
func init() { proto.RegisterFile("orders.proto", fileDescriptor_e0f5d4cf0fc9e41b) }
var fileDescriptor_e0f5d4cf0fc9e41b = []byte{
// 697 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x53, 0xcf, 0x6e, 0xd3, 0x4e,
0x10, 0xee, 0xe6, 0x8f, 0x93, 0x4c, 0xfe, 0xb9, 0xdb, 0xea, 0xa7, 0xfc, 0x22, 0xa4, 0x84, 0x70,
0x09, 0xad, 0x94, 0xd2, 0x20, 0x21, 0xf5, 0x82, 0x94, 0x26, 0x56, 0x31, 0x54, 0x25, 0xda, 0xa4,
0x1c, 0xb8, 0x44, 0x4e, 0xbd, 0xa4, 0x16, 0x89, 0x6d, 0xbc, 0x6b, 0x89, 0x17, 0xe0, 0xce, 0x99,
0x77, 0xe1, 0xce, 0x33, 0x70, 0x28, 0xaf, 0x82, 0x76, 0xd6, 0x4e, 0x52, 0x28, 0xea, 0xa1, 0x37,
0x7f, 0x33, 0xdf, 0x37, 0xe3, 0x99, 0xf9, 0x16, 0x2a, 0x41, 0xe4, 0xf2, 0x48, 0xf4, 0xc2, 0x28,
0x90, 0x01, 0x35, 0x34, 0x6a, 0xc2, 0x22, 0x58, 0x04, 0x3a, 0xd6, 0x6c, 0x2d, 0x82, 0x60, 0xb1,
0xe4, 0x47, 0x88, 0xe6, 0xf1, 0x87, 0x23, 0xe9, 0xad, 0xb8, 0x90, 0xce, 0x2a, 0x4c, 0x08, 0xe0,
0x07, 0x2e, 0xd7, 0xdf, 0x9d, 0x6f, 0x79, 0x80, 0xb7, 0xaa, 0xc6, 0xb9, 0xb7, 0xf2, 0x24, 0x3d,
0x81, 0xaa, 0xe0, 0x91, 0xe7, 0x2c, 0x67, 0x7e, 0xbc, 0x9a, 0xf3, 0xa8, 0x41, 0xda, 0xa4, 0x5b,
0x39, 0xdd, 0xff, 0x71, 0xd3, 0xda, 0xf9, 0x79, 0xd3, 0xaa, 0x4c, 0x30, 0x79, 0x81, 0x39, 0x56,
0x11, 0x5b, 0x88, 0x1e, 0x43, 0x45, 0x38, 0x92, 0x2f, 0x97, 0x9e, 0xe4, 0x33, 0xcf, 0x6d, 0x64,
0x50, 0x59, 0x4b, 0x94, 0xc6, 0x45, 0xe0, 0x72, 0x7b, 0xc4, 0xca, 0x6b, 0x8e, 0xed, 0xd2, 0x43,
0x28, 0xc5, 0xe1, 0xd2, 0xf3, 0x3f, 0x2a, 0x7e, 0xf6, 0x4e, 0x7e, 0x51, 0x13, 0x6c, 0x97, 0xbe,
0x80, 0xba, 0x90, 0x41, 0xe4, 0x2c, 0xf8, 0x4c, 0xfd, 0xbf, 0x92, 0xe4, 0xee, 0x94, 0x54, 0x13,
0x1a, 0x42, 0x97, 0x1e, 0x40, 0x31, 0xf4, 0xf8, 0x15, 0x0a, 0xf2, 0x28, 0xa8, 0x27, 0x82, 0xc2,
0x58, 0xc5, 0xed, 0x11, 0x2b, 0x20, 0xc1, 0x76, 0xe9, 0x3e, 0xe4, 0x97, 0x6a, 0x0f, 0x0d, 0xa3,
0x4d, 0xba, 0x59, 0xa6, 0x01, 0x3d, 0x04, 0xc3, 0xb9, 0x92, 0x5e, 0xe0, 0x37, 0x0a, 0x6d, 0xd2,
0xad, 0xf5, 0xf7, 0x7a, 0xc9, 0x0d, 0x50, 0x3f, 0xc0, 0x14, 0x4b, 0x28, 0xd4, 0x02, 0x53, 0xb7,
0xe3, 0x9f, 0x43, 0x2f, 0x72, 0x50, 0x56, 0x6c, 0x93, 0x6e, 0xb9, 0xdf, 0xec, 0xe9, 0xc3, 0xf4,
0xd2, 0xc3, 0xf4, 0xa6, 0xe9, 0x61, 0x58, 0x1d, 0x35, 0xd6, 0x5a, 0xa2, 0xca, 0x60, 0x93, 0xed,
0x32, 0xa5, 0xfb, 0xcb, 0xa0, 0x66, 0xab, 0xcc, 0x1b, 0xa8, 0xe9, 0x32, 0x57, 0x11, 0xd7, 0x45,
0x2a, 0xf7, 0x15, 0x39, 0x2d, 0xaa, 0xf5, 0x7c, 0xfd, 0xd5, 0x22, 0xac, 0x8a, 0xda, 0x61, 0x22,
0xa5, 0x47, 0xb0, 0xb7, 0xb9, 0xb0, 0xf0, 0x16, 0xbe, 0x23, 0xe3, 0x88, 0x37, 0x40, 0x2d, 0x95,
0xd1, 0x75, 0x6a, 0x92, 0x66, 0xe8, 0x4b, 0xd8, 0xdd, 0x08, 0x1c, 0xd7, 0x8d, 0xb8, 0x10, 0x8d,
0x32, 0xfe, 0xc0, 0x6e, 0x0f, 0x4d, 0xa8, 0x6e, 0x34, 0xd0, 0x09, 0x66, 0xae, 0xb9, 0x49, 0xa4,
0xf3, 0x85, 0x40, 0x1e, 0xcd, 0xf9, 0x10, 0x5f, 0xfe, 0x07, 0x86, 0xb3, 0x0a, 0x62, 0x5f, 0xa2,
0x23, 0xb3, 0x2c, 0x41, 0xf4, 0x29, 0x98, 0x89, 0xf9, 0x36, 0xa3, 0xa0, 0x07, 0x59, 0x5d, 0xc7,
0xd7, 0x73, 0x74, 0x3c, 0x28, 0xe1, 0xa9, 0x5f, 0x39, 0xe2, 0xfa, 0x96, 0x9f, 0xc8, 0x3d, 0x7e,
0xa2, 0x90, 0xbb, 0x76, 0xc4, 0xb5, 0x7e, 0x0b, 0x0c, 0xbf, 0xe9, 0x23, 0x28, 0xfd, 0xd9, 0x70,
0x13, 0xe8, 0xcc, 0x61, 0x77, 0xc2, 0xa5, 0x5c, 0xf2, 0x15, 0xf7, 0x25, 0xe3, 0x9f, 0x62, 0x2e,
0x24, 0xed, 0xa6, 0xb6, 0x24, 0xb8, 0x3b, 0x9a, 0xfa, 0x6f, 0xf3, 0x70, 0x53, 0xab, 0x3e, 0x81,
0x3c, 0xe6, 0xb0, 0x63, 0xb9, 0x5f, 0xbd, 0xc5, 0x64, 0x3a, 0xd7, 0xf9, 0x4e, 0x80, 0x6e, 0x37,
0x11, 0x61, 0xe0, 0x0b, 0xfe, 0x90, 0x1d, 0x9f, 0x80, 0x21, 0xa4, 0x23, 0x63, 0x81, 0x7d, 0x6b,
0xfd, 0xc7, 0x69, 0xdf, 0xbf, 0xdb, 0xf4, 0x26, 0x48, 0x64, 0x89, 0xa0, 0x73, 0x0c, 0x86, 0x8e,
0xd0, 0x32, 0x14, 0xec, 0x8b, 0x77, 0x83, 0x73, 0x7b, 0x64, 0xee, 0xd0, 0x0a, 0x14, 0x07, 0xc3,
0xa1, 0x35, 0x9e, 0x5a, 0x23, 0x93, 0x28, 0xc4, 0xac, 0xd7, 0xd6, 0x50, 0xa1, 0xcc, 0xc1, 0x02,
0xca, 0x5b, 0x2f, 0xef, 0xb6, 0xae, 0x00, 0xd9, 0xf1, 0xe5, 0xd4, 0x24, 0xea, 0xe3, 0xcc, 0x9a,
0x9a, 0x19, 0x5a, 0x85, 0xd2, 0x99, 0x35, 0x9d, 0x0d, 0x2e, 0x47, 0xf6, 0xd4, 0xcc, 0xd2, 0x1a,
0x80, 0x82, 0xcc, 0x1a, 0x0f, 0x6c, 0x66, 0xe6, 0x14, 0x1e, 0x5f, 0xae, 0x71, 0x9e, 0x02, 0x18,
0x23, 0xeb, 0xdc, 0x9a, 0x5a, 0xa6, 0xd1, 0x9f, 0x80, 0x81, 0x8b, 0x13, 0xd4, 0x06, 0xd8, 0x8c,
0x42, 0xff, 0xbf, 0x6b, 0x3c, 0x3c, 0x55, 0xb3, 0xf9, 0xef, 0xc9, 0x3b, 0x3b, 0x5d, 0xf2, 0x8c,
0x9c, 0xe6, 0xde, 0x67, 0xc2, 0xf9, 0xdc, 0xc0, 0x87, 0xf7, 0xfc, 0x77, 0x00, 0x00, 0x00, 0xff,
0xff, 0x7b, 0xc5, 0x41, 0x5c, 0xcf, 0x05, 0x00, 0x00,
proto.RegisterEnum("orders.PieceAction", PieceAction_name, PieceAction_value)
proto.RegisterEnum("orders.SettlementResponse_Status", SettlementResponse_Status_name, SettlementResponse_Status_value)
}
// Reference imports to suppress errors if they are not otherwise used.
@ -544,3 +510,55 @@ var _Orders_serviceDesc = grpc.ServiceDesc{
},
Metadata: "orders.proto",
}
func init() { proto.RegisterFile("orders.proto", fileDescriptor_orders_3cfba8bd45227bd2) }
var fileDescriptor_orders_3cfba8bd45227bd2 = []byte{
// 727 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x54, 0x4d, 0x6f, 0xd3, 0x40,
0x10, 0xed, 0xe6, 0xc3, 0x49, 0x26, 0x5f, 0xee, 0xb6, 0x42, 0x21, 0x02, 0x25, 0x84, 0x4b, 0x68,
0xa5, 0x94, 0x06, 0x09, 0xa9, 0x17, 0xa4, 0x7c, 0x58, 0xc5, 0x50, 0x95, 0x68, 0x93, 0x72, 0xe0,
0x12, 0x39, 0xf5, 0x92, 0x5a, 0x24, 0x76, 0xf0, 0x6e, 0x24, 0xd4, 0x3b, 0x77, 0xce, 0xfc, 0x17,
0xee, 0x1c, 0xf8, 0x05, 0x1c, 0xca, 0x5f, 0x41, 0x3b, 0xeb, 0xc4, 0x29, 0x14, 0x15, 0xa9, 0xb7,
0x7d, 0x33, 0xef, 0xcd, 0x78, 0x76, 0xde, 0x1a, 0x0a, 0x41, 0xe8, 0xf2, 0x50, 0xb4, 0x16, 0x61,
0x20, 0x03, 0x6a, 0x68, 0x54, 0x85, 0x69, 0x30, 0x0d, 0x74, 0xac, 0x5a, 0x9b, 0x06, 0xc1, 0x74,
0xc6, 0x0f, 0x10, 0x4d, 0x96, 0xef, 0x0f, 0xa4, 0x37, 0xe7, 0x42, 0x3a, 0xf3, 0x45, 0x44, 0x00,
0x3f, 0x70, 0xb9, 0x3e, 0x37, 0xbe, 0xa6, 0x01, 0xde, 0xa8, 0x1a, 0x27, 0xde, 0xdc, 0x93, 0xf4,
0x08, 0x8a, 0x82, 0x87, 0x9e, 0x33, 0x1b, 0xfb, 0xcb, 0xf9, 0x84, 0x87, 0x15, 0x52, 0x27, 0xcd,
0x42, 0x77, 0xf7, 0xfb, 0x55, 0x6d, 0xeb, 0xe7, 0x55, 0xad, 0x30, 0xc4, 0xe4, 0x29, 0xe6, 0x58,
0x41, 0x6c, 0x20, 0x7a, 0x08, 0x05, 0xe1, 0x48, 0x3e, 0x9b, 0x79, 0x92, 0x8f, 0x3d, 0xb7, 0x92,
0x40, 0x65, 0x29, 0x52, 0x1a, 0xa7, 0x81, 0xcb, 0xed, 0x3e, 0xcb, 0xaf, 0x39, 0xb6, 0x4b, 0xf7,
0x21, 0xb7, 0x5c, 0xcc, 0x3c, 0xff, 0x83, 0xe2, 0x27, 0x6f, 0xe4, 0x67, 0x35, 0xc1, 0x76, 0xe9,
0x73, 0x28, 0x0b, 0x19, 0x84, 0xce, 0x94, 0x8f, 0xd5, 0xf7, 0x2b, 0x49, 0xea, 0x46, 0x49, 0x31,
0xa2, 0x21, 0x74, 0xe9, 0x1e, 0x64, 0x17, 0x1e, 0x3f, 0x47, 0x41, 0x1a, 0x05, 0xe5, 0x48, 0x90,
0x19, 0xa8, 0xb8, 0xdd, 0x67, 0x19, 0x24, 0xd8, 0x2e, 0xdd, 0x85, 0xf4, 0x4c, 0xdd, 0x43, 0xc5,
0xa8, 0x93, 0x66, 0x92, 0x69, 0x40, 0xf7, 0xc1, 0x70, 0xce, 0xa5, 0x17, 0xf8, 0x95, 0x4c, 0x9d,
0x34, 0x4b, 0xed, 0x9d, 0x56, 0xb4, 0x03, 0xd4, 0x77, 0x30, 0xc5, 0x22, 0x0a, 0xb5, 0xc0, 0xd4,
0xed, 0xf8, 0xa7, 0x85, 0x17, 0x3a, 0x28, 0xcb, 0xd6, 0x49, 0x33, 0xdf, 0xae, 0xb6, 0xf4, 0x62,
0x5a, 0xab, 0xc5, 0xb4, 0x46, 0xab, 0xc5, 0xb0, 0x32, 0x6a, 0xac, 0xb5, 0x44, 0x95, 0xc1, 0x26,
0x9b, 0x65, 0x72, 0xb7, 0x97, 0x41, 0xcd, 0x46, 0x99, 0xd7, 0x50, 0xd2, 0x65, 0xce, 0x43, 0xae,
0x8b, 0x14, 0x6e, 0x2b, 0xd2, 0xcd, 0xaa, 0xeb, 0xf9, 0xf2, 0xab, 0x46, 0x58, 0x11, 0xb5, 0xbd,
0x48, 0x4a, 0x0f, 0x60, 0x27, 0xde, 0xb0, 0xf0, 0xa6, 0xbe, 0x23, 0x97, 0x21, 0xaf, 0x80, 0xba,
0x54, 0x46, 0xd7, 0xa9, 0xe1, 0x2a, 0x43, 0x5f, 0xc0, 0x76, 0x2c, 0x70, 0x5c, 0x37, 0xe4, 0x42,
0x54, 0xf2, 0xf8, 0x01, 0xdb, 0x2d, 0x34, 0xa1, 0xda, 0x51, 0x47, 0x27, 0x98, 0xb9, 0xe6, 0x46,
0x91, 0xc6, 0x67, 0x02, 0x69, 0x34, 0xe7, 0x5d, 0x7c, 0x79, 0x0f, 0x0c, 0x67, 0x1e, 0x2c, 0x7d,
0x89, 0x8e, 0x4c, 0xb2, 0x08, 0xd1, 0x27, 0x60, 0x46, 0xe6, 0x8b, 0x47, 0x41, 0x0f, 0xb2, 0xb2,
0x8e, 0xaf, 0xe7, 0x68, 0xfc, 0x20, 0x90, 0xc3, 0x5d, 0xbf, 0x74, 0xc4, 0xc5, 0x35, 0x43, 0x91,
0x5b, 0x0c, 0x45, 0x21, 0x75, 0xe1, 0x88, 0x0b, 0xfd, 0x18, 0x18, 0x9e, 0xe9, 0x03, 0xc8, 0xfd,
0xd9, 0x31, 0x0e, 0xd0, 0x87, 0x00, 0xba, 0xba, 0xf0, 0x2e, 0x39, 0x3a, 0x3c, 0xc9, 0x72, 0x18,
0x19, 0x7a, 0x97, 0x9c, 0x76, 0x21, 0xb7, 0x7e, 0xce, 0x68, 0xe7, 0xff, 0xdd, 0x65, 0x2c, 0x6b,
0x4c, 0x60, 0x7b, 0xc8, 0xa5, 0x9c, 0xf1, 0x39, 0xf7, 0x25, 0xe3, 0x1f, 0x97, 0x5c, 0x48, 0xda,
0x5c, 0x59, 0x9f, 0x60, 0x51, 0xba, 0xf2, 0x78, 0xfc, 0x73, 0x58, 0x3d, 0x87, 0xc7, 0x90, 0xc6,
0x1c, 0x0e, 0x95, 0x6f, 0x17, 0xaf, 0x31, 0x99, 0xce, 0x35, 0xbe, 0x11, 0xa0, 0x9b, 0x4d, 0xc4,
0x22, 0xf0, 0x05, 0xbf, 0xcb, 0x1e, 0x8f, 0xc0, 0x10, 0xd2, 0x91, 0x4b, 0x81, 0x7d, 0x4b, 0xed,
0x47, 0xab, 0xbe, 0x7f, 0xb7, 0x69, 0x0d, 0x91, 0xc8, 0x22, 0x41, 0xe3, 0x10, 0x0c, 0x1d, 0xa1,
0x79, 0xc8, 0xd8, 0xa7, 0x6f, 0x3b, 0x27, 0x76, 0xdf, 0xdc, 0xa2, 0x05, 0xc8, 0x76, 0x7a, 0x3d,
0x6b, 0x30, 0xb2, 0xfa, 0x26, 0x51, 0x88, 0x59, 0xaf, 0xac, 0x9e, 0x42, 0x89, 0xbd, 0x29, 0xe4,
0x37, 0x5e, 0xf7, 0x75, 0x5d, 0x06, 0x92, 0x83, 0xb3, 0x91, 0x49, 0xd4, 0xe1, 0xd8, 0x1a, 0x99,
0x09, 0x5a, 0x84, 0xdc, 0xb1, 0x35, 0x1a, 0x77, 0xce, 0xfa, 0xf6, 0xc8, 0x4c, 0xd2, 0x12, 0x80,
0x82, 0xcc, 0x1a, 0x74, 0x6c, 0x66, 0xa6, 0x14, 0x1e, 0x9c, 0xad, 0x71, 0x9a, 0x02, 0x18, 0x7d,
0xeb, 0xc4, 0x1a, 0x59, 0xa6, 0xd1, 0x1e, 0x82, 0x81, 0x17, 0x27, 0xa8, 0x0d, 0x10, 0x8f, 0x42,
0xef, 0xdf, 0x34, 0x1e, 0xae, 0xaa, 0x5a, 0xfd, 0xf7, 0xe4, 0x8d, 0xad, 0x26, 0x79, 0x4a, 0xba,
0xa9, 0x77, 0x89, 0xc5, 0x64, 0x62, 0xa0, 0x21, 0x9e, 0xfd, 0x0e, 0x00, 0x00, 0xff, 0xff, 0x71,
0xd3, 0xb9, 0x60, 0x33, 0x06, 0x00, 0x00,
}

View File

@ -66,6 +66,10 @@ message PieceHash {
bytes hash = 2;
// signature either satellite or storage node
bytes signature = 3;
// size of uploaded piece
int64 piece_size = 4;
// timestamp when the upload occurred
google.protobuf.Timestamp timestamp = 5 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
}

View File

@ -2848,6 +2848,26 @@
"id": 3,
"name": "signature",
"type": "bytes"
},
{
"id": 4,
"name": "piece_size",
"type": "int64"
},
{
"id": 5,
"name": "timestamp",
"type": "google.protobuf.Timestamp",
"options": [
{
"name": "(gogoproto.stdtime)",
"value": "true"
},
{
"name": "(gogoproto.nullable)",
"value": "false"
}
]
}
]
},

View File

@ -30,6 +30,8 @@ import (
"storj.io/storj/storage"
)
const pieceHashExpiration = 2 * time.Hour
var (
mon = monkit.Package()
// Error general metainfo error
@ -235,7 +237,7 @@ func (endpoint *Endpoint) CommitSegment(ctx context.Context, req *pb.SegmentComm
return nil, status.Errorf(codes.Internal, err.Error())
}
err = endpoint.filterValidPieces(ctx, req.Pointer)
err = endpoint.filterValidPieces(ctx, req.Pointer, req.OriginalLimits)
if err != nil {
return nil, status.Errorf(codes.Internal, err.Error())
}
@ -453,12 +455,14 @@ func createBucketID(projectID uuid.UUID, bucket []byte) []byte {
return []byte(storj.JoinPaths(entries...))
}
func (endpoint *Endpoint) filterValidPieces(ctx context.Context, pointer *pb.Pointer) (err error) {
func (endpoint *Endpoint) filterValidPieces(ctx context.Context, pointer *pb.Pointer, limits []*pb.OrderLimit) (err error) {
defer mon.Task()(&ctx)(&err)
if pointer.Type == pb.Pointer_REMOTE {
var remotePieces []*pb.RemotePiece
remote := pointer.Remote
allSizesValid := true
lastPieceSize := int64(0)
for _, piece := range remote.RemotePieces {
// TODO enable verification
@ -472,9 +476,36 @@ func (endpoint *Endpoint) filterValidPieces(ctx context.Context, pointer *pb.Poi
// s.logger.Warn("unable to verify piece hash: %v", zap.Error(err))
// }
err = endpoint.validatePieceHash(ctx, piece, limits)
if err != nil {
// TODO maybe this should be logged also to uplink too
endpoint.log.Sugar().Warn(err)
continue
}
if piece.Hash.PieceSize <= 0 || (lastPieceSize > 0 && lastPieceSize != piece.Hash.PieceSize) {
allSizesValid = false
break
}
lastPieceSize = piece.Hash.PieceSize
remotePieces = append(remotePieces, piece)
}
if allSizesValid {
redundancy, err := eestream.NewRedundancyStrategyFromProto(pointer.GetRemote().GetRedundancy())
if err != nil {
return Error.Wrap(err)
}
expectedPieceSize := eestream.CalcPieceSize(pointer.SegmentSize, redundancy)
if expectedPieceSize != lastPieceSize {
return Error.New("expected piece size is different from provided (%v != %v)", expectedPieceSize, lastPieceSize)
}
} else {
return Error.New("all pieces needs to have the same size")
}
// we repair when the number of healthy files is less than or equal to the repair threshold
// except for the case when the repair and success thresholds are the same (a case usually seen during testing)
if int32(len(remotePieces)) <= remote.Redundancy.RepairThreshold && int32(len(remotePieces)) < remote.Redundancy.SuccessThreshold {

View File

@ -21,6 +21,7 @@ import (
"storj.io/storj/internal/testcontext"
"storj.io/storj/internal/testplanet"
"storj.io/storj/internal/testrand"
"storj.io/storj/pkg/eestream"
"storj.io/storj/pkg/macaroon"
"storj.io/storj/pkg/pb"
"storj.io/storj/pkg/storj"
@ -289,11 +290,6 @@ func TestServiceList(t *testing.T) {
func TestCommitSegment(t *testing.T) {
testplanet.Run(t, testplanet.Config{
SatelliteCount: 1, StorageNodeCount: 6, UplinkCount: 1,
Reconfigure: testplanet.Reconfigure{
Satellite: func(log *zap.Logger, index int, config *satellite.Config) {
config.Metainfo.RS.Validate = true
},
},
}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
apiKey := planet.Uplinks[0].APIKey[planet.Satellites[0].ID()]
@ -326,6 +322,11 @@ func TestCommitSegment(t *testing.T) {
pieces[i] = &pb.RemotePiece{
PieceNum: int32(i),
NodeId: limit.Limit.StorageNodeId,
Hash: &pb.PieceHash{
PieceId: limit.Limit.PieceId,
PieceSize: 256,
Timestamp: time.Now(),
},
}
}
@ -333,7 +334,8 @@ func TestCommitSegment(t *testing.T) {
require.NoError(t, err)
pointer := &pb.Pointer{
Type: pb.Pointer_REMOTE,
Type: pb.Pointer_REMOTE,
SegmentSize: 10,
Remote: &pb.RemoteSegment{
RootPieceId: rootPieceID,
Redundancy: redundancy,
@ -530,6 +532,39 @@ func TestCommitSegmentPointer(t *testing.T) {
},
ErrorMessage: "pointer type is INLINE but remote segment is set",
},
{
// no piece hash removes piece from pointer, not enough pieces for successful upload
Modify: func(pointer *pb.Pointer) {
pointer.Remote.RemotePieces[0].Hash = nil
},
ErrorMessage: "Number of valid pieces (1) is less than or equal to the repair threshold (1)",
},
{
// invalid timestamp removes piece from pointer, not enough pieces for successful upload
Modify: func(pointer *pb.Pointer) {
pointer.Remote.RemotePieces[0].Hash.Timestamp = time.Now().Add(-24 * time.Hour)
},
ErrorMessage: "Number of valid pieces (1) is less than or equal to the repair threshold (1)",
},
{
// invalid hash PieceID removes piece from pointer, not enough pieces for successful upload
Modify: func(pointer *pb.Pointer) {
pointer.Remote.RemotePieces[0].Hash.PieceId = storj.PieceID{1}
},
ErrorMessage: "Number of valid pieces (1) is less than or equal to the repair threshold (1)",
},
{
Modify: func(pointer *pb.Pointer) {
pointer.Remote.RemotePieces[0].Hash.PieceSize = 1
},
ErrorMessage: "all pieces needs to have the same size",
},
{
Modify: func(pointer *pb.Pointer) {
pointer.SegmentSize = 100
},
ErrorMessage: "expected piece size is different from provided",
},
}
testplanet.Run(t, testplanet.Config{
@ -541,13 +576,13 @@ func TestCommitSegmentPointer(t *testing.T) {
require.NoError(t, err)
defer ctx.Check(metainfo.Close)
for _, test := range tests {
for i, test := range tests {
pointer, limits := runCreateSegment(ctx, t, metainfo)
test.Modify(pointer)
_, err = metainfo.CommitSegment(ctx, "my-bucket-name", "file/path", -1, pointer, limits)
require.Error(t, err)
require.Contains(t, err.Error(), test.ErrorMessage)
require.Error(t, err, "Case #%v", i)
require.Contains(t, err.Error(), test.ErrorMessage, "Case #%v", i)
}
})
}
@ -637,12 +672,15 @@ func runCreateSegment(ctx context.Context, t *testing.T, metainfo *metainfo.Clie
require.NoError(t, err)
pointer.Remote.RootPieceId = rootPieceID
pointer.Remote.RemotePieces[0].NodeId = addressedLimits[0].Limit.StorageNodeId
pointer.Remote.RemotePieces[1].NodeId = addressedLimits[1].Limit.StorageNodeId
limits := make([]*pb.OrderLimit, len(addressedLimits))
for i, addressedLimit := range addressedLimits {
limits[i] = addressedLimit.Limit
if len(pointer.Remote.RemotePieces) > i {
pointer.Remote.RemotePieces[i].NodeId = addressedLimits[i].Limit.StorageNodeId
pointer.Remote.RemotePieces[i].Hash.PieceId = addressedLimits[i].Limit.PieceId
}
}
return pointer, limits
@ -658,16 +696,30 @@ func createTestPointer(t *testing.T) *pb.Pointer {
Type: pb.RedundancyScheme_RS,
}
redundancy, err := eestream.NewRedundancyStrategyFromProto(rs)
require.NoError(t, err)
segmentSize := 4 * memory.KiB.Int64()
pieceSize := eestream.CalcPieceSize(segmentSize, redundancy)
timestamp := time.Now()
pointer := &pb.Pointer{
Type: pb.Pointer_REMOTE,
Type: pb.Pointer_REMOTE,
SegmentSize: segmentSize,
Remote: &pb.RemoteSegment{
Redundancy: rs,
RemotePieces: []*pb.RemotePiece{
{
PieceNum: 0,
Hash: &pb.PieceHash{
PieceSize: pieceSize,
Timestamp: timestamp,
},
},
{
PieceNum: 1,
Hash: &pb.PieceHash{
PieceSize: pieceSize,
Timestamp: timestamp,
},
},
},
},

View File

@ -12,6 +12,7 @@ import (
"github.com/gogo/protobuf/proto"
"github.com/golang/protobuf/ptypes/timestamp"
"github.com/zeebo/errs"
"go.uber.org/zap"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
@ -301,15 +302,13 @@ func (endpoint *Endpoint) validatePointer(ctx context.Context, pointer *pb.Point
return Error.New("pointer type is INLINE but remote segment is set")
}
// TODO does it all?
if pointer.Type == pb.Pointer_REMOTE {
if pointer.Remote == nil {
switch {
case pointer.Remote == nil:
return Error.New("no remote segment specified")
}
if pointer.Remote.RemotePieces == nil {
case pointer.Remote.RemotePieces == nil:
return Error.New("no remote segment pieces specified")
}
if pointer.Remote.Redundancy == nil {
case pointer.Remote.Redundancy == nil:
return Error.New("no redundancy scheme specified")
}
}
@ -343,3 +342,27 @@ func (endpoint *Endpoint) validateRedundancy(ctx context.Context, redundancy *pb
return nil
}
// validatePieceHash verifies that a remote piece carries a usable piece hash:
// the hash must be present, its timestamp recent (within pieceHashExpiration),
// and it must be consistent with the order limit issued for that piece number
// (matching piece ID, and a size no larger than the limit). A non-nil error
// means the piece should be dropped from the pointer.
func (endpoint *Endpoint) validatePieceHash(ctx context.Context, piece *pb.RemotePiece, limits []*pb.OrderLimit) (err error) {
	defer mon.Task()(&ctx)(&err)
	if piece.Hash == nil {
		return errs.New("no piece hash, removing from pointer %v (%v)", piece.NodeId, piece.PieceNum)
	}
	timestamp := piece.Hash.Timestamp
	if timestamp.Before(time.Now().Add(-pieceHashExpiration)) {
		return errs.New("piece hash timestamp is too old (%v), removing from pointer %v (num: %v)", timestamp, piece.NodeId, piece.PieceNum)
	}
	// Bounds-check the client-supplied piece number before indexing; a
	// malformed request must not be able to panic the endpoint with an
	// out-of-range index.
	if piece.PieceNum < 0 || int(piece.PieceNum) >= len(limits) {
		return errs.New("piece num %v is out of range of order limits (%v), removing from pointer %v", piece.PieceNum, len(limits), piece.NodeId)
	}
	limit := limits[piece.PieceNum]
	if limit != nil {
		switch {
		case limit.PieceId != piece.Hash.PieceId:
			return errs.New("piece hash pieceID doesn't match limit pieceID, removing from pointer (%v != %v)", piece.Hash.PieceId, limit.PieceId)
		case limit.Limit < piece.Hash.PieceSize:
			return errs.New("piece hash PieceSize is larger than order limit, removing from pointer (%v > %v)", piece.Hash.PieceSize, limit.Limit)
		}
	}
	return nil
}

View File

@ -321,8 +321,10 @@ func (endpoint *Endpoint) Upload(stream pb.Piecestore_UploadServer) (err error)
}
storageNodeHash, err := signing.SignPieceHash(ctx, endpoint.signer, &pb.PieceHash{
PieceId: limit.PieceId,
Hash: expectedHash,
PieceId: limit.PieceId,
Hash: expectedHash,
PieceSize: pieceWriter.Size(),
Timestamp: time.Now(),
})
if err != nil {
return ErrInternal.Wrap(err)