Ensure we force a segment size and account storage before committing them (#2473)

Stefan Benten 2019-07-08 18:24:38 -04:00 committed by GitHub
parent 3587e1a579
commit 16156e3b3d
5 changed files with 34 additions and 3 deletions

View File

@@ -135,6 +135,7 @@ func (planet *Planet) newSatellites(count int) ([]*satellite.Peer, error) {
Overlay: true,
BwExpiration: 45,
RS: metainfo.RSConfig{
+MaxSegmentSize: 64 * memory.MiB,
MaxBufferMem: memory.Size(256),
ErasureShareSize: memory.Size(256),
MinThreshold: (planet.config.StorageNodeCount * 1 / 5),

View File

@@ -21,7 +21,8 @@ const (
// RSConfig is a configuration struct that keeps details about default
// redundancy strategy information
type RSConfig struct {
-MaxBufferMem memory.Size `help:"maximum buffer memory (in bytes) to be allocated for read buffers" default:"4MiB"`
+MaxSegmentSize memory.Size `help:"maximum segment size" default:"64MiB"`
+MaxBufferMem memory.Size `help:"maximum buffer memory to be allocated for read buffers" default:"4MiB"`
ErasureShareSize memory.Size `help:"the size of each new erasure share in bytes" default:"256B"`
MinThreshold int `help:"the minimum pieces required to recover a segment. k." releaseDefault:"29" devDefault:"4"`
RepairThreshold int `help:"the minimum safe pieces before a repair is triggered. m." releaseDefault:"35" devDefault:"6"`

View File

@@ -259,7 +259,29 @@ func (endpoint *Endpoint) CommitSegmentOld(ctx context.Context, req *pb.SegmentC
return nil, status.Errorf(codes.InvalidArgument, err.Error())
}
+exceeded, limit, err := endpoint.projectUsage.ExceedsStorageUsage(ctx, keyInfo.ProjectID)
+if err != nil {
+return nil, status.Errorf(codes.Internal, err.Error())
+}
+if exceeded {
+endpoint.log.Sugar().Errorf("monthly project limits are %s of storage and bandwidth usage. This limit has been exceeded for storage for projectID %s.",
+limit, keyInfo.ProjectID,
+)
+return nil, status.Errorf(codes.ResourceExhausted, "Exceeded Usage Limit")
+}
inlineUsed, remoteUsed := calculateSpaceUsed(req.Pointer)
+// ToDo: Replace with hash & signature validation
+// Ensure neither uplink or storage nodes are cheating on us
+if req.Pointer.Type == pb.Pointer_REMOTE {
+//We cannot have more redundancy than total/min
+if float64(remoteUsed) > (float64(req.Pointer.SegmentSize)/float64(req.Pointer.Remote.Redundancy.MinReq))*float64(req.Pointer.Remote.Redundancy.Total) {
+endpoint.log.Sugar().Debugf("data size mismatch, got segment: %d, pieces: %d, RS Min, Total: %d,%d", req.Pointer.SegmentSize, remoteUsed, req.Pointer.Remote.Redundancy.MinReq, req.Pointer.Remote.Redundancy.Total)
+return nil, status.Errorf(codes.InvalidArgument, "mismatched segment size and piece usage")
+}
+}
if err := endpoint.projectUsage.AddProjectStorageUsage(ctx, keyInfo.ProjectID, inlineUsed, remoteUsed); err != nil {
endpoint.log.Sugar().Errorf("Could not track new storage usage by project %v: %v", keyInfo.ProjectID, err)
// but continue. it's most likely our own fault that we couldn't track it, and the only thing
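For orientation (not part of the commit): the "We cannot have more redundancy than total/min" check above caps the remote space a segment may claim at segmentSize / k * n. Below is a minimal standalone sketch of that arithmetic, using k=29 and n=130 as they appear in the defaults touched by this change; the helper name and program are illustrative only, not satellite code.

package main

import "fmt"

// maxRemoteUsage mirrors the bound enforced in CommitSegmentOld: a segment is cut
// into minReq (k) shares and stored on at most total (n) nodes, so the summed size
// of its remote pieces may not exceed segmentSize/k*n.
func maxRemoteUsage(segmentSize int64, minReq, total int32) float64 {
	return float64(segmentSize) / float64(minReq) * float64(total)
}

func main() {
	const mib = 1 << 20
	limit := maxRemoteUsage(64*mib, 29, 130)
	fmt.Printf("a 64 MiB segment may use at most %.1f MiB of remote pieces\n", limit/mib)
	// prints: a 64 MiB segment may use at most 286.9 MiB of remote pieces
}

Anything above that bound is rejected with "mismatched segment size and piece usage" before the pointer is committed.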

View File

@@ -189,6 +189,10 @@ func (endpoint *Endpoint) validateCommitSegment(ctx context.Context, req *pb.Seg
return Error.New("invalid no order limit for piece")
}
+if req.Pointer.SegmentSize > endpoint.rsConfig.MaxSegmentSize.Int64() || req.Pointer.SegmentSize < 0 {
+return Error.New("segment size %v is out of range, maximum is %v", req.Pointer.SegmentSize, endpoint.rsConfig.MaxSegmentSize)
+}
for _, piece := range remote.RemotePieces {
limit := req.OriginalLimits[piece.PieceNum]
@@ -198,7 +202,7 @@ func (endpoint *Endpoint) validateCommitSegment(ctx context.Context, req *pb.Seg
}
if limit == nil {
return Error.New("invalid no order limit for piece")
return Error.New("empty order limit for piece")
}
derivedPieceID := remote.RootPieceId.Derive(piece.NodeId, piece.PieceNum)
if limit.PieceId.IsZero() || limit.PieceId != derivedPieceID {
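The new SegmentSize range check added above bounds req.Pointer.SegmentSize by the metainfo.rs.max-segment-size setting (64 MiB by default) and also rejects negative values. A quick sketch of the boundary behavior; the standalone function is an illustration of the same condition, not satellite code.

package main

import "fmt"

// checkSegmentSize mirrors the added condition: negative sizes and sizes above
// the configured maximum are rejected; the maximum itself is still accepted.
func checkSegmentSize(segmentSize, maxSegmentSize int64) error {
	if segmentSize > maxSegmentSize || segmentSize < 0 {
		return fmt.Errorf("segment size %v is out of range, maximum is %v", segmentSize, maxSegmentSize)
	}
	return nil
}

func main() {
	const maxSegmentSize = 64 << 20 // 64 MiB, the new default
	fmt.Println(checkSegmentSize(64<<20, maxSegmentSize))   // <nil>
	fmt.Println(checkSegmentSize(64<<20+1, maxSegmentSize)) // out of range
	fmt.Println(checkSegmentSize(-1, maxSegmentSize))       // out of range
}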

View File

@@ -184,9 +184,12 @@ kademlia.operator.wallet: ""
# the size of each new erasure share in bytes
# metainfo.rs.erasure-share-size: 256 B
-# maximum buffer memory (in bytes) to be allocated for read buffers
+# maximum buffer memory to be allocated for read buffers
# metainfo.rs.max-buffer-mem: 4.0 MiB
+# maximum segment size
+# metainfo.rs.max-segment-size: 64.0 MiB
# the largest amount of pieces to encode to. n.
# metainfo.rs.max-threshold: 130
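To change the per-segment ceiling, an operator would presumably uncomment this key in their satellite config and set another value, e.g. metainfo.rs.max-segment-size: 32.0 MiB (an illustrative value; 64 MiB remains the shipped default), or, assuming the usual flag mapping for these settings, pass the corresponding --metainfo.rs.max-segment-size command-line flag.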