// Copyright (C) 2018 Storj Labs, Inc.
// See LICENSE for copying information.

package audit

import (
	"bytes"
	"context"
	"io"
	"time"

	"github.com/gogo/protobuf/proto"
	"github.com/vivint/infectious"
	monkit "gopkg.in/spacemonkeygo/monkit.v2"

	"storj.io/storj/pkg/overlay"
	"storj.io/storj/pkg/pb"
	"storj.io/storj/pkg/piecestore/psclient"
	"storj.io/storj/pkg/provider"
	"storj.io/storj/pkg/statdb"
	"storj.io/storj/pkg/storj"
	"storj.io/storj/pkg/transport"
	"storj.io/storj/pkg/utils"
)

var mon = monkit.Package()
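
// share is the result of downloading a single erasure share: either the share
// data and its piece number, or the error encountered while fetching it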
type share struct {
	Error       error
	PieceNumber int
	Data        []byte
}

// Verifier helps verify the correctness of a given stripe
type Verifier struct {
	downloader downloader
}
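
// downloader is how the Verifier fetches the erasure shares for a stripe of a
// pointer, together with the nodes they were fetched from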
type downloader interface {
	DownloadShares(ctx context.Context, pointer *pb.Pointer, stripeIndex int, authorization *pb.SignedMessage) (shares map[int]share, nodes map[int]*pb.Node, err error)
}

// defaultDownloader downloads shares from networked storage nodes
type defaultDownloader struct {
	transport transport.Client
	overlay   overlay.Client
	identity  provider.FullIdentity
	reporter
}

// newDefaultDownloader creates a defaultDownloader
func newDefaultDownloader(transport transport.Client, overlay overlay.Client, id provider.FullIdentity) *defaultDownloader {
	return &defaultDownloader{transport: transport, overlay: overlay, identity: id}
}

// NewVerifier creates a Verifier
func NewVerifier(transport transport.Client, overlay overlay.Client, id provider.FullIdentity) *Verifier {
	return &Verifier{downloader: newDefaultDownloader(transport, overlay, id)}
}
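
// Illustrative only: given transport and overlay clients and a full identity
// (the variable names below are placeholders, not part of this package), a
// verifier would be constructed as:
//
//	verifier := NewVerifier(transportClient, overlayClient, identity)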

// getShare uses piece store clients to download shares from a given node
func (d *defaultDownloader) getShare(ctx context.Context, stripeIndex, shareSize, pieceNumber int,
	id psclient.PieceID, pieceSize int64, fromNode *pb.Node, authorization *pb.SignedMessage) (s share, err error) {
	defer mon.Task()(&ctx)(&err)

	ps, err := psclient.NewPSClient(ctx, d.transport, fromNode, 0)
	if err != nil {
		return s, err
	}

	derivedPieceID, err := id.Derive(fromNode.Id.Bytes())
	if err != nil {
		return s, err
	}
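
	// build a payer bandwidth allocation covering this GET and pass it, along
	// with the audit authorization, to the piece store request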
	allocationData := &pb.PayerBandwidthAllocation_Data{
		Action:         pb.PayerBandwidthAllocation_GET,
		CreatedUnixSec: time.Now().Unix(),
	}

	serializedAllocation, err := proto.Marshal(allocationData)
	if err != nil {
		return s, err
	}

	pba := &pb.PayerBandwidthAllocation{
		Data: serializedAllocation,
	}

	rr, err := ps.Get(ctx, derivedPieceID, pieceSize, pba, authorization)
	if err != nil {
		return s, err
	}
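
	// the piece holds one share per stripe, so the share for this stripe
	// starts at stripeIndex * shareSize within the piece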
	offset := shareSize * stripeIndex

	rc, err := rr.Range(ctx, int64(offset), int64(shareSize))
	if err != nil {
		return s, err
	}
	defer utils.LogClose(rc)

	buf := make([]byte, shareSize)
	_, err = io.ReadFull(rc, buf)
	if err != nil {
		return s, err
	}

	s = share{
		Error:       nil,
		PieceNumber: pieceNumber,
		Data:        buf,
	}
	return s, nil
}

// DownloadShares downloads shares from the nodes where remote pieces are located
func (d *defaultDownloader) DownloadShares(ctx context.Context, pointer *pb.Pointer,
	stripeIndex int, authorization *pb.SignedMessage) (shares map[int]share, nodes map[int]*pb.Node, err error) {
	defer mon.Task()(&ctx)(&err)

	var nodeIds storj.NodeIDList
	pieces := pointer.Remote.GetRemotePieces()
	for _, p := range pieces {
		nodeIds = append(nodeIds, p.NodeId)
	}

	// TODO(moby) nodeSlice will not include offline nodes, so overlay should update uptime for these nodes
	nodeSlice, err := d.overlay.BulkLookup(ctx, nodeIds)
	if err != nil {
		return nil, nodes, err
	}
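
	// both result maps are keyed by piece number so that each share can later
	// be matched back to the node it came from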
	shares = make(map[int]share, len(nodeSlice))
	nodes = make(map[int]*pb.Node, len(nodeSlice))

	shareSize := int(pointer.Remote.Redundancy.GetErasureShareSize())
	pieceID := psclient.PieceID(pointer.Remote.GetPieceId())
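
	// each node holds a piece of paddedSize/minReq bytes of the erasure-encoded
	// segment, with one shareSize-byte share per stripe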
	// this downloads shares from nodes at the given stripe index
	for i, node := range nodeSlice {
		paddedSize := calcPadded(pointer.GetSegmentSize(), shareSize)
		pieceSize := paddedSize / int64(pointer.Remote.Redundancy.GetMinReq())

		s, err := d.getShare(ctx, stripeIndex, shareSize, int(pieces[i].PieceNum), pieceID, pieceSize, node, authorization)
		if err != nil {
			s = share{
				Error:       err,
				PieceNumber: int(pieces[i].PieceNum),
				Data:        nil,
			}
		}
		shares[s.PieceNumber] = s
		nodes[s.PieceNumber] = node
	}

	return shares, nodes, nil
}
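
// makeCopies deep-copies the successfully downloaded shares into infectious.Share
// values, skipping shares that failed to download, so that the originals can later
// be compared against the corrected copies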
func makeCopies(ctx context.Context, originals map[int]share) (copies []infectious.Share, err error) {
	defer mon.Task()(&ctx)(&err)
	copies = make([]infectious.Share, 0, len(originals))
	for _, original := range originals {
		if original.Error != nil {
			continue
		}
		copies = append(copies, infectious.Share{
			Data:   append([]byte{}, original.Data...),
			Number: original.PieceNumber})
	}
	return copies, nil
}

// auditShares takes the downloaded shares and uses infectious's Correct function to check that they
// haven't been altered. auditShares returns a slice containing the piece numbers of altered shares.
func auditShares(ctx context.Context, required, total int, originals map[int]share) (pieceNums []int, err error) {
	defer mon.Task()(&ctx)(&err)
	f, err := infectious.NewFEC(required, total)
	if err != nil {
		return nil, err
	}

	copies, err := makeCopies(ctx, originals)
	if err != nil {
		return nil, err
	}

	err = f.Correct(copies)
	if err != nil {
		return nil, err
	}
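
	// Correct has repaired the copies in place; any copy that no longer matches
	// the share we actually downloaded must have been altered on the node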
	for _, share := range copies {
		if !bytes.Equal(originals[share.Number].Data, share.Data) {
			pieceNums = append(pieceNums, share.Number)
		}
	}
	return pieceNums, nil
}
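
// calcPadded rounds size up to the next multiple of blockSize,
// e.g. calcPadded(10, 4) == 12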
func calcPadded(size int64, blockSize int) int64 {
	mod := size % int64(blockSize)
	if mod == 0 {
		return size
	}
	return size + int64(blockSize) - mod
}

// verify downloads shares then verifies the data correctness at the given stripe
func (verifier *Verifier) verify(ctx context.Context, stripeIndex int, pointer *pb.Pointer, authorization *pb.SignedMessage) (verifiedNodes []*statdb.UpdateRequest, err error) {
	defer mon.Task()(&ctx)(&err)

	shares, nodes, err := verifier.downloader.DownloadShares(ctx, pointer, stripeIndex, authorization)
	if err != nil {
		return nil, err
	}
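
	// a share whose download failed marks its node as offline for this audit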
	var offlineNodes storj.NodeIDList
	for pieceNum := range shares {
		if shares[pieceNum].Error != nil {
			offlineNodes = append(offlineNodes, nodes[pieceNum].Id)
		}
	}

	required := int(pointer.Remote.Redundancy.GetMinReq())
	total := int(pointer.Remote.Redundancy.GetTotal())
	pieceNums, err := auditShares(ctx, required, total, shares)
	if err != nil {
		return nil, err
	}
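
	// nodes whose shares were flagged as altered fail the audit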
	var failedNodes storj.NodeIDList
	for _, pieceNum := range pieceNums {
		failedNodes = append(failedNodes, nodes[pieceNum].Id)
	}

	successNodes := getSuccessNodes(ctx, nodes, failedNodes, offlineNodes)
	verifiedNodes = setVerifiedNodes(ctx, offlineNodes, failedNodes, successNodes)

	return verifiedNodes, nil
}

// getSuccessNodes uses the lists of failed and offline nodes to determine which nodes passed the audit
func getSuccessNodes(ctx context.Context, nodes map[int]*pb.Node, failedNodes, offlineNodes storj.NodeIDList) (successNodes storj.NodeIDList) {
	fails := make(map[storj.NodeID]bool)
	for _, fail := range failedNodes {
		fails[fail] = true
	}
	for _, offline := range offlineNodes {
		fails[offline] = true
	}

	for _, node := range nodes {
		if !fails[node.Id] {
			successNodes = append(successNodes, node.Id)
		}
	}
	return successNodes
}

// setVerifiedNodes combines the offline, failed, and successful nodes into a
// single slice of statdb update requests
func setVerifiedNodes(ctx context.Context, offlineNodes, failedNodes, successNodes storj.NodeIDList) (verifiedNodes []*statdb.UpdateRequest) {
	offlineStatusNodes := setOfflineStatus(ctx, offlineNodes)
	failStatusNodes := setAuditFailStatus(ctx, failedNodes)
	successStatusNodes := setSuccessStatus(ctx, successNodes)

	verifiedNodes = append(verifiedNodes, offlineStatusNodes...)
	verifiedNodes = append(verifiedNodes, failStatusNodes...)
	verifiedNodes = append(verifiedNodes, successStatusNodes...)

	return verifiedNodes
}