// Copyright (C) 2018 Storj Labs, Inc.
// See LICENSE for copying information.

package audit

import (
	"bytes"
	"context"
	"io"
	"time"

	"github.com/gogo/protobuf/proto"
	"github.com/vivint/infectious"
	monkit "gopkg.in/spacemonkeygo/monkit.v2"

	"storj.io/storj/pkg/dht"
	"storj.io/storj/pkg/node"
	"storj.io/storj/pkg/overlay"
	"storj.io/storj/pkg/pb"
	"storj.io/storj/pkg/piecestore/psclient"
	"storj.io/storj/pkg/provider"
	sdbproto "storj.io/storj/pkg/statdb/proto"
	"storj.io/storj/pkg/transport"
	"storj.io/storj/pkg/utils"
)

var mon = monkit.Package()
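
// share is the erasure share downloaded from a single storage node for one
// stripe, or the error encountered while trying to fetch it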
type share struct {
	Error       error
	PieceNumber int
	Data        []byte
}

// Verifier helps verify the correctness of a given stripe
type Verifier struct {
	downloader downloader
}
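
// downloader downloads the shares of a stripe from the storage nodes holding its pieces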
type downloader interface {
	DownloadShares(ctx context.Context, pointer *pb.Pointer, stripeIndex int, authorization *pb.SignedMessage) (shares []share, nodes []*pb.Node, err error)
}

// defaultDownloader downloads shares from networked storage nodes
type defaultDownloader struct {
	transport transport.Client
	overlay   overlay.Client
	identity  provider.FullIdentity

	reporter
}

// newDefaultDownloader creates a defaultDownloader
func newDefaultDownloader(transport transport.Client, overlay overlay.Client, id provider.FullIdentity) *defaultDownloader {
	return &defaultDownloader{transport: transport, overlay: overlay, identity: id}
}

// NewVerifier creates a Verifier
func NewVerifier(transport transport.Client, overlay overlay.Client, id provider.FullIdentity) *Verifier {
	return &Verifier{downloader: newDefaultDownloader(transport, overlay, id)}
}

// getShare uses a piece store client to download a share from a given node
func (d *defaultDownloader) getShare(ctx context.Context, stripeIndex, shareSize, pieceNumber int,
	id psclient.PieceID, pieceSize int64, fromNode *pb.Node, authorization *pb.SignedMessage) (s share, err error) {
	defer mon.Task()(&ctx)(&err)

	ps, err := psclient.NewPSClient(ctx, d.transport, fromNode, 0)
	if err != nil {
		return s, err
	}
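
	// the piece ID stored on each node is derived from the segment's piece ID
	// and that node's ID, so ask this node for its own derived piece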
	nodeID := node.IDFromString(fromNode.GetId())
	derivedPieceID, err := id.Derive(nodeID.Bytes())
	if err != nil {
		return s, err
	}
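
	// create a bandwidth allocation marking this download as a GET action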
	allocationData := &pb.PayerBandwidthAllocation_Data{
		Action:         pb.PayerBandwidthAllocation_GET,
		CreatedUnixSec: time.Now().Unix(),
	}

	serializedAllocation, err := proto.Marshal(allocationData)
	if err != nil {
		return s, err
	}

	pba := &pb.PayerBandwidthAllocation{
		Data: serializedAllocation,
	}

	rr, err := ps.Get(ctx, derivedPieceID, pieceSize, pba, authorization)
	if err != nil {
		return s, err
	}
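
	// read only the share at the requested stripe index from the piece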
	offset := shareSize * stripeIndex

	rc, err := rr.Range(ctx, int64(offset), int64(shareSize))
	if err != nil {
		return s, err
	}
	defer utils.LogClose(rc)

	buf := make([]byte, shareSize)
	_, err = io.ReadFull(rc, buf)
	if err != nil {
		return s, err
	}

	s = share{
		Error:       nil,
		PieceNumber: pieceNumber,
		Data:        buf,
	}
	return s, nil
}

// DownloadShares downloads shares from the nodes where remote pieces are located
func (d *defaultDownloader) DownloadShares(ctx context.Context, pointer *pb.Pointer,
	stripeIndex int, authorization *pb.SignedMessage) (shares []share, nodes []*pb.Node, err error) {
	defer mon.Task()(&ctx)(&err)

	var nodeIds []dht.NodeID
	pieces := pointer.Remote.GetRemotePieces()

	for _, p := range pieces {
		nodeIds = append(nodeIds, node.IDFromString(p.GetNodeId()))
	}

	// TODO(moby) nodes will not include offline nodes, so overlay should update uptime for these nodes
	nodes, err = d.overlay.BulkLookup(ctx, nodeIds)
	if err != nil {
		return nil, nodes, err
	}

	shareSize := int(pointer.Remote.Redundancy.GetErasureShareSize())
	pieceID := psclient.PieceID(pointer.Remote.GetPieceId())

	// this downloads shares from nodes at the given stripe index
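	// a failed download is recorded as an errored share rather than aborting,
	// so the remaining nodes can still be audited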
	for i, node := range nodes {
		paddedSize := calcPadded(pointer.GetSize(), shareSize)
		pieceSize := paddedSize / int64(pointer.Remote.Redundancy.GetMinReq())

		s, err := d.getShare(ctx, stripeIndex, shareSize, int(pieces[i].PieceNum), pieceID, pieceSize, node, authorization)
		if err != nil {
			s = share{
				Error:       err,
				PieceNumber: int(pieces[i].PieceNum),
				Data:        nil,
			}
		}
		shares = append(shares, s)
	}

	return shares, nodes, nil
}
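
// makeCopies copies the data of each successfully downloaded share into a new
// infectious.Share so that Correct can modify the copies while the originals
// are kept for comparison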
func makeCopies(ctx context.Context, originals []share) (copies []infectious.Share, err error) {
	defer mon.Task()(&ctx)(&err)
	copies = make([]infectious.Share, 0, len(originals))
	for _, original := range originals {
		if original.Error != nil {
			continue
		}
		copies = append(copies, infectious.Share{
			Data:   append([]byte{}, original.Data...),
			Number: original.PieceNumber})
	}
	return copies, nil
}

// auditShares takes the downloaded shares and uses infectious's Correct function to check that they
// haven't been altered. auditShares returns a slice containing the piece numbers of altered shares.
func auditShares(ctx context.Context, required, total int, originals []share) (pieceNums []int, err error) {
	defer mon.Task()(&ctx)(&err)
	f, err := infectious.NewFEC(required, total)
	if err != nil {
		return nil, err
	}

	copies, err := makeCopies(ctx, originals)
	if err != nil {
		return nil, err
	}

	err = f.Correct(copies)
	if err != nil {
		return nil, err
	}

	// Correct repairs any bad shares in place, so a corrected copy whose data no
	// longer matches the share originally downloaded for that piece was altered
	for _, share := range copies {
		if !bytes.Equal(originals[share.Number].Data, share.Data) {
			pieceNums = append(pieceNums, share.Number)
		}
	}
	return pieceNums, nil
}
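
// calcPadded rounds size up to the next multiple of blockSize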
func calcPadded(size int64, blockSize int) int64 {
	mod := size % int64(blockSize)
	if mod == 0 {
		return size
	}
	return size + int64(blockSize) - mod
}

// verify downloads shares then verifies the data correctness at the given stripe
func (verifier *Verifier) verify(ctx context.Context, stripeIndex int, pointer *pb.Pointer, authorization *pb.SignedMessage) (verifiedNodes []*sdbproto.Node, err error) {
	defer mon.Task()(&ctx)(&err)

	shares, nodes, err := verifier.downloader.DownloadShares(ctx, pointer, stripeIndex, authorization)
	if err != nil {
		return nil, err
	}
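
	// nodes that failed to return their share are reported as offline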
	var offlineNodes []string
	for i := range shares {
		if shares[i].Error != nil {
			offlineNodes = append(offlineNodes, nodes[i].GetId())
		}
	}

	required := int(pointer.Remote.Redundancy.GetMinReq())
	total := int(pointer.Remote.Redundancy.GetTotal())
	pieceNums, err := auditShares(ctx, required, total, shares)
	if err != nil {
		return nil, err
	}

	var failedNodes []string
	for _, pieceNum := range pieceNums {
		failedNodes = append(failedNodes, nodes[pieceNum].GetId())
	}

	successNodes := getSuccessNodes(ctx, nodes, failedNodes, offlineNodes)
	verifiedNodes = setVerifiedNodes(ctx, nodes, offlineNodes, failedNodes, successNodes)

	return verifiedNodes, nil
}

// getSuccessNodes uses the failed nodes and offline nodes arrays to determine which nodes passed the audit
func getSuccessNodes(ctx context.Context, nodes []*pb.Node, failedNodes, offlineNodes []string) (successNodes []string) {
	fails := make(map[string]bool)
	for _, fail := range failedNodes {
		fails[fail] = true
	}
	for _, offline := range offlineNodes {
		fails[offline] = true
	}

	for _, node := range nodes {
		if !fails[node.GetId()] {
			successNodes = append(successNodes, node.GetId())
		}
	}
	return successNodes
}

// setVerifiedNodes creates a combined array of offline nodes, failed audit nodes, and success nodes with their stats set to the statdb proto Node type
func setVerifiedNodes(ctx context.Context, nodes []*pb.Node, offlineNodes, failedNodes, successNodes []string) (verifiedNodes []*sdbproto.Node) {
	offlineStatusNodes := setOfflineStatus(ctx, offlineNodes)
	failStatusNodes := setAuditFailStatus(ctx, failedNodes)
	successStatusNodes := setSuccessStatus(ctx, successNodes)

	verifiedNodes = append(verifiedNodes, offlineStatusNodes...)
	verifiedNodes = append(verifiedNodes, failStatusNodes...)
	verifiedNodes = append(verifiedNodes, successStatusNodes...)

	return verifiedNodes
}