cmd/inspector: remove
This is a very old tool, built in the very early days of v3, when we didn't know how the network would be used. The tool anticipated being able to query remote nodes for internal state, but we don't do that, and we don't believe anyone uses it. Change-Id: Ie1ded3ecbedb09313f2d6fc721039e0f15e4ee85
This commit is contained in:
parent
3dc01bd25d
commit
b502310fe5
@ -1,367 +0,0 @@
|
||||
// Copyright (C) 2019 Storj Labs, Inc.
|
||||
// See LICENSE for copying information.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/base64"
|
||||
"encoding/csv"
|
||||
"flag"
|
||||
"fmt"
|
||||
"os"
|
||||
"strconv"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/zeebo/errs"
|
||||
|
||||
"storj.io/common/identity"
|
||||
"storj.io/common/rpc"
|
||||
"storj.io/common/storj"
|
||||
"storj.io/private/process"
|
||||
_ "storj.io/storj/private/version" // This attaches version information during release builds.
|
||||
"storj.io/storj/satellite/internalpb"
|
||||
"storj.io/uplink/private/eestream"
|
||||
)
|
||||
|
||||
var (
	// Addr is the address of the peer to inspect, from command flags.
	Addr = flag.String("address", "127.0.0.1:7778", "address of peer to inspect")

	// IdentityPath is the path to the identity the inspector should use for network communication.
	IdentityPath = flag.String("identity-path", "", "path to the identity certificate for use on the network")

	// CSVPath is the csv path where command output is written; "stdout" writes to standard out.
	CSVPath string

	// ErrInspectorDial is returned when there are errors dialing the inspector server.
	ErrInspectorDial = errs.Class("dialing inspector server")

	// ErrRequest is for request errors after dialing.
	ErrRequest = errs.Class("processing request")

	// ErrIdentity is for errors during identity creation for this CLI.
	ErrIdentity = errs.Class("creating identity")

	// ErrArgs is returned when there are errors with CLI args.
	ErrArgs = errs.Class("invalid CLI args")

	// Commander CLI command tree: root -> {statdb, health -> {object, segment}}.
	rootCmd = &cobra.Command{
		Use:   "inspector",
		Short: "CLI for interacting with Storj network",
	}
	statsCmd = &cobra.Command{
		Use:   "statdb",
		Short: "commands for statdb",
	}
	healthCmd = &cobra.Command{
		Use:   "health",
		Short: "commands for querying health of a stored data",
	}
	objectHealthCmd = &cobra.Command{
		Use:   "object <project-id> <bucket> <encrypted-path>",
		Short: "Get stats about an object's health",
		Args:  cobra.MinimumNArgs(3),
		RunE:  ObjectHealth,
	}
	segmentHealthCmd = &cobra.Command{
		Use:   "segment <project-id> <segment-index> <bucket> <encrypted-path>",
		Short: "Get stats about a segment's health",
		Args:  cobra.MinimumNArgs(4),
		RunE:  SegmentHealth,
	}
)
|
||||
|
||||
// Inspector gives access to overlay.
type Inspector struct {
	conn     *rpc.Conn              // open connection to the peer being inspected
	identity *identity.FullIdentity // identity loaded for network communication
	// healthclient issues object/segment health RPCs over conn.
	healthclient internalpb.DRPCHealthInspectorClient
}
|
||||
|
||||
// NewInspector creates a new inspector client for access to overlay.
|
||||
func NewInspector(ctx context.Context, address, path string) (*Inspector, error) {
|
||||
id, err := identity.Config{
|
||||
CertPath: fmt.Sprintf("%s/identity.cert", path),
|
||||
KeyPath: fmt.Sprintf("%s/identity.key", path),
|
||||
}.Load()
|
||||
if err != nil {
|
||||
return nil, ErrIdentity.Wrap(err)
|
||||
}
|
||||
|
||||
conn, err := rpc.NewDefaultDialer(nil).DialAddressUnencrypted(ctx, address)
|
||||
if err != nil {
|
||||
return &Inspector{}, ErrInspectorDial.Wrap(err)
|
||||
}
|
||||
|
||||
return &Inspector{
|
||||
conn: conn,
|
||||
identity: id,
|
||||
healthclient: internalpb.NewDRPCHealthInspectorClient(conn),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Close closes the inspector.
|
||||
func (i *Inspector) Close() error { return i.conn.Close() }
|
||||
|
||||
// ObjectHealth gets information about the health of an object on the network.
//
// Positional args: <project-id> <bucket> <encrypted-path>, optionally followed
// by <start-after-segment> <end-before-segment> <limit>. The optional values
// are parsed highest-index first with fallthrough, so any prefix of the
// optional trio may be supplied. Results are written as CSV to the output
// selected by --csv-path.
func ObjectHealth(cmd *cobra.Command, args []string) (err error) {
	ctx, _ := process.Ctx(cmd)
	i, err := NewInspector(ctx, *Addr, *IdentityPath)
	if err != nil {
		return ErrArgs.Wrap(err)
	}
	// Fold the Close error into the returned error via the named result.
	defer func() { err = errs.Combine(err, i.Close()) }()

	startAfterSegment := int64(0) // start from first segment
	endBeforeSegment := int64(0)  // No end, so we stop when we've hit limit or arrived at the last segment
	limit := int64(0)             // No limit, so we stop when we've arrived at the last segment

	// Each case parses one optional arg, then falls through to parse the
	// earlier optional args that must also be present.
	switch len(args) {
	case 6:
		limit, err = strconv.ParseInt(args[5], 10, 64)
		if err != nil {
			return ErrRequest.Wrap(err)
		}
		fallthrough
	case 5:
		endBeforeSegment, err = strconv.ParseInt(args[4], 10, 64)
		if err != nil {
			return ErrRequest.Wrap(err)
		}
		fallthrough
	case 4:
		startAfterSegment, err = strconv.ParseInt(args[3], 10, 64)
		if err != nil {
			return ErrRequest.Wrap(err)
		}
		fallthrough
	default:
	}
	// The encrypted path argument arrives base64url-encoded on the CLI.
	decodedPath, err := base64.URLEncoding.DecodeString(args[2])
	if err != nil {
		return err
	}
	req := &internalpb.ObjectHealthRequest{
		ProjectId:         []byte(args[0]),
		Bucket:            []byte(args[1]),
		EncryptedPath:     decodedPath,
		StartAfterSegment: startAfterSegment,
		EndBeforeSegment:  endBeforeSegment,
		Limit:             int32(limit),
	}

	resp, err := i.healthclient.ObjectHealth(ctx, req)
	if err != nil {
		return ErrRequest.Wrap(err)
	}

	f, err := csvOutput()
	if err != nil {
		return err
	}
	defer func() {
		err := f.Close()
		if err != nil {
			fmt.Printf("error closing file: %+v\n", err)
		}
	}()

	w := csv.NewWriter(f)
	defer w.Flush()

	redundancy, err := eestream.NewRedundancyStrategyFromProto(resp.GetRedundancy())
	if err != nil {
		return ErrRequest.Wrap(err)
	}

	if err := printRedundancyTable(w, redundancy); err != nil {
		return err
	}

	if err := printSegmentHealthAndNodeTables(w, redundancy, resp.GetSegments()); err != nil {
		return err
	}

	return nil
}
|
||||
|
||||
// SegmentHealth gets information about the health of a segment on the network.
//
// Positional args: <project-id> <segment-index> <bucket> <encrypted-path>.
// Unlike ObjectHealth, the encrypted path is used verbatim (not base64
// decoded). Results are written as CSV to the output selected by --csv-path.
func SegmentHealth(cmd *cobra.Command, args []string) (err error) {
	ctx, _ := process.Ctx(cmd)
	i, err := NewInspector(ctx, *Addr, *IdentityPath)
	if err != nil {
		return ErrArgs.Wrap(err)
	}
	// Fold the Close error into the returned error via the named result.
	defer func() { err = errs.Combine(err, i.Close()) }()

	segmentIndex, err := strconv.ParseInt(args[1], 10, 64)
	if err != nil {
		return ErrRequest.Wrap(err)
	}

	req := &internalpb.SegmentHealthRequest{
		ProjectId:     []byte(args[0]),
		SegmentIndex:  segmentIndex,
		Bucket:        []byte(args[2]),
		EncryptedPath: []byte(args[3]),
	}

	resp, err := i.healthclient.SegmentHealth(ctx, req)
	if err != nil {
		return ErrRequest.Wrap(err)
	}

	f, err := csvOutput()
	if err != nil {
		return err
	}
	defer func() {
		err := f.Close()
		if err != nil {
			fmt.Printf("error closing file: %+v\n", err)
		}
	}()

	w := csv.NewWriter(f)
	defer w.Flush()

	redundancy, err := eestream.NewRedundancyStrategyFromProto(resp.GetRedundancy())
	if err != nil {
		return ErrRequest.Wrap(err)
	}

	if err := printRedundancyTable(w, redundancy); err != nil {
		return err
	}

	// A single-segment response is rendered with the same table printer as
	// the multi-segment object report.
	if err := printSegmentHealthAndNodeTables(w, redundancy, []*internalpb.SegmentHealth{resp.GetHealth()}); err != nil {
		return err
	}

	return nil
}
|
||||
|
||||
func csvOutput() (*os.File, error) {
|
||||
if CSVPath == "stdout" {
|
||||
return os.Stdout, nil
|
||||
}
|
||||
|
||||
return os.Create(CSVPath)
|
||||
}
|
||||
|
||||
// printSegmentHealthAndNodeTables writes two CSV tables: first a per-segment
// summary (segment index plus healthy/unhealthy/offline node counts), then a
// node-status matrix with one column per node and one row per segment.
// NOTE(review): the redundancy parameter is not used in this function —
// presumably kept for signature symmetry with printRedundancyTable; confirm
// before removing.
func printSegmentHealthAndNodeTables(w *csv.Writer, redundancy eestream.RedundancyStrategy, segments []*internalpb.SegmentHealth) error {
	segmentTableHeader := []string{
		"Segment Index", "Healthy Nodes", "Unhealthy Nodes", "Offline Nodes",
	}

	if err := w.Write(segmentTableHeader); err != nil {
		return fmt.Errorf("error writing record to csv: %w", err)
	}

	currentNodeIndex := 1                     // start at index 1 to leave first column empty
	nodeIndices := make(map[storj.NodeID]int) // to keep track of node positions for node table
	// Add each segment to the segmentTable
	for _, segment := range segments {
		healthyNodes := segment.HealthyIds               // healthy nodes with pieces currently online
		unhealthyNodes := segment.UnhealthyIds           // unhealthy nodes with pieces currently online
		offlineNodes := segment.OfflineIds               // offline nodes
		segmentIndexPath := string(segment.GetSegment()) // path formatted Segment Index

		row := []string{
			segmentIndexPath,
			strconv.FormatInt(int64(len(healthyNodes)), 10),
			strconv.FormatInt(int64(len(unhealthyNodes)), 10),
			strconv.FormatInt(int64(len(offlineNodes)), 10),
		}

		if err := w.Write(row); err != nil {
			return fmt.Errorf("error writing record to csv: %w", err)
		}

		// Assign a stable column index to each node the first time it appears,
		// in healthy/unhealthy/offline order.
		allNodes := []storj.NodeID{}
		allNodes = append(allNodes, healthyNodes...)
		allNodes = append(allNodes, unhealthyNodes...)
		allNodes = append(allNodes, offlineNodes...)
		for _, id := range allNodes {
			if nodeIndices[id] == 0 {
				nodeIndices[id] = currentNodeIndex
				currentNodeIndex++
			}
		}
	}

	// Blank record separates the segment table from the node table.
	if err := w.Write([]string{}); err != nil {
		return fmt.Errorf("error writing record to csv: %w", err)
	}

	numNodes := len(nodeIndices)
	nodeTableHeader := make([]string, numNodes+1)
	for id, i := range nodeIndices {
		nodeTableHeader[i] = id.String()
	}
	if err := w.Write(nodeTableHeader); err != nil {
		return fmt.Errorf("error writing record to csv: %w", err)
	}

	// Add online/offline info to the node table
	for _, segment := range segments {
		row := make([]string, numNodes+1)
		for _, id := range segment.HealthyIds {
			i := nodeIndices[id]
			row[i] = "healthy"
		}
		for _, id := range segment.UnhealthyIds {
			i := nodeIndices[id]
			row[i] = "unhealthy"
		}
		for _, id := range segment.OfflineIds {
			i := nodeIndices[id]
			row[i] = "offline"
		}
		row[0] = string(segment.GetSegment())
		if err := w.Write(row); err != nil {
			return fmt.Errorf("error writing record to csv: %w", err)
		}
	}

	return nil
}
|
||||
|
||||
func printRedundancyTable(w *csv.Writer, redundancy eestream.RedundancyStrategy) error {
|
||||
total := redundancy.TotalCount() // total amount of pieces we generated (n)
|
||||
required := redundancy.RequiredCount() // minimum required stripes for reconstruction (k)
|
||||
optimalThreshold := redundancy.OptimalThreshold() // amount of pieces we need to store to call it a success (o)
|
||||
repairThreshold := redundancy.RepairThreshold() // amount of pieces we need to drop to before triggering repair (m)
|
||||
|
||||
redundancyTable := [][]string{
|
||||
{"Total Pieces (n)", "Minimum Required (k)", "Optimal Threshold (o)", "Repair Threshold (m)"},
|
||||
{strconv.Itoa(total), strconv.Itoa(required), strconv.Itoa(optimalThreshold), strconv.Itoa(repairThreshold)},
|
||||
{},
|
||||
}
|
||||
|
||||
for _, row := range redundancyTable {
|
||||
if err := w.Write(row); err != nil {
|
||||
return fmt.Errorf("error writing record to csv: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// init wires the cobra command tree and registers the --csv-path flag.
// NOTE(review): flag.Parse is called from init rather than main —
// presumably so the package-level flag.String values are populated before
// cobra runs; confirm before changing.
func init() {
	rootCmd.AddCommand(statsCmd)
	rootCmd.AddCommand(healthCmd)

	healthCmd.AddCommand(objectHealthCmd)
	healthCmd.AddCommand(segmentHealthCmd)

	objectHealthCmd.Flags().StringVar(&CSVPath, "csv-path", "stdout", "csv path where command output is written")

	flag.Parse()
}
|
||||
|
||||
// main runs the inspector CLI through the shared process harness.
func main() {
	process.Exec(rootCmd)
}
|
@ -1,2 +0,0 @@
|
||||
nodeid1,20,10,20,10
|
||||
nodeid2,10,10,25,20
|
|
@ -42,7 +42,6 @@ import (
|
||||
"storj.io/storj/satellite/contact"
|
||||
"storj.io/storj/satellite/gc/sender"
|
||||
"storj.io/storj/satellite/gracefulexit"
|
||||
"storj.io/storj/satellite/inspector"
|
||||
"storj.io/storj/satellite/mailservice"
|
||||
"storj.io/storj/satellite/metabase"
|
||||
"storj.io/storj/satellite/metabase/zombiedeletion"
|
||||
@ -116,10 +115,6 @@ type Satellite struct {
|
||||
DB *metabase.DB
|
||||
}
|
||||
|
||||
Inspector struct {
|
||||
Endpoint *inspector.Endpoint
|
||||
}
|
||||
|
||||
Orders struct {
|
||||
DB orders.DB
|
||||
Endpoint *orders.Endpoint
|
||||
@ -615,8 +610,6 @@ func createNewSystem(name string, log *zap.Logger, config satellite.Config, peer
|
||||
|
||||
system.Metabase.DB = api.Metainfo.Metabase
|
||||
|
||||
system.Inspector.Endpoint = api.Inspector.Endpoint
|
||||
|
||||
system.Orders.DB = api.Orders.DB
|
||||
system.Orders.Endpoint = api.Orders.Endpoint
|
||||
system.Orders.Service = api.Orders.Service
|
||||
|
@ -38,8 +38,6 @@ import (
|
||||
"storj.io/storj/satellite/console/userinfo"
|
||||
"storj.io/storj/satellite/contact"
|
||||
"storj.io/storj/satellite/gracefulexit"
|
||||
"storj.io/storj/satellite/inspector"
|
||||
"storj.io/storj/satellite/internalpb"
|
||||
"storj.io/storj/satellite/mailservice"
|
||||
"storj.io/storj/satellite/metabase"
|
||||
"storj.io/storj/satellite/metainfo"
|
||||
@ -111,10 +109,6 @@ type API struct {
|
||||
Endpoint *userinfo.Endpoint
|
||||
}
|
||||
|
||||
Inspector struct {
|
||||
Endpoint *inspector.Endpoint
|
||||
}
|
||||
|
||||
Accounting struct {
|
||||
ProjectUsage *accounting.Service
|
||||
}
|
||||
@ -505,17 +499,6 @@ func NewAPI(log *zap.Logger, full *identity.FullIdentity, db DB,
|
||||
}
|
||||
}
|
||||
|
||||
{ // setup inspector
|
||||
peer.Inspector.Endpoint = inspector.NewEndpoint(
|
||||
peer.Log.Named("inspector"),
|
||||
peer.Overlay.Service,
|
||||
peer.Metainfo.Metabase,
|
||||
)
|
||||
if err := internalpb.DRPCRegisterHealthInspector(peer.Server.PrivateDRPC(), peer.Inspector.Endpoint); err != nil {
|
||||
return nil, errs.Combine(err, peer.Close())
|
||||
}
|
||||
}
|
||||
|
||||
{ // setup payments
|
||||
pc := config.Payments
|
||||
|
||||
|
@ -1,204 +0,0 @@
|
||||
// Copyright (C) 2019 Storj Labs, Inc.
|
||||
// See LICENSE for copying information.
|
||||
|
||||
package inspector
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/binary"
|
||||
|
||||
"github.com/spacemonkeygo/monkit/v3"
|
||||
"github.com/zeebo/errs"
|
||||
"go.uber.org/zap"
|
||||
|
||||
"storj.io/common/pb"
|
||||
"storj.io/common/storj"
|
||||
"storj.io/common/uuid"
|
||||
"storj.io/storj/satellite/internalpb"
|
||||
"storj.io/storj/satellite/metabase"
|
||||
"storj.io/storj/satellite/overlay"
|
||||
)
|
||||
|
||||
var (
	// mon collects monkit telemetry for this package.
	mon = monkit.Package()
	// Error wraps errors returned from Server struct methods.
	Error = errs.Class("inspector")
)
|
||||
|
||||
// Endpoint for checking object and segment health.
//
// architecture: Endpoint
type Endpoint struct {
	internalpb.DRPCHealthInspectorUnimplementedServer
	log      *zap.Logger      // logger scoped to the inspector
	overlay  *overlay.Service // node reachability/reliability lookups
	metabase *metabase.DB     // object and segment metadata store
}
|
||||
|
||||
// NewEndpoint will initialize an Endpoint struct.
|
||||
func NewEndpoint(log *zap.Logger, cache *overlay.Service, metabase *metabase.DB) *Endpoint {
|
||||
return &Endpoint{
|
||||
log: log,
|
||||
overlay: cache,
|
||||
metabase: metabase,
|
||||
}
|
||||
}
|
||||
|
||||
// ObjectHealth will check the health of an object.
//
// It resolves the last committed object at the requested location, lists up
// to Limit segments (default 100) starting after StartAfterSegment, and
// returns the health of each non-inline segment. The response's Redundancy
// is taken from the last segment processed.
// NOTE(review): EndBeforeSegment from the request is not consulted here —
// confirm whether that is intentional.
func (endpoint *Endpoint) ObjectHealth(ctx context.Context, in *internalpb.ObjectHealthRequest) (resp *internalpb.ObjectHealthResponse, err error) {
	defer mon.Task()(&ctx)(&err)

	var segmentHealthResponses []*internalpb.SegmentHealth
	var redundancy *pb.RedundancyScheme

	// Default page size when the caller does not supply a positive limit.
	limit := int(100)
	if in.GetLimit() > 0 {
		limit = int(in.GetLimit())
	}

	var startPosition metabase.SegmentPosition

	if in.GetStartAfterSegment() > 0 {
		startPosition = metabase.SegmentPositionFromEncoded(uint64(in.GetStartAfterSegment()))
	}

	projectID, err := uuid.FromBytes(in.GetProjectId())
	if err != nil {
		return nil, Error.Wrap(err)
	}

	objectLocation := metabase.ObjectLocation{
		ProjectID:  projectID,
		BucketName: string(in.GetBucket()),
		ObjectKey:  metabase.ObjectKey(in.GetEncryptedPath()),
	}

	object, err := endpoint.metabase.GetObjectLastCommitted(ctx, metabase.GetObjectLastCommitted{
		ObjectLocation: objectLocation,
	})
	if err != nil {
		return nil, Error.Wrap(err)
	}

	listResult, err := endpoint.metabase.ListSegments(ctx, metabase.ListSegments{
		StreamID: object.StreamID,
		Cursor:   startPosition,
		Limit:    limit,
	})
	if err != nil {
		return nil, Error.Wrap(err)
	}

	// Inline segments have no remote pieces, so they are skipped.
	for _, segment := range listResult.Segments {
		if !segment.Inline() {
			segmentHealth, err := endpoint.segmentHealth(ctx, segment)
			if err != nil {
				return nil, Error.Wrap(err)
			}
			segmentHealthResponses = append(segmentHealthResponses, segmentHealth.GetHealth())
			redundancy = segmentHealth.GetRedundancy()
		}
	}

	return &internalpb.ObjectHealthResponse{
		Segments:   segmentHealthResponses,
		Redundancy: redundancy,
	}, nil
}
|
||||
|
||||
// SegmentHealth will check the health of a segment.
//
// It resolves the last committed object at the requested location, fetches
// the segment at the position encoded in SegmentIndex, and returns its
// health. Inline segments have no remote pieces and are rejected.
func (endpoint *Endpoint) SegmentHealth(ctx context.Context, in *internalpb.SegmentHealthRequest) (_ *internalpb.SegmentHealthResponse, err error) {
	defer mon.Task()(&ctx)(&err)

	projectID, err := uuid.FromBytes(in.GetProjectId())
	if err != nil {
		return nil, Error.Wrap(err)
	}

	objectLocation := metabase.ObjectLocation{
		ProjectID:  projectID,
		BucketName: string(in.GetBucket()),
		ObjectKey:  metabase.ObjectKey(in.GetEncryptedPath()),
	}

	object, err := endpoint.metabase.GetObjectLastCommitted(ctx, metabase.GetObjectLastCommitted{
		ObjectLocation: objectLocation,
	})
	if err != nil {
		return nil, Error.Wrap(err)
	}

	segment, err := endpoint.metabase.GetSegmentByPosition(ctx, metabase.GetSegmentByPosition{
		StreamID: object.StreamID,
		Position: metabase.SegmentPositionFromEncoded(uint64(in.GetSegmentIndex())),
	})
	if err != nil {
		return nil, Error.Wrap(err)
	}

	if segment.Inline() {
		return nil, Error.New("cannot check health of inline segment")
	}

	return endpoint.segmentHealth(ctx, segment)
}
|
||||
|
||||
// segmentHealth classifies the nodes holding the segment's pieces as
// healthy, unhealthy, or offline according to the overlay service, and
// returns the classification together with the segment's redundancy scheme.
func (endpoint *Endpoint) segmentHealth(ctx context.Context, segment metabase.Segment) (_ *internalpb.SegmentHealthResponse, err error) {

	health := &internalpb.SegmentHealth{}
	var nodeIDs storj.NodeIDList
	for _, piece := range segment.Pieces {
		nodeIDs = append(nodeIDs, piece.StorageNode)
	}

	unreliableOrOfflineNodes, err := endpoint.overlay.KnownUnreliableOrOffline(ctx, nodeIDs)
	if err != nil {
		return nil, Error.Wrap(err)
	}

	offlineNodes, err := endpoint.overlay.KnownOffline(ctx, nodeIDs)
	if err != nil {
		return nil, Error.Wrap(err)
	}

	// Build set-style lookups for the two overlay answers.
	offlineMap := make(map[storj.NodeID]bool)
	for _, id := range offlineNodes {
		offlineMap[id] = true
	}
	unreliableOfflineMap := make(map[storj.NodeID]bool)
	for _, id := range unreliableOrOfflineNodes {
		unreliableOfflineMap[id] = true
	}

	redundancy := &pb.RedundancyScheme{
		MinReq:           int32(segment.Redundancy.RequiredShares),
		RepairThreshold:  int32(segment.Redundancy.RepairShares),
		SuccessThreshold: int32(segment.Redundancy.OptimalShares),
		Total:            int32(segment.Redundancy.TotalShares),
	}

	var healthyNodes storj.NodeIDList
	var unhealthyNodes storj.NodeIDList
	// Offline nodes are reported separately; of the remaining nodes, those
	// flagged unreliable-or-offline count as unhealthy, the rest as healthy.
	for _, id := range nodeIDs {
		if offlineMap[id] {
			continue
		}
		if unreliableOfflineMap[id] {
			unhealthyNodes = append(unhealthyNodes, id)
		} else {
			healthyNodes = append(healthyNodes, id)
		}
	}
	health.HealthyIds = healthyNodes
	health.UnhealthyIds = unhealthyNodes
	health.OfflineIds = offlineNodes

	// Segment carries the segment position, little-endian encoded in 8 bytes.
	health.Segment = make([]byte, 8)

	binary.LittleEndian.PutUint64(health.Segment, segment.Position.Encode())

	return &internalpb.SegmentHealthResponse{
		Health:     health,
		Redundancy: redundancy,
	}, nil
}
|
@ -1,133 +0,0 @@
|
||||
// Copyright (C) 2019 Storj Labs, Inc.
|
||||
// See LICENSE for copying information.
|
||||
|
||||
package inspector_test
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"storj.io/common/base58"
|
||||
"storj.io/common/encryption"
|
||||
"storj.io/common/memory"
|
||||
"storj.io/common/paths"
|
||||
"storj.io/common/pb"
|
||||
"storj.io/common/storj"
|
||||
"storj.io/common/testcontext"
|
||||
"storj.io/common/testrand"
|
||||
"storj.io/storj/private/testplanet"
|
||||
"storj.io/storj/satellite/internalpb"
|
||||
"storj.io/storj/satellite/metabase"
|
||||
"storj.io/uplink/private/eestream"
|
||||
)
|
||||
|
||||
// TestInspectorStats uploads an object via a testplanet uplink and then
// calls both health-inspector handlers directly on the satellite endpoint,
// checking the reported redundancy total and segment position against what
// was stored in the metabase.
func TestInspectorStats(t *testing.T) {
	testplanet.Run(t, testplanet.Config{
		SatelliteCount: 1, StorageNodeCount: 6, UplinkCount: 1,
	}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
		satellite := planet.Satellites[0]
		upl := planet.Uplinks[0]
		testData := testrand.Bytes(1 * memory.MiB)

		bucket := "testbucket"
		projectID := upl.Projects[0].ID

		err := upl.Upload(ctx, planet.Satellites[0], bucket, "test/path", testData)
		require.NoError(t, err)

		healthEndpoint := planet.Satellites[0].Inspector.Endpoint

		// Get path of random segment we just uploaded and check the health.
		// The encrypted object key is derived from the uplink's access grant.
		access := upl.Access[satellite.ID()]
		serializedAccess, err := access.Serialize()
		require.NoError(t, err)

		store, err := encryptionAccess(serializedAccess)
		require.NoError(t, err)

		encryptedPath, err := encryption.EncryptPathWithStoreCipher(bucket, paths.NewUnencrypted("test/path"), store)
		require.NoError(t, err)

		objectLocation := metabase.ObjectLocation{
			ProjectID:  projectID,
			BucketName: "testbucket",
			ObjectKey:  metabase.ObjectKey(encryptedPath.Raw()),
		}

		segment, err := satellite.Metabase.DB.GetLatestObjectLastSegment(ctx, metabase.GetLatestObjectLastSegment{
			ObjectLocation: objectLocation,
		})
		require.NoError(t, err)

		{ // Test Segment Health Request
			req := &internalpb.SegmentHealthRequest{
				ProjectId:     projectID[:],
				EncryptedPath: []byte(encryptedPath.Raw()),
				Bucket:        []byte(bucket),
				SegmentIndex:  int64(segment.Position.Encode()),
			}

			resp, err := healthEndpoint.SegmentHealth(ctx, req)
			require.NoError(t, err)

			redundancy, err := eestream.NewRedundancyStrategyFromProto(resp.GetRedundancy())
			require.NoError(t, err)

			require.Equal(t, 4, redundancy.TotalCount())
			// The endpoint encodes the segment position little-endian.
			encodedPosition := binary.LittleEndian.Uint64(resp.GetHealth().GetSegment())
			position := metabase.SegmentPositionFromEncoded(encodedPosition)
			require.Equal(t, segment.Position, position)
		}

		{ // Test Object Health Request
			objectHealthReq := &internalpb.ObjectHealthRequest{
				ProjectId:         projectID[:],
				EncryptedPath:     []byte(encryptedPath.Raw()),
				Bucket:            []byte(bucket),
				StartAfterSegment: 0,
				EndBeforeSegment:  0,
				Limit:             0,
			}
			resp, err := healthEndpoint.ObjectHealth(ctx, objectHealthReq)
			require.NoError(t, err)

			segments := resp.GetSegments()
			require.Len(t, segments, 1)

			redundancy, err := eestream.NewRedundancyStrategyFromProto(resp.GetRedundancy())
			require.NoError(t, err)

			require.Equal(t, 4, redundancy.TotalCount())
			encodedPosition := binary.LittleEndian.Uint64(segments[0].GetSegment())
			position := metabase.SegmentPositionFromEncoded(encodedPosition)
			require.Equal(t, segment.Position, position)
		}

	})
}
|
||||
|
||||
func encryptionAccess(access string) (*encryption.Store, error) {
|
||||
data, version, err := base58.CheckDecode(access)
|
||||
if err != nil || version != 0 {
|
||||
return nil, errors.New("invalid access grant format")
|
||||
}
|
||||
|
||||
p := new(pb.Scope)
|
||||
if err := pb.Unmarshal(data, p); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
key, err := storj.NewKey(p.EncryptionAccess.DefaultKey)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
store := encryption.NewStore()
|
||||
store.SetDefaultKey(key)
|
||||
store.SetDefaultPathCipher(storj.EncAESGCM)
|
||||
|
||||
return store, nil
|
||||
}
|
@ -1,344 +0,0 @@
|
||||
// Code generated by protoc-gen-gogo. DO NOT EDIT.
|
||||
// source: inspector.proto
|
||||
|
||||
package internalpb
|
||||
|
||||
import (
|
||||
fmt "fmt"
|
||||
math "math"
|
||||
|
||||
proto "github.com/gogo/protobuf/proto"
|
||||
|
||||
pb "storj.io/common/pb"
|
||||
)
|
||||
|
||||
// NOTE(review): generated by protoc-gen-gogo from inspector.proto — do not
// hand-edit; regenerate instead.

// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf

// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
|
||||
|
||||
// ObjectHealthRequest is the generated message for querying object health.
// NOTE(review): generated by protoc-gen-gogo — regenerate rather than edit.
type ObjectHealthRequest struct {
	EncryptedPath        []byte   `protobuf:"bytes,1,opt,name=encrypted_path,json=encryptedPath,proto3" json:"encrypted_path,omitempty"`
	Bucket               []byte   `protobuf:"bytes,2,opt,name=bucket,proto3" json:"bucket,omitempty"`
	ProjectId            []byte   `protobuf:"bytes,3,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"`
	StartAfterSegment    int64    `protobuf:"varint,4,opt,name=start_after_segment,json=startAfterSegment,proto3" json:"start_after_segment,omitempty"`
	EndBeforeSegment     int64    `protobuf:"varint,5,opt,name=end_before_segment,json=endBeforeSegment,proto3" json:"end_before_segment,omitempty"`
	Limit                int32    `protobuf:"varint,6,opt,name=limit,proto3" json:"limit,omitempty"`
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

func (m *ObjectHealthRequest) Reset()         { *m = ObjectHealthRequest{} }
func (m *ObjectHealthRequest) String() string { return proto.CompactTextString(m) }
func (*ObjectHealthRequest) ProtoMessage()    {}
func (*ObjectHealthRequest) Descriptor() ([]byte, []int) {
	return fileDescriptor_a07d9034b2dd9d26, []int{0}
}
func (m *ObjectHealthRequest) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_ObjectHealthRequest.Unmarshal(m, b)
}
func (m *ObjectHealthRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_ObjectHealthRequest.Marshal(b, m, deterministic)
}
func (m *ObjectHealthRequest) XXX_Merge(src proto.Message) {
	xxx_messageInfo_ObjectHealthRequest.Merge(m, src)
}
func (m *ObjectHealthRequest) XXX_Size() int {
	return xxx_messageInfo_ObjectHealthRequest.Size(m)
}
func (m *ObjectHealthRequest) XXX_DiscardUnknown() {
	xxx_messageInfo_ObjectHealthRequest.DiscardUnknown(m)
}

var xxx_messageInfo_ObjectHealthRequest proto.InternalMessageInfo

// Generated getters are safe to call on a nil receiver and return the
// field's zero value in that case.
func (m *ObjectHealthRequest) GetEncryptedPath() []byte {
	if m != nil {
		return m.EncryptedPath
	}
	return nil
}

func (m *ObjectHealthRequest) GetBucket() []byte {
	if m != nil {
		return m.Bucket
	}
	return nil
}

func (m *ObjectHealthRequest) GetProjectId() []byte {
	if m != nil {
		return m.ProjectId
	}
	return nil
}

func (m *ObjectHealthRequest) GetStartAfterSegment() int64 {
	if m != nil {
		return m.StartAfterSegment
	}
	return 0
}

func (m *ObjectHealthRequest) GetEndBeforeSegment() int64 {
	if m != nil {
		return m.EndBeforeSegment
	}
	return 0
}

func (m *ObjectHealthRequest) GetLimit() int32 {
	if m != nil {
		return m.Limit
	}
	return 0
}
|
||||
|
||||
// ObjectHealthResponse is the generated message carrying per-segment health
// plus the redundancy scheme.
// NOTE(review): generated by protoc-gen-gogo — regenerate rather than edit.
type ObjectHealthResponse struct {
	Segments             []*SegmentHealth     `protobuf:"bytes,1,rep,name=segments,proto3" json:"segments,omitempty"`
	Redundancy           *pb.RedundancyScheme `protobuf:"bytes,2,opt,name=redundancy,proto3" json:"redundancy,omitempty"`
	XXX_NoUnkeyedLiteral struct{}             `json:"-"`
	XXX_unrecognized     []byte               `json:"-"`
	XXX_sizecache        int32                `json:"-"`
}

func (m *ObjectHealthResponse) Reset()         { *m = ObjectHealthResponse{} }
func (m *ObjectHealthResponse) String() string { return proto.CompactTextString(m) }
func (*ObjectHealthResponse) ProtoMessage()    {}
func (*ObjectHealthResponse) Descriptor() ([]byte, []int) {
	return fileDescriptor_a07d9034b2dd9d26, []int{1}
}
func (m *ObjectHealthResponse) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_ObjectHealthResponse.Unmarshal(m, b)
}
func (m *ObjectHealthResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_ObjectHealthResponse.Marshal(b, m, deterministic)
}
func (m *ObjectHealthResponse) XXX_Merge(src proto.Message) {
	xxx_messageInfo_ObjectHealthResponse.Merge(m, src)
}
func (m *ObjectHealthResponse) XXX_Size() int {
	return xxx_messageInfo_ObjectHealthResponse.Size(m)
}
func (m *ObjectHealthResponse) XXX_DiscardUnknown() {
	xxx_messageInfo_ObjectHealthResponse.DiscardUnknown(m)
}

var xxx_messageInfo_ObjectHealthResponse proto.InternalMessageInfo

// Generated getters are nil-receiver safe.
func (m *ObjectHealthResponse) GetSegments() []*SegmentHealth {
	if m != nil {
		return m.Segments
	}
	return nil
}

func (m *ObjectHealthResponse) GetRedundancy() *pb.RedundancyScheme {
	if m != nil {
		return m.Redundancy
	}
	return nil
}
|
||||
|
||||
type SegmentHealthRequest struct {
|
||||
Bucket []byte `protobuf:"bytes,1,opt,name=bucket,proto3" json:"bucket,omitempty"`
|
||||
EncryptedPath []byte `protobuf:"bytes,2,opt,name=encrypted_path,json=encryptedPath,proto3" json:"encrypted_path,omitempty"`
|
||||
SegmentIndex int64 `protobuf:"varint,3,opt,name=segment_index,json=segmentIndex,proto3" json:"segment_index,omitempty"`
|
||||
ProjectId []byte `protobuf:"bytes,4,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *SegmentHealthRequest) Reset() { *m = SegmentHealthRequest{} }
|
||||
func (m *SegmentHealthRequest) String() string { return proto.CompactTextString(m) }
|
||||
func (*SegmentHealthRequest) ProtoMessage() {}
|
||||
func (*SegmentHealthRequest) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_a07d9034b2dd9d26, []int{2}
|
||||
}
|
||||
func (m *SegmentHealthRequest) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_SegmentHealthRequest.Unmarshal(m, b)
|
||||
}
|
||||
func (m *SegmentHealthRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_SegmentHealthRequest.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (m *SegmentHealthRequest) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_SegmentHealthRequest.Merge(m, src)
|
||||
}
|
||||
func (m *SegmentHealthRequest) XXX_Size() int {
|
||||
return xxx_messageInfo_SegmentHealthRequest.Size(m)
|
||||
}
|
||||
func (m *SegmentHealthRequest) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_SegmentHealthRequest.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_SegmentHealthRequest proto.InternalMessageInfo
|
||||
|
||||
func (m *SegmentHealthRequest) GetBucket() []byte {
|
||||
if m != nil {
|
||||
return m.Bucket
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *SegmentHealthRequest) GetEncryptedPath() []byte {
|
||||
if m != nil {
|
||||
return m.EncryptedPath
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *SegmentHealthRequest) GetSegmentIndex() int64 {
|
||||
if m != nil {
|
||||
return m.SegmentIndex
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (m *SegmentHealthRequest) GetProjectId() []byte {
|
||||
if m != nil {
|
||||
return m.ProjectId
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type SegmentHealthResponse struct {
|
||||
Health *SegmentHealth `protobuf:"bytes,1,opt,name=health,proto3" json:"health,omitempty"`
|
||||
Redundancy *pb.RedundancyScheme `protobuf:"bytes,2,opt,name=redundancy,proto3" json:"redundancy,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *SegmentHealthResponse) Reset() { *m = SegmentHealthResponse{} }
|
||||
func (m *SegmentHealthResponse) String() string { return proto.CompactTextString(m) }
|
||||
func (*SegmentHealthResponse) ProtoMessage() {}
|
||||
func (*SegmentHealthResponse) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_a07d9034b2dd9d26, []int{3}
|
||||
}
|
||||
func (m *SegmentHealthResponse) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_SegmentHealthResponse.Unmarshal(m, b)
|
||||
}
|
||||
func (m *SegmentHealthResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_SegmentHealthResponse.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (m *SegmentHealthResponse) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_SegmentHealthResponse.Merge(m, src)
|
||||
}
|
||||
func (m *SegmentHealthResponse) XXX_Size() int {
|
||||
return xxx_messageInfo_SegmentHealthResponse.Size(m)
|
||||
}
|
||||
func (m *SegmentHealthResponse) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_SegmentHealthResponse.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_SegmentHealthResponse proto.InternalMessageInfo
|
||||
|
||||
func (m *SegmentHealthResponse) GetHealth() *SegmentHealth {
|
||||
if m != nil {
|
||||
return m.Health
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *SegmentHealthResponse) GetRedundancy() *pb.RedundancyScheme {
|
||||
if m != nil {
|
||||
return m.Redundancy
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type SegmentHealth struct {
|
||||
HealthyIds []NodeID `protobuf:"bytes,1,rep,name=healthy_ids,json=healthyIds,proto3,customtype=NodeID" json:"healthy_ids,omitempty"`
|
||||
UnhealthyIds []NodeID `protobuf:"bytes,2,rep,name=unhealthy_ids,json=unhealthyIds,proto3,customtype=NodeID" json:"unhealthy_ids,omitempty"`
|
||||
OfflineIds []NodeID `protobuf:"bytes,3,rep,name=offline_ids,json=offlineIds,proto3,customtype=NodeID" json:"offline_ids,omitempty"`
|
||||
Segment []byte `protobuf:"bytes,4,opt,name=segment,proto3" json:"segment,omitempty"`
|
||||
XXX_NoUnkeyedLiteral struct{} `json:"-"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
XXX_sizecache int32 `json:"-"`
|
||||
}
|
||||
|
||||
func (m *SegmentHealth) Reset() { *m = SegmentHealth{} }
|
||||
func (m *SegmentHealth) String() string { return proto.CompactTextString(m) }
|
||||
func (*SegmentHealth) ProtoMessage() {}
|
||||
func (*SegmentHealth) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_a07d9034b2dd9d26, []int{4}
|
||||
}
|
||||
func (m *SegmentHealth) XXX_Unmarshal(b []byte) error {
|
||||
return xxx_messageInfo_SegmentHealth.Unmarshal(m, b)
|
||||
}
|
||||
func (m *SegmentHealth) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
return xxx_messageInfo_SegmentHealth.Marshal(b, m, deterministic)
|
||||
}
|
||||
func (m *SegmentHealth) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_SegmentHealth.Merge(m, src)
|
||||
}
|
||||
func (m *SegmentHealth) XXX_Size() int {
|
||||
return xxx_messageInfo_SegmentHealth.Size(m)
|
||||
}
|
||||
func (m *SegmentHealth) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_SegmentHealth.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_SegmentHealth proto.InternalMessageInfo
|
||||
|
||||
func (m *SegmentHealth) GetSegment() []byte {
|
||||
if m != nil {
|
||||
return m.Segment
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func init() {
|
||||
proto.RegisterType((*ObjectHealthRequest)(nil), "satellite.inspector.ObjectHealthRequest")
|
||||
proto.RegisterType((*ObjectHealthResponse)(nil), "satellite.inspector.ObjectHealthResponse")
|
||||
proto.RegisterType((*SegmentHealthRequest)(nil), "satellite.inspector.SegmentHealthRequest")
|
||||
proto.RegisterType((*SegmentHealthResponse)(nil), "satellite.inspector.SegmentHealthResponse")
|
||||
proto.RegisterType((*SegmentHealth)(nil), "satellite.inspector.SegmentHealth")
|
||||
}
|
||||
|
||||
func init() { proto.RegisterFile("inspector.proto", fileDescriptor_a07d9034b2dd9d26) }
|
||||
|
||||
var fileDescriptor_a07d9034b2dd9d26 = []byte{
|
||||
// 524 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x54, 0x4d, 0x8f, 0xd3, 0x30,
|
||||
0x10, 0xc5, 0xdb, 0x6d, 0x81, 0x69, 0x4a, 0xc1, 0x2d, 0x28, 0x2a, 0x42, 0x54, 0x59, 0xad, 0xd4,
|
||||
0x65, 0x51, 0x2a, 0x95, 0x1b, 0x48, 0x48, 0x54, 0x1c, 0xe8, 0x05, 0x50, 0xf6, 0xc6, 0x25, 0x4a,
|
||||
0xe2, 0x69, 0x93, 0x25, 0xb5, 0x83, 0xed, 0x4a, 0xf4, 0x5f, 0x20, 0x71, 0xe2, 0x7f, 0xf0, 0x6b,
|
||||
0x38, 0x70, 0xe0, 0xc2, 0xdf, 0x40, 0x75, 0xdc, 0x6c, 0xbf, 0x0e, 0x95, 0xb8, 0x65, 0xe6, 0xbd,
|
||||
0x79, 0x9e, 0xbc, 0xe7, 0x04, 0xda, 0x19, 0x57, 0x05, 0x26, 0x5a, 0x48, 0xbf, 0x90, 0x42, 0x0b,
|
||||
0xda, 0x51, 0x91, 0xc6, 0x3c, 0xcf, 0x34, 0xfa, 0x15, 0xd4, 0x83, 0x99, 0x98, 0x89, 0x92, 0xd0,
|
||||
0x6b, 0x17, 0x22, 0xe3, 0x1a, 0x25, 0x8b, 0xcb, 0x86, 0xf7, 0x97, 0x40, 0xe7, 0x43, 0x7c, 0x8d,
|
||||
0x89, 0x7e, 0x87, 0x51, 0xae, 0xd3, 0x00, 0xbf, 0x2c, 0x50, 0x69, 0x7a, 0x0e, 0xf7, 0x90, 0x27,
|
||||
0x72, 0x59, 0x68, 0x64, 0x61, 0x11, 0xe9, 0xd4, 0x25, 0x7d, 0x32, 0x70, 0x82, 0x56, 0xd5, 0xfd,
|
||||
0x18, 0xe9, 0x94, 0x3e, 0x82, 0x46, 0xbc, 0x48, 0x3e, 0xa3, 0x76, 0x4f, 0x0c, 0x6c, 0x2b, 0xfa,
|
||||
0x04, 0xa0, 0x90, 0x62, 0x25, 0x1b, 0x66, 0xcc, 0xad, 0x19, 0xec, 0xae, 0xed, 0x4c, 0x18, 0xf5,
|
||||
0xa1, 0xa3, 0x74, 0x24, 0x75, 0x18, 0x4d, 0x35, 0xca, 0x50, 0xe1, 0x6c, 0x8e, 0x5c, 0xbb, 0xa7,
|
||||
0x7d, 0x32, 0xa8, 0x05, 0x0f, 0x0c, 0xf4, 0x66, 0x85, 0x5c, 0x95, 0x00, 0x7d, 0x0e, 0x14, 0x39,
|
||||
0x0b, 0x63, 0x9c, 0x0a, 0x89, 0x15, 0xbd, 0x6e, 0xe8, 0xf7, 0x91, 0xb3, 0xb1, 0x01, 0xd6, 0xec,
|
||||
0x2e, 0xd4, 0xf3, 0x6c, 0x9e, 0x69, 0xb7, 0xd1, 0x27, 0x83, 0x7a, 0x50, 0x16, 0xde, 0x77, 0x02,
|
||||
0xdd, 0xed, 0x37, 0x55, 0x85, 0xe0, 0x0a, 0xe9, 0x6b, 0xb8, 0x63, 0x15, 0x95, 0x4b, 0xfa, 0xb5,
|
||||
0x41, 0x73, 0xe4, 0xf9, 0x07, 0x7c, 0xf4, 0xad, 0xbc, 0x9d, 0xae, 0x66, 0xe8, 0x2b, 0x00, 0x89,
|
||||
0x6c, 0xc1, 0x59, 0xc4, 0x93, 0xa5, 0xf1, 0xa1, 0x39, 0x7a, 0xec, 0xdf, 0x18, 0x1d, 0x54, 0xe0,
|
||||
0x55, 0x92, 0xe2, 0x1c, 0x83, 0x0d, 0xba, 0xf7, 0x83, 0x40, 0x77, 0x5b, 0xd8, 0x06, 0x70, 0xe3,
|
||||
0x2c, 0xd9, 0x72, 0x76, 0x3f, 0x98, 0x93, 0x43, 0xc1, 0x9c, 0x41, 0xcb, 0x2e, 0x18, 0x66, 0x9c,
|
||||
0xe1, 0x57, 0x93, 0x41, 0x2d, 0x70, 0x6c, 0x73, 0xb2, 0xea, 0xed, 0xa4, 0x74, 0xba, 0x93, 0x92,
|
||||
0xf7, 0x8d, 0xc0, 0xc3, 0x9d, 0xdd, 0xac, 0x65, 0x2f, 0xa1, 0x91, 0x9a, 0x8e, 0x59, 0xee, 0x38,
|
||||
0xc3, 0xec, 0xc4, 0xff, 0xd9, 0xf5, 0x93, 0x40, 0x6b, 0x4b, 0x96, 0x5e, 0x42, 0xb3, 0x14, 0x5e,
|
||||
0x86, 0x19, 0x2b, 0x03, 0x74, 0xc6, 0xf0, 0xeb, 0xf7, 0xd3, 0xc6, 0x7b, 0xc1, 0x70, 0xf2, 0x36,
|
||||
0x00, 0x0b, 0x4f, 0x98, 0xa2, 0x43, 0x68, 0x2d, 0xf8, 0x26, 0xfd, 0x64, 0x8f, 0xee, 0x54, 0x84,
|
||||
0xd5, 0xc0, 0x25, 0x34, 0xc5, 0x74, 0x9a, 0x67, 0x1c, 0x0d, 0xbd, 0xb6, 0xaf, 0x6e, 0xe1, 0x15,
|
||||
0xd9, 0x85, 0xdb, 0x9b, 0x37, 0xd9, 0x09, 0xd6, 0xe5, 0xe8, 0x0f, 0x81, 0x76, 0xb9, 0xef, 0x64,
|
||||
0xed, 0x0e, 0x45, 0x70, 0x36, 0xaf, 0x23, 0x1d, 0x1c, 0xf4, 0xf0, 0xc0, 0xb7, 0xd9, 0xbb, 0x38,
|
||||
0x82, 0x59, 0x06, 0xe5, 0xdd, 0xa2, 0xe9, 0xae, 0x61, 0x17, 0x47, 0x64, 0x65, 0x0f, 0x7a, 0x76,
|
||||
0x0c, 0x75, 0x7d, 0xd2, 0xf8, 0xfc, 0xd3, 0x99, 0xd2, 0x42, 0x5e, 0xfb, 0x99, 0x18, 0x9a, 0x87,
|
||||
0x61, 0x35, 0x3d, 0x34, 0xe1, 0xf2, 0x28, 0x2f, 0xe2, 0xb8, 0x61, 0x7e, 0x3c, 0x2f, 0xfe, 0x05,
|
||||
0x00, 0x00, 0xff, 0xff, 0x03, 0xb4, 0x8a, 0x34, 0xbd, 0x04, 0x00, 0x00,
|
||||
}
|
@ -1,50 +0,0 @@
|
||||
// Copyright (C) 2019 Storj Labs, Inc.
|
||||
// See LICENSE for copying information.
|
||||
|
||||
syntax = "proto3";
|
||||
option go_package = "storj.io/storj/satellite/internalpb";
|
||||
|
||||
import "gogo.proto";
|
||||
import "pointerdb.proto";
|
||||
|
||||
package satellite.inspector;
|
||||
|
||||
service HealthInspector {
|
||||
// ObjectHealth will return stats about the health of an object
|
||||
rpc ObjectHealth(ObjectHealthRequest) returns (ObjectHealthResponse) {}
|
||||
// SegmentHealth will return stats about the health of a segment
|
||||
rpc SegmentHealth(SegmentHealthRequest) returns (SegmentHealthResponse) {}
|
||||
}
|
||||
|
||||
message ObjectHealthRequest {
|
||||
bytes encrypted_path = 1; // object encrypted path
|
||||
bytes bucket = 2; // object bucket name
|
||||
bytes project_id = 3; // object project id
|
||||
int64 start_after_segment = 4; // Get all segments after specified segment index
|
||||
int64 end_before_segment = 5; // Stop at segment before specified segment index
|
||||
int32 limit = 6; // Max number of segments that are checked
|
||||
}
|
||||
|
||||
message ObjectHealthResponse {
|
||||
repeated SegmentHealth segments = 1; // actual segment info
|
||||
pointerdb.RedundancyScheme redundancy = 2; // expected segment info
|
||||
}
|
||||
|
||||
message SegmentHealthRequest {
|
||||
bytes bucket = 1; // segment bucket name
|
||||
bytes encrypted_path = 2; // segment encrypted path
|
||||
int64 segment_index = 3; // segment index
|
||||
bytes project_id = 4; // segment project id
|
||||
}
|
||||
|
||||
message SegmentHealthResponse {
|
||||
SegmentHealth health = 1; // Information about a segment's health
|
||||
pointerdb.RedundancyScheme redundancy = 2; // expected segment info
|
||||
}
|
||||
|
||||
message SegmentHealth {
|
||||
repeated bytes healthy_ids = 1 [(gogoproto.customtype) = "NodeID"]; // online + not disqualified
|
||||
repeated bytes unhealthy_ids = 2 [(gogoproto.customtype) = "NodeID"]; // online + disqualified
|
||||
repeated bytes offline_ids = 3 [(gogoproto.customtype) = "NodeID"]; // offline
|
||||
bytes segment = 4; // path formatted segment index
|
||||
}
|
@ -1,155 +0,0 @@
|
||||
// Code generated by protoc-gen-go-drpc. DO NOT EDIT.
|
||||
// protoc-gen-go-drpc version: v0.0.28
|
||||
// source: inspector.proto
|
||||
|
||||
package internalpb
|
||||
|
||||
import (
|
||||
bytes "bytes"
|
||||
context "context"
|
||||
errors "errors"
|
||||
|
||||
jsonpb "github.com/gogo/protobuf/jsonpb"
|
||||
proto "github.com/gogo/protobuf/proto"
|
||||
|
||||
drpc "storj.io/drpc"
|
||||
drpcerr "storj.io/drpc/drpcerr"
|
||||
)
|
||||
|
||||
type drpcEncoding_File_inspector_proto struct{}
|
||||
|
||||
func (drpcEncoding_File_inspector_proto) Marshal(msg drpc.Message) ([]byte, error) {
|
||||
return proto.Marshal(msg.(proto.Message))
|
||||
}
|
||||
|
||||
func (drpcEncoding_File_inspector_proto) Unmarshal(buf []byte, msg drpc.Message) error {
|
||||
return proto.Unmarshal(buf, msg.(proto.Message))
|
||||
}
|
||||
|
||||
func (drpcEncoding_File_inspector_proto) JSONMarshal(msg drpc.Message) ([]byte, error) {
|
||||
var buf bytes.Buffer
|
||||
err := new(jsonpb.Marshaler).Marshal(&buf, msg.(proto.Message))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return buf.Bytes(), nil
|
||||
}
|
||||
|
||||
func (drpcEncoding_File_inspector_proto) JSONUnmarshal(buf []byte, msg drpc.Message) error {
|
||||
return jsonpb.Unmarshal(bytes.NewReader(buf), msg.(proto.Message))
|
||||
}
|
||||
|
||||
type DRPCHealthInspectorClient interface {
|
||||
DRPCConn() drpc.Conn
|
||||
|
||||
ObjectHealth(ctx context.Context, in *ObjectHealthRequest) (*ObjectHealthResponse, error)
|
||||
SegmentHealth(ctx context.Context, in *SegmentHealthRequest) (*SegmentHealthResponse, error)
|
||||
}
|
||||
|
||||
type drpcHealthInspectorClient struct {
|
||||
cc drpc.Conn
|
||||
}
|
||||
|
||||
func NewDRPCHealthInspectorClient(cc drpc.Conn) DRPCHealthInspectorClient {
|
||||
return &drpcHealthInspectorClient{cc}
|
||||
}
|
||||
|
||||
func (c *drpcHealthInspectorClient) DRPCConn() drpc.Conn { return c.cc }
|
||||
|
||||
func (c *drpcHealthInspectorClient) ObjectHealth(ctx context.Context, in *ObjectHealthRequest) (*ObjectHealthResponse, error) {
|
||||
out := new(ObjectHealthResponse)
|
||||
err := c.cc.Invoke(ctx, "/satellite.inspector.HealthInspector/ObjectHealth", drpcEncoding_File_inspector_proto{}, in, out)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *drpcHealthInspectorClient) SegmentHealth(ctx context.Context, in *SegmentHealthRequest) (*SegmentHealthResponse, error) {
|
||||
out := new(SegmentHealthResponse)
|
||||
err := c.cc.Invoke(ctx, "/satellite.inspector.HealthInspector/SegmentHealth", drpcEncoding_File_inspector_proto{}, in, out)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
type DRPCHealthInspectorServer interface {
|
||||
ObjectHealth(context.Context, *ObjectHealthRequest) (*ObjectHealthResponse, error)
|
||||
SegmentHealth(context.Context, *SegmentHealthRequest) (*SegmentHealthResponse, error)
|
||||
}
|
||||
|
||||
type DRPCHealthInspectorUnimplementedServer struct{}
|
||||
|
||||
func (s *DRPCHealthInspectorUnimplementedServer) ObjectHealth(context.Context, *ObjectHealthRequest) (*ObjectHealthResponse, error) {
|
||||
return nil, drpcerr.WithCode(errors.New("Unimplemented"), drpcerr.Unimplemented)
|
||||
}
|
||||
|
||||
func (s *DRPCHealthInspectorUnimplementedServer) SegmentHealth(context.Context, *SegmentHealthRequest) (*SegmentHealthResponse, error) {
|
||||
return nil, drpcerr.WithCode(errors.New("Unimplemented"), drpcerr.Unimplemented)
|
||||
}
|
||||
|
||||
type DRPCHealthInspectorDescription struct{}
|
||||
|
||||
func (DRPCHealthInspectorDescription) NumMethods() int { return 2 }
|
||||
|
||||
func (DRPCHealthInspectorDescription) Method(n int) (string, drpc.Encoding, drpc.Receiver, interface{}, bool) {
|
||||
switch n {
|
||||
case 0:
|
||||
return "/satellite.inspector.HealthInspector/ObjectHealth", drpcEncoding_File_inspector_proto{},
|
||||
func(srv interface{}, ctx context.Context, in1, in2 interface{}) (drpc.Message, error) {
|
||||
return srv.(DRPCHealthInspectorServer).
|
||||
ObjectHealth(
|
||||
ctx,
|
||||
in1.(*ObjectHealthRequest),
|
||||
)
|
||||
}, DRPCHealthInspectorServer.ObjectHealth, true
|
||||
case 1:
|
||||
return "/satellite.inspector.HealthInspector/SegmentHealth", drpcEncoding_File_inspector_proto{},
|
||||
func(srv interface{}, ctx context.Context, in1, in2 interface{}) (drpc.Message, error) {
|
||||
return srv.(DRPCHealthInspectorServer).
|
||||
SegmentHealth(
|
||||
ctx,
|
||||
in1.(*SegmentHealthRequest),
|
||||
)
|
||||
}, DRPCHealthInspectorServer.SegmentHealth, true
|
||||
default:
|
||||
return "", nil, nil, nil, false
|
||||
}
|
||||
}
|
||||
|
||||
func DRPCRegisterHealthInspector(mux drpc.Mux, impl DRPCHealthInspectorServer) error {
|
||||
return mux.Register(impl, DRPCHealthInspectorDescription{})
|
||||
}
|
||||
|
||||
type DRPCHealthInspector_ObjectHealthStream interface {
|
||||
drpc.Stream
|
||||
SendAndClose(*ObjectHealthResponse) error
|
||||
}
|
||||
|
||||
type drpcHealthInspector_ObjectHealthStream struct {
|
||||
drpc.Stream
|
||||
}
|
||||
|
||||
func (x *drpcHealthInspector_ObjectHealthStream) SendAndClose(m *ObjectHealthResponse) error {
|
||||
if err := x.MsgSend(m, drpcEncoding_File_inspector_proto{}); err != nil {
|
||||
return err
|
||||
}
|
||||
return x.CloseSend()
|
||||
}
|
||||
|
||||
type DRPCHealthInspector_SegmentHealthStream interface {
|
||||
drpc.Stream
|
||||
SendAndClose(*SegmentHealthResponse) error
|
||||
}
|
||||
|
||||
type drpcHealthInspector_SegmentHealthStream struct {
|
||||
drpc.Stream
|
||||
}
|
||||
|
||||
func (x *drpcHealthInspector_SegmentHealthStream) SendAndClose(m *SegmentHealthResponse) error {
|
||||
if err := x.MsgSend(m, drpcEncoding_File_inspector_proto{}); err != nil {
|
||||
return err
|
||||
}
|
||||
return x.CloseSend()
|
||||
}
|
Loading…
Reference in New Issue
Block a user