Merge branch 'master'.

Change-Id: Ic14325edc291573582dce0cea3e04991a820b48b
Egon Elbre 2020-11-02 12:59:38 +02:00
commit 716068a1e0
51 changed files with 3620 additions and 204 deletions


@ -18,12 +18,12 @@ import (
"github.com/zeebo/errs"
"storj.io/common/identity"
"storj.io/common/pb"
"storj.io/common/rpc"
"storj.io/common/storj"
"storj.io/private/process"
"storj.io/storj/private/prompt"
_ "storj.io/storj/private/version" // This attaches version information during release builds.
"storj.io/storj/satellite/internalpb"
"storj.io/uplink/private/eestream"
)
@ -87,9 +87,9 @@ var (
type Inspector struct {
conn *rpc.Conn
identity *identity.FullIdentity
overlayclient pb.DRPCOverlayInspectorClient
irrdbclient pb.DRPCIrreparableInspectorClient
healthclient pb.DRPCHealthInspectorClient
overlayclient internalpb.DRPCOverlayInspectorClient
irrdbclient internalpb.DRPCIrreparableInspectorClient
healthclient internalpb.DRPCHealthInspectorClient
}
// NewInspector creates a new inspector client for access to overlay.
@ -110,9 +110,9 @@ func NewInspector(ctx context.Context, address, path string) (*Inspector, error)
return &Inspector{
conn: conn,
identity: id,
overlayclient: pb.NewDRPCOverlayInspectorClient(conn),
irrdbclient: pb.NewDRPCIrreparableInspectorClient(conn),
healthclient: pb.NewDRPCHealthInspectorClient(conn),
overlayclient: internalpb.NewDRPCOverlayInspectorClient(conn),
irrdbclient: internalpb.NewDRPCIrreparableInspectorClient(conn),
healthclient: internalpb.NewDRPCHealthInspectorClient(conn),
}, nil
}
@ -157,7 +157,7 @@ func ObjectHealth(cmd *cobra.Command, args []string) (err error) {
if err != nil {
return err
}
req := &pb.ObjectHealthRequest{
req := &internalpb.ObjectHealthRequest{
ProjectId: []byte(args[0]),
Bucket: []byte(args[1]),
EncryptedPath: decodedPath,
@ -215,7 +215,7 @@ func SegmentHealth(cmd *cobra.Command, args []string) (err error) {
return ErrRequest.Wrap(err)
}
req := &pb.SegmentHealthRequest{
req := &internalpb.SegmentHealthRequest{
ProjectId: []byte(args[0]),
SegmentIndex: segmentIndex,
Bucket: []byte(args[2]),
@ -250,7 +250,7 @@ func SegmentHealth(cmd *cobra.Command, args []string) (err error) {
return err
}
if err := printSegmentHealthAndNodeTables(w, redundancy, []*pb.SegmentHealth{resp.GetHealth()}); err != nil {
if err := printSegmentHealthAndNodeTables(w, redundancy, []*internalpb.SegmentHealth{resp.GetHealth()}); err != nil {
return err
}
@ -265,7 +265,7 @@ func csvOutput() (*os.File, error) {
return os.Create(CSVPath)
}
func printSegmentHealthAndNodeTables(w *csv.Writer, redundancy eestream.RedundancyStrategy, segments []*pb.SegmentHealth) error {
func printSegmentHealthAndNodeTables(w *csv.Writer, redundancy eestream.RedundancyStrategy, segments []*internalpb.SegmentHealth) error {
segmentTableHeader := []string{
"Segment Index", "Healthy Nodes", "Unhealthy Nodes", "Offline Nodes",
}
@ -378,7 +378,7 @@ func getSegments(cmd *cobra.Command, args []string) error {
// query DB and paginate results
for {
req := &pb.ListIrreparableSegmentsRequest{
req := &internalpb.ListIrreparableSegmentsRequest{
Limit: irreparableLimit,
LastSeenSegmentPath: lastSeenSegmentPath,
}
@ -416,8 +416,8 @@ func getSegments(cmd *cobra.Command, args []string) error {
}
// sortSegments groups segments by the object they belong to.
func sortSegments(segments []*pb.IrreparableSegment) map[string][]*pb.IrreparableSegment {
objects := make(map[string][]*pb.IrreparableSegment)
func sortSegments(segments []*internalpb.IrreparableSegment) map[string][]*internalpb.IrreparableSegment {
objects := make(map[string][]*internalpb.IrreparableSegment)
for _, seg := range segments {
pathElements := storj.SplitPath(string(seg.Path))


@ -18,10 +18,10 @@ import (
"go.uber.org/zap"
"storj.io/common/memory"
"storj.io/common/pb"
"storj.io/common/rpc"
"storj.io/private/process"
"storj.io/private/version"
"storj.io/storj/storagenode/internalpb"
)
const contactWindow = time.Hour * 2
@ -38,8 +38,8 @@ func dialDashboardClient(ctx context.Context, address string) (*dashboardClient,
return &dashboardClient{conn: conn}, nil
}
func (dash *dashboardClient) dashboard(ctx context.Context) (*pb.DashboardResponse, error) {
return pb.NewDRPCPieceStoreInspectorClient(dash.conn).Dashboard(ctx, &pb.DashboardRequest{})
func (dash *dashboardClient) dashboard(ctx context.Context) (*internalpb.DashboardResponse, error) {
return internalpb.NewDRPCPieceStoreInspectorClient(dash.conn).Dashboard(ctx, &internalpb.DashboardRequest{})
}
func (dash *dashboardClient) close() error {
@ -81,7 +81,7 @@ func cmdDashboard(cmd *cobra.Command, args []string) (err error) {
}
}
func printDashboard(data *pb.DashboardResponse) error {
func printDashboard(data *internalpb.DashboardResponse) error {
clearScreen()
var warnFlag bool
color.NoColor = !useColor


@ -17,12 +17,12 @@ import (
"go.uber.org/zap"
"storj.io/common/memory"
"storj.io/common/pb"
"storj.io/common/rpc"
"storj.io/common/storj"
"storj.io/private/process"
"storj.io/storj/private/date"
"storj.io/storj/private/prompt"
"storj.io/storj/storagenode/internalpb"
)
type gracefulExitClient struct {
@ -42,20 +42,20 @@ func dialGracefulExitClient(ctx context.Context, address string) (*gracefulExitC
return &gracefulExitClient{conn: conn}, nil
}
func (client *gracefulExitClient) getNonExitingSatellites(ctx context.Context) (*pb.GetNonExitingSatellitesResponse, error) {
return pb.NewDRPCNodeGracefulExitClient(client.conn).GetNonExitingSatellites(ctx, &pb.GetNonExitingSatellitesRequest{})
func (client *gracefulExitClient) getNonExitingSatellites(ctx context.Context) (*internalpb.GetNonExitingSatellitesResponse, error) {
return internalpb.NewDRPCNodeGracefulExitClient(client.conn).GetNonExitingSatellites(ctx, &internalpb.GetNonExitingSatellitesRequest{})
}
func (client *gracefulExitClient) initGracefulExit(ctx context.Context, req *pb.InitiateGracefulExitRequest) (*pb.ExitProgress, error) {
return pb.NewDRPCNodeGracefulExitClient(client.conn).InitiateGracefulExit(ctx, req)
func (client *gracefulExitClient) initGracefulExit(ctx context.Context, req *internalpb.InitiateGracefulExitRequest) (*internalpb.ExitProgress, error) {
return internalpb.NewDRPCNodeGracefulExitClient(client.conn).InitiateGracefulExit(ctx, req)
}
func (client *gracefulExitClient) getExitProgress(ctx context.Context) (*pb.GetExitProgressResponse, error) {
return pb.NewDRPCNodeGracefulExitClient(client.conn).GetExitProgress(ctx, &pb.GetExitProgressRequest{})
func (client *gracefulExitClient) getExitProgress(ctx context.Context) (*internalpb.GetExitProgressResponse, error) {
return internalpb.NewDRPCNodeGracefulExitClient(client.conn).GetExitProgress(ctx, &internalpb.GetExitProgressRequest{})
}
func (client *gracefulExitClient) gracefulExitFeasibility(ctx context.Context, id storj.NodeID) (*pb.GracefulExitFeasibilityResponse, error) {
return pb.NewDRPCNodeGracefulExitClient(client.conn).GracefulExitFeasibility(ctx, &pb.GracefulExitFeasibilityNodeRequest{NodeId: id})
func (client *gracefulExitClient) gracefulExitFeasibility(ctx context.Context, id storj.NodeID) (*internalpb.GracefulExitFeasibilityResponse, error) {
return internalpb.NewDRPCNodeGracefulExitClient(client.conn).GracefulExitFeasibility(ctx, &internalpb.GracefulExitFeasibilityRequest{NodeId: id})
}
func (client *gracefulExitClient) close() error {
@ -185,7 +185,7 @@ func cmdGracefulExitStatus(cmd *cobra.Command, args []string) (err error) {
return nil
}
func displayExitProgress(w io.Writer, progresses []*pb.ExitProgress) {
func displayExitProgress(w io.Writer, progresses []*internalpb.ExitProgress) {
fmt.Fprintln(w, "\nDomain Name\tNode ID\tPercent Complete\tSuccessful\tCompletion Receipt")
for _, progress := range progresses {
@ -229,10 +229,10 @@ func gracefulExitInit(ctx context.Context, satelliteIDs []storj.NodeID, w *tabwr
}
// save satellites for graceful exit into the db
progresses := make([]*pb.ExitProgress, 0, len(satelliteIDs))
progresses := make([]*internalpb.ExitProgress, 0, len(satelliteIDs))
var errgroup errs.Group
for _, id := range satelliteIDs {
req := &pb.InitiateGracefulExitRequest{
req := &internalpb.InitiateGracefulExitRequest{
NodeId: id,
}
resp, err := client.initGracefulExit(ctx, req)

go.mod

@ -13,6 +13,7 @@ require (
github.com/gogo/protobuf v1.3.1
github.com/golang-migrate/migrate/v4 v4.7.0
github.com/google/go-cmp v0.5.2
github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3 // indirect
github.com/gorilla/mux v1.8.0
github.com/gorilla/schema v1.2.0
github.com/graphql-go/graphql v0.7.9
@ -25,7 +26,7 @@ require (
github.com/nsf/jsondiff v0.0.0-20200515183724-f29ed568f4ce
github.com/nsf/termbox-go v0.0.0-20200418040025-38ba6e5628f1
github.com/shopspring/decimal v1.2.0
github.com/spacemonkeygo/monkit/v3 v3.0.7-0.20200515175308-072401d8c752
github.com/spacemonkeygo/monkit/v3 v3.0.7
github.com/spf13/cobra v1.0.0
github.com/spf13/pflag v1.0.5
github.com/spf13/viper v1.7.1
@ -37,12 +38,14 @@ require (
go.etcd.io/bbolt v1.3.5
go.uber.org/zap v1.16.0
golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b // indirect
golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208
golang.org/x/sys v0.0.0-20200929083018-4d22bbb62b3c
golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e
storj.io/common v0.0.0-20201014090530-c4af8e54d5c4
google.golang.org/api v0.20.0 // indirect
storj.io/common v0.0.0-20201030120157-90ae6720d87e
storj.io/drpc v0.0.14
storj.io/monkit-jaeger v0.0.0-20200518165323-80778fc3f91b
storj.io/private v0.0.0-20200925142346-4c879709882f
storj.io/uplink v1.3.1
storj.io/private v0.0.0-20201026143115-bc926bfa3bca
storj.io/uplink v1.3.2-0.20201028181609-f6efc8fcf771
)

go.sum

@ -355,7 +355,6 @@ github.com/mattn/go-sqlite3 v2.0.3+incompatible/go.mod h1:FPy6KqzDD04eiIsT53CuJW
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
github.com/minio/sha256-simd v0.0.0-20190328051042-05b4dd3047e5/go.mod h1:2FMWW+8GMoPweT6+pI63m9YE3Lmw4J71hV56Chs1E/U=
github.com/minio/sha256-simd v0.1.1/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM=
github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
@ -442,6 +441,8 @@ github.com/spacemonkeygo/monkit/v3 v3.0.4/go.mod h1:JcK1pCbReQsOsMKF/POFSZCq7drX
github.com/spacemonkeygo/monkit/v3 v3.0.5/go.mod h1:JcK1pCbReQsOsMKF/POFSZCq7drXFybgGmbc27tuwes=
github.com/spacemonkeygo/monkit/v3 v3.0.7-0.20200515175308-072401d8c752 h1:WcQDknqg0qajLNYKv3mXgbkWlYs5rPgZehGJFWePHVI=
github.com/spacemonkeygo/monkit/v3 v3.0.7-0.20200515175308-072401d8c752/go.mod h1:kj1ViJhlyADa7DiA4xVnTuPA46lFKbM7mxQTrXCuJP4=
github.com/spacemonkeygo/monkit/v3 v3.0.7 h1:LsGdIXl8mccqJrYEh4Uf4sLVGu/g0tjhNqQzdn9MzVk=
github.com/spacemonkeygo/monkit/v3 v3.0.7/go.mod h1:kj1ViJhlyADa7DiA4xVnTuPA46lFKbM7mxQTrXCuJP4=
github.com/spacemonkeygo/monotime v0.0.0-20180824235756-e3f48a95f98a/go.mod h1:ul4bvvnCOPZgq8w0nTkSmWVg/hauVpFS97Am1YM1XXo=
github.com/spacemonkeygo/spacelog v0.0.0-20180420211403-2296661a0572 h1:RC6RW7j+1+HkWaX/Yh71Ee5ZHaHYt7ZP4sQgUrm6cDU=
github.com/spacemonkeygo/spacelog v0.0.0-20180420211403-2296661a0572/go.mod h1:w0SWMsp6j9O/dk4/ZpIhL+3CkG8ofA2vuv7k+ltqUMc=
@ -516,9 +517,13 @@ go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/atomic v1.6.0 h1:Ezj3JGmsOnG1MoRWQkPBsKLe9DwWD9QeXzTRzzldNVk=
go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw=
go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
go.uber.org/multierr v1.5.0 h1:KCa4XfM8CWFCpxXRGok+Q0SS/0XBhMDbHHGABQLvD2A=
go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU=
go.uber.org/multierr v1.6.0 h1:y6IPFStTAIT5Ytl7/XYmHvzXQ7S3g/IeZW9hyZ5thw4=
go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=
go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee h1:0mgffUl7nfd+FpvXMVz4IDEaUSmT1ysygQC7qYo7sG4=
go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA=
go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
@ -757,17 +762,17 @@ honnef.co/go/tools v0.0.1-2019.2.3 h1:3JgtbtFHMiCmsznwGVTUWbgGov+pVqnlf1dEJTNAXe
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
storj.io/common v0.0.0-20200424175742-65ac59022f4f/go.mod h1:pZyXiIE7bGETIRXtfs0nICqMwp7PM8HqnDuyUeldNA0=
storj.io/common v0.0.0-20200729140050-4c1ddac6fa63/go.mod h1:ILr54ISCqCQ6MmIwT7eaR/fEGrBfgfxiPt8nmpWqnUM=
storj.io/common v0.0.0-20201006183456-4f16ac657da9/go.mod h1:ILr54ISCqCQ6MmIwT7eaR/fEGrBfgfxiPt8nmpWqnUM=
storj.io/common v0.0.0-20201014090530-c4af8e54d5c4 h1:KHcudAhPu2tyCzVVWe2CzZsbmuNO6fKoiJya7rN4wZ8=
storj.io/common v0.0.0-20201014090530-c4af8e54d5c4/go.mod h1:M/wBe7B2Z8B8AEnyp3c0pKeSMg25o/PjVkRg+ey1mrE=
storj.io/common v0.0.0-20201026135900-1aaeec90670b/go.mod h1:GqdmNf3fLm2UZX/7Zr0BLFCJ4gFjgm6eHrk/fnmr5jQ=
storj.io/common v0.0.0-20201027143432-3718579e12bf/go.mod h1:9iobNl9eI6C2M23FS/b37yFYOdHpoeJ8BFFcxsmv538=
storj.io/common v0.0.0-20201030120157-90ae6720d87e h1:6baDicBbR0/2XgcQ068KN+B4dF6akkdh2vemmXka1ns=
storj.io/common v0.0.0-20201030120157-90ae6720d87e/go.mod h1:9iobNl9eI6C2M23FS/b37yFYOdHpoeJ8BFFcxsmv538=
storj.io/drpc v0.0.11/go.mod h1:TiFc2obNjL9/3isMW1Rpxjy8V9uE0B2HMeMFGiiI7Iw=
storj.io/drpc v0.0.14 h1:GCBdymTt1BRw4oHmmUZZlxYXLVRxxYj6x3Ivide2J+I=
storj.io/drpc v0.0.14/go.mod h1:82nfl+6YwRwF6UG31cEWWUqv/FaKvP5SGqUvoqTxCMA=
storj.io/monkit-jaeger v0.0.0-20200518165323-80778fc3f91b h1:Bbg9JCtY6l3HrDxs3BXzT2UYnYCBLqNi6i84Y8QIPUs=
storj.io/monkit-jaeger v0.0.0-20200518165323-80778fc3f91b/go.mod h1:gj4vuCeyCRjRmH8LIrgoyU9Dc9uR6H+/GcDUXmTbf80=
storj.io/private v0.0.0-20200925142346-4c879709882f h1:0csZNWzYRhgBFp58rbBgqSp/62jtK2n0DIZ3Z/41Wso=
storj.io/private v0.0.0-20200925142346-4c879709882f/go.mod h1:3BB0H9SmnJDfgk55uZli6DLHmhLiOdKiDY58ZI2e+pk=
storj.io/uplink v1.3.1 h1:TN89B7WrPWbY2QqIkqZFOv1VR5iO6in/KtfBA2kGlsY=
storj.io/uplink v1.3.1/go.mod h1:/1h+wzy/7hX+RWJFzdsv2MUlX/GUrTPg/K+lJr+QwxU=
storj.io/private v0.0.0-20201026143115-bc926bfa3bca h1:ekR7vtUYC5+cDyim0ZJaSZeXidyzQqDYsnFPYXgTozc=
storj.io/private v0.0.0-20201026143115-bc926bfa3bca/go.mod h1:EaLnIyNyqWQUJB+7+KWVez0In9czl0nHHlm2WobebuA=
storj.io/uplink v1.3.2-0.20201028181609-f6efc8fcf771 h1:jPbw74xt8bvv8nOfBaM4g9Ts4moX8mqfD4N/B8vEJrA=
storj.io/uplink v1.3.2-0.20201028181609-f6efc8fcf771/go.mod h1:5do8jvbs4ao4tLdIZKzNFJPVKOH1oDfvVf8OIsR5Z9E=


@ -37,6 +37,7 @@ import (
"storj.io/storj/satellite/contact"
"storj.io/storj/satellite/gracefulexit"
"storj.io/storj/satellite/inspector"
"storj.io/storj/satellite/internalpb"
"storj.io/storj/satellite/mailservice"
"storj.io/storj/satellite/mailservice/simulate"
"storj.io/storj/satellite/marketingweb"
@ -257,7 +258,7 @@ func NewAPI(log *zap.Logger, full *identity.FullIdentity, db DB,
})
peer.Overlay.Inspector = overlay.NewInspector(peer.Overlay.Service)
if err := pb.DRPCRegisterOverlayInspector(peer.Server.PrivateDRPC(), peer.Overlay.Inspector); err != nil {
if err := internalpb.DRPCRegisterOverlayInspector(peer.Server.PrivateDRPC(), peer.Overlay.Inspector); err != nil {
return nil, errs.Combine(err, peer.Close())
}
}
@ -448,7 +449,7 @@ func NewAPI(log *zap.Logger, full *identity.FullIdentity, db DB,
{ // setup datarepair
peer.Repair.Inspector = irreparable.NewInspector(peer.DB.Irreparable())
if err := pb.DRPCRegisterIrreparableInspector(peer.Server.PrivateDRPC(), peer.Repair.Inspector); err != nil {
if err := internalpb.DRPCRegisterIrreparableInspector(peer.Server.PrivateDRPC(), peer.Repair.Inspector); err != nil {
return nil, errs.Combine(err, peer.Close())
}
}
@ -459,7 +460,7 @@ func NewAPI(log *zap.Logger, full *identity.FullIdentity, db DB,
peer.Overlay.Service,
peer.Metainfo.Service,
)
if err := pb.DRPCRegisterHealthInspector(peer.Server.PrivateDRPC(), peer.Inspector.Endpoint); err != nil {
if err := internalpb.DRPCRegisterHealthInspector(peer.Server.PrivateDRPC(), peer.Inspector.Endpoint); err != nil {
return nil, errs.Combine(err, peer.Close())
}
}


@ -14,6 +14,7 @@ import (
"storj.io/common/pb"
"storj.io/common/storj"
"storj.io/common/uuid"
"storj.io/storj/satellite/internalpb"
"storj.io/storj/satellite/metainfo"
"storj.io/storj/satellite/overlay"
)
@ -45,10 +46,10 @@ func NewEndpoint(log *zap.Logger, cache *overlay.Service, metainfo *metainfo.Ser
}
// ObjectHealth will check the health of an object.
func (endpoint *Endpoint) ObjectHealth(ctx context.Context, in *pb.ObjectHealthRequest) (resp *pb.ObjectHealthResponse, err error) {
func (endpoint *Endpoint) ObjectHealth(ctx context.Context, in *internalpb.ObjectHealthRequest) (resp *internalpb.ObjectHealthResponse, err error) {
defer mon.Task()(&ctx)(&err)
var segmentHealthResponses []*pb.SegmentHealth
var segmentHealthResponses []*internalpb.SegmentHealth
var redundancy *pb.RedundancyScheme
limit := int64(100)
@ -76,7 +77,7 @@ func (endpoint *Endpoint) ObjectHealth(ctx context.Context, in *pb.ObjectHealthR
break
}
segment := &pb.SegmentHealthRequest{
segment := &internalpb.SegmentHealthRequest{
Bucket: bucket,
EncryptedPath: encryptedPath,
SegmentIndex: segmentIndex,
@ -103,17 +104,17 @@ func (endpoint *Endpoint) ObjectHealth(ctx context.Context, in *pb.ObjectHealthR
segmentIndex++
}
return &pb.ObjectHealthResponse{
return &internalpb.ObjectHealthResponse{
Segments: segmentHealthResponses,
Redundancy: redundancy,
}, nil
}
// SegmentHealth will check the health of a segment.
func (endpoint *Endpoint) SegmentHealth(ctx context.Context, in *pb.SegmentHealthRequest) (resp *pb.SegmentHealthResponse, err error) {
func (endpoint *Endpoint) SegmentHealth(ctx context.Context, in *internalpb.SegmentHealthRequest) (resp *internalpb.SegmentHealthResponse, err error) {
defer mon.Task()(&ctx)(&err)
health := &pb.SegmentHealth{}
health := &internalpb.SegmentHealth{}
projectID, err := uuid.FromString(string(in.GetProjectId()))
if err != nil {
@ -180,7 +181,7 @@ func (endpoint *Endpoint) SegmentHealth(ctx context.Context, in *pb.SegmentHealt
health.Segment = []byte("l")
}
return &pb.SegmentHealthResponse{
return &internalpb.SegmentHealthResponse{
Health: health,
Redundancy: pointer.GetRemote().GetRedundancy(),
}, nil


@ -13,11 +13,11 @@ import (
"github.com/stretchr/testify/require"
"storj.io/common/memory"
"storj.io/common/pb"
"storj.io/common/storj"
"storj.io/common/testcontext"
"storj.io/common/testrand"
"storj.io/storj/private/testplanet"
"storj.io/storj/satellite/internalpb"
"storj.io/storj/storage"
"storj.io/uplink/private/eestream"
)
@ -54,7 +54,7 @@ func TestInspectorStats(t *testing.T) {
encryptedPath := strings.Join(fullPath[3:], "/")
{ // Test Segment Health Request
req := &pb.SegmentHealthRequest{
req := &internalpb.SegmentHealthRequest{
ProjectId: []byte(projectID),
EncryptedPath: []byte(encryptedPath),
Bucket: []byte(bucket),
@ -72,7 +72,7 @@ func TestInspectorStats(t *testing.T) {
}
{ // Test Object Health Request
objectHealthReq := &pb.ObjectHealthRequest{
objectHealthReq := &internalpb.ObjectHealthRequest{
ProjectId: []byte(projectID),
EncryptedPath: []byte(encryptedPath),
Bucket: []byte(bucket),


@ -0,0 +1,153 @@
// Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: audithistory.proto
package internalpb
import (
fmt "fmt"
math "math"
time "time"
proto "github.com/gogo/protobuf/proto"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
var _ = time.Kitchen
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
type AuditHistory struct {
Windows []*AuditWindow `protobuf:"bytes,1,rep,name=windows,proto3" json:"windows,omitempty"`
Score float64 `protobuf:"fixed64,2,opt,name=score,proto3" json:"score,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *AuditHistory) Reset() { *m = AuditHistory{} }
func (m *AuditHistory) String() string { return proto.CompactTextString(m) }
func (*AuditHistory) ProtoMessage() {}
func (*AuditHistory) Descriptor() ([]byte, []int) {
return fileDescriptor_2ab8e94de62e54ec, []int{0}
}
func (m *AuditHistory) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_AuditHistory.Unmarshal(m, b)
}
func (m *AuditHistory) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_AuditHistory.Marshal(b, m, deterministic)
}
func (m *AuditHistory) XXX_Merge(src proto.Message) {
xxx_messageInfo_AuditHistory.Merge(m, src)
}
func (m *AuditHistory) XXX_Size() int {
return xxx_messageInfo_AuditHistory.Size(m)
}
func (m *AuditHistory) XXX_DiscardUnknown() {
xxx_messageInfo_AuditHistory.DiscardUnknown(m)
}
var xxx_messageInfo_AuditHistory proto.InternalMessageInfo
func (m *AuditHistory) GetWindows() []*AuditWindow {
if m != nil {
return m.Windows
}
return nil
}
func (m *AuditHistory) GetScore() float64 {
if m != nil {
return m.Score
}
return 0
}
type AuditWindow struct {
WindowStart time.Time `protobuf:"bytes,1,opt,name=window_start,json=windowStart,proto3,stdtime" json:"window_start"`
OnlineCount int32 `protobuf:"varint,2,opt,name=online_count,json=onlineCount,proto3" json:"online_count,omitempty"`
TotalCount int32 `protobuf:"varint,3,opt,name=total_count,json=totalCount,proto3" json:"total_count,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *AuditWindow) Reset() { *m = AuditWindow{} }
func (m *AuditWindow) String() string { return proto.CompactTextString(m) }
func (*AuditWindow) ProtoMessage() {}
func (*AuditWindow) Descriptor() ([]byte, []int) {
return fileDescriptor_2ab8e94de62e54ec, []int{1}
}
func (m *AuditWindow) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_AuditWindow.Unmarshal(m, b)
}
func (m *AuditWindow) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_AuditWindow.Marshal(b, m, deterministic)
}
func (m *AuditWindow) XXX_Merge(src proto.Message) {
xxx_messageInfo_AuditWindow.Merge(m, src)
}
func (m *AuditWindow) XXX_Size() int {
return xxx_messageInfo_AuditWindow.Size(m)
}
func (m *AuditWindow) XXX_DiscardUnknown() {
xxx_messageInfo_AuditWindow.DiscardUnknown(m)
}
var xxx_messageInfo_AuditWindow proto.InternalMessageInfo
func (m *AuditWindow) GetWindowStart() time.Time {
if m != nil {
return m.WindowStart
}
return time.Time{}
}
func (m *AuditWindow) GetOnlineCount() int32 {
if m != nil {
return m.OnlineCount
}
return 0
}
func (m *AuditWindow) GetTotalCount() int32 {
if m != nil {
return m.TotalCount
}
return 0
}
func init() {
proto.RegisterType((*AuditHistory)(nil), "satellite.audithistory.AuditHistory")
proto.RegisterType((*AuditWindow)(nil), "satellite.audithistory.AuditWindow")
}
func init() { proto.RegisterFile("audithistory.proto", fileDescriptor_2ab8e94de62e54ec) }
var fileDescriptor_2ab8e94de62e54ec = []byte{
// 274 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x8f, 0xc1, 0x4a, 0xc4, 0x30,
0x10, 0x86, 0x8d, 0xcb, 0xaa, 0x24, 0x7b, 0x0a, 0x22, 0xa5, 0x97, 0xd6, 0x5d, 0x84, 0x9e, 0x52,
0x58, 0xcf, 0x1e, 0x5c, 0x0f, 0x7a, 0xae, 0x82, 0xe0, 0x65, 0x49, 0xbb, 0xb1, 0x46, 0xb2, 0x99,
0x92, 0x4c, 0x59, 0x7c, 0x0b, 0xcf, 0x3e, 0x91, 0x4f, 0xa1, 0xaf, 0x22, 0x4d, 0xac, 0xec, 0xc1,
0x5b, 0xe6, 0xcf, 0xf7, 0x31, 0xff, 0x50, 0x2e, 0xfb, 0x8d, 0xc6, 0x17, 0xed, 0x11, 0xdc, 0x9b,
0xe8, 0x1c, 0x20, 0xf0, 0x33, 0x2f, 0x51, 0x19, 0xa3, 0x51, 0x89, 0xfd, 0xdf, 0x94, 0xb6, 0xd0,
0x42, 0x64, 0xd2, 0xac, 0x05, 0x68, 0x8d, 0x2a, 0xc3, 0x54, 0xf7, 0xcf, 0x25, 0xea, 0xad, 0xf2,
0x28, 0xb7, 0x5d, 0x04, 0xe6, 0x0d, 0x9d, 0x5d, 0x0f, 0xf2, 0x5d, 0x94, 0xf9, 0x15, 0x3d, 0xde,
0x69, 0xbb, 0x81, 0x9d, 0x4f, 0x48, 0x3e, 0x29, 0xd8, 0x72, 0x21, 0xfe, 0x5f, 0x23, 0x82, 0xf6,
0x18, 0xd8, 0x6a, 0x74, 0xf8, 0x29, 0x9d, 0xfa, 0x06, 0x9c, 0x4a, 0x0e, 0x73, 0x52, 0x90, 0x2a,
0x0e, 0xf3, 0x0f, 0x42, 0xd9, 0x1e, 0xce, 0x6f, 0xe9, 0x2c, 0x0a, 0x6b, 0x8f, 0xd2, 0x61, 0x42,
0x72, 0x52, 0xb0, 0x65, 0x2a, 0x62, 0x59, 0x31, 0x96, 0x15, 0x0f, 0x63, 0xd9, 0xd5, 0xc9, 0xe7,
0x57, 0x76, 0xf0, 0xfe, 0x9d, 0x91, 0x8a, 0x45, 0xf3, 0x7e, 0x10, 0xf9, 0x39, 0x9d, 0x81, 0x35,
0xda, 0xaa, 0x75, 0x03, 0xbd, 0xc5, 0xb0, 0x75, 0x5a, 0xb1, 0x98, 0xdd, 0x0c, 0x11, 0xcf, 0x28,
0x43, 0x40, 0x69, 0x7e, 0x89, 0x49, 0x20, 0x68, 0x88, 0x02, 0xb0, 0xba, 0x78, 0x5a, 0x0c, 0x07,
0xbd, 0x0a, 0x0d, 0x65, 0x78, 0x94, 0x7f, 0x07, 0x97, 0xda, 0xa2, 0x72, 0x56, 0x9a, 0xae, 0xae,
0x8f, 0x42, 0xab, 0xcb, 0x9f, 0x00, 0x00, 0x00, 0xff, 0xff, 0xbc, 0xff, 0x7f, 0x76, 0x8a, 0x01,
0x00, 0x00,
}


@ -0,0 +1,21 @@
// Copyright (C) 2020 Storj Labs, Inc.
// See LICENSE for copying information.
syntax = "proto3";
option go_package = "storj.io/storj/satellite/internalpb";
package satellite.audithistory;
import "gogo.proto";
import "google/protobuf/timestamp.proto";
message AuditHistory {
repeated AuditWindow windows = 1;
double score = 2;
}
message AuditWindow {
google.protobuf.Timestamp window_start = 1 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
int32 online_count = 2;
int32 total_count = 3;
}


@ -0,0 +1,104 @@
// Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: datarepair.proto
package internalpb
import (
fmt "fmt"
math "math"
time "time"
proto "github.com/gogo/protobuf/proto"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
var _ = time.Kitchen
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
// InjuredSegment is the queue item used for the data repair queue.
type InjuredSegment struct {
Path []byte `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"`
LostPieces []int32 `protobuf:"varint,2,rep,packed,name=lost_pieces,json=lostPieces,proto3" json:"lost_pieces,omitempty"`
InsertedTime time.Time `protobuf:"bytes,3,opt,name=inserted_time,json=insertedTime,proto3,stdtime" json:"inserted_time"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *InjuredSegment) Reset() { *m = InjuredSegment{} }
func (m *InjuredSegment) String() string { return proto.CompactTextString(m) }
func (*InjuredSegment) ProtoMessage() {}
func (*InjuredSegment) Descriptor() ([]byte, []int) {
return fileDescriptor_b1b08e6fe9398aa6, []int{0}
}
func (m *InjuredSegment) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_InjuredSegment.Unmarshal(m, b)
}
func (m *InjuredSegment) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_InjuredSegment.Marshal(b, m, deterministic)
}
func (m *InjuredSegment) XXX_Merge(src proto.Message) {
xxx_messageInfo_InjuredSegment.Merge(m, src)
}
func (m *InjuredSegment) XXX_Size() int {
return xxx_messageInfo_InjuredSegment.Size(m)
}
func (m *InjuredSegment) XXX_DiscardUnknown() {
xxx_messageInfo_InjuredSegment.DiscardUnknown(m)
}
var xxx_messageInfo_InjuredSegment proto.InternalMessageInfo
func (m *InjuredSegment) GetPath() []byte {
if m != nil {
return m.Path
}
return nil
}
func (m *InjuredSegment) GetLostPieces() []int32 {
if m != nil {
return m.LostPieces
}
return nil
}
func (m *InjuredSegment) GetInsertedTime() time.Time {
if m != nil {
return m.InsertedTime
}
return time.Time{}
}
func init() {
proto.RegisterType((*InjuredSegment)(nil), "satellite.repair.InjuredSegment")
}
func init() { proto.RegisterFile("datarepair.proto", fileDescriptor_b1b08e6fe9398aa6) }
var fileDescriptor_b1b08e6fe9398aa6 = []byte{
// 230 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x3c, 0x8f, 0x4d, 0x4e, 0xc3, 0x30,
0x10, 0x85, 0x31, 0x05, 0x84, 0xdc, 0x82, 0x2a, 0xaf, 0xa2, 0x6c, 0x12, 0x81, 0x90, 0xb2, 0xb2,
0x25, 0xb8, 0x41, 0x77, 0xdd, 0xa1, 0xc0, 0x8a, 0x4d, 0xe5, 0x90, 0xc1, 0xb8, 0x72, 0x3c, 0x96,
0x3d, 0xbd, 0x47, 0x8e, 0xc5, 0x29, 0xe0, 0x2a, 0x28, 0x8e, 0xd2, 0xdd, 0x9b, 0xf7, 0xe6, 0xe7,
0x1b, 0xbe, 0xed, 0x35, 0xe9, 0x08, 0x41, 0xdb, 0x28, 0x43, 0x44, 0x42, 0xb1, 0x4d, 0x9a, 0xc0,
0x39, 0x4b, 0x20, 0x67, 0xbf, 0xe4, 0x06, 0x0d, 0xce, 0x69, 0x59, 0x19, 0x44, 0xe3, 0x40, 0xe5,
0xaa, 0x3b, 0x7d, 0x29, 0xb2, 0x03, 0x24, 0xd2, 0x43, 0x98, 0x1b, 0x1e, 0x46, 0xc6, 0xef, 0xf7,
0xfe, 0x78, 0x8a, 0xd0, 0xbf, 0x81, 0x19, 0xc0, 0x93, 0x10, 0xfc, 0x2a, 0x68, 0xfa, 0x2e, 0x58,
0xcd, 0x9a, 0x4d, 0x9b, 0xb5, 0xa8, 0xf8, 0xda, 0x61, 0xa2, 0x43, 0xb0, 0xf0, 0x09, 0xa9, 0xb8,
0xac, 0x57, 0xcd, 0x75, 0xcb, 0x27, 0xeb, 0x35, 0x3b, 0x62, 0xcf, 0xef, 0xac, 0x4f, 0x10, 0x09,
0xfa, 0xc3, 0x74, 0xa3, 0x58, 0xd5, 0xac, 0x59, 0x3f, 0x97, 0x72, 0x06, 0x90, 0x0b, 0x80, 0x7c,
0x5f, 0x00, 0x76, 0xb7, 0x3f, 0xbf, 0xd5, 0xc5, 0xf8, 0x57, 0xb1, 0x76, 0xb3, 0x8c, 0x4e, 0xe1,
0xee, 0xe9, 0xe3, 0x31, 0x11, 0xc6, 0xa3, 0xb4, 0xa8, 0xb2, 0x50, 0xe7, 0x17, 0x95, 0xf5, 0x04,
0xd1, 0x6b, 0x17, 0xba, 0xee, 0x26, 0xaf, 0x7c, 0xf9, 0x0f, 0x00, 0x00, 0xff, 0xff, 0x1a, 0xcb,
0xa5, 0x58, 0x13, 0x01, 0x00, 0x00,
}


@ -0,0 +1,17 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.
syntax = "proto3";
option go_package = "storj.io/storj/satellite/internalpb";
import "gogo.proto";
import "google/protobuf/timestamp.proto";
package satellite.repair;
// InjuredSegment is the queue item used for the data repair queue.
message InjuredSegment {
bytes path = 1;
repeated int32 lost_pieces = 2;
google.protobuf.Timestamp inserted_time = 3 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
}


@ -0,0 +1,456 @@
// Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: delegated_repair.proto
package internalpb
import (
context "context"
fmt "fmt"
math "math"
time "time"
proto "github.com/gogo/protobuf/proto"
pb "storj.io/common/pb"
drpc "storj.io/drpc"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
var _ = time.Kitchen
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
type RepairJobRequest struct {
// When not the first request, this will include the result of the last job
LastJobResult *RepairJobResult `protobuf:"bytes,1,opt,name=last_job_result,json=lastJobResult,proto3" json:"last_job_result,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *RepairJobRequest) Reset() { *m = RepairJobRequest{} }
func (m *RepairJobRequest) String() string { return proto.CompactTextString(m) }
func (*RepairJobRequest) ProtoMessage() {}
func (*RepairJobRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_04d00d18c724d5a7, []int{0}
}
func (m *RepairJobRequest) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_RepairJobRequest.Unmarshal(m, b)
}
func (m *RepairJobRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_RepairJobRequest.Marshal(b, m, deterministic)
}
func (m *RepairJobRequest) XXX_Merge(src proto.Message) {
xxx_messageInfo_RepairJobRequest.Merge(m, src)
}
func (m *RepairJobRequest) XXX_Size() int {
return xxx_messageInfo_RepairJobRequest.Size(m)
}
func (m *RepairJobRequest) XXX_DiscardUnknown() {
xxx_messageInfo_RepairJobRequest.DiscardUnknown(m)
}
var xxx_messageInfo_RepairJobRequest proto.InternalMessageInfo
func (m *RepairJobRequest) GetLastJobResult() *RepairJobResult {
if m != nil {
return m.LastJobResult
}
return nil
}
type RepairJobResponse struct {
// When a job is available, this will be filled in
NewJob *RepairJobDefinition `protobuf:"bytes,1,opt,name=new_job,json=newJob,proto3" json:"new_job,omitempty"`
// Otherwise, client should wait this many milliseconds and then try again
ComeBackInMillis int32 `protobuf:"varint,2,opt,name=come_back_in_millis,json=comeBackInMillis,proto3" json:"come_back_in_millis,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *RepairJobResponse) Reset() { *m = RepairJobResponse{} }
func (m *RepairJobResponse) String() string { return proto.CompactTextString(m) }
func (*RepairJobResponse) ProtoMessage() {}
func (*RepairJobResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_04d00d18c724d5a7, []int{1}
}
func (m *RepairJobResponse) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_RepairJobResponse.Unmarshal(m, b)
}
func (m *RepairJobResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_RepairJobResponse.Marshal(b, m, deterministic)
}
func (m *RepairJobResponse) XXX_Merge(src proto.Message) {
xxx_messageInfo_RepairJobResponse.Merge(m, src)
}
func (m *RepairJobResponse) XXX_Size() int {
return xxx_messageInfo_RepairJobResponse.Size(m)
}
func (m *RepairJobResponse) XXX_DiscardUnknown() {
xxx_messageInfo_RepairJobResponse.DiscardUnknown(m)
}
var xxx_messageInfo_RepairJobResponse proto.InternalMessageInfo
func (m *RepairJobResponse) GetNewJob() *RepairJobDefinition {
if m != nil {
return m.NewJob
}
return nil
}
func (m *RepairJobResponse) GetComeBackInMillis() int32 {
if m != nil {
return m.ComeBackInMillis
}
return 0
}
type RepairJobDefinition struct {
// Identifier for this job
JobId []byte `protobuf:"bytes,1,opt,name=job_id,json=jobId,proto3" json:"job_id,omitempty"`
// Signed GET orders for all believed-healthy pieces to be downloaded
GetOrders []*pb.AddressedOrderLimit `protobuf:"bytes,2,rep,name=get_orders,json=getOrders,proto3" json:"get_orders,omitempty"`
// Private piece key to use for fetching
PrivateKeyForGet []byte `protobuf:"bytes,3,opt,name=private_key_for_get,json=privateKeyForGet,proto3" json:"private_key_for_get,omitempty"`
// Signed PUT orders for all possible pieces to be uploaded (not including
// piece numbers in get_orders)
PutOrders []*pb.AddressedOrderLimit `protobuf:"bytes,4,rep,name=put_orders,json=putOrders,proto3" json:"put_orders,omitempty"`
// Private piece key to use for storing
PrivateKeyForPut []byte `protobuf:"bytes,5,opt,name=private_key_for_put,json=privateKeyForPut,proto3" json:"private_key_for_put,omitempty"`
// Redundancy scheme used by the segment to be repaired
Redundancy *pb.RedundancyScheme `protobuf:"bytes,6,opt,name=redundancy,proto3" json:"redundancy,omitempty"`
// Size of the segment to be repaired
SegmentSize int64 `protobuf:"varint,7,opt,name=segment_size,json=segmentSize,proto3" json:"segment_size,omitempty"`
// Target piece count (worker should try to upload enough pieces so that
// this count is achieved)
DesiredPieceCount int32 `protobuf:"varint,8,opt,name=desired_piece_count,json=desiredPieceCount,proto3" json:"desired_piece_count,omitempty"`
// Job expiration time
ExpirationTime time.Time `protobuf:"bytes,9,opt,name=expiration_time,json=expirationTime,proto3,stdtime" json:"expiration_time"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *RepairJobDefinition) Reset() { *m = RepairJobDefinition{} }
func (m *RepairJobDefinition) String() string { return proto.CompactTextString(m) }
func (*RepairJobDefinition) ProtoMessage() {}
func (*RepairJobDefinition) Descriptor() ([]byte, []int) {
return fileDescriptor_04d00d18c724d5a7, []int{2}
}
func (m *RepairJobDefinition) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_RepairJobDefinition.Unmarshal(m, b)
}
func (m *RepairJobDefinition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_RepairJobDefinition.Marshal(b, m, deterministic)
}
func (m *RepairJobDefinition) XXX_Merge(src proto.Message) {
xxx_messageInfo_RepairJobDefinition.Merge(m, src)
}
func (m *RepairJobDefinition) XXX_Size() int {
return xxx_messageInfo_RepairJobDefinition.Size(m)
}
func (m *RepairJobDefinition) XXX_DiscardUnknown() {
xxx_messageInfo_RepairJobDefinition.DiscardUnknown(m)
}
var xxx_messageInfo_RepairJobDefinition proto.InternalMessageInfo
func (m *RepairJobDefinition) GetJobId() []byte {
if m != nil {
return m.JobId
}
return nil
}
func (m *RepairJobDefinition) GetGetOrders() []*pb.AddressedOrderLimit {
if m != nil {
return m.GetOrders
}
return nil
}
func (m *RepairJobDefinition) GetPrivateKeyForGet() []byte {
if m != nil {
return m.PrivateKeyForGet
}
return nil
}
func (m *RepairJobDefinition) GetPutOrders() []*pb.AddressedOrderLimit {
if m != nil {
return m.PutOrders
}
return nil
}
func (m *RepairJobDefinition) GetPrivateKeyForPut() []byte {
if m != nil {
return m.PrivateKeyForPut
}
return nil
}
func (m *RepairJobDefinition) GetRedundancy() *pb.RedundancyScheme {
if m != nil {
return m.Redundancy
}
return nil
}
func (m *RepairJobDefinition) GetSegmentSize() int64 {
if m != nil {
return m.SegmentSize
}
return 0
}
func (m *RepairJobDefinition) GetDesiredPieceCount() int32 {
if m != nil {
return m.DesiredPieceCount
}
return 0
}
func (m *RepairJobDefinition) GetExpirationTime() time.Time {
if m != nil {
return m.ExpirationTime
}
return time.Time{}
}
type RepairJobResult struct {
// Identifier for this job, as given in RepairJobResponse
JobId []byte `protobuf:"bytes,1,opt,name=job_id,json=jobId,proto3" json:"job_id,omitempty"`
// Set nonzero only if the segment could not be reconstructed because of
// too few pieces available.
IrreparablePiecesRetrieved int32 `protobuf:"varint,2,opt,name=irreparable_pieces_retrieved,json=irreparablePiecesRetrieved,proto3" json:"irreparable_pieces_retrieved,omitempty"`
// Set only if the segment could not be reconstructed.
ReconstructError string `protobuf:"bytes,3,opt,name=reconstruct_error,json=reconstructError,proto3" json:"reconstruct_error,omitempty"`
// Set only if new pieces could not be stored to any new nodes.
StoreError string `protobuf:"bytes,4,opt,name=store_error,json=storeError,proto3" json:"store_error,omitempty"`
// PieceHashes signed by storage nodes which were used to accomplish repair
NewPiecesStored []*pb.PieceHash `protobuf:"bytes,5,rep,name=new_pieces_stored,json=newPiecesStored,proto3" json:"new_pieces_stored,omitempty"`
// A copy of the put_orders list as provided in the corresponding
// RepairJobDefinition
PutOrders []*pb.AddressedOrderLimit `protobuf:"bytes,6,rep,name=put_orders,json=putOrders,proto3" json:"put_orders,omitempty"`
// Pieces which should be _removed_ from the pointer. This will include
// pieces for which the expected owning storage node returned a "not found"
// error, as well as pieces which were downloaded but failed their
// validation check.
DeletePieceNums []int32 `protobuf:"varint,7,rep,packed,name=delete_piece_nums,json=deletePieceNums,proto3" json:"delete_piece_nums,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *RepairJobResult) Reset() { *m = RepairJobResult{} }
func (m *RepairJobResult) String() string { return proto.CompactTextString(m) }
func (*RepairJobResult) ProtoMessage() {}
func (*RepairJobResult) Descriptor() ([]byte, []int) {
return fileDescriptor_04d00d18c724d5a7, []int{3}
}
func (m *RepairJobResult) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_RepairJobResult.Unmarshal(m, b)
}
func (m *RepairJobResult) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_RepairJobResult.Marshal(b, m, deterministic)
}
func (m *RepairJobResult) XXX_Merge(src proto.Message) {
xxx_messageInfo_RepairJobResult.Merge(m, src)
}
func (m *RepairJobResult) XXX_Size() int {
return xxx_messageInfo_RepairJobResult.Size(m)
}
func (m *RepairJobResult) XXX_DiscardUnknown() {
xxx_messageInfo_RepairJobResult.DiscardUnknown(m)
}
var xxx_messageInfo_RepairJobResult proto.InternalMessageInfo
func (m *RepairJobResult) GetJobId() []byte {
if m != nil {
return m.JobId
}
return nil
}
func (m *RepairJobResult) GetIrreparablePiecesRetrieved() int32 {
if m != nil {
return m.IrreparablePiecesRetrieved
}
return 0
}
func (m *RepairJobResult) GetReconstructError() string {
if m != nil {
return m.ReconstructError
}
return ""
}
func (m *RepairJobResult) GetStoreError() string {
if m != nil {
return m.StoreError
}
return ""
}
func (m *RepairJobResult) GetNewPiecesStored() []*pb.PieceHash {
if m != nil {
return m.NewPiecesStored
}
return nil
}
func (m *RepairJobResult) GetPutOrders() []*pb.AddressedOrderLimit {
if m != nil {
return m.PutOrders
}
return nil
}
func (m *RepairJobResult) GetDeletePieceNums() []int32 {
if m != nil {
return m.DeletePieceNums
}
return nil
}
func init() {
proto.RegisterType((*RepairJobRequest)(nil), "satellite.delegated_repair.RepairJobRequest")
proto.RegisterType((*RepairJobResponse)(nil), "satellite.delegated_repair.RepairJobResponse")
proto.RegisterType((*RepairJobDefinition)(nil), "satellite.delegated_repair.RepairJobDefinition")
proto.RegisterType((*RepairJobResult)(nil), "satellite.delegated_repair.RepairJobResult")
}
func init() { proto.RegisterFile("delegated_repair.proto", fileDescriptor_04d00d18c724d5a7) }
var fileDescriptor_04d00d18c724d5a7 = []byte{
// 701 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x54, 0xdd, 0x6e, 0xd3, 0x30,
0x18, 0x5d, 0xd7, 0xb5, 0x5b, 0xdd, 0xb1, 0xb6, 0x99, 0x40, 0x51, 0x01, 0xb5, 0x14, 0x21, 0x55,
0x8c, 0xa5, 0xd2, 0xb8, 0x04, 0x24, 0xd8, 0xf8, 0xd9, 0x06, 0x83, 0x29, 0xe5, 0x8a, 0x1b, 0xcb,
0x89, 0xbf, 0x66, 0xee, 0x12, 0x3b, 0xd8, 0xce, 0xc6, 0x76, 0xc3, 0x0b, 0x70, 0xc1, 0x63, 0xf1,
0x14, 0x20, 0x2e, 0x78, 0x0f, 0x64, 0x27, 0xed, 0xaa, 0x69, 0x43, 0xe5, 0x2e, 0xfe, 0xce, 0xb1,
0xcf, 0xc9, 0xf7, 0x1d, 0x1b, 0xdd, 0xa2, 0x10, 0x43, 0x44, 0x34, 0x50, 0x2c, 0x21, 0x25, 0x4c,
0x7a, 0xa9, 0x14, 0x5a, 0x38, 0x6d, 0x45, 0x34, 0xc4, 0x31, 0xd3, 0xe0, 0x5d, 0x66, 0xb4, 0x51,
0x24, 0x22, 0x91, 0xf3, 0xda, 0x9d, 0x48, 0x88, 0x28, 0x86, 0x81, 0x5d, 0x05, 0xd9, 0x68, 0xa0,
0x59, 0x02, 0x4a, 0x93, 0x24, 0x2d, 0x08, 0x6b, 0x09, 0x68, 0xc2, 0xf8, 0x68, 0xb2, 0x61, 0x55,
0x48, 0x0a, 0x52, 0x15, 0xab, 0x46, 0x2a, 0x18, 0xd7, 0x20, 0x69, 0x90, 0x17, 0x7a, 0x11, 0x6a,
0xfa, 0x56, 0x65, 0x5f, 0x04, 0x3e, 0x7c, 0xce, 0x40, 0x69, 0x67, 0x88, 0x1a, 0x31, 0x51, 0x1a,
0x8f, 0x45, 0x80, 0x25, 0xa8, 0x2c, 0xd6, 0x6e, 0xa9, 0x5b, 0xea, 0xd7, 0xb7, 0x36, 0xbc, 0xeb,
0x5d, 0x7a, 0x33, 0xc7, 0x98, 0x2d, 0xfe, 0x0d, 0x73, 0xc6, 0x74, 0xd9, 0xfb, 0x56, 0x42, 0xad,
0x59, 0x4a, 0x2a, 0xb8, 0x02, 0x67, 0x17, 0x2d, 0x73, 0x38, 0x35, 0x4a, 0x85, 0xc4, 0x60, 0x2e,
0x89, 0x97, 0x30, 0x62, 0x9c, 0x69, 0x26, 0xb8, 0x5f, 0xe5, 0x70, 0xba, 0x2f, 0x02, 0x67, 0x13,
0xad, 0x87, 0x22, 0x01, 0x1c, 0x90, 0xf0, 0x18, 0x33, 0x8e, 0x13, 0x16, 0xc7, 0x4c, 0xb9, 0x8b,
0xdd, 0x52, 0xbf, 0xe2, 0x37, 0x0d, 0xb4, 0x4d, 0xc2, 0xe3, 0x3d, 0x7e, 0x60, 0xeb, 0xbd, 0x3f,
0x65, 0xb4, 0x7e, 0xc5, 0x71, 0xce, 0x4d, 0x54, 0x35, 0xbf, 0xcd, 0xa8, 0xf5, 0xb3, 0xea, 0x57,
0xc6, 0x22, 0xd8, 0xa3, 0xce, 0x53, 0x84, 0x22, 0xd0, 0x38, 0xef, 0xa5, 0xbb, 0xd8, 0x2d, 0xf7,
0xeb, 0x5b, 0x77, 0xbd, 0x69, 0xab, 0x5f, 0x50, 0x2a, 0x41, 0x29, 0xa0, 0x1f, 0x0c, 0xe1, 0x1d,
0x4b, 0x98, 0xf6, 0x6b, 0x11, 0x68, 0xbb, 0x54, 0xc6, 0x5b, 0x2a, 0xd9, 0x09, 0xd1, 0x80, 0x8f,
0xe1, 0x0c, 0x8f, 0x84, 0xc4, 0x11, 0x68, 0xb7, 0x6c, 0x15, 0x9a, 0x05, 0xf4, 0x16, 0xce, 0x5e,
0x0b, 0xf9, 0x06, 0xb4, 0x11, 0x4b, 0xb3, 0xa9, 0xd8, 0xd2, 0x5c, 0x62, 0x69, 0xf6, 0x0f, 0xb1,
0x34, 0xd3, 0x6e, 0xe5, 0x0a, 0xb1, 0xc3, 0x4c, 0x3b, 0x4f, 0x10, 0x92, 0x40, 0x33, 0x4e, 0x09,
0x0f, 0xcf, 0xdc, 0xaa, 0x1d, 0xc2, 0x6d, 0xef, 0x22, 0x26, 0xfe, 0x14, 0x1c, 0x86, 0x47, 0x90,
0x80, 0x3f, 0x43, 0x77, 0xee, 0xa1, 0x55, 0x05, 0x51, 0x02, 0x5c, 0x63, 0xc5, 0xce, 0xc1, 0x5d,
0xee, 0x96, 0xfa, 0x65, 0xbf, 0x5e, 0xd4, 0x86, 0xec, 0x1c, 0x1c, 0x0f, 0xad, 0x53, 0x50, 0x4c,
0x02, 0xc5, 0x29, 0x83, 0x10, 0x70, 0x28, 0x32, 0xae, 0xdd, 0x15, 0x3b, 0x97, 0x56, 0x01, 0x1d,
0x1a, 0x64, 0xc7, 0x00, 0xce, 0x01, 0x6a, 0xc0, 0x97, 0x94, 0x49, 0x62, 0xc6, 0x81, 0x4d, 0xba,
0xdd, 0x9a, 0x35, 0xd5, 0xf6, 0xf2, 0xe8, 0x7b, 0x93, 0xe8, 0x7b, 0x1f, 0x27, 0xd1, 0xdf, 0x5e,
0xf9, 0xf1, 0xb3, 0xb3, 0xf0, 0xfd, 0x57, 0xa7, 0xe4, 0xaf, 0x5d, 0x6c, 0x36, 0x70, 0xef, 0xf7,
0x22, 0x6a, 0x5c, 0x4a, 0xe6, 0x75, 0x33, 0x7e, 0x8e, 0xee, 0x30, 0x69, 0x92, 0x26, 0x49, 0x10,
0x43, 0xee, 0x56, 0x61, 0x09, 0x5a, 0x32, 0x38, 0x01, 0x5a, 0x44, 0xa9, 0x3d, 0xc3, 0xb1, 0xb6,
0x95, 0x3f, 0x61, 0x38, 0x1b, 0xa8, 0x25, 0x21, 0x14, 0x5c, 0x69, 0x99, 0x85, 0x1a, 0x83, 0x94,
0x42, 0xda, 0x29, 0xd7, 0xfc, 0xe6, 0x0c, 0xf0, 0xca, 0xd4, 0x9d, 0x0e, 0xaa, 0x2b, 0x2d, 0x24,
0x14, 0xb4, 0x25, 0x4b, 0x43, 0xb6, 0x94, 0x13, 0x9e, 0xa1, 0x96, 0xb9, 0x1b, 0x85, 0x0f, 0x0b,
0x50, 0xb7, 0x62, 0xd3, 0xd0, 0xf2, 0x8a, 0x5b, 0x6d, 0x1d, 0xec, 0x12, 0x75, 0xe4, 0x37, 0x38,
0x9c, 0xe6, 0x7e, 0x86, 0x96, 0x79, 0x29, 0x45, 0xd5, 0xff, 0x4c, 0xd1, 0x43, 0xd4, 0x32, 0xd7,
0x4f, 0x17, 0x7d, 0xc0, 0x3c, 0x4b, 0x94, 0xbb, 0xdc, 0x2d, 0xf7, 0x2b, 0x7e, 0x23, 0x07, 0xac,
0xd8, 0xfb, 0x2c, 0x51, 0x5b, 0x5f, 0x27, 0x37, 0x7b, 0x47, 0x08, 0x49, 0x19, 0x27, 0x5a, 0x48,
0x67, 0x8c, 0x6a, 0xd3, 0xbe, 0x3b, 0x8f, 0xe6, 0x7c, 0x38, 0xec, 0xfb, 0xd3, 0xde, 0x9c, 0xf7,
0x99, 0xb1, 0x6f, 0x48, 0x6f, 0x61, 0xfb, 0xc1, 0xa7, 0xfb, 0xa6, 0x3d, 0x63, 0x8f, 0x89, 0x81,
0xfd, 0x18, 0x4c, 0x0f, 0x18, 0xd8, 0x1c, 0x73, 0x12, 0xa7, 0x41, 0x50, 0xb5, 0xc9, 0x79, 0xfc,
0x37, 0x00, 0x00, 0xff, 0xff, 0xf7, 0x7f, 0xbc, 0x37, 0x84, 0x05, 0x00, 0x00,
}
// --- DRPC BEGIN ---
type DRPCRepairCoordinatorClient interface {
DRPCConn() drpc.Conn
RepairJob(ctx context.Context, in *RepairJobRequest) (*RepairJobResponse, error)
}
type drpcRepairCoordinatorClient struct {
cc drpc.Conn
}
func NewDRPCRepairCoordinatorClient(cc drpc.Conn) DRPCRepairCoordinatorClient {
return &drpcRepairCoordinatorClient{cc}
}
func (c *drpcRepairCoordinatorClient) DRPCConn() drpc.Conn { return c.cc }
func (c *drpcRepairCoordinatorClient) RepairJob(ctx context.Context, in *RepairJobRequest) (*RepairJobResponse, error) {
out := new(RepairJobResponse)
err := c.cc.Invoke(ctx, "/satellite.delegated_repair.RepairCoordinator/RepairJob", in, out)
if err != nil {
return nil, err
}
return out, nil
}
type DRPCRepairCoordinatorServer interface {
RepairJob(context.Context, *RepairJobRequest) (*RepairJobResponse, error)
}
type DRPCRepairCoordinatorDescription struct{}
func (DRPCRepairCoordinatorDescription) NumMethods() int { return 1 }
func (DRPCRepairCoordinatorDescription) Method(n int) (string, drpc.Receiver, interface{}, bool) {
switch n {
case 0:
return "/satellite.delegated_repair.RepairCoordinator/RepairJob",
func(srv interface{}, ctx context.Context, in1, in2 interface{}) (drpc.Message, error) {
return srv.(DRPCRepairCoordinatorServer).
RepairJob(
ctx,
in1.(*RepairJobRequest),
)
}, DRPCRepairCoordinatorServer.RepairJob, true
default:
return "", nil, nil, false
}
}
func DRPCRegisterRepairCoordinator(mux drpc.Mux, impl DRPCRepairCoordinatorServer) error {
return mux.Register(impl, DRPCRepairCoordinatorDescription{})
}
type DRPCRepairCoordinator_RepairJobStream interface {
drpc.Stream
SendAndClose(*RepairJobResponse) error
}
type drpcRepairCoordinatorRepairJobStream struct {
drpc.Stream
}
func (x *drpcRepairCoordinatorRepairJobStream) SendAndClose(m *RepairJobResponse) error {
if err := x.MsgSend(m); err != nil {
return err
}
return x.CloseSend()
}
// --- DRPC END ---
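
The generated client above is a thin wrapper over a drpc.Conn. As a rough illustration of how a caller might issue a single job request (the package name and the way the connection is dialed are assumptions for this sketch, not part of this change):

package repairworker // hypothetical package for this sketch

import (
	"context"

	"storj.io/drpc"

	"storj.io/storj/satellite/internalpb"
)

// requestRepairJob asks the repair coordinator for a single job over an
// already established drpc connection; dialing the connection is left to
// the caller and is not shown here.
func requestRepairJob(ctx context.Context, conn drpc.Conn) (*internalpb.RepairJobResponse, error) {
	client := internalpb.NewDRPCRepairCoordinatorClient(conn)
	// The first request carries no LastJobResult; later requests would attach
	// the result of the previously completed job.
	return client.RepairJob(ctx, &internalpb.RepairJobRequest{})
}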


@ -0,0 +1,74 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.
syntax = "proto3";
option go_package = "storj.io/storj/satellite/internalpb";
package satellite.delegated_repair;
import "gogo.proto";
import "google/protobuf/timestamp.proto";
import "metainfo.proto";
import "orders.proto";
import "pointerdb.proto";
service RepairCoordinator {
rpc RepairJob(RepairJobRequest) returns (RepairJobResponse) {}
}
message RepairJobRequest {
// When not the first request, this will include the result of the last job
RepairJobResult last_job_result = 1;
}
message RepairJobResponse {
// When a job is available, this will be filled in
RepairJobDefinition new_job = 1;
// Otherwise, client should wait this many milliseconds and then try again
int32 come_back_in_millis = 2;
}
message RepairJobDefinition {
// Identifier for this job
bytes job_id = 1;
// Signed GET orders for all believed-healthy pieces to be downloaded
repeated metainfo.AddressedOrderLimit get_orders = 2;
// Private piece key to use for fetching
bytes private_key_for_get = 3;
// Signed PUT orders for all possible pieces to be uploaded (not including
// piece numbers in get_orders)
repeated metainfo.AddressedOrderLimit put_orders = 4;
// Private piece key to use for storing
bytes private_key_for_put = 5;
// Redundancy scheme used by the segment to be repaired
pointerdb.RedundancyScheme redundancy = 6;
// Size of the segment to be repaired
int64 segment_size = 7;
// Target piece count (worker should try to upload enough pieces so that
// this count is achieved)
int32 desired_piece_count = 8;
// Job expiration time
google.protobuf.Timestamp expiration_time = 9 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
}
message RepairJobResult {
// Identifier for this job, as given in RepairJobResponse
bytes job_id = 1;
// Set nonzero only if the segment could not be reconstructed because of
// too few pieces available.
int32 irreparable_pieces_retrieved = 2;
// Set only if the segment could not be reconstructed.
string reconstruct_error = 3;
// Set only if new pieces could not be stored to any new nodes.
string store_error = 4;
// PieceHashes signed by storage nodes which were used to accomplish repair
repeated orders.PieceHash new_pieces_stored = 5;
// A copy of the put_orders list as provided in the corresponding
// RepairJobDefinition
repeated metainfo.AddressedOrderLimit put_orders = 6;
// Pieces which should be _removed_ from the pointer. This will include
// pieces for which the expected owning storage node returned a "not found"
// error, as well as pieces which were downloaded but failed their
// validation check.
repeated int32 delete_piece_nums = 7;
}
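
Taken together, these messages describe a simple pull protocol: a worker repeatedly calls RepairJob, reports the outcome of the previous job in last_job_result, and backs off by come_back_in_millis when no job is available. A minimal sketch of such a loop, assuming a hypothetical processJob callback that performs the repair and builds the result:

package repairworker // hypothetical package for this sketch

import (
	"context"
	"time"

	"storj.io/storj/satellite/internalpb"
)

// runRepairWorker polls the coordinator until the context is cancelled.
// processJob is a hypothetical callback that performs the repair itself and
// builds the result that gets reported on the next request.
func runRepairWorker(
	ctx context.Context,
	client internalpb.DRPCRepairCoordinatorClient,
	processJob func(context.Context, *internalpb.RepairJobDefinition) *internalpb.RepairJobResult,
) error {
	var last *internalpb.RepairJobResult
	for {
		resp, err := client.RepairJob(ctx, &internalpb.RepairJobRequest{LastJobResult: last})
		if err != nil {
			return err
		}
		if job := resp.GetNewJob(); job != nil {
			last = processJob(ctx, job)
			continue
		}
		// No job available: nothing to report next time, so wait the
		// suggested backoff before asking again.
		last = nil
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-time.After(time.Duration(resp.GetComeBackInMillis()) * time.Millisecond):
		}
	}
}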


@ -0,0 +1,7 @@
// Copyright (C) 2020 Storj Labs, Inc.
// See LICENSE for copying information.
//go:generate go run gen.go
// Package internalpb contains proto definitions for satellite internal tools.
package internalpb


@ -3,8 +3,6 @@
// +build ignore
//go:generate go run gen.go
package main
import (
@ -18,7 +16,7 @@ import (
)
var (
mainpkg = flag.String("pkg", "storj.io/storj/internalpb", "main package name")
mainpkg = flag.String("pkg", "storj.io/storj/satellite/internalpb", "main package name")
protoc = flag.String("protoc", "protoc", "protoc compiler")
)
@ -67,10 +65,10 @@ func main() {
commonPb := os.Getenv("STORJ_COMMON_PB")
if commonPb == "" {
commonPb = "../../common/pb"
commonPb = "../../../common/pb"
}
overrideImports := ",Mgoogle/protobuf/timestamp.proto=storj.io/storj/internalpb"
overrideImports := ",Mgoogle/protobuf/timestamp.proto=storj.io/storj/satellite/internalpb"
args := []string{
"--lint_out=.",
"--drpc_out=plugins=drpc,paths=source_relative" + overrideImports + ":.",


@ -0,0 +1,956 @@
// Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: inspector.proto
package internalpb
import (
context "context"
fmt "fmt"
math "math"
proto "github.com/gogo/protobuf/proto"
pb "storj.io/common/pb"
drpc "storj.io/drpc"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
type CountNodesRequest struct {
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *CountNodesRequest) Reset() { *m = CountNodesRequest{} }
func (m *CountNodesRequest) String() string { return proto.CompactTextString(m) }
func (*CountNodesRequest) ProtoMessage() {}
func (*CountNodesRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_a07d9034b2dd9d26, []int{0}
}
func (m *CountNodesRequest) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_CountNodesRequest.Unmarshal(m, b)
}
func (m *CountNodesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_CountNodesRequest.Marshal(b, m, deterministic)
}
func (m *CountNodesRequest) XXX_Merge(src proto.Message) {
xxx_messageInfo_CountNodesRequest.Merge(m, src)
}
func (m *CountNodesRequest) XXX_Size() int {
return xxx_messageInfo_CountNodesRequest.Size(m)
}
func (m *CountNodesRequest) XXX_DiscardUnknown() {
xxx_messageInfo_CountNodesRequest.DiscardUnknown(m)
}
var xxx_messageInfo_CountNodesRequest proto.InternalMessageInfo
type CountNodesResponse struct {
Count int64 `protobuf:"varint,1,opt,name=count,proto3" json:"count,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *CountNodesResponse) Reset() { *m = CountNodesResponse{} }
func (m *CountNodesResponse) String() string { return proto.CompactTextString(m) }
func (*CountNodesResponse) ProtoMessage() {}
func (*CountNodesResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_a07d9034b2dd9d26, []int{1}
}
func (m *CountNodesResponse) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_CountNodesResponse.Unmarshal(m, b)
}
func (m *CountNodesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_CountNodesResponse.Marshal(b, m, deterministic)
}
func (m *CountNodesResponse) XXX_Merge(src proto.Message) {
xxx_messageInfo_CountNodesResponse.Merge(m, src)
}
func (m *CountNodesResponse) XXX_Size() int {
return xxx_messageInfo_CountNodesResponse.Size(m)
}
func (m *CountNodesResponse) XXX_DiscardUnknown() {
xxx_messageInfo_CountNodesResponse.DiscardUnknown(m)
}
var xxx_messageInfo_CountNodesResponse proto.InternalMessageInfo
func (m *CountNodesResponse) GetCount() int64 {
if m != nil {
return m.Count
}
return 0
}
type DumpNodesRequest struct {
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *DumpNodesRequest) Reset() { *m = DumpNodesRequest{} }
func (m *DumpNodesRequest) String() string { return proto.CompactTextString(m) }
func (*DumpNodesRequest) ProtoMessage() {}
func (*DumpNodesRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_a07d9034b2dd9d26, []int{2}
}
func (m *DumpNodesRequest) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_DumpNodesRequest.Unmarshal(m, b)
}
func (m *DumpNodesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_DumpNodesRequest.Marshal(b, m, deterministic)
}
func (m *DumpNodesRequest) XXX_Merge(src proto.Message) {
xxx_messageInfo_DumpNodesRequest.Merge(m, src)
}
func (m *DumpNodesRequest) XXX_Size() int {
return xxx_messageInfo_DumpNodesRequest.Size(m)
}
func (m *DumpNodesRequest) XXX_DiscardUnknown() {
xxx_messageInfo_DumpNodesRequest.DiscardUnknown(m)
}
var xxx_messageInfo_DumpNodesRequest proto.InternalMessageInfo
type DumpNodesResponse struct {
Nodes []*pb.Node `protobuf:"bytes,1,rep,name=nodes,proto3" json:"nodes,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *DumpNodesResponse) Reset() { *m = DumpNodesResponse{} }
func (m *DumpNodesResponse) String() string { return proto.CompactTextString(m) }
func (*DumpNodesResponse) ProtoMessage() {}
func (*DumpNodesResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_a07d9034b2dd9d26, []int{3}
}
func (m *DumpNodesResponse) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_DumpNodesResponse.Unmarshal(m, b)
}
func (m *DumpNodesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_DumpNodesResponse.Marshal(b, m, deterministic)
}
func (m *DumpNodesResponse) XXX_Merge(src proto.Message) {
xxx_messageInfo_DumpNodesResponse.Merge(m, src)
}
func (m *DumpNodesResponse) XXX_Size() int {
return xxx_messageInfo_DumpNodesResponse.Size(m)
}
func (m *DumpNodesResponse) XXX_DiscardUnknown() {
xxx_messageInfo_DumpNodesResponse.DiscardUnknown(m)
}
var xxx_messageInfo_DumpNodesResponse proto.InternalMessageInfo
func (m *DumpNodesResponse) GetNodes() []*pb.Node {
if m != nil {
return m.Nodes
}
return nil
}
type ListIrreparableSegmentsRequest struct {
Limit int32 `protobuf:"varint,1,opt,name=limit,proto3" json:"limit,omitempty"`
LastSeenSegmentPath []byte `protobuf:"bytes,2,opt,name=last_seen_segment_path,json=lastSeenSegmentPath,proto3" json:"last_seen_segment_path,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *ListIrreparableSegmentsRequest) Reset() { *m = ListIrreparableSegmentsRequest{} }
func (m *ListIrreparableSegmentsRequest) String() string { return proto.CompactTextString(m) }
func (*ListIrreparableSegmentsRequest) ProtoMessage() {}
func (*ListIrreparableSegmentsRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_a07d9034b2dd9d26, []int{4}
}
func (m *ListIrreparableSegmentsRequest) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_ListIrreparableSegmentsRequest.Unmarshal(m, b)
}
func (m *ListIrreparableSegmentsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_ListIrreparableSegmentsRequest.Marshal(b, m, deterministic)
}
func (m *ListIrreparableSegmentsRequest) XXX_Merge(src proto.Message) {
xxx_messageInfo_ListIrreparableSegmentsRequest.Merge(m, src)
}
func (m *ListIrreparableSegmentsRequest) XXX_Size() int {
return xxx_messageInfo_ListIrreparableSegmentsRequest.Size(m)
}
func (m *ListIrreparableSegmentsRequest) XXX_DiscardUnknown() {
xxx_messageInfo_ListIrreparableSegmentsRequest.DiscardUnknown(m)
}
var xxx_messageInfo_ListIrreparableSegmentsRequest proto.InternalMessageInfo
func (m *ListIrreparableSegmentsRequest) GetLimit() int32 {
if m != nil {
return m.Limit
}
return 0
}
func (m *ListIrreparableSegmentsRequest) GetLastSeenSegmentPath() []byte {
if m != nil {
return m.LastSeenSegmentPath
}
return nil
}
type ListIrreparableSegmentsResponse struct {
Segments []*IrreparableSegment `protobuf:"bytes,1,rep,name=segments,proto3" json:"segments,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *ListIrreparableSegmentsResponse) Reset() { *m = ListIrreparableSegmentsResponse{} }
func (m *ListIrreparableSegmentsResponse) String() string { return proto.CompactTextString(m) }
func (*ListIrreparableSegmentsResponse) ProtoMessage() {}
func (*ListIrreparableSegmentsResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_a07d9034b2dd9d26, []int{5}
}
func (m *ListIrreparableSegmentsResponse) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_ListIrreparableSegmentsResponse.Unmarshal(m, b)
}
func (m *ListIrreparableSegmentsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_ListIrreparableSegmentsResponse.Marshal(b, m, deterministic)
}
func (m *ListIrreparableSegmentsResponse) XXX_Merge(src proto.Message) {
xxx_messageInfo_ListIrreparableSegmentsResponse.Merge(m, src)
}
func (m *ListIrreparableSegmentsResponse) XXX_Size() int {
return xxx_messageInfo_ListIrreparableSegmentsResponse.Size(m)
}
func (m *ListIrreparableSegmentsResponse) XXX_DiscardUnknown() {
xxx_messageInfo_ListIrreparableSegmentsResponse.DiscardUnknown(m)
}
var xxx_messageInfo_ListIrreparableSegmentsResponse proto.InternalMessageInfo
func (m *ListIrreparableSegmentsResponse) GetSegments() []*IrreparableSegment {
if m != nil {
return m.Segments
}
return nil
}
type IrreparableSegment struct {
Path []byte `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"`
SegmentDetail *pb.Pointer `protobuf:"bytes,2,opt,name=segment_detail,json=segmentDetail,proto3" json:"segment_detail,omitempty"`
LostPieces int32 `protobuf:"varint,3,opt,name=lost_pieces,json=lostPieces,proto3" json:"lost_pieces,omitempty"`
LastRepairAttempt int64 `protobuf:"varint,4,opt,name=last_repair_attempt,json=lastRepairAttempt,proto3" json:"last_repair_attempt,omitempty"`
RepairAttemptCount int64 `protobuf:"varint,5,opt,name=repair_attempt_count,json=repairAttemptCount,proto3" json:"repair_attempt_count,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *IrreparableSegment) Reset() { *m = IrreparableSegment{} }
func (m *IrreparableSegment) String() string { return proto.CompactTextString(m) }
func (*IrreparableSegment) ProtoMessage() {}
func (*IrreparableSegment) Descriptor() ([]byte, []int) {
return fileDescriptor_a07d9034b2dd9d26, []int{6}
}
func (m *IrreparableSegment) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_IrreparableSegment.Unmarshal(m, b)
}
func (m *IrreparableSegment) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_IrreparableSegment.Marshal(b, m, deterministic)
}
func (m *IrreparableSegment) XXX_Merge(src proto.Message) {
xxx_messageInfo_IrreparableSegment.Merge(m, src)
}
func (m *IrreparableSegment) XXX_Size() int {
return xxx_messageInfo_IrreparableSegment.Size(m)
}
func (m *IrreparableSegment) XXX_DiscardUnknown() {
xxx_messageInfo_IrreparableSegment.DiscardUnknown(m)
}
var xxx_messageInfo_IrreparableSegment proto.InternalMessageInfo
func (m *IrreparableSegment) GetPath() []byte {
if m != nil {
return m.Path
}
return nil
}
func (m *IrreparableSegment) GetSegmentDetail() *pb.Pointer {
if m != nil {
return m.SegmentDetail
}
return nil
}
func (m *IrreparableSegment) GetLostPieces() int32 {
if m != nil {
return m.LostPieces
}
return 0
}
func (m *IrreparableSegment) GetLastRepairAttempt() int64 {
if m != nil {
return m.LastRepairAttempt
}
return 0
}
func (m *IrreparableSegment) GetRepairAttemptCount() int64 {
if m != nil {
return m.RepairAttemptCount
}
return 0
}
type ObjectHealthRequest struct {
EncryptedPath []byte `protobuf:"bytes,1,opt,name=encrypted_path,json=encryptedPath,proto3" json:"encrypted_path,omitempty"`
Bucket []byte `protobuf:"bytes,2,opt,name=bucket,proto3" json:"bucket,omitempty"`
ProjectId []byte `protobuf:"bytes,3,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"`
StartAfterSegment int64 `protobuf:"varint,4,opt,name=start_after_segment,json=startAfterSegment,proto3" json:"start_after_segment,omitempty"`
EndBeforeSegment int64 `protobuf:"varint,5,opt,name=end_before_segment,json=endBeforeSegment,proto3" json:"end_before_segment,omitempty"`
Limit int32 `protobuf:"varint,6,opt,name=limit,proto3" json:"limit,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *ObjectHealthRequest) Reset() { *m = ObjectHealthRequest{} }
func (m *ObjectHealthRequest) String() string { return proto.CompactTextString(m) }
func (*ObjectHealthRequest) ProtoMessage() {}
func (*ObjectHealthRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_a07d9034b2dd9d26, []int{7}
}
func (m *ObjectHealthRequest) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_ObjectHealthRequest.Unmarshal(m, b)
}
func (m *ObjectHealthRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_ObjectHealthRequest.Marshal(b, m, deterministic)
}
func (m *ObjectHealthRequest) XXX_Merge(src proto.Message) {
xxx_messageInfo_ObjectHealthRequest.Merge(m, src)
}
func (m *ObjectHealthRequest) XXX_Size() int {
return xxx_messageInfo_ObjectHealthRequest.Size(m)
}
func (m *ObjectHealthRequest) XXX_DiscardUnknown() {
xxx_messageInfo_ObjectHealthRequest.DiscardUnknown(m)
}
var xxx_messageInfo_ObjectHealthRequest proto.InternalMessageInfo
func (m *ObjectHealthRequest) GetEncryptedPath() []byte {
if m != nil {
return m.EncryptedPath
}
return nil
}
func (m *ObjectHealthRequest) GetBucket() []byte {
if m != nil {
return m.Bucket
}
return nil
}
func (m *ObjectHealthRequest) GetProjectId() []byte {
if m != nil {
return m.ProjectId
}
return nil
}
func (m *ObjectHealthRequest) GetStartAfterSegment() int64 {
if m != nil {
return m.StartAfterSegment
}
return 0
}
func (m *ObjectHealthRequest) GetEndBeforeSegment() int64 {
if m != nil {
return m.EndBeforeSegment
}
return 0
}
func (m *ObjectHealthRequest) GetLimit() int32 {
if m != nil {
return m.Limit
}
return 0
}
type ObjectHealthResponse struct {
Segments []*SegmentHealth `protobuf:"bytes,1,rep,name=segments,proto3" json:"segments,omitempty"`
Redundancy *pb.RedundancyScheme `protobuf:"bytes,2,opt,name=redundancy,proto3" json:"redundancy,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *ObjectHealthResponse) Reset() { *m = ObjectHealthResponse{} }
func (m *ObjectHealthResponse) String() string { return proto.CompactTextString(m) }
func (*ObjectHealthResponse) ProtoMessage() {}
func (*ObjectHealthResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_a07d9034b2dd9d26, []int{8}
}
func (m *ObjectHealthResponse) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_ObjectHealthResponse.Unmarshal(m, b)
}
func (m *ObjectHealthResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_ObjectHealthResponse.Marshal(b, m, deterministic)
}
func (m *ObjectHealthResponse) XXX_Merge(src proto.Message) {
xxx_messageInfo_ObjectHealthResponse.Merge(m, src)
}
func (m *ObjectHealthResponse) XXX_Size() int {
return xxx_messageInfo_ObjectHealthResponse.Size(m)
}
func (m *ObjectHealthResponse) XXX_DiscardUnknown() {
xxx_messageInfo_ObjectHealthResponse.DiscardUnknown(m)
}
var xxx_messageInfo_ObjectHealthResponse proto.InternalMessageInfo
func (m *ObjectHealthResponse) GetSegments() []*SegmentHealth {
if m != nil {
return m.Segments
}
return nil
}
func (m *ObjectHealthResponse) GetRedundancy() *pb.RedundancyScheme {
if m != nil {
return m.Redundancy
}
return nil
}
type SegmentHealthRequest struct {
Bucket []byte `protobuf:"bytes,1,opt,name=bucket,proto3" json:"bucket,omitempty"`
EncryptedPath []byte `protobuf:"bytes,2,opt,name=encrypted_path,json=encryptedPath,proto3" json:"encrypted_path,omitempty"`
SegmentIndex int64 `protobuf:"varint,3,opt,name=segment_index,json=segmentIndex,proto3" json:"segment_index,omitempty"`
ProjectId []byte `protobuf:"bytes,4,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *SegmentHealthRequest) Reset() { *m = SegmentHealthRequest{} }
func (m *SegmentHealthRequest) String() string { return proto.CompactTextString(m) }
func (*SegmentHealthRequest) ProtoMessage() {}
func (*SegmentHealthRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_a07d9034b2dd9d26, []int{9}
}
func (m *SegmentHealthRequest) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_SegmentHealthRequest.Unmarshal(m, b)
}
func (m *SegmentHealthRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_SegmentHealthRequest.Marshal(b, m, deterministic)
}
func (m *SegmentHealthRequest) XXX_Merge(src proto.Message) {
xxx_messageInfo_SegmentHealthRequest.Merge(m, src)
}
func (m *SegmentHealthRequest) XXX_Size() int {
return xxx_messageInfo_SegmentHealthRequest.Size(m)
}
func (m *SegmentHealthRequest) XXX_DiscardUnknown() {
xxx_messageInfo_SegmentHealthRequest.DiscardUnknown(m)
}
var xxx_messageInfo_SegmentHealthRequest proto.InternalMessageInfo
func (m *SegmentHealthRequest) GetBucket() []byte {
if m != nil {
return m.Bucket
}
return nil
}
func (m *SegmentHealthRequest) GetEncryptedPath() []byte {
if m != nil {
return m.EncryptedPath
}
return nil
}
func (m *SegmentHealthRequest) GetSegmentIndex() int64 {
if m != nil {
return m.SegmentIndex
}
return 0
}
func (m *SegmentHealthRequest) GetProjectId() []byte {
if m != nil {
return m.ProjectId
}
return nil
}
type SegmentHealthResponse struct {
Health *SegmentHealth `protobuf:"bytes,1,opt,name=health,proto3" json:"health,omitempty"`
Redundancy *pb.RedundancyScheme `protobuf:"bytes,2,opt,name=redundancy,proto3" json:"redundancy,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *SegmentHealthResponse) Reset() { *m = SegmentHealthResponse{} }
func (m *SegmentHealthResponse) String() string { return proto.CompactTextString(m) }
func (*SegmentHealthResponse) ProtoMessage() {}
func (*SegmentHealthResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_a07d9034b2dd9d26, []int{10}
}
func (m *SegmentHealthResponse) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_SegmentHealthResponse.Unmarshal(m, b)
}
func (m *SegmentHealthResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_SegmentHealthResponse.Marshal(b, m, deterministic)
}
func (m *SegmentHealthResponse) XXX_Merge(src proto.Message) {
xxx_messageInfo_SegmentHealthResponse.Merge(m, src)
}
func (m *SegmentHealthResponse) XXX_Size() int {
return xxx_messageInfo_SegmentHealthResponse.Size(m)
}
func (m *SegmentHealthResponse) XXX_DiscardUnknown() {
xxx_messageInfo_SegmentHealthResponse.DiscardUnknown(m)
}
var xxx_messageInfo_SegmentHealthResponse proto.InternalMessageInfo
func (m *SegmentHealthResponse) GetHealth() *SegmentHealth {
if m != nil {
return m.Health
}
return nil
}
func (m *SegmentHealthResponse) GetRedundancy() *pb.RedundancyScheme {
if m != nil {
return m.Redundancy
}
return nil
}
type SegmentHealth struct {
HealthyIds []NodeID `protobuf:"bytes,1,rep,name=healthy_ids,json=healthyIds,proto3,customtype=NodeID" json:"healthy_ids,omitempty"`
UnhealthyIds []NodeID `protobuf:"bytes,2,rep,name=unhealthy_ids,json=unhealthyIds,proto3,customtype=NodeID" json:"unhealthy_ids,omitempty"`
OfflineIds []NodeID `protobuf:"bytes,3,rep,name=offline_ids,json=offlineIds,proto3,customtype=NodeID" json:"offline_ids,omitempty"`
Segment []byte `protobuf:"bytes,4,opt,name=segment,proto3" json:"segment,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *SegmentHealth) Reset() { *m = SegmentHealth{} }
func (m *SegmentHealth) String() string { return proto.CompactTextString(m) }
func (*SegmentHealth) ProtoMessage() {}
func (*SegmentHealth) Descriptor() ([]byte, []int) {
return fileDescriptor_a07d9034b2dd9d26, []int{11}
}
func (m *SegmentHealth) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_SegmentHealth.Unmarshal(m, b)
}
func (m *SegmentHealth) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_SegmentHealth.Marshal(b, m, deterministic)
}
func (m *SegmentHealth) XXX_Merge(src proto.Message) {
xxx_messageInfo_SegmentHealth.Merge(m, src)
}
func (m *SegmentHealth) XXX_Size() int {
return xxx_messageInfo_SegmentHealth.Size(m)
}
func (m *SegmentHealth) XXX_DiscardUnknown() {
xxx_messageInfo_SegmentHealth.DiscardUnknown(m)
}
var xxx_messageInfo_SegmentHealth proto.InternalMessageInfo
func (m *SegmentHealth) GetSegment() []byte {
if m != nil {
return m.Segment
}
return nil
}
func init() {
proto.RegisterType((*CountNodesRequest)(nil), "satellite.inspector.CountNodesRequest")
proto.RegisterType((*CountNodesResponse)(nil), "satellite.inspector.CountNodesResponse")
proto.RegisterType((*DumpNodesRequest)(nil), "satellite.inspector.DumpNodesRequest")
proto.RegisterType((*DumpNodesResponse)(nil), "satellite.inspector.DumpNodesResponse")
proto.RegisterType((*ListIrreparableSegmentsRequest)(nil), "satellite.inspector.ListIrreparableSegmentsRequest")
proto.RegisterType((*ListIrreparableSegmentsResponse)(nil), "satellite.inspector.ListIrreparableSegmentsResponse")
proto.RegisterType((*IrreparableSegment)(nil), "satellite.inspector.IrreparableSegment")
proto.RegisterType((*ObjectHealthRequest)(nil), "satellite.inspector.ObjectHealthRequest")
proto.RegisterType((*ObjectHealthResponse)(nil), "satellite.inspector.ObjectHealthResponse")
proto.RegisterType((*SegmentHealthRequest)(nil), "satellite.inspector.SegmentHealthRequest")
proto.RegisterType((*SegmentHealthResponse)(nil), "satellite.inspector.SegmentHealthResponse")
proto.RegisterType((*SegmentHealth)(nil), "satellite.inspector.SegmentHealth")
}
func init() { proto.RegisterFile("inspector.proto", fileDescriptor_a07d9034b2dd9d26) }
var fileDescriptor_a07d9034b2dd9d26 = []byte{
// 833 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x56, 0xcd, 0x6e, 0x23, 0x45,
0x10, 0xa6, 0xe3, 0xc4, 0xb0, 0x65, 0x67, 0x93, 0x74, 0xcc, 0x62, 0x19, 0x41, 0xa2, 0x59, 0x65,
0xd7, 0xbb, 0x8b, 0xc6, 0xc8, 0x81, 0x03, 0x20, 0x21, 0x6d, 0x36, 0x07, 0x2c, 0x21, 0x36, 0x9a,
0xdc, 0x56, 0x42, 0xa3, 0xf1, 0x4c, 0x39, 0x9e, 0xdd, 0x71, 0xf7, 0xd0, 0xdd, 0x46, 0xf8, 0xce,
0x03, 0x20, 0x71, 0xe2, 0xc0, 0x5b, 0xf0, 0x1a, 0xbc, 0x00, 0x07, 0x0e, 0x1c, 0xe0, 0x35, 0x50,
0xff, 0xcc, 0x78, 0x6c, 0xcf, 0x12, 0xa3, 0xbd, 0x75, 0x57, 0x7d, 0x55, 0xfd, 0x55, 0x7d, 0x35,
0x65, 0xc3, 0x41, 0xca, 0x64, 0x8e, 0xb1, 0xe2, 0xc2, 0xcf, 0x05, 0x57, 0x9c, 0x1e, 0xcb, 0x48,
0x61, 0x96, 0xa5, 0x0a, 0xfd, 0xd2, 0xd5, 0x83, 0x1b, 0x7e, 0xc3, 0x2d, 0xa0, 0x07, 0x8c, 0x27,
0xe8, 0xce, 0x07, 0x39, 0x4f, 0x99, 0x42, 0x91, 0x8c, 0xad, 0xc1, 0x3b, 0x86, 0xa3, 0x67, 0x7c,
0xce, 0xd4, 0x37, 0x3c, 0x41, 0x19, 0xe0, 0x77, 0x73, 0x94, 0xca, 0x7b, 0x0c, 0xb4, 0x6a, 0x94,
0x39, 0x67, 0x12, 0x69, 0x07, 0xf6, 0x62, 0x6d, 0xed, 0x92, 0x53, 0xd2, 0x6f, 0x04, 0xf6, 0xe2,
0x51, 0x38, 0xbc, 0x9c, 0xcf, 0xf2, 0x95, 0xf8, 0x4f, 0xe1, 0xa8, 0x62, 0x73, 0xe1, 0xa7, 0xb0,
0xa7, 0x89, 0xc8, 0x2e, 0x39, 0x6d, 0xf4, 0x5b, 0x43, 0xf0, 0x0d, 0x2d, 0x8d, 0x09, 0xac, 0xc3,
0x7b, 0x05, 0x1f, 0x7e, 0x9d, 0x4a, 0x35, 0x12, 0x02, 0xf3, 0x48, 0x44, 0xe3, 0x0c, 0xaf, 0xf1,
0x66, 0x86, 0x4c, 0x15, 0x89, 0x35, 0x85, 0x2c, 0x9d, 0xa5, 0x96, 0xc2, 0x5e, 0x60, 0x2f, 0xf4,
0x1c, 0xee, 0x65, 0x91, 0x54, 0xa1, 0x44, 0x64, 0xa1, 0xb4, 0x21, 0x61, 0x1e, 0xa9, 0x69, 0x77,
0xe7, 0x94, 0xf4, 0xdb, 0xc1, 0xb1, 0xf6, 0x5e, 0x23, 0x32, 0x97, 0xee, 0x2a, 0x52, 0x53, 0x6f,
0x02, 0x27, 0xaf, 0x7d, 0xcc, 0x31, 0x7e, 0x06, 0xef, 0xb8, 0x6c, 0x05, 0xe9, 0x87, 0x7e, 0x4d,
0xb3, 0xfd, 0xcd, 0x1c, 0x41, 0x19, 0xe8, 0xfd, 0x4d, 0x80, 0x6e, 0x02, 0x28, 0x85, 0x5d, 0xc3,
0x90, 0x18, 0x86, 0xe6, 0x4c, 0x3f, 0x83, 0xbb, 0x05, 0xfb, 0x04, 0x55, 0x94, 0x66, 0x86, 0x7f,
0x6b, 0x48, 0xfd, 0xa5, 0x6a, 0x57, 0xf6, 0x14, 0xec, 0x3b, 0xe4, 0xa5, 0x01, 0xd2, 0x13, 0x68,
0x65, 0x5c, 0xaa, 0x30, 0x4f, 0x31, 0x46, 0xd9, 0x6d, 0x98, 0xf6, 0x80, 0x36, 0x5d, 0x19, 0x0b,
0xf5, 0xc1, 0x74, 0x21, 0xd4, 0x44, 0x52, 0x11, 0x46, 0x4a, 0xe1, 0x2c, 0x57, 0xdd, 0x5d, 0x23,
0xe5, 0x91, 0x76, 0x05, 0xc6, 0xf3, 0xd4, 0x3a, 0xe8, 0xc7, 0xd0, 0x59, 0x85, 0x86, 0x56, 0xfb,
0x3d, 0x13, 0x40, 0x45, 0x15, 0x6c, 0x66, 0xc5, 0xfb, 0x87, 0xc0, 0xf1, 0xf3, 0xf1, 0x4b, 0x8c,
0xd5, 0x57, 0x18, 0x65, 0x6a, 0x5a, 0x68, 0x76, 0x06, 0x77, 0x91, 0xc5, 0x62, 0x91, 0x2b, 0x4c,
0xc2, 0x4a, 0xcd, 0xfb, 0xa5, 0x55, 0xeb, 0x41, 0xef, 0x41, 0x73, 0x3c, 0x8f, 0x5f, 0xa1, 0x72,
0xa2, 0xb9, 0x1b, 0xfd, 0x00, 0x20, 0x17, 0x5c, 0xa7, 0x0d, 0xd3, 0xc4, 0x14, 0xd6, 0x0e, 0xee,
0x38, 0xcb, 0x28, 0xd1, 0x75, 0x49, 0x15, 0x09, 0x15, 0x46, 0x13, 0x85, 0xa2, 0x50, 0xbf, 0xa8,
0xcb, 0xb8, 0x9e, 0x6a, 0x4f, 0xd1, 0xf7, 0x8f, 0x80, 0x22, 0x4b, 0xc2, 0x31, 0x4e, 0xb8, 0xc0,
0x12, 0x6e, 0xab, 0x3a, 0x44, 0x96, 0x5c, 0x18, 0x47, 0x81, 0x2e, 0xe7, 0xad, 0x59, 0x99, 0x37,
0xef, 0x67, 0x02, 0x9d, 0xd5, 0x4a, 0xdd, 0xc0, 0x7c, 0xb9, 0x31, 0x30, 0x5e, 0xed, 0xc0, 0xb8,
0xf4, 0x2e, 0xba, 0x8c, 0xa1, 0x5f, 0x00, 0x08, 0x4c, 0xe6, 0x2c, 0x89, 0x58, 0xbc, 0x70, 0xe2,
0xbf, 0x5f, 0x11, 0x3f, 0x28, 0x9d, 0xd7, 0xf1, 0x14, 0x67, 0x18, 0x54, 0xe0, 0xde, 0x2f, 0x04,
0x3a, 0xab, 0x89, 0x9d, 0x00, 0xcb, 0xce, 0x92, 0x95, 0xce, 0x6e, 0x0a, 0xb3, 0x53, 0x27, 0xcc,
0x7d, 0x28, 0x66, 0x2d, 0x4c, 0x59, 0x82, 0x3f, 0x18, 0x0d, 0x1a, 0x41, 0xdb, 0x19, 0x47, 0xda,
0xb6, 0xa6, 0xd2, 0xee, 0x9a, 0x4a, 0xde, 0x4f, 0x04, 0xde, 0x5d, 0xe3, 0xe6, 0x5a, 0xf6, 0x39,
0x34, 0xa7, 0xc6, 0x62, 0xc8, 0x6d, 0xd7, 0x30, 0x17, 0xf1, 0x66, 0xed, 0xfa, 0x8d, 0xc0, 0xfe,
0x4a, 0x5a, 0xfa, 0x04, 0x5a, 0x36, 0xf1, 0x22, 0x4c, 0x13, 0x2b, 0x60, 0xfb, 0x02, 0xfe, 0xf8,
0xf3, 0xa4, 0xa9, 0x97, 0xd4, 0xe8, 0x32, 0x00, 0xe7, 0x1e, 0x25, 0x92, 0x0e, 0x60, 0x7f, 0xce,
0xaa, 0xf0, 0x9d, 0x0d, 0x78, 0xbb, 0x04, 0xe8, 0x80, 0x27, 0xd0, 0xe2, 0x93, 0x49, 0x96, 0x32,
0x34, 0xf0, 0xc6, 0x66, 0x76, 0xe7, 0xd6, 0xe0, 0x2e, 0xbc, 0x5d, 0x9d, 0xe4, 0x76, 0x50, 0x5c,
0x87, 0xbf, 0x13, 0x38, 0x7c, 0xfe, 0x3d, 0x8a, 0x2c, 0x5a, 0x8c, 0x8a, 0xf6, 0xd0, 0x6f, 0x01,
0x96, 0xfb, 0x9a, 0x3e, 0xa8, 0x6d, 0xe1, 0xc6, 0x96, 0xef, 0x3d, 0xbc, 0x15, 0xe7, 0x34, 0x7a,
0x01, 0x77, 0xca, 0x75, 0x4e, 0xcf, 0x6a, 0xa3, 0xd6, 0x7f, 0x02, 0x7a, 0x0f, 0x6e, 0x83, 0xd9,
0xdc, 0xc3, 0x5f, 0x09, 0x74, 0x2a, 0xeb, 0x71, 0x59, 0xd3, 0x8f, 0x04, 0xde, 0x7b, 0xcd, 0x82,
0xa6, 0xe7, 0xb5, 0xc9, 0xff, 0xfb, 0xb7, 0xa3, 0xf7, 0xc9, 0xff, 0x0b, 0x72, 0xfc, 0xfe, 0x22,
0x70, 0x60, 0xe7, 0x63, 0x49, 0x0d, 0xa1, 0x5d, 0xfd, 0xfc, 0x69, 0xbf, 0x36, 0x73, 0xcd, 0x2e,
0xec, 0x3d, 0xda, 0x02, 0x69, 0x1f, 0xf6, 0xde, 0xa2, 0xd3, 0xf5, 0x01, 0x7d, 0xb4, 0xc5, 0xb7,
0xe1, 0x1e, 0x7a, 0xbc, 0x0d, 0xb4, 0x78, 0xe9, 0xe2, 0xec, 0xc5, 0x7d, 0xa9, 0xb8, 0x78, 0xe9,
0xa7, 0x7c, 0x60, 0x0e, 0x83, 0x32, 0x7a, 0x60, 0x3e, 0x26, 0x16, 0x65, 0xf9, 0x78, 0xdc, 0x34,
0x7f, 0x19, 0xce, 0xff, 0x0d, 0x00, 0x00, 0xff, 0xff, 0xfd, 0xe6, 0x11, 0x41, 0x83, 0x08, 0x00,
0x00,
}
// --- DRPC BEGIN ---
type DRPCOverlayInspectorClient interface {
DRPCConn() drpc.Conn
// CountNodes returns the number of nodes in the cache
CountNodes(ctx context.Context, in *CountNodesRequest) (*CountNodesResponse, error)
// DumpNodes returns all the nodes in the cache
DumpNodes(ctx context.Context, in *DumpNodesRequest) (*DumpNodesResponse, error)
}
type drpcOverlayInspectorClient struct {
cc drpc.Conn
}
func NewDRPCOverlayInspectorClient(cc drpc.Conn) DRPCOverlayInspectorClient {
return &drpcOverlayInspectorClient{cc}
}
func (c *drpcOverlayInspectorClient) DRPCConn() drpc.Conn { return c.cc }
func (c *drpcOverlayInspectorClient) CountNodes(ctx context.Context, in *CountNodesRequest) (*CountNodesResponse, error) {
out := new(CountNodesResponse)
err := c.cc.Invoke(ctx, "/satellite.inspector.OverlayInspector/CountNodes", in, out)
if err != nil {
return nil, err
}
return out, nil
}
func (c *drpcOverlayInspectorClient) DumpNodes(ctx context.Context, in *DumpNodesRequest) (*DumpNodesResponse, error) {
out := new(DumpNodesResponse)
err := c.cc.Invoke(ctx, "/satellite.inspector.OverlayInspector/DumpNodes", in, out)
if err != nil {
return nil, err
}
return out, nil
}
type DRPCOverlayInspectorServer interface {
// CountNodes returns the number of nodes in the cache
CountNodes(context.Context, *CountNodesRequest) (*CountNodesResponse, error)
// DumpNodes returns all the nodes in the cache
DumpNodes(context.Context, *DumpNodesRequest) (*DumpNodesResponse, error)
}
type DRPCOverlayInspectorDescription struct{}
func (DRPCOverlayInspectorDescription) NumMethods() int { return 2 }
func (DRPCOverlayInspectorDescription) Method(n int) (string, drpc.Receiver, interface{}, bool) {
switch n {
case 0:
return "/satellite.inspector.OverlayInspector/CountNodes",
func(srv interface{}, ctx context.Context, in1, in2 interface{}) (drpc.Message, error) {
return srv.(DRPCOverlayInspectorServer).
CountNodes(
ctx,
in1.(*CountNodesRequest),
)
}, DRPCOverlayInspectorServer.CountNodes, true
case 1:
return "/satellite.inspector.OverlayInspector/DumpNodes",
func(srv interface{}, ctx context.Context, in1, in2 interface{}) (drpc.Message, error) {
return srv.(DRPCOverlayInspectorServer).
DumpNodes(
ctx,
in1.(*DumpNodesRequest),
)
}, DRPCOverlayInspectorServer.DumpNodes, true
default:
return "", nil, nil, false
}
}
func DRPCRegisterOverlayInspector(mux drpc.Mux, impl DRPCOverlayInspectorServer) error {
return mux.Register(impl, DRPCOverlayInspectorDescription{})
}
type DRPCOverlayInspector_CountNodesStream interface {
drpc.Stream
SendAndClose(*CountNodesResponse) error
}
type drpcOverlayInspectorCountNodesStream struct {
drpc.Stream
}
func (x *drpcOverlayInspectorCountNodesStream) SendAndClose(m *CountNodesResponse) error {
if err := x.MsgSend(m); err != nil {
return err
}
return x.CloseSend()
}
type DRPCOverlayInspector_DumpNodesStream interface {
drpc.Stream
SendAndClose(*DumpNodesResponse) error
}
type drpcOverlayInspectorDumpNodesStream struct {
drpc.Stream
}
func (x *drpcOverlayInspectorDumpNodesStream) SendAndClose(m *DumpNodesResponse) error {
if err := x.MsgSend(m); err != nil {
return err
}
return x.CloseSend()
}
type DRPCIrreparableInspectorClient interface {
DRPCConn() drpc.Conn
// ListIrreparableSegments returns damaged segments
ListIrreparableSegments(ctx context.Context, in *ListIrreparableSegmentsRequest) (*ListIrreparableSegmentsResponse, error)
}
type drpcIrreparableInspectorClient struct {
cc drpc.Conn
}
func NewDRPCIrreparableInspectorClient(cc drpc.Conn) DRPCIrreparableInspectorClient {
return &drpcIrreparableInspectorClient{cc}
}
func (c *drpcIrreparableInspectorClient) DRPCConn() drpc.Conn { return c.cc }
func (c *drpcIrreparableInspectorClient) ListIrreparableSegments(ctx context.Context, in *ListIrreparableSegmentsRequest) (*ListIrreparableSegmentsResponse, error) {
out := new(ListIrreparableSegmentsResponse)
err := c.cc.Invoke(ctx, "/satellite.inspector.IrreparableInspector/ListIrreparableSegments", in, out)
if err != nil {
return nil, err
}
return out, nil
}
type DRPCIrreparableInspectorServer interface {
// ListIrreparableSegments returns damaged segments
ListIrreparableSegments(context.Context, *ListIrreparableSegmentsRequest) (*ListIrreparableSegmentsResponse, error)
}
type DRPCIrreparableInspectorDescription struct{}
func (DRPCIrreparableInspectorDescription) NumMethods() int { return 1 }
func (DRPCIrreparableInspectorDescription) Method(n int) (string, drpc.Receiver, interface{}, bool) {
switch n {
case 0:
return "/satellite.inspector.IrreparableInspector/ListIrreparableSegments",
func(srv interface{}, ctx context.Context, in1, in2 interface{}) (drpc.Message, error) {
return srv.(DRPCIrreparableInspectorServer).
ListIrreparableSegments(
ctx,
in1.(*ListIrreparableSegmentsRequest),
)
}, DRPCIrreparableInspectorServer.ListIrreparableSegments, true
default:
return "", nil, nil, false
}
}
func DRPCRegisterIrreparableInspector(mux drpc.Mux, impl DRPCIrreparableInspectorServer) error {
return mux.Register(impl, DRPCIrreparableInspectorDescription{})
}
type DRPCIrreparableInspector_ListIrreparableSegmentsStream interface {
drpc.Stream
SendAndClose(*ListIrreparableSegmentsResponse) error
}
type drpcIrreparableInspectorListIrreparableSegmentsStream struct {
drpc.Stream
}
func (x *drpcIrreparableInspectorListIrreparableSegmentsStream) SendAndClose(m *ListIrreparableSegmentsResponse) error {
if err := x.MsgSend(m); err != nil {
return err
}
return x.CloseSend()
}
type DRPCHealthInspectorClient interface {
DRPCConn() drpc.Conn
// ObjectHealth will return stats about the health of an object
ObjectHealth(ctx context.Context, in *ObjectHealthRequest) (*ObjectHealthResponse, error)
// SegmentHealth will return stats about the health of a segment
SegmentHealth(ctx context.Context, in *SegmentHealthRequest) (*SegmentHealthResponse, error)
}
type drpcHealthInspectorClient struct {
cc drpc.Conn
}
func NewDRPCHealthInspectorClient(cc drpc.Conn) DRPCHealthInspectorClient {
return &drpcHealthInspectorClient{cc}
}
func (c *drpcHealthInspectorClient) DRPCConn() drpc.Conn { return c.cc }
func (c *drpcHealthInspectorClient) ObjectHealth(ctx context.Context, in *ObjectHealthRequest) (*ObjectHealthResponse, error) {
out := new(ObjectHealthResponse)
err := c.cc.Invoke(ctx, "/satellite.inspector.HealthInspector/ObjectHealth", in, out)
if err != nil {
return nil, err
}
return out, nil
}
func (c *drpcHealthInspectorClient) SegmentHealth(ctx context.Context, in *SegmentHealthRequest) (*SegmentHealthResponse, error) {
out := new(SegmentHealthResponse)
err := c.cc.Invoke(ctx, "/satellite.inspector.HealthInspector/SegmentHealth", in, out)
if err != nil {
return nil, err
}
return out, nil
}
type DRPCHealthInspectorServer interface {
// ObjectHealth will return stats about the health of an object
ObjectHealth(context.Context, *ObjectHealthRequest) (*ObjectHealthResponse, error)
// SegmentHealth will return stats about the health of a segment
SegmentHealth(context.Context, *SegmentHealthRequest) (*SegmentHealthResponse, error)
}
type DRPCHealthInspectorDescription struct{}
func (DRPCHealthInspectorDescription) NumMethods() int { return 2 }
func (DRPCHealthInspectorDescription) Method(n int) (string, drpc.Receiver, interface{}, bool) {
switch n {
case 0:
return "/satellite.inspector.HealthInspector/ObjectHealth",
func(srv interface{}, ctx context.Context, in1, in2 interface{}) (drpc.Message, error) {
return srv.(DRPCHealthInspectorServer).
ObjectHealth(
ctx,
in1.(*ObjectHealthRequest),
)
}, DRPCHealthInspectorServer.ObjectHealth, true
case 1:
return "/satellite.inspector.HealthInspector/SegmentHealth",
func(srv interface{}, ctx context.Context, in1, in2 interface{}) (drpc.Message, error) {
return srv.(DRPCHealthInspectorServer).
SegmentHealth(
ctx,
in1.(*SegmentHealthRequest),
)
}, DRPCHealthInspectorServer.SegmentHealth, true
default:
return "", nil, nil, false
}
}
func DRPCRegisterHealthInspector(mux drpc.Mux, impl DRPCHealthInspectorServer) error {
return mux.Register(impl, DRPCHealthInspectorDescription{})
}
type DRPCHealthInspector_ObjectHealthStream interface {
drpc.Stream
SendAndClose(*ObjectHealthResponse) error
}
type drpcHealthInspectorObjectHealthStream struct {
drpc.Stream
}
func (x *drpcHealthInspectorObjectHealthStream) SendAndClose(m *ObjectHealthResponse) error {
if err := x.MsgSend(m); err != nil {
return err
}
return x.CloseSend()
}
type DRPCHealthInspector_SegmentHealthStream interface {
drpc.Stream
SendAndClose(*SegmentHealthResponse) error
}
type drpcHealthInspectorSegmentHealthStream struct {
drpc.Stream
}
func (x *drpcHealthInspectorSegmentHealthStream) SendAndClose(m *SegmentHealthResponse) error {
if err := x.MsgSend(m); err != nil {
return err
}
return x.CloseSend()
}
// --- DRPC END ---
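
For illustration only (not part of this commit): a minimal sketch of wiring the generated registration helper above to a DRPC mux. countingInspector is a hypothetical stub, only the OverlayInspector service is shown, and the drpcmux import is assumed from storj.io/drpc.

package main

import (
	"context"

	"storj.io/drpc/drpcmux"

	"storj.io/storj/satellite/internalpb"
)

// countingInspector is a stub implementation of DRPCOverlayInspectorServer,
// used here only to show how DRPCRegisterOverlayInspector is called.
type countingInspector struct{}

func (countingInspector) CountNodes(ctx context.Context, req *internalpb.CountNodesRequest) (*internalpb.CountNodesResponse, error) {
	return &internalpb.CountNodesResponse{Count: 0}, nil
}

func (countingInspector) DumpNodes(ctx context.Context, req *internalpb.DumpNodesRequest) (*internalpb.DumpNodesResponse, error) {
	return &internalpb.DumpNodesResponse{}, nil
}

func main() {
	// Route "/satellite.inspector.OverlayInspector/*" RPCs to the stub; in the
	// satellite this mux would then be handed to a drpc server.
	mux := drpcmux.New()
	if err := internalpb.DRPCRegisterOverlayInspector(mux, countingInspector{}); err != nil {
		panic(err)
	}
	_ = mux
}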

View File

@ -0,0 +1,93 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.
syntax = "proto3";
option go_package = "storj.io/storj/satellite/internalpb";
import "gogo.proto";
import "node.proto";
import "pointerdb.proto";
package satellite.inspector;
service OverlayInspector {
// CountNodes returns the number of nodes in the cache
rpc CountNodes(CountNodesRequest) returns (CountNodesResponse);
// DumpNodes returns all the nodes in the cache
rpc DumpNodes(DumpNodesRequest) returns (DumpNodesResponse);
}
message CountNodesRequest {
}
message CountNodesResponse {
int64 count = 1;
}
message DumpNodesRequest {}
message DumpNodesResponse {
repeated node.Node nodes = 1;
}
service IrreparableInspector {
// ListIrreparableSegments returns damaged segments
rpc ListIrreparableSegments(ListIrreparableSegmentsRequest) returns (ListIrreparableSegmentsResponse);
}
message ListIrreparableSegmentsRequest {
int32 limit = 1;
bytes last_seen_segment_path = 2;
}
message ListIrreparableSegmentsResponse {
repeated IrreparableSegment segments = 1;
}
message IrreparableSegment {
bytes path = 1;
pointerdb.Pointer segment_detail = 2;
int32 lost_pieces = 3;
int64 last_repair_attempt = 4;
int64 repair_attempt_count = 5;
}
service HealthInspector {
// ObjectHealth will return stats about the health of an object
rpc ObjectHealth(ObjectHealthRequest) returns (ObjectHealthResponse) {}
// SegmentHealth will return stats about the health of a segment
rpc SegmentHealth(SegmentHealthRequest) returns (SegmentHealthResponse) {}
}
message ObjectHealthRequest {
bytes encrypted_path = 1; // object encrypted path
bytes bucket = 2; // object bucket name
bytes project_id = 3; // object project id
int64 start_after_segment = 4; // Get all segments after specified segment index
int64 end_before_segment = 5; // Stop at segment before specified segment index
int32 limit = 6; // Max number of segments that are checked
}
message ObjectHealthResponse {
repeated SegmentHealth segments = 1; // actual segment info
pointerdb.RedundancyScheme redundancy = 2; // expected segment info
}
message SegmentHealthRequest {
bytes bucket = 1; // segment bucket name
bytes encrypted_path = 2; // segment encrypted path
int64 segment_index = 3; // segment index
bytes project_id = 4; // segment project id
}
message SegmentHealthResponse {
SegmentHealth health = 1; // Information about a segment's health
pointerdb.RedundancyScheme redundancy = 2; // expected segment info
}
message SegmentHealth {
repeated bytes healthy_ids = 1 [(gogoproto.customtype) = "NodeID"]; // online + not disqualified
repeated bytes unhealthy_ids = 2 [(gogoproto.customtype) = "NodeID"]; // online + disqualified
repeated bytes offline_ids = 3 [(gogoproto.customtype) = "NodeID"]; // offline
bytes segment = 4; // path formatted segment index
}
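
A purely illustrative sketch (not in the commit) of how the generated Go type for the SegmentHealth message above can be summarized into the three per-segment counts the health inspector reports; the segment value below is made up.

package main

import (
	"fmt"

	"storj.io/storj/satellite/internalpb"
)

func main() {
	// An empty SegmentHealth: all three node-ID lists default to nil.
	seg := &internalpb.SegmentHealth{Segment: []byte("example-segment")}
	fmt.Printf("segment %s: %d healthy, %d unhealthy, %d offline\n",
		seg.Segment, len(seg.HealthyIds), len(seg.UnhealthyIds), len(seg.OfflineIds))
}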

View File

@ -119,7 +119,6 @@ func (m *StreamID) GetStreamId() []byte {
return nil
}
// only for satellite use
type SegmentID struct {
StreamId *StreamID `protobuf:"bytes,1,opt,name=stream_id,json=streamId,proto3" json:"stream_id,omitempty"`
PartNumber int32 `protobuf:"varint,2,opt,name=part_number,json=partNumber,proto3" json:"part_number,omitempty"`
@ -200,43 +199,44 @@ func (m *SegmentID) GetSatelliteSignature() []byte {
}
func init() {
proto.RegisterType((*StreamID)(nil), "metainfo.StreamID")
proto.RegisterType((*SegmentID)(nil), "metainfo.SegmentID")
proto.RegisterType((*StreamID)(nil), "satellite.metainfo.StreamID")
proto.RegisterType((*SegmentID)(nil), "satellite.metainfo.SegmentID")
}
func init() { proto.RegisterFile("metainfo_sat.proto", fileDescriptor_47c60bd892d94aaf) }
var fileDescriptor_47c60bd892d94aaf = []byte{
// 492 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x52, 0xcb, 0x8e, 0xd3, 0x40,
0x10, 0xc4, 0x1b, 0xf2, 0x9a, 0xbc, 0xa4, 0x59, 0x40, 0x26, 0x11, 0x4a, 0xb4, 0x12, 0x52, 0x4e,
0xb6, 0xb4, 0x7b, 0xe4, 0x44, 0x94, 0x8b, 0x25, 0x1e, 0x8b, 0xc3, 0x89, 0x8b, 0x35, 0xf6, 0xf4,
0x3a, 0x03, 0xf6, 0x8c, 0x35, 0xd3, 0x46, 0xbb, 0x7f, 0xc0, 0x91, 0xcf, 0xe2, 0x1b, 0x38, 0x2c,
0x9f, 0xc1, 0x15, 0x79, 0x1c, 0xc7, 0x91, 0xd0, 0x1e, 0xe0, 0xe6, 0xaa, 0xae, 0xea, 0xe9, 0xee,
0x32, 0xa1, 0x39, 0x20, 0x13, 0xf2, 0x46, 0x45, 0x86, 0xa1, 0x57, 0x68, 0x85, 0x8a, 0x0e, 0x1a,
0x6e, 0x4e, 0x52, 0x95, 0xaa, 0x9a, 0x9d, 0x2f, 0x53, 0xa5, 0xd2, 0x0c, 0x7c, 0x8b, 0xe2, 0xf2,
0xc6, 0x47, 0x91, 0x83, 0x41, 0x96, 0x17, 0x07, 0xc1, 0xac, 0x50, 0x42, 0x22, 0x68, 0x1e, 0x1f,
0x88, 0x69, 0xd3, 0xa7, 0xc6, 0x17, 0xdf, 0x3a, 0x64, 0xb0, 0x43, 0x0d, 0x2c, 0x0f, 0xb6, 0xf4,
0x19, 0xe9, 0xc5, 0x65, 0xf2, 0x05, 0xd0, 0x75, 0x56, 0xce, 0x7a, 0x1c, 0x1e, 0x10, 0x7d, 0x49,
0xa6, 0x20, 0x13, 0x7d, 0x57, 0x20, 0xf0, 0xa8, 0x60, 0xb8, 0x77, 0xcf, 0x6c, 0x7d, 0x72, 0x64,
0xaf, 0x19, 0xee, 0xa9, 0x4b, 0xfa, 0x5f, 0x41, 0x1b, 0xa1, 0xa4, 0xdb, 0x59, 0x39, 0xeb, 0x6e,
0xd8, 0x40, 0xfa, 0x8a, 0x10, 0x0d, 0xbc, 0x94, 0x9c, 0xc9, 0xe4, 0xce, 0x7d, 0xbc, 0x72, 0xd6,
0xa3, 0xcb, 0x85, 0xd7, 0xce, 0x16, 0x1e, 0x8b, 0xbb, 0x64, 0x0f, 0x39, 0x84, 0x27, 0x72, 0x1a,
0x90, 0x49, 0xa2, 0x81, 0xa1, 0x50, 0x32, 0xe2, 0x0c, 0xc1, 0xed, 0x5a, 0xff, 0xdc, 0xab, 0x97,
0xf7, 0x9a, 0xe5, 0xbd, 0x8f, 0xcd, 0xf2, 0x9b, 0xc1, 0x8f, 0xfb, 0xe5, 0xa3, 0xef, 0xbf, 0x96,
0x4e, 0x38, 0x6e, 0xac, 0x5b, 0x86, 0x40, 0xdf, 0x92, 0x19, 0xdc, 0x16, 0x42, 0x9f, 0x34, 0xeb,
0xfd, 0x43, 0xb3, 0x69, 0x6b, 0xb6, 0xed, 0x7c, 0x72, 0x6e, 0x18, 0x42, 0x96, 0x09, 0x84, 0xc8,
0x88, 0x54, 0x32, 0x2c, 0x35, 0xb8, 0x43, 0x7b, 0x1c, 0x7a, 0x2c, 0xed, 0x9a, 0x0a, 0x5d, 0x90,
0xa1, 0xb1, 0xc7, 0x8e, 0x04, 0x77, 0x89, 0x95, 0x0d, 0x6a, 0x22, 0xe0, 0x17, 0xbf, 0xcf, 0xc8,
0x70, 0x07, 0x69, 0x0e, 0x12, 0x83, 0x2d, 0xf5, 0x4f, 0xa5, 0x8e, 0x1d, 0x92, 0x7a, 0xc7, 0xf0,
0x9a, 0xc8, 0x5a, 0x3b, 0x5d, 0x92, 0x51, 0xc1, 0x34, 0x46, 0xb2, 0xcc, 0x63, 0xd0, 0x36, 0xa1,
0x6e, 0x48, 0x2a, 0xea, 0x9d, 0x65, 0xe8, 0x13, 0xd2, 0x15, 0x92, 0xc3, 0xed, 0x21, 0x9c, 0x1a,
0xd0, 0x2b, 0x32, 0xd1, 0x4a, 0x61, 0x54, 0x08, 0x48, 0xa0, 0x7a, 0xab, 0xba, 0xee, 0x78, 0x33,
0xab, 0x96, 0xfe, 0x79, 0xbf, 0xec, 0x5f, 0x57, 0x7c, 0xb0, 0x0d, 0x47, 0x95, 0xaa, 0x06, 0x9c,
0x7e, 0x20, 0x4f, 0x95, 0x16, 0xa9, 0x90, 0x2c, 0x8b, 0x94, 0xe6, 0xa0, 0xa3, 0x4c, 0xe4, 0x02,
0x8d, 0xdb, 0x5b, 0x75, 0xd6, 0xa3, 0xcb, 0x17, 0xed, 0xa0, 0xaf, 0x39, 0xd7, 0x60, 0x0c, 0xf0,
0xf7, 0x95, 0xec, 0x4d, 0xa5, 0x0a, 0xcf, 0x1b, 0x6f, 0xcb, 0x99, 0xbf, 0x53, 0xee, 0xff, 0x77,
0xca, 0x0f, 0xc4, 0x32, 0x78, 0x28, 0x96, 0xcd, 0xe2, 0xd3, 0x73, 0x83, 0x4a, 0x7f, 0xf6, 0x84,
0xf2, 0xed, 0x87, 0x6f, 0x7f, 0x4c, 0xc9, 0xb2, 0x22, 0x8e, 0x7b, 0xf6, 0xe5, 0xab, 0x3f, 0x01,
0x00, 0x00, 0xff, 0xff, 0xcf, 0xd5, 0x29, 0xca, 0x96, 0x03, 0x00, 0x00,
// 501 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x52, 0xcd, 0x6e, 0xd3, 0x4c,
0x14, 0xfd, 0xfc, 0x85, 0xfc, 0x4d, 0xfe, 0xa4, 0x29, 0x20, 0x2b, 0x05, 0x25, 0x2a, 0xaa, 0x94,
0x95, 0x2d, 0xb5, 0x2b, 0xc4, 0x8a, 0x28, 0x1b, 0x4b, 0xfc, 0x14, 0x87, 0x15, 0x1b, 0x6b, 0xec,
0xb9, 0x75, 0x06, 0xec, 0x19, 0x6b, 0xe6, 0x1a, 0xb5, 0x4b, 0x76, 0x2c, 0x79, 0x2c, 0x9e, 0x81,
0x45, 0x79, 0x15, 0xe4, 0x71, 0x1c, 0x47, 0xaa, 0xba, 0x80, 0x9d, 0xcf, 0xb9, 0xe7, 0x1e, 0xdf,
0x7b, 0xcf, 0x10, 0x9a, 0x03, 0x32, 0x21, 0xaf, 0x55, 0x64, 0x18, 0x7a, 0x85, 0x56, 0xa8, 0x28,
0x35, 0x0c, 0x21, 0xcb, 0x04, 0x82, 0xd7, 0x54, 0xe7, 0x24, 0x55, 0xa9, 0xaa, 0xeb, 0xf3, 0x45,
0xaa, 0x54, 0x9a, 0x81, 0x6f, 0x51, 0x5c, 0x5e, 0xfb, 0x28, 0x72, 0x30, 0xc8, 0xf2, 0x62, 0x2f,
0x98, 0x15, 0x4a, 0x48, 0x04, 0xcd, 0xe3, 0x3d, 0x31, 0x6d, 0x7c, 0x6a, 0x7c, 0xf6, 0xbd, 0x43,
0x06, 0x5b, 0xd4, 0xc0, 0xf2, 0x60, 0x43, 0x9f, 0x92, 0x5e, 0x5c, 0x26, 0x5f, 0x00, 0x5d, 0x67,
0xe9, 0xac, 0xc6, 0xe1, 0x1e, 0xd1, 0x73, 0x32, 0x05, 0x99, 0xe8, 0xdb, 0x02, 0x81, 0x47, 0x05,
0xc3, 0x9d, 0xfb, 0xbf, 0xad, 0x4f, 0x0e, 0xec, 0x15, 0xc3, 0x1d, 0x75, 0x49, 0xff, 0x2b, 0x68,
0x23, 0x94, 0x74, 0x3b, 0x4b, 0x67, 0xd5, 0x0d, 0x1b, 0x48, 0x5f, 0x11, 0xa2, 0x81, 0x97, 0x92,
0x33, 0x99, 0xdc, 0xba, 0x8f, 0x96, 0xce, 0x6a, 0x74, 0x71, 0xea, 0xb5, 0xb3, 0x85, 0x87, 0xe2,
0x36, 0xd9, 0x41, 0x0e, 0xe1, 0x91, 0x9c, 0x06, 0x64, 0x92, 0x68, 0x60, 0x28, 0x94, 0x8c, 0x38,
0x43, 0x70, 0xbb, 0xb6, 0x7f, 0xee, 0xd5, 0xcb, 0x7b, 0xcd, 0xf2, 0xde, 0xc7, 0x66, 0xf9, 0xf5,
0xe0, 0xe7, 0xdd, 0xe2, 0xbf, 0x1f, 0xbf, 0x17, 0x4e, 0x38, 0x6e, 0x5a, 0x37, 0x0c, 0x81, 0xbe,
0x25, 0x33, 0xb8, 0x29, 0x84, 0x3e, 0x32, 0xeb, 0xfd, 0x85, 0xd9, 0xb4, 0x6d, 0xb6, 0x76, 0x3e,
0x39, 0x39, 0x04, 0x14, 0x19, 0x91, 0x4a, 0x86, 0xa5, 0x06, 0x77, 0x68, 0x8f, 0xd3, 0x66, 0xb7,
0x6d, 0x2a, 0xf4, 0x94, 0x0c, 0x8d, 0x3d, 0x76, 0x24, 0xb8, 0x4b, 0xac, 0x6c, 0x50, 0x13, 0x01,
0x3f, 0xfb, 0xd6, 0x21, 0xc3, 0x2d, 0xa4, 0x39, 0x48, 0x0c, 0x36, 0xf4, 0xe5, 0xb1, 0xd4, 0xb1,
0x43, 0x3e, 0xf3, 0xee, 0x3f, 0x07, 0xaf, 0x09, 0xaf, 0x35, 0xa2, 0x0b, 0x32, 0x2a, 0x98, 0xc6,
0x48, 0x96, 0x79, 0x0c, 0xda, 0x66, 0xd5, 0x0d, 0x49, 0x45, 0xbd, 0xb3, 0x0c, 0x7d, 0x4c, 0xba,
0x42, 0x72, 0xb8, 0xd9, 0xc7, 0x54, 0x03, 0x7a, 0x49, 0x26, 0x5a, 0x29, 0x8c, 0x0a, 0x01, 0x09,
0x54, 0x7f, 0xad, 0xee, 0x3c, 0x5e, 0xcf, 0xaa, 0xf5, 0x7f, 0xdd, 0x2d, 0xfa, 0x57, 0x15, 0x1f,
0x6c, 0xc2, 0x51, 0xa5, 0xaa, 0x01, 0xa7, 0x1f, 0xc8, 0x13, 0xa5, 0x45, 0x2a, 0x24, 0xcb, 0x22,
0xa5, 0x39, 0xe8, 0x28, 0x13, 0xb9, 0x40, 0xe3, 0xf6, 0x96, 0x9d, 0xd5, 0xe8, 0xe2, 0x79, 0x3b,
0xe8, 0x6b, 0xce, 0x35, 0x18, 0x03, 0xfc, 0x7d, 0x25, 0x7b, 0x53, 0xa9, 0xc2, 0x93, 0xa6, 0xb7,
0xe5, 0xcc, 0xfd, 0xbc, 0xfb, 0xff, 0x9c, 0xf7, 0x03, 0x01, 0x0d, 0x1e, 0x0a, 0x68, 0x7d, 0xfe,
0xe9, 0x85, 0x41, 0xa5, 0x3f, 0x7b, 0x42, 0xf9, 0xf6, 0xc3, 0x3f, 0x88, 0x7c, 0xfb, 0x58, 0x25,
0xcb, 0x8a, 0x38, 0xee, 0xd9, 0x19, 0x2e, 0xff, 0x04, 0x00, 0x00, 0xff, 0xff, 0x46, 0x2d, 0x8f,
0x40, 0xb4, 0x03, 0x00, 0x00,
}

View File

@ -2,16 +2,15 @@
// See LICENSE for copying information.
syntax = "proto3";
option go_package = "storj.io/storj/internalpb";
option go_package = "storj.io/storj/satellite/internalpb";
package metainfo;
package satellite.metainfo;
import "gogo.proto";
import "google/protobuf/timestamp.proto";
import "pointerdb.proto";
import "metainfo.proto";
message StreamID {
bytes bucket = 1;
bytes encrypted_path = 2;
@ -27,14 +26,13 @@ message StreamID {
bytes stream_id = 10;
}
// only for satellite use
message SegmentID {
StreamID stream_id = 1;
int32 part_number = 2;
int32 index = 3;
bytes root_piece_id = 5 [(gogoproto.customtype) = "PieceID", (gogoproto.nullable) = false];
repeated metainfo.AddressedOrderLimit original_order_limits = 6;
repeated .metainfo.AddressedOrderLimit original_order_limits = 6;
google.protobuf.Timestamp creation_date = 7 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
bytes satellite_signature = 8;

View File

@ -0,0 +1,41 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.
package internalpb
import (
"database/sql/driver"
proto "github.com/gogo/protobuf/proto"
"github.com/zeebo/errs"
"storj.io/common/pb"
)
var scanError = errs.Class("Protobuf Scanner")
var valueError = errs.Class("Protobuf Valuer")
// scan automatically converts database []byte to proto.Messages.
func scan(msg proto.Message, value interface{}) error {
bytes, ok := value.([]byte)
if !ok {
return scanError.New("%T was %T, expected []byte", msg, value)
}
return scanError.Wrap(pb.Unmarshal(bytes, msg))
}
// value automatically converts proto.Messages to database []byte.
func value(msg proto.Message) (driver.Value, error) {
value, err := pb.Marshal(msg)
return value, valueError.Wrap(err)
}
// Scan implements the Scanner interface.
func (n *InjuredSegment) Scan(value interface{}) error {
return scan(n, value)
}
// Value implements the driver Valuer interface.
func (n InjuredSegment) Value() (driver.Value, error) {
return value(&n)
}
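
A hedged usage sketch, not part of the commit: because InjuredSegment implements sql.Scanner and driver.Valuer via the helpers above, it can be read from database/sql directly. The table and column names below are assumptions.

package repairexample

import (
	"context"
	"database/sql"

	"storj.io/storj/satellite/internalpb"
)

// loadInjuredSegment reads one protobuf-encoded row; Scan calls InjuredSegment.Scan,
// which unmarshals the stored bytes into the message.
func loadInjuredSegment(ctx context.Context, db *sql.DB, path []byte) (*internalpb.InjuredSegment, error) {
	var seg internalpb.InjuredSegment
	err := db.QueryRowContext(ctx, `SELECT data FROM injuredsegments WHERE path = $1`, path).Scan(&seg)
	if err != nil {
		return nil, err
	}
	return &seg, nil
}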

View File

@ -7,3 +7,6 @@ import "storj.io/common/storj"
// PieceID is an alias to storj.PieceID for use in generated protobuf code.
type PieceID = storj.PieceID
// NodeID is an alias to storj.NodeID for use in generated protobuf code.
type NodeID = storj.NodeID

View File

@ -29,6 +29,7 @@ import (
"storj.io/common/testrand"
"storj.io/storj/private/testplanet"
"storj.io/storj/satellite"
"storj.io/storj/satellite/internalpb"
satMetainfo "storj.io/storj/satellite/metainfo"
"storj.io/storj/satellite/metainfo/metabase"
"storj.io/uplink"
@ -166,10 +167,10 @@ func TestRevokeMacaroon(t *testing.T) {
assert.True(t, errs2.IsRPC(err, rpcstatus.PermissionDenied))
signer := signing.SignerFromFullIdentity(planet.Satellites[0].Identity)
satStreamID := &pb.SatStreamID{
satStreamID := &internalpb.StreamID{
CreationDate: time.Now(),
}
signedStreamID, err := signing.SignStreamID(ctx, signer, satStreamID)
signedStreamID, err := satMetainfo.SignStreamID(ctx, signer, satStreamID)
require.NoError(t, err)
encodedStreamID, err := pb.Marshal(signedStreamID)
@ -189,7 +190,7 @@ func TestRevokeMacaroon(t *testing.T) {
// these methods need SegmentID
signedSegmentID, err := signing.SignSegmentID(ctx, signer, &pb.SatSegmentID{
signedSegmentID, err := satMetainfo.SignSegmentID(ctx, signer, &internalpb.SegmentID{
StreamId: satStreamID,
CreationDate: time.Now(),
})
@ -253,10 +254,10 @@ func TestInvalidAPIKey(t *testing.T) {
// these methods need StreamID to do authentication
signer := signing.SignerFromFullIdentity(planet.Satellites[0].Identity)
satStreamID := &pb.SatStreamID{
satStreamID := &internalpb.StreamID{
CreationDate: time.Now(),
}
signedStreamID, err := signing.SignStreamID(ctx, signer, satStreamID)
signedStreamID, err := satMetainfo.SignStreamID(ctx, signer, satStreamID)
require.NoError(t, err)
encodedStreamID, err := pb.Marshal(signedStreamID)
@ -279,7 +280,7 @@ func TestInvalidAPIKey(t *testing.T) {
// these methods need SegmentID
signedSegmentID, err := signing.SignSegmentID(ctx, signer, &pb.SatSegmentID{
signedSegmentID, err := satMetainfo.SignSegmentID(ctx, signer, &internalpb.SegmentID{
StreamId: satStreamID,
CreationDate: time.Now(),
})
@ -940,7 +941,7 @@ func TestIDs(t *testing.T) {
satellitePeer := signing.SignerFromFullIdentity(planet.Satellites[0].Identity)
{ // streamID expired
signedStreamID, err := signing.SignStreamID(ctx, satellitePeer, &pb.SatStreamID{
signedStreamID, err := satMetainfo.SignStreamID(ctx, satellitePeer, &internalpb.StreamID{
CreationDate: time.Now().Add(-36 * time.Hour),
})
require.NoError(t, err)
@ -958,7 +959,7 @@ func TestIDs(t *testing.T) {
}
{ // segment id missing stream id
signedSegmentID, err := signing.SignSegmentID(ctx, satellitePeer, &pb.SatSegmentID{
signedSegmentID, err := satMetainfo.SignSegmentID(ctx, satellitePeer, &internalpb.SegmentID{
CreationDate: time.Now().Add(-1 * time.Hour),
})
require.NoError(t, err)
@ -976,9 +977,9 @@ func TestIDs(t *testing.T) {
}
{ // segmentID expired
signedSegmentID, err := signing.SignSegmentID(ctx, satellitePeer, &pb.SatSegmentID{
signedSegmentID, err := satMetainfo.SignSegmentID(ctx, satellitePeer, &internalpb.SegmentID{
CreationDate: time.Now().Add(-36 * time.Hour),
StreamId: &pb.SatStreamID{
StreamId: &internalpb.StreamID{
CreationDate: time.Now(),
},
})

View File

@ -8,7 +8,7 @@ import (
"github.com/zeebo/errs"
"storj.io/common/pb"
"storj.io/storj/satellite/internalpb"
)
// Inspector is an RPC service for inspecting overlay internals
@ -24,20 +24,20 @@ func NewInspector(service *Service) *Inspector {
}
// CountNodes returns the number of nodes in the overlay.
func (srv *Inspector) CountNodes(ctx context.Context, req *pb.CountNodesRequest) (_ *pb.CountNodesResponse, err error) {
func (srv *Inspector) CountNodes(ctx context.Context, req *internalpb.CountNodesRequest) (_ *internalpb.CountNodesResponse, err error) {
defer mon.Task()(&ctx)(&err)
overlayKeys, err := srv.service.Inspect(ctx)
if err != nil {
return nil, err
}
return &pb.CountNodesResponse{
return &internalpb.CountNodesResponse{
Count: int64(len(overlayKeys)),
}, nil
}
// DumpNodes returns all of the nodes in the overlay.
func (srv *Inspector) DumpNodes(ctx context.Context, req *pb.DumpNodesRequest) (_ *pb.DumpNodesResponse, err error) {
func (srv *Inspector) DumpNodes(ctx context.Context, req *internalpb.DumpNodesRequest) (_ *internalpb.DumpNodesResponse, err error) {
defer mon.Task()(&ctx)(&err)
return &pb.DumpNodesResponse{}, errs.New("Not Implemented")
return &internalpb.DumpNodesResponse{}, errs.New("Not Implemented")
}

View File

@ -14,6 +14,7 @@ import (
"storj.io/common/pb"
"storj.io/common/storj"
"storj.io/storj/satellite/internalpb"
"storj.io/storj/storage"
)
@ -68,7 +69,7 @@ type DB interface {
UpdateCheckIn(ctx context.Context, node NodeCheckInInfo, timestamp time.Time, config NodeSelectionConfig) (err error)
// UpdateAuditHistory updates a node's audit history with an online or offline audit.
UpdateAuditHistory(ctx context.Context, nodeID storj.NodeID, auditTime time.Time, online bool, config AuditHistoryConfig) (auditHistory *pb.AuditHistory, err error)
UpdateAuditHistory(ctx context.Context, nodeID storj.NodeID, auditTime time.Time, online bool, config AuditHistoryConfig) (auditHistory *internalpb.AuditHistory, err error)
// AllPieceCounts returns a map of node IDs to piece counts from the db.
AllPieceCounts(ctx context.Context) (pieceCounts map[storj.NodeID]int, err error)

View File

@ -15,6 +15,7 @@ import (
"storj.io/common/errs2"
"storj.io/common/pb"
"storj.io/common/sync2"
"storj.io/storj/satellite/internalpb"
"storj.io/storj/satellite/metainfo"
"storj.io/storj/satellite/metainfo/metabase"
"storj.io/storj/satellite/overlay"
@ -204,7 +205,7 @@ func (checker *Checker) updateIrreparableSegmentStatus(ctx context.Context, poin
// If the segment is suddenly entirely healthy again, we don't need to repair and we don't need to
// keep it in the irreparabledb queue either.
if numHealthy >= redundancy.MinReq && numHealthy <= repairThreshold && numHealthy < redundancy.SuccessThreshold {
_, err = checker.repairQueue.Insert(ctx, &pb.InjuredSegment{
_, err = checker.repairQueue.Insert(ctx, &internalpb.InjuredSegment{
Path: key,
LostPieces: missingPieces,
InsertedTime: time.Now().UTC(),
@ -221,7 +222,7 @@ func (checker *Checker) updateIrreparableSegmentStatus(ctx context.Context, poin
} else if numHealthy < redundancy.MinReq && numHealthy < repairThreshold {
// make an entry into the irreparable table
segmentInfo := &pb.IrreparableSegment{
segmentInfo := &internalpb.IrreparableSegment{
Path: key,
SegmentDetail: pointer,
LostPieces: int32(len(missingPieces)),
@ -308,7 +309,7 @@ func (obs *checkerObserver) RemoteSegment(ctx context.Context, segment *metainfo
// except for the case when the repair and success thresholds are the same (a case usually seen during testing)
if numHealthy >= required && numHealthy <= repairThreshold && numHealthy < successThreshold {
obs.monStats.remoteSegmentsNeedingRepair++
alreadyInserted, err := obs.repairQueue.Insert(ctx, &pb.InjuredSegment{
alreadyInserted, err := obs.repairQueue.Insert(ctx, &internalpb.InjuredSegment{
Path: key,
LostPieces: missingPieces,
InsertedTime: time.Now().UTC(),
@ -344,7 +345,7 @@ func (obs *checkerObserver) RemoteSegment(ctx context.Context, segment *metainfo
obs.monStats.remoteSegmentsLost++
// make an entry into the irreparable table
segmentInfo := &pb.IrreparableSegment{
segmentInfo := &internalpb.IrreparableSegment{
Path: key,
SegmentDetail: segment.Pointer, // TODO: replace with something better than pb.Pointer
LostPieces: int32(len(missingPieces)),

View File

@ -8,7 +8,7 @@ import (
"github.com/spacemonkeygo/monkit/v3"
"storj.io/common/pb"
"storj.io/storj/satellite/internalpb"
)
var (
@ -28,7 +28,7 @@ func NewInspector(irrdb DB) *Inspector {
}
// ListIrreparableSegments returns a number of irreparable segments by limit and offset.
func (srv *Inspector) ListIrreparableSegments(ctx context.Context, req *pb.ListIrreparableSegmentsRequest) (_ *pb.ListIrreparableSegmentsResponse, err error) {
func (srv *Inspector) ListIrreparableSegments(ctx context.Context, req *internalpb.ListIrreparableSegmentsRequest) (_ *internalpb.ListIrreparableSegmentsResponse, err error) {
defer mon.Task()(&ctx)(&err)
last := req.GetLastSeenSegmentPath()
if len(req.GetLastSeenSegmentPath()) == 0 {
@ -39,5 +39,5 @@ func (srv *Inspector) ListIrreparableSegments(ctx context.Context, req *pb.ListI
return nil, err
}
return &pb.ListIrreparableSegmentsResponse{Segments: segments}, err
return &internalpb.ListIrreparableSegmentsResponse{Segments: segments}, err
}

View File

@ -6,7 +6,7 @@ package irreparable
import (
"context"
"storj.io/common/pb"
"storj.io/storj/satellite/internalpb"
"storj.io/storj/satellite/metainfo/metabase"
)
@ -15,11 +15,11 @@ import (
// architecture: Database
type DB interface {
// IncrementRepairAttempts increments the repair attempts.
IncrementRepairAttempts(ctx context.Context, segmentInfo *pb.IrreparableSegment) error
IncrementRepairAttempts(ctx context.Context, segmentInfo *internalpb.IrreparableSegment) error
// Get returns irreparable segment info based on segmentKey.
Get(ctx context.Context, segmentKey metabase.SegmentKey) (*pb.IrreparableSegment, error)
Get(ctx context.Context, segmentKey metabase.SegmentKey) (*internalpb.IrreparableSegment, error)
// GetLimited returns a list of irreparable segment info starting after the last segment info we retrieved
GetLimited(ctx context.Context, limit int, lastSeenSegmentKey metabase.SegmentKey) ([]*pb.IrreparableSegment, error)
GetLimited(ctx context.Context, limit int, lastSeenSegmentKey metabase.SegmentKey) ([]*internalpb.IrreparableSegment, error)
// Delete removes irreparable segment info based on segmentKey.
Delete(ctx context.Context, segmentKey metabase.SegmentKey) error
}

View File

@ -14,6 +14,7 @@ import (
"storj.io/common/pb"
"storj.io/common/testcontext"
"storj.io/storj/satellite"
"storj.io/storj/satellite/internalpb"
"storj.io/storj/satellite/satellitedb/satellitedbtest"
)
@ -22,9 +23,9 @@ func TestIrreparable(t *testing.T) {
irrdb := db.Irreparable()
// Create and insert test segment infos into DB
var segments []*pb.IrreparableSegment
var segments []*internalpb.IrreparableSegment
for i := 0; i < 3; i++ {
segments = append(segments, &pb.IrreparableSegment{
segments = append(segments, &internalpb.IrreparableSegment{
Path: []byte(strconv.Itoa(i)),
SegmentDetail: &pb.Pointer{
CreationDate: time.Now(),

View File

@ -7,7 +7,7 @@ import (
"context"
"time"
"storj.io/common/pb"
"storj.io/storj/satellite/internalpb"
)
// RepairQueue implements queueing for segments that need repairing.
@ -16,15 +16,15 @@ import (
// architecture: Database
type RepairQueue interface {
// Insert adds an injured segment.
Insert(ctx context.Context, s *pb.InjuredSegment, numHealthy int) (alreadyInserted bool, err error)
Insert(ctx context.Context, s *internalpb.InjuredSegment, numHealthy int) (alreadyInserted bool, err error)
// Select gets an injured segment.
Select(ctx context.Context) (*pb.InjuredSegment, error)
Select(ctx context.Context) (*internalpb.InjuredSegment, error)
// Delete removes an injured segment.
Delete(ctx context.Context, s *pb.InjuredSegment) error
Delete(ctx context.Context, s *internalpb.InjuredSegment) error
// Clean removes all segments last updated before a certain time
Clean(ctx context.Context, before time.Time) (deleted int64, err error)
// SelectN lists limit amount of injured segments.
SelectN(ctx context.Context, limit int) ([]pb.InjuredSegment, error)
SelectN(ctx context.Context, limit int) ([]internalpb.InjuredSegment, error)
// Count counts the number of segments in the repair queue.
Count(ctx context.Context) (count int, err error)
}
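
An illustrative consumer of the RepairQueue interface above (not satellite code); the repair step itself is elided and the package path is assumed to be storj.io/storj/satellite/repair/queue.

package repairexample

import (
	"context"

	"storj.io/storj/satellite/repair/queue"
)

// drainOne pops the next injured segment and removes it from the queue once handled.
func drainOne(ctx context.Context, q queue.RepairQueue) error {
	seg, err := q.Select(ctx)
	if err != nil {
		return err
	}
	// ... attempt the repair of seg here ...
	return q.Delete(ctx, seg)
}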

View File

@ -13,9 +13,9 @@ import (
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"storj.io/common/pb"
"storj.io/common/testcontext"
"storj.io/storj/satellite"
"storj.io/storj/satellite/internalpb"
"storj.io/storj/satellite/satellitedb/dbx"
"storj.io/storj/satellite/satellitedb/satellitedbtest"
"storj.io/storj/storage"
@ -29,7 +29,7 @@ func TestUntilEmpty(t *testing.T) {
pathsMap := make(map[string]int)
for i := 0; i < 20; i++ {
path := "/path/" + strconv.Itoa(i)
injuredSeg := &pb.InjuredSegment{Path: []byte(path)}
injuredSeg := &internalpb.InjuredSegment{Path: []byte(path)}
alreadyInserted, err := repairQueue.Insert(ctx, injuredSeg, 10)
require.NoError(t, err)
require.False(t, alreadyInserted)
@ -62,7 +62,7 @@ func TestOrder(t *testing.T) {
olderRepairPath := []byte("/path/older")
for _, path := range [][]byte{oldRepairPath, recentRepairPath, nullPath, olderRepairPath} {
injuredSeg := &pb.InjuredSegment{Path: path}
injuredSeg := &internalpb.InjuredSegment{Path: path}
alreadyInserted, err := repairQueue.Insert(ctx, injuredSeg, 10)
require.NoError(t, err)
require.False(t, alreadyInserted)
@ -157,7 +157,7 @@ func TestOrderHealthyPieces(t *testing.T) {
})
for _, item := range injuredSegList {
// first, insert the injured segment
injuredSeg := &pb.InjuredSegment{Path: item.path}
injuredSeg := &internalpb.InjuredSegment{Path: item.path}
alreadyInserted, err := repairQueue.Insert(ctx, injuredSeg, item.health)
require.NoError(t, err)
require.False(t, alreadyInserted)
@ -221,7 +221,7 @@ func TestOrderOverwrite(t *testing.T) {
{[]byte("path/a"), 8},
}
for i, item := range injuredSegList {
injuredSeg := &pb.InjuredSegment{Path: item.path}
injuredSeg := &internalpb.InjuredSegment{Path: item.path}
alreadyInserted, err := repairQueue.Insert(ctx, injuredSeg, item.health)
require.NoError(t, err)
if i == 2 {
@ -256,7 +256,7 @@ func TestCount(t *testing.T) {
numSegments := 20
for i := 0; i < numSegments; i++ {
path := "/path/" + strconv.Itoa(i)
injuredSeg := &pb.InjuredSegment{Path: []byte(path)}
injuredSeg := &internalpb.InjuredSegment{Path: []byte(path)}
alreadyInserted, err := repairQueue.Insert(ctx, injuredSeg, 10)
require.NoError(t, err)
require.False(t, alreadyInserted)

View File

@ -15,6 +15,7 @@ import (
"storj.io/common/pb"
"storj.io/common/testcontext"
"storj.io/storj/satellite"
"storj.io/storj/satellite/internalpb"
"storj.io/storj/satellite/satellitedb/satellitedbtest"
"storj.io/storj/storage"
)
@ -23,7 +24,7 @@ func TestInsertSelect(t *testing.T) {
satellitedbtest.Run(t, func(ctx *testcontext.Context, t *testing.T, db satellite.DB) {
q := db.RepairQueue()
seg := &pb.InjuredSegment{
seg := &internalpb.InjuredSegment{
Path: []byte("abc"),
LostPieces: []int32{int32(1), int32(3)},
}
@ -42,7 +43,7 @@ func TestInsertDuplicate(t *testing.T) {
satellitedbtest.Run(t, func(ctx *testcontext.Context, t *testing.T, db satellite.DB) {
q := db.RepairQueue()
seg := &pb.InjuredSegment{
seg := &internalpb.InjuredSegment{
Path: []byte("abc"),
LostPieces: []int32{int32(1), int32(3)},
}
@ -70,9 +71,9 @@ func TestSequential(t *testing.T) {
q := db.RepairQueue()
const N = 20
var addSegs []*pb.InjuredSegment
var addSegs []*internalpb.InjuredSegment
for i := 0; i < N; i++ {
seg := &pb.InjuredSegment{
seg := &internalpb.InjuredSegment{
Path: []byte(strconv.Itoa(i)),
LostPieces: []int32{int32(i)},
}
@ -108,14 +109,14 @@ func TestParallel(t *testing.T) {
satellitedbtest.Run(t, func(ctx *testcontext.Context, t *testing.T, db satellite.DB) {
q := db.RepairQueue()
const N = 20
entries := make(chan *pb.InjuredSegment, N)
entries := make(chan *internalpb.InjuredSegment, N)
var inserts errs2.Group
// Add to queue concurrently
for i := 0; i < N; i++ {
i := i
inserts.Go(func() error {
_, err := q.Insert(ctx, &pb.InjuredSegment{
_, err := q.Insert(ctx, &internalpb.InjuredSegment{
Path: []byte(strconv.Itoa(i)),
LostPieces: []int32{int32(i)},
}, 10)
@ -146,7 +147,7 @@ func TestParallel(t *testing.T) {
require.Empty(t, remove.Wait(), "unexpected queue.Select/Delete errors")
close(entries)
var items []*pb.InjuredSegment
var items []*internalpb.InjuredSegment
for segment := range entries {
items = append(items, segment)
}
@ -166,15 +167,15 @@ func TestClean(t *testing.T) {
satellitedbtest.Run(t, func(ctx *testcontext.Context, t *testing.T, db satellite.DB) {
q := db.RepairQueue()
seg1 := &pb.InjuredSegment{
seg1 := &internalpb.InjuredSegment{
Path: []byte("seg1"),
LostPieces: []int32{int32(1), int32(3)},
}
seg2 := &pb.InjuredSegment{
seg2 := &internalpb.InjuredSegment{
Path: []byte("seg2"),
LostPieces: []int32{int32(1), int32(3)},
}
seg3 := &pb.InjuredSegment{
seg3 := &internalpb.InjuredSegment{
Path: []byte("seg3"),
LostPieces: []int32{int32(1), int32(3)},
}

View File

@ -13,8 +13,8 @@ import (
"golang.org/x/sync/semaphore"
"storj.io/common/memory"
"storj.io/common/pb"
"storj.io/common/sync2"
"storj.io/storj/satellite/internalpb"
"storj.io/storj/satellite/repair/irreparable"
"storj.io/storj/satellite/repair/queue"
"storj.io/storj/storage"
@ -149,7 +149,7 @@ func (service *Service) process(ctx context.Context) (err error) {
return nil
}
func (service *Service) worker(ctx context.Context, seg *pb.InjuredSegment) (err error) {
func (service *Service) worker(ctx context.Context, seg *internalpb.InjuredSegment) (err error) {
defer mon.Task()(&ctx)(&err)
workerStartTime := time.Now().UTC()
@ -161,7 +161,7 @@ func (service *Service) worker(ctx context.Context, seg *pb.InjuredSegment) (err
if irreparableErr, ok := err.(*irreparableError); ok {
service.log.Error("segment could not be repaired! adding to irreparableDB for more attention",
zap.Error(err))
segmentInfo := &pb.IrreparableSegment{
segmentInfo := &internalpb.IrreparableSegment{
Path: seg.GetPath(),
SegmentDetail: irreparableErr.segmentInfo,
LostPieces: irreparableErr.piecesRequired - irreparableErr.piecesAvailable,

View File

@ -12,11 +12,12 @@ import (
"storj.io/common/pb"
"storj.io/common/storj"
"storj.io/storj/satellite/internalpb"
"storj.io/storj/satellite/overlay"
"storj.io/storj/satellite/satellitedb/dbx"
)
func addAudit(a *pb.AuditHistory, auditTime time.Time, online bool, config overlay.AuditHistoryConfig) error {
func addAudit(a *internalpb.AuditHistory, auditTime time.Time, online bool, config overlay.AuditHistoryConfig) error {
newAuditWindowStartTime := auditTime.Truncate(config.WindowSize)
earliestWindow := newAuditWindowStartTime.Add(-config.TrackingPeriod)
// windowsModified is used to determine whether we will need to recalculate the score because windows have been added or removed.
@ -38,7 +39,7 @@ func addAudit(a *pb.AuditHistory, auditTime time.Time, online bool, config overl
// if there are no windows or the latest window has passed, add another window
if len(a.Windows) == 0 || a.Windows[len(a.Windows)-1].WindowStart.Before(newAuditWindowStartTime) {
windowsModified = true
a.Windows = append(a.Windows, &pb.AuditWindow{WindowStart: newAuditWindowStartTime})
a.Windows = append(a.Windows, &internalpb.AuditWindow{WindowStart: newAuditWindowStartTime})
}
latestIndex := len(a.Windows) - 1
@ -77,7 +78,7 @@ func addAudit(a *pb.AuditHistory, auditTime time.Time, online bool, config overl
}
// UpdateAuditHistory updates a node's audit history with an online or offline audit.
func (cache *overlaycache) UpdateAuditHistory(ctx context.Context, nodeID storj.NodeID, auditTime time.Time, online bool, config overlay.AuditHistoryConfig) (history *pb.AuditHistory, err error) {
func (cache *overlaycache) UpdateAuditHistory(ctx context.Context, nodeID storj.NodeID, auditTime time.Time, online bool, config overlay.AuditHistoryConfig) (history *internalpb.AuditHistory, err error) {
err = cache.db.WithTx(ctx, func(ctx context.Context, tx *dbx.Tx) (err error) {
_, err = tx.Tx.ExecContext(ctx, "SET TRANSACTION ISOLATION LEVEL SERIALIZABLE")
if err != nil {
@ -93,7 +94,7 @@ func (cache *overlaycache) UpdateAuditHistory(ctx context.Context, nodeID storj.
return history, err
}
func (cache *overlaycache) updateAuditHistoryWithTx(ctx context.Context, tx *dbx.Tx, nodeID storj.NodeID, auditTime time.Time, online bool, config overlay.AuditHistoryConfig) (*pb.AuditHistory, error) {
func (cache *overlaycache) updateAuditHistoryWithTx(ctx context.Context, tx *dbx.Tx, nodeID storj.NodeID, auditTime time.Time, online bool, config overlay.AuditHistoryConfig) (*internalpb.AuditHistory, error) {
// get and deserialize node audit history
historyBytes := []byte{}
newEntry := false
@ -110,7 +111,7 @@ func (cache *overlaycache) updateAuditHistoryWithTx(ctx context.Context, tx *dbx
historyBytes = dbAuditHistory.History
}
history := &pb.AuditHistory{}
history := &internalpb.AuditHistory{}
err = pb.Unmarshal(historyBytes, history)
if err != nil {
return history, err

View File

@ -9,6 +9,7 @@ import (
"errors"
"storj.io/common/pb"
"storj.io/storj/satellite/internalpb"
"storj.io/storj/satellite/metainfo/metabase"
"storj.io/storj/satellite/satellitedb/dbx"
)
@ -18,7 +19,7 @@ type irreparableDB struct {
}
// IncrementRepairAttempts increments the repair attempts field of a db entry.
func (db *irreparableDB) IncrementRepairAttempts(ctx context.Context, segmentInfo *pb.IrreparableSegment) (err error) {
func (db *irreparableDB) IncrementRepairAttempts(ctx context.Context, segmentInfo *internalpb.IrreparableSegment) (err error) {
defer mon.Task()(&ctx)(&err)
err = db.db.WithTx(ctx, func(ctx context.Context, tx *dbx.Tx) (err error) {
bytes, err := pb.Marshal(segmentInfo.SegmentDetail)
@ -58,7 +59,7 @@ func (db *irreparableDB) IncrementRepairAttempts(ctx context.Context, segmentInf
}
// Get retrieves an irreparable segment's info from the db.
func (db *irreparableDB) Get(ctx context.Context, segmentKey metabase.SegmentKey) (resp *pb.IrreparableSegment, err error) {
func (db *irreparableDB) Get(ctx context.Context, segmentKey metabase.SegmentKey) (resp *internalpb.IrreparableSegment, err error) {
defer mon.Task()(&ctx)(&err)
dbxInfo, err := db.db.Get_Irreparabledb_By_Segmentpath(ctx, dbx.Irreparabledb_Segmentpath(segmentKey))
if err != nil {
@ -72,7 +73,7 @@ func (db *irreparableDB) Get(ctx context.Context, segmentKey metabase.SegmentKey
return nil, Error.Wrap(err)
}
return &pb.IrreparableSegment{
return &internalpb.IrreparableSegment{
Path: dbxInfo.Segmentpath,
SegmentDetail: p,
LostPieces: int32(dbxInfo.PiecesLostCount),
@ -82,7 +83,7 @@ func (db *irreparableDB) Get(ctx context.Context, segmentKey metabase.SegmentKey
}
// GetLimited returns a list of irreparable segment info starting after the last segment info we retrieved.
func (db *irreparableDB) GetLimited(ctx context.Context, limit int, lastSeenSegmentKey metabase.SegmentKey) (resp []*pb.IrreparableSegment, err error) {
func (db *irreparableDB) GetLimited(ctx context.Context, limit int, lastSeenSegmentKey metabase.SegmentKey) (resp []*internalpb.IrreparableSegment, err error) {
defer mon.Task()(&ctx)(&err)
// the offset is hardcoded to 0 since we are using the lastSeenSegmentPath to
// indicate the item we last listed instead. In a perfect world this db query would
@ -102,7 +103,7 @@ func (db *irreparableDB) GetLimited(ctx context.Context, limit int, lastSeenSegm
if err != nil {
return nil, err
}
segment := &pb.IrreparableSegment{
segment := &internalpb.IrreparableSegment{
Path: row.Segmentpath,
SegmentDetail: p,
LostPieces: int32(row.PiecesLostCount),

View File

@ -21,6 +21,7 @@ import (
"storj.io/private/version"
"storj.io/storj/private/dbutil/pgutil"
"storj.io/storj/private/tagsql"
"storj.io/storj/satellite/internalpb"
"storj.io/storj/satellite/overlay"
"storj.io/storj/satellite/satellitedb/dbx"
)
@ -1191,7 +1192,7 @@ type updateNodeStats struct {
OnlineScore float64Field
}
func (cache *overlaycache) populateUpdateNodeStats(dbNode *dbx.Node, updateReq *overlay.UpdateRequest, auditHistory *pb.AuditHistory, now time.Time) updateNodeStats {
func (cache *overlaycache) populateUpdateNodeStats(dbNode *dbx.Node, updateReq *overlay.UpdateRequest, auditHistory *internalpb.AuditHistory, now time.Time) updateNodeStats {
// there are three audit outcomes: success, failure, and unknown
// if a node fails enough audits, it gets disqualified
// if a node gets enough "unknown" audits, it gets put into suspension
@ -1378,7 +1379,7 @@ func (cache *overlaycache) populateUpdateNodeStats(dbNode *dbx.Node, updateReq *
return updateFields
}
func (cache *overlaycache) populateUpdateFields(dbNode *dbx.Node, updateReq *overlay.UpdateRequest, auditHistory *pb.AuditHistory, now time.Time) dbx.Node_Update_Fields {
func (cache *overlaycache) populateUpdateFields(dbNode *dbx.Node, updateReq *overlay.UpdateRequest, auditHistory *internalpb.AuditHistory, now time.Time) dbx.Node_Update_Fields {
update := cache.populateUpdateNodeStats(dbNode, updateReq, auditHistory, now)
updateFields := dbx.Node_Update_Fields{}

View File

@ -11,8 +11,8 @@ import (
"github.com/zeebo/errs"
"storj.io/common/pb"
"storj.io/storj/private/dbutil"
"storj.io/storj/satellite/internalpb"
"storj.io/storj/satellite/satellitedb/dbx"
"storj.io/storj/storage"
)
@ -24,7 +24,7 @@ type repairQueue struct {
db *satelliteDB
}
func (r *repairQueue) Insert(ctx context.Context, seg *pb.InjuredSegment, numHealthy int) (alreadyInserted bool, err error) {
func (r *repairQueue) Insert(ctx context.Context, seg *internalpb.InjuredSegment, numHealthy int) (alreadyInserted bool, err error) {
defer mon.Task()(&ctx)(&err)
// insert if the segment does not exist, or update its healthy count if it does
var query string
@ -77,7 +77,7 @@ func (r *repairQueue) Insert(ctx context.Context, seg *pb.InjuredSegment, numHea
return alreadyInserted, rows.Err()
}
func (r *repairQueue) Select(ctx context.Context) (seg *pb.InjuredSegment, err error) {
func (r *repairQueue) Select(ctx context.Context) (seg *internalpb.InjuredSegment, err error) {
defer mon.Task()(&ctx)(&err)
switch r.db.implementation {
case dbutil.Cockroach:
@ -103,7 +103,7 @@ func (r *repairQueue) Select(ctx context.Context) (seg *pb.InjuredSegment, err e
return seg, err
}
func (r *repairQueue) Delete(ctx context.Context, seg *pb.InjuredSegment) (err error) {
func (r *repairQueue) Delete(ctx context.Context, seg *internalpb.InjuredSegment) (err error) {
defer mon.Task()(&ctx)(&err)
_, err = r.db.ExecContext(ctx, r.db.Rebind(`DELETE FROM injuredsegments WHERE path = ?`), seg.Path)
return Error.Wrap(err)
@ -115,7 +115,7 @@ func (r *repairQueue) Clean(ctx context.Context, before time.Time) (deleted int6
return n, Error.Wrap(err)
}
func (r *repairQueue) SelectN(ctx context.Context, limit int) (segs []pb.InjuredSegment, err error) {
func (r *repairQueue) SelectN(ctx context.Context, limit int) (segs []internalpb.InjuredSegment, err error) {
defer mon.Task()(&ctx)(&err)
if limit <= 0 || limit > RepairQueueSelectLimit {
limit = RepairQueueSelectLimit
@ -128,7 +128,7 @@ func (r *repairQueue) SelectN(ctx context.Context, limit int) (segs []pb.Injured
defer func() { err = errs.Combine(err, rows.Close()) }()
for rows.Next() {
var seg pb.InjuredSegment
var seg internalpb.InjuredSegment
err = rows.Scan(&seg)
if err != nil {
return segs, Error.Wrap(err)

View File

@ -13,6 +13,7 @@ import (
"storj.io/common/pb"
"storj.io/common/rpc"
"storj.io/common/rpc/rpcstatus"
"storj.io/storj/storagenode/internalpb"
"storj.io/storj/storagenode/pieces"
"storj.io/storj/storagenode/satellites"
"storj.io/storj/storagenode/trust"
@ -39,12 +40,12 @@ func NewEndpoint(log *zap.Logger, trust *trust.Pool, satellites satellites.DB, d
}
// GetNonExitingSatellites returns a list of satellites that the storagenode has not begun a graceful exit for.
func (e *Endpoint) GetNonExitingSatellites(ctx context.Context, req *pb.GetNonExitingSatellitesRequest) (*pb.GetNonExitingSatellitesResponse, error) {
func (e *Endpoint) GetNonExitingSatellites(ctx context.Context, req *internalpb.GetNonExitingSatellitesRequest) (*internalpb.GetNonExitingSatellitesResponse, error) {
e.log.Debug("initialize graceful exit: GetSatellitesList")
// get all trusted satellites
trustedSatellites := e.trust.GetSatellites(ctx)
availableSatellites := make([]*pb.NonExitingSatellite, 0, len(trustedSatellites))
availableSatellites := make([]*internalpb.NonExitingSatellite, 0, len(trustedSatellites))
// filter out satellites that are already exiting
exitingSatellites, err := e.satellites.ListGracefulExits(ctx)
@ -77,20 +78,20 @@ func (e *Endpoint) GetNonExitingSatellites(ctx context.Context, req *pb.GetNonEx
e.log.Debug("graceful exit: get space used by satellite", zap.Stringer("Satellite ID", trusted), zap.Error(err))
continue
}
availableSatellites = append(availableSatellites, &pb.NonExitingSatellite{
availableSatellites = append(availableSatellites, &internalpb.NonExitingSatellite{
DomainName: nodeurl.Address,
NodeId: trusted,
SpaceUsed: float64(piecesContentSize),
})
}
return &pb.GetNonExitingSatellitesResponse{
return &internalpb.GetNonExitingSatellitesResponse{
Satellites: availableSatellites,
}, nil
}
// InitiateGracefulExit updates one or more satellites in the storagenode's database to be gracefully exiting.
func (e *Endpoint) InitiateGracefulExit(ctx context.Context, req *pb.InitiateGracefulExitRequest) (*pb.ExitProgress, error) {
func (e *Endpoint) InitiateGracefulExit(ctx context.Context, req *internalpb.InitiateGracefulExitRequest) (*internalpb.ExitProgress, error) {
e.log.Debug("initialize graceful exit: start", zap.Stringer("Satellite ID", req.NodeId))
nodeurl, err := e.trust.GetNodeURL(ctx, req.NodeId)
@ -112,7 +113,7 @@ func (e *Endpoint) InitiateGracefulExit(ctx context.Context, req *pb.InitiateGra
return nil, rpcstatus.Error(rpcstatus.Internal, err.Error())
}
return &pb.ExitProgress{
return &internalpb.ExitProgress{
DomainName: nodeurl.Address,
NodeId: req.NodeId,
PercentComplete: float32(0),
@ -120,14 +121,14 @@ func (e *Endpoint) InitiateGracefulExit(ctx context.Context, req *pb.InitiateGra
}
// GetExitProgress returns graceful exit progress on each satellite that a storage node has started exiting.
func (e *Endpoint) GetExitProgress(ctx context.Context, req *pb.GetExitProgressRequest) (*pb.GetExitProgressResponse, error) {
func (e *Endpoint) GetExitProgress(ctx context.Context, req *internalpb.GetExitProgressRequest) (*internalpb.GetExitProgressResponse, error) {
exitProgress, err := e.satellites.ListGracefulExits(ctx)
if err != nil {
return nil, rpcstatus.Error(rpcstatus.Internal, err.Error())
}
resp := &pb.GetExitProgressResponse{
Progress: make([]*pb.ExitProgress, 0, len(exitProgress)),
resp := &internalpb.GetExitProgressResponse{
Progress: make([]*internalpb.ExitProgress, 0, len(exitProgress)),
}
for _, progress := range exitProgress {
nodeurl, err := e.trust.GetNodeURL(ctx, progress.SatelliteID)
@ -148,7 +149,7 @@ func (e *Endpoint) GetExitProgress(ctx context.Context, req *pb.GetExitProgressR
}
resp.Progress = append(resp.Progress,
&pb.ExitProgress{
&internalpb.ExitProgress{
DomainName: nodeurl.Address,
NodeId: progress.SatelliteID,
PercentComplete: percentCompleted,
@ -161,7 +162,7 @@ func (e *Endpoint) GetExitProgress(ctx context.Context, req *pb.GetExitProgressR
}
// GracefulExitFeasibility returns whether graceful exit is feasible, based on the node's age on the chosen satellite.
func (e *Endpoint) GracefulExitFeasibility(ctx context.Context, request *pb.GracefulExitFeasibilityNodeRequest) (*pb.GracefulExitFeasibilityResponse, error) {
func (e *Endpoint) GracefulExitFeasibility(ctx context.Context, request *internalpb.GracefulExitFeasibilityRequest) (*internalpb.GracefulExitFeasibilityResponse, error) {
nodeurl, err := e.trust.GetNodeURL(ctx, request.NodeId)
if err != nil {
return nil, errs.New("unable to find satellite %s: %w", request.NodeId, err)
@ -181,5 +182,7 @@ func (e *Endpoint) GracefulExitFeasibility(ctx context.Context, request *pb.Grac
if err != nil {
return nil, errs.Wrap(err)
}
return feasibility, nil
response := (internalpb.GracefulExitFeasibilityResponse)(*feasibility)
return &response, nil
}
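The direct type conversion above presumably works because the source response and internalpb.GracefulExitFeasibilityResponse are generated from the same message and share an identical field layout. A tiny self-contained illustration of that Go conversion rule, using hypothetical stand-in types rather than the real generated ones:

package main

import "fmt"

// feasibilityA and feasibilityB stand in for two generated response types
// with identical fields; Go permits converting one named struct type to
// another when their underlying field layouts match (struct tags have been
// ignored in such conversions since Go 1.8).
type feasibilityA struct {
    MonthsRequired int32
    IsAllowed      bool
}

type feasibilityB struct {
    MonthsRequired int32
    IsAllowed      bool
}

func main() {
    a := feasibilityA{MonthsRequired: 15, IsAllowed: false}
    b := feasibilityB(a) // direct conversion, no per-field copy needed
    fmt.Printf("%+v\n", b)
}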

View File

@ -9,9 +9,9 @@ import (
"github.com/stretchr/testify/require"
"storj.io/common/pb"
"storj.io/common/testcontext"
"storj.io/storj/private/testplanet"
"storj.io/storj/storagenode/internalpb"
)
func TestGetNonExitingSatellites(t *testing.T) {
@ -27,7 +27,7 @@ func TestGetNonExitingSatellites(t *testing.T) {
err := storagenode.DB.Satellites().InitiateGracefulExit(ctx, exitingSatellite.ID(), time.Now(), 0)
require.NoError(t, err)
nonExitingSatellites, err := storagenode.GracefulExit.Endpoint.GetNonExitingSatellites(ctx, &pb.GetNonExitingSatellitesRequest{})
nonExitingSatellites, err := storagenode.GracefulExit.Endpoint.GetNonExitingSatellites(ctx, &internalpb.GetNonExitingSatellitesRequest{})
require.NoError(t, err)
require.Len(t, nonExitingSatellites.GetSatellites(), totalSatelliteCount-exitingSatelliteCount)
@ -44,7 +44,7 @@ func TestInitiateGracefulExit(t *testing.T) {
storagenode := planet.StorageNodes[0]
exitingSatelliteID := planet.Satellites[0].ID()
req := &pb.InitiateGracefulExitRequest{
req := &internalpb.InitiateGracefulExitRequest{
NodeId: exitingSatelliteID,
}
@ -75,7 +75,7 @@ func TestGetExitProgress(t *testing.T) {
require.NoError(t, err)
// check graceful exit progress
resp, err := storagenode.GracefulExit.Endpoint.GetExitProgress(ctx, &pb.GetExitProgressRequest{})
resp, err := storagenode.GracefulExit.Endpoint.GetExitProgress(ctx, &internalpb.GetExitProgressRequest{})
require.NoError(t, err)
require.Len(t, resp.GetProgress(), 1)
progress := resp.GetProgress()[0]

View File

@ -12,10 +12,10 @@ import (
"github.com/zeebo/errs"
"go.uber.org/zap"
"storj.io/common/pb"
"storj.io/common/rpc/rpcstatus"
"storj.io/storj/storagenode/bandwidth"
"storj.io/storj/storagenode/contact"
"storj.io/storj/storagenode/internalpb"
"storj.io/storj/storagenode/pieces"
"storj.io/storj/storagenode/piecestore"
)
@ -68,12 +68,12 @@ func NewEndpoint(
}
// Stats returns current statistics about the storage node.
func (inspector *Endpoint) Stats(ctx context.Context, in *pb.StatsRequest) (out *pb.StatSummaryResponse, err error) {
func (inspector *Endpoint) Stats(ctx context.Context, in *internalpb.StatsRequest) (out *internalpb.StatSummaryResponse, err error) {
defer mon.Task()(&ctx)(&err)
return inspector.retrieveStats(ctx)
}
func (inspector *Endpoint) retrieveStats(ctx context.Context) (_ *pb.StatSummaryResponse, err error) {
func (inspector *Endpoint) retrieveStats(ctx context.Context) (_ *internalpb.StatSummaryResponse, err error) {
defer mon.Task()(&ctx)(&err)
// Space Usage
@ -91,7 +91,7 @@ func (inspector *Endpoint) retrieveStats(ctx context.Context) (_ *pb.StatSummary
totalUsedBandwidth := usage.Total()
availableSpace := inspector.pieceStoreConfig.AllocatedDiskSpace.Int64() - piecesContentSize
return &pb.StatSummaryResponse{
return &internalpb.StatSummaryResponse{
UsedSpace: piecesContentSize,
AvailableSpace: availableSpace,
UsedIngress: ingress,
@ -101,12 +101,12 @@ func (inspector *Endpoint) retrieveStats(ctx context.Context) (_ *pb.StatSummary
}
// Dashboard returns dashboard information.
func (inspector *Endpoint) Dashboard(ctx context.Context, in *pb.DashboardRequest) (out *pb.DashboardResponse, err error) {
func (inspector *Endpoint) Dashboard(ctx context.Context, in *internalpb.DashboardRequest) (out *internalpb.DashboardResponse, err error) {
defer mon.Task()(&ctx)(&err)
return inspector.getDashboardData(ctx)
}
func (inspector *Endpoint) getDashboardData(ctx context.Context) (_ *pb.DashboardResponse, err error) {
func (inspector *Endpoint) getDashboardData(ctx context.Context) (_ *internalpb.DashboardResponse, err error) {
defer mon.Task()(&ctx)(&err)
statsSummary, err := inspector.retrieveStats(ctx)
@ -116,7 +116,7 @@ func (inspector *Endpoint) getDashboardData(ctx context.Context) (_ *pb.Dashboar
lastPingedAt := inspector.pingStats.WhenLastPinged()
self := inspector.contact.Local()
return &pb.DashboardResponse{
return &internalpb.DashboardResponse{
NodeId: self.ID,
InternalAddress: "",
ExternalAddress: self.Address,

View File

@ -11,11 +11,11 @@ import (
"github.com/stretchr/testify/require"
"storj.io/common/memory"
"storj.io/common/pb"
"storj.io/common/sync2"
"storj.io/common/testcontext"
"storj.io/common/testrand"
"storj.io/storj/private/testplanet"
"storj.io/storj/storagenode/internalpb"
)
func TestInspectorStats(t *testing.T) {
@ -29,7 +29,7 @@ func TestInspectorStats(t *testing.T) {
var availableSpace int64
for _, storageNode := range planet.StorageNodes {
response, err := storageNode.Storage2.Inspector.Stats(ctx, &pb.StatsRequest{})
response, err := storageNode.Storage2.Inspector.Stats(ctx, &internalpb.StatsRequest{})
require.NoError(t, err)
assert.Zero(t, response.UsedBandwidth)
@ -65,7 +65,7 @@ func TestInspectorStats(t *testing.T) {
var downloaded int
for _, storageNode := range planet.StorageNodes {
response, err := storageNode.Storage2.Inspector.Stats(ctx, &pb.StatsRequest{})
response, err := storageNode.Storage2.Inspector.Stats(ctx, &internalpb.StatsRequest{})
require.NoError(t, err)
// TODO set more accurate assertions
@ -95,7 +95,7 @@ func TestInspectorDashboard(t *testing.T) {
SatelliteCount: 1, StorageNodeCount: 1, UplinkCount: 1,
}, func(t *testing.T, ctx *testcontext.Context, planet *testplanet.Planet) {
for _, storageNode := range planet.StorageNodes {
response, err := storageNode.Storage2.Inspector.Dashboard(ctx, &pb.DashboardRequest{})
response, err := storageNode.Storage2.Inspector.Dashboard(ctx, &internalpb.DashboardRequest{})
require.NoError(t, err)
uptime, err := time.ParseDuration(response.Uptime)
@ -112,7 +112,7 @@ func TestInspectorDashboard(t *testing.T) {
require.NoError(t, err)
for _, storageNode := range planet.StorageNodes {
response, err := storageNode.Storage2.Inspector.Dashboard(ctx, &pb.DashboardRequest{})
response, err := storageNode.Storage2.Inspector.Dashboard(ctx, &internalpb.DashboardRequest{})
require.NoError(t, err)
assert.True(t, response.LastPinged.After(testStartedTime))

View File

@ -0,0 +1,7 @@
// Copyright (C) 2020 Storj Labs, Inc.
// See LICENSE for copying information.
//go:generate go run gen.go
// Package internalpb contains proto definitions for storagenode internal tools.
package internalpb

View File

@ -0,0 +1,122 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.
// +build ignore
package main
import (
"flag"
"fmt"
"io/ioutil"
"os"
"os/exec"
"path/filepath"
"strings"
)
var (
mainpkg = flag.String("pkg", "storj.io/storj/storagenode/internalpb", "main package name")
protoc = flag.String("protoc", "protoc", "protoc compiler")
)
var ignoreProto = map[string]bool{
"gogo.proto": true,
}
func ignore(files []string) []string {
xs := []string{}
for _, file := range files {
if !ignoreProto[file] {
xs = append(xs, file)
}
}
return xs
}
// Programs needed for code generation:
//
// github.com/ckaznocha/protoc-gen-lint
// storj.io/drpc/cmd/protoc-gen-drpc
// github.com/nilslice/protolock/cmd/protolock
func main() {
flag.Parse()
// TODO: protolock
{
// cleanup previous files
localfiles, err := filepath.Glob("*.pb.go")
check(err)
all := []string{}
all = append(all, localfiles...)
for _, match := range all {
_ = os.Remove(match)
}
}
{
protofiles, err := filepath.Glob("*.proto")
check(err)
protofiles = ignore(protofiles)
commonPb := os.Getenv("STORJ_COMMON_PB")
if commonPb == "" {
commonPb = "../../../common/pb"
}
overrideImports := ",Mgoogle/protobuf/timestamp.proto=storj.io/storj/storagenode/internalpb"
args := []string{
"--lint_out=.",
"--drpc_out=plugins=drpc,paths=source_relative" + overrideImports + ":.",
"-I=.",
"-I=" + commonPb,
}
args = append(args, protofiles...)
// generate new code
cmd := exec.Command(*protoc, args...)
fmt.Println(strings.Join(cmd.Args, " "))
out, err := cmd.CombinedOutput()
fmt.Println(string(out))
check(err)
}
{
files, err := filepath.Glob("*.pb.go")
check(err)
for _, file := range files {
process(file)
}
}
{
// format code to get rid of extra imports
out, err := exec.Command("goimports", "-local", "storj.io", "-w", ".").CombinedOutput()
fmt.Println(string(out))
check(err)
}
}
func process(file string) {
data, err := ioutil.ReadFile(file)
check(err)
source := string(data)
// When generating code to the same path as the proto, it will
// end up generating an `import _ "."`; the following replace removes it.
source = strings.Replace(source, `_ "."`, "", -1)
err = ioutil.WriteFile(file, []byte(source), 0644)
check(err)
}
func check(err error) {
if err != nil {
panic(err)
}
}
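For reference, with the default STORJ_COMMON_PB path and the proto files visible in this change (gracefulexit.proto and inspector.proto), the invocation this script prints comes out roughly as:

protoc --lint_out=. --drpc_out=plugins=drpc,paths=source_relative,Mgoogle/protobuf/timestamp.proto=storj.io/storj/storagenode/internalpb:. -I=. -I=../../../common/pb gracefulexit.proto inspector.proto

Any additional *.proto files present in the package would simply be appended to the same command.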

View File

@ -0,0 +1,143 @@
// Protocol Buffers for Go with Gadgets
//
// Copyright (c) 2013, The GoGo Authors. All rights reserved.
// http://github.com/gogo/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
syntax = "proto2";
package gogoproto;
import "google/protobuf/descriptor.proto";
option java_package = "com.google.protobuf";
option java_outer_classname = "GoGoProtos";
extend google.protobuf.EnumOptions {
optional bool goproto_enum_prefix = 62001;
optional bool goproto_enum_stringer = 62021;
optional bool enum_stringer = 62022;
optional string enum_customname = 62023;
optional bool enumdecl = 62024;
}
extend google.protobuf.EnumValueOptions {
optional string enumvalue_customname = 66001;
}
extend google.protobuf.FileOptions {
optional bool goproto_getters_all = 63001;
optional bool goproto_enum_prefix_all = 63002;
optional bool goproto_stringer_all = 63003;
optional bool verbose_equal_all = 63004;
optional bool face_all = 63005;
optional bool gostring_all = 63006;
optional bool populate_all = 63007;
optional bool stringer_all = 63008;
optional bool onlyone_all = 63009;
optional bool equal_all = 63013;
optional bool description_all = 63014;
optional bool testgen_all = 63015;
optional bool benchgen_all = 63016;
optional bool marshaler_all = 63017;
optional bool unmarshaler_all = 63018;
optional bool stable_marshaler_all = 63019;
optional bool sizer_all = 63020;
optional bool goproto_enum_stringer_all = 63021;
optional bool enum_stringer_all = 63022;
optional bool unsafe_marshaler_all = 63023;
optional bool unsafe_unmarshaler_all = 63024;
optional bool goproto_extensions_map_all = 63025;
optional bool goproto_unrecognized_all = 63026;
optional bool gogoproto_import = 63027;
optional bool protosizer_all = 63028;
optional bool compare_all = 63029;
optional bool typedecl_all = 63030;
optional bool enumdecl_all = 63031;
optional bool goproto_registration = 63032;
optional bool messagename_all = 63033;
optional bool goproto_sizecache_all = 63034;
optional bool goproto_unkeyed_all = 63035;
}
extend google.protobuf.MessageOptions {
optional bool goproto_getters = 64001;
optional bool goproto_stringer = 64003;
optional bool verbose_equal = 64004;
optional bool face = 64005;
optional bool gostring = 64006;
optional bool populate = 64007;
optional bool stringer = 67008;
optional bool onlyone = 64009;
optional bool equal = 64013;
optional bool description = 64014;
optional bool testgen = 64015;
optional bool benchgen = 64016;
optional bool marshaler = 64017;
optional bool unmarshaler = 64018;
optional bool stable_marshaler = 64019;
optional bool sizer = 64020;
optional bool unsafe_marshaler = 64023;
optional bool unsafe_unmarshaler = 64024;
optional bool goproto_extensions_map = 64025;
optional bool goproto_unrecognized = 64026;
optional bool protosizer = 64028;
optional bool typedecl = 64030;
optional bool messagename = 64033;
optional bool goproto_sizecache = 64034;
optional bool goproto_unkeyed = 64035;
}
extend google.protobuf.FieldOptions {
optional bool nullable = 65001;
optional bool embed = 65002;
optional string customtype = 65003;
optional string customname = 65004;
optional string jsontag = 65005;
optional string moretags = 65006;
optional string casttype = 65007;
optional string castkey = 65008;
optional string castvalue = 65009;
optional bool stdtime = 65010;
optional bool stdduration = 65011;
optional bool wktpointer = 65012;
optional bool compare = 65013;
}

View File

@ -0,0 +1,636 @@
// Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: gracefulexit.proto
package internalpb
import (
context "context"
fmt "fmt"
math "math"
time "time"
proto "github.com/gogo/protobuf/proto"
drpc "storj.io/drpc"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
var _ = time.Kitchen
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
type GetNonExitingSatellitesRequest struct {
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *GetNonExitingSatellitesRequest) Reset() { *m = GetNonExitingSatellitesRequest{} }
func (m *GetNonExitingSatellitesRequest) String() string { return proto.CompactTextString(m) }
func (*GetNonExitingSatellitesRequest) ProtoMessage() {}
func (*GetNonExitingSatellitesRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_8f0acbf2ce5fa631, []int{0}
}
func (m *GetNonExitingSatellitesRequest) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_GetNonExitingSatellitesRequest.Unmarshal(m, b)
}
func (m *GetNonExitingSatellitesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_GetNonExitingSatellitesRequest.Marshal(b, m, deterministic)
}
func (m *GetNonExitingSatellitesRequest) XXX_Merge(src proto.Message) {
xxx_messageInfo_GetNonExitingSatellitesRequest.Merge(m, src)
}
func (m *GetNonExitingSatellitesRequest) XXX_Size() int {
return xxx_messageInfo_GetNonExitingSatellitesRequest.Size(m)
}
func (m *GetNonExitingSatellitesRequest) XXX_DiscardUnknown() {
xxx_messageInfo_GetNonExitingSatellitesRequest.DiscardUnknown(m)
}
var xxx_messageInfo_GetNonExitingSatellitesRequest proto.InternalMessageInfo
type GetNonExitingSatellitesResponse struct {
Satellites []*NonExitingSatellite `protobuf:"bytes,1,rep,name=satellites,proto3" json:"satellites,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *GetNonExitingSatellitesResponse) Reset() { *m = GetNonExitingSatellitesResponse{} }
func (m *GetNonExitingSatellitesResponse) String() string { return proto.CompactTextString(m) }
func (*GetNonExitingSatellitesResponse) ProtoMessage() {}
func (*GetNonExitingSatellitesResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_8f0acbf2ce5fa631, []int{1}
}
func (m *GetNonExitingSatellitesResponse) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_GetNonExitingSatellitesResponse.Unmarshal(m, b)
}
func (m *GetNonExitingSatellitesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_GetNonExitingSatellitesResponse.Marshal(b, m, deterministic)
}
func (m *GetNonExitingSatellitesResponse) XXX_Merge(src proto.Message) {
xxx_messageInfo_GetNonExitingSatellitesResponse.Merge(m, src)
}
func (m *GetNonExitingSatellitesResponse) XXX_Size() int {
return xxx_messageInfo_GetNonExitingSatellitesResponse.Size(m)
}
func (m *GetNonExitingSatellitesResponse) XXX_DiscardUnknown() {
xxx_messageInfo_GetNonExitingSatellitesResponse.DiscardUnknown(m)
}
var xxx_messageInfo_GetNonExitingSatellitesResponse proto.InternalMessageInfo
func (m *GetNonExitingSatellitesResponse) GetSatellites() []*NonExitingSatellite {
if m != nil {
return m.Satellites
}
return nil
}
// NonExitingSatellite contains information that's needed for a storagenode to start graceful exit.
type NonExitingSatellite struct {
NodeId NodeID `protobuf:"bytes,1,opt,name=node_id,json=nodeId,proto3,customtype=NodeID" json:"node_id"`
DomainName string `protobuf:"bytes,2,opt,name=domain_name,json=domainName,proto3" json:"domain_name,omitempty"`
SpaceUsed float64 `protobuf:"fixed64,3,opt,name=space_used,json=spaceUsed,proto3" json:"space_used,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *NonExitingSatellite) Reset() { *m = NonExitingSatellite{} }
func (m *NonExitingSatellite) String() string { return proto.CompactTextString(m) }
func (*NonExitingSatellite) ProtoMessage() {}
func (*NonExitingSatellite) Descriptor() ([]byte, []int) {
return fileDescriptor_8f0acbf2ce5fa631, []int{2}
}
func (m *NonExitingSatellite) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_NonExitingSatellite.Unmarshal(m, b)
}
func (m *NonExitingSatellite) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_NonExitingSatellite.Marshal(b, m, deterministic)
}
func (m *NonExitingSatellite) XXX_Merge(src proto.Message) {
xxx_messageInfo_NonExitingSatellite.Merge(m, src)
}
func (m *NonExitingSatellite) XXX_Size() int {
return xxx_messageInfo_NonExitingSatellite.Size(m)
}
func (m *NonExitingSatellite) XXX_DiscardUnknown() {
xxx_messageInfo_NonExitingSatellite.DiscardUnknown(m)
}
var xxx_messageInfo_NonExitingSatellite proto.InternalMessageInfo
func (m *NonExitingSatellite) GetDomainName() string {
if m != nil {
return m.DomainName
}
return ""
}
func (m *NonExitingSatellite) GetSpaceUsed() float64 {
if m != nil {
return m.SpaceUsed
}
return 0
}
type InitiateGracefulExitRequest struct {
NodeId NodeID `protobuf:"bytes,1,opt,name=node_id,json=nodeId,proto3,customtype=NodeID" json:"node_id"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *InitiateGracefulExitRequest) Reset() { *m = InitiateGracefulExitRequest{} }
func (m *InitiateGracefulExitRequest) String() string { return proto.CompactTextString(m) }
func (*InitiateGracefulExitRequest) ProtoMessage() {}
func (*InitiateGracefulExitRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_8f0acbf2ce5fa631, []int{3}
}
func (m *InitiateGracefulExitRequest) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_InitiateGracefulExitRequest.Unmarshal(m, b)
}
func (m *InitiateGracefulExitRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_InitiateGracefulExitRequest.Marshal(b, m, deterministic)
}
func (m *InitiateGracefulExitRequest) XXX_Merge(src proto.Message) {
xxx_messageInfo_InitiateGracefulExitRequest.Merge(m, src)
}
func (m *InitiateGracefulExitRequest) XXX_Size() int {
return xxx_messageInfo_InitiateGracefulExitRequest.Size(m)
}
func (m *InitiateGracefulExitRequest) XXX_DiscardUnknown() {
xxx_messageInfo_InitiateGracefulExitRequest.DiscardUnknown(m)
}
var xxx_messageInfo_InitiateGracefulExitRequest proto.InternalMessageInfo
type GetExitProgressRequest struct {
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *GetExitProgressRequest) Reset() { *m = GetExitProgressRequest{} }
func (m *GetExitProgressRequest) String() string { return proto.CompactTextString(m) }
func (*GetExitProgressRequest) ProtoMessage() {}
func (*GetExitProgressRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_8f0acbf2ce5fa631, []int{4}
}
func (m *GetExitProgressRequest) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_GetExitProgressRequest.Unmarshal(m, b)
}
func (m *GetExitProgressRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_GetExitProgressRequest.Marshal(b, m, deterministic)
}
func (m *GetExitProgressRequest) XXX_Merge(src proto.Message) {
xxx_messageInfo_GetExitProgressRequest.Merge(m, src)
}
func (m *GetExitProgressRequest) XXX_Size() int {
return xxx_messageInfo_GetExitProgressRequest.Size(m)
}
func (m *GetExitProgressRequest) XXX_DiscardUnknown() {
xxx_messageInfo_GetExitProgressRequest.DiscardUnknown(m)
}
var xxx_messageInfo_GetExitProgressRequest proto.InternalMessageInfo
type GetExitProgressResponse struct {
Progress []*ExitProgress `protobuf:"bytes,1,rep,name=progress,proto3" json:"progress,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *GetExitProgressResponse) Reset() { *m = GetExitProgressResponse{} }
func (m *GetExitProgressResponse) String() string { return proto.CompactTextString(m) }
func (*GetExitProgressResponse) ProtoMessage() {}
func (*GetExitProgressResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_8f0acbf2ce5fa631, []int{5}
}
func (m *GetExitProgressResponse) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_GetExitProgressResponse.Unmarshal(m, b)
}
func (m *GetExitProgressResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_GetExitProgressResponse.Marshal(b, m, deterministic)
}
func (m *GetExitProgressResponse) XXX_Merge(src proto.Message) {
xxx_messageInfo_GetExitProgressResponse.Merge(m, src)
}
func (m *GetExitProgressResponse) XXX_Size() int {
return xxx_messageInfo_GetExitProgressResponse.Size(m)
}
func (m *GetExitProgressResponse) XXX_DiscardUnknown() {
xxx_messageInfo_GetExitProgressResponse.DiscardUnknown(m)
}
var xxx_messageInfo_GetExitProgressResponse proto.InternalMessageInfo
func (m *GetExitProgressResponse) GetProgress() []*ExitProgress {
if m != nil {
return m.Progress
}
return nil
}
type ExitProgress struct {
DomainName string `protobuf:"bytes,1,opt,name=domain_name,json=domainName,proto3" json:"domain_name,omitempty"`
NodeId NodeID `protobuf:"bytes,2,opt,name=node_id,json=nodeId,proto3,customtype=NodeID" json:"node_id"`
PercentComplete float32 `protobuf:"fixed32,3,opt,name=percent_complete,json=percentComplete,proto3" json:"percent_complete,omitempty"`
Successful bool `protobuf:"varint,4,opt,name=successful,proto3" json:"successful,omitempty"`
CompletionReceipt []byte `protobuf:"bytes,5,opt,name=completion_receipt,json=completionReceipt,proto3" json:"completion_receipt,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *ExitProgress) Reset() { *m = ExitProgress{} }
func (m *ExitProgress) String() string { return proto.CompactTextString(m) }
func (*ExitProgress) ProtoMessage() {}
func (*ExitProgress) Descriptor() ([]byte, []int) {
return fileDescriptor_8f0acbf2ce5fa631, []int{6}
}
func (m *ExitProgress) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_ExitProgress.Unmarshal(m, b)
}
func (m *ExitProgress) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_ExitProgress.Marshal(b, m, deterministic)
}
func (m *ExitProgress) XXX_Merge(src proto.Message) {
xxx_messageInfo_ExitProgress.Merge(m, src)
}
func (m *ExitProgress) XXX_Size() int {
return xxx_messageInfo_ExitProgress.Size(m)
}
func (m *ExitProgress) XXX_DiscardUnknown() {
xxx_messageInfo_ExitProgress.DiscardUnknown(m)
}
var xxx_messageInfo_ExitProgress proto.InternalMessageInfo
func (m *ExitProgress) GetDomainName() string {
if m != nil {
return m.DomainName
}
return ""
}
func (m *ExitProgress) GetPercentComplete() float32 {
if m != nil {
return m.PercentComplete
}
return 0
}
func (m *ExitProgress) GetSuccessful() bool {
if m != nil {
return m.Successful
}
return false
}
func (m *ExitProgress) GetCompletionReceipt() []byte {
if m != nil {
return m.CompletionReceipt
}
return nil
}
type GracefulExitFeasibilityRequest struct {
NodeId NodeID `protobuf:"bytes,1,opt,name=node_id,json=nodeId,proto3,customtype=NodeID" json:"node_id"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *GracefulExitFeasibilityRequest) Reset() { *m = GracefulExitFeasibilityRequest{} }
func (m *GracefulExitFeasibilityRequest) String() string { return proto.CompactTextString(m) }
func (*GracefulExitFeasibilityRequest) ProtoMessage() {}
func (*GracefulExitFeasibilityRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_8f0acbf2ce5fa631, []int{7}
}
func (m *GracefulExitFeasibilityRequest) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_GracefulExitFeasibilityRequest.Unmarshal(m, b)
}
func (m *GracefulExitFeasibilityRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_GracefulExitFeasibilityRequest.Marshal(b, m, deterministic)
}
func (m *GracefulExitFeasibilityRequest) XXX_Merge(src proto.Message) {
xxx_messageInfo_GracefulExitFeasibilityRequest.Merge(m, src)
}
func (m *GracefulExitFeasibilityRequest) XXX_Size() int {
return xxx_messageInfo_GracefulExitFeasibilityRequest.Size(m)
}
func (m *GracefulExitFeasibilityRequest) XXX_DiscardUnknown() {
xxx_messageInfo_GracefulExitFeasibilityRequest.DiscardUnknown(m)
}
var xxx_messageInfo_GracefulExitFeasibilityRequest proto.InternalMessageInfo
type GracefulExitFeasibilityResponse struct {
JoinedAt time.Time `protobuf:"bytes,1,opt,name=joined_at,json=joinedAt,proto3,stdtime" json:"joined_at"`
MonthsRequired int32 `protobuf:"varint,2,opt,name=months_required,json=monthsRequired,proto3" json:"months_required,omitempty"`
IsAllowed bool `protobuf:"varint,3,opt,name=is_allowed,json=isAllowed,proto3" json:"is_allowed,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *GracefulExitFeasibilityResponse) Reset() { *m = GracefulExitFeasibilityResponse{} }
func (m *GracefulExitFeasibilityResponse) String() string { return proto.CompactTextString(m) }
func (*GracefulExitFeasibilityResponse) ProtoMessage() {}
func (*GracefulExitFeasibilityResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_8f0acbf2ce5fa631, []int{8}
}
func (m *GracefulExitFeasibilityResponse) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_GracefulExitFeasibilityResponse.Unmarshal(m, b)
}
func (m *GracefulExitFeasibilityResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_GracefulExitFeasibilityResponse.Marshal(b, m, deterministic)
}
func (m *GracefulExitFeasibilityResponse) XXX_Merge(src proto.Message) {
xxx_messageInfo_GracefulExitFeasibilityResponse.Merge(m, src)
}
func (m *GracefulExitFeasibilityResponse) XXX_Size() int {
return xxx_messageInfo_GracefulExitFeasibilityResponse.Size(m)
}
func (m *GracefulExitFeasibilityResponse) XXX_DiscardUnknown() {
xxx_messageInfo_GracefulExitFeasibilityResponse.DiscardUnknown(m)
}
var xxx_messageInfo_GracefulExitFeasibilityResponse proto.InternalMessageInfo
func (m *GracefulExitFeasibilityResponse) GetJoinedAt() time.Time {
if m != nil {
return m.JoinedAt
}
return time.Time{}
}
func (m *GracefulExitFeasibilityResponse) GetMonthsRequired() int32 {
if m != nil {
return m.MonthsRequired
}
return 0
}
func (m *GracefulExitFeasibilityResponse) GetIsAllowed() bool {
if m != nil {
return m.IsAllowed
}
return false
}
func init() {
proto.RegisterType((*GetNonExitingSatellitesRequest)(nil), "storagenode.gracefulexit.GetNonExitingSatellitesRequest")
proto.RegisterType((*GetNonExitingSatellitesResponse)(nil), "storagenode.gracefulexit.GetNonExitingSatellitesResponse")
proto.RegisterType((*NonExitingSatellite)(nil), "storagenode.gracefulexit.NonExitingSatellite")
proto.RegisterType((*InitiateGracefulExitRequest)(nil), "storagenode.gracefulexit.InitiateGracefulExitRequest")
proto.RegisterType((*GetExitProgressRequest)(nil), "storagenode.gracefulexit.GetExitProgressRequest")
proto.RegisterType((*GetExitProgressResponse)(nil), "storagenode.gracefulexit.GetExitProgressResponse")
proto.RegisterType((*ExitProgress)(nil), "storagenode.gracefulexit.ExitProgress")
proto.RegisterType((*GracefulExitFeasibilityRequest)(nil), "storagenode.gracefulexit.GracefulExitFeasibilityRequest")
proto.RegisterType((*GracefulExitFeasibilityResponse)(nil), "storagenode.gracefulexit.GracefulExitFeasibilityResponse")
}
func init() { proto.RegisterFile("gracefulexit.proto", fileDescriptor_8f0acbf2ce5fa631) }
var fileDescriptor_8f0acbf2ce5fa631 = []byte{
// 612 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x53, 0x4f, 0x6f, 0xd3, 0x4e,
0x10, 0xfd, 0xb9, 0xff, 0x7e, 0xc9, 0xb4, 0x6a, 0xcb, 0x82, 0xc0, 0x0a, 0xa2, 0x8e, 0x2c, 0x41,
0xc3, 0xa1, 0x0e, 0x14, 0x21, 0xc1, 0xb1, 0x01, 0x5a, 0xe5, 0x40, 0x85, 0x16, 0xb8, 0x20, 0x21,
0x6b, 0x63, 0x4f, 0xcd, 0x56, 0xf6, 0xae, 0xeb, 0x5d, 0x43, 0xb9, 0xf0, 0x11, 0x10, 0xdf, 0x81,
0x2b, 0x1f, 0x84, 0x33, 0x47, 0x0e, 0xe5, 0xab, 0x20, 0xdb, 0xdb, 0x60, 0xda, 0xd8, 0x6a, 0xb9,
0x25, 0x6f, 0x66, 0x9e, 0x67, 0xde, 0xbe, 0x07, 0x24, 0xca, 0x58, 0x80, 0x07, 0x79, 0x8c, 0xc7,
0x5c, 0x7b, 0x69, 0x26, 0xb5, 0x24, 0xb6, 0xd2, 0x32, 0x63, 0x11, 0x0a, 0x19, 0xa2, 0x57, 0xaf,
0xf7, 0x20, 0x92, 0x91, 0xac, 0xba, 0x7a, 0x4e, 0x24, 0x65, 0x14, 0xe3, 0xb0, 0xfc, 0x37, 0xc9,
0x0f, 0x86, 0x9a, 0x27, 0xa8, 0x34, 0x4b, 0xd2, 0xaa, 0xc1, 0xed, 0xc3, 0xc6, 0x1e, 0xea, 0x7d,
0x29, 0x9e, 0x1d, 0x73, 0xcd, 0x45, 0xf4, 0x92, 0x69, 0x8c, 0x63, 0xae, 0x51, 0x51, 0x3c, 0xca,
0x51, 0x69, 0x37, 0x05, 0xa7, 0xb1, 0x43, 0xa5, 0x52, 0x28, 0x24, 0xcf, 0x01, 0xd4, 0x14, 0xb5,
0xad, 0xfe, 0xfc, 0x60, 0x79, 0x7b, 0xcb, 0x6b, 0x5a, 0xd0, 0x9b, 0xc1, 0x45, 0x6b, 0x04, 0xee,
0x27, 0xb8, 0x3a, 0xa3, 0x85, 0x6c, 0xc2, 0xff, 0x05, 0x97, 0xcf, 0x43, 0xdb, 0xea, 0x5b, 0x83,
0x95, 0xd1, 0xea, 0xf7, 0x13, 0xe7, 0xbf, 0x9f, 0x27, 0xce, 0xd2, 0xbe, 0x0c, 0x71, 0xfc, 0x94,
0x2e, 0x15, 0xe5, 0x71, 0x48, 0x1c, 0x58, 0x0e, 0x65, 0xc2, 0xb8, 0xf0, 0x05, 0x4b, 0xd0, 0x9e,
0xeb, 0x5b, 0x83, 0x2e, 0x85, 0x0a, 0xda, 0x67, 0x09, 0x92, 0x5b, 0x00, 0x2a, 0x65, 0x01, 0xfa,
0xb9, 0xc2, 0xd0, 0x9e, 0xef, 0x5b, 0x03, 0x8b, 0x76, 0x4b, 0xe4, 0xb5, 0xc2, 0xd0, 0xdd, 0x85,
0x9b, 0x63, 0xc1, 0x35, 0x67, 0x1a, 0xf7, 0xcc, 0xde, 0xc5, 0x32, 0x46, 0x90, 0x0b, 0xef, 0xe1,
0xda, 0x70, 0x7d, 0x0f, 0x75, 0x31, 0xfa, 0x22, 0x93, 0x51, 0x86, 0x6a, 0xaa, 0xe9, 0x5b, 0xb8,
0x71, 0xae, 0x62, 0xb4, 0x1c, 0x41, 0x27, 0x35, 0x98, 0x51, 0xf2, 0x4e, 0xb3, 0x92, 0x7f, 0x31,
0x4c, 0xe7, 0xdc, 0x1f, 0x16, 0xac, 0xd4, 0x4b, 0x67, 0x15, 0xb1, 0xce, 0x29, 0x52, 0xbb, 0x69,
0xae, 0x55, 0xdb, 0xbb, 0xb0, 0x9e, 0x62, 0x16, 0xa0, 0xd0, 0x7e, 0x20, 0x93, 0x34, 0x46, 0x8d,
0xa5, 0x80, 0x73, 0x74, 0xcd, 0xe0, 0x4f, 0x0c, 0x4c, 0x36, 0x00, 0x54, 0x1e, 0x04, 0xa8, 0xd4,
0x41, 0x1e, 0xdb, 0x0b, 0x7d, 0x6b, 0xd0, 0xa1, 0x35, 0x84, 0x6c, 0x01, 0x31, 0x14, 0x5c, 0x0a,
0x3f, 0xc3, 0x00, 0x79, 0xaa, 0xed, 0xc5, 0xe2, 0xf3, 0xf4, 0xca, 0x9f, 0x0a, 0xad, 0x0a, 0xee,
0x18, 0x36, 0xea, 0xaf, 0xb1, 0x8b, 0x4c, 0xf1, 0x09, 0x8f, 0xb9, 0xfe, 0x78, 0xe9, 0x87, 0xf9,
0x66, 0x81, 0xd3, 0xc8, 0x65, 0xde, 0x61, 0x07, 0xba, 0x87, 0x92, 0x0b, 0x0c, 0x7d, 0xa6, 0x4b,
0xba, 0xe5, 0xed, 0x9e, 0x57, 0xa5, 0xc9, 0x3b, 0x4d, 0x93, 0xf7, 0xea, 0x34, 0x4d, 0xa3, 0x4e,
0xf1, 0xa9, 0x2f, 0xbf, 0x1c, 0x8b, 0x76, 0xaa, 0xb1, 0x9d, 0x62, 0x9f, 0xb5, 0x44, 0x0a, 0xfd,
0x4e, 0xf9, 0x19, 0x1e, 0xe5, 0x3c, 0xc3, 0x4a, 0xdc, 0x45, 0xba, 0x5a, 0xc1, 0xd4, 0xa0, 0x85,
0x1f, 0xb9, 0xf2, 0x59, 0x1c, 0xcb, 0x0f, 0xc6, 0x8f, 0x1d, 0xda, 0xe5, 0x6a, 0xa7, 0x02, 0xb6,
0xbf, 0x2e, 0xc0, 0x7a, 0x71, 0x41, 0x7d, 0x65, 0xf2, 0xd9, 0x2a, 0x3d, 0x34, 0x2b, 0x97, 0xe4,
0x51, 0xb3, 0x63, 0xda, 0xc3, 0xde, 0x7b, 0xfc, 0x0f, 0x93, 0x46, 0xb0, 0x1c, 0xae, 0xcd, 0x4a,
0x0d, 0x79, 0xd8, 0x4c, 0xd9, 0x92, 0xb2, 0xde, 0x05, 0x5d, 0x4f, 0xde, 0xc3, 0xda, 0x99, 0x28,
0x91, 0x7b, 0xad, 0x47, 0xcc, 0xc8, 0x63, 0xef, 0xfe, 0x25, 0x26, 0xcc, 0xb9, 0xa5, 0xfe, 0xb3,
0x3d, 0xd4, 0xaa, 0x7f, 0xab, 0x85, 0x5b, 0xf5, 0x6f, 0x37, 0xec, 0x68, 0xf3, 0xcd, 0xed, 0x62,
0xf6, 0xd0, 0xe3, 0x72, 0x58, 0xfe, 0x18, 0xd6, 0xa8, 0x86, 0x5c, 0x68, 0xcc, 0x04, 0x8b, 0xd3,
0xc9, 0x64, 0xa9, 0xb4, 0xef, 0x83, 0xdf, 0x01, 0x00, 0x00, 0xff, 0xff, 0x0e, 0x75, 0x55, 0x23,
0x56, 0x06, 0x00, 0x00,
}
// --- DRPC BEGIN ---
type DRPCNodeGracefulExitClient interface {
DRPCConn() drpc.Conn
// GetNonExitingSatellites returns a list of satellites that the storagenode has not exited.
GetNonExitingSatellites(ctx context.Context, in *GetNonExitingSatellitesRequest) (*GetNonExitingSatellitesResponse, error)
// InitiateGracefulExit updates one or more satellites in the storagenode's database to be gracefully exiting.
InitiateGracefulExit(ctx context.Context, in *InitiateGracefulExitRequest) (*ExitProgress, error)
// GetExitProgress returns graceful exit status on each satellite for a given storagenode.
GetExitProgress(ctx context.Context, in *GetExitProgressRequest) (*GetExitProgressResponse, error)
// GracefulExitFeasibility returns the node's join date and the number of months the satellite's config requires before graceful exit is allowed.
GracefulExitFeasibility(ctx context.Context, in *GracefulExitFeasibilityRequest) (*GracefulExitFeasibilityResponse, error)
}
type drpcNodeGracefulExitClient struct {
cc drpc.Conn
}
func NewDRPCNodeGracefulExitClient(cc drpc.Conn) DRPCNodeGracefulExitClient {
return &drpcNodeGracefulExitClient{cc}
}
func (c *drpcNodeGracefulExitClient) DRPCConn() drpc.Conn { return c.cc }
func (c *drpcNodeGracefulExitClient) GetNonExitingSatellites(ctx context.Context, in *GetNonExitingSatellitesRequest) (*GetNonExitingSatellitesResponse, error) {
out := new(GetNonExitingSatellitesResponse)
err := c.cc.Invoke(ctx, "/storagenode.gracefulexit.NodeGracefulExit/GetNonExitingSatellites", in, out)
if err != nil {
return nil, err
}
return out, nil
}
func (c *drpcNodeGracefulExitClient) InitiateGracefulExit(ctx context.Context, in *InitiateGracefulExitRequest) (*ExitProgress, error) {
out := new(ExitProgress)
err := c.cc.Invoke(ctx, "/storagenode.gracefulexit.NodeGracefulExit/InitiateGracefulExit", in, out)
if err != nil {
return nil, err
}
return out, nil
}
func (c *drpcNodeGracefulExitClient) GetExitProgress(ctx context.Context, in *GetExitProgressRequest) (*GetExitProgressResponse, error) {
out := new(GetExitProgressResponse)
err := c.cc.Invoke(ctx, "/storagenode.gracefulexit.NodeGracefulExit/GetExitProgress", in, out)
if err != nil {
return nil, err
}
return out, nil
}
func (c *drpcNodeGracefulExitClient) GracefulExitFeasibility(ctx context.Context, in *GracefulExitFeasibilityRequest) (*GracefulExitFeasibilityResponse, error) {
out := new(GracefulExitFeasibilityResponse)
err := c.cc.Invoke(ctx, "/storagenode.gracefulexit.NodeGracefulExit/GracefulExitFeasibility", in, out)
if err != nil {
return nil, err
}
return out, nil
}
type DRPCNodeGracefulExitServer interface {
// GetNonExitingSatellites returns a list of satellites that the storagenode has not exited.
GetNonExitingSatellites(context.Context, *GetNonExitingSatellitesRequest) (*GetNonExitingSatellitesResponse, error)
// InitiateGracefulExit updates one or more satellites in the storagenode's database to be gracefully exiting.
InitiateGracefulExit(context.Context, *InitiateGracefulExitRequest) (*ExitProgress, error)
// GetExitProgress returns graceful exit status on each satellite for a given storagenode.
GetExitProgress(context.Context, *GetExitProgressRequest) (*GetExitProgressResponse, error)
// GracefulExitFeasibility returns node's join date and satellites config's amount of months required for graceful exit to be allowed.
GracefulExitFeasibility(context.Context, *GracefulExitFeasibilityRequest) (*GracefulExitFeasibilityResponse, error)
}
type DRPCNodeGracefulExitDescription struct{}
func (DRPCNodeGracefulExitDescription) NumMethods() int { return 4 }
func (DRPCNodeGracefulExitDescription) Method(n int) (string, drpc.Receiver, interface{}, bool) {
switch n {
case 0:
return "/storagenode.gracefulexit.NodeGracefulExit/GetNonExitingSatellites",
func(srv interface{}, ctx context.Context, in1, in2 interface{}) (drpc.Message, error) {
return srv.(DRPCNodeGracefulExitServer).
GetNonExitingSatellites(
ctx,
in1.(*GetNonExitingSatellitesRequest),
)
}, DRPCNodeGracefulExitServer.GetNonExitingSatellites, true
case 1:
return "/storagenode.gracefulexit.NodeGracefulExit/InitiateGracefulExit",
func(srv interface{}, ctx context.Context, in1, in2 interface{}) (drpc.Message, error) {
return srv.(DRPCNodeGracefulExitServer).
InitiateGracefulExit(
ctx,
in1.(*InitiateGracefulExitRequest),
)
}, DRPCNodeGracefulExitServer.InitiateGracefulExit, true
case 2:
return "/storagenode.gracefulexit.NodeGracefulExit/GetExitProgress",
func(srv interface{}, ctx context.Context, in1, in2 interface{}) (drpc.Message, error) {
return srv.(DRPCNodeGracefulExitServer).
GetExitProgress(
ctx,
in1.(*GetExitProgressRequest),
)
}, DRPCNodeGracefulExitServer.GetExitProgress, true
case 3:
return "/storagenode.gracefulexit.NodeGracefulExit/GracefulExitFeasibility",
func(srv interface{}, ctx context.Context, in1, in2 interface{}) (drpc.Message, error) {
return srv.(DRPCNodeGracefulExitServer).
GracefulExitFeasibility(
ctx,
in1.(*GracefulExitFeasibilityRequest),
)
}, DRPCNodeGracefulExitServer.GracefulExitFeasibility, true
default:
return "", nil, nil, false
}
}
func DRPCRegisterNodeGracefulExit(mux drpc.Mux, impl DRPCNodeGracefulExitServer) error {
return mux.Register(impl, DRPCNodeGracefulExitDescription{})
}
type DRPCNodeGracefulExit_GetNonExitingSatellitesStream interface {
drpc.Stream
SendAndClose(*GetNonExitingSatellitesResponse) error
}
type drpcNodeGracefulExitGetNonExitingSatellitesStream struct {
drpc.Stream
}
func (x *drpcNodeGracefulExitGetNonExitingSatellitesStream) SendAndClose(m *GetNonExitingSatellitesResponse) error {
if err := x.MsgSend(m); err != nil {
return err
}
return x.CloseSend()
}
type DRPCNodeGracefulExit_InitiateGracefulExitStream interface {
drpc.Stream
SendAndClose(*ExitProgress) error
}
type drpcNodeGracefulExitInitiateGracefulExitStream struct {
drpc.Stream
}
func (x *drpcNodeGracefulExitInitiateGracefulExitStream) SendAndClose(m *ExitProgress) error {
if err := x.MsgSend(m); err != nil {
return err
}
return x.CloseSend()
}
type DRPCNodeGracefulExit_GetExitProgressStream interface {
drpc.Stream
SendAndClose(*GetExitProgressResponse) error
}
type drpcNodeGracefulExitGetExitProgressStream struct {
drpc.Stream
}
func (x *drpcNodeGracefulExitGetExitProgressStream) SendAndClose(m *GetExitProgressResponse) error {
if err := x.MsgSend(m); err != nil {
return err
}
return x.CloseSend()
}
type DRPCNodeGracefulExit_GracefulExitFeasibilityStream interface {
drpc.Stream
SendAndClose(*GracefulExitFeasibilityResponse) error
}
type drpcNodeGracefulExitGracefulExitFeasibilityStream struct {
drpc.Stream
}
func (x *drpcNodeGracefulExitGracefulExitFeasibilityStream) SendAndClose(m *GracefulExitFeasibilityResponse) error {
if err := x.MsgSend(m); err != nil {
return err
}
return x.CloseSend()
}
// --- DRPC END ---
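As a hedged sketch of how the generated bindings above might be consumed from a storagenode-side tool; how the drpc.Conn to the node's private endpoint gets dialed is intentionally omitted and is the caller's responsibility:

package gracefulexittool

import (
    "context"
    "fmt"

    "storj.io/drpc"

    "storj.io/storj/storagenode/internalpb"
)

// printExitProgress queries graceful exit progress over an already established
// private-API connection and prints one line per exiting satellite.
func printExitProgress(ctx context.Context, conn drpc.Conn) error {
    client := internalpb.NewDRPCNodeGracefulExitClient(conn)
    resp, err := client.GetExitProgress(ctx, &internalpb.GetExitProgressRequest{})
    if err != nil {
        return err
    }
    for _, p := range resp.GetProgress() {
        fmt.Printf("%s: complete=%v successful=%v\n", p.DomainName, p.PercentComplete, p.Successful)
    }
    return nil
}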

View File

@ -0,0 +1,63 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.
syntax = "proto3";
option go_package = "storj.io/storj/storagenode/internalpb";
import "gogo.proto";
import "google/protobuf/timestamp.proto";
package storagenode.gracefulexit;
// NodeGracefulExit is a private service on storagenodes.
service NodeGracefulExit {
// GetNonExitingSatellites returns a list of satellites that the storagenode has not exited.
rpc GetNonExitingSatellites(GetNonExitingSatellitesRequest) returns (GetNonExitingSatellitesResponse);
// InitiateGracefulExit updates the given satellite in the storagenode's database to be gracefully exiting.
rpc InitiateGracefulExit(InitiateGracefulExitRequest) returns (ExitProgress);
// GetExitProgress returns graceful exit status on each satellite for a given storagenode.
rpc GetExitProgress(GetExitProgressRequest) returns (GetExitProgressResponse);
// GracefulExitFeasibility returns the node's join date and the number of months the satellite's configuration requires before graceful exit is allowed.
rpc GracefulExitFeasibility(GracefulExitFeasibilityRequest) returns (GracefulExitFeasibilityResponse);
}
message GetNonExitingSatellitesRequest{}
message GetNonExitingSatellitesResponse {
repeated NonExitingSatellite satellites = 1;
}
// NonExitingSatellite contains information that's needed for a storagenode to start graceful exit.
message NonExitingSatellite {
bytes node_id = 1 [(gogoproto.customtype) = "NodeID", (gogoproto.nullable) = false];
string domain_name = 2;
double space_used = 3;
}
message InitiateGracefulExitRequest {
bytes node_id = 1 [(gogoproto.customtype) = "NodeID", (gogoproto.nullable) = false];
}
message GetExitProgressRequest {}
message GetExitProgressResponse {
repeated ExitProgress progress = 1;
}
message ExitProgress {
string domain_name = 1;
bytes node_id = 2 [(gogoproto.customtype) = "NodeID", (gogoproto.nullable) = false];
float percent_complete = 3;
bool successful = 4;
bytes completion_receipt = 5;
}
message GracefulExitFeasibilityRequest {
bytes node_id = 1 [(gogoproto.customtype) = "NodeID", (gogoproto.nullable) = false];
}
message GracefulExitFeasibilityResponse {
google.protobuf.Timestamp joined_at = 1 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
int32 months_required = 2;
bool is_allowed = 3;
}
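The messages above imply a two-step operator flow: first ask whether graceful exit is allowed for a satellite, then initiate it and report initial progress. A hedged sketch of that flow against the generated internalpb client follows; checkAndStartExit is an illustrative helper, not part of the codebase.

package example

import (
    "context"
    "fmt"

    "storj.io/common/storj"
    "storj.io/drpc"

    "storj.io/storj/storagenode/internalpb"
)

// checkAndStartExit checks whether graceful exit is allowed for the given
// satellite and, if so, marks it as exiting and prints the initial progress.
func checkAndStartExit(ctx context.Context, conn drpc.Conn, satelliteID storj.NodeID) error {
    client := internalpb.NewDRPCNodeGracefulExitClient(conn)

    feasibility, err := client.GracefulExitFeasibility(ctx, &internalpb.GracefulExitFeasibilityRequest{NodeId: satelliteID})
    if err != nil {
        return err
    }
    if !feasibility.IsAllowed {
        return fmt.Errorf("graceful exit not allowed yet: joined %s, %d months required",
            feasibility.JoinedAt, feasibility.MonthsRequired)
    }

    progress, err := client.InitiateGracefulExit(ctx, &internalpb.InitiateGracefulExitRequest{NodeId: satelliteID})
    if err != nil {
        return err
    }
    fmt.Printf("exit started on %s: %.1f%% complete\n", progress.DomainName, progress.PercentComplete)
    return nil
}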

View File

@ -0,0 +1,442 @@
// Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: inspector.proto
package internalpb
import (
context "context"
fmt "fmt"
math "math"
time "time"
proto "github.com/gogo/protobuf/proto"
drpc "storj.io/drpc"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
var _ = time.Kitchen
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
type StatsRequest struct {
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *StatsRequest) Reset() { *m = StatsRequest{} }
func (m *StatsRequest) String() string { return proto.CompactTextString(m) }
func (*StatsRequest) ProtoMessage() {}
func (*StatsRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_a07d9034b2dd9d26, []int{0}
}
func (m *StatsRequest) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_StatsRequest.Unmarshal(m, b)
}
func (m *StatsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_StatsRequest.Marshal(b, m, deterministic)
}
func (m *StatsRequest) XXX_Merge(src proto.Message) {
xxx_messageInfo_StatsRequest.Merge(m, src)
}
func (m *StatsRequest) XXX_Size() int {
return xxx_messageInfo_StatsRequest.Size(m)
}
func (m *StatsRequest) XXX_DiscardUnknown() {
xxx_messageInfo_StatsRequest.DiscardUnknown(m)
}
var xxx_messageInfo_StatsRequest proto.InternalMessageInfo
type StatSummaryResponse struct {
UsedSpace int64 `protobuf:"varint,1,opt,name=used_space,json=usedSpace,proto3" json:"used_space,omitempty"`
AvailableSpace int64 `protobuf:"varint,2,opt,name=available_space,json=availableSpace,proto3" json:"available_space,omitempty"`
UsedIngress int64 `protobuf:"varint,3,opt,name=used_ingress,json=usedIngress,proto3" json:"used_ingress,omitempty"`
UsedEgress int64 `protobuf:"varint,4,opt,name=used_egress,json=usedEgress,proto3" json:"used_egress,omitempty"`
UsedBandwidth int64 `protobuf:"varint,5,opt,name=used_bandwidth,json=usedBandwidth,proto3" json:"used_bandwidth,omitempty"`
AvailableBandwidth int64 `protobuf:"varint,6,opt,name=available_bandwidth,json=availableBandwidth,proto3" json:"available_bandwidth,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *StatSummaryResponse) Reset() { *m = StatSummaryResponse{} }
func (m *StatSummaryResponse) String() string { return proto.CompactTextString(m) }
func (*StatSummaryResponse) ProtoMessage() {}
func (*StatSummaryResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_a07d9034b2dd9d26, []int{1}
}
func (m *StatSummaryResponse) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_StatSummaryResponse.Unmarshal(m, b)
}
func (m *StatSummaryResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_StatSummaryResponse.Marshal(b, m, deterministic)
}
func (m *StatSummaryResponse) XXX_Merge(src proto.Message) {
xxx_messageInfo_StatSummaryResponse.Merge(m, src)
}
func (m *StatSummaryResponse) XXX_Size() int {
return xxx_messageInfo_StatSummaryResponse.Size(m)
}
func (m *StatSummaryResponse) XXX_DiscardUnknown() {
xxx_messageInfo_StatSummaryResponse.DiscardUnknown(m)
}
var xxx_messageInfo_StatSummaryResponse proto.InternalMessageInfo
func (m *StatSummaryResponse) GetUsedSpace() int64 {
if m != nil {
return m.UsedSpace
}
return 0
}
func (m *StatSummaryResponse) GetAvailableSpace() int64 {
if m != nil {
return m.AvailableSpace
}
return 0
}
func (m *StatSummaryResponse) GetUsedIngress() int64 {
if m != nil {
return m.UsedIngress
}
return 0
}
func (m *StatSummaryResponse) GetUsedEgress() int64 {
if m != nil {
return m.UsedEgress
}
return 0
}
func (m *StatSummaryResponse) GetUsedBandwidth() int64 {
if m != nil {
return m.UsedBandwidth
}
return 0
}
func (m *StatSummaryResponse) GetAvailableBandwidth() int64 {
if m != nil {
return m.AvailableBandwidth
}
return 0
}
type DashboardRequest struct {
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *DashboardRequest) Reset() { *m = DashboardRequest{} }
func (m *DashboardRequest) String() string { return proto.CompactTextString(m) }
func (*DashboardRequest) ProtoMessage() {}
func (*DashboardRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_a07d9034b2dd9d26, []int{2}
}
func (m *DashboardRequest) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_DashboardRequest.Unmarshal(m, b)
}
func (m *DashboardRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_DashboardRequest.Marshal(b, m, deterministic)
}
func (m *DashboardRequest) XXX_Merge(src proto.Message) {
xxx_messageInfo_DashboardRequest.Merge(m, src)
}
func (m *DashboardRequest) XXX_Size() int {
return xxx_messageInfo_DashboardRequest.Size(m)
}
func (m *DashboardRequest) XXX_DiscardUnknown() {
xxx_messageInfo_DashboardRequest.DiscardUnknown(m)
}
var xxx_messageInfo_DashboardRequest proto.InternalMessageInfo
type DashboardResponse struct {
NodeId NodeID `protobuf:"bytes,1,opt,name=node_id,json=nodeId,proto3,customtype=NodeID" json:"node_id"`
NodeConnections int64 `protobuf:"varint,2,opt,name=node_connections,json=nodeConnections,proto3" json:"node_connections,omitempty"`
BootstrapAddress string `protobuf:"bytes,3,opt,name=bootstrap_address,json=bootstrapAddress,proto3" json:"bootstrap_address,omitempty"` // Deprecated: Do not use.
InternalAddress string `protobuf:"bytes,4,opt,name=internal_address,json=internalAddress,proto3" json:"internal_address,omitempty"`
ExternalAddress string `protobuf:"bytes,5,opt,name=external_address,json=externalAddress,proto3" json:"external_address,omitempty"`
DashboardAddress string `protobuf:"bytes,6,opt,name=dashboard_address,json=dashboardAddress,proto3" json:"dashboard_address,omitempty"`
Stats *StatSummaryResponse `protobuf:"bytes,7,opt,name=stats,proto3" json:"stats,omitempty"`
Uptime string `protobuf:"bytes,8,opt,name=uptime,proto3" json:"uptime,omitempty"`
LastPinged time.Time `protobuf:"bytes,9,opt,name=last_pinged,json=lastPinged,proto3,stdtime" json:"last_pinged"`
LastQueried time.Time `protobuf:"bytes,10,opt,name=last_queried,json=lastQueried,proto3,stdtime" json:"last_queried"`
LastPingFromId *NodeID `protobuf:"bytes,11,opt,name=last_ping_from_id,json=lastPingFromId,proto3,customtype=NodeID" json:"last_ping_from_id,omitempty"`
LastPingFromAddress string `protobuf:"bytes,12,opt,name=last_ping_from_address,json=lastPingFromAddress,proto3" json:"last_ping_from_address,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *DashboardResponse) Reset() { *m = DashboardResponse{} }
func (m *DashboardResponse) String() string { return proto.CompactTextString(m) }
func (*DashboardResponse) ProtoMessage() {}
func (*DashboardResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_a07d9034b2dd9d26, []int{3}
}
func (m *DashboardResponse) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_DashboardResponse.Unmarshal(m, b)
}
func (m *DashboardResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_DashboardResponse.Marshal(b, m, deterministic)
}
func (m *DashboardResponse) XXX_Merge(src proto.Message) {
xxx_messageInfo_DashboardResponse.Merge(m, src)
}
func (m *DashboardResponse) XXX_Size() int {
return xxx_messageInfo_DashboardResponse.Size(m)
}
func (m *DashboardResponse) XXX_DiscardUnknown() {
xxx_messageInfo_DashboardResponse.DiscardUnknown(m)
}
var xxx_messageInfo_DashboardResponse proto.InternalMessageInfo
func (m *DashboardResponse) GetNodeConnections() int64 {
if m != nil {
return m.NodeConnections
}
return 0
}
// Deprecated: Do not use.
func (m *DashboardResponse) GetBootstrapAddress() string {
if m != nil {
return m.BootstrapAddress
}
return ""
}
func (m *DashboardResponse) GetInternalAddress() string {
if m != nil {
return m.InternalAddress
}
return ""
}
func (m *DashboardResponse) GetExternalAddress() string {
if m != nil {
return m.ExternalAddress
}
return ""
}
func (m *DashboardResponse) GetDashboardAddress() string {
if m != nil {
return m.DashboardAddress
}
return ""
}
func (m *DashboardResponse) GetStats() *StatSummaryResponse {
if m != nil {
return m.Stats
}
return nil
}
func (m *DashboardResponse) GetUptime() string {
if m != nil {
return m.Uptime
}
return ""
}
func (m *DashboardResponse) GetLastPinged() time.Time {
if m != nil {
return m.LastPinged
}
return time.Time{}
}
func (m *DashboardResponse) GetLastQueried() time.Time {
if m != nil {
return m.LastQueried
}
return time.Time{}
}
func (m *DashboardResponse) GetLastPingFromAddress() string {
if m != nil {
return m.LastPingFromAddress
}
return ""
}
func init() {
proto.RegisterType((*StatsRequest)(nil), "storagenode.inspector.StatsRequest")
proto.RegisterType((*StatSummaryResponse)(nil), "storagenode.inspector.StatSummaryResponse")
proto.RegisterType((*DashboardRequest)(nil), "storagenode.inspector.DashboardRequest")
proto.RegisterType((*DashboardResponse)(nil), "storagenode.inspector.DashboardResponse")
}
func init() { proto.RegisterFile("inspector.proto", fileDescriptor_a07d9034b2dd9d26) }
var fileDescriptor_a07d9034b2dd9d26 = []byte{
// 598 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x93, 0x4f, 0x6f, 0xd3, 0x30,
0x18, 0xc6, 0x97, 0x6d, 0xed, 0xd6, 0xb7, 0xa5, 0x7f, 0x5c, 0x31, 0x45, 0x95, 0x50, 0x47, 0xd1,
0xd4, 0x01, 0x52, 0x22, 0x6d, 0xe2, 0x0e, 0x65, 0x03, 0xf5, 0x82, 0x46, 0xca, 0x69, 0x97, 0xe2,
0xd4, 0xef, 0x32, 0xa3, 0x36, 0xce, 0x6c, 0x87, 0x3f, 0xdf, 0x82, 0x8f, 0xc5, 0x9d, 0x1b, 0x87,
0xf1, 0x19, 0x38, 0x72, 0x43, 0x76, 0xea, 0xac, 0xaa, 0x06, 0x68, 0xb7, 0xf8, 0x79, 0x7f, 0xaf,
0xed, 0x3c, 0xef, 0x63, 0x68, 0xf1, 0x54, 0x65, 0x38, 0xd3, 0x42, 0x06, 0x99, 0x14, 0x5a, 0x90,
0xfb, 0x4a, 0x0b, 0x49, 0x13, 0x4c, 0x05, 0xc3, 0xa0, 0x2c, 0xf6, 0x20, 0x11, 0x89, 0x28, 0x90,
0x5e, 0x3f, 0x11, 0x22, 0x99, 0x63, 0x68, 0x57, 0x71, 0x7e, 0x11, 0x6a, 0xbe, 0x40, 0xa5, 0xe9,
0x22, 0x2b, 0x80, 0x41, 0x13, 0x1a, 0x13, 0x4d, 0xb5, 0x8a, 0xf0, 0x2a, 0x47, 0xa5, 0x07, 0xbf,
0x3d, 0xe8, 0x1a, 0x61, 0x92, 0x2f, 0x16, 0x54, 0x7e, 0x89, 0x50, 0x65, 0x22, 0x55, 0x48, 0x1e,
0x00, 0xe4, 0x0a, 0xd9, 0x54, 0x65, 0x74, 0x86, 0xbe, 0xb7, 0xef, 0x1d, 0x6e, 0x45, 0x35, 0xa3,
0x4c, 0x8c, 0x40, 0x86, 0xd0, 0xa2, 0x1f, 0x29, 0x9f, 0xd3, 0x78, 0x8e, 0x4b, 0x66, 0xd3, 0x32,
0xcd, 0x52, 0x2e, 0xc0, 0x87, 0xd0, 0xb0, 0xfb, 0xf0, 0x34, 0x91, 0xa8, 0x94, 0xbf, 0x65, 0xa9,
0xba, 0xd1, 0xc6, 0x85, 0x44, 0xfa, 0x60, 0x97, 0x53, 0x2c, 0x88, 0x6d, 0x4b, 0xd8, 0xd3, 0x4f,
0x0b, 0xe0, 0x00, 0x9a, 0x16, 0x88, 0x69, 0xca, 0x3e, 0x71, 0xa6, 0x2f, 0xfd, 0x8a, 0x65, 0xee,
0x19, 0x75, 0xe4, 0x44, 0x12, 0x42, 0xf7, 0xe6, 0x4e, 0x37, 0x6c, 0xd5, 0xb2, 0xa4, 0x2c, 0x95,
0x0d, 0x03, 0x02, 0xed, 0x13, 0xaa, 0x2e, 0x63, 0x41, 0x25, 0x73, 0x7e, 0xfc, 0xda, 0x86, 0xce,
0x8a, 0xb8, 0x74, 0x63, 0x08, 0x3b, 0xc6, 0xf4, 0x29, 0x67, 0xd6, 0x8a, 0xc6, 0xa8, 0xf9, 0xed,
0xba, 0xbf, 0xf1, 0xe3, 0xba, 0x5f, 0x7d, 0x23, 0x18, 0x8e, 0x4f, 0xa2, 0xaa, 0x29, 0x8f, 0x19,
0x79, 0x0c, 0x6d, 0x0b, 0xce, 0x44, 0x9a, 0xe2, 0x4c, 0x73, 0x91, 0xaa, 0xa5, 0x31, 0x2d, 0xa3,
0xbf, 0xbc, 0x91, 0x49, 0x08, 0x9d, 0x58, 0x08, 0xad, 0xb4, 0xa4, 0xd9, 0x94, 0x32, 0x56, 0xda,
0x53, 0x1b, 0x6d, 0xfa, 0x5e, 0xd4, 0x2e, 0x8b, 0x2f, 0x8a, 0x9a, 0xd9, 0x9b, 0xa7, 0x1a, 0x65,
0x4a, 0xe7, 0x25, 0x6f, 0xcc, 0xaa, 0x45, 0x2d, 0xa7, 0xaf, 0xa0, 0xf8, 0x79, 0x0d, 0xad, 0x14,
0xa8, 0xd3, 0x1d, 0xfa, 0x14, 0x3a, 0xcc, 0xfd, 0x6f, 0xc9, 0x56, 0x2d, 0xdb, 0x2e, 0x0b, 0x0e,
0x7e, 0x0e, 0x15, 0x65, 0xd2, 0xe3, 0xef, 0xec, 0x7b, 0x87, 0xf5, 0xa3, 0x27, 0xc1, 0xad, 0x89,
0x0c, 0x6e, 0x09, 0x54, 0x54, 0x34, 0x92, 0x3d, 0xa8, 0xe6, 0x99, 0x09, 0xa5, 0xbf, 0x6b, 0xcf,
0x58, 0xae, 0xc8, 0x29, 0xd4, 0xe7, 0x54, 0xe9, 0x69, 0xc6, 0xd3, 0x04, 0x99, 0x5f, 0xb3, 0xfb,
0xf7, 0x82, 0x22, 0xce, 0x81, 0x8b, 0x73, 0xf0, 0xce, 0xc5, 0x79, 0xb4, 0x6b, 0x26, 0xf0, 0xf5,
0x67, 0xdf, 0x8b, 0xc0, 0x34, 0x9e, 0xd9, 0x3e, 0xf2, 0x1a, 0x1a, 0x76, 0x9b, 0xab, 0x1c, 0x25,
0x47, 0xe6, 0xc3, 0x1d, 0xf6, 0xb1, 0x17, 0x78, 0x5b, 0x34, 0x92, 0x67, 0xd0, 0x29, 0xef, 0x33,
0xbd, 0x90, 0x62, 0x61, 0x66, 0x5f, 0xb7, 0xb3, 0x87, 0x95, 0xb9, 0x37, 0xdd, 0xd9, 0xaf, 0xa4,
0x58, 0x8c, 0x19, 0x39, 0x86, 0xbd, 0xb5, 0x36, 0x67, 0x69, 0xc3, 0xfe, 0x6e, 0x77, 0x95, 0x5f,
0xba, 0x7a, 0xf4, 0xdd, 0x83, 0xee, 0x19, 0xc7, 0x19, 0x4e, 0xb4, 0x90, 0x38, 0x76, 0x36, 0x92,
0x73, 0xa8, 0xd8, 0xb7, 0x4a, 0x1e, 0xfd, 0xc3, 0x67, 0xf7, 0x92, 0x7b, 0x77, 0x18, 0xc6, 0x60,
0x83, 0xbc, 0x87, 0x5a, 0x19, 0x73, 0x32, 0xfc, 0x4b, 0xeb, 0xfa, 0xeb, 0xe8, 0x1d, 0xfe, 0x1f,
0x74, 0x27, 0x8c, 0x86, 0xe7, 0x07, 0x06, 0xfe, 0x10, 0x70, 0x11, 0xda, 0x8f, 0x70, 0xa5, 0x37,
0x74, 0x89, 0xcd, 0xe2, 0xb8, 0x6a, 0xa7, 0x72, 0xfc, 0x27, 0x00, 0x00, 0xff, 0xff, 0x8e, 0x0f,
0xce, 0xb4, 0xf0, 0x04, 0x00, 0x00,
}
// --- DRPC BEGIN ---
type DRPCPieceStoreInspectorClient interface {
DRPCConn() drpc.Conn
// Stats returns space and bandwidth stats for a storagenode
Stats(ctx context.Context, in *StatsRequest) (*StatSummaryResponse, error)
// Dashboard returns stats for a specific storagenode
Dashboard(ctx context.Context, in *DashboardRequest) (*DashboardResponse, error)
}
type drpcPieceStoreInspectorClient struct {
cc drpc.Conn
}
func NewDRPCPieceStoreInspectorClient(cc drpc.Conn) DRPCPieceStoreInspectorClient {
return &drpcPieceStoreInspectorClient{cc}
}
func (c *drpcPieceStoreInspectorClient) DRPCConn() drpc.Conn { return c.cc }
func (c *drpcPieceStoreInspectorClient) Stats(ctx context.Context, in *StatsRequest) (*StatSummaryResponse, error) {
out := new(StatSummaryResponse)
err := c.cc.Invoke(ctx, "/storagenode.inspector.PieceStoreInspector/Stats", in, out)
if err != nil {
return nil, err
}
return out, nil
}
func (c *drpcPieceStoreInspectorClient) Dashboard(ctx context.Context, in *DashboardRequest) (*DashboardResponse, error) {
out := new(DashboardResponse)
err := c.cc.Invoke(ctx, "/storagenode.inspector.PieceStoreInspector/Dashboard", in, out)
if err != nil {
return nil, err
}
return out, nil
}
type DRPCPieceStoreInspectorServer interface {
// Stats returns space and bandwidth stats for a storagenode
Stats(context.Context, *StatsRequest) (*StatSummaryResponse, error)
// Dashboard returns stats for a specific storagenode
Dashboard(context.Context, *DashboardRequest) (*DashboardResponse, error)
}
type DRPCPieceStoreInspectorDescription struct{}
func (DRPCPieceStoreInspectorDescription) NumMethods() int { return 2 }
func (DRPCPieceStoreInspectorDescription) Method(n int) (string, drpc.Receiver, interface{}, bool) {
switch n {
case 0:
return "/storagenode.inspector.PieceStoreInspector/Stats",
func(srv interface{}, ctx context.Context, in1, in2 interface{}) (drpc.Message, error) {
return srv.(DRPCPieceStoreInspectorServer).
Stats(
ctx,
in1.(*StatsRequest),
)
}, DRPCPieceStoreInspectorServer.Stats, true
case 1:
return "/storagenode.inspector.PieceStoreInspector/Dashboard",
func(srv interface{}, ctx context.Context, in1, in2 interface{}) (drpc.Message, error) {
return srv.(DRPCPieceStoreInspectorServer).
Dashboard(
ctx,
in1.(*DashboardRequest),
)
}, DRPCPieceStoreInspectorServer.Dashboard, true
default:
return "", nil, nil, false
}
}
func DRPCRegisterPieceStoreInspector(mux drpc.Mux, impl DRPCPieceStoreInspectorServer) error {
return mux.Register(impl, DRPCPieceStoreInspectorDescription{})
}
type DRPCPieceStoreInspector_StatsStream interface {
drpc.Stream
SendAndClose(*StatSummaryResponse) error
}
type drpcPieceStoreInspectorStatsStream struct {
drpc.Stream
}
func (x *drpcPieceStoreInspectorStatsStream) SendAndClose(m *StatSummaryResponse) error {
if err := x.MsgSend(m); err != nil {
return err
}
return x.CloseSend()
}
type DRPCPieceStoreInspector_DashboardStream interface {
drpc.Stream
SendAndClose(*DashboardResponse) error
}
type drpcPieceStoreInspectorDashboardStream struct {
drpc.Stream
}
func (x *drpcPieceStoreInspectorDashboardStream) SendAndClose(m *DashboardResponse) error {
if err := x.MsgSend(m); err != nil {
return err
}
return x.CloseSend()
}
// --- DRPC END ---
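DRPCRegisterPieceStoreInspector above is the hook for wiring an implementation of DRPCPieceStoreInspectorServer into a DRPC mux. The generic sketch below uses storj.io/drpc/drpcmux and drpcserver directly; the storagenode peer instead registers the endpoint on its existing private DRPC server, so serveInspector is an illustrative helper only.

package example

import (
    "context"
    "net"

    "storj.io/drpc/drpcmux"
    "storj.io/drpc/drpcserver"

    "storj.io/storj/storagenode/internalpb"
)

// serveInspector registers an inspector implementation on a fresh DRPC mux and
// serves it on the given listener until the context is canceled.
func serveInspector(ctx context.Context, lis net.Listener, endpoint internalpb.DRPCPieceStoreInspectorServer) error {
    mux := drpcmux.New()
    if err := internalpb.DRPCRegisterPieceStoreInspector(mux, endpoint); err != nil {
        return err
    }
    return drpcserver.New(mux).Serve(ctx, lis)
}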

View File

@ -0,0 +1,48 @@
// Copyright (C) 2019 Storj Labs, Inc.
// See LICENSE for copying information.
syntax = "proto3";
option go_package = "storj.io/storj/storagenode/internalpb";
import "gogo.proto";
import "google/protobuf/timestamp.proto";
package storagenode.inspector;
service PieceStoreInspector {
// Stats returns space and bandwidth stats for a storagenode
rpc Stats(StatsRequest) returns (StatSummaryResponse) {}
// Dashboard returns stats for a specific storagenode
rpc Dashboard(DashboardRequest) returns (DashboardResponse) {}
}
message StatsRequest {
}
message StatSummaryResponse {
int64 used_space = 1;
int64 available_space = 2;
int64 used_ingress = 3;
int64 used_egress = 4;
int64 used_bandwidth = 5;
int64 available_bandwidth = 6;
}
message DashboardRequest {
}
message DashboardResponse {
bytes node_id = 1 [(gogoproto.customtype) = "NodeID", (gogoproto.nullable) = false];
int64 node_connections = 2;
string bootstrap_address = 3 [deprecated=true];
string internal_address = 4;
string external_address = 5;
string dashboard_address = 6;
StatSummaryResponse stats = 7;
string uptime = 8;
google.protobuf.Timestamp last_pinged = 9 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
google.protobuf.Timestamp last_queried = 10 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false];
bytes last_ping_from_id = 11 [(gogoproto.customtype) = "NodeID"];
string last_ping_from_address = 12;
}
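As a reading aid for StatSummaryResponse, the sketch below computes disk utilization. It assumes used_space and available_space are byte counts and that available_space reports remaining (not total) capacity, as the field pairing suggests; printUsage is an illustrative helper.

package example

import (
    "fmt"

    "storj.io/storj/storagenode/internalpb"
)

// printUsage reports disk utilization and bandwidth usage from a stats summary,
// treating total capacity as used space plus remaining available space.
func printUsage(stats *internalpb.StatSummaryResponse) {
    total := stats.UsedSpace + stats.AvailableSpace
    if total == 0 {
        fmt.Println("no capacity reported")
        return
    }
    fmt.Printf("disk: %d / %d bytes (%.1f%%), bandwidth used: %d bytes\n",
        stats.UsedSpace, total, 100*float64(stats.UsedSpace)/float64(total), stats.UsedBandwidth)
}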

View File

@ -0,0 +1,12 @@
// Copyright (C) 2020 Storj Labs, Inc.
// See LICENSE for copying information.
package internalpb
import "storj.io/common/storj"
// PieceID is an alias to storj.PieceID for use in generated protobuf code.
type PieceID = storj.PieceID
// NodeID is an alias to storj.NodeID for use in generated protobuf code.
type NodeID = storj.NodeID
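Because these are type aliases rather than new named types, values flow between storj and internalpb without conversion; the short sketch below illustrates this (toInternalID is an illustrative helper).

package example

import (
    "storj.io/common/storj"

    "storj.io/storj/storagenode/internalpb"
)

// toInternalID compiles without a cast because internalpb.NodeID is an alias
// of storj.NodeID, so the two names denote the same type.
func toInternalID(id storj.NodeID) internalpb.NodeID {
    return id
}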

View File

@ -10,10 +10,10 @@ import (
"github.com/stretchr/testify/require"
"storj.io/common/memory"
"storj.io/common/pb"
"storj.io/common/testcontext"
"storj.io/common/testrand"
"storj.io/storj/private/testplanet"
"storj.io/storj/storagenode/internalpb"
)
func TestMonitor(t *testing.T) {
@ -34,7 +34,7 @@ func TestMonitor(t *testing.T) {
storageNode.Storage2.Monitor.Loop.TriggerWait()
storageNode.Storage2.Monitor.VerifyDirReadableLoop.TriggerWait()
storageNode.Storage2.Monitor.VerifyDirWritableLoop.TriggerWait()
stats, err := storageNode.Storage2.Inspector.Stats(ctx, &pb.StatsRequest{})
stats, err := storageNode.Storage2.Inspector.Stats(ctx, &internalpb.StatsRequest{})
require.NoError(t, err)
if stats.UsedSpace > 0 {
nodeAssertions++

View File

@ -40,6 +40,7 @@ import (
"storj.io/storj/storagenode/contact"
"storj.io/storj/storagenode/gracefulexit"
"storj.io/storj/storagenode/inspector"
"storj.io/storj/storagenode/internalpb"
"storj.io/storj/storagenode/monitor"
"storj.io/storj/storagenode/nodestats"
"storj.io/storj/storagenode/notifications"
@ -681,7 +682,7 @@ func New(log *zap.Logger, full *identity.FullIdentity, db DB, revocationDB exten
peer.Console.Listener.Addr(),
config.Contact.ExternalAddress,
)
if err := pb.DRPCRegisterPieceStoreInspector(peer.Server.PrivateDRPC(), peer.Storage2.Inspector); err != nil {
if err := internalpb.DRPCRegisterPieceStoreInspector(peer.Server.PrivateDRPC(), peer.Storage2.Inspector); err != nil {
return nil, errs.Combine(err, peer.Close())
}
}
@ -715,7 +716,7 @@ func New(log *zap.Logger, full *identity.FullIdentity, db DB, revocationDB exten
peer.Dialer,
peer.Storage2.BlobsCache,
)
if err := pb.DRPCRegisterNodeGracefulExit(peer.Server.PrivateDRPC(), peer.GracefulExit.Endpoint); err != nil {
if err := internalpb.DRPCRegisterNodeGracefulExit(peer.Server.PrivateDRPC(), peer.GracefulExit.Endpoint); err != nil {
return nil, errs.Combine(err, peer.Close())
}