rename farmers to storagenode (#275)
* rename farmers to storagenode
* review changes
* merge conflicts
parent 3154086e58
commit 09da23737a
Makefile (2 changed lines)
@@ -106,7 +106,7 @@ satellite-image:
 docker build --build-arg GO_VERSION=${GO_VERSION} -t storjlabs/satellite:${TAG} -f cmd/hc/Dockerfile .
 .PHONY: storage-node-image
 storage-node-image:
-docker build --build-arg GO_VERSION=${GO_VERSION} -t storjlabs/storage-node:${TAG} -f cmd/farmer/Dockerfile .
+docker build --build-arg GO_VERSION=${GO_VERSION} -t storjlabs/storage-node:${TAG} -f cmd/storagenode/Dockerfile .
 .PHONY: uplink-image
 uplink-image:
 docker build --build-arg GO_VERSION=${GO_VERSION} -t storjlabs/uplink:${TAG} -f cmd/uplink/Dockerfile .
@@ -20,7 +20,7 @@ import (
 )

 const (
-farmerCount = 50
+storagenodeCount = 50
 )

 type HeavyClient struct {
@@ -31,7 +31,7 @@ type HeavyClient struct {
 MockOverlay bool `default:"true" help:"if false, use real overlay"`
 }

-type Farmer struct {
+type StorageNode struct {
 Identity provider.IdentityConfig
 Kademlia kademlia.Config
 Storage psserver.Config
@@ -45,9 +45,9 @@ var (
 }

 runCfg struct {
 HeavyClient HeavyClient
-Farmers [farmerCount]Farmer
+StorageNodes [storagenodeCount]StorageNode
 Uplink miniogw.Config
 }
 )

@@ -60,25 +60,25 @@ func cmdRun(cmd *cobra.Command, args []string) (err error) {
 ctx := process.Ctx(cmd)
 defer mon.Task()(&ctx)(&err)

-errch := make(chan error, len(runCfg.Farmers)+2)
-var farmers []string
+errch := make(chan error, len(runCfg.StorageNodes)+2)
+var storagenodes []string

-// start the farmers
-for i := 0; i < len(runCfg.Farmers); i++ {
-identity, err := runCfg.Farmers[i].Identity.Load()
+// start the storagenodes
+for i := 0; i < len(runCfg.StorageNodes); i++ {
+identity, err := runCfg.StorageNodes[i].Identity.Load()
 if err != nil {
 return err
 }
-farmer := fmt.Sprintf("%s:%s",
-identity.ID.String(), runCfg.Farmers[i].Identity.Address)
-farmers = append(farmers, farmer)
-go func(i int, farmer string) {
-_, _ = fmt.Printf("starting farmer %d %s (kad on %s)\n", i, farmer,
-runCfg.Farmers[i].Kademlia.TODOListenAddr)
-errch <- runCfg.Farmers[i].Identity.Run(ctx,
-runCfg.Farmers[i].Kademlia,
-runCfg.Farmers[i].Storage)
-}(i, farmer)
+storagenode := fmt.Sprintf("%s:%s",
+identity.ID.String(), runCfg.StorageNodes[i].Identity.Address)
+storagenodes = append(storagenodes, storagenode)
+go func(i int, storagenode string) {
+_, _ = fmt.Printf("starting storagenode %d %s (kad on %s)\n", i, storagenode,
+runCfg.StorageNodes[i].Kademlia.TODOListenAddr)
+errch <- runCfg.StorageNodes[i].Identity.Run(ctx,
+runCfg.StorageNodes[i].Kademlia,
+runCfg.StorageNodes[i].Storage)
+}(i, storagenode)
 }

 // start heavy client
@@ -87,7 +87,7 @@ func cmdRun(cmd *cobra.Command, args []string) (err error) {
 runCfg.HeavyClient.Identity.Address)
 var o provider.Responsibility = runCfg.HeavyClient.Overlay
 if runCfg.HeavyClient.MockOverlay {
-o = overlay.MockConfig{Nodes: strings.Join(farmers, ",")}
+o = overlay.MockConfig{Nodes: strings.Join(storagenodes, ",")}
 }
 errch <- runCfg.HeavyClient.Identity.Run(ctx,
 runCfg.HeavyClient.Kademlia,
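In cmdRun above, each storage node is registered as an `id:address` string and started in its own goroutine, all of them reporting into one buffered error channel sized for every node plus two extra slots (the heavy client and, presumably, the uplink). A minimal standalone sketch of that fan-out pattern, not part of this commit, with hypothetical runners standing in for the real `Identity.Run` calls:

```go
package main

import (
	"context"
	"fmt"
)

// runAll mimics the shape of cmdRun above: buffer one error slot per
// runner, start each runner in a goroutine, and surface the first result.
func runAll(ctx context.Context, runners []func(context.Context) error) error {
	errch := make(chan error, len(runners))
	for i, run := range runners {
		go func(i int, run func(context.Context) error) {
			fmt.Printf("starting runner %d\n", i)
			errch <- run(ctx)
		}(i, run)
	}
	return <-errch // this sketch simply waits for the first value
}

func main() {
	runners := []func(context.Context) error{
		func(context.Context) error { return nil },
		func(context.Context) error { return fmt.Errorf("hypothetical failure") },
	}
	fmt.Println(runAll(context.Background(), runners))
}
```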
@@ -20,16 +20,16 @@ import (

 // Config defines broad Captain Planet configuration
 type Config struct {
 HCCA provider.CASetupConfig
 HCIdentity provider.IdentitySetupConfig
 ULCA provider.CASetupConfig
 ULIdentity provider.IdentitySetupConfig
-FarmerCA provider.CASetupConfig
-FarmerIdentity provider.IdentitySetupConfig
+StorageNodeCA provider.CASetupConfig
+StorageNodeIdentity provider.IdentitySetupConfig
 BasePath string `help:"base path for captain planet storage" default:"$CONFDIR"`
 ListenHost string `help:"the host for providers to listen on" default:"127.0.0.1"`
 StartingPort int `help:"all providers will listen on ports consecutively starting with this one" default:"7777"`
 Overwrite bool `help:"whether to overwrite pre-existing configuration files" default:"false"`
 }

 var (
@@ -75,20 +75,20 @@ func cmdSetup(cmd *cobra.Command, args []string) (err error) {
 return err
 }

-for i := 0; i < len(runCfg.Farmers); i++ {
-farmerPath := filepath.Join(setupCfg.BasePath, fmt.Sprintf("f%d", i))
-err = os.MkdirAll(farmerPath, 0700)
+for i := 0; i < len(runCfg.StorageNodes); i++ {
+storagenodePath := filepath.Join(setupCfg.BasePath, fmt.Sprintf("f%d", i))
+err = os.MkdirAll(storagenodePath, 0700)
 if err != nil {
 return err
 }
-farmerCA := setupCfg.FarmerCA
-farmerCA.CertPath = filepath.Join(farmerPath, "ca.cert")
-farmerCA.KeyPath = filepath.Join(farmerPath, "ca.key")
-farmerIdentity := setupCfg.FarmerIdentity
-farmerIdentity.CertPath = filepath.Join(farmerPath, "identity.cert")
-farmerIdentity.KeyPath = filepath.Join(farmerPath, "identity.key")
+storagenodeCA := setupCfg.StorageNodeCA
+storagenodeCA.CertPath = filepath.Join(storagenodePath, "ca.cert")
+storagenodeCA.KeyPath = filepath.Join(storagenodePath, "ca.key")
+storagenodeIdentity := setupCfg.StorageNodeIdentity
+storagenodeIdentity.CertPath = filepath.Join(storagenodePath, "identity.cert")
+storagenodeIdentity.KeyPath = filepath.Join(storagenodePath, "identity.key")
 fmt.Printf("creating identity for storage node %d\n", i+1)
-err := provider.SetupIdentity(process.Ctx(cmd), farmerCA, farmerIdentity)
+err := provider.SetupIdentity(process.Ctx(cmd), storagenodeCA, storagenodeIdentity)
 if err != nil {
 return err
 }
@@ -143,20 +143,20 @@ func cmdSetup(cmd *cobra.Command, args []string) (err error) {
 "pointer-db.auth.api-key": apiKey,
 }

-for i := 0; i < len(runCfg.Farmers); i++ {
-farmerPath := filepath.Join(setupCfg.BasePath, fmt.Sprintf("f%d", i))
-farmer := fmt.Sprintf("farmers.%02d.", i)
-overrides[farmer+"identity.cert-path"] = filepath.Join(
-farmerPath, "identity.cert")
-overrides[farmer+"identity.key-path"] = filepath.Join(
-farmerPath, "identity.key")
-overrides[farmer+"identity.address"] = joinHostPort(
+for i := 0; i < len(runCfg.StorageNodes); i++ {
+storagenodePath := filepath.Join(setupCfg.BasePath, fmt.Sprintf("f%d", i))
+storagenode := fmt.Sprintf("storage-nodes.%02d.", i)
+overrides[storagenode+"identity.cert-path"] = filepath.Join(
+storagenodePath, "identity.cert")
+overrides[storagenode+"identity.key-path"] = filepath.Join(
+storagenodePath, "identity.key")
+overrides[storagenode+"identity.address"] = joinHostPort(
 setupCfg.ListenHost, startingPort+i*2+3)
-overrides[farmer+"kademlia.todo-listen-addr"] = joinHostPort(
+overrides[storagenode+"kademlia.todo-listen-addr"] = joinHostPort(
 setupCfg.ListenHost, startingPort+i*2+4)
-overrides[farmer+"kademlia.bootstrap-addr"] = joinHostPort(
+overrides[storagenode+"kademlia.bootstrap-addr"] = joinHostPort(
 setupCfg.ListenHost, startingPort+1)
-overrides[farmer+"storage.path"] = filepath.Join(farmerPath, "data")
+overrides[storagenode+"storage.path"] = filepath.Join(storagenodePath, "data")
 }

 return process.SaveConfig(runCmd.Flags(),
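The overrides above carve out two consecutive ports per storage node above `StartingPort` (identity on `startingPort+i*2+3`, kademlia on `startingPort+i*2+4`), with every node bootstrapping against `startingPort+1`. A quick sketch, not part of this commit, of the addresses that fall out of the defaults declared in `Config` above (`ListenHost` 127.0.0.1, `StartingPort` 7777):

```go
package main

import (
	"fmt"
	"net"
	"strconv"
)

func main() {
	listenHost := "127.0.0.1" // Config default above
	startingPort := 7777      // Config default above

	for i := 0; i < 3; i++ { // first three storage nodes, for illustration
		identity := net.JoinHostPort(listenHost, strconv.Itoa(startingPort+i*2+3))
		kademlia := net.JoinHostPort(listenHost, strconv.Itoa(startingPort+i*2+4))
		fmt.Printf("storage-nodes.%02d. identity=%s kademlia=%s\n", i, identity, kademlia)
	}
	// storage-nodes.00. identity=127.0.0.1:7780 kademlia=127.0.0.1:7781
	// storage-nodes.01. identity=127.0.0.1:7782 kademlia=127.0.0.1:7783
	// storage-nodes.02. identity=127.0.0.1:7784 kademlia=127.0.0.1:7785
}
```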
@@ -1,6 +1,6 @@
 # gRPC Server

-This is a gRPC server which handles CRUD (create, read, update, delete) requests for storing stats for farmers in a relational database
+This is a gRPC server which handles CRUD (create, read, update, delete) requests for storing stats for storagenodes in a relational database

 To run the server:
 ```
@@ -8,7 +8,7 @@ go run cmd/pointerdb/main.go
 ```
 You can also run using these flags: `-port=<port-number> -prod=<bool> -db=<db-name>`

-You can then write a client program using the client library to access the Create, Get, Update, and UpdateBatch methods to create and interact with farmer stat entries stored in the DB.
+You can then write a client program using the client library to access the Create, Get, Update, and UpdateBatch methods to create and interact with storagenode stat entries stored in the DB.
 An example program utilizing these functions can be found at `storj.io/storj/examples/statdb-client/main.go`.

 If changes are made to `storj.io/storj/pkg/statdb/proto/statdb.proto, the protobuf file will need to be regenerated by running `go generate` inside `pkg/statdb/proto`
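The client library this README mentions is exercised by the example at `storj.io/storj/examples/statdb-client/main.go`. The rough shape of such a program, sketched here against the generated `StatDBClient` interface shown later in this commit (the import path, server address, and request fields are assumptions, not taken from the patch):

```go
package main

import (
	"context"
	"log"

	"google.golang.org/grpc"

	// assumed import path for the generated statdb protobuf package
	pb "storj.io/storj/pkg/statdb/proto"
)

func main() {
	// assumes the statdb gRPC server (cmd/statdb/main.go) is already running locally
	conn, err := grpc.Dial("127.0.0.1:7778", grpc.WithInsecure())
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	client := pb.NewStatDBClient(conn)
	ctx := context.Background()

	// Create a stats entry for a storage node; the request fields are left
	// empty here because they are not shown in this diff -- see statdb.proto.
	if _, err := client.Create(ctx, &pb.CreateRequest{}); err != nil {
		log.Fatal(err)
	}

	// Read the node's stats back.
	if _, err := client.Get(ctx, &pb.GetRequest{}); err != nil {
		log.Fatal(err)
	}
}
```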
@@ -5,7 +5,7 @@ RUN apk add -U curl git musl-dev gcc
 RUN curl https://raw.githubusercontent.com/golang/dep/master/install.sh | sh
 COPY . /go/src/storj.io/storj
 RUN cd /go/src/storj.io/storj && dep ensure -vendor-only
-RUN cd /go/src/storj.io/storj/cmd/farmer && go build -o farmer
+RUN cd /go/src/storj.io/storj/cmd/storagenode && go build -o storagenode

 # final stage
 FROM alpine
@@ -14,6 +14,6 @@ ENV CONF_PATH= \
 EXPOSE 7777

 WORKDIR /app
-COPY --from=build-env /go/src/storj.io/storj/cmd/farmer/farmer /app/
-COPY cmd/farmer/entrypoint /entrypoint
+COPY --from=build-env /go/src/storj.io/storj/cmd/storagenode/storagenode /app/
+COPY cmd/storagenode/entrypoint /entrypoint
 ENTRYPOINT ["/entrypoint"]
@@ -6,8 +6,8 @@ RUN_PARAMS="${RUN_PARAMS:-}"
 if [[ -f "${CONF_PATH:-}" ]]; then
 RUN_PARAMS="${RUN_PARAMS} --config \"${CONF_PATH}\""
 else
-if [[ ! -d $HOME/.storj/farmer ]]; then
-./farmer setup
+if [[ ! -d $HOME/.storj/storagenode ]]; then
+./storagenode setup
 fi
 fi

@@ -15,4 +15,4 @@ if [ -n "${SATELLITE_ADDR:-}" ]; then
 RUN_PARAMS="${RUN_PARAMS} --kademlia.bootstrap-addr $SATELLITE_ADDR"
 fi

-exec ./farmer run $RUN_PARAMS "$@"
+exec ./storagenode run $RUN_PARAMS "$@"
@@ -18,12 +18,12 @@ import (

 var (
 rootCmd = &cobra.Command{
-Use: "farmer",
-Short: "Farmer",
+Use: "storagenode",
+Short: "StorageNode",
 }
 runCmd = &cobra.Command{
 Use: "run",
-Short: "Run the farmer",
+Short: "Run the storagenode",
 RunE: cmdRun,
 }
 setupCmd = &cobra.Command{
@@ -43,7 +43,7 @@ var (
 Identity provider.IdentitySetupConfig
 }

-defaultConfDir = "$HOME/.storj/farmer"
+defaultConfDir = "$HOME/.storj/storagenode"
 )

 func init() {
@@ -35,7 +35,7 @@ spec:
 spec:
 terminationGracePeriodSeconds: 60
 containers:
-- image: "storjlabs/piecestore-farmer"
+- image: "storjlabs/storagenode"
 imagePullPolicy: Always
 name: piecestore
 env:
@@ -1,6 +1,6 @@
 # gRPC Client

-This is an example gRPC client which makes requests for updating and storing farmer stats in a relational database.
+This is an example gRPC client which makes requests for updating and storing storagenode stats in a relational database.

 The gRPC server at `storj.io/storj/cmd/statdb/main.go` needs to be running for this to work.

@@ -105,7 +105,7 @@ func RetrieveReader(ctx context.Context, id string, offset int64, length int64,
 return rr.Range(ctx, offset, length)
 }

-// Delete deletes data from farmer
+// Delete deletes data from storagenode
 // id is the id of the data to be stored
 // dir is the pstore directory containing all other data stored
 // returns error if failed and nil if successful
@@ -121,7 +121,7 @@ func TestRetrieve(t *testing.T) {
 TS := NewTestServer()
 defer TS.Stop()

-// simulate piece stored with farmer
+// simulate piece stored with storagenode
 if err := writeFileToDir("11111111111111111111", TS.s.DataDir); err != nil {
 t.Errorf("Error: %v\nCould not create test piece", err)
 return
@@ -409,7 +409,7 @@ func TestDelete(t *testing.T) {
 t.Run("should return expected PieceDeleteSummary values", func(t *testing.T) {
 assert := assert.New(t)

-// simulate piece stored with farmer
+// simulate piece stored with storagenode
 if err := writeFileToDir("11111111111111111111", TS.s.DataDir); err != nil {
 t.Errorf("Error: %v\nCould not create test piece", err)
 return
@@ -30,7 +30,7 @@ type Responsibility interface {
 }

 // Provider represents a bundle of responsibilities defined by a specific ID.
-// Examples of providers are the heavy client, the farmer, and the gateway.
+// Examples of providers are the heavy client, the storagenode, and the gateway.
 type Provider struct {
 lis net.Listener
 g *grpc.Server
@@ -23,7 +23,7 @@ var _ = math.Inf
 // proto package needs to be updated.
 const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package

-// Node is info for a updating a single farmer, used in the Update rpc calls
+// Node is info for a updating a single storagenode, used in the Update rpc calls
 type Node struct {
 NodeId []byte `protobuf:"bytes,1,opt,name=node_id,json=nodeId,proto3" json:"node_id,omitempty"`
 LatencyList []int64 `protobuf:"varint,2,rep,packed,name=latency_list,json=latencyList,proto3" json:"latency_list,omitempty"`
@@ -110,7 +110,7 @@ func (m *Node) GetUpdateUptime() bool {
 return false
 }

-// NodeStats is info about a single farmer stored in the stats db
+// NodeStats is info about a single storagenode stored in the stats db
 type NodeStats struct {
 NodeId []byte `protobuf:"bytes,1,opt,name=node_id,json=nodeId,proto3" json:"node_id,omitempty"`
 Latency_90 int64 `protobuf:"varint,2,opt,name=latency_90,json=latency90,proto3" json:"latency_90,omitempty"`
@@ -542,13 +542,13 @@ const _ = grpc.SupportPackageIsVersion4
 //
 // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
 type StatDBClient interface {
-// Create a db entry for the provided farmer ID
+// Create a db entry for the provided storagenode ID
 Create(ctx context.Context, in *CreateRequest, opts ...grpc.CallOption) (*CreateResponse, error)
-// Get uses a farmer ID to get that farmer's stats
+// Get uses a storagenode ID to get that storagenode's stats
 Get(ctx context.Context, in *GetRequest, opts ...grpc.CallOption) (*GetResponse, error)
-// Update updates farmer stats for a single farmer
+// Update updates storagenode stats for a single storagenode
 Update(ctx context.Context, in *UpdateRequest, opts ...grpc.CallOption) (*UpdateResponse, error)
-// UpdateBatch updates farmer stats for multiple farmers at a time
+// UpdateBatch updates storagenode stats for multiple farmers at a time
 UpdateBatch(ctx context.Context, in *UpdateBatchRequest, opts ...grpc.CallOption) (*UpdateBatchResponse, error)
 }

@@ -598,13 +598,13 @@ func (c *statDBClient) UpdateBatch(ctx context.Context, in *UpdateBatchRequest,

 // StatDBServer is the server API for StatDB service.
 type StatDBServer interface {
-// Create a db entry for the provided farmer ID
+// Create a db entry for the provided storagenode ID
 Create(context.Context, *CreateRequest) (*CreateResponse, error)
-// Get uses a farmer ID to get that farmer's stats
+// Get uses a storagenode ID to get that storagenode's stats
 Get(context.Context, *GetRequest) (*GetResponse, error)
-// Update updates farmer stats for a single farmer
+// Update updates storagenode stats for a single storagenode
 Update(context.Context, *UpdateRequest) (*UpdateResponse, error)
-// UpdateBatch updates farmer stats for multiple farmers at a time
+// UpdateBatch updates storagenode stats for multiple farmers at a time
 UpdateBatch(context.Context, *UpdateBatchRequest) (*UpdateBatchResponse, error)
 }

@@ -4,19 +4,19 @@
 syntax = "proto3";
 package statdb;

-// StatDB defines the interface for retrieving and updating farmer stats
+// StatDB defines the interface for retrieving and updating storagenode stats
 service StatDB {
-// Create a db entry for the provided farmer ID
+// Create a db entry for the provided storagenode ID
 rpc Create(CreateRequest) returns (CreateResponse);
-// Get uses a farmer ID to get that farmer's stats
+// Get uses a storagenode ID to get that storagenode's stats
 rpc Get(GetRequest) returns (GetResponse);
-// Update updates farmer stats for a single farmer
+// Update updates storagenode stats for a single storagenode
 rpc Update(UpdateRequest) returns (UpdateResponse);
-// UpdateBatch updates farmer stats for multiple farmers at a time
+// UpdateBatch updates storagenode stats for multiple farmers at a time
 rpc UpdateBatch(UpdateBatchRequest) returns (UpdateBatchResponse);
 }

-// Node is info for a updating a single farmer, used in the Update rpc calls
+// Node is info for a updating a single storagenode, used in the Update rpc calls
 message Node {
 bytes node_id = 1;
 repeated int64 latency_list = 2;
@@ -27,10 +27,10 @@ message Node {
 bool update_uptime = 7;
 }

-// NodeStats is info about a single farmer stored in the stats db
+// NodeStats is info about a single storagenode stored in the stats db
 message NodeStats {
 bytes node_id = 1;
-int64 latency_90 = 2; // 90th percentile measure of farmer latency
+int64 latency_90 = 2; // 90th percentile measure of storagenode latency
 double audit_success_ratio = 3; // (auditSuccessCount / totalAuditCount)
 double uptime_ratio = 4; // (uptimeCount / totalUptimeCheckCount)
 }
@@ -49,7 +49,7 @@ func (s *Server) validateAuth(APIKeyBytes []byte) error {
 return nil
 }

-// Create a db entry for the provided farmer
+// Create a db entry for the provided storagenode
 func (s *Server) Create(ctx context.Context, createReq *pb.CreateRequest) (resp *pb.CreateResponse, err error) {
 s.logger.Debug("entering statdb Create")

@@ -88,7 +88,7 @@ func (s *Server) Create(ctx context.Context, createReq *pb.CreateRequest) (resp
 }, nil
 }

-// Get a farmer's stats from the db
+// Get a storagenode's stats from the db
 func (s *Server) Get(ctx context.Context, getReq *pb.GetRequest) (resp *pb.GetResponse, err error) {
 s.logger.Debug("entering statdb Get")

@@ -113,7 +113,7 @@ func (s *Server) Get(ctx context.Context, getReq *pb.GetRequest) (resp *pb.GetRe
 }, nil
 }

-// Update a single farmer's stats in the db
+// Update a single storagenode's stats in the db
 func (s *Server) Update(ctx context.Context, updateReq *pb.UpdateRequest) (resp *pb.UpdateResponse, err error) {
 s.logger.Debug("entering statdb Update")
