rename farmers to storagenode (#275)

* rename farmers to storagenode

* review changes

* merge conflicts
aligeti 2018-08-24 21:52:58 -04:00 committed by GitHub
parent 3154086e58
commit 09da23737a
15 changed files with 92 additions and 92 deletions

View File

@ -106,7 +106,7 @@ satellite-image:
docker build --build-arg GO_VERSION=${GO_VERSION} -t storjlabs/satellite:${TAG} -f cmd/hc/Dockerfile .
.PHONY: storage-node-image
storage-node-image:
-docker build --build-arg GO_VERSION=${GO_VERSION} -t storjlabs/storage-node:${TAG} -f cmd/farmer/Dockerfile .
+docker build --build-arg GO_VERSION=${GO_VERSION} -t storjlabs/storage-node:${TAG} -f cmd/storagenode/Dockerfile .
.PHONY: uplink-image
uplink-image:
docker build --build-arg GO_VERSION=${GO_VERSION} -t storjlabs/uplink:${TAG} -f cmd/uplink/Dockerfile .

View File

@ -20,7 +20,7 @@ import (
)
const (
-farmerCount = 50
+storagenodeCount = 50
)
type HeavyClient struct {
@ -31,7 +31,7 @@ type HeavyClient struct {
MockOverlay bool `default:"true" help:"if false, use real overlay"`
}
-type Farmer struct {
+type StorageNode struct {
Identity provider.IdentityConfig
Kademlia kademlia.Config
Storage psserver.Config
@ -46,7 +46,7 @@ var (
runCfg struct {
HeavyClient HeavyClient
-Farmers [farmerCount]Farmer
+StorageNodes [storagenodeCount]StorageNode
Uplink miniogw.Config
}
)
@ -60,25 +60,25 @@ func cmdRun(cmd *cobra.Command, args []string) (err error) {
ctx := process.Ctx(cmd)
defer mon.Task()(&ctx)(&err)
-errch := make(chan error, len(runCfg.Farmers)+2)
-var farmers []string
+errch := make(chan error, len(runCfg.StorageNodes)+2)
+var storagenodes []string
-// start the farmers
-for i := 0; i < len(runCfg.Farmers); i++ {
-identity, err := runCfg.Farmers[i].Identity.Load()
+// start the storagenodes
+for i := 0; i < len(runCfg.StorageNodes); i++ {
+identity, err := runCfg.StorageNodes[i].Identity.Load()
if err != nil {
return err
}
-farmer := fmt.Sprintf("%s:%s",
-identity.ID.String(), runCfg.Farmers[i].Identity.Address)
-farmers = append(farmers, farmer)
-go func(i int, farmer string) {
-_, _ = fmt.Printf("starting farmer %d %s (kad on %s)\n", i, farmer,
-runCfg.Farmers[i].Kademlia.TODOListenAddr)
-errch <- runCfg.Farmers[i].Identity.Run(ctx,
-runCfg.Farmers[i].Kademlia,
-runCfg.Farmers[i].Storage)
-}(i, farmer)
+storagenode := fmt.Sprintf("%s:%s",
+identity.ID.String(), runCfg.StorageNodes[i].Identity.Address)
+storagenodes = append(storagenodes, storagenode)
+go func(i int, storagenode string) {
+_, _ = fmt.Printf("starting storagenode %d %s (kad on %s)\n", i, storagenode,
+runCfg.StorageNodes[i].Kademlia.TODOListenAddr)
+errch <- runCfg.StorageNodes[i].Identity.Run(ctx,
+runCfg.StorageNodes[i].Kademlia,
+runCfg.StorageNodes[i].Storage)
+}(i, storagenode)
}
// start heavy client
@ -87,7 +87,7 @@ func cmdRun(cmd *cobra.Command, args []string) (err error) {
runCfg.HeavyClient.Identity.Address)
var o provider.Responsibility = runCfg.HeavyClient.Overlay
if runCfg.HeavyClient.MockOverlay {
-o = overlay.MockConfig{Nodes: strings.Join(farmers, ",")}
+o = overlay.MockConfig{Nodes: strings.Join(storagenodes, ",")}
}
errch <- runCfg.HeavyClient.Identity.Run(ctx,
runCfg.HeavyClient.Kademlia,

View File

@ -24,8 +24,8 @@ type Config struct {
HCIdentity provider.IdentitySetupConfig
ULCA provider.CASetupConfig
ULIdentity provider.IdentitySetupConfig
-FarmerCA provider.CASetupConfig
-FarmerIdentity provider.IdentitySetupConfig
+StorageNodeCA provider.CASetupConfig
+StorageNodeIdentity provider.IdentitySetupConfig
BasePath string `help:"base path for captain planet storage" default:"$CONFDIR"`
ListenHost string `help:"the host for providers to listen on" default:"127.0.0.1"`
StartingPort int `help:"all providers will listen on ports consecutively starting with this one" default:"7777"`
@ -75,20 +75,20 @@ func cmdSetup(cmd *cobra.Command, args []string) (err error) {
return err
}
-for i := 0; i < len(runCfg.Farmers); i++ {
-farmerPath := filepath.Join(setupCfg.BasePath, fmt.Sprintf("f%d", i))
-err = os.MkdirAll(farmerPath, 0700)
+for i := 0; i < len(runCfg.StorageNodes); i++ {
+storagenodePath := filepath.Join(setupCfg.BasePath, fmt.Sprintf("f%d", i))
+err = os.MkdirAll(storagenodePath, 0700)
if err != nil {
return err
}
-farmerCA := setupCfg.FarmerCA
-farmerCA.CertPath = filepath.Join(farmerPath, "ca.cert")
-farmerCA.KeyPath = filepath.Join(farmerPath, "ca.key")
-farmerIdentity := setupCfg.FarmerIdentity
-farmerIdentity.CertPath = filepath.Join(farmerPath, "identity.cert")
-farmerIdentity.KeyPath = filepath.Join(farmerPath, "identity.key")
+storagenodeCA := setupCfg.StorageNodeCA
+storagenodeCA.CertPath = filepath.Join(storagenodePath, "ca.cert")
+storagenodeCA.KeyPath = filepath.Join(storagenodePath, "ca.key")
+storagenodeIdentity := setupCfg.StorageNodeIdentity
+storagenodeIdentity.CertPath = filepath.Join(storagenodePath, "identity.cert")
+storagenodeIdentity.KeyPath = filepath.Join(storagenodePath, "identity.key")
fmt.Printf("creating identity for storage node %d\n", i+1)
-err := provider.SetupIdentity(process.Ctx(cmd), farmerCA, farmerIdentity)
+err := provider.SetupIdentity(process.Ctx(cmd), storagenodeCA, storagenodeIdentity)
if err != nil {
return err
}
@ -143,20 +143,20 @@ func cmdSetup(cmd *cobra.Command, args []string) (err error) {
"pointer-db.auth.api-key": apiKey,
}
-for i := 0; i < len(runCfg.Farmers); i++ {
-farmerPath := filepath.Join(setupCfg.BasePath, fmt.Sprintf("f%d", i))
-farmer := fmt.Sprintf("farmers.%02d.", i)
-overrides[farmer+"identity.cert-path"] = filepath.Join(
-farmerPath, "identity.cert")
-overrides[farmer+"identity.key-path"] = filepath.Join(
-farmerPath, "identity.key")
-overrides[farmer+"identity.address"] = joinHostPort(
+for i := 0; i < len(runCfg.StorageNodes); i++ {
+storagenodePath := filepath.Join(setupCfg.BasePath, fmt.Sprintf("f%d", i))
+storagenode := fmt.Sprintf("storage-nodes.%02d.", i)
+overrides[storagenode+"identity.cert-path"] = filepath.Join(
+storagenodePath, "identity.cert")
+overrides[storagenode+"identity.key-path"] = filepath.Join(
+storagenodePath, "identity.key")
+overrides[storagenode+"identity.address"] = joinHostPort(
setupCfg.ListenHost, startingPort+i*2+3)
overrides[farmer+"kademlia.todo-listen-addr"] = joinHostPort(
overrides[storagenode+"kademlia.todo-listen-addr"] = joinHostPort(
setupCfg.ListenHost, startingPort+i*2+4)
overrides[farmer+"kademlia.bootstrap-addr"] = joinHostPort(
overrides[storagenode+"kademlia.bootstrap-addr"] = joinHostPort(
setupCfg.ListenHost, startingPort+1)
overrides[farmer+"storage.path"] = filepath.Join(farmerPath, "data")
overrides[storagenode+"storage.path"] = filepath.Join(storagenodePath, "data")
}
return process.SaveConfig(runCmd.Flags(),
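For reference, after this rename the overrides map written by SaveConfig uses the new `storage-nodes.` prefix. A sketch of the entries generated for the first node, assuming the defaults shown above (`BasePath` of `$CONFDIR`, `ListenHost` 127.0.0.1) and that `startingPort` carries the `StartingPort` default of 7777:
```
storage-nodes.00.identity.cert-path = $CONFDIR/f0/identity.cert
storage-nodes.00.identity.key-path = $CONFDIR/f0/identity.key
storage-nodes.00.identity.address = 127.0.0.1:7780
storage-nodes.00.kademlia.todo-listen-addr = 127.0.0.1:7781
storage-nodes.00.kademlia.bootstrap-addr = 127.0.0.1:7778
storage-nodes.00.storage.path = $CONFDIR/f0/data
```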

View File

@ -1,6 +1,6 @@
# gRPC Server
-This is a gRPC server which handles CRUD (create, read, update, delete) requests for storing stats for farmers in a relational database
+This is a gRPC server which handles CRUD (create, read, update, delete) requests for storing stats for storagenodes in a relational database
To run the server:
```
@ -8,7 +8,7 @@ go run cmd/pointerdb/main.go
```
You can also run using these flags: `-port=<port-number> -prod=<bool> -db=<db-name>`
-You can then write a client program using the client library to access the Create, Get, Update, and UpdateBatch methods to create and interact with farmer stat entries stored in the DB.
+You can then write a client program using the client library to access the Create, Get, Update, and UpdateBatch methods to create and interact with storagenode stat entries stored in the DB.
An example program utilizing these functions can be found at `storj.io/storj/examples/statdb-client/main.go`.
If changes are made to `storj.io/storj/pkg/statdb/proto/statdb.proto, the protobuf file will need to be regenerated by running `go generate` inside `pkg/statdb/proto`
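The client program described above could look roughly like the following Go sketch. This is illustrative only: the `NewStatDBClient` constructor is the conventional protoc-gen-go output (not shown in this diff), the import path and server address are assumptions, and the request fields are left empty because their definitions do not appear here.
```
package main

import (
	"context"
	"log"

	"google.golang.org/grpc"

	pb "storj.io/storj/pkg/statdb/proto" // assumed import path for the generated package
)

func main() {
	// Placeholder address; use whatever -port the statdb server was started with.
	conn, err := grpc.Dial("localhost:7778", grpc.WithInsecure())
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	client := pb.NewStatDBClient(conn) // conventional generated constructor (assumed)

	// Create a stats entry, then read it back. Real calls would populate the
	// request fields (node ID, API key, ...), which are not shown in this diff.
	if _, err := client.Create(context.Background(), &pb.CreateRequest{}); err != nil {
		log.Fatal(err)
	}
	resp, err := client.Get(context.Background(), &pb.GetRequest{})
	if err != nil {
		log.Fatal(err)
	}
	log.Println(resp)
}
```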

View File

@ -5,7 +5,7 @@ RUN apk add -U curl git musl-dev gcc
RUN curl https://raw.githubusercontent.com/golang/dep/master/install.sh | sh
COPY . /go/src/storj.io/storj
RUN cd /go/src/storj.io/storj && dep ensure -vendor-only
-RUN cd /go/src/storj.io/storj/cmd/farmer && go build -o farmer
+RUN cd /go/src/storj.io/storj/cmd/storagenode && go build -o storagenode
# final stage
FROM alpine
@ -14,6 +14,6 @@ ENV CONF_PATH= \
EXPOSE 7777
WORKDIR /app
-COPY --from=build-env /go/src/storj.io/storj/cmd/farmer/farmer /app/
-COPY cmd/farmer/entrypoint /entrypoint
+COPY --from=build-env /go/src/storj.io/storj/cmd/storagenode/storagenode /app/
+COPY cmd/storagenode/entrypoint /entrypoint
ENTRYPOINT ["/entrypoint"]

View File

@ -6,8 +6,8 @@ RUN_PARAMS="${RUN_PARAMS:-}"
if [[ -f "${CONF_PATH:-}" ]]; then
RUN_PARAMS="${RUN_PARAMS} --config \"${CONF_PATH}\""
else
-if [[ ! -d $HOME/.storj/farmer ]]; then
-./farmer setup
+if [[ ! -d $HOME/.storj/storagenode ]]; then
+./storagenode setup
fi
fi
@ -15,4 +15,4 @@ if [ -n "${SATELLITE_ADDR:-}" ]; then
RUN_PARAMS="${RUN_PARAMS} --kademlia.bootstrap-addr $SATELLITE_ADDR"
fi
-exec ./farmer run $RUN_PARAMS "$@"
+exec ./storagenode run $RUN_PARAMS "$@"
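Combined with the `storage-node-image` Makefile target above, the renamed image can be exercised roughly like this; the tag and satellite address are placeholder values, not taken from this change:
```
# on a fresh container the entrypoint runs `./storagenode setup`, then execs `./storagenode run`
docker run -e SATELLITE_ADDR=satellite.example.com:7777 storjlabs/storage-node:latest
```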

View File

@ -18,12 +18,12 @@ import (
var (
rootCmd = &cobra.Command{
Use: "farmer",
Short: "Farmer",
Use: "storagenode",
Short: "StorageNode",
}
runCmd = &cobra.Command{
Use: "run",
Short: "Run the farmer",
Short: "Run the storagenode",
RunE: cmdRun,
}
setupCmd = &cobra.Command{
@ -43,7 +43,7 @@ var (
Identity provider.IdentitySetupConfig
}
-defaultConfDir = "$HOME/.storj/farmer"
+defaultConfDir = "$HOME/.storj/storagenode"
)
func init() {
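Outside of Docker the renamed binary follows the same flow; a brief usage sketch (the bootstrap address is a placeholder):
```
./storagenode setup    # config now lives under the new default $HOME/.storj/storagenode
./storagenode run --kademlia.bootstrap-addr satellite.example.com:7777
```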

View File

@ -35,7 +35,7 @@ spec:
spec:
terminationGracePeriodSeconds: 60
containers:
- image: "storjlabs/piecestore-farmer"
- image: "storjlabs/storagenode"
imagePullPolicy: Always
name: piecestore
env:

View File

@ -1,6 +1,6 @@
# gRPC Client
-This is an example gRPC client which makes requests for updating and storing farmer stats in a relational database.
+This is an example gRPC client which makes requests for updating and storing storagenode stats in a relational database.
The gRPC server at `storj.io/storj/cmd/statdb/main.go` needs to be running for this to work.

View File

@ -105,7 +105,7 @@ func RetrieveReader(ctx context.Context, id string, offset int64, length int64,
return rr.Range(ctx, offset, length)
}
-// Delete deletes data from farmer
+// Delete deletes data from storagenode
// id is the id of the data to be stored
// dir is the pstore directory containing all other data stored
// returns error if failed and nil if successful

View File

@ -121,7 +121,7 @@ func TestRetrieve(t *testing.T) {
TS := NewTestServer()
defer TS.Stop()
-// simulate piece stored with farmer
+// simulate piece stored with storagenode
if err := writeFileToDir("11111111111111111111", TS.s.DataDir); err != nil {
t.Errorf("Error: %v\nCould not create test piece", err)
return
@ -409,7 +409,7 @@ func TestDelete(t *testing.T) {
t.Run("should return expected PieceDeleteSummary values", func(t *testing.T) {
assert := assert.New(t)
-// simulate piece stored with farmer
+// simulate piece stored with storagenode
if err := writeFileToDir("11111111111111111111", TS.s.DataDir); err != nil {
t.Errorf("Error: %v\nCould not create test piece", err)
return

View File

@ -30,7 +30,7 @@ type Responsibility interface {
}
// Provider represents a bundle of responsibilities defined by a specific ID.
-// Examples of providers are the heavy client, the farmer, and the gateway.
+// Examples of providers are the heavy client, the storagenode, and the gateway.
type Provider struct {
lis net.Listener
g *grpc.Server

View File

@ -23,7 +23,7 @@ var _ = math.Inf
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
-// Node is info for a updating a single farmer, used in the Update rpc calls
+// Node is info for a updating a single storagenode, used in the Update rpc calls
type Node struct {
NodeId []byte `protobuf:"bytes,1,opt,name=node_id,json=nodeId,proto3" json:"node_id,omitempty"`
LatencyList []int64 `protobuf:"varint,2,rep,packed,name=latency_list,json=latencyList,proto3" json:"latency_list,omitempty"`
@ -110,7 +110,7 @@ func (m *Node) GetUpdateUptime() bool {
return false
}
-// NodeStats is info about a single farmer stored in the stats db
+// NodeStats is info about a single storagenode stored in the stats db
type NodeStats struct {
NodeId []byte `protobuf:"bytes,1,opt,name=node_id,json=nodeId,proto3" json:"node_id,omitempty"`
Latency_90 int64 `protobuf:"varint,2,opt,name=latency_90,json=latency90,proto3" json:"latency_90,omitempty"`
@ -542,13 +542,13 @@ const _ = grpc.SupportPackageIsVersion4
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
type StatDBClient interface {
-// Create a db entry for the provided farmer ID
+// Create a db entry for the provided storagenode ID
Create(ctx context.Context, in *CreateRequest, opts ...grpc.CallOption) (*CreateResponse, error)
-// Get uses a farmer ID to get that farmer's stats
+// Get uses a storagenode ID to get that storagenode's stats
Get(ctx context.Context, in *GetRequest, opts ...grpc.CallOption) (*GetResponse, error)
-// Update updates farmer stats for a single farmer
+// Update updates storagenode stats for a single storagenode
Update(ctx context.Context, in *UpdateRequest, opts ...grpc.CallOption) (*UpdateResponse, error)
-// UpdateBatch updates farmer stats for multiple farmers at a time
+// UpdateBatch updates storagenode stats for multiple farmers at a time
UpdateBatch(ctx context.Context, in *UpdateBatchRequest, opts ...grpc.CallOption) (*UpdateBatchResponse, error)
}
@ -598,13 +598,13 @@ func (c *statDBClient) UpdateBatch(ctx context.Context, in *UpdateBatchRequest,
// StatDBServer is the server API for StatDB service.
type StatDBServer interface {
-// Create a db entry for the provided farmer ID
+// Create a db entry for the provided storagenode ID
Create(context.Context, *CreateRequest) (*CreateResponse, error)
-// Get uses a farmer ID to get that farmer's stats
+// Get uses a storagenode ID to get that storagenode's stats
Get(context.Context, *GetRequest) (*GetResponse, error)
-// Update updates farmer stats for a single farmer
+// Update updates storagenode stats for a single storagenode
Update(context.Context, *UpdateRequest) (*UpdateResponse, error)
-// UpdateBatch updates farmer stats for multiple farmers at a time
+// UpdateBatch updates storagenode stats for multiple farmers at a time
UpdateBatch(context.Context, *UpdateBatchRequest) (*UpdateBatchResponse, error)
}

View File

@ -4,19 +4,19 @@
syntax = "proto3";
package statdb;
-// StatDB defines the interface for retrieving and updating farmer stats
+// StatDB defines the interface for retrieving and updating storagenode stats
service StatDB {
-// Create a db entry for the provided farmer ID
+// Create a db entry for the provided storagenode ID
rpc Create(CreateRequest) returns (CreateResponse);
-// Get uses a farmer ID to get that farmer's stats
+// Get uses a storagenode ID to get that storagenode's stats
rpc Get(GetRequest) returns (GetResponse);
-// Update updates farmer stats for a single farmer
+// Update updates storagenode stats for a single storagenode
rpc Update(UpdateRequest) returns (UpdateResponse);
-// UpdateBatch updates farmer stats for multiple farmers at a time
+// UpdateBatch updates storagenode stats for multiple farmers at a time
rpc UpdateBatch(UpdateBatchRequest) returns (UpdateBatchResponse);
}
-// Node is info for a updating a single farmer, used in the Update rpc calls
+// Node is info for a updating a single storagenode, used in the Update rpc calls
message Node {
bytes node_id = 1;
repeated int64 latency_list = 2;
@ -27,10 +27,10 @@ message Node {
bool update_uptime = 7;
}
-// NodeStats is info about a single farmer stored in the stats db
+// NodeStats is info about a single storagenode stored in the stats db
message NodeStats {
bytes node_id = 1;
-int64 latency_90 = 2; // 90th percentile measure of farmer latency
+int64 latency_90 = 2; // 90th percentile measure of storagenode latency
double audit_success_ratio = 3; // (auditSuccessCount / totalAuditCount)
double uptime_ratio = 4; // (uptimeCount / totalUptimeCheckCount)
}

View File

@ -49,7 +49,7 @@ func (s *Server) validateAuth(APIKeyBytes []byte) error {
return nil
}
-// Create a db entry for the provided farmer
+// Create a db entry for the provided storagenode
func (s *Server) Create(ctx context.Context, createReq *pb.CreateRequest) (resp *pb.CreateResponse, err error) {
s.logger.Debug("entering statdb Create")
@ -88,7 +88,7 @@ func (s *Server) Create(ctx context.Context, createReq *pb.CreateRequest) (resp
}, nil
}
-// Get a farmer's stats from the db
+// Get a storagenode's stats from the db
func (s *Server) Get(ctx context.Context, getReq *pb.GetRequest) (resp *pb.GetResponse, err error) {
s.logger.Debug("entering statdb Get")
@ -113,7 +113,7 @@ func (s *Server) Get(ctx context.Context, getReq *pb.GetRequest) (resp *pb.GetRe
}, nil
}
-// Update a single farmer's stats in the db
+// Update a single storagenode's stats in the db
func (s *Server) Update(ctx context.Context, updateReq *pb.UpdateRequest) (resp *pb.UpdateResponse, err error) {
s.logger.Debug("entering statdb Update")