all: fix comments about grpc

Change-Id: Id830fbe2d44f083c88765561b6c07c5689afe5bd
Egon Elbre 2020-05-11 08:26:32 +03:00
parent 7d29f2e0d3
commit ec589a8289
16 changed files with 31 additions and 38 deletions

View File

@@ -185,7 +185,7 @@ func TestToken_Equal(t *testing.T) {
}
func TestNewClient(t *testing.T) {
-t.Skip("needs proper grpc listener to work")
+t.Skip("needs proper rpc listener to work")
ctx := testcontext.New(t)
defer ctx.Cleanup()

View File

@@ -6,7 +6,7 @@ metadata:
app: satellite
spec:
ports:
-- name: grpc
+- name: rpc
port: 7070
targetPort: 7070
- name: http
@@ -64,7 +64,7 @@ spec:
- name: HTTP_PORT
value: "8081"
ports:
-- name: grpc
+- name: rpc
containerPort: 8080
- name: http
containerPort: 8081

View File

@@ -6,7 +6,7 @@ metadata:
app: piecestore
spec:
ports:
-- name: grpc
+- name: rpc
port: 7777
targetPort: 7777
- name: http
@@ -46,7 +46,7 @@ spec:
- name: PS_DIR
value: "/home/"
ports:
-- name: grpc
+- name: rpc
containerPort: 7777
- name: http
containerPort: 7776

View File

@@ -56,8 +56,8 @@ const (
storagenodePeer = 3
// Endpoint
-publicGRPC = 0
-privateGRPC = 1
+publicRPC = 0
+privateRPC = 1
publicHTTP = 2
privateHTTP = 3
debugHTTP = 9
@@ -231,7 +231,7 @@ func newNetwork(flags *Flags) (*Processes, error) {
Name: "versioncontrol/0",
Executable: "versioncontrol",
Directory: filepath.Join(processes.Directory, "versioncontrol", "0"),
-Address: net.JoinHostPort(host, port(versioncontrolPeer, 0, publicGRPC)),
+Address: net.JoinHostPort(host, port(versioncontrolPeer, 0, publicRPC)),
})
versioncontrol.Arguments = withCommon(versioncontrol.Directory, Arguments{
@@ -299,7 +299,7 @@ func newNetwork(flags *Flags) (*Processes, error) {
Name: fmt.Sprintf("satellite/%d", i),
Executable: "satellite",
Directory: filepath.Join(processes.Directory, "satellite", fmt.Sprint(i)),
-Address: net.JoinHostPort(host, port(satellitePeer, i, publicGRPC)),
+Address: net.JoinHostPort(host, port(satellitePeer, i, publicRPC)),
})
satellites = append(satellites, apiProcess)
@@ -322,7 +322,7 @@ func newNetwork(flags *Flags) (*Processes, error) {
"--marketing.address", net.JoinHostPort(host, port(satellitePeer, i, privateHTTP)),
"--marketing.static-dir", filepath.Join(storjRoot, "web/marketing/"),
"--server.address", apiProcess.Address,
"--server.private-address", net.JoinHostPort(host, port(satellitePeer, i, privateGRPC)),
"--server.private-address", net.JoinHostPort(host, port(satellitePeer, i, privateRPC)),
"--live-accounting.storage-backend", "redis://" + redisAddress + "?db=" + strconv.Itoa(redisPortBase),
"--server.revocation-dburl", "redis://" + redisAddress + "?db=" + strconv.Itoa(redisPortBase+1),
@@ -434,7 +434,7 @@ func newNetwork(flags *Flags) (*Processes, error) {
Name: fmt.Sprintf("gateway/%d", i),
Executable: "gateway",
Directory: filepath.Join(processes.Directory, "gateway", fmt.Sprint(i)),
-Address: net.JoinHostPort(host, port(gatewayPeer, i, publicGRPC)),
+Address: net.JoinHostPort(host, port(gatewayPeer, i, publicRPC)),
})
encAccess := uplink.NewEncryptionAccessWithDefaultKey(storj.Key{})
@@ -551,7 +551,7 @@ func newNetwork(flags *Flags) (*Processes, error) {
Name: fmt.Sprintf("storagenode/%d", i),
Executable: "storagenode",
Directory: filepath.Join(processes.Directory, "storagenode", fmt.Sprint(i)),
-Address: net.JoinHostPort(host, port(storagenodePeer, i, publicGRPC)),
+Address: net.JoinHostPort(host, port(storagenodePeer, i, publicRPC)),
})
for _, satellite := range satellites {
@@ -564,7 +564,7 @@ func newNetwork(flags *Flags) (*Processes, error) {
"--console.address", net.JoinHostPort(host, port(storagenodePeer, i, publicHTTP)),
"--console.static-dir", filepath.Join(storjRoot, "web/storagenode/"),
"--server.address", process.Address,
"--server.private-address", net.JoinHostPort(host, port(storagenodePeer, i, privateGRPC)),
"--server.private-address", net.JoinHostPort(host, port(storagenodePeer, i, privateRPC)),
"--operator.email", fmt.Sprintf("storage%d@mail.test", i),
"--operator.wallet", "0x0123456789012345678901234567890123456789",

View File

@@ -32,7 +32,7 @@ With the Notification System the SA is able to send an email to the SN advertise
* No Push Notification System (getting updates to your mobile phone, etc.)
-* Not preventing Notifications from every satellite currently, meaning, that you get an offline notification from every satellite you are connected to. (Possible way to mitigate this, is to first check via GRPC, if the node accepts connections)
+* Not preventing Notifications from every satellite currently, meaning, that you get an offline notification from every satellite you are connected to. (Possible way to mitigate this, is to first check via RPC, if the node accepts connections)
## Scenarios

View File

@@ -66,8 +66,8 @@ Inspectors allow private diagnostics on certain systems. The following inspector
#### kademlia
Kademlia, discovery, bootstrap, and vouchers are being removed and not included in this doc. See [kademlia removal design doc](https://github.com/storj/storj/blob/master/docs/design/kademlia-removal.md) for more details.
-#### GRPC endpoints
-The Satellite has the following GRPC endpoints:
+#### RPC endpoints
+The Satellite has the following RPC endpoints:
- Public: metainfo, nodestats, orders, overlay (currently part of kademlia, but may be added here)
- Private: inspectors
@@ -100,10 +100,10 @@ The plan is to break the Satellite into multiple processes. Each process runs in
Currently there is only one Satellite process. We propose to add the following processes:
#### satellite api
-The satellite api will handle all public GRPC and HTTP requests, this includes all public endpoints for nodestats, overlay, orders, metainfo, and console web UI. It will need all the code to successfully process these public requests, but no more than that. For example, if the console needs the mail service to successfully complete any request, then that code should be added, but make sure to only include the necessary parts. There shouldn't be any background jobs running here nor persistent state, meaning if there are no requests coming in, the satellite api should be idle.
+The satellite api will handle all public RPC and HTTP requests, this includes all public endpoints for nodestats, overlay, orders, metainfo, and console web UI. It will need all the code to successfully process these public requests, but no more than that. For example, if the console needs the mail service to successfully complete any request, then that code should be added, but make sure to only include the necessary parts. There shouldn't be any background jobs running here nor persistent state, meaning if there are no requests coming in, the satellite api should be idle.
#### private api
-The private api process handles all private GRPC and HTTP requests, this includes inspectors (overlay, health, irreparable), debug endpoints, and the marketing web UI. Open question: do we need the inspectors, if not should they be removed?
+The private api process handles all private RPC and HTTP requests, this includes inspectors (overlay, health, irreparable), debug endpoints, and the marketing web UI. Open question: do we need the inspectors, if not should they be removed?
#### metainfo loop and the observer system
The metainfo loop process iterates over all the segments in metainfoDB repeatedly on an interval. With each loop, the process can also execute the code for the observer systems that take a segment as input and performs some action with it. The observer systems currently include: audit observer, gc observer, repair checker observer, and accounting tally.
@@ -158,7 +158,7 @@ For the metainfo loop and observer system its not critical to have high availabi
#### satellite api
-For creating the satellite api, there are two options for design. One, a single process containing all public GRPC/HTTP endpoints **and** all code necessary to process any request to those endpoints. Or two, a single process that contains all public GRPC/HTTP endpoints but does **not** contain the code to process requests, instead the api would act as a proxy passing along requests to the correct backend services. Here we will do the first option since it is less complex and it fulfills our need to run replicas to handle lots of traffic. In the future we can migrate to option two should the additional complexity be needed to satisfy some other need.
+For creating the satellite api, there are two options for design. One, a single process containing all public RPC/HTTP endpoints **and** all code necessary to process any request to those endpoints. Or two, a single process that contains all public RPC/HTTP endpoints but does **not** contain the code to process requests, instead the api would act as a proxy passing along requests to the correct backend services. Here we will do the first option since it is less complex and it fulfills our need to run replicas to handle lots of traffic. In the future we can migrate to option two should the additional complexity be needed to satisfy some other need.
#### version

View File

@@ -196,7 +196,7 @@ message StorageNodeMessage {
message SatelliteMessage {
oneof Message {
-message NotReady {} // this could be a grpc error rather than a message
+message NotReady {} // this could be a rpc error rather than a message
message TransferPiece {
bytes piece_id; // the current piece-id

View File

@@ -46,7 +46,7 @@ Our partners will have connectors that their customers will use to store data on
### Connector
-Each partner will have a registered id, (which we will refer to as the partner id) that will identify a partners connector on the Storj network. When a user uploads data to a specified bucket through the connector, the connector will include the partner id in the content of the GRPC request. Before an upload occurs, the uplink will communicate the partner id and bucket name with the tardigrade satellite, checking for a previous attribution. If no attribution is found on the specified bucket and the bucket is currently void of data, the satellite will attribute the partners id to that bucket within the metadata struct. Concurrently to updating the metadata struct the satelitte will add the necessary data to the Attribution table.
+Each partner will have a registered id, (which we will refer to as the partner id) that will identify a partners connector on the Storj network. When a user uploads data to a specified bucket through the connector, the connector will include the partner id in the content of the RPC request. Before an upload occurs, the uplink will communicate the partner id and bucket name with the tardigrade satellite, checking for a previous attribution. If no attribution is found on the specified bucket and the bucket is currently void of data, the satellite will attribute the partners id to that bucket within the metadata struct. Concurrently to updating the metadata struct the satelitte will add the necessary data to the Attribution table.
### Database

View File

@@ -26,9 +26,10 @@ import (
// Config holds server specific configuration parameters
type Config struct {
tlsopts.Config
-Address string `user:"true" help:"public address to listen on" default:":7777"`
-PrivateAddress string `user:"true" help:"private address to listen on" default:"127.0.0.1:7778"`
-DebugLogTraffic bool `user:"true" help:"log all GRPC traffic to zap logger" default:"false"`
+Address string `user:"true" help:"public address to listen on" default:":7777"`
+PrivateAddress string `user:"true" help:"private address to listen on" default:"127.0.0.1:7778"`
+DebugLogTraffic bool `hidden:"true" default:"false"` // Deprecated
}
type public struct {

View File

@@ -75,7 +75,7 @@ func (pieceTracker *PieceTracker) add(nodeID storj.NodeID, pieceID storj.PieceID
if pieceTracker.pieceCounts[nodeID] > 0 {
numPieces = pieceTracker.pieceCounts[nodeID]
}
-// limit size of bloom filter to ensure we are under the limit for GRPC
+// limit size of bloom filter to ensure we are under the limit for RPC
filter := bloomfilter.NewOptimalMaxSize(numPieces, pieceTracker.config.FalsePositiveRate, 2*memory.MiB)
pieceTracker.retainInfos[nodeID] = &RetainInfo{
Filter: filter,
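
For context, a minimal sketch of the size-capped filter construction shown in the hunk above. The NewOptimalMaxSize call and the 2*memory.MiB cap appear in the code itself; the import paths, constants, and surrounding program are assumptions for illustration only.

```go
// Sketch: construct a bloom filter for one node's pieces while capping the
// serialized size, so the later retain request stays under the RPC message limit.
package main

import (
	"storj.io/common/bloomfilter" // assumed import path
	"storj.io/common/memory"      // assumed import path; memory.MiB as used above
)

func main() {
	const (
		numPieces         = 1000000 // assumed piece count reported for a node
		falsePositiveRate = 0.1     // assumed config.FalsePositiveRate value
	)
	// NewOptimalMaxSize sizes the filter for numPieces and falsePositiveRate,
	// but never lets it grow beyond the 2 MiB cap.
	filter := bloomfilter.NewOptimalMaxSize(numPieces, falsePositiveRate, 2*memory.MiB)
	_ = filter // the real code stores this in RetainInfo and later sends it to the node
}
```

Capping the size trades a higher false-positive rate on nodes with very many pieces for a bounded message size.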

View File

@@ -958,9 +958,8 @@ func TestExitDisabled(t *testing.T) {
// Process endpoint should return immediately if GE is disabled
response, err := processClient.Recv()
require.Error(t, err)
-// grpc will return "Unimplemented", drpc will return "Unknown"
-unimplementedOrUnknown := errs2.IsRPC(err, rpcstatus.Unimplemented) || errs2.IsRPC(err, rpcstatus.Unknown)
-require.True(t, unimplementedOrUnknown)
+// drpc will return "Unknown"
+require.True(t, errs2.IsRPC(err, rpcstatus.Unknown))
require.Nil(t, response)
})
}

View File

@@ -11,7 +11,7 @@ import (
"storj.io/common/pb"
)
-// Inspector is a gRPC service for inspecting overlay internals
+// Inspector is a RPC service for inspecting overlay internals
//
// architecture: Endpoint
type Inspector struct {

View File

@@ -15,7 +15,7 @@ var (
mon = monkit.Package()
)
-// Inspector is a gRPC service for inspecting irreparable internals
+// Inspector is a RPC service for inspecting irreparable internals
//
// architecture: Endpoint
type Inspector struct {

View File

@@ -573,9 +573,6 @@ identity.key-path: /root/.local/share/storj/identity/satellite/identity.key
# public address to listen on
server.address: :7777
-# log all GRPC traffic to zap logger
-server.debug-log-traffic: false
# if true, client leaves may contain the most recent certificate revocation for the current certificate
# server.extensions.revocation: true

View File

@@ -68,7 +68,7 @@ func NewService(log *zap.Logger, db DB, dialer rpc.Dialer, trust *trust.Pool) *S
}
}
-// GetPaystubStats retrieves held amount for particular satellite from satellite using grpc.
+// GetPaystubStats retrieves held amount for particular satellite from satellite using RPC.
func (service *Service) GetPaystubStats(ctx context.Context, satelliteID storj.NodeID, period string) (_ *PayStub, err error) {
defer mon.Task()(&ctx)(&err)

View File

@@ -90,10 +90,6 @@ type Endpoint struct {
usedSerials UsedSerials
pieceDeleter *pieces.Deleter
-// liveRequests tracks the total number of incoming rpc requests. For gRPC
-// requests only, this number is compared to config.MaxConcurrentRequests
-// and limits the number of gRPC requests. dRPC requests are tracked but
-// not limited.
liveRequests int32
}