satellite: use IP for all uplink operations, use hostname for audit and repairs

My understanding is that the nodes table has the following fields:
- an `address` field, which can be a hostname or an IP
- a `last_net` field, which is the /24 subnet of the IP resolved from the address
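
To make the relationship concrete: `last_net` is derived by resolving the address and masking the resolved IP to its /24 (IPv4) or /64 (IPv6) network. A minimal stdlib-only sketch of the IPv4 case (the address literal is illustrative; the real derivation lives in overlay.GetNetwork, changed below):

package main

import (
	"fmt"
	"net"
)

func main() {
	// GetNetwork resolves the host via net.ResolveIPAddr; here we start
	// from an already-resolved IPv4 address for brevity.
	ip := net.ParseIP("203.0.113.77").To4()
	// Mask down to the /24 network; this string is what last_net stores.
	fmt.Println(ip.Mask(net.CIDRMask(24, 32)).String()) // prints 203.0.113.0
}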

This PR does the following:
1) add back the last IP to the nodes table, as the new `last_ip_port` field
2) for uplink operations, remove the satellite's calls to `lookupNodeAddress` (which does the DNS lookups to resolve the IP from the hostname) and instead use the data stored in the nodes table `last_ip_port` field (the pattern is sketched after this list). This means the IP that the satellite sends to the uplink for a storage node could be approximately one hour stale. In the short term this is fine; next we will add changes so that the storage node pushes any IP change to the satellite in real time.
3) use the `address` field for repair and audit, since we want those operations to keep making DNS calls to confirm the IP is up to date
4) reduce confusion between hostname, IP, subnet, and address in the code base
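
Point 2 boils down to one small pattern, repeated in each order-limit creation path in the diff below: prefer the cached IP, and fall back to the registered address (which may require a DNS lookup) only when no IP is on record. A minimal sketch with a local stand-in type for the real pb.Node/overlay.NodeDossier:

package main

import "fmt"

// nodeDossier is a stand-in for overlay.NodeDossier; only the two fields
// relevant to address selection are modeled here.
type nodeDossier struct {
	Address    string // registered contact address: hostname or IP, with port
	LastIPPort string // resolved IP:port cached in the nodes table; may be ~1 hr stale
}

// uplinkAddress is what the satellite hands to the uplink: the cached
// IP:port when one is on record, otherwise the registered address.
func uplinkAddress(n nodeDossier) string {
	if n.LastIPPort != "" {
		return n.LastIPPort
	}
	return n.Address
}

func main() {
	n := nodeDossier{Address: "node.example.test:28967", LastIPPort: "203.0.113.77:28967"}
	fmt.Println(uplinkAddress(n)) // prints 203.0.113.77:28967, no DNS needed
}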

Change-Id: I96ce0d8bb78303f82483d0701bc79544b74057ac
Jessica Grebenschikov 2020-03-06 14:04:23 -08:00
parent 140e2f0045
commit 803e2930f4
18 changed files with 900 additions and 227 deletions

View File

@@ -57,7 +57,7 @@ func (endpoint *Endpoint) CheckIn(ctx context.Context, req *pb.CheckInRequest) (
 		return nil, rpcstatus.Error(rpcstatus.FailedPrecondition, errCheckInIdentity.New("failed to add peer identity entry for ID: %v", err).Error())
 	}
-	lastIP, err := overlay.GetNetwork(ctx, req.Address)
+	resolvedIPPort, resolvedNetwork, err := overlay.GetNetwork(ctx, req.Address)
 	if err != nil {
 		endpoint.log.Info("failed to resolve IP from address", zap.String("node address", req.Address), zap.Stringer("Node ID", nodeID), zap.Error(err))
 		return nil, rpcstatus.Error(rpcstatus.InvalidArgument, errCheckInNetwork.New("failed to resolve IP from address: %s, err: %v", req.Address, err).Error())
@@ -79,7 +79,8 @@ func (endpoint *Endpoint) CheckIn(ctx context.Context, req *pb.CheckInRequest) (
 			Address:   req.Address,
 			Transport: pb.NodeTransport_TCP_TLS_GRPC,
 		},
-		LastIP:     lastIP,
+		LastNet:    resolvedNetwork,
+		LastIPPort: resolvedIPPort,
 		IsUp:       pingNodeSuccess,
 		Capacity:   req.Capacity,
 		Operator:   req.Operator,

View File

@@ -42,7 +42,9 @@ func TestGetExitingNodes(t *testing.T) {
 			}
 			for _, data := range testData {
-				err := cache.UpdateAddress(ctx, &pb.Node{Id: data.nodeID}, overlay.NodeSelectionConfig{})
+				n := pb.Node{Id: data.nodeID}
+				d := overlay.NodeDossier{Node: n, LastIPPort: "", LastNet: ""}
+				err := cache.UpdateAddress(ctx, &d, overlay.NodeSelectionConfig{})
 				require.NoError(t, err)

 				req := &overlay.ExitStatusRequest{
@@ -116,7 +118,9 @@ func TestGetGracefulExitNodesByTimeframe(t *testing.T) {
 			}
 			for _, data := range testData {
-				err := cache.UpdateAddress(ctx, &pb.Node{Id: data.nodeID}, overlay.NodeSelectionConfig{})
+				n := pb.Node{Id: data.nodeID}
+				d := overlay.NodeDossier{Node: n, LastIPPort: "", LastNet: ""}
+				err := cache.UpdateAddress(ctx, &d, overlay.NodeSelectionConfig{})
 				require.NoError(t, err)

 				req := &overlay.ExitStatusRequest{

View File

@@ -188,9 +188,13 @@ func (service *Service) CreateGetOrderLimitsOld(ctx context.Context, bucketID []
 			return nil, storj.PiecePrivateKey{}, Error.Wrap(err)
 		}

+		// use the lastIP that we have on record to avoid doing extra DNS resolutions
+		if node.LastIPPort != "" {
+			node.Address.Address = node.LastIPPort
+		}
 		limits = append(limits, &pb.AddressedOrderLimit{
 			Limit:              orderLimit,
-			StorageNodeAddress: lookupNodeAddress(ctx, node.Address),
+			StorageNodeAddress: node.Address,
 		})
 	}
@@ -281,9 +285,13 @@ func (service *Service) CreateGetOrderLimits(ctx context.Context, bucketID []byt
 			OrderExpiration: orderExpiration,
 		}

+		// use the lastIP that we have on record to avoid doing extra DNS resolutions
+		if node.LastIPPort != "" {
+			node.Address.Address = node.LastIPPort
+		}
 		limits = append(limits, &pb.AddressedOrderLimit{
 			Limit:              orderLimit,
-			StorageNodeAddress: lookupNodeAddress(ctx, node.Address),
+			StorageNodeAddress: node.Address,
 		})
 	}
@@ -353,7 +361,7 @@ func (service *Service) RandomSampleOfOrderLimits(limits []*pb.AddressedOrderLim
 }

 // CreatePutOrderLimits creates the order limits for uploading pieces to nodes.
-func (service *Service) CreatePutOrderLimits(ctx context.Context, bucketID []byte, nodes []*pb.Node, expiration time.Time, maxPieceSize int64) (_ storj.PieceID, _ []*pb.AddressedOrderLimit, privateKey storj.PiecePrivateKey, err error) {
+func (service *Service) CreatePutOrderLimits(ctx context.Context, bucketID []byte, nodes []*overlay.NodeDossier, expiration time.Time, maxPieceSize int64) (_ storj.PieceID, _ []*pb.AddressedOrderLimit, privateKey storj.PiecePrivateKey, err error) {
 	defer mon.Task()(&ctx)(&err)

 	orderExpiration := time.Now().Add(service.orderExpiration)
@@ -387,11 +395,16 @@ func (service *Service) CreatePutOrderLimits(ctx context.Context, bucketID []byt
 		})
 		if err != nil {
 			return storj.PieceID{}, nil, storj.PiecePrivateKey{}, Error.Wrap(err)
 		}

+		// use the lastIP that we have on record to avoid doing extra DNS resolutions
+		if node.LastIPPort != "" {
+			node.Address.Address = node.LastIPPort
+		}
 		limits[pieceNum] = &pb.AddressedOrderLimit{
 			Limit:              orderLimit,
-			StorageNodeAddress: lookupNodeAddress(ctx, node.Address),
+			StorageNodeAddress: node.Address,
 		}
 		pieceNum++
 	}
@@ -473,9 +486,13 @@ func (service *Service) CreateDeleteOrderLimits(ctx context.Context, bucketID []
 			return nil, storj.PiecePrivateKey{}, Error.Wrap(err)
 		}

+		// use the lastIP that we have on record to avoid doing extra DNS resolutions
+		if node.LastIPPort != "" {
+			node.Address.Address = node.LastIPPort
+		}
 		limits = append(limits, &pb.AddressedOrderLimit{
 			Limit:              orderLimit,
-			StorageNodeAddress: lookupNodeAddress(ctx, node.Address),
+			StorageNodeAddress: node.Address,
 		})
 	}
@@ -759,7 +776,7 @@ func (service *Service) CreateGetRepairOrderLimits(ctx context.Context, bucketID
 }

 // CreatePutRepairOrderLimits creates the order limits for uploading the repaired pieces of pointer to newNodes.
-func (service *Service) CreatePutRepairOrderLimits(ctx context.Context, bucketID []byte, pointer *pb.Pointer, getOrderLimits []*pb.AddressedOrderLimit, newNodes []*pb.Node) (_ []*pb.AddressedOrderLimit, _ storj.PiecePrivateKey, err error) {
+func (service *Service) CreatePutRepairOrderLimits(ctx context.Context, bucketID []byte, pointer *pb.Pointer, getOrderLimits []*pb.AddressedOrderLimit, newNodes []*overlay.NodeDossier) (_ []*pb.AddressedOrderLimit, _ storj.PiecePrivateKey, err error) {
 	defer mon.Task()(&ctx)(&err)

 	orderExpiration := time.Now().Add(service.orderExpiration)

View File

@@ -40,7 +40,9 @@ func BenchmarkOverlay(b *testing.B) {
 		}
 		for _, id := range all {
-			err := overlaydb.UpdateAddress(ctx, &pb.Node{Id: id}, overlay.NodeSelectionConfig{})
+			n := pb.Node{Id: id}
+			d := overlay.NodeDossier{Node: n, LastIPPort: "", LastNet: ""}
+			err := overlaydb.UpdateAddress(ctx, &d, overlay.NodeSelectionConfig{})
 			require.NoError(b, err)
 		}
@@ -65,7 +67,9 @@ func BenchmarkOverlay(b *testing.B) {
 		b.Run("UpdateAddress", func(b *testing.B) {
 			for i := 0; i < b.N; i++ {
 				id := all[i%len(all)]
-				err := overlaydb.UpdateAddress(ctx, &pb.Node{Id: id}, overlay.NodeSelectionConfig{})
+				n := pb.Node{Id: id}
+				d := overlay.NodeDossier{Node: n, LastIPPort: "", LastNet: ""}
+				err := overlaydb.UpdateAddress(ctx, &d, overlay.NodeSelectionConfig{})
 				require.NoError(b, err)
 			}
 		})

View File

@@ -13,7 +13,7 @@ import (
 type addressInfo struct {
 	address    string
-	lastIP     string
+	lastIPPort string
 	transport  pb.NodeTransport
 }
@@ -46,7 +46,7 @@ func NewCombinedCache(db DB) *CombinedCache {
 // to match the values held in the database; however this code does not
 // guarantee that concurrent UpdateAddress calls will be handled in any
 // particular order.
-func (c *CombinedCache) UpdateAddress(ctx context.Context, info *pb.Node, defaults NodeSelectionConfig) (err error) {
+func (c *CombinedCache) UpdateAddress(ctx context.Context, info *NodeDossier, defaults NodeSelectionConfig) (err error) {
 	// Update internal cache and check if this call requires a db call
 	if info == nil {
@@ -65,8 +65,7 @@ func (c *CombinedCache) UpdateAddress(ctx context.Context, info *pb.Node, defaul
 	if ok &&
 		address.Address == cached.address &&
 		address.Transport == cached.transport &&
-		info.LastIp == cached.lastIP {
+		info.LastIPPort == cached.lastIPPort {
 		return nil
 	}
@@ -85,7 +84,7 @@ func (c *CombinedCache) UpdateAddress(ctx context.Context, info *pb.Node, defaul
 	c.addressLock.Lock()
 	c.addressCache[info.Id] = &addressInfo{
 		address:    address.Address,
-		lastIP:     info.LastIp,
+		lastIPPort: info.LastIPPort,
 		transport:  address.Transport,
 	}
 	c.addressLock.Unlock()
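
The short-circuit above is what makes CombinedCache useful: a repeated UpdateAddress whose address, transport, and LastIPPort all match the cached entry returns before touching the database. A compressed sketch of that check (illustrative types and values; the real code also holds addressLock around map access):

package main

import "fmt"

type addressInfo struct {
	address    string
	lastIPPort string
}

// needsDBWrite reports whether an UpdateAddress call must hit the database:
// only when the incoming values differ from what was last written.
func needsDBWrite(cache map[string]*addressInfo, id, address, lastIPPort string) bool {
	cached, ok := cache[id]
	return !ok || cached.address != address || cached.lastIPPort != lastIPPort
}

func main() {
	cache := map[string]*addressInfo{
		"node1": {address: "a.example.test:7777", lastIPPort: "203.0.113.5:7777"},
	}
	fmt.Println(needsDBWrite(cache, "node1", "a.example.test:7777", "203.0.113.5:7777")) // false: skip the write
	fmt.Println(needsDBWrite(cache, "node1", "a.example.test:7777", "203.0.113.9:7777")) // true: the IP changed
}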

View File

@@ -34,14 +34,15 @@ func TestDB_PieceCounts(t *testing.T) {
 	}
 	for _, node := range nodes {
-		require.NoError(t, overlaydb.UpdateAddress(ctx, &pb.Node{
+		n := pb.Node{
 			Id: node.ID,
 			Address: &pb.NodeAddress{
 				Transport: pb.NodeTransport_TCP_TLS_GRPC,
 				Address:   "0.0.0.0",
 			},
-			LastIp: "0.0.0.0",
-		}, overlay.NodeSelectionConfig{}))
+		}
+		d := overlay.NodeDossier{Node: n, LastIPPort: "0.0.0.0", LastNet: "0.0.0.0"}
+		require.NoError(t, overlaydb.UpdateAddress(ctx, &d, overlay.NodeSelectionConfig{}))
 	}

 	// check that they are initialized to zero
@@ -85,14 +86,15 @@ func BenchmarkDB_PieceCounts(b *testing.B) {
 	}
 	for nodeID := range counts {
-		require.NoError(b, overlaydb.UpdateAddress(ctx, &pb.Node{
+		n := pb.Node{
 			Id: nodeID,
 			Address: &pb.NodeAddress{
 				Transport: pb.NodeTransport_TCP_TLS_GRPC,
 				Address:   "0.0.0.0",
 			},
-			LastIp: "0.0.0.0",
-		}, overlay.NodeSelectionConfig{}))
+		}
+		d := overlay.NodeDossier{Node: n, LastIPPort: "0.0.0.0", LastNet: "0.0.0.0"}
+		require.NoError(b, overlaydb.UpdateAddress(ctx, &d, overlay.NodeSelectionConfig{}))
 	}

 	b.Run("Update", func(b *testing.B) {

View File

@@ -366,7 +366,7 @@ func TestNodeSelectionGracefulExit(t *testing.T) {
 	})
 }

-func TestFindStorageNodesDistinctIPs(t *testing.T) {
+func TestFindStorageNodesDistinctNetworks(t *testing.T) {
 	if runtime.GOOS == "darwin" {
 		t.Skip("Test does not work with macOS")
 	}
@@ -397,7 +397,7 @@ func TestFindStorageNodesDistinctIPs(t *testing.T) {
 		require.Len(t, excludedNodes, 1)
 		res, err := satellite.Overlay.Service.Get(ctx, excludedNodes[0])
 		require.NoError(t, err)
-		excludedNodeAddr = res.LastIp
+		excludedNodeAddr = res.LastIPPort

 		req := overlay.FindStorageNodesRequest{
 			MinimumRequiredNodes: 2,
@@ -407,9 +407,9 @@
 		nodes, err := satellite.Overlay.Service.FindStorageNodes(ctx, req)
 		require.NoError(t, err)
 		require.Len(t, nodes, 2)
-		require.NotEqual(t, nodes[0].LastIp, nodes[1].LastIp)
-		require.NotEqual(t, nodes[0].LastIp, excludedNodeAddr)
-		require.NotEqual(t, nodes[1].LastIp, excludedNodeAddr)
+		require.NotEqual(t, nodes[0].LastIPPort, nodes[1].LastIPPort)
+		require.NotEqual(t, nodes[0].LastIPPort, excludedNodeAddr)
+		require.NotEqual(t, nodes[1].LastIPPort, excludedNodeAddr)

 		req = overlay.FindStorageNodesRequest{
 			MinimumRequiredNodes: 3,
@@ -453,7 +453,7 @@ func TestSelectNewStorageNodesExcludedIPs(t *testing.T) {
 		require.Len(t, excludedNodes, 1)
 		res, err := satellite.Overlay.Service.Get(ctx, excludedNodes[0])
 		require.NoError(t, err)
-		excludedNodeAddr = res.LastIp
+		excludedNodeAddr = res.LastIPPort

 		req := overlay.FindStorageNodesRequest{
 			MinimumRequiredNodes: 2,
@@ -463,9 +463,9 @@
 		nodes, err := satellite.Overlay.Service.FindStorageNodes(ctx, req)
 		require.NoError(t, err)
 		require.Len(t, nodes, 2)
-		require.NotEqual(t, nodes[0].LastIp, nodes[1].LastIp)
-		require.NotEqual(t, nodes[0].LastIp, excludedNodeAddr)
-		require.NotEqual(t, nodes[1].LastIp, excludedNodeAddr)
+		require.NotEqual(t, nodes[0].LastIPPort, nodes[1].LastIPPort)
+		require.NotEqual(t, nodes[0].LastIPPort, excludedNodeAddr)
+		require.NotEqual(t, nodes[1].LastIPPort, excludedNodeAddr)
 	})
 }
@@ -566,8 +566,8 @@ func testDistinctIPs(t *testing.T, ctx *testcontext.Context, planet *testplanet.
 			if tt.preferences.DistinctIP {
 				ips := make(map[string]bool)
 				for _, n := range response {
-					assert.False(t, ips[n.LastIp])
-					ips[n.LastIp] = true
+					assert.False(t, ips[n.LastIPPort])
+					ips[n.LastIPPort] = true
 				}
 			}
@@ -580,12 +580,14 @@ func TestAddrtoNetwork_Conversion(t *testing.T) {
 	defer ctx.Cleanup()

 	ip := "8.8.8.8:28967"
-	network, err := overlay.GetNetwork(ctx, ip)
+	resolvedIPPort, network, err := overlay.GetNetwork(ctx, ip)
 	require.Equal(t, "8.8.8.0", network)
+	require.Equal(t, ip, resolvedIPPort)
 	require.NoError(t, err)

 	ipv6 := "[fc00::1:200]:28967"
-	network, err = overlay.GetNetwork(ctx, ipv6)
+	resolvedIPPort, network, err = overlay.GetNetwork(ctx, ipv6)
 	require.Equal(t, "fc00::", network)
+	require.Equal(t, ipv6, resolvedIPPort)
 	require.NoError(t, err)
 }

View File

@@ -37,9 +37,9 @@ var ErrNotEnoughNodes = errs.Class("not enough nodes")
 // architecture: Database
 type DB interface {
 	// SelectStorageNodes looks up nodes based on criteria
-	SelectStorageNodes(ctx context.Context, count int, criteria *NodeCriteria) ([]*pb.Node, error)
+	SelectStorageNodes(ctx context.Context, count int, criteria *NodeCriteria) ([]*NodeDossier, error)
 	// SelectNewStorageNodes looks up nodes based on new node criteria
-	SelectNewStorageNodes(ctx context.Context, count int, criteria *NodeCriteria) ([]*pb.Node, error)
+	SelectNewStorageNodes(ctx context.Context, count int, criteria *NodeCriteria) ([]*NodeDossier, error)
 	// Get looks up the node by nodeID
 	Get(ctx context.Context, nodeID storj.NodeID) (*NodeDossier, error)
@@ -56,7 +56,7 @@ type DB interface {
 	// PaginateQualified will page through the qualified nodes
 	PaginateQualified(ctx context.Context, offset int64, limit int) ([]*pb.Node, bool, error)
 	// Update updates node address
-	UpdateAddress(ctx context.Context, value *pb.Node, defaults NodeSelectionConfig) error
+	UpdateAddress(ctx context.Context, value *NodeDossier, defaults NodeSelectionConfig) error
 	// BatchUpdateStats updates multiple storagenode's stats in one transaction
 	BatchUpdateStats(ctx context.Context, updateRequests []*UpdateRequest, batchSize int) (failed storj.NodeIDList, err error)
 	// UpdateStats all parts of single storagenode's stats.
@@ -84,8 +84,8 @@ type DB interface {
 	// GetExitStatus returns a node's graceful exit status.
 	GetExitStatus(ctx context.Context, nodeID storj.NodeID) (exitStatus *ExitStatus, err error)
-	// GetNodeIPs returns a list of IP addresses associated with given node IDs.
-	GetNodeIPs(ctx context.Context, nodeIDs []storj.NodeID) (nodeIPs []string, err error)
+	// GetNodesNetwork returns the /24 subnet for each storage node, order is not guaranteed.
+	GetNodesNetwork(ctx context.Context, nodeIDs []storj.NodeID) (nodeNets []string, err error)
 	// GetSuccesfulNodesNotCheckedInSince returns all nodes that last check-in was successful, but haven't checked-in within a given duration.
 	GetSuccesfulNodesNotCheckedInSince(ctx context.Context, duration time.Duration) (nodeAddresses []NodeLastContact, err error)
@@ -100,7 +100,8 @@ type DB interface {
 type NodeCheckInInfo struct {
 	NodeID     storj.NodeID
 	Address    *pb.NodeAddress
-	LastIP     string
+	LastNet    string
+	LastIPPort string
 	IsUp       bool
 	Operator   *pb.NodeOperator
 	Capacity   *pb.NodeCapacity
@@ -171,6 +172,8 @@ type NodeDossier struct {
 	PieceCount int64
 	ExitStatus ExitStatus
 	CreatedAt  time.Time
+	LastNet    string
+	LastIPPort string
 }

 // NodeStats contains statistics about a node.
@@ -191,6 +194,7 @@ type NodeStats struct {
 type NodeLastContact struct {
 	ID                 storj.NodeID
 	Address            string
+	LastIPPort         string
 	LastContactSuccess time.Time
 	LastContactFailure time.Time
 }
@@ -250,13 +254,13 @@ func (service *Service) IsOnline(node *NodeDossier) bool {
 }

 // FindStorageNodes searches the overlay network for nodes that meet the provided requirements
-func (service *Service) FindStorageNodes(ctx context.Context, req FindStorageNodesRequest) (_ []*pb.Node, err error) {
+func (service *Service) FindStorageNodes(ctx context.Context, req FindStorageNodesRequest) (_ []*NodeDossier, err error) {
 	defer mon.Task()(&ctx)(&err)
 	return service.FindStorageNodesWithPreferences(ctx, req, &service.config.Node)
 }

 // FindStorageNodesWithPreferences searches the overlay network for nodes that meet the provided criteria
-func (service *Service) FindStorageNodesWithPreferences(ctx context.Context, req FindStorageNodesRequest, preferences *NodeSelectionConfig) (nodes []*pb.Node, err error) {
+func (service *Service) FindStorageNodesWithPreferences(ctx context.Context, req FindStorageNodesRequest, preferences *NodeSelectionConfig) (nodes []*NodeDossier, err error) {
 	defer mon.Task()(&ctx)(&err)
 	// TODO: add sanity limits to requested node count
@@ -270,7 +274,7 @@ func (service *Service) FindStorageNodesWithPreferences(ctx context.Context, req
 	// get and exclude IPs associated with excluded nodes if distinctIP is enabled
 	var excludedIPs []string
 	if preferences.DistinctIP && len(excludedNodes) > 0 {
-		excludedIPs, err = service.db.GetNodeIPs(ctx, excludedNodes)
+		excludedIPs, err = service.db.GetNodesNetwork(ctx, excludedNodes)
 		if err != nil {
 			return nil, Error.Wrap(err)
 		}
@@ -281,7 +285,7 @@ func (service *Service) FindStorageNodesWithPreferences(ctx context.Context, req
 		newNodeCount = int(float64(reputableNodeCount) * preferences.NewNodePercentage)
 	}

-	var newNodes []*pb.Node
+	var newNodes []*NodeDossier
 	if newNodeCount > 0 {
 		newNodes, err = service.db.SelectNewStorageNodes(ctx, newNodeCount, &NodeCriteria{
 			FreeDisk: preferences.MinimumDiskSpace.Int64(),
@@ -301,7 +305,7 @@ func (service *Service) FindStorageNodesWithPreferences(ctx context.Context, req
 	for _, newNode := range newNodes {
 		excludedNodes = append(excludedNodes, newNode.Id)
 		if preferences.DistinctIP {
-			excludedIPs = append(excludedIPs, newNode.LastIp)
+			excludedIPs = append(excludedIPs, newNode.LastNet)
 		}
 	}
@@ -379,12 +383,19 @@ func (service *Service) Put(ctx context.Context, nodeID storj.NodeID, value pb.N
 		return errors.New("node has no address")
 	}

-	// Resolve IP Address Network to ensure it is set
-	value.LastIp, err = GetNetwork(ctx, value.Address.Address)
+	// Resolve the IP and the subnet from the address that is sent
+	resolvedIPPort, resolvedNetwork, err := GetNetwork(ctx, value.Address.Address)
 	if err != nil {
 		return Error.Wrap(err)
 	}
-	return service.db.UpdateAddress(ctx, &value, service.config.Node)
+
+	n := NodeDossier{
+		Node:       value,
+		LastNet:    resolvedNetwork,
+		LastIPPort: resolvedIPPort,
+	}
+	return service.db.UpdateAddress(ctx, &n, service.config.Node)
 }

 // BatchUpdateStats updates multiple storagenode's stats in one transaction
@@ -463,45 +474,36 @@ func (service *Service) DisqualifyNode(ctx context.Context, nodeID storj.NodeID)
 	return service.db.DisqualifyNode(ctx, nodeID)
 }

-func getIP(ctx context.Context, target string) (ip net.IPAddr, err error) {
-	defer mon.Task()(&ctx)(&err)
-	host, _, err := net.SplitHostPort(target)
-	if err != nil {
-		return net.IPAddr{}, err
-	}
-	ipAddr, err := net.ResolveIPAddr("ip", host)
-	if err != nil {
-		return net.IPAddr{}, err
-	}
-	return *ipAddr, nil
-}

 // GetOfflineNodesLimited returns a list of the first N offline nodes ordered by least recently contacted.
 func (service *Service) GetOfflineNodesLimited(ctx context.Context, limit int) (offlineNodes []NodeLastContact, err error) {
 	defer mon.Task()(&ctx)(&err)
 	return service.db.GetOfflineNodesLimited(ctx, limit)
 }

-// GetNetwork resolves the target address and determines its IP /24 Subnet
-func GetNetwork(ctx context.Context, target string) (network string, err error) {
+// GetNetwork resolves the target address and determines its IP and /24 Subnet
+func GetNetwork(ctx context.Context, target string) (ipPort, network string, err error) {
 	defer mon.Task()(&ctx)(&err)

-	addr, err := getIP(ctx, target)
+	host, port, err := net.SplitHostPort(target)
 	if err != nil {
-		return "", err
+		return "", "", err
+	}
+	ipAddr, err := net.ResolveIPAddr("ip", host)
+	if err != nil {
+		return "", "", err
 	}

 	// If addr can be converted to 4byte notation, it is an IPv4 address, else its an IPv6 address
-	if ipv4 := addr.IP.To4(); ipv4 != nil {
+	if ipv4 := ipAddr.IP.To4(); ipv4 != nil {
 		//Filter all IPv4 Addresses into /24 Subnet's
 		mask := net.CIDRMask(24, 32)
-		return ipv4.Mask(mask).String(), nil
+		return net.JoinHostPort(ipAddr.String(), port), ipv4.Mask(mask).String(), nil
 	}
-	if ipv6 := addr.IP.To16(); ipv6 != nil {
+	if ipv6 := ipAddr.IP.To16(); ipv6 != nil {
 		//Filter all IPv6 Addresses into /64 Subnet's
 		mask := net.CIDRMask(64, 128)
-		return ipv6.Mask(mask).String(), nil
+		return net.JoinHostPort(ipAddr.String(), port), ipv6.Mask(mask).String(), nil
 	}
-	return "", errors.New("unable to get network for address " + addr.String())
+	return "", "", errors.New("unable to get network for address " + ipAddr.String())
 }
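
Assembled from the new lines of the hunk above, the full GetNetwork now reads as follows (mon is the file's existing package-level monkit monitor; everything else is verbatim from the diff):

// GetNetwork resolves the target address and determines its IP and /24 Subnet
func GetNetwork(ctx context.Context, target string) (ipPort, network string, err error) {
	defer mon.Task()(&ctx)(&err)

	host, port, err := net.SplitHostPort(target)
	if err != nil {
		return "", "", err
	}
	ipAddr, err := net.ResolveIPAddr("ip", host)
	if err != nil {
		return "", "", err
	}

	// If addr can be converted to 4byte notation, it is an IPv4 address, else its an IPv6 address
	if ipv4 := ipAddr.IP.To4(); ipv4 != nil {
		//Filter all IPv4 Addresses into /24 Subnet's
		mask := net.CIDRMask(24, 32)
		return net.JoinHostPort(ipAddr.String(), port), ipv4.Mask(mask).String(), nil
	}
	if ipv6 := ipAddr.IP.To16(); ipv6 != nil {
		//Filter all IPv6 Addresses into /64 Subnet's
		mask := net.CIDRMask(64, 128)
		return net.JoinHostPort(ipAddr.String(), port), ipv6.Mask(mask).String(), nil
	}
	return "", "", errors.New("unable to get network for address " + ipAddr.String())
}

Note that the resolved IP:port (stored in last_ip_port) and the masked network (stored in last_net) come out of a single resolution, so the two columns always describe the same DNS lookup.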

View File

@@ -183,8 +183,9 @@ func TestRandomizedSelection(t *testing.T) {
 	// put nodes in cache
 	for i := 0; i < totalNodes; i++ {
 		newID := testrand.NodeID()
-		err := cache.UpdateAddress(ctx, &pb.Node{Id: newID}, defaults)
+		n := pb.Node{Id: newID}
+		d := overlay.NodeDossier{Node: n, LastIPPort: "", LastNet: ""}
+		err := cache.UpdateAddress(ctx, &d, defaults)
 		require.NoError(t, err)
 		_, err = cache.UpdateNodeInfo(ctx, newID, &pb.InfoResponse{
 			Type: pb.NodeType_STORAGE,
@@ -210,7 +211,7 @@
 	// select numNodesToSelect nodes selectIterations times
 	for i := 0; i < selectIterations; i++ {
-		var nodes []*pb.Node
+		var nodes []*overlay.NodeDossier
 		var err error

 		if i%2 == 0 {
@@ -334,7 +335,6 @@ func TestKnownReliable(t *testing.T) {
 		for i, node := range result {
 			assert.Equal(t, expectedReliable[i].Id, node.Id)
 			assert.Equal(t, expectedReliable[i].Address, node.Address)
-			assert.NotNil(t, node.LastIp)
 		}
 	})
 }
@@ -343,7 +343,7 @@ func TestUpdateCheckIn(t *testing.T) {
 	satellitedbtest.Run(t, func(ctx *testcontext.Context, t *testing.T, db satellite.DB) { // setup
 		nodeID := storj.NodeID{1, 2, 3}
 		expectedEmail := "test@email.com"
-		expectedAddress := "1.2.4.4"
+		expectedAddress := "1.2.4.4:8080"
 		info := overlay.NodeCheckInInfo{
 			NodeID: nodeID,
 			Address: &pb.NodeAddress{
@@ -363,11 +363,12 @@ func TestUpdateCheckIn(t *testing.T) {
 				Timestamp: time.Time{},
 				Release:   false,
 			},
+			LastIPPort: expectedAddress,
+			LastNet:    "1.2.4",
 		}
 		expectedNode := &overlay.NodeDossier{
 			Node: pb.Node{
 				Id: nodeID,
-				LastIp: info.LastIP,
 				Address: &pb.NodeAddress{
 					Address:   info.Address.GetAddress(),
 					Transport: pb.NodeTransport_TCP_TLS_GRPC,
@@ -395,6 +396,8 @@
 			Disqualified: nil,
 			PieceCount:   0,
 			ExitStatus:   overlay.ExitStatus{NodeID: nodeID},
+			LastIPPort:   expectedAddress,
+			LastNet:      "1.2.4",
 		}

 		// confirm the node doesn't exist in nodes table yet
@@ -437,6 +440,8 @@
 				Timestamp: time.Now().UTC(),
 				Release:   true,
 			},
+			LastIPPort: expectedAddress,
+			LastNet:    "9.8.7",
 		}
 		// confirm that the updated node is in the nodes table with the
 		// correct updated fields set
@@ -453,7 +458,7 @@
 		require.Equal(t, updatedInfo.Version.GetRelease(), updatedNode.Version.GetRelease())
 		require.True(t, updatedNode.Version.GetTimestamp().After(info.Version.GetTimestamp()))

-		// confirm we can udpate IsUp field
+		// confirm we can update IsUp field
 		startOfUpdateTest2 := time.Now().UTC()
 		updatedInfo2 := overlay.NodeCheckInInfo{
 			NodeID: nodeID,
@@ -491,8 +496,9 @@ func TestCache_DowntimeTracking(t *testing.T) {
 	// put nodes in cache
 	for i := 0; i < totalNodes; i++ {
 		newID := testrand.NodeID()
-		err := cache.UpdateAddress(ctx, &pb.Node{Id: newID}, defaults)
+		n := pb.Node{Id: newID}
+		d := overlay.NodeDossier{Node: n, LastIPPort: "", LastNet: ""}
+		err := cache.UpdateAddress(ctx, &d, defaults)
 		require.NoError(t, err)
 		_, err = cache.UpdateNodeInfo(ctx, newID, &pb.InfoResponse{
 			Type: pb.NodeType_STORAGE,

View File

@@ -42,8 +42,10 @@ func testDatabase(ctx context.Context, t *testing.T, cache overlay.DB) {
 				AuditReputationAlpha0: tt.auditAlpha,
 				AuditReputationBeta0:  tt.auditBeta,
 			}
-			err := cache.UpdateAddress(ctx, &pb.Node{Id: tt.nodeID}, startingRep)
+			n := pb.Node{Id: tt.nodeID}
+			d := overlay.NodeDossier{Node: n, LastIPPort: "", LastNet: ""}
+			err := cache.UpdateAddress(ctx, &d, startingRep)
 			require.NoError(t, err)

 			// update stats so node disqualification is triggered
@@ -73,7 +75,10 @@
 	{ // TestUpdateOperator
 		nodeID := storj.NodeID{10}
-		err := cache.UpdateAddress(ctx, &pb.Node{Id: nodeID}, overlay.NodeSelectionConfig{})
+		n := pb.Node{Id: nodeID}
+		d := overlay.NodeDossier{Node: n, LastIPPort: "", LastNet: ""}
+		err := cache.UpdateAddress(ctx, &d, overlay.NodeSelectionConfig{})
 		require.NoError(t, err)

 		update, err := cache.UpdateNodeInfo(ctx, nodeID, &pb.InfoResponse{

View File

@@ -566,7 +566,7 @@ func updateNodeCheckIn(ctx context.Context, overlayDB overlay.DB, node *storagen
 	checkInInfo := overlay.NodeCheckInInfo{
 		NodeID:     node.ID(),
 		Address:    local.Address,
-		LastIP:     local.LastIp,
+		LastIPPort: local.LastIPPort,
 		IsUp:       isUp,
 		Operator:   &local.Operator,
 		Capacity:   &local.Capacity,

View File

@@ -134,8 +134,11 @@ model node (
 )

 	field id blob
+	// address is how to contact the node, this can be a hostname or IP and it contains the port
 	field address text ( updatable ) // TODO: use compressed format
+	// last_net is the /24 subnet of the IP
 	field last_net text ( updatable )
+	field last_ip_port text ( updatable, nullable )
 	field protocol int ( updatable )
 	field type int ( updatable )
 	field email text ( updatable )
@@ -209,7 +212,7 @@ read limitoffset (
 )

 read limitoffset (
-	select node.id node.last_net node.address node.protocol
+	select node.id node.last_net node.last_ip_port node.address node.protocol
 	where node.id >= ?
 	where node.disqualified = null
 	orderby asc node.id
@@ -221,14 +224,14 @@ read all (
 )

 read limitoffset (
-	select node.id node.address node.last_contact_success node.last_contact_failure
+	select node.id node.address node.last_ip_port node.last_contact_success node.last_contact_failure
 	where node.last_contact_success < node.last_contact_failure
 	where node.disqualified = null
 	orderby asc node.last_contact_failure
 )

 read all (
-	select node.id node.address node.last_contact_success node.last_contact_failure
+	select node.id node.address node.last_ip_port node.last_contact_success node.last_contact_failure
 	where node.last_contact_success < ?
 	where node.last_contact_success > node.last_contact_failure
 	where node.disqualified = null

View File

@@ -135,6 +135,7 @@ CREATE TABLE nodes (
 	id bytea NOT NULL,
 	address text NOT NULL,
 	last_net text NOT NULL,
+	last_ip_port text,
 	protocol integer NOT NULL,
 	type integer NOT NULL,
 	email text NOT NULL,

View File

@@ -407,6 +407,7 @@ CREATE TABLE nodes (
 	id bytea NOT NULL,
 	address text NOT NULL,
 	last_net text NOT NULL,
+	last_ip_port text,
 	protocol integer NOT NULL,
 	type integer NOT NULL,
 	email text NOT NULL,
@@ -908,6 +909,7 @@ CREATE TABLE nodes (
 	id bytea NOT NULL,
 	address text NOT NULL,
 	last_net text NOT NULL,
+	last_ip_port text,
 	protocol integer NOT NULL,
 	type integer NOT NULL,
 	email text NOT NULL,
@@ -3312,6 +3314,7 @@ type Node struct {
 	Id         []byte
 	Address    string
 	LastNet    string
+	LastIpPort *string
 	Protocol   int
 	Type       int
 	Email      string
@@ -3352,6 +3355,7 @@ type Node struct {
 func (Node) _Table() string { return "nodes" }

 type Node_Create_Fields struct {
+	LastIpPort                  Node_LastIpPort_Field
 	Disqualified                Node_Disqualified_Field
 	Suspended                   Node_Suspended_Field
 	UnknownAuditReputationAlpha Node_UnknownAuditReputationAlpha_Field
@@ -3364,6 +3368,7 @@ type Node_Create_Fields struct {
 type Node_Update_Fields struct {
 	Address    Node_Address_Field
 	LastNet    Node_LastNet_Field
+	LastIpPort Node_LastIpPort_Field
 	Protocol   Node_Protocol_Field
 	Type       Node_Type_Field
 	Email      Node_Email_Field
@@ -3456,6 +3461,38 @@ func (f Node_LastNet_Field) value() interface{} {
 func (Node_LastNet_Field) _Column() string { return "last_net" }

+type Node_LastIpPort_Field struct {
+	_set   bool
+	_null  bool
+	_value *string
+}
+
+func Node_LastIpPort(v string) Node_LastIpPort_Field {
+	return Node_LastIpPort_Field{_set: true, _value: &v}
+}
+
+func Node_LastIpPort_Raw(v *string) Node_LastIpPort_Field {
+	if v == nil {
+		return Node_LastIpPort_Null()
+	}
+	return Node_LastIpPort(*v)
+}
+
+func Node_LastIpPort_Null() Node_LastIpPort_Field {
+	return Node_LastIpPort_Field{_set: true, _null: true}
+}
+
+func (f Node_LastIpPort_Field) isnull() bool { return !f._set || f._null || f._value == nil }
+
+func (f Node_LastIpPort_Field) value() interface{} {
+	if !f._set || f._null {
+		return nil
+	}
+	return f._value
+}
+
+func (Node_LastIpPort_Field) _Column() string { return "last_ip_port" }
+
 type Node_Protocol_Field struct {
 	_set   bool
 	_null  bool
@@ -8376,16 +8413,18 @@ type CustomerId_Row struct {
 	CustomerId string
 }

-type Id_Address_LastContactSuccess_LastContactFailure_Row struct {
+type Id_Address_LastIpPort_LastContactSuccess_LastContactFailure_Row struct {
 	Id                 []byte
 	Address            string
+	LastIpPort         *string
 	LastContactSuccess time.Time
 	LastContactFailure time.Time
 }

-type Id_LastNet_Address_Protocol_Row struct {
+type Id_LastNet_LastIpPort_Address_Protocol_Row struct {
 	Id         []byte
 	LastNet    string
+	LastIpPort *string
 	Address    string
 	Protocol   int
 }
@@ -8610,6 +8649,7 @@ func (obj *postgresImpl) CreateNoReturn_Node(ctx context.Context,
 	__id_val := node_id.value()
 	__address_val := node_address.value()
 	__last_net_val := node_last_net.value()
+	__last_ip_port_val := optional.LastIpPort.value()
 	__protocol_val := node_protocol.value()
 	__type_val := node_type.value()
 	__email_val := node_email.value()
@@ -8644,14 +8684,14 @@
 	__exit_finished_at_val := optional.ExitFinishedAt.value()
 	__exit_success_val := node_exit_success.value()

-	var __columns = &__sqlbundle_Hole{SQL: __sqlbundle_Literal("id, address, last_net, protocol, type, email, wallet, free_bandwidth, free_disk, piece_count, major, minor, patch, hash, timestamp, release, latency_90, audit_success_count, total_audit_count, uptime_success_count, total_uptime_count, created_at, updated_at, last_contact_success, last_contact_failure, contained, disqualified, suspended, audit_reputation_alpha, audit_reputation_beta, uptime_reputation_alpha, uptime_reputation_beta, exit_initiated_at, exit_loop_completed_at, exit_finished_at, exit_success")}
-	var __placeholders = &__sqlbundle_Hole{SQL: __sqlbundle_Literal("?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?")}
+	var __columns = &__sqlbundle_Hole{SQL: __sqlbundle_Literal("id, address, last_net, last_ip_port, protocol, type, email, wallet, free_bandwidth, free_disk, piece_count, major, minor, patch, hash, timestamp, release, latency_90, audit_success_count, total_audit_count, uptime_success_count, total_uptime_count, created_at, updated_at, last_contact_success, last_contact_failure, contained, disqualified, suspended, audit_reputation_alpha, audit_reputation_beta, uptime_reputation_alpha, uptime_reputation_beta, exit_initiated_at, exit_loop_completed_at, exit_finished_at, exit_success")}
+	var __placeholders = &__sqlbundle_Hole{SQL: __sqlbundle_Literal("?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?")}
 	var __clause = &__sqlbundle_Hole{SQL: __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("("), __columns, __sqlbundle_Literal(") VALUES ("), __placeholders, __sqlbundle_Literal(")")}}}

 	var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("INSERT INTO nodes "), __clause}}

 	var __values []interface{}
-	__values = append(__values, __id_val, __address_val, __last_net_val, __protocol_val, __type_val, __email_val, __wallet_val, __free_bandwidth_val, __free_disk_val, __piece_count_val, __major_val, __minor_val, __patch_val, __hash_val, __timestamp_val, __release_val, __latency_90_val, __audit_success_count_val, __total_audit_count_val, __uptime_success_count_val, __total_uptime_count_val, __created_at_val, __updated_at_val, __last_contact_success_val, __last_contact_failure_val, __contained_val, __disqualified_val, __suspended_val, __audit_reputation_alpha_val, __audit_reputation_beta_val, __uptime_reputation_alpha_val, __uptime_reputation_beta_val, __exit_initiated_at_val, __exit_loop_completed_at_val, __exit_finished_at_val, __exit_success_val)
+	__values = append(__values, __id_val, __address_val, __last_net_val, __last_ip_port_val, __protocol_val, __type_val, __email_val, __wallet_val, __free_bandwidth_val, __free_disk_val, __piece_count_val, __major_val, __minor_val, __patch_val, __hash_val, __timestamp_val, __release_val, __latency_90_val, __audit_success_count_val, __total_audit_count_val, __uptime_success_count_val, __total_uptime_count_val, __created_at_val, __updated_at_val, __last_contact_success_val, __last_contact_failure_val, __contained_val, __disqualified_val, __suspended_val, __audit_reputation_alpha_val, __audit_reputation_beta_val, __uptime_reputation_alpha_val, __uptime_reputation_beta_val, __exit_initiated_at_val, __exit_loop_completed_at_val, __exit_finished_at_val, __exit_success_val)

 	__optional_columns := __sqlbundle_Literals{Join: ", "}
 	__optional_placeholders := __sqlbundle_Literals{Join: ", "}
@@ -9924,7 +9964,7 @@ func (obj *postgresImpl) Get_Node_By_Id(ctx context.Context,
 	node *Node, err error) {
 	defer mon.Task()(&ctx)(&err)

-	var __embed_stmt = __sqlbundle_Literal("SELECT nodes.id, nodes.address, nodes.last_net, nodes.protocol, nodes.type, nodes.email, nodes.wallet, nodes.free_bandwidth, nodes.free_disk, nodes.piece_count, nodes.major, nodes.minor, nodes.patch, nodes.hash, nodes.timestamp, nodes.release, nodes.latency_90, nodes.audit_success_count, nodes.total_audit_count, nodes.uptime_success_count, nodes.total_uptime_count, nodes.created_at, nodes.updated_at, nodes.last_contact_success, nodes.last_contact_failure, nodes.contained, nodes.disqualified, nodes.suspended, nodes.audit_reputation_alpha, nodes.audit_reputation_beta, nodes.unknown_audit_reputation_alpha, nodes.unknown_audit_reputation_beta, nodes.uptime_reputation_alpha, nodes.uptime_reputation_beta, nodes.exit_initiated_at, nodes.exit_loop_completed_at, nodes.exit_finished_at, nodes.exit_success FROM nodes WHERE nodes.id = ?")
+	var __embed_stmt = __sqlbundle_Literal("SELECT nodes.id, nodes.address, nodes.last_net, nodes.last_ip_port, nodes.protocol, nodes.type, nodes.email, nodes.wallet, nodes.free_bandwidth, nodes.free_disk, nodes.piece_count, nodes.major, nodes.minor, nodes.patch, nodes.hash, nodes.timestamp, nodes.release, nodes.latency_90, nodes.audit_success_count, nodes.total_audit_count, nodes.uptime_success_count, nodes.total_uptime_count, nodes.created_at, nodes.updated_at, nodes.last_contact_success, nodes.last_contact_failure, nodes.contained, nodes.disqualified, nodes.suspended, nodes.audit_reputation_alpha, nodes.audit_reputation_beta, nodes.unknown_audit_reputation_alpha, nodes.unknown_audit_reputation_beta, nodes.uptime_reputation_alpha, nodes.uptime_reputation_beta, nodes.exit_initiated_at, nodes.exit_loop_completed_at, nodes.exit_finished_at, nodes.exit_success FROM nodes WHERE nodes.id = ?")

 	var __values []interface{}
 	__values = append(__values, node_id.value())
@@ -9933,7 +9973,7 @@
 	obj.logStmt(__stmt, __values...)

 	node = &Node{}
-	err = obj.driver.QueryRowContext(ctx, __stmt, __values...).Scan(&node.Id, &node.Address, &node.LastNet, &node.Protocol, &node.Type, &node.Email, &node.Wallet, &node.FreeBandwidth, &node.FreeDisk, &node.PieceCount, &node.Major, &node.Minor, &node.Patch, &node.Hash, &node.Timestamp, &node.Release, &node.Latency90, &node.AuditSuccessCount, &node.TotalAuditCount, &node.UptimeSuccessCount, &node.TotalUptimeCount, &node.CreatedAt, &node.UpdatedAt, &node.LastContactSuccess, &node.LastContactFailure, &node.Contained, &node.Disqualified, &node.Suspended, &node.AuditReputationAlpha, &node.AuditReputationBeta, &node.UnknownAuditReputationAlpha, &node.UnknownAuditReputationBeta, &node.UptimeReputationAlpha, &node.UptimeReputationBeta, &node.ExitInitiatedAt, &node.ExitLoopCompletedAt, &node.ExitFinishedAt, &node.ExitSuccess)
+	err = obj.driver.QueryRowContext(ctx, __stmt, __values...).Scan(&node.Id, &node.Address, &node.LastNet, &node.LastIpPort, &node.Protocol, &node.Type, &node.Email, &node.Wallet, &node.FreeBandwidth, &node.FreeDisk, &node.PieceCount, &node.Major, &node.Minor, &node.Patch, &node.Hash, &node.Timestamp, &node.Release, &node.Latency90, &node.AuditSuccessCount, &node.TotalAuditCount, &node.UptimeSuccessCount, &node.TotalUptimeCount, &node.CreatedAt, &node.UpdatedAt, &node.LastContactSuccess, &node.LastContactFailure, &node.Contained, &node.Disqualified, &node.Suspended, &node.AuditReputationAlpha, &node.AuditReputationBeta, &node.UnknownAuditReputationAlpha, &node.UnknownAuditReputationBeta, &node.UptimeReputationAlpha, &node.UptimeReputationBeta, &node.ExitInitiatedAt, &node.ExitLoopCompletedAt, &node.ExitFinishedAt, &node.ExitSuccess)
 	if err != nil {
 		return (*Node)(nil), obj.makeErr(err)
 	}
@@ -9979,7 +10019,7 @@ func (obj *postgresImpl) Limited_Node_By_Id_GreaterOrEqual_OrderBy_Asc_Id(ctx co
 	rows []*Node, err error) {
 	defer mon.Task()(&ctx)(&err)

-	var __embed_stmt = __sqlbundle_Literal("SELECT nodes.id, nodes.address, nodes.last_net, nodes.protocol, nodes.type, nodes.email, nodes.wallet, nodes.free_bandwidth, nodes.free_disk, nodes.piece_count, nodes.major, nodes.minor, nodes.patch, nodes.hash, nodes.timestamp, nodes.release, nodes.latency_90, nodes.audit_success_count, nodes.total_audit_count, nodes.uptime_success_count, nodes.total_uptime_count, nodes.created_at, nodes.updated_at, nodes.last_contact_success, nodes.last_contact_failure, nodes.contained, nodes.disqualified, nodes.suspended, nodes.audit_reputation_alpha, nodes.audit_reputation_beta, nodes.unknown_audit_reputation_alpha, nodes.unknown_audit_reputation_beta, nodes.uptime_reputation_alpha, nodes.uptime_reputation_beta, nodes.exit_initiated_at, nodes.exit_loop_completed_at, nodes.exit_finished_at, nodes.exit_success FROM nodes WHERE nodes.id >= ? ORDER BY nodes.id LIMIT ? OFFSET ?")
+	var __embed_stmt = __sqlbundle_Literal("SELECT nodes.id, nodes.address, nodes.last_net, nodes.last_ip_port, nodes.protocol, nodes.type, nodes.email, nodes.wallet, nodes.free_bandwidth, nodes.free_disk, nodes.piece_count, nodes.major, nodes.minor, nodes.patch, nodes.hash, nodes.timestamp, nodes.release, nodes.latency_90, nodes.audit_success_count, nodes.total_audit_count, nodes.uptime_success_count, nodes.total_uptime_count, nodes.created_at, nodes.updated_at, nodes.last_contact_success, nodes.last_contact_failure, nodes.contained, nodes.disqualified, nodes.suspended, nodes.audit_reputation_alpha, nodes.audit_reputation_beta, nodes.unknown_audit_reputation_alpha, nodes.unknown_audit_reputation_beta, nodes.uptime_reputation_alpha, nodes.uptime_reputation_beta, nodes.exit_initiated_at, nodes.exit_loop_completed_at, nodes.exit_finished_at, nodes.exit_success FROM nodes WHERE nodes.id >= ? ORDER BY nodes.id LIMIT ? OFFSET ?")

 	var __values []interface{}
 	__values = append(__values, node_id_greater_or_equal.value())
@@ -9997,7 +10037,7 @@
 	for __rows.Next() {
 		node := &Node{}
-		err = __rows.Scan(&node.Id, &node.Address, &node.LastNet, &node.Protocol, &node.Type, &node.Email, &node.Wallet, &node.FreeBandwidth, &node.FreeDisk, &node.PieceCount, &node.Major, &node.Minor, &node.Patch, &node.Hash, &node.Timestamp, &node.Release, &node.Latency90, &node.AuditSuccessCount, &node.TotalAuditCount, &node.UptimeSuccessCount, &node.TotalUptimeCount, &node.CreatedAt, &node.UpdatedAt, &node.LastContactSuccess, &node.LastContactFailure, &node.Contained, &node.Disqualified, &node.Suspended, &node.AuditReputationAlpha, &node.AuditReputationBeta, &node.UnknownAuditReputationAlpha, &node.UnknownAuditReputationBeta, &node.UptimeReputationAlpha, &node.UptimeReputationBeta, &node.ExitInitiatedAt, &node.ExitLoopCompletedAt, &node.ExitFinishedAt, &node.ExitSuccess)
+		err = __rows.Scan(&node.Id, &node.Address, &node.LastNet, &node.LastIpPort, &node.Protocol, &node.Type, &node.Email, &node.Wallet, &node.FreeBandwidth, &node.FreeDisk, &node.PieceCount, &node.Major, &node.Minor, &node.Patch, &node.Hash, &node.Timestamp, &node.Release, &node.Latency90, &node.AuditSuccessCount, &node.TotalAuditCount, &node.UptimeSuccessCount, &node.TotalUptimeCount, &node.CreatedAt, &node.UpdatedAt, &node.LastContactSuccess, &node.LastContactFailure, &node.Contained, &node.Disqualified, &node.Suspended, &node.AuditReputationAlpha, &node.AuditReputationBeta, &node.UnknownAuditReputationAlpha, &node.UnknownAuditReputationBeta, &node.UptimeReputationAlpha, &node.UptimeReputationBeta, &node.ExitInitiatedAt, &node.ExitLoopCompletedAt, &node.ExitFinishedAt, &node.ExitSuccess)
 		if err != nil {
 			return nil, obj.makeErr(err)
 		}
@@ -10010,13 +10050,13 @@
 }

-func (obj *postgresImpl) Limited_Node_Id_Node_LastNet_Node_Address_Node_Protocol_By_Id_GreaterOrEqual_And_Disqualified_Is_Null_OrderBy_Asc_Id(ctx context.Context,
+func (obj *postgresImpl) Limited_Node_Id_Node_LastNet_Node_LastIpPort_Node_Address_Node_Protocol_By_Id_GreaterOrEqual_And_Disqualified_Is_Null_OrderBy_Asc_Id(ctx context.Context,
 	node_id_greater_or_equal Node_Id_Field,
 	limit int, offset int64) (
-	rows []*Id_LastNet_Address_Protocol_Row, err error) {
+	rows []*Id_LastNet_LastIpPort_Address_Protocol_Row, err error) {
 	defer mon.Task()(&ctx)(&err)

-	var __embed_stmt = __sqlbundle_Literal("SELECT nodes.id, nodes.last_net, nodes.address, nodes.protocol FROM nodes WHERE nodes.id >= ? AND nodes.disqualified is NULL ORDER BY nodes.id LIMIT ? OFFSET ?")
+	var __embed_stmt = __sqlbundle_Literal("SELECT nodes.id, nodes.last_net, nodes.last_ip_port, nodes.address, nodes.protocol FROM nodes WHERE nodes.id >= ? AND nodes.disqualified is NULL ORDER BY nodes.id LIMIT ? OFFSET ?")

 	var __values []interface{}
 	__values = append(__values, node_id_greater_or_equal.value())
@@ -10033,8 +10073,8 @@ func (obj *postgresImpl) Limited_Node_Id_Node_LastNet_Node_Address_Node_Protocol
 	defer __rows.Close()

 	for __rows.Next() {
-		row := &Id_LastNet_Address_Protocol_Row{}
-		err = __rows.Scan(&row.Id, &row.LastNet, &row.Address, &row.Protocol)
+		row := &Id_LastNet_LastIpPort_Address_Protocol_Row{}
+		err = __rows.Scan(&row.Id, &row.LastNet, &row.LastIpPort, &row.Address, &row.Protocol)
 		if err != nil {
 			return nil, obj.makeErr(err)
 		}
@@ -10079,12 +10119,12 @@ func (obj *postgresImpl) All_Node_Id_Node_PieceCount_By_PieceCount_Not_Number(ct
 }

-func (obj *postgresImpl) Limited_Node_Id_Node_Address_Node_LastContactSuccess_Node_LastContactFailure_By_LastContactSuccess_Less_LastContactFailure_And_Disqualified_Is_Null_OrderBy_Asc_LastContactFailure(ctx context.Context,
+func (obj *postgresImpl) Limited_Node_Id_Node_Address_Node_LastIpPort_Node_LastContactSuccess_Node_LastContactFailure_By_LastContactSuccess_Less_LastContactFailure_And_Disqualified_Is_Null_OrderBy_Asc_LastContactFailure(ctx context.Context,
 	limit int, offset int64) (
-	rows []*Id_Address_LastContactSuccess_LastContactFailure_Row, err error) {
+	rows []*Id_Address_LastIpPort_LastContactSuccess_LastContactFailure_Row, err error) {
 	defer mon.Task()(&ctx)(&err)

-	var __embed_stmt = __sqlbundle_Literal("SELECT nodes.id, nodes.address, nodes.last_contact_success, nodes.last_contact_failure FROM nodes WHERE nodes.last_contact_success < nodes.last_contact_failure AND nodes.disqualified is NULL ORDER BY nodes.last_contact_failure LIMIT ? OFFSET ?")
+	var __embed_stmt = __sqlbundle_Literal("SELECT nodes.id, nodes.address, nodes.last_ip_port, nodes.last_contact_success, nodes.last_contact_failure FROM nodes WHERE nodes.last_contact_success < nodes.last_contact_failure AND nodes.disqualified is NULL ORDER BY nodes.last_contact_failure LIMIT ? OFFSET ?")

 	var __values []interface{}
@@ -10100,8 +10140,8 @@ func (obj *postgresImpl) Limited_Node_Id_Node_Address_Node_LastContactSuccess_No
 	defer __rows.Close()

 	for __rows.Next() {
-		row := &Id_Address_LastContactSuccess_LastContactFailure_Row{}
-		err = __rows.Scan(&row.Id, &row.Address, &row.LastContactSuccess, &row.LastContactFailure)
+		row := &Id_Address_LastIpPort_LastContactSuccess_LastContactFailure_Row{}
+		err = __rows.Scan(&row.Id, &row.Address, &row.LastIpPort, &row.LastContactSuccess, &row.LastContactFailure)
 		if err != nil {
 			return nil, obj.makeErr(err)
 		}
@@ -10114,12 +10154,12 @@ func (obj *postgresImpl) Limited_Node_Id_Node_Address_Node_LastContactSuccess_No
 }

-func (obj *postgresImpl) All_Node_Id_Node_Address_Node_LastContactSuccess_Node_LastContactFailure_By_LastContactSuccess_Less_And_LastContactSuccess_Greater_LastContactFailure_And_Disqualified_Is_Null_OrderBy_Asc_LastContactSuccess(ctx context.Context,
+func (obj *postgresImpl) All_Node_Id_Node_Address_Node_LastIpPort_Node_LastContactSuccess_Node_LastContactFailure_By_LastContactSuccess_Less_And_LastContactSuccess_Greater_LastContactFailure_And_Disqualified_Is_Null_OrderBy_Asc_LastContactSuccess(ctx context.Context,
 	node_last_contact_success_less Node_LastContactSuccess_Field) (
-	rows []*Id_Address_LastContactSuccess_LastContactFailure_Row, err error) {
+	rows []*Id_Address_LastIpPort_LastContactSuccess_LastContactFailure_Row, err error) {
 	defer mon.Task()(&ctx)(&err)

-	var __embed_stmt = __sqlbundle_Literal("SELECT nodes.id, nodes.address, nodes.last_contact_success, nodes.last_contact_failure FROM nodes WHERE nodes.last_contact_success < ? AND nodes.last_contact_success > nodes.last_contact_failure AND nodes.disqualified is NULL ORDER BY nodes.last_contact_success")
+	var __embed_stmt = __sqlbundle_Literal("SELECT nodes.id, nodes.address, nodes.last_ip_port, nodes.last_contact_success, nodes.last_contact_failure FROM nodes WHERE nodes.last_contact_success < ? AND nodes.last_contact_success > nodes.last_contact_failure AND nodes.disqualified is NULL ORDER BY nodes.last_contact_success")

 	var __values []interface{}
 	__values = append(__values, node_last_contact_success_less.value())
@@ -10134,8 +10174,8 @@ func (obj *postgresImpl) All_Node_Id_Node_Address_Node_LastContactSuccess_Node_L
 	defer __rows.Close()

 	for __rows.Next() {
-		row := &Id_Address_LastContactSuccess_LastContactFailure_Row{}
-		err = __rows.Scan(&row.Id, &row.Address, &row.LastContactSuccess, &row.LastContactFailure)
+		row := &Id_Address_LastIpPort_LastContactSuccess_LastContactFailure_Row{}
+		err = __rows.Scan(&row.Id, &row.Address, &row.LastIpPort, &row.LastContactSuccess, &row.LastContactFailure)
 		if err != nil {
 			return nil, obj.makeErr(err)
 		}
@@ -10729,7 +10769,7 @@ func (obj *postgresImpl) Paged_PendingSerialQueue(ctx context.Context,
 	rows []*PendingSerialQueue, next *Paged_PendingSerialQueue_Continuation, err error) {
 	defer mon.Task()(&ctx)(&err)

-	var __embed_stmt = __sqlbundle_Literal("SELECT pending_serial_queue.storage_node_id, pending_serial_queue.bucket_id, pending_serial_queue.serial_number, pending_serial_queue.action, pending_serial_queue.settled, pending_serial_queue.expires_at, pending_serial_queue.storage_node_id, pending_serial_queue.bucket_id, pending_serial_queue.serial_number FROM pending_serial_queue WHERE (pending_serial_queue.storage_node_id, pending_serial_queue.bucket_id, pending_serial_queue.serial_number) > (?, ?, ?) ORDER BY pending_serial_queue.storage_node_id, pending_serial_queue.bucket_id, pending_serial_queue.serial_number LIMIT ?")
+	var __embed_stmt = __sqlbundle_Literal("SELECT pending_serial_queue.storage_node_id, pending_serial_queue.bucket_id, pending_serial_queue.serial_number, pending_serial_queue.action, pending_serial_queue.settled, pending_serial_queue.expires_at, pending_serial_queue.storage_node_id, pending_serial_queue.bucket_id, pending_serial_queue.serial_number FROM pending_serial_queue WHERE (pending_serial_queue.storage_node_id > ? OR (pending_serial_queue.storage_node_id = ? AND (pending_serial_queue.bucket_id > ? OR (pending_serial_queue.bucket_id = ? AND pending_serial_queue.serial_number > ?)))) ORDER BY pending_serial_queue.storage_node_id, pending_serial_queue.bucket_id, pending_serial_queue.serial_number LIMIT ?")

 	var __embed_first_stmt = __sqlbundle_Literal("SELECT pending_serial_queue.storage_node_id, pending_serial_queue.bucket_id, pending_serial_queue.serial_number, pending_serial_queue.action, pending_serial_queue.settled, pending_serial_queue.expires_at, pending_serial_queue.storage_node_id, pending_serial_queue.bucket_id, pending_serial_queue.serial_number FROM pending_serial_queue ORDER BY pending_serial_queue.storage_node_id, pending_serial_queue.bucket_id, pending_serial_queue.serial_number LIMIT ?")
@ -10737,7 +10777,7 @@ func (obj *postgresImpl) Paged_PendingSerialQueue(ctx context.Context,
var __stmt string var __stmt string
if start != nil && start._set { if start != nil && start._set {
__values = append(__values, start._value_storage_node_id, start._value_bucket_id, start._value_serial_number, limit) __values = append(__values, start._value_storage_node_id, start._value_storage_node_id, start._value_bucket_id, start._value_bucket_id, start._value_serial_number, limit)
__stmt = __sqlbundle_Render(obj.dialect, __embed_stmt) __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
} else { } else {
__values = append(__values, limit) __values = append(__values, limit)
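Worth noting: the pagination predicate for pending_serial_queue is rewritten from the row-value form (storage_node_id, bucket_id, serial_number) > (?, ?, ?) into nested OR/AND comparisons, which is why each cursor value above is now bound twice (once for > and once for =). The two predicates express the same lexicographic ordering; a minimal Go sketch of that ordering, with invented helper names (an illustration, not code from this change):

package main

import (
	"bytes"
	"fmt"
)

// tupleGreater mirrors the expanded SQL predicate
//   id > ? OR (id = ? AND (bucket > ? OR (bucket = ? AND serial > ?)))
// i.e. lexicographic comparison of the three cursor columns.
func tupleGreater(id1, bucket1, serial1, id2, bucket2, serial2 []byte) bool {
	if c := bytes.Compare(id1, id2); c != 0 {
		return c > 0
	}
	if c := bytes.Compare(bucket1, bucket2); c != 0 {
		return c > 0
	}
	return bytes.Compare(serial1, serial2) > 0
}

func main() {
	// (2, _, _) beats (1, 9, 9) on the first column alone.
	fmt.Println(tupleGreater([]byte{2}, nil, nil, []byte{1}, []byte{9}, []byte{9})) // true
}

The same rewrite appears again below in the cockroachImpl version of Paged_PendingSerialQueue; both Postgres and CockroachDB accept the expanded form, since it is plain boolean logic.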
@ -12261,7 +12301,7 @@ func (obj *postgresImpl) Update_Node_By_Id(ctx context.Context,
defer mon.Task()(&ctx)(&err) defer mon.Task()(&ctx)(&err)
var __sets = &__sqlbundle_Hole{} var __sets = &__sqlbundle_Hole{}
var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("UPDATE nodes SET "), __sets, __sqlbundle_Literal(" WHERE nodes.id = ? RETURNING nodes.id, nodes.address, nodes.last_net, nodes.protocol, nodes.type, nodes.email, nodes.wallet, nodes.free_bandwidth, nodes.free_disk, nodes.piece_count, nodes.major, nodes.minor, nodes.patch, nodes.hash, nodes.timestamp, nodes.release, nodes.latency_90, nodes.audit_success_count, nodes.total_audit_count, nodes.uptime_success_count, nodes.total_uptime_count, nodes.created_at, nodes.updated_at, nodes.last_contact_success, nodes.last_contact_failure, nodes.contained, nodes.disqualified, nodes.suspended, nodes.audit_reputation_alpha, nodes.audit_reputation_beta, nodes.unknown_audit_reputation_alpha, nodes.unknown_audit_reputation_beta, nodes.uptime_reputation_alpha, nodes.uptime_reputation_beta, nodes.exit_initiated_at, nodes.exit_loop_completed_at, nodes.exit_finished_at, nodes.exit_success")}} var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("UPDATE nodes SET "), __sets, __sqlbundle_Literal(" WHERE nodes.id = ? RETURNING nodes.id, nodes.address, nodes.last_net, nodes.last_ip_port, nodes.protocol, nodes.type, nodes.email, nodes.wallet, nodes.free_bandwidth, nodes.free_disk, nodes.piece_count, nodes.major, nodes.minor, nodes.patch, nodes.hash, nodes.timestamp, nodes.release, nodes.latency_90, nodes.audit_success_count, nodes.total_audit_count, nodes.uptime_success_count, nodes.total_uptime_count, nodes.created_at, nodes.updated_at, nodes.last_contact_success, nodes.last_contact_failure, nodes.contained, nodes.disqualified, nodes.suspended, nodes.audit_reputation_alpha, nodes.audit_reputation_beta, nodes.unknown_audit_reputation_alpha, nodes.unknown_audit_reputation_beta, nodes.uptime_reputation_alpha, nodes.uptime_reputation_beta, nodes.exit_initiated_at, nodes.exit_loop_completed_at, nodes.exit_finished_at, nodes.exit_success")}}
__sets_sql := __sqlbundle_Literals{Join: ", "} __sets_sql := __sqlbundle_Literals{Join: ", "}
var __values []interface{} var __values []interface{}
@ -12277,6 +12317,11 @@ func (obj *postgresImpl) Update_Node_By_Id(ctx context.Context,
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("last_net = ?")) __sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("last_net = ?"))
} }
if update.LastIpPort._set {
__values = append(__values, update.LastIpPort.value())
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("last_ip_port = ?"))
}
if update.Protocol._set { if update.Protocol._set {
__values = append(__values, update.Protocol.value()) __values = append(__values, update.Protocol.value())
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("protocol = ?")) __sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("protocol = ?"))
@ -12456,7 +12501,7 @@ func (obj *postgresImpl) Update_Node_By_Id(ctx context.Context,
obj.logStmt(__stmt, __values...) obj.logStmt(__stmt, __values...)
node = &Node{} node = &Node{}
err = obj.driver.QueryRowContext(ctx, __stmt, __values...).Scan(&node.Id, &node.Address, &node.LastNet, &node.Protocol, &node.Type, &node.Email, &node.Wallet, &node.FreeBandwidth, &node.FreeDisk, &node.PieceCount, &node.Major, &node.Minor, &node.Patch, &node.Hash, &node.Timestamp, &node.Release, &node.Latency90, &node.AuditSuccessCount, &node.TotalAuditCount, &node.UptimeSuccessCount, &node.TotalUptimeCount, &node.CreatedAt, &node.UpdatedAt, &node.LastContactSuccess, &node.LastContactFailure, &node.Contained, &node.Disqualified, &node.Suspended, &node.AuditReputationAlpha, &node.AuditReputationBeta, &node.UnknownAuditReputationAlpha, &node.UnknownAuditReputationBeta, &node.UptimeReputationAlpha, &node.UptimeReputationBeta, &node.ExitInitiatedAt, &node.ExitLoopCompletedAt, &node.ExitFinishedAt, &node.ExitSuccess) err = obj.driver.QueryRowContext(ctx, __stmt, __values...).Scan(&node.Id, &node.Address, &node.LastNet, &node.LastIpPort, &node.Protocol, &node.Type, &node.Email, &node.Wallet, &node.FreeBandwidth, &node.FreeDisk, &node.PieceCount, &node.Major, &node.Minor, &node.Patch, &node.Hash, &node.Timestamp, &node.Release, &node.Latency90, &node.AuditSuccessCount, &node.TotalAuditCount, &node.UptimeSuccessCount, &node.TotalUptimeCount, &node.CreatedAt, &node.UpdatedAt, &node.LastContactSuccess, &node.LastContactFailure, &node.Contained, &node.Disqualified, &node.Suspended, &node.AuditReputationAlpha, &node.AuditReputationBeta, &node.UnknownAuditReputationAlpha, &node.UnknownAuditReputationBeta, &node.UptimeReputationAlpha, &node.UptimeReputationBeta, &node.ExitInitiatedAt, &node.ExitLoopCompletedAt, &node.ExitFinishedAt, &node.ExitSuccess)
if err == sql.ErrNoRows { if err == sql.ErrNoRows {
return nil, nil return nil, nil
} }
@ -12489,6 +12534,11 @@ func (obj *postgresImpl) UpdateNoReturn_Node_By_Id(ctx context.Context,
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("last_net = ?")) __sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("last_net = ?"))
} }
if update.LastIpPort._set {
__values = append(__values, update.LastIpPort.value())
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("last_ip_port = ?"))
}
if update.Protocol._set { if update.Protocol._set {
__values = append(__values, update.Protocol.value()) __values = append(__values, update.Protocol.value())
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("protocol = ?")) __sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("protocol = ?"))
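Each generated update path gains the same guarded fragment: last_ip_port only joins the SET list when the caller marked the field as set, keeping the SQL text and the bind values aligned. A stripped-down sketch of this optional-field builder pattern (types and names simplified for illustration; the generated code above is the authoritative version):

package main

import (
	"fmt"
	"strings"
)

// optString is a nullable update field; value applies only when set is true.
type optString struct {
	value *string // nil writes SQL NULL
	set   bool
}

// buildUpdate assembles the SET clause from only the fields marked set, so the
// placeholder list and the value list stay in matching order.
func buildUpdate(lastNet, lastIPPort optString) (string, []interface{}) {
	var sets []string
	var values []interface{}
	if lastNet.set {
		sets = append(sets, "last_net = ?")
		values = append(values, lastNet.value)
	}
	if lastIPPort.set {
		sets = append(sets, "last_ip_port = ?")
		values = append(values, lastIPPort.value)
	}
	return "UPDATE nodes SET " + strings.Join(sets, ", ") + " WHERE nodes.id = ?", values
}

func main() {
	ip := "203.0.113.7:28967"
	stmt, values := buildUpdate(optString{}, optString{value: &ip, set: true})
	fmt.Println(stmt)
	fmt.Println(len(values), "bind value(s)")
}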
@ -14643,6 +14693,7 @@ func (obj *cockroachImpl) CreateNoReturn_Node(ctx context.Context,
__id_val := node_id.value() __id_val := node_id.value()
__address_val := node_address.value() __address_val := node_address.value()
__last_net_val := node_last_net.value() __last_net_val := node_last_net.value()
__last_ip_port_val := optional.LastIpPort.value()
__protocol_val := node_protocol.value() __protocol_val := node_protocol.value()
__type_val := node_type.value() __type_val := node_type.value()
__email_val := node_email.value() __email_val := node_email.value()
@ -14677,14 +14728,14 @@ func (obj *cockroachImpl) CreateNoReturn_Node(ctx context.Context,
__exit_finished_at_val := optional.ExitFinishedAt.value() __exit_finished_at_val := optional.ExitFinishedAt.value()
__exit_success_val := node_exit_success.value() __exit_success_val := node_exit_success.value()
var __columns = &__sqlbundle_Hole{SQL: __sqlbundle_Literal("id, address, last_net, protocol, type, email, wallet, free_bandwidth, free_disk, piece_count, major, minor, patch, hash, timestamp, release, latency_90, audit_success_count, total_audit_count, uptime_success_count, total_uptime_count, created_at, updated_at, last_contact_success, last_contact_failure, contained, disqualified, suspended, audit_reputation_alpha, audit_reputation_beta, uptime_reputation_alpha, uptime_reputation_beta, exit_initiated_at, exit_loop_completed_at, exit_finished_at, exit_success")} var __columns = &__sqlbundle_Hole{SQL: __sqlbundle_Literal("id, address, last_net, last_ip_port, protocol, type, email, wallet, free_bandwidth, free_disk, piece_count, major, minor, patch, hash, timestamp, release, latency_90, audit_success_count, total_audit_count, uptime_success_count, total_uptime_count, created_at, updated_at, last_contact_success, last_contact_failure, contained, disqualified, suspended, audit_reputation_alpha, audit_reputation_beta, uptime_reputation_alpha, uptime_reputation_beta, exit_initiated_at, exit_loop_completed_at, exit_finished_at, exit_success")}
var __placeholders = &__sqlbundle_Hole{SQL: __sqlbundle_Literal("?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?")} var __placeholders = &__sqlbundle_Hole{SQL: __sqlbundle_Literal("?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?")}
var __clause = &__sqlbundle_Hole{SQL: __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("("), __columns, __sqlbundle_Literal(") VALUES ("), __placeholders, __sqlbundle_Literal(")")}}} var __clause = &__sqlbundle_Hole{SQL: __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("("), __columns, __sqlbundle_Literal(") VALUES ("), __placeholders, __sqlbundle_Literal(")")}}}
var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("INSERT INTO nodes "), __clause}} var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("INSERT INTO nodes "), __clause}}
var __values []interface{} var __values []interface{}
__values = append(__values, __id_val, __address_val, __last_net_val, __protocol_val, __type_val, __email_val, __wallet_val, __free_bandwidth_val, __free_disk_val, __piece_count_val, __major_val, __minor_val, __patch_val, __hash_val, __timestamp_val, __release_val, __latency_90_val, __audit_success_count_val, __total_audit_count_val, __uptime_success_count_val, __total_uptime_count_val, __created_at_val, __updated_at_val, __last_contact_success_val, __last_contact_failure_val, __contained_val, __disqualified_val, __suspended_val, __audit_reputation_alpha_val, __audit_reputation_beta_val, __uptime_reputation_alpha_val, __uptime_reputation_beta_val, __exit_initiated_at_val, __exit_loop_completed_at_val, __exit_finished_at_val, __exit_success_val) __values = append(__values, __id_val, __address_val, __last_net_val, __last_ip_port_val, __protocol_val, __type_val, __email_val, __wallet_val, __free_bandwidth_val, __free_disk_val, __piece_count_val, __major_val, __minor_val, __patch_val, __hash_val, __timestamp_val, __release_val, __latency_90_val, __audit_success_count_val, __total_audit_count_val, __uptime_success_count_val, __total_uptime_count_val, __created_at_val, __updated_at_val, __last_contact_success_val, __last_contact_failure_val, __contained_val, __disqualified_val, __suspended_val, __audit_reputation_alpha_val, __audit_reputation_beta_val, __uptime_reputation_alpha_val, __uptime_reputation_beta_val, __exit_initiated_at_val, __exit_loop_completed_at_val, __exit_finished_at_val, __exit_success_val)
__optional_columns := __sqlbundle_Literals{Join: ", "} __optional_columns := __sqlbundle_Literals{Join: ", "}
__optional_placeholders := __sqlbundle_Literals{Join: ", "} __optional_placeholders := __sqlbundle_Literals{Join: ", "}
@ -15957,7 +16008,7 @@ func (obj *cockroachImpl) Get_Node_By_Id(ctx context.Context,
node *Node, err error) { node *Node, err error) {
defer mon.Task()(&ctx)(&err) defer mon.Task()(&ctx)(&err)
var __embed_stmt = __sqlbundle_Literal("SELECT nodes.id, nodes.address, nodes.last_net, nodes.protocol, nodes.type, nodes.email, nodes.wallet, nodes.free_bandwidth, nodes.free_disk, nodes.piece_count, nodes.major, nodes.minor, nodes.patch, nodes.hash, nodes.timestamp, nodes.release, nodes.latency_90, nodes.audit_success_count, nodes.total_audit_count, nodes.uptime_success_count, nodes.total_uptime_count, nodes.created_at, nodes.updated_at, nodes.last_contact_success, nodes.last_contact_failure, nodes.contained, nodes.disqualified, nodes.suspended, nodes.audit_reputation_alpha, nodes.audit_reputation_beta, nodes.unknown_audit_reputation_alpha, nodes.unknown_audit_reputation_beta, nodes.uptime_reputation_alpha, nodes.uptime_reputation_beta, nodes.exit_initiated_at, nodes.exit_loop_completed_at, nodes.exit_finished_at, nodes.exit_success FROM nodes WHERE nodes.id = ?") var __embed_stmt = __sqlbundle_Literal("SELECT nodes.id, nodes.address, nodes.last_net, nodes.last_ip_port, nodes.protocol, nodes.type, nodes.email, nodes.wallet, nodes.free_bandwidth, nodes.free_disk, nodes.piece_count, nodes.major, nodes.minor, nodes.patch, nodes.hash, nodes.timestamp, nodes.release, nodes.latency_90, nodes.audit_success_count, nodes.total_audit_count, nodes.uptime_success_count, nodes.total_uptime_count, nodes.created_at, nodes.updated_at, nodes.last_contact_success, nodes.last_contact_failure, nodes.contained, nodes.disqualified, nodes.suspended, nodes.audit_reputation_alpha, nodes.audit_reputation_beta, nodes.unknown_audit_reputation_alpha, nodes.unknown_audit_reputation_beta, nodes.uptime_reputation_alpha, nodes.uptime_reputation_beta, nodes.exit_initiated_at, nodes.exit_loop_completed_at, nodes.exit_finished_at, nodes.exit_success FROM nodes WHERE nodes.id = ?")
var __values []interface{} var __values []interface{}
__values = append(__values, node_id.value()) __values = append(__values, node_id.value())
@ -15966,7 +16017,7 @@ func (obj *cockroachImpl) Get_Node_By_Id(ctx context.Context,
obj.logStmt(__stmt, __values...) obj.logStmt(__stmt, __values...)
node = &Node{} node = &Node{}
err = obj.driver.QueryRowContext(ctx, __stmt, __values...).Scan(&node.Id, &node.Address, &node.LastNet, &node.Protocol, &node.Type, &node.Email, &node.Wallet, &node.FreeBandwidth, &node.FreeDisk, &node.PieceCount, &node.Major, &node.Minor, &node.Patch, &node.Hash, &node.Timestamp, &node.Release, &node.Latency90, &node.AuditSuccessCount, &node.TotalAuditCount, &node.UptimeSuccessCount, &node.TotalUptimeCount, &node.CreatedAt, &node.UpdatedAt, &node.LastContactSuccess, &node.LastContactFailure, &node.Contained, &node.Disqualified, &node.Suspended, &node.AuditReputationAlpha, &node.AuditReputationBeta, &node.UnknownAuditReputationAlpha, &node.UnknownAuditReputationBeta, &node.UptimeReputationAlpha, &node.UptimeReputationBeta, &node.ExitInitiatedAt, &node.ExitLoopCompletedAt, &node.ExitFinishedAt, &node.ExitSuccess) err = obj.driver.QueryRowContext(ctx, __stmt, __values...).Scan(&node.Id, &node.Address, &node.LastNet, &node.LastIpPort, &node.Protocol, &node.Type, &node.Email, &node.Wallet, &node.FreeBandwidth, &node.FreeDisk, &node.PieceCount, &node.Major, &node.Minor, &node.Patch, &node.Hash, &node.Timestamp, &node.Release, &node.Latency90, &node.AuditSuccessCount, &node.TotalAuditCount, &node.UptimeSuccessCount, &node.TotalUptimeCount, &node.CreatedAt, &node.UpdatedAt, &node.LastContactSuccess, &node.LastContactFailure, &node.Contained, &node.Disqualified, &node.Suspended, &node.AuditReputationAlpha, &node.AuditReputationBeta, &node.UnknownAuditReputationAlpha, &node.UnknownAuditReputationBeta, &node.UptimeReputationAlpha, &node.UptimeReputationBeta, &node.ExitInitiatedAt, &node.ExitLoopCompletedAt, &node.ExitFinishedAt, &node.ExitSuccess)
if err != nil { if err != nil {
return (*Node)(nil), obj.makeErr(err) return (*Node)(nil), obj.makeErr(err)
} }
@ -16012,7 +16063,7 @@ func (obj *cockroachImpl) Limited_Node_By_Id_GreaterOrEqual_OrderBy_Asc_Id(ctx c
rows []*Node, err error) { rows []*Node, err error) {
defer mon.Task()(&ctx)(&err) defer mon.Task()(&ctx)(&err)
var __embed_stmt = __sqlbundle_Literal("SELECT nodes.id, nodes.address, nodes.last_net, nodes.protocol, nodes.type, nodes.email, nodes.wallet, nodes.free_bandwidth, nodes.free_disk, nodes.piece_count, nodes.major, nodes.minor, nodes.patch, nodes.hash, nodes.timestamp, nodes.release, nodes.latency_90, nodes.audit_success_count, nodes.total_audit_count, nodes.uptime_success_count, nodes.total_uptime_count, nodes.created_at, nodes.updated_at, nodes.last_contact_success, nodes.last_contact_failure, nodes.contained, nodes.disqualified, nodes.suspended, nodes.audit_reputation_alpha, nodes.audit_reputation_beta, nodes.unknown_audit_reputation_alpha, nodes.unknown_audit_reputation_beta, nodes.uptime_reputation_alpha, nodes.uptime_reputation_beta, nodes.exit_initiated_at, nodes.exit_loop_completed_at, nodes.exit_finished_at, nodes.exit_success FROM nodes WHERE nodes.id >= ? ORDER BY nodes.id LIMIT ? OFFSET ?") var __embed_stmt = __sqlbundle_Literal("SELECT nodes.id, nodes.address, nodes.last_net, nodes.last_ip_port, nodes.protocol, nodes.type, nodes.email, nodes.wallet, nodes.free_bandwidth, nodes.free_disk, nodes.piece_count, nodes.major, nodes.minor, nodes.patch, nodes.hash, nodes.timestamp, nodes.release, nodes.latency_90, nodes.audit_success_count, nodes.total_audit_count, nodes.uptime_success_count, nodes.total_uptime_count, nodes.created_at, nodes.updated_at, nodes.last_contact_success, nodes.last_contact_failure, nodes.contained, nodes.disqualified, nodes.suspended, nodes.audit_reputation_alpha, nodes.audit_reputation_beta, nodes.unknown_audit_reputation_alpha, nodes.unknown_audit_reputation_beta, nodes.uptime_reputation_alpha, nodes.uptime_reputation_beta, nodes.exit_initiated_at, nodes.exit_loop_completed_at, nodes.exit_finished_at, nodes.exit_success FROM nodes WHERE nodes.id >= ? ORDER BY nodes.id LIMIT ? OFFSET ?")
var __values []interface{} var __values []interface{}
__values = append(__values, node_id_greater_or_equal.value()) __values = append(__values, node_id_greater_or_equal.value())
@ -16030,7 +16081,7 @@ func (obj *cockroachImpl) Limited_Node_By_Id_GreaterOrEqual_OrderBy_Asc_Id(ctx c
for __rows.Next() { for __rows.Next() {
node := &Node{} node := &Node{}
err = __rows.Scan(&node.Id, &node.Address, &node.LastNet, &node.Protocol, &node.Type, &node.Email, &node.Wallet, &node.FreeBandwidth, &node.FreeDisk, &node.PieceCount, &node.Major, &node.Minor, &node.Patch, &node.Hash, &node.Timestamp, &node.Release, &node.Latency90, &node.AuditSuccessCount, &node.TotalAuditCount, &node.UptimeSuccessCount, &node.TotalUptimeCount, &node.CreatedAt, &node.UpdatedAt, &node.LastContactSuccess, &node.LastContactFailure, &node.Contained, &node.Disqualified, &node.Suspended, &node.AuditReputationAlpha, &node.AuditReputationBeta, &node.UnknownAuditReputationAlpha, &node.UnknownAuditReputationBeta, &node.UptimeReputationAlpha, &node.UptimeReputationBeta, &node.ExitInitiatedAt, &node.ExitLoopCompletedAt, &node.ExitFinishedAt, &node.ExitSuccess) err = __rows.Scan(&node.Id, &node.Address, &node.LastNet, &node.LastIpPort, &node.Protocol, &node.Type, &node.Email, &node.Wallet, &node.FreeBandwidth, &node.FreeDisk, &node.PieceCount, &node.Major, &node.Minor, &node.Patch, &node.Hash, &node.Timestamp, &node.Release, &node.Latency90, &node.AuditSuccessCount, &node.TotalAuditCount, &node.UptimeSuccessCount, &node.TotalUptimeCount, &node.CreatedAt, &node.UpdatedAt, &node.LastContactSuccess, &node.LastContactFailure, &node.Contained, &node.Disqualified, &node.Suspended, &node.AuditReputationAlpha, &node.AuditReputationBeta, &node.UnknownAuditReputationAlpha, &node.UnknownAuditReputationBeta, &node.UptimeReputationAlpha, &node.UptimeReputationBeta, &node.ExitInitiatedAt, &node.ExitLoopCompletedAt, &node.ExitFinishedAt, &node.ExitSuccess)
if err != nil { if err != nil {
return nil, obj.makeErr(err) return nil, obj.makeErr(err)
} }
@ -16043,13 +16094,13 @@ func (obj *cockroachImpl) Limited_Node_By_Id_GreaterOrEqual_OrderBy_Asc_Id(ctx c
} }
func (obj *cockroachImpl) Limited_Node_Id_Node_LastNet_Node_Address_Node_Protocol_By_Id_GreaterOrEqual_And_Disqualified_Is_Null_OrderBy_Asc_Id(ctx context.Context, func (obj *cockroachImpl) Limited_Node_Id_Node_LastNet_Node_LastIpPort_Node_Address_Node_Protocol_By_Id_GreaterOrEqual_And_Disqualified_Is_Null_OrderBy_Asc_Id(ctx context.Context,
node_id_greater_or_equal Node_Id_Field, node_id_greater_or_equal Node_Id_Field,
limit int, offset int64) ( limit int, offset int64) (
rows []*Id_LastNet_Address_Protocol_Row, err error) { rows []*Id_LastNet_LastIpPort_Address_Protocol_Row, err error) {
defer mon.Task()(&ctx)(&err) defer mon.Task()(&ctx)(&err)
var __embed_stmt = __sqlbundle_Literal("SELECT nodes.id, nodes.last_net, nodes.address, nodes.protocol FROM nodes WHERE nodes.id >= ? AND nodes.disqualified is NULL ORDER BY nodes.id LIMIT ? OFFSET ?") var __embed_stmt = __sqlbundle_Literal("SELECT nodes.id, nodes.last_net, nodes.last_ip_port, nodes.address, nodes.protocol FROM nodes WHERE nodes.id >= ? AND nodes.disqualified is NULL ORDER BY nodes.id LIMIT ? OFFSET ?")
var __values []interface{} var __values []interface{}
__values = append(__values, node_id_greater_or_equal.value()) __values = append(__values, node_id_greater_or_equal.value())
@ -16066,8 +16117,8 @@ func (obj *cockroachImpl) Limited_Node_Id_Node_LastNet_Node_Address_Node_Protoco
defer __rows.Close() defer __rows.Close()
for __rows.Next() { for __rows.Next() {
row := &Id_LastNet_Address_Protocol_Row{} row := &Id_LastNet_LastIpPort_Address_Protocol_Row{}
err = __rows.Scan(&row.Id, &row.LastNet, &row.Address, &row.Protocol) err = __rows.Scan(&row.Id, &row.LastNet, &row.LastIpPort, &row.Address, &row.Protocol)
if err != nil { if err != nil {
return nil, obj.makeErr(err) return nil, obj.makeErr(err)
} }
@ -16112,12 +16163,12 @@ func (obj *cockroachImpl) All_Node_Id_Node_PieceCount_By_PieceCount_Not_Number(c
} }
func (obj *cockroachImpl) Limited_Node_Id_Node_Address_Node_LastContactSuccess_Node_LastContactFailure_By_LastContactSuccess_Less_LastContactFailure_And_Disqualified_Is_Null_OrderBy_Asc_LastContactFailure(ctx context.Context, func (obj *cockroachImpl) Limited_Node_Id_Node_Address_Node_LastIpPort_Node_LastContactSuccess_Node_LastContactFailure_By_LastContactSuccess_Less_LastContactFailure_And_Disqualified_Is_Null_OrderBy_Asc_LastContactFailure(ctx context.Context,
limit int, offset int64) ( limit int, offset int64) (
rows []*Id_Address_LastContactSuccess_LastContactFailure_Row, err error) { rows []*Id_Address_LastIpPort_LastContactSuccess_LastContactFailure_Row, err error) {
defer mon.Task()(&ctx)(&err) defer mon.Task()(&ctx)(&err)
var __embed_stmt = __sqlbundle_Literal("SELECT nodes.id, nodes.address, nodes.last_contact_success, nodes.last_contact_failure FROM nodes WHERE nodes.last_contact_success < nodes.last_contact_failure AND nodes.disqualified is NULL ORDER BY nodes.last_contact_failure LIMIT ? OFFSET ?") var __embed_stmt = __sqlbundle_Literal("SELECT nodes.id, nodes.address, nodes.last_ip_port, nodes.last_contact_success, nodes.last_contact_failure FROM nodes WHERE nodes.last_contact_success < nodes.last_contact_failure AND nodes.disqualified is NULL ORDER BY nodes.last_contact_failure LIMIT ? OFFSET ?")
var __values []interface{} var __values []interface{}
@ -16133,8 +16184,8 @@ func (obj *cockroachImpl) Limited_Node_Id_Node_Address_Node_LastContactSuccess_N
defer __rows.Close() defer __rows.Close()
for __rows.Next() { for __rows.Next() {
row := &Id_Address_LastContactSuccess_LastContactFailure_Row{} row := &Id_Address_LastIpPort_LastContactSuccess_LastContactFailure_Row{}
err = __rows.Scan(&row.Id, &row.Address, &row.LastContactSuccess, &row.LastContactFailure) err = __rows.Scan(&row.Id, &row.Address, &row.LastIpPort, &row.LastContactSuccess, &row.LastContactFailure)
if err != nil { if err != nil {
return nil, obj.makeErr(err) return nil, obj.makeErr(err)
} }
@ -16147,12 +16198,12 @@ func (obj *cockroachImpl) Limited_Node_Id_Node_Address_Node_LastContactSuccess_N
} }
func (obj *cockroachImpl) All_Node_Id_Node_Address_Node_LastContactSuccess_Node_LastContactFailure_By_LastContactSuccess_Less_And_LastContactSuccess_Greater_LastContactFailure_And_Disqualified_Is_Null_OrderBy_Asc_LastContactSuccess(ctx context.Context, func (obj *cockroachImpl) All_Node_Id_Node_Address_Node_LastIpPort_Node_LastContactSuccess_Node_LastContactFailure_By_LastContactSuccess_Less_And_LastContactSuccess_Greater_LastContactFailure_And_Disqualified_Is_Null_OrderBy_Asc_LastContactSuccess(ctx context.Context,
node_last_contact_success_less Node_LastContactSuccess_Field) ( node_last_contact_success_less Node_LastContactSuccess_Field) (
rows []*Id_Address_LastContactSuccess_LastContactFailure_Row, err error) { rows []*Id_Address_LastIpPort_LastContactSuccess_LastContactFailure_Row, err error) {
defer mon.Task()(&ctx)(&err) defer mon.Task()(&ctx)(&err)
var __embed_stmt = __sqlbundle_Literal("SELECT nodes.id, nodes.address, nodes.last_contact_success, nodes.last_contact_failure FROM nodes WHERE nodes.last_contact_success < ? AND nodes.last_contact_success > nodes.last_contact_failure AND nodes.disqualified is NULL ORDER BY nodes.last_contact_success") var __embed_stmt = __sqlbundle_Literal("SELECT nodes.id, nodes.address, nodes.last_ip_port, nodes.last_contact_success, nodes.last_contact_failure FROM nodes WHERE nodes.last_contact_success < ? AND nodes.last_contact_success > nodes.last_contact_failure AND nodes.disqualified is NULL ORDER BY nodes.last_contact_success")
var __values []interface{} var __values []interface{}
__values = append(__values, node_last_contact_success_less.value()) __values = append(__values, node_last_contact_success_less.value())
@ -16167,8 +16218,8 @@ func (obj *cockroachImpl) All_Node_Id_Node_Address_Node_LastContactSuccess_Node_
defer __rows.Close() defer __rows.Close()
for __rows.Next() { for __rows.Next() {
row := &Id_Address_LastContactSuccess_LastContactFailure_Row{} row := &Id_Address_LastIpPort_LastContactSuccess_LastContactFailure_Row{}
err = __rows.Scan(&row.Id, &row.Address, &row.LastContactSuccess, &row.LastContactFailure) err = __rows.Scan(&row.Id, &row.Address, &row.LastIpPort, &row.LastContactSuccess, &row.LastContactFailure)
if err != nil { if err != nil {
return nil, obj.makeErr(err) return nil, obj.makeErr(err)
} }
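These contact queries now return last_ip_port next to address, so callers that re-contact quiet nodes can dial the cached ip:port and keep the DNS name as a fallback. A hedged sketch of that choice (the exact fallback policy lives in the callers, outside this diff):

package main

import "fmt"

// dialTarget prefers the cached ip:port when one has been recorded and falls
// back to the DNS address otherwise (e.g. for pre-migration rows).
func dialTarget(lastIPPort, address string) string {
	if lastIPPort != "" {
		return lastIPPort
	}
	return address
}

func main() {
	fmt.Println(dialTarget("", "node1.example.test:28967"))
	fmt.Println(dialTarget("203.0.113.7:28967", "node1.example.test:28967"))
}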
@ -16762,7 +16813,7 @@ func (obj *cockroachImpl) Paged_PendingSerialQueue(ctx context.Context,
rows []*PendingSerialQueue, next *Paged_PendingSerialQueue_Continuation, err error) { rows []*PendingSerialQueue, next *Paged_PendingSerialQueue_Continuation, err error) {
defer mon.Task()(&ctx)(&err) defer mon.Task()(&ctx)(&err)
var __embed_stmt = __sqlbundle_Literal("SELECT pending_serial_queue.storage_node_id, pending_serial_queue.bucket_id, pending_serial_queue.serial_number, pending_serial_queue.action, pending_serial_queue.settled, pending_serial_queue.expires_at, pending_serial_queue.storage_node_id, pending_serial_queue.bucket_id, pending_serial_queue.serial_number FROM pending_serial_queue WHERE (pending_serial_queue.storage_node_id, pending_serial_queue.bucket_id, pending_serial_queue.serial_number) > (?, ?, ?) ORDER BY pending_serial_queue.storage_node_id, pending_serial_queue.bucket_id, pending_serial_queue.serial_number LIMIT ?") var __embed_stmt = __sqlbundle_Literal("SELECT pending_serial_queue.storage_node_id, pending_serial_queue.bucket_id, pending_serial_queue.serial_number, pending_serial_queue.action, pending_serial_queue.settled, pending_serial_queue.expires_at, pending_serial_queue.storage_node_id, pending_serial_queue.bucket_id, pending_serial_queue.serial_number FROM pending_serial_queue WHERE (pending_serial_queue.storage_node_id > ? OR (pending_serial_queue.storage_node_id = ? AND (pending_serial_queue.bucket_id > ? OR (pending_serial_queue.bucket_id = ? AND pending_serial_queue.serial_number > ?)))) ORDER BY pending_serial_queue.storage_node_id, pending_serial_queue.bucket_id, pending_serial_queue.serial_number LIMIT ?")
var __embed_first_stmt = __sqlbundle_Literal("SELECT pending_serial_queue.storage_node_id, pending_serial_queue.bucket_id, pending_serial_queue.serial_number, pending_serial_queue.action, pending_serial_queue.settled, pending_serial_queue.expires_at, pending_serial_queue.storage_node_id, pending_serial_queue.bucket_id, pending_serial_queue.serial_number FROM pending_serial_queue ORDER BY pending_serial_queue.storage_node_id, pending_serial_queue.bucket_id, pending_serial_queue.serial_number LIMIT ?") var __embed_first_stmt = __sqlbundle_Literal("SELECT pending_serial_queue.storage_node_id, pending_serial_queue.bucket_id, pending_serial_queue.serial_number, pending_serial_queue.action, pending_serial_queue.settled, pending_serial_queue.expires_at, pending_serial_queue.storage_node_id, pending_serial_queue.bucket_id, pending_serial_queue.serial_number FROM pending_serial_queue ORDER BY pending_serial_queue.storage_node_id, pending_serial_queue.bucket_id, pending_serial_queue.serial_number LIMIT ?")
@ -16770,7 +16821,7 @@ func (obj *cockroachImpl) Paged_PendingSerialQueue(ctx context.Context,
var __stmt string var __stmt string
if start != nil && start._set { if start != nil && start._set {
__values = append(__values, start._value_storage_node_id, start._value_bucket_id, start._value_serial_number, limit) __values = append(__values, start._value_storage_node_id, start._value_storage_node_id, start._value_bucket_id, start._value_bucket_id, start._value_serial_number, limit)
__stmt = __sqlbundle_Render(obj.dialect, __embed_stmt) __stmt = __sqlbundle_Render(obj.dialect, __embed_stmt)
} else { } else {
__values = append(__values, limit) __values = append(__values, limit)
@ -18294,7 +18345,7 @@ func (obj *cockroachImpl) Update_Node_By_Id(ctx context.Context,
defer mon.Task()(&ctx)(&err) defer mon.Task()(&ctx)(&err)
var __sets = &__sqlbundle_Hole{} var __sets = &__sqlbundle_Hole{}
var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("UPDATE nodes SET "), __sets, __sqlbundle_Literal(" WHERE nodes.id = ? RETURNING nodes.id, nodes.address, nodes.last_net, nodes.protocol, nodes.type, nodes.email, nodes.wallet, nodes.free_bandwidth, nodes.free_disk, nodes.piece_count, nodes.major, nodes.minor, nodes.patch, nodes.hash, nodes.timestamp, nodes.release, nodes.latency_90, nodes.audit_success_count, nodes.total_audit_count, nodes.uptime_success_count, nodes.total_uptime_count, nodes.created_at, nodes.updated_at, nodes.last_contact_success, nodes.last_contact_failure, nodes.contained, nodes.disqualified, nodes.suspended, nodes.audit_reputation_alpha, nodes.audit_reputation_beta, nodes.unknown_audit_reputation_alpha, nodes.unknown_audit_reputation_beta, nodes.uptime_reputation_alpha, nodes.uptime_reputation_beta, nodes.exit_initiated_at, nodes.exit_loop_completed_at, nodes.exit_finished_at, nodes.exit_success")}} var __embed_stmt = __sqlbundle_Literals{Join: "", SQLs: []__sqlbundle_SQL{__sqlbundle_Literal("UPDATE nodes SET "), __sets, __sqlbundle_Literal(" WHERE nodes.id = ? RETURNING nodes.id, nodes.address, nodes.last_net, nodes.last_ip_port, nodes.protocol, nodes.type, nodes.email, nodes.wallet, nodes.free_bandwidth, nodes.free_disk, nodes.piece_count, nodes.major, nodes.minor, nodes.patch, nodes.hash, nodes.timestamp, nodes.release, nodes.latency_90, nodes.audit_success_count, nodes.total_audit_count, nodes.uptime_success_count, nodes.total_uptime_count, nodes.created_at, nodes.updated_at, nodes.last_contact_success, nodes.last_contact_failure, nodes.contained, nodes.disqualified, nodes.suspended, nodes.audit_reputation_alpha, nodes.audit_reputation_beta, nodes.unknown_audit_reputation_alpha, nodes.unknown_audit_reputation_beta, nodes.uptime_reputation_alpha, nodes.uptime_reputation_beta, nodes.exit_initiated_at, nodes.exit_loop_completed_at, nodes.exit_finished_at, nodes.exit_success")}}
__sets_sql := __sqlbundle_Literals{Join: ", "} __sets_sql := __sqlbundle_Literals{Join: ", "}
var __values []interface{} var __values []interface{}
@ -18310,6 +18361,11 @@ func (obj *cockroachImpl) Update_Node_By_Id(ctx context.Context,
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("last_net = ?")) __sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("last_net = ?"))
} }
if update.LastIpPort._set {
__values = append(__values, update.LastIpPort.value())
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("last_ip_port = ?"))
}
if update.Protocol._set { if update.Protocol._set {
__values = append(__values, update.Protocol.value()) __values = append(__values, update.Protocol.value())
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("protocol = ?")) __sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("protocol = ?"))
@ -18489,7 +18545,7 @@ func (obj *cockroachImpl) Update_Node_By_Id(ctx context.Context,
obj.logStmt(__stmt, __values...) obj.logStmt(__stmt, __values...)
node = &Node{} node = &Node{}
err = obj.driver.QueryRowContext(ctx, __stmt, __values...).Scan(&node.Id, &node.Address, &node.LastNet, &node.Protocol, &node.Type, &node.Email, &node.Wallet, &node.FreeBandwidth, &node.FreeDisk, &node.PieceCount, &node.Major, &node.Minor, &node.Patch, &node.Hash, &node.Timestamp, &node.Release, &node.Latency90, &node.AuditSuccessCount, &node.TotalAuditCount, &node.UptimeSuccessCount, &node.TotalUptimeCount, &node.CreatedAt, &node.UpdatedAt, &node.LastContactSuccess, &node.LastContactFailure, &node.Contained, &node.Disqualified, &node.Suspended, &node.AuditReputationAlpha, &node.AuditReputationBeta, &node.UnknownAuditReputationAlpha, &node.UnknownAuditReputationBeta, &node.UptimeReputationAlpha, &node.UptimeReputationBeta, &node.ExitInitiatedAt, &node.ExitLoopCompletedAt, &node.ExitFinishedAt, &node.ExitSuccess) err = obj.driver.QueryRowContext(ctx, __stmt, __values...).Scan(&node.Id, &node.Address, &node.LastNet, &node.LastIpPort, &node.Protocol, &node.Type, &node.Email, &node.Wallet, &node.FreeBandwidth, &node.FreeDisk, &node.PieceCount, &node.Major, &node.Minor, &node.Patch, &node.Hash, &node.Timestamp, &node.Release, &node.Latency90, &node.AuditSuccessCount, &node.TotalAuditCount, &node.UptimeSuccessCount, &node.TotalUptimeCount, &node.CreatedAt, &node.UpdatedAt, &node.LastContactSuccess, &node.LastContactFailure, &node.Contained, &node.Disqualified, &node.Suspended, &node.AuditReputationAlpha, &node.AuditReputationBeta, &node.UnknownAuditReputationAlpha, &node.UnknownAuditReputationBeta, &node.UptimeReputationAlpha, &node.UptimeReputationBeta, &node.ExitInitiatedAt, &node.ExitLoopCompletedAt, &node.ExitFinishedAt, &node.ExitSuccess)
if err == sql.ErrNoRows { if err == sql.ErrNoRows {
return nil, nil return nil, nil
} }
@ -18522,6 +18578,11 @@ func (obj *cockroachImpl) UpdateNoReturn_Node_By_Id(ctx context.Context,
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("last_net = ?")) __sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("last_net = ?"))
} }
if update.LastIpPort._set {
__values = append(__values, update.LastIpPort.value())
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("last_ip_port = ?"))
}
if update.Protocol._set { if update.Protocol._set {
__values = append(__values, update.Protocol.value()) __values = append(__values, update.Protocol.value())
__sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("protocol = ?")) __sets_sql.SQLs = append(__sets_sql.SQLs, __sqlbundle_Literal("protocol = ?"))
@ -20648,14 +20709,14 @@ func (rx *Rx) All_Node_Id(ctx context.Context) (
return tx.All_Node_Id(ctx) return tx.All_Node_Id(ctx)
} }
func (rx *Rx) All_Node_Id_Node_Address_Node_LastContactSuccess_Node_LastContactFailure_By_LastContactSuccess_Less_And_LastContactSuccess_Greater_LastContactFailure_And_Disqualified_Is_Null_OrderBy_Asc_LastContactSuccess(ctx context.Context, func (rx *Rx) All_Node_Id_Node_Address_Node_LastIpPort_Node_LastContactSuccess_Node_LastContactFailure_By_LastContactSuccess_Less_And_LastContactSuccess_Greater_LastContactFailure_And_Disqualified_Is_Null_OrderBy_Asc_LastContactSuccess(ctx context.Context,
node_last_contact_success_less Node_LastContactSuccess_Field) ( node_last_contact_success_less Node_LastContactSuccess_Field) (
rows []*Id_Address_LastContactSuccess_LastContactFailure_Row, err error) { rows []*Id_Address_LastIpPort_LastContactSuccess_LastContactFailure_Row, err error) {
var tx *Tx var tx *Tx
if tx, err = rx.getTx(ctx); err != nil { if tx, err = rx.getTx(ctx); err != nil {
return return
} }
return tx.All_Node_Id_Node_Address_Node_LastContactSuccess_Node_LastContactFailure_By_LastContactSuccess_Less_And_LastContactSuccess_Greater_LastContactFailure_And_Disqualified_Is_Null_OrderBy_Asc_LastContactSuccess(ctx, node_last_contact_success_less) return tx.All_Node_Id_Node_Address_Node_LastIpPort_Node_LastContactSuccess_Node_LastContactFailure_By_LastContactSuccess_Less_And_LastContactSuccess_Greater_LastContactFailure_And_Disqualified_Is_Null_OrderBy_Asc_LastContactSuccess(ctx, node_last_contact_success_less)
} }
func (rx *Rx) All_Node_Id_Node_PieceCount_By_PieceCount_Not_Number(ctx context.Context) ( func (rx *Rx) All_Node_Id_Node_PieceCount_By_PieceCount_Not_Number(ctx context.Context) (
@ -22056,25 +22117,25 @@ func (rx *Rx) Limited_Node_By_Id_GreaterOrEqual_OrderBy_Asc_Id(ctx context.Conte
return tx.Limited_Node_By_Id_GreaterOrEqual_OrderBy_Asc_Id(ctx, node_id_greater_or_equal, limit, offset) return tx.Limited_Node_By_Id_GreaterOrEqual_OrderBy_Asc_Id(ctx, node_id_greater_or_equal, limit, offset)
} }
func (rx *Rx) Limited_Node_Id_Node_Address_Node_LastContactSuccess_Node_LastContactFailure_By_LastContactSuccess_Less_LastContactFailure_And_Disqualified_Is_Null_OrderBy_Asc_LastContactFailure(ctx context.Context, func (rx *Rx) Limited_Node_Id_Node_Address_Node_LastIpPort_Node_LastContactSuccess_Node_LastContactFailure_By_LastContactSuccess_Less_LastContactFailure_And_Disqualified_Is_Null_OrderBy_Asc_LastContactFailure(ctx context.Context,
limit int, offset int64) ( limit int, offset int64) (
rows []*Id_Address_LastContactSuccess_LastContactFailure_Row, err error) { rows []*Id_Address_LastIpPort_LastContactSuccess_LastContactFailure_Row, err error) {
var tx *Tx var tx *Tx
if tx, err = rx.getTx(ctx); err != nil { if tx, err = rx.getTx(ctx); err != nil {
return return
} }
return tx.Limited_Node_Id_Node_Address_Node_LastContactSuccess_Node_LastContactFailure_By_LastContactSuccess_Less_LastContactFailure_And_Disqualified_Is_Null_OrderBy_Asc_LastContactFailure(ctx, limit, offset) return tx.Limited_Node_Id_Node_Address_Node_LastIpPort_Node_LastContactSuccess_Node_LastContactFailure_By_LastContactSuccess_Less_LastContactFailure_And_Disqualified_Is_Null_OrderBy_Asc_LastContactFailure(ctx, limit, offset)
} }
func (rx *Rx) Limited_Node_Id_Node_LastNet_Node_Address_Node_Protocol_By_Id_GreaterOrEqual_And_Disqualified_Is_Null_OrderBy_Asc_Id(ctx context.Context, func (rx *Rx) Limited_Node_Id_Node_LastNet_Node_LastIpPort_Node_Address_Node_Protocol_By_Id_GreaterOrEqual_And_Disqualified_Is_Null_OrderBy_Asc_Id(ctx context.Context,
node_id_greater_or_equal Node_Id_Field, node_id_greater_or_equal Node_Id_Field,
limit int, offset int64) ( limit int, offset int64) (
rows []*Id_LastNet_Address_Protocol_Row, err error) { rows []*Id_LastNet_LastIpPort_Address_Protocol_Row, err error) {
var tx *Tx var tx *Tx
if tx, err = rx.getTx(ctx); err != nil { if tx, err = rx.getTx(ctx); err != nil {
return return
} }
return tx.Limited_Node_Id_Node_LastNet_Node_Address_Node_Protocol_By_Id_GreaterOrEqual_And_Disqualified_Is_Null_OrderBy_Asc_Id(ctx, node_id_greater_or_equal, limit, offset) return tx.Limited_Node_Id_Node_LastNet_Node_LastIpPort_Node_Address_Node_Protocol_By_Id_GreaterOrEqual_And_Disqualified_Is_Null_OrderBy_Asc_Id(ctx, node_id_greater_or_equal, limit, offset)
} }
func (rx *Rx) Limited_ProjectMember_By_ProjectId(ctx context.Context, func (rx *Rx) Limited_ProjectMember_By_ProjectId(ctx context.Context,
@ -22423,9 +22484,9 @@ type Methods interface {
All_Node_Id(ctx context.Context) ( All_Node_Id(ctx context.Context) (
rows []*Id_Row, err error) rows []*Id_Row, err error)
All_Node_Id_Node_Address_Node_LastContactSuccess_Node_LastContactFailure_By_LastContactSuccess_Less_And_LastContactSuccess_Greater_LastContactFailure_And_Disqualified_Is_Null_OrderBy_Asc_LastContactSuccess(ctx context.Context, All_Node_Id_Node_Address_Node_LastIpPort_Node_LastContactSuccess_Node_LastContactFailure_By_LastContactSuccess_Less_And_LastContactSuccess_Greater_LastContactFailure_And_Disqualified_Is_Null_OrderBy_Asc_LastContactSuccess(ctx context.Context,
node_last_contact_success_less Node_LastContactSuccess_Field) ( node_last_contact_success_less Node_LastContactSuccess_Field) (
rows []*Id_Address_LastContactSuccess_LastContactFailure_Row, err error) rows []*Id_Address_LastIpPort_LastContactSuccess_LastContactFailure_Row, err error)
All_Node_Id_Node_PieceCount_By_PieceCount_Not_Number(ctx context.Context) ( All_Node_Id_Node_PieceCount_By_PieceCount_Not_Number(ctx context.Context) (
rows []*Id_PieceCount_Row, err error) rows []*Id_PieceCount_Row, err error)
@ -23095,14 +23156,14 @@ type Methods interface {
limit int, offset int64) ( limit int, offset int64) (
rows []*Node, err error) rows []*Node, err error)
Limited_Node_Id_Node_Address_Node_LastContactSuccess_Node_LastContactFailure_By_LastContactSuccess_Less_LastContactFailure_And_Disqualified_Is_Null_OrderBy_Asc_LastContactFailure(ctx context.Context, Limited_Node_Id_Node_Address_Node_LastIpPort_Node_LastContactSuccess_Node_LastContactFailure_By_LastContactSuccess_Less_LastContactFailure_And_Disqualified_Is_Null_OrderBy_Asc_LastContactFailure(ctx context.Context,
limit int, offset int64) ( limit int, offset int64) (
rows []*Id_Address_LastContactSuccess_LastContactFailure_Row, err error) rows []*Id_Address_LastIpPort_LastContactSuccess_LastContactFailure_Row, err error)
Limited_Node_Id_Node_LastNet_Node_Address_Node_Protocol_By_Id_GreaterOrEqual_And_Disqualified_Is_Null_OrderBy_Asc_Id(ctx context.Context, Limited_Node_Id_Node_LastNet_Node_LastIpPort_Node_Address_Node_Protocol_By_Id_GreaterOrEqual_And_Disqualified_Is_Null_OrderBy_Asc_Id(ctx context.Context,
node_id_greater_or_equal Node_Id_Field, node_id_greater_or_equal Node_Id_Field,
limit int, offset int64) ( limit int, offset int64) (
rows []*Id_LastNet_Address_Protocol_Row, err error) rows []*Id_LastNet_LastIpPort_Address_Protocol_Row, err error)
Limited_ProjectMember_By_ProjectId(ctx context.Context, Limited_ProjectMember_By_ProjectId(ctx context.Context,
project_member_project_id ProjectMember_ProjectId_Field, project_member_project_id ProjectMember_ProjectId_Field,


@ -135,6 +135,7 @@ CREATE TABLE nodes (
id bytea NOT NULL, id bytea NOT NULL,
address text NOT NULL, address text NOT NULL,
last_net text NOT NULL, last_net text NOT NULL,
last_ip_port text,
protocol integer NOT NULL, protocol integer NOT NULL,
type integer NOT NULL, type integer NOT NULL,
email text NOT NULL, email text NOT NULL,
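Unlike last_net, the new last_ip_port column is nullable: rows created before this migration have no stored ip:port until the node's next check-in writes one. On the Go side the column scans into a *string, so consumers nil-check and fall back to the empty string, as the overlay code at the end of this diff does. A minimal sketch of that default, assuming the *string representation:

package main

import "fmt"

// lastIPPortOrEmpty converts the nullable last_ip_port column (scanned into a
// *string) into the empty-string default the rest of the code expects.
func lastIPPortOrEmpty(lastIPPort *string) string {
	if lastIPPort == nil {
		return "" // pre-migration row: no ip:port recorded yet
	}
	return *lastIPPort
}

func main() {
	var unset *string
	ip := "203.0.113.7:28967"
	fmt.Printf("%q %q\n", lastIPPortOrEmpty(unset), lastIPPortOrEmpty(&ip))
}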


@ -903,6 +903,11 @@ func (db *satelliteDB) PostgresMigration() *migrate.Migration {
`CREATE INDEX injuredsegments_num_healthy_pieces_index ON injuredsegments ( num_healthy_pieces );`, `CREATE INDEX injuredsegments_num_healthy_pieces_index ON injuredsegments ( num_healthy_pieces );`,
}, },
}, },
{
DB: db.DB, Description: "Add column last_ip_port to nodes table", Version: 96, Action: migrate.SQL{
`ALTER TABLE nodes ADD COLUMN last_ip_port text`,
},
},
}, },
} }
} }
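Migration 96 is purely additive (ALTER TABLE ... ADD COLUMN with no default and no NOT NULL), so it is cheap on existing rows and the column starts out NULL everywhere. Steps in this list are keyed by Version and run in order past the recorded schema version; a reduced sketch of that dispatch, with simplified types (the real migrate package does considerably more: version tracking in the database, transactions, logging):

package main

import "fmt"

// step is a simplified stand-in for one migration entry: a version plus SQL.
type step struct {
	version int
	sql     []string
}

// pending returns the steps that still need to run, in order.
func pending(current int, steps []step) []step {
	var out []step
	for _, s := range steps {
		if s.version > current {
			out = append(out, s)
		}
	}
	return out
}

func main() {
	steps := []step{
		{95, []string{"CREATE INDEX injuredsegments_num_healthy_pieces_index ON injuredsegments ( num_healthy_pieces );"}},
		{96, []string{"ALTER TABLE nodes ADD COLUMN last_ip_port text"}},
	}
	for _, s := range pending(95, steps) {
		fmt.Println("would apply version", s.version, "->", s.sql[0])
	}
}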


@ -38,7 +38,7 @@ type overlaycache struct {
db *satelliteDB db *satelliteDB
} }
func (cache *overlaycache) SelectStorageNodes(ctx context.Context, count int, criteria *overlay.NodeCriteria) (nodes []*pb.Node, err error) { func (cache *overlaycache) SelectStorageNodes(ctx context.Context, count int, criteria *overlay.NodeCriteria) (nodes []*overlay.NodeDossier, err error) {
defer mon.Task()(&ctx)(&err) defer mon.Task()(&ctx)(&err)
nodeType := int(pb.NodeType_STORAGE) nodeType := int(pb.NodeType_STORAGE)
@ -83,7 +83,7 @@ func (cache *overlaycache) SelectStorageNodes(ctx context.Context, count int, cr
for _, n := range moreNodes { for _, n := range moreNodes {
nodes = append(nodes, n) nodes = append(nodes, n)
criteria.ExcludedNodes = append(criteria.ExcludedNodes, n.Id) criteria.ExcludedNodes = append(criteria.ExcludedNodes, n.Id)
criteria.ExcludedIPs = append(criteria.ExcludedIPs, n.LastIp) criteria.ExcludedIPs = append(criteria.ExcludedIPs, n.LastNet)
} }
if len(nodes) == count { if len(nodes) == count {
break break
@ -93,7 +93,7 @@ func (cache *overlaycache) SelectStorageNodes(ctx context.Context, count int, cr
return nodes, nil return nodes, nil
} }
func (cache *overlaycache) SelectNewStorageNodes(ctx context.Context, count int, criteria *overlay.NodeCriteria) (nodes []*pb.Node, err error) { func (cache *overlaycache) SelectNewStorageNodes(ctx context.Context, count int, criteria *overlay.NodeCriteria) (nodes []*overlay.NodeDossier, err error) {
defer mon.Task()(&ctx)(&err) defer mon.Task()(&ctx)(&err)
nodeType := int(pb.NodeType_STORAGE) nodeType := int(pb.NodeType_STORAGE)
@ -136,7 +136,7 @@ func (cache *overlaycache) SelectNewStorageNodes(ctx context.Context, count int,
for _, n := range moreNodes { for _, n := range moreNodes {
nodes = append(nodes, n) nodes = append(nodes, n)
criteria.ExcludedNodes = append(criteria.ExcludedNodes, n.Id) criteria.ExcludedNodes = append(criteria.ExcludedNodes, n.Id)
criteria.ExcludedIPs = append(criteria.ExcludedIPs, n.LastIp) criteria.ExcludedIPs = append(criteria.ExcludedIPs, n.LastNet)
} }
if len(nodes) == count { if len(nodes) == count {
break break
@ -146,8 +146,8 @@ func (cache *overlaycache) SelectNewStorageNodes(ctx context.Context, count int,
return nodes, nil return nodes, nil
} }
// GetNodeIPs returns a list of node IP addresses. Warning: these node IP addresses might be returned out of order. // GetNodesNetwork returns the /24 subnet for each storage node, order is not guaranteed.
func (cache *overlaycache) GetNodeIPs(ctx context.Context, nodeIDs []storj.NodeID) (nodeIPs []string, err error) { func (cache *overlaycache) GetNodesNetwork(ctx context.Context, nodeIDs []storj.NodeID) (nodeNets []string, err error) {
defer mon.Task()(&ctx)(&err) defer mon.Task()(&ctx)(&err)
var rows *sql.Rows var rows *sql.Rows
@ -167,12 +167,12 @@ func (cache *overlaycache) GetNodeIPs(ctx context.Context, nodeIDs []storj.NodeI
if err != nil { if err != nil {
return nil, err return nil, err
} }
nodeIPs = append(nodeIPs, ip) nodeNets = append(nodeNets, ip)
} }
return nodeIPs, Error.Wrap(rows.Err()) return nodeNets, Error.Wrap(rows.Err())
} }
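last_net holds the /24 network derived from the node's resolved IP, which is what this renamed query returns. Roughly, for IPv4 the derivation masks the host part away; a sketch under that assumption (the real overlay.GetNetwork also resolves hostnames, handles IPv6, and returns the ip:port alongside the network):

package main

import (
	"fmt"
	"net"
)

// networkOf derives the /24 network string stored in nodes.last_net from an
// IPv4 "host:port" value. Sketch only: IPv6 and hostname resolution are elided.
func networkOf(ipPort string) (string, error) {
	host, _, err := net.SplitHostPort(ipPort)
	if err != nil {
		return "", err
	}
	ip := net.ParseIP(host)
	if ip == nil || ip.To4() == nil {
		return "", fmt.Errorf("not an IPv4 address: %q", host)
	}
	return ip.Mask(net.CIDRMask(24, 32)).String(), nil
}

func main() {
	network, err := networkOf("203.0.113.7:28967")
	fmt.Println(network, err) // 203.0.113.0 <nil>
}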
func (cache *overlaycache) queryNodes(ctx context.Context, excludedNodes []storj.NodeID, count int, safeQuery string, args ...interface{}) (_ []*pb.Node, err error) { func (cache *overlaycache) queryNodes(ctx context.Context, excludedNodes []storj.NodeID, count int, safeQuery string, args ...interface{}) (_ []*overlay.NodeDossier, err error) {
defer mon.Task()(&ctx)(&err) defer mon.Task()(&ctx)(&err)
if count == 0 { if count == 0 {
@ -190,7 +190,7 @@ func (cache *overlaycache) queryNodes(ctx context.Context, excludedNodes []storj
args = append(args, count) args = append(args, count)
var rows *sql.Rows var rows *sql.Rows
rows, err = cache.db.Query(ctx, cache.db.Rebind(`SELECT id, type, address, last_net, rows, err = cache.db.Query(ctx, cache.db.Rebind(`SELECT id, type, address, last_net, last_ip_port,
free_disk, total_audit_count, audit_success_count, free_disk, total_audit_count, audit_success_count,
total_uptime_count, uptime_success_count, disqualified, audit_reputation_alpha, total_uptime_count, uptime_success_count, disqualified, audit_reputation_alpha,
audit_reputation_beta audit_reputation_beta
@ -204,14 +204,13 @@ func (cache *overlaycache) queryNodes(ctx context.Context, excludedNodes []storj
} }
defer func() { err = errs.Combine(err, rows.Close()) }() defer func() { err = errs.Combine(err, rows.Close()) }()
var nodes []*pb.Node var nodes []*overlay.NodeDossier
for rows.Next() { for rows.Next() {
dbNode := &dbx.Node{} dbNode := &dbx.Node{}
err = rows.Scan(&dbNode.Id, &dbNode.Type, err = rows.Scan(&dbNode.Id, &dbNode.Type, &dbNode.Address, &dbNode.LastNet, &dbNode.LastIpPort,
&dbNode.Address, &dbNode.LastNet, &dbNode.FreeDisk, &dbNode.FreeDisk, &dbNode.TotalAuditCount, &dbNode.AuditSuccessCount,
&dbNode.TotalAuditCount, &dbNode.AuditSuccessCount, &dbNode.TotalUptimeCount, &dbNode.UptimeSuccessCount, &dbNode.Disqualified, &dbNode.AuditReputationAlpha,
&dbNode.TotalUptimeCount, &dbNode.UptimeSuccessCount, &dbNode.Disqualified, &dbNode.AuditReputationBeta,
&dbNode.AuditReputationAlpha, &dbNode.AuditReputationBeta,
) )
if err != nil { if err != nil {
return nil, err return nil, err
@ -221,13 +220,13 @@ func (cache *overlaycache) queryNodes(ctx context.Context, excludedNodes []storj
if err != nil { if err != nil {
return nil, err return nil, err
} }
nodes = append(nodes, &dossier.Node) nodes = append(nodes, dossier)
} }
return nodes, Error.Wrap(rows.Err()) return nodes, Error.Wrap(rows.Err())
} }
func (cache *overlaycache) queryNodesDistinct(ctx context.Context, excludedNodes []storj.NodeID, excludedIPs []string, count int, safeQuery string, distinctIP bool, args ...interface{}) (_ []*pb.Node, err error) { func (cache *overlaycache) queryNodesDistinct(ctx context.Context, excludedNodes []storj.NodeID, excludedIPs []string, count int, safeQuery string, distinctIP bool, args ...interface{}) (_ []*overlay.NodeDossier, err error) {
defer mon.Task()(&ctx)(&err) defer mon.Task()(&ctx)(&err)
if count == 0 { if count == 0 {
@ -255,7 +254,7 @@ func (cache *overlaycache) queryNodesDistinct(ctx context.Context, excludedNodes
SELECT * SELECT *
FROM ( FROM (
SELECT DISTINCT ON (last_net) last_net, -- choose at most 1 node from this IP or network SELECT DISTINCT ON (last_net) last_net, -- choose at most 1 node from this IP or network
id, type, address, free_disk, total_audit_count, id, type, address, last_ip_port, free_disk, total_audit_count,
audit_success_count, total_uptime_count, uptime_success_count, audit_success_count, total_uptime_count, uptime_success_count,
audit_reputation_alpha, audit_reputation_beta audit_reputation_alpha, audit_reputation_beta
FROM nodes FROM nodes
@ -270,13 +269,12 @@ func (cache *overlaycache) queryNodesDistinct(ctx context.Context, excludedNodes
} }
defer func() { err = errs.Combine(err, rows.Close()) }() defer func() { err = errs.Combine(err, rows.Close()) }()
var nodes []*pb.Node var nodes []*overlay.NodeDossier
for rows.Next() { for rows.Next() {
dbNode := &dbx.Node{} dbNode := &dbx.Node{}
err = rows.Scan(&dbNode.LastNet, &dbNode.Id, &dbNode.Type, err = rows.Scan(&dbNode.LastNet,
&dbNode.Address, &dbNode.FreeDisk, &dbNode.Id, &dbNode.Type, &dbNode.Address, &dbNode.LastIpPort, &dbNode.FreeDisk, &dbNode.TotalAuditCount,
&dbNode.TotalAuditCount, &dbNode.AuditSuccessCount, &dbNode.AuditSuccessCount, &dbNode.TotalUptimeCount, &dbNode.UptimeSuccessCount,
&dbNode.TotalUptimeCount, &dbNode.UptimeSuccessCount,
&dbNode.AuditReputationAlpha, &dbNode.AuditReputationBeta, &dbNode.AuditReputationAlpha, &dbNode.AuditReputationBeta,
) )
if err != nil { if err != nil {
@ -286,7 +284,7 @@ func (cache *overlaycache) queryNodesDistinct(ctx context.Context, excludedNodes
if err != nil { if err != nil {
return nil, err return nil, err
} }
nodes = append(nodes, &dossier.Node) nodes = append(nodes, dossier)
} }
return nodes, Error.Wrap(rows.Err()) return nodes, Error.Wrap(rows.Err())
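The DISTINCT ON (last_net) in queryNodesDistinct is what enforces at most one selected node per /24 network; last_ip_port is merely carried along as an extra output column. The selection behavior in miniature (illustrative Go, not the SQL planner):

package main

import "fmt"

type candidate struct {
	id      string
	lastNet string
}

// onePerNetwork keeps at most one candidate per /24 network, mimicking
// SELECT DISTINCT ON (last_net): the first row seen for a network wins.
func onePerNetwork(in []candidate) []candidate {
	seen := make(map[string]bool)
	var out []candidate
	for _, c := range in {
		if seen[c.lastNet] {
			continue // another node from this /24 was already chosen
		}
		seen[c.lastNet] = true
		out = append(out, c)
	}
	return out
}

func main() {
	in := []candidate{
		{"node-a", "203.0.113.0"},
		{"node-b", "203.0.113.0"}, // same /24 as node-a: dropped
		{"node-c", "198.51.100.0"},
	}
	fmt.Println(onePerNetwork(in))
}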
@ -392,7 +390,8 @@ func (cache *overlaycache) KnownReliable(ctx context.Context, onlineWindow time.
// get online nodes // get online nodes
rows, err := cache.db.Query(ctx, cache.db.Rebind(` rows, err := cache.db.Query(ctx, cache.db.Rebind(`
SELECT id, last_net, address, protocol FROM nodes SELECT id, last_net, last_ip_port, address, protocol
FROM nodes
WHERE id = any($1::bytea[]) WHERE id = any($1::bytea[])
AND disqualified IS NULL AND disqualified IS NULL
AND last_contact_success > $2 AND last_contact_success > $2
@ -404,8 +403,8 @@ func (cache *overlaycache) KnownReliable(ctx context.Context, onlineWindow time.
defer func() { err = errs.Combine(err, rows.Close()) }() defer func() { err = errs.Combine(err, rows.Close()) }()
for rows.Next() { for rows.Next() {
row := &dbx.Id_LastNet_Address_Protocol_Row{} row := &dbx.Id_LastNet_LastIpPort_Address_Protocol_Row{}
err = rows.Scan(&row.Id, &row.LastNet, &row.Address, &row.Protocol) err = rows.Scan(&row.Id, &row.LastNet, &row.LastIpPort, &row.Address, &row.Protocol)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -489,7 +488,7 @@ func (cache *overlaycache) PaginateQualified(ctx context.Context, offset int64,
limit = OverlayPaginateLimit limit = OverlayPaginateLimit
} }
dbxInfos, err := cache.db.Limited_Node_Id_Node_LastNet_Node_Address_Node_Protocol_By_Id_GreaterOrEqual_And_Disqualified_Is_Null_OrderBy_Asc_Id(ctx, dbx.Node_Id(cursor.Bytes()), limit, offset) dbxInfos, err := cache.db.Limited_Node_Id_Node_LastNet_Node_LastIpPort_Node_Address_Node_Protocol_By_Id_GreaterOrEqual_And_Disqualified_Is_Null_OrderBy_Asc_Id(ctx, dbx.Node_Id(cursor.Bytes()), limit, offset)
if err != nil { if err != nil {
return nil, false, err return nil, false, err
} }
@ -508,7 +507,7 @@ func (cache *overlaycache) PaginateQualified(ctx context.Context, offset int64,
} }
// UpdateAddress updates node address // UpdateAddress updates node address
func (cache *overlaycache) UpdateAddress(ctx context.Context, info *pb.Node, defaults overlay.NodeSelectionConfig) (err error) { func (cache *overlaycache) UpdateAddress(ctx context.Context, info *overlay.NodeDossier, defaults overlay.NodeSelectionConfig) (err error) {
defer mon.Task()(&ctx)(&err) defer mon.Task()(&ctx)(&err)
if info == nil || info.Id.IsZero() { if info == nil || info.Id.IsZero() {
@ -528,7 +527,8 @@ func (cache *overlaycache) UpdateAddress(ctx context.Context, info *pb.Node, def
last_contact_success, last_contact_success,
last_contact_failure, last_contact_failure,
audit_reputation_alpha, audit_reputation_beta, audit_reputation_alpha, audit_reputation_beta,
major, minor, patch, hash, timestamp, release major, minor, patch, hash, timestamp, release,
last_ip_port
) )
VALUES ( VALUES (
$1, $2, $3, $4, $5, $1, $2, $3, $4, $5,
@ -537,22 +537,26 @@ func (cache *overlaycache) UpdateAddress(ctx context.Context, info *pb.Node, def
$8::timestamptz, $8::timestamptz,
'0001-01-01 00:00:00+00'::timestamptz, '0001-01-01 00:00:00+00'::timestamptz,
$6, $7, $6, $7,
0, 0, 0, '', '0001-01-01 00:00:00+00'::timestamptz, false 0, 0, 0, '', '0001-01-01 00:00:00+00'::timestamptz, false,
$9
) )
ON CONFLICT (id) ON CONFLICT (id)
DO UPDATE DO UPDATE
SET SET
address=$2, address=$2,
last_net=$3, last_net=$3,
protocol=$4 protocol=$4,
last_ip_port=$9
` `
_, err = cache.db.ExecContext(ctx, query, _, err = cache.db.ExecContext(ctx, query,
// args $1 - $5 // args $1 - $5
info.Id.Bytes(), address.Address, info.LastIp, int(address.Transport), int(pb.NodeType_INVALID), info.Id.Bytes(), address.Address, info.LastNet, int(address.Transport), int(pb.NodeType_INVALID),
// args $6 - $7 // args $6 - $7
defaults.AuditReputationAlpha0, defaults.AuditReputationBeta0, defaults.AuditReputationAlpha0, defaults.AuditReputationBeta0,
// args $8 // args $8
time.Now(), time.Now(),
// args $9
info.LastIPPort,
) )
return Error.Wrap(err) return Error.Wrap(err)
} }
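To spell out the upsert above: a brand-new node gets a full row, including last_ip_port ($9), while an existing node only has its addressing columns refreshed; reputation counters are left alone. A trimmed sketch of the same shape, with the column list reduced for readability (not the query verbatim):

package example

// upsertAddressSQL illustrates the UpdateAddress upsert: the conflict
// branch refreshes only the addressing columns, now including last_ip_port.
const upsertAddressSQL = `
INSERT INTO nodes (id, address, last_net, protocol, last_ip_port)
VALUES ($1, $2, $3, $4, $5)
ON CONFLICT (id) DO UPDATE SET
    address      = $2,
    last_net     = $3,
    protocol     = $4,
    last_ip_port = $5
`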
@ -1003,7 +1007,7 @@ func (cache *overlaycache) GetSuccesfulNodesNotCheckedInSince(ctx context.Contex
// get successful nodes that have not checked in within the hour // get successful nodes that have not checked in within the hour
defer mon.Task()(&ctx)(&err) defer mon.Task()(&ctx)(&err)
dbxNodes, err := cache.db.DB.All_Node_Id_Node_Address_Node_LastContactSuccess_Node_LastContactFailure_By_LastContactSuccess_Less_And_LastContactSuccess_Greater_LastContactFailure_And_Disqualified_Is_Null_OrderBy_Asc_LastContactSuccess( dbxNodes, err := cache.db.DB.All_Node_Id_Node_Address_Node_LastIpPort_Node_LastContactSuccess_Node_LastContactFailure_By_LastContactSuccess_Less_And_LastContactSuccess_Greater_LastContactFailure_And_Disqualified_Is_Null_OrderBy_Asc_LastContactSuccess(
ctx, dbx.Node_LastContactSuccess(time.Now().UTC().Add(-duration))) ctx, dbx.Node_LastContactSuccess(time.Now().UTC().Add(-duration)))
if err != nil { if err != nil {
return nil, Error.Wrap(err) return nil, Error.Wrap(err)
@ -1021,6 +1025,9 @@ func (cache *overlaycache) GetSuccesfulNodesNotCheckedInSince(ctx context.Contex
LastContactSuccess: node.LastContactSuccess.UTC(), LastContactSuccess: node.LastContactSuccess.UTC(),
LastContactFailure: node.LastContactFailure.UTC(), LastContactFailure: node.LastContactFailure.UTC(),
} }
if node.LastIpPort != nil {
nodeLastContact.LastIPPort = *node.LastIpPort
}
nodeLastContacts = append(nodeLastContacts, nodeLastContact) nodeLastContacts = append(nodeLastContacts, nodeLastContact)
} }
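Because last_ip_port is nullable in the schema, dbx surfaces it as a *string, which is why the hunk above guards the dereference. The same pattern in isolation (a standalone sketch, not the satellite's code):

package main

import "fmt"

// fromNullable mirrors the guard above: a NULL last_ip_port column becomes
// the empty string on the in-memory struct.
func fromNullable(lastIPPort *string) string {
	if lastIPPort == nil {
		return ""
	}
	return *lastIPPort
}

func main() {
	fresh := "203.0.113.7:7777" // hypothetical cached ip:port
	fmt.Printf("%q\n", fromNullable(&fresh)) // "203.0.113.7:7777"
	fmt.Printf("%q\n", fromNullable(nil))    // "" for legacy rows
}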
@ -1049,7 +1056,7 @@ func populateExitStatusFields(req *overlay.ExitStatusRequest) dbx.Node_Update_Fi
func (cache *overlaycache) GetOfflineNodesLimited(ctx context.Context, limit int) (nodeLastContacts []overlay.NodeLastContact, err error) { func (cache *overlaycache) GetOfflineNodesLimited(ctx context.Context, limit int) (nodeLastContacts []overlay.NodeLastContact, err error) {
defer mon.Task()(&ctx)(&err) defer mon.Task()(&ctx)(&err)
dbxNodes, err := cache.db.DB.Limited_Node_Id_Node_Address_Node_LastContactSuccess_Node_LastContactFailure_By_LastContactSuccess_Less_LastContactFailure_And_Disqualified_Is_Null_OrderBy_Asc_LastContactFailure( dbxNodes, err := cache.db.DB.Limited_Node_Id_Node_Address_Node_LastIpPort_Node_LastContactSuccess_Node_LastContactFailure_By_LastContactSuccess_Less_LastContactFailure_And_Disqualified_Is_Null_OrderBy_Asc_LastContactFailure(
ctx, limit, 0) ctx, limit, 0)
if err != nil { if err != nil {
return nil, Error.Wrap(err) return nil, Error.Wrap(err)
@ -1066,6 +1073,9 @@ func (cache *overlaycache) GetOfflineNodesLimited(ctx context.Context, limit int
LastContactSuccess: node.LastContactSuccess.UTC(), LastContactSuccess: node.LastContactSuccess.UTC(),
LastContactFailure: node.LastContactFailure.UTC(), LastContactFailure: node.LastContactFailure.UTC(),
} }
if node.LastIpPort != nil {
nodeLastContact.LastIPPort = *node.LastIpPort
}
nodeLastContacts = append(nodeLastContacts, nodeLastContact) nodeLastContacts = append(nodeLastContacts, nodeLastContact)
} }
@ -1096,7 +1106,6 @@ func convertDBNode(ctx context.Context, info *dbx.Node) (_ *overlay.NodeDossier,
node := &overlay.NodeDossier{ node := &overlay.NodeDossier{
Node: pb.Node{ Node: pb.Node{
Id: id, Id: id,
LastIp: info.LastNet,
Address: &pb.NodeAddress{ Address: &pb.NodeAddress{
Address: info.Address, Address: info.Address,
Transport: pb.NodeTransport(info.Protocol), Transport: pb.NodeTransport(info.Protocol),
@ -1122,12 +1131,16 @@ func convertDBNode(ctx context.Context, info *dbx.Node) (_ *overlay.NodeDossier,
PieceCount: info.PieceCount, PieceCount: info.PieceCount,
ExitStatus: exitStatus, ExitStatus: exitStatus,
CreatedAt: info.CreatedAt, CreatedAt: info.CreatedAt,
LastNet: info.LastNet,
}
if info.LastIpPort != nil {
node.LastIPPort = *info.LastIpPort
} }
return node, nil return node, nil
} }
func convertDBNodeToPBNode(ctx context.Context, info *dbx.Id_LastNet_Address_Protocol_Row) (_ *pb.Node, err error) { func convertDBNodeToPBNode(ctx context.Context, info *dbx.Id_LastNet_LastIpPort_Address_Protocol_Row) (_ *pb.Node, err error) {
defer mon.Task()(&ctx)(&err) defer mon.Task()(&ctx)(&err)
if info == nil { if info == nil {
return nil, Error.New("missing info") return nil, Error.New("missing info")
@ -1139,7 +1152,6 @@ func convertDBNodeToPBNode(ctx context.Context, info *dbx.Id_LastNet_Address_Pro
} }
return &pb.Node{ return &pb.Node{
Id: id, Id: id,
LastIp: info.LastNet,
Address: &pb.NodeAddress{ Address: &pb.NodeAddress{
Address: info.Address, Address: info.Address,
Transport: pb.NodeTransport(info.Protocol), Transport: pb.NodeTransport(info.Protocol),
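With LastIp removed from pb.Node in both converters, the wire type now carries only the dialable address, while the /24 network and the cached ip:port live on the satellite-side dossier. A minimal sketch of that split using local stand-in types (hypothetical values; the real pb.Node and overlay.NodeDossier have many more fields):

package main

import "fmt"

// Node stands in for pb.Node, reduced to the fields relevant here.
type Node struct {
	ID      string
	Address string // dialable address: hostname or IP, used by audit/repair
}

// NodeDossier stands in for overlay.NodeDossier.
type NodeDossier struct {
	Node
	LastNet    string // /24 network of the last resolved IP, for selection
	LastIPPort string // cached ip:port handed to uplinks
}

func main() {
	d := NodeDossier{
		Node:       Node{ID: "node-a", Address: "node-a.example.net:7777"},
		LastNet:    "203.0.113.0",
		LastIPPort: "203.0.113.7:7777",
	}
	fmt.Printf("dial %s, select by %s, hand uplinks %s\n",
		d.Address, d.LastNet, d.LastIPPort)
}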
@ -1407,7 +1419,8 @@ func (cache *overlaycache) UpdateCheckIn(ctx context.Context, node overlay.NodeC
last_contact_failure, last_contact_failure,
audit_reputation_alpha, audit_reputation_beta, audit_reputation_alpha, audit_reputation_beta,
unknown_audit_reputation_alpha, unknown_audit_reputation_beta, unknown_audit_reputation_alpha, unknown_audit_reputation_beta,
major, minor, patch, hash, timestamp, release major, minor, patch, hash, timestamp, release,
last_ip_port
) )
VALUES ( VALUES (
$1, $2, $3, $4, $5, $1, $2, $3, $4, $5,
@ -1421,7 +1434,8 @@ func (cache *overlaycache) UpdateCheckIn(ctx context.Context, node overlay.NodeC
END, END,
$10, $11, $10, $11,
$10, $11, $10, $11,
$12, $13, $14, $15, $16, $17 $12, $13, $14, $15, $16, $17,
$19
) )
ON CONFLICT (id) ON CONFLICT (id)
DO UPDATE DO UPDATE
@ -1431,7 +1445,6 @@ func (cache *overlaycache) UpdateCheckIn(ctx context.Context, node overlay.NodeC
protocol=$4, protocol=$4,
email=$6, email=$6,
wallet=$7, wallet=$7,
free_disk=$8, free_disk=$8,
major=$12, minor=$13, patch=$14, hash=$15, timestamp=$16, release=$17, major=$12, minor=$13, patch=$14, hash=$15, timestamp=$16, release=$17,
total_uptime_count=nodes.total_uptime_count+1, total_uptime_count=nodes.total_uptime_count+1,
@ -1443,11 +1456,12 @@ func (cache *overlaycache) UpdateCheckIn(ctx context.Context, node overlay.NodeC
last_contact_failure = CASE WHEN $9::bool IS FALSE last_contact_failure = CASE WHEN $9::bool IS FALSE
THEN $18::timestamptz THEN $18::timestamptz
ELSE nodes.last_contact_failure ELSE nodes.last_contact_failure
END; END,
last_ip_port=$19;
` `
_, err = cache.db.ExecContext(ctx, query, _, err = cache.db.ExecContext(ctx, query,
// args $1 - $5 // args $1 - $5
node.NodeID.Bytes(), node.Address.GetAddress(), node.LastIP, node.Address.GetTransport(), int(pb.NodeType_STORAGE), node.NodeID.Bytes(), node.Address.GetAddress(), node.LastNet, node.Address.GetTransport(), int(pb.NodeType_STORAGE),
// args $6 - $8 // args $6 - $8
node.Operator.GetEmail(), node.Operator.GetWallet(), node.Capacity.GetFreeDisk(), node.Operator.GetEmail(), node.Operator.GetWallet(), node.Capacity.GetFreeDisk(),
// args $9 // args $9
@ -1458,6 +1472,8 @@ func (cache *overlaycache) UpdateCheckIn(ctx context.Context, node overlay.NodeC
semVer.Major, semVer.Minor, semVer.Patch, node.Version.GetCommitHash(), node.Version.Timestamp, node.Version.GetRelease(), semVer.Major, semVer.Minor, semVer.Patch, node.Version.GetCommitHash(), node.Version.Timestamp, node.Version.GetRelease(),
// args $18 // args $18
timestamp, timestamp,
// args $19
node.LastIPPort,
) )
if err != nil { if err != nil {
return Error.Wrap(err) return Error.Wrap(err)
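The two CASE WHEN branches above let one statement serve both ping outcomes: the ping result ($9) routes the current timestamp ($18) into either last_contact_success or last_contact_failure, leaving the other column untouched, and $19 now refreshes last_ip_port on every check-in. A reduced sketch of that conflict branch (placeholders renumbered, columns trimmed):

package example

// checkInContactSQL illustrates the UpdateCheckIn conflict branch: exactly
// one of the two contact timestamps advances per check-in.
const checkInContactSQL = `
UPDATE nodes SET
    last_contact_success = CASE WHEN $2::bool IS TRUE
        THEN $3::timestamptz ELSE nodes.last_contact_success END,
    last_contact_failure = CASE WHEN $2::bool IS FALSE
        THEN $3::timestamptz ELSE nodes.last_contact_failure END,
    last_ip_port = $4
WHERE id = $1
`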

@ -0,0 +1,544 @@
-- AUTOGENERATED BY storj.io/dbx
-- DO NOT EDIT
CREATE TABLE accounting_rollups (
id bigserial NOT NULL,
node_id bytea NOT NULL,
start_time timestamp with time zone NOT NULL,
put_total bigint NOT NULL,
get_total bigint NOT NULL,
get_audit_total bigint NOT NULL,
get_repair_total bigint NOT NULL,
put_repair_total bigint NOT NULL,
at_rest_total double precision NOT NULL,
PRIMARY KEY ( id )
);
CREATE TABLE accounting_timestamps (
name text NOT NULL,
value timestamp with time zone NOT NULL,
PRIMARY KEY ( name )
);
CREATE TABLE bucket_bandwidth_rollups (
bucket_name bytea NOT NULL,
project_id bytea NOT NULL,
interval_start timestamp with time zone NOT NULL,
interval_seconds integer NOT NULL,
action integer NOT NULL,
inline bigint NOT NULL,
allocated bigint NOT NULL,
settled bigint NOT NULL,
PRIMARY KEY ( bucket_name, project_id, interval_start, action )
);
CREATE TABLE bucket_storage_tallies (
bucket_name bytea NOT NULL,
project_id bytea NOT NULL,
interval_start timestamp with time zone NOT NULL,
inline bigint NOT NULL,
remote bigint NOT NULL,
remote_segments_count integer NOT NULL,
inline_segments_count integer NOT NULL,
object_count integer NOT NULL,
metadata_size bigint NOT NULL,
PRIMARY KEY ( bucket_name, project_id, interval_start )
);
CREATE TABLE coinpayments_transactions (
id text NOT NULL,
user_id bytea NOT NULL,
address text NOT NULL,
amount bytea NOT NULL,
received bytea NOT NULL,
status integer NOT NULL,
key text NOT NULL,
timeout integer NOT NULL,
created_at timestamp with time zone NOT NULL,
PRIMARY KEY ( id )
);
CREATE TABLE consumed_serials (
storage_node_id bytea NOT NULL,
serial_number bytea NOT NULL,
expires_at timestamp with time zone NOT NULL,
PRIMARY KEY ( storage_node_id, serial_number )
);
CREATE TABLE coupons (
id bytea NOT NULL,
project_id bytea NOT NULL,
user_id bytea NOT NULL,
amount bigint NOT NULL,
description text NOT NULL,
type integer NOT NULL,
status integer NOT NULL,
duration bigint NOT NULL,
created_at timestamp with time zone NOT NULL,
PRIMARY KEY ( id )
);
CREATE TABLE coupon_usages (
coupon_id bytea NOT NULL,
amount bigint NOT NULL,
status integer NOT NULL,
period timestamp with time zone NOT NULL,
PRIMARY KEY ( coupon_id, period )
);
CREATE TABLE credits (
user_id bytea NOT NULL,
transaction_id text NOT NULL,
amount bigint NOT NULL,
created_at timestamp with time zone NOT NULL,
PRIMARY KEY ( transaction_id )
);
CREATE TABLE credits_spendings (
id bytea NOT NULL,
user_id bytea NOT NULL,
project_id bytea NOT NULL,
amount bigint NOT NULL,
status integer NOT NULL,
created_at timestamp with time zone NOT NULL,
PRIMARY KEY ( id )
);
CREATE TABLE graceful_exit_progress (
node_id bytea NOT NULL,
bytes_transferred bigint NOT NULL,
pieces_transferred bigint NOT NULL,
pieces_failed bigint NOT NULL,
updated_at timestamp with time zone NOT NULL,
PRIMARY KEY ( node_id )
);
CREATE TABLE graceful_exit_transfer_queue (
node_id bytea NOT NULL,
path bytea NOT NULL,
piece_num integer NOT NULL,
root_piece_id bytea,
durability_ratio double precision NOT NULL,
queued_at timestamp with time zone NOT NULL,
requested_at timestamp with time zone,
last_failed_at timestamp with time zone,
last_failed_code integer,
failed_count integer,
finished_at timestamp with time zone,
order_limit_send_count integer NOT NULL,
PRIMARY KEY ( node_id, path, piece_num )
);
CREATE TABLE injuredsegments (
path bytea NOT NULL,
data bytea NOT NULL,
attempted timestamp with time zone,
num_healthy_pieces integer DEFAULT 52 NOT NULL,
PRIMARY KEY ( path )
);
CREATE TABLE irreparabledbs (
segmentpath bytea NOT NULL,
segmentdetail bytea NOT NULL,
pieces_lost_count bigint NOT NULL,
seg_damaged_unix_sec bigint NOT NULL,
repair_attempt_count bigint NOT NULL,
PRIMARY KEY ( segmentpath )
);
CREATE TABLE nodes (
id bytea NOT NULL,
address text NOT NULL,
last_net text NOT NULL,
last_ip_port text,
protocol integer NOT NULL,
type integer NOT NULL,
email text NOT NULL,
wallet text NOT NULL,
free_bandwidth bigint NOT NULL,
free_disk bigint NOT NULL,
piece_count bigint NOT NULL,
major bigint NOT NULL,
minor bigint NOT NULL,
patch bigint NOT NULL,
hash text NOT NULL,
timestamp timestamp with time zone NOT NULL,
release boolean NOT NULL,
latency_90 bigint NOT NULL,
audit_success_count bigint NOT NULL,
total_audit_count bigint NOT NULL,
uptime_success_count bigint NOT NULL,
total_uptime_count bigint NOT NULL,
created_at timestamp with time zone NOT NULL,
updated_at timestamp with time zone NOT NULL,
last_contact_success timestamp with time zone NOT NULL,
last_contact_failure timestamp with time zone NOT NULL,
contained boolean NOT NULL,
disqualified timestamp with time zone,
suspended timestamp with time zone,
audit_reputation_alpha double precision NOT NULL,
audit_reputation_beta double precision NOT NULL,
unknown_audit_reputation_alpha double precision NOT NULL,
unknown_audit_reputation_beta double precision NOT NULL,
uptime_reputation_alpha double precision NOT NULL,
uptime_reputation_beta double precision NOT NULL,
exit_initiated_at timestamp with time zone,
exit_loop_completed_at timestamp with time zone,
exit_finished_at timestamp with time zone,
exit_success boolean NOT NULL,
PRIMARY KEY ( id )
);
CREATE TABLE nodes_offline_times (
node_id bytea NOT NULL,
tracked_at timestamp with time zone NOT NULL,
seconds integer NOT NULL,
PRIMARY KEY ( node_id, tracked_at )
);
CREATE TABLE offers (
id serial NOT NULL,
name text NOT NULL,
description text NOT NULL,
award_credit_in_cents integer NOT NULL,
invitee_credit_in_cents integer NOT NULL,
award_credit_duration_days integer,
invitee_credit_duration_days integer,
redeemable_cap integer,
expires_at timestamp with time zone NOT NULL,
created_at timestamp with time zone NOT NULL,
status integer NOT NULL,
type integer NOT NULL,
PRIMARY KEY ( id )
);
CREATE TABLE peer_identities (
node_id bytea NOT NULL,
leaf_serial_number bytea NOT NULL,
chain bytea NOT NULL,
updated_at timestamp with time zone NOT NULL,
PRIMARY KEY ( node_id )
);
CREATE TABLE pending_audits (
node_id bytea NOT NULL,
piece_id bytea NOT NULL,
stripe_index bigint NOT NULL,
share_size bigint NOT NULL,
expected_share_hash bytea NOT NULL,
reverify_count bigint NOT NULL,
path bytea NOT NULL,
PRIMARY KEY ( node_id )
);
CREATE TABLE pending_serial_queue (
storage_node_id bytea NOT NULL,
bucket_id bytea NOT NULL,
serial_number bytea NOT NULL,
action integer NOT NULL,
settled bigint NOT NULL,
expires_at timestamp with time zone NOT NULL,
PRIMARY KEY ( storage_node_id, bucket_id, serial_number )
);
CREATE TABLE projects (
id bytea NOT NULL,
name text NOT NULL,
description text NOT NULL,
usage_limit bigint NOT NULL,
rate_limit integer,
partner_id bytea,
owner_id bytea NOT NULL,
created_at timestamp with time zone NOT NULL,
PRIMARY KEY ( id )
);
CREATE TABLE registration_tokens (
secret bytea NOT NULL,
owner_id bytea,
project_limit integer NOT NULL,
created_at timestamp with time zone NOT NULL,
PRIMARY KEY ( secret ),
UNIQUE ( owner_id )
);
CREATE TABLE reported_serials (
expires_at timestamp with time zone NOT NULL,
storage_node_id bytea NOT NULL,
bucket_id bytea NOT NULL,
action integer NOT NULL,
serial_number bytea NOT NULL,
settled bigint NOT NULL,
observed_at timestamp with time zone NOT NULL,
PRIMARY KEY ( expires_at, storage_node_id, bucket_id, action, serial_number )
);
CREATE TABLE reset_password_tokens (
secret bytea NOT NULL,
owner_id bytea NOT NULL,
created_at timestamp with time zone NOT NULL,
PRIMARY KEY ( secret ),
UNIQUE ( owner_id )
);
CREATE TABLE serial_numbers (
id serial NOT NULL,
serial_number bytea NOT NULL,
bucket_id bytea NOT NULL,
expires_at timestamp with time zone NOT NULL,
PRIMARY KEY ( id )
);
CREATE TABLE storagenode_bandwidth_rollups (
storagenode_id bytea NOT NULL,
interval_start timestamp with time zone NOT NULL,
interval_seconds integer NOT NULL,
action integer NOT NULL,
allocated bigint DEFAULT 0,
settled bigint NOT NULL,
PRIMARY KEY ( storagenode_id, interval_start, action )
);
CREATE TABLE storagenode_payments (
id bigserial NOT NULL,
created_at timestamp with time zone NOT NULL,
node_id bytea NOT NULL,
period text,
amount bigint NOT NULL,
receipt text,
notes text,
PRIMARY KEY ( id )
);
CREATE TABLE storagenode_paystubs (
period text NOT NULL,
node_id bytea NOT NULL,
created_at timestamp with time zone NOT NULL,
codes text NOT NULL,
usage_at_rest double precision NOT NULL,
usage_get bigint NOT NULL,
usage_put bigint NOT NULL,
usage_get_repair bigint NOT NULL,
usage_put_repair bigint NOT NULL,
usage_get_audit bigint NOT NULL,
comp_at_rest bigint NOT NULL,
comp_get bigint NOT NULL,
comp_put bigint NOT NULL,
comp_get_repair bigint NOT NULL,
comp_put_repair bigint NOT NULL,
comp_get_audit bigint NOT NULL,
surge_percent bigint NOT NULL,
held bigint NOT NULL,
owed bigint NOT NULL,
disposed bigint NOT NULL,
paid bigint NOT NULL,
PRIMARY KEY ( period, node_id )
);
CREATE TABLE storagenode_storage_tallies (
id bigserial NOT NULL,
node_id bytea NOT NULL,
interval_end_time timestamp with time zone NOT NULL,
data_total double precision NOT NULL,
PRIMARY KEY ( id )
);
CREATE TABLE stripe_customers (
user_id bytea NOT NULL,
customer_id text NOT NULL,
created_at timestamp with time zone NOT NULL,
PRIMARY KEY ( user_id ),
UNIQUE ( customer_id )
);
CREATE TABLE stripecoinpayments_invoice_project_records (
id bytea NOT NULL,
project_id bytea NOT NULL,
storage double precision NOT NULL,
egress bigint NOT NULL,
objects bigint NOT NULL,
period_start timestamp with time zone NOT NULL,
period_end timestamp with time zone NOT NULL,
state integer NOT NULL,
created_at timestamp with time zone NOT NULL,
PRIMARY KEY ( id ),
UNIQUE ( project_id, period_start, period_end )
);
CREATE TABLE stripecoinpayments_tx_conversion_rates (
tx_id text NOT NULL,
rate bytea NOT NULL,
created_at timestamp with time zone NOT NULL,
PRIMARY KEY ( tx_id )
);
CREATE TABLE users (
id bytea NOT NULL,
email text NOT NULL,
normalized_email text NOT NULL,
full_name text NOT NULL,
short_name text,
password_hash bytea NOT NULL,
status integer NOT NULL,
partner_id bytea,
created_at timestamp with time zone NOT NULL,
PRIMARY KEY ( id )
);
CREATE TABLE value_attributions (
project_id bytea NOT NULL,
bucket_name bytea NOT NULL,
partner_id bytea NOT NULL,
last_updated timestamp with time zone NOT NULL,
PRIMARY KEY ( project_id, bucket_name )
);
CREATE TABLE api_keys (
id bytea NOT NULL,
project_id bytea NOT NULL REFERENCES projects( id ) ON DELETE CASCADE,
head bytea NOT NULL,
name text NOT NULL,
secret bytea NOT NULL,
partner_id bytea,
created_at timestamp with time zone NOT NULL,
PRIMARY KEY ( id ),
UNIQUE ( head ),
UNIQUE ( name, project_id )
);
CREATE TABLE bucket_metainfos (
id bytea NOT NULL,
project_id bytea NOT NULL REFERENCES projects( id ),
name bytea NOT NULL,
partner_id bytea,
path_cipher integer NOT NULL,
created_at timestamp with time zone NOT NULL,
default_segment_size integer NOT NULL,
default_encryption_cipher_suite integer NOT NULL,
default_encryption_block_size integer NOT NULL,
default_redundancy_algorithm integer NOT NULL,
default_redundancy_share_size integer NOT NULL,
default_redundancy_required_shares integer NOT NULL,
default_redundancy_repair_shares integer NOT NULL,
default_redundancy_optimal_shares integer NOT NULL,
default_redundancy_total_shares integer NOT NULL,
PRIMARY KEY ( id ),
UNIQUE ( name, project_id )
);
CREATE TABLE project_invoice_stamps (
project_id bytea NOT NULL REFERENCES projects( id ) ON DELETE CASCADE,
invoice_id bytea NOT NULL,
start_date timestamp with time zone NOT NULL,
end_date timestamp with time zone NOT NULL,
created_at timestamp with time zone NOT NULL,
PRIMARY KEY ( project_id, start_date, end_date ),
UNIQUE ( invoice_id )
);
CREATE TABLE project_members (
member_id bytea NOT NULL REFERENCES users( id ) ON DELETE CASCADE,
project_id bytea NOT NULL REFERENCES projects( id ) ON DELETE CASCADE,
created_at timestamp with time zone NOT NULL,
PRIMARY KEY ( member_id, project_id )
);
CREATE TABLE stripecoinpayments_apply_balance_intents (
tx_id text NOT NULL REFERENCES coinpayments_transactions( id ) ON DELETE CASCADE,
state integer NOT NULL,
created_at timestamp with time zone NOT NULL,
PRIMARY KEY ( tx_id )
);
CREATE TABLE used_serials (
serial_number_id integer NOT NULL REFERENCES serial_numbers( id ) ON DELETE CASCADE,
storage_node_id bytea NOT NULL,
PRIMARY KEY ( serial_number_id, storage_node_id )
);
CREATE TABLE user_credits (
id serial NOT NULL,
user_id bytea NOT NULL REFERENCES users( id ) ON DELETE CASCADE,
offer_id integer NOT NULL REFERENCES offers( id ),
referred_by bytea REFERENCES users( id ) ON DELETE SET NULL,
type text NOT NULL,
credits_earned_in_cents integer NOT NULL,
credits_used_in_cents integer NOT NULL,
expires_at timestamp with time zone NOT NULL,
created_at timestamp with time zone NOT NULL,
PRIMARY KEY ( id ),
UNIQUE ( id, offer_id )
);
CREATE INDEX accounting_rollups_start_time_index ON accounting_rollups ( start_time );
CREATE INDEX consumed_serials_expires_at_index ON consumed_serials ( expires_at );
CREATE INDEX injuredsegments_attempted_index ON injuredsegments ( attempted );
CREATE INDEX injuredsegments_num_healthy_pieces_index ON injuredsegments ( num_healthy_pieces );
CREATE INDEX node_last_ip ON nodes ( last_net );
CREATE INDEX nodes_offline_times_node_id_index ON nodes_offline_times ( node_id );
CREATE UNIQUE INDEX serial_number ON serial_numbers ( serial_number );
CREATE INDEX serial_numbers_expires_at_index ON serial_numbers ( expires_at );
CREATE INDEX storagenode_payments_node_id_period_index ON storagenode_payments ( node_id, period );
CREATE INDEX storagenode_paystubs_node_id_index ON storagenode_paystubs ( node_id );
CREATE UNIQUE INDEX credits_earned_user_id_offer_id ON user_credits ( id, offer_id );
INSERT INTO "accounting_rollups"("id", "node_id", "start_time", "put_total", "get_total", "get_audit_total", "get_repair_total", "put_repair_total", "at_rest_total") VALUES (1, E'\\367M\\177\\251]t/\\022\\256\\214\\265\\025\\224\\204:\\217\\212\\0102<\\321\\374\\020&\\271Qc\\325\\261\\354\\246\\233'::bytea, '2019-02-09 00:00:00+00', 1000, 2000, 3000, 4000, 0, 5000);
INSERT INTO "accounting_timestamps" VALUES ('LastAtRestTally', '0001-01-01 00:00:00+00');
INSERT INTO "accounting_timestamps" VALUES ('LastRollup', '0001-01-01 00:00:00+00');
INSERT INTO "accounting_timestamps" VALUES ('LastBandwidthTally', '0001-01-01 00:00:00+00');
INSERT INTO "nodes"("id", "address", "last_net", "protocol", "type", "email", "wallet", "free_bandwidth", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "uptime_success_count", "total_uptime_count", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "suspended", "audit_reputation_alpha", "audit_reputation_beta", "unknown_audit_reputation_alpha", "unknown_audit_reputation_beta", "uptime_reputation_alpha", "uptime_reputation_beta", "exit_success") VALUES (E'\\153\\313\\233\\074\\327\\177\\136\\070\\346\\001', '127.0.0.1:55516', '', 0, 4, '', '', -1, -1, 0, 0, 1, 0, '', 'epoch', false, 0, 0, 5, 0, 5, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, NULL, 50, 0, 1, 0, 100, 5, false);
INSERT INTO "nodes"("id", "address", "last_net", "protocol", "type", "email", "wallet", "free_bandwidth", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "uptime_success_count", "total_uptime_count", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "suspended", "audit_reputation_alpha", "audit_reputation_beta", "unknown_audit_reputation_alpha", "unknown_audit_reputation_beta", "uptime_reputation_alpha", "uptime_reputation_beta", "exit_success") VALUES (E'\\006\\223\\250R\\221\\005\\365\\377v>0\\266\\365\\216\\255?\\347\\244\\371?2\\264\\262\\230\\007<\\001\\262\\263\\237\\247n', '127.0.0.1:55518', '', 0, 4, '', '', -1, -1, 0, 0, 1, 0, '', 'epoch', false, 0, 0, 0, 3, 3, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, NULL, 50, 0, 1, 0, 100, 0, false);
INSERT INTO "nodes"("id", "address", "last_net", "protocol", "type", "email", "wallet", "free_bandwidth", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "uptime_success_count", "total_uptime_count", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "suspended", "audit_reputation_alpha", "audit_reputation_beta", "unknown_audit_reputation_alpha", "unknown_audit_reputation_beta", "uptime_reputation_alpha", "uptime_reputation_beta", "exit_success") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014', '127.0.0.1:55517', '', 0, 4, '', '', -1, -1, 0, 0, 1, 0, '', 'epoch', false, 0, 0, 0, 0, 0, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, NULL, 50, 0, 1, 0, 100, 0, false);
INSERT INTO "nodes"("id", "address", "last_net", "protocol", "type", "email", "wallet", "free_bandwidth", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "uptime_success_count", "total_uptime_count", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "suspended", "audit_reputation_alpha", "audit_reputation_beta", "unknown_audit_reputation_alpha", "unknown_audit_reputation_beta", "uptime_reputation_alpha", "uptime_reputation_beta", "exit_success") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\015', '127.0.0.1:55519', '', 0, 4, '', '', -1, -1, 0, 0, 1, 0, '', 'epoch', false, 0, 1, 2, 1, 2, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, NULL, 50, 0, 1, 0, 100, 1, false);
INSERT INTO "nodes"("id", "address", "last_net", "protocol", "type", "email", "wallet", "free_bandwidth", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "uptime_success_count", "total_uptime_count", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "suspended", "audit_reputation_alpha", "audit_reputation_beta", "unknown_audit_reputation_alpha", "unknown_audit_reputation_beta", "uptime_reputation_alpha", "uptime_reputation_beta", "exit_success") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\016', '127.0.0.1:55520', '', 0, 4, '', '', -1, -1, 0, 0, 1, 0, '', 'epoch', false, 0, 300, 400, 300, 400, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, NULL, 300, 0, 1, 0, 300, 100, false);
INSERT INTO "nodes"("id", "address", "last_net", "protocol", "type", "email", "wallet", "free_bandwidth", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "uptime_success_count", "total_uptime_count", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "suspended", "audit_reputation_alpha", "audit_reputation_beta", "unknown_audit_reputation_alpha", "unknown_audit_reputation_beta", "uptime_reputation_alpha", "uptime_reputation_beta", "exit_success") VALUES (E'\\154\\313\\233\\074\\327\\177\\136\\070\\346\\001', '127.0.0.1:55516', '', 0, 4, '', '', -1, -1, 0, 0, 1, 0, '', 'epoch', false, 0, 0, 5, 0, 5, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, '2019-02-14 08:07:31.028103+00', 50, 0, 75, 25, 100, 5, false);
INSERT INTO "users"("id", "full_name", "short_name", "email", "normalized_email", "password_hash", "status", "partner_id", "created_at") VALUES (E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 'Noahson', 'William', '1email1@mail.test', '1EMAIL1@MAIL.TEST', E'some_readable_hash'::bytea, 1, NULL, '2019-02-14 08:28:24.614594+00');
INSERT INTO "projects"("id", "name", "description", "usage_limit", "partner_id", "owner_id", "created_at") VALUES (E'\\022\\217/\\014\\376!K\\023\\276\\031\\311}m\\236\\205\\300'::bytea, 'ProjectName', 'projects description', 0, NULL, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, '2019-02-14 08:28:24.254934+00');
INSERT INTO "projects"("id", "name", "description", "usage_limit", "partner_id", "owner_id", "created_at") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea, 'projName1', 'Test project 1', 0, NULL, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, '2019-02-14 08:28:24.636949+00');
INSERT INTO "project_members"("member_id", "project_id", "created_at") VALUES (E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea, '2019-02-14 08:28:24.677953+00');
INSERT INTO "project_members"("member_id", "project_id", "created_at") VALUES (E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, E'\\022\\217/\\014\\376!K\\023\\276\\031\\311}m\\236\\205\\300'::bytea, '2019-02-13 08:28:24.677953+00');
INSERT INTO "irreparabledbs" ("segmentpath", "segmentdetail", "pieces_lost_count", "seg_damaged_unix_sec", "repair_attempt_count") VALUES ('\x49616d5365676d656e746b6579696e666f30', '\x49616d5365676d656e7464657461696c696e666f30', 10, 1550159554, 10);
INSERT INTO "registration_tokens" ("secret", "owner_id", "project_limit", "created_at") VALUES (E'\\070\\127\\144\\013\\332\\344\\102\\376\\306\\056\\303\\130\\106\\132\\321\\276\\321\\274\\170\\264\\054\\333\\221\\116\\154\\221\\335\\070\\220\\146\\344\\216'::bytea, null, 1, '2019-02-14 08:28:24.677953+00');
INSERT INTO "serial_numbers" ("id", "serial_number", "bucket_id", "expires_at") VALUES (1, E'0123456701234567'::bytea, E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014/testbucket'::bytea, '2019-03-06 08:28:24.677953+00');
INSERT INTO "used_serials" ("serial_number_id", "storage_node_id") VALUES (1, E'\\006\\223\\250R\\221\\005\\365\\377v>0\\266\\365\\216\\255?\\347\\244\\371?2\\264\\262\\230\\007<\\001\\262\\263\\237\\247n');
INSERT INTO "storagenode_bandwidth_rollups" ("storagenode_id", "interval_start", "interval_seconds", "action", "allocated", "settled") VALUES (E'\\006\\223\\250R\\221\\005\\365\\377v>0\\266\\365\\216\\255?\\347\\244\\371?2\\264\\262\\230\\007<\\001\\262\\263\\237\\247n', '2019-03-06 08:00:00.000000' AT TIME ZONE current_setting('TIMEZONE'), 3600, 1, 1024, 2024);
INSERT INTO "storagenode_storage_tallies" VALUES (1, E'\\3510\\323\\225"~\\036<\\342\\330m\\0253Jhr\\246\\233K\\246#\\2303\\351\\256\\275j\\212UM\\362\\207', '2019-02-14 08:16:57.812849+00', 1000);
INSERT INTO "bucket_bandwidth_rollups" ("bucket_name", "project_id", "interval_start", "interval_seconds", "action", "inline", "allocated", "settled") VALUES (E'testbucket'::bytea, E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea,'2019-03-06 08:00:00.000000' AT TIME ZONE current_setting('TIMEZONE'), 3600, 1, 1024, 2024, 3024);
INSERT INTO "bucket_storage_tallies" ("bucket_name", "project_id", "interval_start", "inline", "remote", "remote_segments_count", "inline_segments_count", "object_count", "metadata_size") VALUES (E'testbucket'::bytea, E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea,'2019-03-06 08:00:00.000000' AT TIME ZONE current_setting('TIMEZONE'), 4024, 5024, 0, 0, 0, 0);
INSERT INTO "bucket_bandwidth_rollups" ("bucket_name", "project_id", "interval_start", "interval_seconds", "action", "inline", "allocated", "settled") VALUES (E'testbucket'::bytea, E'\\170\\160\\157\\370\\274\\366\\113\\364\\272\\235\\301\\243\\321\\102\\321\\136'::bytea,'2019-03-06 08:00:00.000000' AT TIME ZONE current_setting('TIMEZONE'), 3600, 1, 1024, 2024, 3024);
INSERT INTO "bucket_storage_tallies" ("bucket_name", "project_id", "interval_start", "inline", "remote", "remote_segments_count", "inline_segments_count", "object_count", "metadata_size") VALUES (E'testbucket'::bytea, E'\\170\\160\\157\\370\\274\\366\\113\\364\\272\\235\\301\\243\\321\\102\\321\\136'::bytea,'2019-03-06 08:00:00.000000' AT TIME ZONE current_setting('TIMEZONE'), 4024, 5024, 0, 0, 0, 0);
INSERT INTO "reset_password_tokens" ("secret", "owner_id", "created_at") VALUES (E'\\070\\127\\144\\013\\332\\344\\102\\376\\306\\056\\303\\130\\106\\132\\321\\276\\321\\274\\170\\264\\054\\333\\221\\116\\154\\221\\335\\070\\220\\146\\344\\216'::bytea, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, '2019-05-08 08:28:24.677953+00');
INSERT INTO "offers" ("id", "name", "description", "award_credit_in_cents", "invitee_credit_in_cents", "expires_at", "created_at", "status", "type", "award_credit_duration_days", "invitee_credit_duration_days") VALUES (1, 'Default referral offer', 'Is active when no other active referral offer', 300, 600, '2119-03-14 08:28:24.636949+00', '2019-07-14 08:28:24.636949+00', 1, 2, 365, 14);
INSERT INTO "offers" ("id", "name", "description", "award_credit_in_cents", "invitee_credit_in_cents", "expires_at", "created_at", "status", "type", "award_credit_duration_days", "invitee_credit_duration_days") VALUES (2, 'Default free credit offer', 'Is active when no active free credit offer', 0, 300, '2119-03-14 08:28:24.636949+00', '2019-07-14 08:28:24.636949+00', 1, 1, NULL, 14);
INSERT INTO "api_keys" ("id", "project_id", "head", "name", "secret", "partner_id", "created_at") VALUES (E'\\334/\\302;\\225\\355O\\323\\276f\\247\\354/6\\241\\033'::bytea, E'\\022\\217/\\014\\376!K\\023\\276\\031\\311}m\\236\\205\\300'::bytea, E'\\111\\142\\147\\304\\132\\375\\070\\163\\270\\160\\251\\370\\126\\063\\351\\037\\257\\071\\143\\375\\351\\320\\253\\232\\220\\260\\075\\173\\306\\307\\115\\136'::bytea, 'key 2', E'\\254\\011\\315\\333\\273\\365\\001\\071\\024\\154\\253\\332\\301\\216\\361\\074\\221\\367\\251\\231\\274\\333\\300\\367\\001\\272\\327\\111\\315\\123\\042\\016'::bytea, NULL, '2019-02-14 08:28:24.267934+00');
INSERT INTO "project_invoice_stamps" ("project_id", "invoice_id", "start_date", "end_date", "created_at") VALUES (E'\\022\\217/\\014\\376!K\\023\\276\\031\\311}m\\236\\205\\300'::bytea, E'\\363\\311\\033w\\222\\303,'::bytea, '2019-06-01 08:28:24.267934+00', '2019-06-29 08:28:24.267934+00', '2019-06-01 08:28:24.267934+00');
INSERT INTO "value_attributions" ("project_id", "bucket_name", "partner_id", "last_updated") VALUES (E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, E''::bytea, E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea,'2019-02-14 08:07:31.028103+00');
INSERT INTO "user_credits" ("id", "user_id", "offer_id", "referred_by", "credits_earned_in_cents", "credits_used_in_cents", "type", "expires_at", "created_at") VALUES (1, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 1, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 200, 0, 'invalid', '2019-10-01 08:28:24.267934+00', '2019-06-01 08:28:24.267934+00');
INSERT INTO "bucket_metainfos" ("id", "project_id", "name", "partner_id", "created_at", "path_cipher", "default_segment_size", "default_encryption_cipher_suite", "default_encryption_block_size", "default_redundancy_algorithm", "default_redundancy_share_size", "default_redundancy_required_shares", "default_redundancy_repair_shares", "default_redundancy_optimal_shares", "default_redundancy_total_shares") VALUES (E'\\334/\\302;\\225\\355O\\323\\276f\\247\\354/6\\241\\033'::bytea, E'\\022\\217/\\014\\376!K\\023\\276\\031\\311}m\\236\\205\\300'::bytea, E'testbucketuniquename'::bytea, NULL, '2019-06-14 08:28:24.677953+00', 1, 65536, 1, 8192, 1, 4096, 4, 6, 8, 10);
INSERT INTO "pending_audits" ("node_id", "piece_id", "stripe_index", "share_size", "expected_share_hash", "reverify_count", "path") VALUES (E'\\153\\313\\233\\074\\327\\177\\136\\070\\346\\001'::bytea, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 5, 1024, E'\\070\\127\\144\\013\\332\\344\\102\\376\\306\\056\\303\\130\\106\\132\\321\\276\\321\\274\\170\\264\\054\\333\\221\\116\\154\\221\\335\\070\\220\\146\\344\\216'::bytea, 1, 'not null');
INSERT INTO "peer_identities" VALUES (E'\\334/\\302;\\225\\355O\\323\\276f\\247\\354/6\\241\\033'::bytea, E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, '2019-02-14 08:07:31.335028+00');
INSERT INTO "graceful_exit_progress" ("node_id", "bytes_transferred", "pieces_transferred", "pieces_failed", "updated_at") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\016', 1000000000000000, 0, 0, '2019-09-12 10:07:31.028103+00');
INSERT INTO "graceful_exit_transfer_queue" ("node_id", "path", "piece_num", "durability_ratio", "queued_at", "requested_at", "last_failed_at", "last_failed_code", "failed_count", "finished_at", "order_limit_send_count") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\016', E'f8419768-5baa-4901-b3ba-62808013ec45/s0/test3/\\240\\243\\223n \\334~b}\\2624)\\250m\\201\\202\\235\\276\\361\\3304\\323\\352\\311\\361\\353;\\326\\311', 8, 1.0, '2019-09-12 10:07:31.028103+00', '2019-09-12 10:07:32.028103+00', null, null, 0, '2019-09-12 10:07:33.028103+00', 0);
INSERT INTO "graceful_exit_transfer_queue" ("node_id", "path", "piece_num", "durability_ratio", "queued_at", "requested_at", "last_failed_at", "last_failed_code", "failed_count", "finished_at", "order_limit_send_count") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\016', E'f8419768-5baa-4901-b3ba-62808013ec45/s0/test3/\\240\\243\\223n \\334~b}\\2624)\\250m\\201\\202\\235\\276\\361\\3304\\323\\352\\311\\361\\353;\\326\\312', 8, 1.0, '2019-09-12 10:07:31.028103+00', '2019-09-12 10:07:32.028103+00', null, null, 0, '2019-09-12 10:07:33.028103+00', 0);
INSERT INTO "stripe_customers" ("user_id", "customer_id", "created_at") VALUES (E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 'stripe_id', '2019-06-01 08:28:24.267934+00');
INSERT INTO "graceful_exit_transfer_queue" ("node_id", "path", "piece_num", "durability_ratio", "queued_at", "requested_at", "last_failed_at", "last_failed_code", "failed_count", "finished_at", "order_limit_send_count") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\016', E'f8419768-5baa-4901-b3ba-62808013ec45/s0/test3/\\240\\243\\223n \\334~b}\\2624)\\250m\\201\\202\\235\\276\\361\\3304\\323\\352\\311\\361\\353;\\326\\311', 9, 1.0, '2019-09-12 10:07:31.028103+00', '2019-09-12 10:07:32.028103+00', null, null, 0, '2019-09-12 10:07:33.028103+00', 0);
INSERT INTO "graceful_exit_transfer_queue" ("node_id", "path", "piece_num", "durability_ratio", "queued_at", "requested_at", "last_failed_at", "last_failed_code", "failed_count", "finished_at", "order_limit_send_count") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\016', E'f8419768-5baa-4901-b3ba-62808013ec45/s0/test3/\\240\\243\\223n \\334~b}\\2624)\\250m\\201\\202\\235\\276\\361\\3304\\323\\352\\311\\361\\353;\\326\\312', 9, 1.0, '2019-09-12 10:07:31.028103+00', '2019-09-12 10:07:32.028103+00', null, null, 0, '2019-09-12 10:07:33.028103+00', 0);
INSERT INTO "stripecoinpayments_invoice_project_records"("id", "project_id", "storage", "egress", "objects", "period_start", "period_end", "state", "created_at") VALUES (E'\\022\\217/\\014\\376!K\\023\\276\\031\\311}m\\236\\205\\300'::bytea, E'\\021\\217/\\014\\376!K\\023\\276\\031\\311}m\\236\\205\\300'::bytea, 0, 0, 0, '2019-06-01 08:28:24.267934+00', '2019-06-01 08:28:24.267934+00', 0, '2019-06-01 08:28:24.267934+00');
INSERT INTO "graceful_exit_transfer_queue" ("node_id", "path", "piece_num", "root_piece_id", "durability_ratio", "queued_at", "requested_at", "last_failed_at", "last_failed_code", "failed_count", "finished_at", "order_limit_send_count") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\016', E'f8419768-5baa-4901-b3ba-62808013ec45/s0/test3/\\240\\243\\223n \\334~b}\\2624)\\250m\\201\\202\\235\\276\\361\\3304\\323\\352\\311\\361\\353;\\326\\311', 10, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 1.0, '2019-09-12 10:07:31.028103+00', '2019-09-12 10:07:32.028103+00', null, null, 0, '2019-09-12 10:07:33.028103+00', 0);
INSERT INTO "stripecoinpayments_tx_conversion_rates" ("tx_id", "rate", "created_at") VALUES ('tx_id', E'\\363\\311\\033w\\222\\303Ci,'::bytea, '2019-06-01 08:28:24.267934+00');
INSERT INTO "coinpayments_transactions" ("id", "user_id", "address", "amount", "received", "status", "key", "timeout", "created_at") VALUES ('tx_id', E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 'address', E'\\363\\311\\033w'::bytea, E'\\363\\311\\033w'::bytea, 1, 'key', 60, '2019-06-01 08:28:24.267934+00');
INSERT INTO "nodes_offline_times" ("node_id", "tracked_at", "seconds") VALUES (E'\\153\\313\\233\\074\\327\\177\\136\\070\\346\\001'::bytea, '2019-06-01 09:28:24.267934+00', 3600);
INSERT INTO "nodes_offline_times" ("node_id", "tracked_at", "seconds") VALUES (E'\\153\\313\\233\\074\\327\\177\\136\\070\\346\\001'::bytea, '2017-06-01 09:28:24.267934+00', 100);
INSERT INTO "nodes_offline_times" ("node_id", "tracked_at", "seconds") VALUES (E'\\006\\223\\250R\\221\\005\\365\\377v>0\\266\\365\\216\\255?\\347\\244\\371?2\\264\\262\\230\\007<\\001\\262\\263\\237\\247n'::bytea, '2019-06-01 09:28:24.267934+00', 3600);
INSERT INTO "storagenode_bandwidth_rollups" ("storagenode_id", "interval_start", "interval_seconds", "action", "settled") VALUES (E'\\006\\223\\250R\\221\\005\\365\\377v>0\\266\\365\\216\\255?\\347\\244\\371?2\\264\\262\\230\\007<\\001\\262\\263\\237\\247n', '2020-01-11 08:00:00.000000' AT TIME ZONE current_setting('TIMEZONE'), 3600, 1, 2024);
INSERT INTO "coupons" ("id", "project_id", "user_id", "amount", "description", "type", "status", "duration", "created_at") VALUES (E'\\362\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea, E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 50, 'description', 0, 0, 2, '2019-06-01 08:28:24.267934+00');
INSERT INTO "coupon_usages" ("coupon_id", "amount", "status", "period") VALUES (E'\\362\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea, 22, 0, '2019-06-01 09:28:24.267934+00');
INSERT INTO "reported_serials" ("expires_at", "storage_node_id", "bucket_id", "action", "serial_number", "settled", "observed_at") VALUES ('2020-01-11 08:00:00.000000+00', E'\\006\\223\\250R\\221\\005\\365\\377v>0\\266\\365\\216\\255?\\347\\244\\371?2\\264\\262\\230\\007<\\001\\262\\263\\237\\247n', E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014/testbucket'::bytea, 1, E'0123456701234567'::bytea, 100, '2020-01-11 08:00:00.000000+00');
INSERT INTO "stripecoinpayments_apply_balance_intents" ("tx_id", "state", "created_at") VALUES ('tx_id', 0, '2019-06-01 08:28:24.267934+00');
INSERT INTO "projects"("id", "name", "description", "usage_limit", "rate_limit", "partner_id", "owner_id", "created_at") VALUES (E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\347'::bytea, 'projName1', 'Test project 1', 0, 2000000, NULL, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, '2020-01-15 08:28:24.636949+00');
INSERT INTO "credits" ("user_id", "transaction_id", "amount", "created_at") VALUES (E'\\362\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014'::bytea, 'transactionID', 10, '2019-06-01 08:28:24.267934+00');
INSERT INTO "credits_spendings" ("id", "user_id", "project_id", "amount", "status", "created_at") VALUES (E'\\362\\342\\363\\371>+F\\256\\263\\300\\275|\\342N\\347\\014'::bytea, E'\\153\\313\\233\\074\\327\\177\\136\\070\\346\\001'::bytea, E'\\363\\311\\033w\\222\\303Ci\\265\\343U\\303\\312\\204",'::bytea, 5, 0, '2019-06-01 09:28:24.267934+00');
INSERT INTO "pending_serial_queue" ("storage_node_id", "bucket_id", "serial_number", "action", "settled", "expires_at") VALUES (E'\\006\\223\\250R\\221\\005\\365\\377v>0\\266\\365\\216\\255?\\347\\244\\371?2\\264\\262\\230\\007<\\001\\262\\263\\237\\247n', E'\\363\\342\\363\\371>+F\\256\\263\\300\\273|\\342N\\347\\014/testbucket'::bytea, E'5123456701234567'::bytea, 1, 100, '2020-01-11 08:00:00.000000+00');
INSERT INTO "consumed_serials" ("storage_node_id", "serial_number", "expires_at") VALUES (E'\\006\\223\\250R\\221\\005\\365\\377v>0\\266\\365\\216\\255?\\347\\244\\371?2\\264\\262\\230\\007<\\001\\262\\263\\237\\247n', E'1234567012345678'::bytea, '2020-01-12 08:00:00.000000+00');
INSERT INTO "injuredsegments" ("path", "data", "num_healthy_pieces") VALUES ('0', '\x0a0130120100', 52);
INSERT INTO "injuredsegments" ("path", "data", "num_healthy_pieces") VALUES ('here''s/a/great/path', '\x0a136865726527732f612f67726561742f70617468120a0102030405060708090a', 30);
INSERT INTO "injuredsegments" ("path", "data", "num_healthy_pieces") VALUES ('yet/another/cool/path', '\x0a157965742f616e6f746865722f636f6f6c2f70617468120a0102030405060708090a', 51);
INSERT INTO "injuredsegments" ("path", "data", "num_healthy_pieces") VALUES ('/this/is/a/new/path', '\x0a23736f2f6d616e792f69636f6e69632f70617468732f746f2f63686f6f73652f66726f6d120a0102030405060708090a', 40);
-- NEW DATA --
INSERT INTO "nodes"("id", "address", "last_net", "last_ip_port", "protocol", "type", "email", "wallet", "free_bandwidth", "free_disk", "piece_count", "major", "minor", "patch", "hash", "timestamp", "release","latency_90", "audit_success_count", "total_audit_count", "uptime_success_count", "total_uptime_count", "created_at", "updated_at", "last_contact_success", "last_contact_failure", "contained", "disqualified", "suspended", "audit_reputation_alpha", "audit_reputation_beta", "unknown_audit_reputation_alpha", "unknown_audit_reputation_beta", "uptime_reputation_alpha", "uptime_reputation_beta", "exit_success") VALUES (E'\\154\\313\\233\\074\\327\\177\\136\\070\\346\\002', '127.0.0.1:55516', '127.0.0.0', '127.0.0.1:55516', 0, 4, '', '', -1, -1, 0, 0, 1, 0, '', 'epoch', false, 0, 0, 5, 0, 5, '2019-02-14 08:07:31.028103+00', '2019-02-14 08:07:31.108963+00', 'epoch', 'epoch', false, NULL, '2019-02-14 08:07:31.028103+00', 50, 0, 75, 25, 100, 5, false);