803e2930f4
My understanding is that the nodes table has the following fields: - `address` field which can be a hostname or an IP - `last_net` field that is the /24 subnet of the IP resolved from the address This PR does the following: 1) add back the `last_ip` field to the nodes table 2) for uplink operations remove the calls that the satellite makes to `lookupNodeAddress` (which makes the DNS calls to resolve the IP from the hostname) and instead use the data stored in the nodes table `last_ip` field. This means that the IP that the satellite sends to the uplink for the storage nodes could be approx 1 hr stale. In the short term this is fine, next we will be adding changes so that the storage node pushes any IP changes to the satellite in real time. 3) use the address field for repair and audit since we want them to still make DNS calls to confirm the IP is up to date 4) try to reduce confusion about hostname, ip, subnet, and address in the code base Change-Id: I96ce0d8bb78303f82483d0701bc79544b74057ac
94 lines
2.3 KiB
Go
94 lines
2.3 KiB
Go
// Copyright (C) 2019 Storj Labs, Inc.
|
|
// See LICENSE for copying information.
|
|
|
|
package overlay
|
|
|
|
import (
|
|
"context"
|
|
"sync"
|
|
|
|
"storj.io/common/pb"
|
|
"storj.io/common/storj"
|
|
)
|
|
|
|
// addressInfo is the subset of a node's address data kept in the in-memory
// cache so that repeat UpdateAddress calls with unchanged values can skip
// the database entirely.
type addressInfo struct {
	address    string           // node address as reported (hostname or IP)
	lastIPPort string           // last known resolved IP:port for the node
	transport  pb.NodeTransport // transport protocol used to reach the node
}
|
|
|
|
// Compile-time assertion that CombinedCache implements the DB interface.
var _ DB = (*CombinedCache)(nil)

// CombinedCache is a simple caching mechanism for overlaycache updates. It
// provides methods to help reduce calls to UpdateAddress and UpdateTime, but can
// be extended for other calls in the future.
//
// architecture: Service
type CombinedCache struct {
	DB

	// addressLock guards addressCache for concurrent readers and writers.
	addressLock  sync.RWMutex
	addressCache map[storj.NodeID]*addressInfo

	// keyLock serializes per-node-ID database updates so the cache and the
	// database stay in sync (see UpdateAddress).
	keyLock *KeyLock
}
|
|
|
|
// NewCombinedCache instantiates a new CombinedCache
|
|
func NewCombinedCache(db DB) *CombinedCache {
|
|
return &CombinedCache{
|
|
DB: db,
|
|
addressCache: make(map[storj.NodeID]*addressInfo),
|
|
keyLock: NewKeyLock(),
|
|
}
|
|
}
|
|
|
|
// UpdateAddress overrides the underlying db.UpdateAddress and provides a simple
|
|
// caching layer to reduce calls to the underlying db. The cache is guaranteed
|
|
// to match the values held in the database; however this code does not
|
|
// guarantee that concurrent UpdateAddress calls will be handled in any
|
|
// particular order.
|
|
func (c *CombinedCache) UpdateAddress(ctx context.Context, info *NodeDossier, defaults NodeSelectionConfig) (err error) {
|
|
// Update internal cache and check if this call requires a db call
|
|
|
|
if info == nil {
|
|
return ErrEmptyNode
|
|
}
|
|
|
|
address := info.Address
|
|
if address == nil {
|
|
address = &pb.NodeAddress{}
|
|
}
|
|
|
|
c.addressLock.RLock()
|
|
cached, ok := c.addressCache[info.Id]
|
|
c.addressLock.RUnlock()
|
|
|
|
if ok &&
|
|
address.Address == cached.address &&
|
|
address.Transport == cached.transport &&
|
|
info.LastIPPort == cached.lastIPPort {
|
|
return nil
|
|
}
|
|
|
|
// Acquire lock for this node ID. This prevents a concurrent db update to
|
|
// this same node ID and guarantees the cache and database stay in sync.
|
|
// This solution works so long as calls to this code are occurring within a
|
|
// single process.
|
|
unlockFunc := c.keyLock.Lock(info.Id)
|
|
defer unlockFunc()
|
|
|
|
err = c.DB.UpdateAddress(ctx, info, defaults)
|
|
if err != nil {
|
|
return err
|
|
}
|
|
|
|
c.addressLock.Lock()
|
|
c.addressCache[info.Id] = &addressInfo{
|
|
address: address.Address,
|
|
lastIPPort: info.LastIPPort,
|
|
transport: address.Transport,
|
|
}
|
|
c.addressLock.Unlock()
|
|
|
|
return nil
|
|
}
|