From 323cbf462b6c244547b16ae3f03d2244acc274b9 Mon Sep 17 00:00:00 2001 From: Zsolt Felfoldi Date: Sat, 20 Feb 2021 23:03:46 +0100 Subject: [PATCH 01/27] les: move client pool to les/vflux/server --- les/api.go | 103 ++++--- les/peer.go | 266 ++++++++++++++++- les/server.go | 86 ++---- les/server_handler.go | 127 ++------ les/test_helper.go | 12 +- les/vflux/server/balance.go | 71 ++--- les/vflux/server/balance_test.go | 28 +- les/vflux/server/balance_tracker.go | 40 ++- les/vflux/server/clientpool.go | 349 ++++++++++++++++++++++ les/{ => vflux/server}/clientpool_test.go | 265 ++++++++-------- les/vflux/server/prioritypool.go | 8 + les/vflux/server/service.go | 6 +- p2p/nodestate/nodestate.go | 17 ++ 13 files changed, 948 insertions(+), 430 deletions(-) create mode 100644 les/vflux/server/clientpool.go rename les/{ => vflux/server}/clientpool_test.go (61%) diff --git a/les/api.go b/les/api.go index a93052451608..f570ffa98f8d 100644 --- a/les/api.go +++ b/les/api.go @@ -31,7 +31,6 @@ var ( errNoCheckpoint = errors.New("no local checkpoint provided") errNotActivated = errors.New("checkpoint registrar is not activated") errUnknownBenchmarkType = errors.New("unknown benchmark type") - errNoPriority = errors.New("priority too low to raise capacity") ) // PrivateLightServerAPI provides an API to access the LES light server. @@ -44,8 +43,8 @@ type PrivateLightServerAPI struct { func NewPrivateLightServerAPI(server *LesServer) *PrivateLightServerAPI { return &PrivateLightServerAPI{ server: server, - defaultPosFactors: server.clientPool.defaultPosFactors, - defaultNegFactors: server.clientPool.defaultNegFactors, + defaultPosFactors: defaultPosFactors, + defaultNegFactors: defaultNegFactors, } } @@ -66,7 +65,9 @@ func (api *PrivateLightServerAPI) ServerInfo() map[string]interface{} { res := make(map[string]interface{}) res["minimumCapacity"] = api.server.minCapacity res["maximumCapacity"] = api.server.maxCapacity - res["totalCapacity"], res["totalConnectedCapacity"], res["priorityConnectedCapacity"] = api.server.clientPool.capacityInfo() + _, res["totalCapacity"] = api.server.clientPool.Limits() + _, res["totalConnectedCapacity"] = api.server.clientPool.Active() + res["priorityConnectedCapacity"] = 0 //TODO connect when token sale module is added return res } @@ -80,9 +81,18 @@ func (api *PrivateLightServerAPI) ClientInfo(nodes []string) map[enode.ID]map[st } res := make(map[enode.ID]map[string]interface{}) - api.server.clientPool.forClients(ids, func(client *clientInfo) { - res[client.node.ID()] = api.clientInfo(client) - }) + if len(ids) == 0 { + ids = api.server.peers.ids() + } + for _, id := range ids { + if peer := api.server.peers.peer(id); peer != nil { + res[id] = api.clientInfo(peer, peer.balance) + } else { + api.server.clientPool.BalanceOperation(id, "", func(balance *vfs.NodeBalance) { + res[id] = api.clientInfo(nil, balance) + }) + } + } return res } @@ -94,31 +104,35 @@ func (api *PrivateLightServerAPI) ClientInfo(nodes []string) map[enode.ID]map[st // assigned to it. 
func (api *PrivateLightServerAPI) PriorityClientInfo(start, stop enode.ID, maxCount int) map[enode.ID]map[string]interface{} { res := make(map[enode.ID]map[string]interface{}) - ids := api.server.clientPool.bt.GetPosBalanceIDs(start, stop, maxCount+1) + ids := api.server.clientPool.GetPosBalanceIDs(start, stop, maxCount+1) if len(ids) > maxCount { res[ids[maxCount]] = make(map[string]interface{}) ids = ids[:maxCount] } - if len(ids) != 0 { - api.server.clientPool.forClients(ids, func(client *clientInfo) { - res[client.node.ID()] = api.clientInfo(client) - }) + for _, id := range ids { + if peer := api.server.peers.peer(id); peer != nil { + res[id] = api.clientInfo(peer, peer.balance) + } else { + api.server.clientPool.BalanceOperation(id, "", func(balance *vfs.NodeBalance) { + res[id] = api.clientInfo(nil, balance) + }) + } } return res } // clientInfo creates a client info data structure -func (api *PrivateLightServerAPI) clientInfo(c *clientInfo) map[string]interface{} { +func (api *PrivateLightServerAPI) clientInfo(peer *clientPeer, balance *vfs.NodeBalance) map[string]interface{} { info := make(map[string]interface{}) - pb, nb := c.balance.GetBalance() - info["isConnected"] = c.connected + pb, nb := balance.GetBalance() + info["isConnected"] = peer != nil info["pricing/balance"] = pb info["priority"] = pb != 0 // cb := api.server.clientPool.ndb.getCurrencyBalance(id) // info["pricing/currency"] = cb.amount - if c.connected { - info["connectionTime"] = float64(mclock.Now()-c.connectedAt) / float64(time.Second) - info["capacity"], _ = api.server.clientPool.ns.GetField(c.node, priorityPoolSetup.CapacityField).(uint64) + if peer != nil { + info["connectionTime"] = float64(mclock.Now()-peer.connectedAt) / float64(time.Second) + info["capacity"] = peer.getCapacity() info["pricing/negBalance"] = nb } return info @@ -126,7 +140,7 @@ func (api *PrivateLightServerAPI) clientInfo(c *clientInfo) map[string]interface // setParams either sets the given parameters for a single connected client (if specified) // or the default parameters applicable to clients connected in the future -func (api *PrivateLightServerAPI) setParams(params map[string]interface{}, client *clientInfo, posFactors, negFactors *vfs.PriceFactors) (updateFactors bool, err error) { +func (api *PrivateLightServerAPI) setParams(params map[string]interface{}, client *clientPeer, posFactors, negFactors *vfs.PriceFactors) (updateFactors bool, err error) { defParams := client == nil for name, value := range params { errValue := func() error { @@ -156,9 +170,8 @@ func (api *PrivateLightServerAPI) setParams(params map[string]interface{}, clien setFactor(&negFactors.RequestFactor) case !defParams && name == "capacity": if capacity, ok := value.(float64); ok && uint64(capacity) >= api.server.minCapacity { - _, err = api.server.clientPool.setCapacity(client.node, client.address, uint64(capacity), 0, true) - // Don't have to call factor update explicitly. It's already done - // in setCapacity function. 
+ _, err = api.server.clientPool.SetCapacity(client.Node(), uint64(capacity), 0, false) + // time factor recalculation is performed automatically by the balance tracker } else { err = errValue() } @@ -179,31 +192,25 @@ func (api *PrivateLightServerAPI) setParams(params map[string]interface{}, clien // SetClientParams sets client parameters for all clients listed in the ids list // or all connected clients if the list is empty func (api *PrivateLightServerAPI) SetClientParams(nodes []string, params map[string]interface{}) error { - var ( - ids []enode.ID - err error - ) + var err error for _, node := range nodes { - if id, err := parseNode(node); err != nil { + var id enode.ID + if id, err = parseNode(node); err != nil { return err - } else { - ids = append(ids, id) } - } - api.server.clientPool.forClients(ids, func(client *clientInfo) { - if client.connected { - posFactors, negFactors := client.balance.GetPriceFactors() - update, e := api.setParams(params, client, &posFactors, &negFactors) + if peer := api.server.peers.peer(id); peer != nil { + posFactors, negFactors := peer.balance.GetPriceFactors() + update, e := api.setParams(params, peer, &posFactors, &negFactors) if update { - client.balance.SetPriceFactors(posFactors, negFactors) + peer.balance.SetPriceFactors(posFactors, negFactors) } if e != nil { err = e } } else { - err = fmt.Errorf("client %064x is not connected", client.node.ID()) + err = fmt.Errorf("client %064x is not connected", id) } - }) + } return err } @@ -211,7 +218,7 @@ func (api *PrivateLightServerAPI) SetClientParams(nodes []string, params map[str func (api *PrivateLightServerAPI) SetDefaultParams(params map[string]interface{}) error { update, err := api.setParams(params, nil, &api.defaultPosFactors, &api.defaultNegFactors) if update { - api.server.clientPool.setDefaultFactors(api.defaultPosFactors, api.defaultNegFactors) + api.server.clientPool.SetDefaultFactors(api.defaultPosFactors, api.defaultNegFactors) } return err } @@ -224,7 +231,7 @@ func (api *PrivateLightServerAPI) SetConnectedBias(bias time.Duration) error { if bias < time.Duration(0) { return fmt.Errorf("bias illegal: %v less than 0", bias) } - api.server.clientPool.setConnectedBias(bias) + api.server.clientPool.SetConnectedBias(bias) return nil } @@ -235,8 +242,8 @@ func (api *PrivateLightServerAPI) AddBalance(node string, amount int64) (balance if id, err = parseNode(node); err != nil { return } - api.server.clientPool.forClients([]enode.ID{id}, func(c *clientInfo) { - balance[0], balance[1], err = c.balance.AddBalance(amount) + api.server.clientPool.BalanceOperation(id, "", func(nb *vfs.NodeBalance) { + balance[0], balance[1], err = nb.AddBalance(amount) }) return } @@ -338,14 +345,12 @@ func (api *PrivateDebugAPI) FreezeClient(node string) error { if id, err = parseNode(node); err != nil { return err } - api.server.clientPool.forClients([]enode.ID{id}, func(c *clientInfo) { - if c.connected { - c.peer.freeze() - } else { - err = fmt.Errorf("client %064x is not connected", id[:]) - } - }) - return err + if peer := api.server.peers.peer(id); peer != nil { + peer.freeze() + return nil + } else { + return fmt.Errorf("client %064x is not connected", id[:]) + } } // PrivateLightAPI provides an API to access the LES light server or light client. 
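Reviewer note (not part of the patch): the API methods above all share the same two-step lookup — use the connected peer's cached balance handle if the node is in the peer set, otherwise fall back to ClientPool.BalanceOperation on the stored balance. A minimal sketch of that pattern, written as it would sit in package les next to the methods above; the helper name withBalance is illustrative only and assumes the fields introduced by this patch (server.peers *clientPeerSet, server.clientPool *vfs.ClientPool, peer.balance *vfs.NodeBalance).

    // Sketch only, not part of the diff.
    func (api *PrivateLightServerAPI) withBalance(id enode.ID, cb func(peer *clientPeer, balance *vfs.NodeBalance)) {
        if peer := api.server.peers.peer(id); peer != nil {
            // connected: the balance handle is cached on the peer at registration time
            cb(peer, peer.balance)
        } else {
            // disconnected: operate on the persisted balance through the pool
            api.server.clientPool.BalanceOperation(id, "", func(balance *vfs.NodeBalance) {
                cb(nil, balance)
            })
        }
    }
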
diff --git a/les/peer.go b/les/peer.go index 78019b1d879c..648ed1e18652 100644 --- a/les/peer.go +++ b/les/peer.go @@ -17,6 +17,7 @@ package les import ( + "crypto/ecdsa" "errors" "fmt" "math/big" @@ -37,6 +38,7 @@ import ( vfs "github.com/ethereum/go-ethereum/les/vflux/server" "github.com/ethereum/go-ethereum/light" "github.com/ethereum/go-ethereum/p2p" + "github.com/ethereum/go-ethereum/p2p/enode" "github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/rlp" ) @@ -768,9 +770,16 @@ type clientPeer struct { invalidLock sync.RWMutex invalidCount utils.LinearExpiredValue // Counter the invalid request the client peer has made. - server bool - errCh chan error - fcClient *flowcontrol.ClientNode // Server side mirror token bucket. + capacity uint64 + // lastAnnounce is the last broadcast created by the server; may be newer than the last head + // sent to the specific client (stored in headInfo) if capacity is zero. In this case the + // latest head is sent when the client gains non-zero capacity. + lastAnnounce announceData + + connectedAt mclock.AbsTime + server bool + errCh chan error + fcClient *flowcontrol.ClientNode // Server side mirror token bucket. } func newClientPeer(version int, network uint64, p *p2p.Peer, rw p2p.MsgReadWriter) *clientPeer { @@ -789,9 +798,9 @@ func newClientPeer(version int, network uint64, p *p2p.Peer, rw p2p.MsgReadWrite } } -// freeClientId returns a string identifier for the peer. Multiple peers with +// FreeClientId returns a string identifier for the peer. Multiple peers with // the same identifier can not be connected in free mode simultaneously. -func (p *clientPeer) freeClientId() string { +func (p *clientPeer) FreeClientId() string { if addr, ok := p.RemoteAddr().(*net.TCPAddr); ok { if addr.IP.IsLoopback() { // using peer id instead of loopback ip address allows multiple free @@ -921,25 +930,69 @@ func (p *clientPeer) sendAnnounce(request announceData) error { return p2p.Send(p.rw, AnnounceMsg, request) } -// allowInactive implements clientPoolPeer -func (p *clientPeer) allowInactive() bool { - return false +// InactiveTimeout implements vfs.clientPeer +func (p *clientPeer) InactiveTimeout() time.Duration { + return 0 // will return more than zero for les/5 clients +} + +// getCapacity returns the current capacity of the peer +func (p *clientPeer) getCapacity() uint64 { + p.lock.RLock() + defer p.lock.RUnlock() + + return p.capacity } -// updateCapacity updates the request serving capacity assigned to a given client -// and also sends an announcement about the updated flow control parameters -func (p *clientPeer) updateCapacity(cap uint64) { +// UpdateCapacity updates the request serving capacity assigned to a given client +// and also sends an announcement about the updated flow control parameters. +// Note: UpdateCapacity implements vfs.clientPeer and should not block. The requested +// parameter is true if the callback was initiated by ClientPool.SetCapacity on the given peer. 
+func (p *clientPeer) UpdateCapacity(newCap uint64, requested bool) { p.lock.Lock() defer p.lock.Unlock() - if cap != p.fcParams.MinRecharge { - p.fcParams = flowcontrol.ServerParams{MinRecharge: cap, BufLimit: cap * bufLimitRatio} + if newCap != p.fcParams.MinRecharge { + p.fcParams = flowcontrol.ServerParams{MinRecharge: newCap, BufLimit: newCap * bufLimitRatio} p.fcClient.UpdateParams(p.fcParams) var kvList keyValueList - kvList = kvList.add("flowControl/MRR", cap) - kvList = kvList.add("flowControl/BL", cap*bufLimitRatio) + kvList = kvList.add("flowControl/MRR", newCap) + kvList = kvList.add("flowControl/BL", newCap*bufLimitRatio) p.queueSend(func() { p.sendAnnounce(announceData{Update: kvList}) }) } + + if p.capacity == 0 && newCap != 0 { + p.sendLastAnnounce() + } + p.capacity = newCap +} + +// announceOrStore sends the given head announcement to the client if the client is +// active (capacity != 0) and the same announcement hasn't been sent before. If the +// client is inactive the announcement is stored and sent later if the client is +// activated again. +func (p *clientPeer) announceOrStore(announce announceData) { + p.lock.Lock() + defer p.lock.Unlock() + + p.lastAnnounce = announce + if p.capacity != 0 { + p.sendLastAnnounce() + } +} + +// announce sends the given head announcement to the client if it hasn't been sent before +func (p *clientPeer) sendLastAnnounce() { + if p.lastAnnounce.Td == nil { + return + } + if p.headInfo.Td == nil || p.lastAnnounce.Td.Cmp(p.headInfo.Td) > 0 { + if !p.queueSend(func() { p.sendAnnounce(p.lastAnnounce) }) { + p.Log().Debug("Dropped announcement because queue is full", "number", p.lastAnnounce.Number, "hash", p.lastAnnounce.Hash) + } else { + p.Log().Debug("Sent announcement", "number", p.lastAnnounce.Number, "hash", p.lastAnnounce.Hash) + } + p.headInfo = blockInfo{Hash: p.lastAnnounce.Hash, Number: p.lastAnnounce.Number, Td: p.lastAnnounce.Td} + } } // freezeClient temporarily puts the client in a frozen state which means all @@ -1064,6 +1117,11 @@ func (p *clientPeer) getInvalid() uint64 { return p.invalidCount.Value(mclock.Now()) } +// Disconnect implements vfs.clientPeer +func (p *clientPeer) Disconnect() { + p.Peer.Disconnect(p2p.DiscRequested) +} + // serverPeerSubscriber is an interface to notify services about added or // removed server peers type serverPeerSubscriber interface { @@ -1221,3 +1279,181 @@ func (ps *serverPeerSet) close() { } ps.closed = true } + +// clientPeerSet represents the set of active client peers currently +// participating in the Light Ethereum sub-protocol. +type clientPeerSet struct { + peers map[enode.ID]*clientPeer + lock sync.RWMutex + closed bool + + privateKey *ecdsa.PrivateKey + lastAnnounce, signedAnnounce announceData +} + +// newClientPeerSet creates a new peer set to track the client peers. +func newClientPeerSet() *clientPeerSet { + return &clientPeerSet{peers: make(map[enode.ID]*clientPeer)} +} + +// register adds a new peer into the peer set, or returns an error if the +// peer is already known. +func (ps *clientPeerSet) register(peer *clientPeer) error { + ps.lock.Lock() + defer ps.lock.Unlock() + + if ps.closed { + return errClosed + } + if _, exist := ps.peers[peer.ID()]; exist { + return errAlreadyRegistered + } + ps.peers[peer.ID()] = peer + ps.announceOrStore(peer) + return nil +} + +// unregister removes a remote peer from the peer set, disabling any further +// actions to/from that particular entity. It also initiates disconnection +// at the networking layer. 
+func (ps *clientPeerSet) unregister(id enode.ID) error {
+	ps.lock.Lock()
+	defer ps.lock.Unlock()
+
+	p, ok := ps.peers[id]
+	if !ok {
+		return errNotRegistered
+	}
+	delete(ps.peers, id)
+	p.Peer.Disconnect(p2p.DiscRequested)
+	return nil
+}
+
+// ids returns a list of all registered peer IDs
+func (ps *clientPeerSet) ids() []enode.ID {
+	ps.lock.RLock()
+	defer ps.lock.RUnlock()
+
+	var ids []enode.ID
+	for id := range ps.peers {
+		ids = append(ids, id)
+	}
+	return ids
+}
+
+// peer retrieves the registered peer with the given id.
+func (ps *clientPeerSet) peer(id enode.ID) *clientPeer {
+	ps.lock.RLock()
+	defer ps.lock.RUnlock()
+
+	return ps.peers[id]
+}
+
+// len returns the current number of peers in the set.
+func (ps *clientPeerSet) len() int {
+	ps.lock.RLock()
+	defer ps.lock.RUnlock()
+
+	return len(ps.peers)
+}
+
+// setSignerKey sets the signer key for signed announcements. Should be called before
+// starting the protocol handler.
+func (ps *clientPeerSet) setSignerKey(privateKey *ecdsa.PrivateKey) {
+	ps.privateKey = privateKey
+}
+
+// broadcast sends the given announcements to all active peers
+func (ps *clientPeerSet) broadcast(announce announceData) {
+	ps.lock.Lock()
+	defer ps.lock.Unlock()
+
+	ps.lastAnnounce = announce
+	for _, peer := range ps.peers {
+		ps.announceOrStore(peer)
+	}
+}
+
+// announceOrStore sends the requested type of announcement to the given peer or stores
+// it for later if the peer is inactive (capacity == 0).
+func (ps *clientPeerSet) announceOrStore(p *clientPeer) {
+	if ps.lastAnnounce.Td == nil {
+		return
+	}
+	switch p.announceType {
+	case announceTypeSimple:
+		p.announceOrStore(ps.lastAnnounce)
+	case announceTypeSigned:
+		if ps.signedAnnounce.Hash != ps.lastAnnounce.Hash {
+			ps.signedAnnounce = ps.lastAnnounce
+			ps.signedAnnounce.sign(ps.privateKey)
+		}
+		p.announceOrStore(ps.signedAnnounce)
+	}
+}
+
+// close disconnects all peers. No new peers can be registered
+// after close has returned.
+func (ps *clientPeerSet) close() {
+	ps.lock.Lock()
+	defer ps.lock.Unlock()
+
+	for _, p := range ps.peers {
+		p.Peer.Disconnect(p2p.DiscQuitting)
+	}
+	ps.closed = true
+}
+
+// serverSet is a special set which contains all connected les servers.
+// Les servers are also discovered by the discovery protocol because they
+// run the LES protocol too. Although they are useless to us as a server,
+// we keep them connected because they may still be useful to other
+// protocols (e.g. ETH) running on top of the same devp2p connection.
+type serverSet struct { + lock sync.Mutex + set map[string]*clientPeer + closed bool +} + +func newServerSet() *serverSet { + return &serverSet{set: make(map[string]*clientPeer)} +} + +func (s *serverSet) register(peer *clientPeer) error { + s.lock.Lock() + defer s.lock.Unlock() + + if s.closed { + return errClosed + } + if _, exist := s.set[peer.id]; exist { + return errAlreadyRegistered + } + s.set[peer.id] = peer + return nil +} + +func (s *serverSet) unregister(peer *clientPeer) error { + s.lock.Lock() + defer s.lock.Unlock() + + if s.closed { + return errClosed + } + if _, exist := s.set[peer.id]; !exist { + return errNotRegistered + } + delete(s.set, peer.id) + peer.Peer.Disconnect(p2p.DiscQuitting) + return nil +} + +func (s *serverSet) close() { + s.lock.Lock() + defer s.lock.Unlock() + + for _, p := range s.set { + p.Peer.Disconnect(p2p.DiscQuitting) + } + s.closed = true +} diff --git a/les/server.go b/les/server.go index be64dfe190ad..2dbcfbd0b0d7 100644 --- a/les/server.go +++ b/les/server.go @@ -18,7 +18,6 @@ package les import ( "crypto/ecdsa" - "reflect" "time" "github.com/ethereum/go-ethereum/common/mclock" @@ -26,7 +25,6 @@ import ( "github.com/ethereum/go-ethereum/eth/ethconfig" "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/les/flowcontrol" - "github.com/ethereum/go-ethereum/les/vflux" vfs "github.com/ethereum/go-ethereum/les/vflux/server" "github.com/ethereum/go-ethereum/light" "github.com/ethereum/go-ethereum/log" @@ -34,24 +32,16 @@ import ( "github.com/ethereum/go-ethereum/p2p" "github.com/ethereum/go-ethereum/p2p/enode" "github.com/ethereum/go-ethereum/p2p/enr" - "github.com/ethereum/go-ethereum/p2p/nodestate" "github.com/ethereum/go-ethereum/params" "github.com/ethereum/go-ethereum/rpc" ) var ( - serverSetup = &nodestate.Setup{} - clientPeerField = serverSetup.NewField("clientPeer", reflect.TypeOf(&clientPeer{})) - clientInfoField = serverSetup.NewField("clientInfo", reflect.TypeOf(&clientInfo{})) - connAddressField = serverSetup.NewField("connAddr", reflect.TypeOf("")) - balanceTrackerSetup = vfs.NewBalanceTrackerSetup(serverSetup) - priorityPoolSetup = vfs.NewPriorityPoolSetup(serverSetup) + defaultPosFactors = vfs.PriceFactors{TimeFactor: 0, CapacityFactor: 1, RequestFactor: 1} + defaultNegFactors = vfs.PriceFactors{TimeFactor: 0, CapacityFactor: 1, RequestFactor: 1} ) -func init() { - balanceTrackerSetup.Connect(connAddressField, priorityPoolSetup.CapacityField) - priorityPoolSetup.Connect(balanceTrackerSetup.BalanceField, balanceTrackerSetup.UpdateFlag) // NodeBalance implements nodePriority -} +const defaultConnectedBias = time.Minute * 3 type ethBackend interface { ArchiveMode() bool @@ -65,10 +55,10 @@ type ethBackend interface { type LesServer struct { lesCommons - ns *nodestate.NodeStateMachine archiveMode bool // Flag whether the ethereum node runs in archive mode. handler *serverHandler - broadcaster *broadcaster + peers *clientPeerSet + serverset *serverSet vfluxServer *vfs.Server privateKey *ecdsa.PrivateKey @@ -77,7 +67,7 @@ type LesServer struct { costTracker *costTracker defParams flowcontrol.ServerParams servingQueue *servingQueue - clientPool *clientPool + clientPool *vfs.ClientPool minCapacity, maxCapacity uint64 threadsIdle int // Request serving threads count when system is idle. 
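Reviewer note (not part of the patch): the hunks below replace the nodestate-based clientPool with a self-contained vfs.ClientPool owned by LesServer. Condensed, the wiring they introduce looks roughly like the sketch below, as it would appear inside package les; the wrapper function name and parameter list are illustrative, and error handling plus unrelated setup are omitted.

    // Sketch only, not part of the diff.
    func wireClientPool(lesDb ethdb.KeyValueStore, minCapacity uint64, issync func() bool,
        vfluxSrv *vfs.Server, lightPeers int, totalCapacity uint64) *vfs.ClientPool {
        pool := vfs.NewClientPool(lesDb, minCapacity, defaultConnectedBias, mclock.System{}, issync)
        pool.AddMetrics(totalConnectedGauge, clientConnectedMeter, clientDisconnectedMeter,
            clientActivatedMeter, clientDeactivatedMeter)
        pool.Start() // must happen before the handler starts calling Register/Unregister
        pool.SetDefaultFactors(defaultPosFactors, defaultNegFactors)
        vfluxSrv.Register(pool, "les", "Ethereum light client service") // ClientPool implements vfs.Service
        pool.SetLimits(uint64(lightPeers), totalCapacity) // refreshed later by capacityManagement()
        return pool
    }

On shutdown the order is reversed: clientPool.Stop() runs before the peer sets are closed and the flow control manager is stopped.
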
@@ -91,7 +81,6 @@ func NewLesServer(node *node.Node, e ethBackend, config *ethconfig.Config) (*Les if err != nil { return nil, err } - ns := nodestate.NewNodeStateMachine(nil, nil, mclock.System{}, serverSetup) // Calculate the number of threads used to service the light client // requests based on the user-specified value. threads := config.LightServ * 4 / 100 @@ -111,9 +100,9 @@ func NewLesServer(node *node.Node, e ethBackend, config *ethconfig.Config) (*Les bloomTrieIndexer: light.NewBloomTrieIndexer(e.ChainDb(), nil, params.BloomBitsBlocks, params.BloomTrieFrequency, true), closeCh: make(chan struct{}), }, - ns: ns, archiveMode: e.ArchiveMode(), - broadcaster: newBroadcaster(ns), + peers: newClientPeerSet(), + serverset: newServerSet(), vfluxServer: vfs.NewServer(time.Millisecond * 10), fcManager: flowcontrol.NewClientManager(nil, &mclock.System{}), servingQueue: newServingQueue(int64(time.Millisecond*10), float64(config.LightServ)/100), @@ -121,7 +110,6 @@ func NewLesServer(node *node.Node, e ethBackend, config *ethconfig.Config) (*Les threadsIdle: threads, p2pSrv: node.Server(), } - srv.vfluxServer.Register(srv) issync := e.Synced if config.LightNoSyncServe { issync = func() bool { return true } @@ -149,8 +137,11 @@ func NewLesServer(node *node.Node, e ethBackend, config *ethconfig.Config) (*Les srv.maxCapacity = totalRecharge } srv.fcManager.SetCapacityLimits(srv.minCapacity, srv.maxCapacity, srv.minCapacity*2) - srv.clientPool = newClientPool(ns, lesDb, srv.minCapacity, defaultConnectedBias, mclock.System{}, srv.dropClient, issync) - srv.clientPool.setDefaultFactors(vfs.PriceFactors{TimeFactor: 0, CapacityFactor: 1, RequestFactor: 1}, vfs.PriceFactors{TimeFactor: 0, CapacityFactor: 1, RequestFactor: 1}) + srv.clientPool = vfs.NewClientPool(lesDb, srv.minCapacity, defaultConnectedBias, mclock.System{}, issync) + srv.clientPool.AddMetrics(totalConnectedGauge, clientConnectedMeter, clientDisconnectedMeter, clientActivatedMeter, clientDeactivatedMeter) + srv.clientPool.Start() + srv.clientPool.SetDefaultFactors(defaultPosFactors, defaultNegFactors) + srv.vfluxServer.Register(srv.clientPool, "les", "Ethereum light client service") checkpoint := srv.latestLocalCheckpoint() if !checkpoint.Empty() { @@ -162,14 +153,6 @@ func NewLesServer(node *node.Node, e ethBackend, config *ethconfig.Config) (*Les node.RegisterProtocols(srv.Protocols()) node.RegisterAPIs(srv.APIs()) node.RegisterLifecycle(srv) - - // disconnect all peers at nsm shutdown - ns.SubscribeField(clientPeerField, func(node *enode.Node, state nodestate.Flags, oldValue, newValue interface{}) { - if state.Equals(serverSetup.OfflineFlag()) && oldValue != nil { - oldValue.(*clientPeer).Peer.Disconnect(p2p.DiscRequested) - } - }) - ns.Start() return srv, nil } @@ -198,7 +181,7 @@ func (s *LesServer) APIs() []rpc.API { func (s *LesServer) Protocols() []p2p.Protocol { ps := s.makeProtocols(ServerProtocolVersions, s.handler.runPeer, func(id enode.ID) interface{} { - if p := s.getClient(id); p != nil { + if p := s.peers.peer(id); p != nil { return p.Info() } return nil @@ -215,7 +198,7 @@ func (s *LesServer) Protocols() []p2p.Protocol { // Start starts the LES server func (s *LesServer) Start() error { s.privateKey = s.p2pSrv.PrivateKey - s.broadcaster.setSignerKey(s.privateKey) + s.peers.setSignerKey(s.privateKey) s.handler.start() s.wg.Add(1) go s.capacityManagement() @@ -229,8 +212,9 @@ func (s *LesServer) Start() error { func (s *LesServer) Stop() error { close(s.closeCh) - s.clientPool.stop() - s.ns.Stop() + s.clientPool.Stop() + 
s.serverset.close() + s.peers.close() s.fcManager.Stop() s.costTracker.stop() s.handler.stop() @@ -261,7 +245,7 @@ func (s *LesServer) capacityManagement() { totalCapacityCh := make(chan uint64, 100) totalCapacity := s.fcManager.SubscribeTotalCapacity(totalCapacityCh) - s.clientPool.setLimits(s.config.LightPeers, totalCapacity) + s.clientPool.SetLimits(uint64(s.config.LightPeers), totalCapacity) var ( busy bool @@ -298,39 +282,9 @@ func (s *LesServer) capacityManagement() { log.Warn("Reduced free peer connections", "from", freePeers, "to", newFreePeers) } freePeers = newFreePeers - s.clientPool.setLimits(s.config.LightPeers, totalCapacity) + s.clientPool.SetLimits(uint64(s.config.LightPeers), totalCapacity) case <-s.closeCh: return } } } - -func (s *LesServer) getClient(id enode.ID) *clientPeer { - if node := s.ns.GetNode(id); node != nil { - if p, ok := s.ns.GetField(node, clientPeerField).(*clientPeer); ok { - return p - } - } - return nil -} - -func (s *LesServer) dropClient(id enode.ID) { - if p := s.getClient(id); p != nil { - p.Peer.Disconnect(p2p.DiscRequested) - } -} - -// ServiceInfo implements vfs.Service -func (s *LesServer) ServiceInfo() (string, string) { - return "les", "Ethereum light client service" -} - -// Handle implements vfs.Service -func (s *LesServer) Handle(id enode.ID, address string, name string, data []byte) []byte { - switch name { - case vflux.CapacityQueryName: - return s.clientPool.serveCapQuery(id, address, data) - default: - return nil - } -} diff --git a/les/server_handler.go b/les/server_handler.go index 7651d03cab7e..5e12136d90a8 100644 --- a/les/server_handler.go +++ b/les/server_handler.go @@ -17,7 +17,6 @@ package les import ( - "crypto/ecdsa" "errors" "sync" "sync/atomic" @@ -31,13 +30,10 @@ import ( "github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/ethdb" - vfs "github.com/ethereum/go-ethereum/les/vflux/server" "github.com/ethereum/go-ethereum/light" "github.com/ethereum/go-ethereum/log" "github.com/ethereum/go-ethereum/metrics" "github.com/ethereum/go-ethereum/p2p" - "github.com/ethereum/go-ethereum/p2p/enode" - "github.com/ethereum/go-ethereum/p2p/nodestate" "github.com/ethereum/go-ethereum/rlp" "github.com/ethereum/go-ethereum/trie" ) @@ -59,7 +55,6 @@ const ( var ( errTooManyInvalidRequest = errors.New("too many invalid requests made") - errFullClientPool = errors.New("client pool is full") ) // serverHandler is responsible for serving light client and process @@ -128,32 +123,18 @@ func (h *serverHandler) handle(p *clientPeer) error { p.Log().Debug("Light Ethereum handshake failed", "err", err) return err } - // Reject the duplicated peer, otherwise register it to peerset. 
- var registered bool - if err := h.server.ns.Operation(func() { - if h.server.ns.GetField(p.Node(), clientPeerField) != nil { - registered = true - } else { - h.server.ns.SetFieldSub(p.Node(), clientPeerField, p) - } - }); err != nil { - return err - } - if registered { - return errAlreadyRegistered - } - defer func() { - h.server.ns.SetField(p.Node(), clientPeerField, nil) - if p.fcClient != nil { // is nil when connecting another server - p.fcClient.Disconnect() - } - }() if p.server { + if err := h.server.serverset.register(p); err != nil { + return err + } // connected to another server, no messages expected, just wait for disconnection _, err := p.rw.ReadMsg() + h.server.serverset.unregister(p) return err } + defer p.fcClient.Disconnect() // set by handshake if it's not another server + // Reject light clients if server is not synced. // // Put this checking here, so that "non-synced" les-server peers are still allowed @@ -162,30 +143,31 @@ func (h *serverHandler) handle(p *clientPeer) error { p.Log().Debug("Light server not synced, rejecting peer") return p2p.DiscRequested } - // Disconnect the inbound peer if it's rejected by clientPool - if cap, err := h.server.clientPool.connect(p); cap != p.fcParams.MinRecharge || err != nil { - p.Log().Debug("Light Ethereum peer rejected", "err", errFullClientPool) - return errFullClientPool + if err := h.server.peers.register(p); err != nil { + return err } - p.balance, _ = h.server.ns.GetField(p.Node(), h.server.clientPool.BalanceField).(*vfs.NodeBalance) - if p.balance == nil { + if p.balance = h.server.clientPool.Register(p); p.balance == nil { + h.server.peers.unregister(p.ID()) + p.Log().Debug("Client pool already closed") return p2p.DiscRequested } - activeCount, _ := h.server.clientPool.pp.Active() + activeCount, _ := h.server.clientPool.Active() clientConnectionGauge.Update(int64(activeCount)) + p.connectedAt = mclock.Now() var wg sync.WaitGroup // Wait group used to track all in-flight task routines. - connectedAt := mclock.Now() defer func() { wg.Wait() // Ensure all background task routines have exited. - h.server.clientPool.disconnect(p) + h.server.clientPool.Unregister(p) + h.server.peers.unregister(p.ID()) p.balance = nil - activeCount, _ := h.server.clientPool.pp.Active() + activeCount, _ := h.server.clientPool.Active() clientConnectionGauge.Update(int64(activeCount)) - connectionTimer.Update(time.Duration(mclock.Now() - connectedAt)) + connectionTimer.Update(time.Duration(mclock.Now() - p.connectedAt)) }() - // Mark the peer starts to be served. + + // Mark the peer as being served. 
atomic.StoreUint32(&p.serving, 1) defer atomic.StoreUint32(&p.serving, 0) @@ -448,78 +430,9 @@ func (h *serverHandler) broadcastLoop() { } lastHead, lastTd = header, td log.Debug("Announcing block to peers", "number", number, "hash", hash, "td", td, "reorg", reorg) - h.server.broadcaster.broadcast(announceData{Hash: hash, Number: number, Td: td, ReorgDepth: reorg}) + h.server.peers.broadcast(announceData{Hash: hash, Number: number, Td: td, ReorgDepth: reorg}) case <-h.closeCh: return } } } - -// broadcaster sends new header announcements to active client peers -type broadcaster struct { - ns *nodestate.NodeStateMachine - privateKey *ecdsa.PrivateKey - lastAnnounce, signedAnnounce announceData -} - -// newBroadcaster creates a new broadcaster -func newBroadcaster(ns *nodestate.NodeStateMachine) *broadcaster { - b := &broadcaster{ns: ns} - ns.SubscribeState(priorityPoolSetup.ActiveFlag, func(node *enode.Node, oldState, newState nodestate.Flags) { - if newState.Equals(priorityPoolSetup.ActiveFlag) { - // send last announcement to activated peers - b.sendTo(node) - } - }) - return b -} - -// setSignerKey sets the signer key for signed announcements. Should be called before -// starting the protocol handler. -func (b *broadcaster) setSignerKey(privateKey *ecdsa.PrivateKey) { - b.privateKey = privateKey -} - -// broadcast sends the given announcements to all active peers -func (b *broadcaster) broadcast(announce announceData) { - b.ns.Operation(func() { - // iterate in an Operation to ensure that the active set does not change while iterating - b.lastAnnounce = announce - b.ns.ForEach(priorityPoolSetup.ActiveFlag, nodestate.Flags{}, func(node *enode.Node, state nodestate.Flags) { - b.sendTo(node) - }) - }) -} - -// sendTo sends the most recent announcement to the given node unless the same or higher Td -// announcement has already been sent. 
-func (b *broadcaster) sendTo(node *enode.Node) { - if b.lastAnnounce.Td == nil { - return - } - if p, _ := b.ns.GetField(node, clientPeerField).(*clientPeer); p != nil { - if p.headInfo.Td == nil || b.lastAnnounce.Td.Cmp(p.headInfo.Td) > 0 { - announce := b.lastAnnounce - switch p.announceType { - case announceTypeSimple: - if !p.queueSend(func() { p.sendAnnounce(announce) }) { - log.Debug("Drop announcement because queue is full", "number", announce.Number, "hash", announce.Hash) - } else { - log.Debug("Sent announcement", "number", announce.Number, "hash", announce.Hash) - } - case announceTypeSigned: - if b.signedAnnounce.Hash != b.lastAnnounce.Hash { - b.signedAnnounce = b.lastAnnounce - b.signedAnnounce.sign(b.privateKey) - } - announce := b.signedAnnounce - if !p.queueSend(func() { p.sendAnnounce(announce) }) { - log.Debug("Drop announcement because queue is full", "number", announce.Number, "hash", announce.Hash) - } else { - log.Debug("Sent announcement", "number", announce.Number, "hash", announce.Hash) - } - } - p.headInfo = blockInfo{b.lastAnnounce.Hash, b.lastAnnounce.Number, b.lastAnnounce.Td} - } - } -} diff --git a/les/test_helper.go b/les/test_helper.go index e49bfc8738b3..ee2da2f8eb8f 100644 --- a/les/test_helper.go +++ b/les/test_helper.go @@ -45,10 +45,10 @@ import ( "github.com/ethereum/go-ethereum/event" "github.com/ethereum/go-ethereum/les/checkpointoracle" "github.com/ethereum/go-ethereum/les/flowcontrol" + vfs "github.com/ethereum/go-ethereum/les/vflux/server" "github.com/ethereum/go-ethereum/light" "github.com/ethereum/go-ethereum/p2p" "github.com/ethereum/go-ethereum/p2p/enode" - "github.com/ethereum/go-ethereum/p2p/nodestate" "github.com/ethereum/go-ethereum/params" ) @@ -284,7 +284,6 @@ func newTestServerHandler(blocks int, indexers []*core.ChainIndexer, db ethdb.Da } oracle = checkpointoracle.New(checkpointConfig, getLocal) } - ns := nodestate.NewNodeStateMachine(nil, nil, mclock.System{}, serverSetup) server := &LesServer{ lesCommons: lesCommons{ genesis: genesis.Hash(), @@ -296,8 +295,7 @@ func newTestServerHandler(blocks int, indexers []*core.ChainIndexer, db ethdb.Da oracle: oracle, closeCh: make(chan struct{}), }, - ns: ns, - broadcaster: newBroadcaster(ns), + peers: newClientPeerSet(), servingQueue: newServingQueue(int64(time.Millisecond*10), 1), defParams: flowcontrol.ServerParams{ BufLimit: testBufLimit, @@ -307,14 +305,14 @@ func newTestServerHandler(blocks int, indexers []*core.ChainIndexer, db ethdb.Da } server.costTracker, server.minCapacity = newCostTracker(db, server.config) server.costTracker.testCostList = testCostList(0) // Disable flow control mechanism. 
- server.clientPool = newClientPool(ns, db, testBufRecharge, defaultConnectedBias, clock, func(id enode.ID) {}, alwaysTrueFn) - server.clientPool.setLimits(10000, 10000) // Assign enough capacity for clientpool + server.clientPool = vfs.NewClientPool(db, testBufRecharge, defaultConnectedBias, clock, alwaysTrueFn) + server.clientPool.Start() + server.clientPool.SetLimits(10000, 10000) // Assign enough capacity for clientpool server.handler = newServerHandler(server, simulation.Blockchain(), db, txpool, func() bool { return true }) if server.oracle != nil { server.oracle.Start(simulation) } server.servingQueue.setThreads(4) - ns.Start() server.handler.start() return server.handler, simulation } diff --git a/les/vflux/server/balance.go b/les/vflux/server/balance.go index db12a5c5736a..7a677e65bcf3 100644 --- a/les/vflux/server/balance.go +++ b/les/vflux/server/balance.go @@ -60,8 +60,7 @@ type NodeBalance struct { lock sync.RWMutex node *enode.Node connAddress string - active bool - priority bool + active, priority, setFlags bool capacity uint64 balance balance posFactor, negFactor PriceFactors @@ -114,48 +113,48 @@ func (n *NodeBalance) GetRawBalance() (utils.ExpiredValue, utils.ExpiredValue) { // before and after the operation. Exceeding maxBalance results in an error (balance is // unchanged) while adding a negative amount higher than the current balance results in // zero balance. +// Note: this function should run inside a NodeStateMachine operation func (n *NodeBalance) AddBalance(amount int64) (uint64, uint64, error) { var ( err error old, new uint64 ) - n.bt.ns.Operation(func() { - var ( - callbacks []func() - setPriority bool - ) - n.bt.updateTotalBalance(n, func() bool { - now := n.bt.clock.Now() - n.updateBalance(now) - - // Ensure the given amount is valid to apply. - offset := n.bt.posExp.LogOffset(now) - old = n.balance.pos.Value(offset) - if amount > 0 && (amount > maxBalance || old > maxBalance-uint64(amount)) { - err = errBalanceOverflow - return false - } + var ( + callbacks []func() + setPriority bool + ) + n.bt.updateTotalBalance(n, func() bool { + now := n.bt.clock.Now() + n.updateBalance(now) - // Update the total positive balance counter. - n.balance.pos.Add(amount, offset) - callbacks = n.checkCallbacks(now) - setPriority = n.checkPriorityStatus() - new = n.balance.pos.Value(offset) - n.storeBalance(true, false) - return true - }) - for _, cb := range callbacks { - cb() + // Ensure the given amount is valid to apply. + offset := n.bt.posExp.LogOffset(now) + old = n.balance.pos.Value(offset) + if amount > 0 && (amount > maxBalance || old > maxBalance-uint64(amount)) { + err = errBalanceOverflow + return false } + + // Update the total positive balance counter. 
+ n.balance.pos.Add(amount, offset) + callbacks = n.checkCallbacks(now) + setPriority = n.checkPriorityStatus() + new = n.balance.pos.Value(offset) + n.storeBalance(true, false) + return true + }) + for _, cb := range callbacks { + cb() + } + if n.setFlags { if setPriority { n.bt.ns.SetStateSub(n.node, n.bt.PriorityFlag, nodestate.Flags{}, 0) } n.signalPriorityUpdate() - }) + } if err != nil { return old, old, err } - return old, new, nil } @@ -186,10 +185,12 @@ func (n *NodeBalance) SetBalance(pos, neg uint64) error { for _, cb := range callbacks { cb() } - if setPriority { - n.bt.ns.SetStateSub(n.node, n.bt.PriorityFlag, nodestate.Flags{}, 0) + if n.setFlags { + if setPriority { + n.bt.ns.SetStateSub(n.node, n.bt.PriorityFlag, nodestate.Flags{}, 0) + } + n.signalPriorityUpdate() } - n.signalPriorityUpdate() }) return nil } @@ -517,7 +518,9 @@ func (n *NodeBalance) balanceExhausted() { n.storeBalance(true, false) n.priority = false n.lock.Unlock() - n.bt.ns.SetStateSub(n.node, nodestate.Flags{}, n.bt.PriorityFlag, 0) + if n.setFlags { + n.bt.ns.SetStateSub(n.node, nodestate.Flags{}, n.bt.PriorityFlag, 0) + } } // checkPriorityStatus checks whether the node has gained priority status and sets the priority diff --git a/les/vflux/server/balance_test.go b/les/vflux/server/balance_test.go index e22074db2d08..fc4311080484 100644 --- a/les/vflux/server/balance_test.go +++ b/les/vflux/server/balance_test.go @@ -32,13 +32,13 @@ import ( ) var ( - testFlag = testSetup.NewFlag("testFlag") - connAddrFlag = testSetup.NewField("connAddr", reflect.TypeOf("")) - btTestSetup = NewBalanceTrackerSetup(testSetup) + testFlag = testSetup.NewFlag("testFlag") + btClientField = testSetup.NewField("clientField", reflect.TypeOf(balanceTestClient{})) + btTestSetup = NewBalanceTrackerSetup(testSetup) ) func init() { - btTestSetup.Connect(connAddrFlag, ppTestSetup.CapacityField) + btTestSetup.Connect(btClientField, ppTestSetup.CapacityField) } type zeroExpirer struct{} @@ -66,10 +66,16 @@ func newBalanceTestSetup() *balanceTestSetup { } } +type balanceTestClient struct{} + +func (btc balanceTestClient) FreeClientId() string { + return "" +} + func (b *balanceTestSetup) newNode(capacity uint64) *NodeBalance { node := enode.SignNull(&enr.Record{}, enode.ID{}) b.ns.SetState(node, testFlag, nodestate.Flags{}, 0) - b.ns.SetField(node, btTestSetup.connAddressField, "") + b.ns.SetField(node, btTestSetup.clientField, balanceTestClient{}) if capacity != 0 { b.ns.SetField(node, ppTestSetup.CapacityField, capacity) } @@ -100,7 +106,13 @@ func TestAddBalance(t *testing.T) { {maxBalance, [2]uint64{0, 0}, 0, true}, } for _, i := range inputs { - old, new, err := node.AddBalance(i.delta) + var ( + old, new uint64 + err error + ) + b.ns.Operation(func() { + old, new, err = node.AddBalance(i.delta) + }) if i.expectErr { if err == nil { t.Fatalf("Expect get error but nil") @@ -323,7 +335,9 @@ func TestPostiveBalanceCounting(t *testing.T) { var sum uint64 for i := 0; i < 100; i += 1 { amount := int64(rand.Intn(100) + 100) - nodes[i].AddBalance(amount) + b.ns.Operation(func() { + nodes[i].AddBalance(amount) + }) sum += uint64(amount) } if b.bt.TotalTokenAmount() != sum { diff --git a/les/vflux/server/balance_tracker.go b/les/vflux/server/balance_tracker.go index 1708019de4ca..384b3560ba7b 100644 --- a/les/vflux/server/balance_tracker.go +++ b/les/vflux/server/balance_tracker.go @@ -25,6 +25,7 @@ import ( "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/les/utils" "github.com/ethereum/go-ethereum/p2p/enode" + 
"github.com/ethereum/go-ethereum/p2p/enr" "github.com/ethereum/go-ethereum/p2p/nodestate" ) @@ -40,7 +41,7 @@ type BalanceTrackerSetup struct { PriorityFlag, UpdateFlag nodestate.Flags BalanceField nodestate.Field // external connections - connAddressField, capacityField nodestate.Field + clientField, capacityField nodestate.Field } // NewBalanceTrackerSetup creates a new BalanceTrackerSetup and initializes the fields @@ -59,13 +60,13 @@ func NewBalanceTrackerSetup(setup *nodestate.Setup) BalanceTrackerSetup { } // Connect sets the fields used by BalanceTracker as an input -func (bts *BalanceTrackerSetup) Connect(connAddressField, capacityField nodestate.Field) { - bts.connAddressField = connAddressField +func (bts *BalanceTrackerSetup) Connect(clientField, capacityField nodestate.Field) { + bts.clientField = clientField bts.capacityField = capacityField } // BalanceTracker tracks positive and negative balances for connected nodes. -// After connAddressField is set externally, a NodeBalance is created and previous +// After clientField is set externally, a NodeBalance is created and previous // balance values are loaded from the database. Both balances are exponentially expired // values. Costs are deducted from the positive balance if present, otherwise added to // the negative balance. If the capacity is non-zero then a time cost is applied @@ -86,6 +87,10 @@ type BalanceTracker struct { quit chan struct{} } +type balancePeer interface { + FreeClientId() string +} + // NewBalanceTracker creates a new BalanceTracker func NewBalanceTracker(ns *nodestate.NodeStateMachine, setup BalanceTrackerSetup, db ethdb.KeyValueStore, clock mclock.Clock, posExp, negExp utils.ValueExpirer) *BalanceTracker { ndb := newNodeDB(db, clock) @@ -126,9 +131,9 @@ func NewBalanceTracker(ns *nodestate.NodeStateMachine, setup BalanceTrackerSetup n.deactivate() } }) - ns.SubscribeField(bt.connAddressField, func(node *enode.Node, state nodestate.Flags, oldValue, newValue interface{}) { + ns.SubscribeField(bt.clientField, func(node *enode.Node, state nodestate.Flags, oldValue, newValue interface{}) { if newValue != nil { - ns.SetFieldSub(node, bt.BalanceField, bt.newNodeBalance(node, newValue.(string))) + ns.SetFieldSub(node, bt.BalanceField, bt.newNodeBalance(node, newValue.(balancePeer).FreeClientId(), true)) } else { ns.SetStateSub(node, nodestate.Flags{}, bt.PriorityFlag, 0) if b, _ := ns.GetField(node, bt.BalanceField).(*NodeBalance); b != nil { @@ -227,16 +232,35 @@ func (bt *BalanceTracker) GetExpirationTCs() (pos, neg uint64) { return bt.posExpTC, bt.negExpTC } +// BalanceOperation allows safe operations on the balance of a node regardless of whether +// it is currently connected or not +func (bt *BalanceTracker) BalanceOperation(id enode.ID, negBalanceKey string, cb func(*NodeBalance)) { + bt.ns.Operation(func() { + node := bt.ns.GetNode(id) + var nb *NodeBalance + if node != nil { + nb, _ = bt.ns.GetField(node, bt.BalanceField).(*NodeBalance) + } else { + node = enode.SignNull(&enr.Record{}, id) + } + if nb == nil { + nb = bt.newNodeBalance(node, negBalanceKey, false) + } + cb(nb) + }) +} + // newNodeBalance loads balances from the database and creates a NodeBalance instance // for the given node. It also sets the PriorityFlag and adds balanceCallbackZero if // the node has a positive balance. 
// Note: this function should run inside a NodeStateMachine operation -func (bt *BalanceTracker) newNodeBalance(node *enode.Node, negBalanceKey string) *NodeBalance { +func (bt *BalanceTracker) newNodeBalance(node *enode.Node, negBalanceKey string, setFlags bool) *NodeBalance { pb := bt.ndb.getOrNewBalance(node.ID().Bytes(), false) nb := bt.ndb.getOrNewBalance([]byte(negBalanceKey), true) n := &NodeBalance{ bt: bt, node: node, + setFlags: setFlags, connAddress: negBalanceKey, balance: balance{pos: pb, neg: nb}, initTime: bt.clock.Now(), @@ -245,7 +269,7 @@ func (bt *BalanceTracker) newNodeBalance(node *enode.Node, negBalanceKey string) for i := range n.callbackIndex { n.callbackIndex[i] = -1 } - if n.checkPriorityStatus() { + if setFlags && n.checkPriorityStatus() { n.bt.ns.SetStateSub(n.node, n.bt.PriorityFlag, nodestate.Flags{}, 0) } return n diff --git a/les/vflux/server/clientpool.go b/les/vflux/server/clientpool.go new file mode 100644 index 000000000000..781a043be169 --- /dev/null +++ b/les/vflux/server/clientpool.go @@ -0,0 +1,349 @@ +// Copyright 2019 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package server + +import ( + "errors" + "reflect" + "sync" + "time" + + "github.com/ethereum/go-ethereum/common/mclock" + "github.com/ethereum/go-ethereum/ethdb" + "github.com/ethereum/go-ethereum/les/utils" + "github.com/ethereum/go-ethereum/les/vflux" + "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/metrics" + "github.com/ethereum/go-ethereum/p2p/enode" + "github.com/ethereum/go-ethereum/p2p/nodestate" + "github.com/ethereum/go-ethereum/rlp" +) + +var ( + serverSetup = &nodestate.Setup{} + clientField = serverSetup.NewField("client", reflect.TypeOf(clientPeerInstance{})) + btSetup = NewBalanceTrackerSetup(serverSetup) + ppSetup = NewPriorityPoolSetup(serverSetup) +) + +var ( + errNotConnected = errors.New("client not connected") + errNoPriority = errors.New("priority too low to raise capacity") + errCantFindMaximum = errors.New("Unable to find maximum allowed capacity") +) + +func init() { + btSetup.Connect(clientField, ppSetup.CapacityField) + ppSetup.Connect(btSetup.BalanceField, btSetup.UpdateFlag) // NodeBalance implements nodePriority +} + +// ClientPool implements a client database that assigns a priority to each client +// based on a positive and negative balance. Positive balance is externally assigned +// to prioritized clients and is decreased with connection time and processed +// requests (unless the price factors are zero). If the positive balance is zero +// then negative balance is accumulated. +// +// Balance tracking and priority calculation for connected clients is done by +// BalanceTracker. 
activeQueue ensures that clients with the lowest positive or
+// highest negative balance get evicted when the total capacity allowance is full
+// and new clients with a better balance want to connect.
+//
+// Already connected nodes receive a small bias in their favor in order to avoid
+// accepting and instantly kicking out clients. In theory, we try to ensure that
+// each client can have several minutes of connection time.
+//
+// Balances of disconnected clients are stored in nodeDB including positive balance
+// and negative balance. Both positive balance and negative balance will decrease
+// exponentially. If the balance is low enough, then the record will be dropped.
+type ClientPool struct {
+	*PriorityPool
+	*BalanceTracker
+	clock  mclock.Clock
+	synced func() bool
+	closed bool
+	ns     *nodestate.NodeStateMachine
+
+	lock                                 sync.RWMutex
+	defaultPosFactors, defaultNegFactors PriceFactors
+	connectedBias                        time.Duration
+
+	minCap     uint64      // the minimal capacity value allowed for any client
+	capReqNode *enode.Node // node that is requesting capacity change; only used inside NSM operation
+}
+
+// clientPeer represents a peer in the client pool. None of the callbacks should block.
+type clientPeer interface {
+	Node() *enode.Node
+	FreeClientId() string                         // unique id for non-priority clients (typically a prefix of the network address)
+	InactiveTimeout() time.Duration               // disconnection timeout for inactive non-priority peers
+	UpdateCapacity(newCap uint64, requested bool) // signals a capacity update (requested is true if it is a result of a SetCapacity call on the given peer)
+	Disconnect()                                  // initiates disconnection (Unregister should always be called)
+}
+
+type clientPeerInstance struct{ clientPeer } // the NodeStateMachine type system needs this wrapper
+
+// NewClientPool creates a new client pool
+func NewClientPool(balanceDb ethdb.KeyValueStore, minCap uint64, connectedBias time.Duration, clock mclock.Clock, synced func() bool) *ClientPool {
+	ns := nodestate.NewNodeStateMachine(nil, nil, clock, serverSetup)
+	cp := &ClientPool{
+		ns:             ns,
+		BalanceTracker: NewBalanceTracker(ns, btSetup, balanceDb, clock, &utils.Expirer{}, &utils.Expirer{}),
+		PriorityPool:   NewPriorityPool(ns, ppSetup, clock, minCap, connectedBias, 4),
+		clock:          clock,
+		synced:         synced,
+		minCap:         minCap,
+		connectedBias:  connectedBias,
+	}
+
+	ns.SubscribeState(nodestate.MergeFlags(ppSetup.ActiveFlag, ppSetup.InactiveFlag, btSetup.PriorityFlag), func(node *enode.Node, oldState, newState nodestate.Flags) {
+		if newState.Equals(ppSetup.InactiveFlag) {
+			// set timeout for non-priority inactive client
+			var timeout time.Duration
+			if c, ok := ns.GetField(node, clientField).(clientPeer); ok {
+				timeout = c.InactiveTimeout()
+			}
+			if timeout > 0 {
+				ns.AddTimeout(node, ppSetup.InactiveFlag, timeout)
+			} else {
+				// Note: if capacity is immediately available then PriorityPool will set the active
+				// flag simultaneously with removing the inactive flag and therefore this will not
+				// initiate disconnection
+				ns.SetStateSub(node, nodestate.Flags{}, ppSetup.InactiveFlag, 0)
+			}
+		}
+		if oldState.Equals(ppSetup.InactiveFlag) && newState.Equals(ppSetup.InactiveFlag.Or(btSetup.PriorityFlag)) {
+			ns.SetStateSub(node, ppSetup.InactiveFlag, nodestate.Flags{}, 0) // priority gained; remove timeout
+		}
+		if newState.Equals(ppSetup.ActiveFlag) {
+			// active with no priority; limit capacity to minCap
+			cap, _ := ns.GetField(node, ppSetup.CapacityField).(uint64)
+			if cap > minCap {
+				cp.RequestCapacity(node, minCap, 0, true)
+			}
+		}
+		if newState.Equals(nodestate.Flags{}) {
+			if c, ok := ns.GetField(node, clientField).(clientPeer); ok {
+				c.Disconnect()
+			}
+		}
+	})
+
+	ns.SubscribeField(btSetup.BalanceField, func(node *enode.Node, state nodestate.Flags, oldValue, newValue interface{}) {
+		if newValue != nil {
+			ns.SetStateSub(node, ppSetup.InactiveFlag, nodestate.Flags{}, 0)
+			cp.lock.RLock()
+			newValue.(*NodeBalance).SetPriceFactors(cp.defaultPosFactors, cp.defaultNegFactors)
+			cp.lock.RUnlock()
+		}
+	})
+
+	ns.SubscribeField(ppSetup.CapacityField, func(node *enode.Node, state nodestate.Flags, oldValue, newValue interface{}) {
+		if c, ok := ns.GetField(node, clientField).(clientPeer); ok {
+			newCap, _ := newValue.(uint64)
+			c.UpdateCapacity(newCap, node == cp.capReqNode)
+		}
+	})
+	return cp
+}
+
+// AddMetrics adds metrics to the client pool. Should be called before Start().
+func (cp *ClientPool) AddMetrics(totalConnectedGauge metrics.Gauge,
+	clientConnectedMeter, clientDisconnectedMeter, clientActivatedMeter, clientDeactivatedMeter metrics.Meter) {
+	cp.ns.SubscribeState(nodestate.MergeFlags(ppSetup.ActiveFlag, ppSetup.InactiveFlag), func(node *enode.Node, oldState, newState nodestate.Flags) {
+		if oldState.IsEmpty() && !newState.IsEmpty() {
+			clientConnectedMeter.Mark(1)
+		}
+		if !oldState.IsEmpty() && newState.IsEmpty() {
+			clientDisconnectedMeter.Mark(1)
+		}
+		if oldState.HasNone(ppSetup.ActiveFlag) && newState.HasAll(ppSetup.ActiveFlag) {
+			clientActivatedMeter.Mark(1)
+		}
+		if oldState.HasAll(ppSetup.ActiveFlag) && newState.HasNone(ppSetup.ActiveFlag) {
+			clientDeactivatedMeter.Mark(1)
+		}
+		_, connected := cp.Active()
+		totalConnectedGauge.Update(int64(connected))
+	})
+}
+
+// Start starts the client pool. Should be called before Register/Unregister.
+func (cp *ClientPool) Start() {
+	cp.ns.Start()
+}
+
+// Stop shuts the client pool down. The clientPeer interface callbacks will not be called
+// after Stop. Register calls will return nil.
+func (cp *ClientPool) Stop() {
+	cp.BalanceTracker.Stop()
+	cp.ns.Stop()
+}
+
+// Register registers the peer into the client pool. If the peer has insufficient
+// priority and remains inactive for longer than the allowed timeout then it will be
+// disconnected by calling the Disconnect function of the clientPeer interface.
+func (cp *ClientPool) Register(peer clientPeer) *NodeBalance {
+	cp.ns.SetField(peer.Node(), clientField, clientPeerInstance{peer})
+	balance, _ := cp.ns.GetField(peer.Node(), btSetup.BalanceField).(*NodeBalance)
+	return balance
+}
+
+// Unregister removes the peer from the client pool
+func (cp *ClientPool) Unregister(peer clientPeer) {
+	cp.ns.SetField(peer.Node(), clientField, nil)
+}
+
+// SetDefaultFactors sets the default price factors applied to subsequently connected clients
+func (cp *ClientPool) SetDefaultFactors(posFactors, negFactors PriceFactors) {
+	cp.lock.Lock()
+	cp.defaultPosFactors = posFactors
+	cp.defaultNegFactors = negFactors
+	cp.lock.Unlock()
+}
+
+// SetConnectedBias sets the connection bias, which is applied to already connected clients
+// so that they are not kicked out immediately and each connected client gets enough time
+// to request or sync some data.
+func (cp *ClientPool) SetConnectedBias(bias time.Duration) { + cp.lock.Lock() + cp.connectedBias = bias + cp.SetActiveBias(bias) + cp.lock.Unlock() +} + +// SetCapacity sets the assigned capacity of a connected client +func (cp *ClientPool) SetCapacity(node *enode.Node, reqCap uint64, bias time.Duration, requested bool) (capacity uint64, err error) { + cp.lock.RLock() + if cp.connectedBias > bias { + bias = cp.connectedBias + } + cp.lock.RUnlock() + + cp.ns.Operation(func() { + balance, _ := cp.ns.GetField(node, btSetup.BalanceField).(*NodeBalance) + if balance == nil { + err = errNotConnected + return + } + capacity, _ = cp.ns.GetField(node, ppSetup.CapacityField).(uint64) + if capacity == 0 { + // if the client is inactive then it has insufficient priority for the minimal capacity + // (will be activated automatically with minCap when possible) + return + } + if reqCap < cp.minCap { + // can't request less than minCap; switching between 0 (inactive state) and minCap is + // performed by the server automatically as soon as necessary/possible + reqCap = cp.minCap + } + if reqCap > cp.minCap { + if cp.ns.GetState(node).HasNone(btSetup.PriorityFlag) && reqCap > cp.minCap { + err = errNoPriority + return + } + } + if reqCap == capacity { + return + } + curveBias := bias + if requested { + // mark the requested node so that the UpdateCapacity callback can signal + // whether the update is the direct result of a SetCapacity call on the given node + cp.capReqNode = node + defer func() { + cp.capReqNode = nil + }() + } + + // estimate maximum available capacity at the current priority level and request + // the estimated amount; allow a limited number of retries because individual + // balances can change between the estimation and the request + for count := 0; count < 100; count++ { + // apply a small extra bias to ensure that the request won't fail because of rounding errors + curveBias += time.Second * 10 + tryCap := reqCap + if reqCap > capacity { + curve := cp.GetCapacityCurve().Exclude(node.ID()) + tryCap = curve.MaxCapacity(func(capacity uint64) int64 { + return balance.EstimatePriority(capacity, 0, 0, curveBias, false) + }) + if tryCap <= capacity { + return + } + if tryCap > reqCap { + tryCap = reqCap + } + } + if _, allowed := cp.RequestCapacity(node, tryCap, bias, true); allowed { + capacity = tryCap + return + } + } + // we should be able to find the maximum allowed capacity in a few iterations + log.Error("Unable to find maximum allowed capacity") + err = errCantFindMaximum + }) + return +} + +// serveCapQuery serves a vflux capacity query. It receives multiple token amount values +// and a bias time value. For each given token amount it calculates the maximum achievable +// capacity in case the amount is added to the balance. 
+func (cp *ClientPool) serveCapQuery(id enode.ID, freeID string, data []byte) []byte { + var req vflux.CapacityQueryReq + if rlp.DecodeBytes(data, &req) != nil { + return nil + } + if l := len(req.AddTokens); l == 0 || l > vflux.CapacityQueryMaxLen { + return nil + } + bias := time.Second * time.Duration(req.Bias) + cp.lock.RLock() + if cp.connectedBias > bias { + bias = cp.connectedBias + } + cp.lock.RUnlock() + + // use CapacityCurve to answer request for multiple newly bought token amounts + curve := cp.GetCapacityCurve().Exclude(id) + result := make(vflux.CapacityQueryReply, len(req.AddTokens)) + cp.BalanceOperation(id, freeID, func(balance *NodeBalance) { + pb, _ := balance.GetBalance() + for i, addTokens := range req.AddTokens { + add := addTokens.Int64() + result[i] = curve.MaxCapacity(func(capacity uint64) int64 { + return balance.EstimatePriority(capacity, add, 0, bias, false) / int64(capacity) + }) + if add <= 0 && uint64(-add) >= pb && result[i] > cp.minCap { + result[i] = cp.minCap + } + if result[i] < cp.minCap { + result[i] = 0 + } + } + }) + reply, _ := rlp.EncodeToBytes(&result) + return reply +} + +// Handle implements Service +func (cp *ClientPool) Handle(id enode.ID, address string, name string, data []byte) []byte { + switch name { + case vflux.CapacityQueryName: + return cp.serveCapQuery(id, address, data) + default: + return nil + } +} diff --git a/les/clientpool_test.go b/les/vflux/server/clientpool_test.go similarity index 61% rename from les/clientpool_test.go rename to les/vflux/server/clientpool_test.go index 2aee444545a3..b50e18869025 100644 --- a/les/clientpool_test.go +++ b/les/vflux/server/clientpool_test.go @@ -14,7 +14,7 @@ // You should have received a copy of the GNU Lesser General Public License // along with the go-ethereum library. If not, see . 
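[Editorial sketch, not part of the diff] The methods added to ClientPool above (Register, Unregister, SetDefaultFactors, SetConnectedBias, SetCapacity and the vflux Handle entry point) are the whole surface a server needs from the relocated pool. The snippet below is one plausible wiring from the caller's side; stubPeer, the "synced" callback and all concrete numbers are illustrative assumptions, and only the vfs.ClientPool calls mirror the code added above.

package lesexample // illustrative only, not part of this patch

import (
	"time"

	"github.com/ethereum/go-ethereum/common/mclock"
	"github.com/ethereum/go-ethereum/core/rawdb"
	vfs "github.com/ethereum/go-ethereum/les/vflux/server"
	"github.com/ethereum/go-ethereum/p2p/enode"
	"github.com/ethereum/go-ethereum/p2p/enr"
)

// stubPeer is a minimal clientPeer implementation, in the spirit of the
// poolTestPeer type defined in the test file that follows.
type stubPeer struct{ node *enode.Node }

func (p stubPeer) Node() *enode.Node                              { return p.node }
func (p stubPeer) FreeClientId() string                           { return "stub" }
func (p stubPeer) InactiveTimeout() time.Duration                 { return 10 * time.Second }
func (p stubPeer) UpdateCapacity(capacity uint64, requested bool) {}
func (p stubPeer) Disconnect()                                    {}

func examplePoolWiring() {
	// The last constructor argument is assumed to be the "server is synced" callback.
	pool := vfs.NewClientPool(rawdb.NewMemoryDatabase(), 100, time.Minute, mclock.System{}, func() bool { return true })
	pool.Start()
	defer pool.Stop()

	pool.SetLimits(100, 100*1000) // maximum active peer count and total active capacity
	pool.SetConnectedBias(time.Minute)
	pool.SetDefaultFactors(
		vfs.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1},
		vfs.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1},
	)

	p := stubPeer{node: enode.SignNull(&enr.Record{}, enode.ID{1})}
	balance := pool.Register(p) // per-node balance handle; nil if the pool was already stopped
	_ = balance
	// A client that has gained priority status may later request more than the minimal capacity:
	// pool.SetCapacity(p.Node(), 400, 0, true)
	pool.Unregister(p)
}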
-package les +package server import ( "fmt" @@ -24,12 +24,13 @@ import ( "github.com/ethereum/go-ethereum/common/mclock" "github.com/ethereum/go-ethereum/core/rawdb" - vfs "github.com/ethereum/go-ethereum/les/vflux/server" "github.com/ethereum/go-ethereum/p2p/enode" "github.com/ethereum/go-ethereum/p2p/enr" "github.com/ethereum/go-ethereum/p2p/nodestate" ) +const defaultConnectedBias = time.Minute * 3 + func TestClientPoolL10C100Free(t *testing.T) { testClientPool(t, 10, 100, 0, true) } @@ -64,11 +65,6 @@ type poolTestPeer struct { inactiveAllowed bool } -func testStateMachine() *nodestate.NodeStateMachine { - return nodestate.NewNodeStateMachine(nil, nil, mclock.System{}, serverSetup) - -} - func newPoolTestPeer(i int, disconnCh chan int) *poolTestPeer { return &poolTestPeer{ index: i, @@ -81,36 +77,39 @@ func (i *poolTestPeer) Node() *enode.Node { return i.node } -func (i *poolTestPeer) freeClientId() string { +func (i *poolTestPeer) FreeClientId() string { return fmt.Sprintf("addr #%d", i.index) } -func (i *poolTestPeer) updateCapacity(cap uint64) { - i.cap = cap +func (i *poolTestPeer) InactiveTimeout() time.Duration { + if i.inactiveAllowed { + return time.Second * 10 + } + return 0 } -func (i *poolTestPeer) freeze() {} - -func (i *poolTestPeer) allowInactive() bool { - return i.inactiveAllowed +func (i *poolTestPeer) UpdateCapacity(capacity uint64, requested bool) { + i.cap = capacity } -func getBalance(pool *clientPool, p *poolTestPeer) (pos, neg uint64) { - temp := pool.ns.GetField(p.node, clientInfoField) == nil - if temp { - pool.ns.SetField(p.node, connAddressField, p.freeClientId()) - } - n, _ := pool.ns.GetField(p.node, pool.BalanceField).(*vfs.NodeBalance) - pos, neg = n.GetBalance() - if temp { - pool.ns.SetField(p.node, connAddressField, nil) +func (i *poolTestPeer) Disconnect() { + if i.disconnCh == nil { + return } + id := i.node.ID() + i.disconnCh <- int(id[0]) + int(id[1])<<8 +} + +func getBalance(pool *ClientPool, p *poolTestPeer) (pos, neg uint64) { + pool.BalanceOperation(p.node.ID(), p.FreeClientId(), func(nb *NodeBalance) { + pos, neg = nb.GetBalance() + }) return } -func addBalance(pool *clientPool, id enode.ID, amount int64) { - pool.forClients([]enode.ID{id}, func(c *clientInfo) { - c.balance.AddBalance(amount) +func addBalance(pool *ClientPool, id enode.ID, amount int64) { + pool.BalanceOperation(id, "", func(nb *NodeBalance) { + nb.AddBalance(amount) }) } @@ -122,6 +121,15 @@ func checkDiff(a, b uint64) bool { return a > b+maxDiff || b > a+maxDiff } +func connect(pool *ClientPool, peer *poolTestPeer) uint64 { + pool.Register(peer) + return peer.cap +} + +func disconnect(pool *ClientPool, peer *poolTestPeer) { + pool.Unregister(peer) +} + func testClientPool(t *testing.T, activeLimit, clientCount, paidCount int, randomDisconnect bool) { rand.Seed(time.Now().UnixNano()) var ( @@ -130,19 +138,17 @@ func testClientPool(t *testing.T, activeLimit, clientCount, paidCount int, rando connected = make([]bool, clientCount) connTicks = make([]int, clientCount) disconnCh = make(chan int, clientCount) - disconnFn = func(id enode.ID) { - disconnCh <- int(id[0]) + int(id[1])<<8 - } - pool = newClientPool(testStateMachine(), db, 1, 0, &clock, disconnFn, alwaysTrueFn) + pool = NewClientPool(db, 1, 0, &clock, alwaysTrueFn) ) - pool.ns.Start() + pool.Start() + pool.SetExpirationTCs(0, 1000) - pool.setLimits(activeLimit, uint64(activeLimit)) - pool.setDefaultFactors(vfs.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, vfs.PriceFactors{TimeFactor: 1, 
CapacityFactor: 0, RequestFactor: 1}) + pool.SetLimits(uint64(activeLimit), uint64(activeLimit)) + pool.SetDefaultFactors(PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}) // pool should accept new peers up to its connected limit for i := 0; i < activeLimit; i++ { - if cap, _ := pool.connect(newPoolTestPeer(i, disconnCh)); cap != 0 { + if cap := connect(pool, newPoolTestPeer(i, disconnCh)); cap != 0 { connected[i] = true } else { t.Fatalf("Test peer #%d rejected", i) @@ -163,23 +169,23 @@ func testClientPool(t *testing.T, activeLimit, clientCount, paidCount int, rando i := rand.Intn(clientCount) if connected[i] { if randomDisconnect { - pool.disconnect(newPoolTestPeer(i, disconnCh)) + disconnect(pool, newPoolTestPeer(i, disconnCh)) connected[i] = false connTicks[i] += tickCounter } } else { - if cap, _ := pool.connect(newPoolTestPeer(i, disconnCh)); cap != 0 { + if cap := connect(pool, newPoolTestPeer(i, disconnCh)); cap != 0 { connected[i] = true connTicks[i] -= tickCounter } else { - pool.disconnect(newPoolTestPeer(i, disconnCh)) + disconnect(pool, newPoolTestPeer(i, disconnCh)) } } pollDisconnects: for { select { case i := <-disconnCh: - pool.disconnect(newPoolTestPeer(i, disconnCh)) + disconnect(pool, newPoolTestPeer(i, disconnCh)) if connected[i] { connTicks[i] += tickCounter connected[i] = false @@ -211,18 +217,18 @@ func testClientPool(t *testing.T, activeLimit, clientCount, paidCount int, rando t.Errorf("Total connected time of test node #%d (%d) outside expected range (%d to %d)", i, connTicks[i], min, max) } } - pool.stop() + pool.Stop() } -func testPriorityConnect(t *testing.T, pool *clientPool, p *poolTestPeer, cap uint64, expSuccess bool) { - if cap, _ := pool.connect(p); cap == 0 { +func testPriorityConnect(t *testing.T, pool *ClientPool, p *poolTestPeer, cap uint64, expSuccess bool) { + if cap := connect(pool, p); cap == 0 { if expSuccess { t.Fatalf("Failed to connect paid client") } else { return } } - if _, err := pool.setCapacity(p.node, "", cap, defaultConnectedBias, true); err != nil { + if newCap, _ := pool.SetCapacity(p.node, cap, defaultConnectedBias, true); newCap != cap { if expSuccess { t.Fatalf("Failed to raise capacity of paid client") } else { @@ -239,11 +245,11 @@ func TestConnectPaidClient(t *testing.T) { clock mclock.Simulated db = rawdb.NewMemoryDatabase() ) - pool := newClientPool(testStateMachine(), db, 1, defaultConnectedBias, &clock, func(id enode.ID) {}, alwaysTrueFn) - pool.ns.Start() - defer pool.stop() - pool.setLimits(10, uint64(10)) - pool.setDefaultFactors(vfs.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, vfs.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}) + pool := NewClientPool(db, 1, defaultConnectedBias, &clock, alwaysTrueFn) + pool.Start() + defer pool.Stop() + pool.SetLimits(10, uint64(10)) + pool.SetDefaultFactors(PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}) // Add balance for an external client and mark it as paid client addBalance(pool, newPoolTestPeer(0, nil).node.ID(), int64(time.Minute)) @@ -255,11 +261,11 @@ func TestConnectPaidClientToSmallPool(t *testing.T) { clock mclock.Simulated db = rawdb.NewMemoryDatabase() ) - pool := newClientPool(testStateMachine(), db, 1, defaultConnectedBias, &clock, func(id enode.ID) {}, alwaysTrueFn) - pool.ns.Start() - defer pool.stop() - pool.setLimits(10, uint64(10)) // Total capacity limit is 10 
- pool.setDefaultFactors(vfs.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, vfs.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}) + pool := NewClientPool(db, 1, defaultConnectedBias, &clock, alwaysTrueFn) + pool.Start() + defer pool.Stop() + pool.SetLimits(10, uint64(10)) // Total capacity limit is 10 + pool.SetDefaultFactors(PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}) // Add balance for an external client and mark it as paid client addBalance(pool, newPoolTestPeer(0, nil).node.ID(), int64(time.Minute)) @@ -273,24 +279,23 @@ func TestConnectPaidClientToFullPool(t *testing.T) { clock mclock.Simulated db = rawdb.NewMemoryDatabase() ) - removeFn := func(enode.ID) {} // Noop - pool := newClientPool(testStateMachine(), db, 1, defaultConnectedBias, &clock, removeFn, alwaysTrueFn) - pool.ns.Start() - defer pool.stop() - pool.setLimits(10, uint64(10)) // Total capacity limit is 10 - pool.setDefaultFactors(vfs.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, vfs.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}) + pool := NewClientPool(db, 1, defaultConnectedBias, &clock, alwaysTrueFn) + pool.Start() + defer pool.Stop() + pool.SetLimits(10, uint64(10)) // Total capacity limit is 10 + pool.SetDefaultFactors(PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}) for i := 0; i < 10; i++ { addBalance(pool, newPoolTestPeer(i, nil).node.ID(), int64(time.Second*20)) - pool.connect(newPoolTestPeer(i, nil)) + connect(pool, newPoolTestPeer(i, nil)) } addBalance(pool, newPoolTestPeer(11, nil).node.ID(), int64(time.Second*2)) // Add low balance to new paid client - if cap, _ := pool.connect(newPoolTestPeer(11, nil)); cap != 0 { + if cap := connect(pool, newPoolTestPeer(11, nil)); cap != 0 { t.Fatalf("Low balance paid client should be rejected") } clock.Run(time.Second) addBalance(pool, newPoolTestPeer(12, nil).node.ID(), int64(time.Minute*5)) // Add high balance to new paid client - if cap, _ := pool.connect(newPoolTestPeer(12, nil)); cap == 0 { + if cap := connect(pool, newPoolTestPeer(12, nil)); cap == 0 { t.Fatalf("High balance paid client should be accepted") } } @@ -301,23 +306,20 @@ func TestPaidClientKickedOut(t *testing.T) { db = rawdb.NewMemoryDatabase() kickedCh = make(chan int, 100) ) - removeFn := func(id enode.ID) { - kickedCh <- int(id[0]) - } - pool := newClientPool(testStateMachine(), db, 1, defaultConnectedBias, &clock, removeFn, alwaysTrueFn) - pool.ns.Start() - pool.bt.SetExpirationTCs(0, 0) - defer pool.stop() - pool.setLimits(10, uint64(10)) // Total capacity limit is 10 - pool.setDefaultFactors(vfs.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, vfs.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}) + pool := NewClientPool(db, 1, defaultConnectedBias, &clock, alwaysTrueFn) + pool.Start() + pool.SetExpirationTCs(0, 0) + defer pool.Stop() + pool.SetLimits(10, uint64(10)) // Total capacity limit is 10 + pool.SetDefaultFactors(PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}) for i := 0; i < 10; i++ { addBalance(pool, newPoolTestPeer(i, kickedCh).node.ID(), 10000000000) // 10 second allowance - pool.connect(newPoolTestPeer(i, kickedCh)) + connect(pool, newPoolTestPeer(i, kickedCh)) clock.Run(time.Millisecond) } clock.Run(defaultConnectedBias + 
time.Second*11) - if cap, _ := pool.connect(newPoolTestPeer(11, kickedCh)); cap == 0 { + if cap := connect(pool, newPoolTestPeer(11, kickedCh)); cap == 0 { t.Fatalf("Free client should be accepted") } select { @@ -335,12 +337,12 @@ func TestConnectFreeClient(t *testing.T) { clock mclock.Simulated db = rawdb.NewMemoryDatabase() ) - pool := newClientPool(testStateMachine(), db, 1, defaultConnectedBias, &clock, func(id enode.ID) {}, alwaysTrueFn) - pool.ns.Start() - defer pool.stop() - pool.setLimits(10, uint64(10)) - pool.setDefaultFactors(vfs.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, vfs.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}) - if cap, _ := pool.connect(newPoolTestPeer(0, nil)); cap == 0 { + pool := NewClientPool(db, 1, defaultConnectedBias, &clock, alwaysTrueFn) + pool.Start() + defer pool.Stop() + pool.SetLimits(10, uint64(10)) + pool.SetDefaultFactors(PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}) + if cap := connect(pool, newPoolTestPeer(0, nil)); cap == 0 { t.Fatalf("Failed to connect free client") } testPriorityConnect(t, pool, newPoolTestPeer(0, nil), 2, false) @@ -351,26 +353,25 @@ func TestConnectFreeClientToFullPool(t *testing.T) { clock mclock.Simulated db = rawdb.NewMemoryDatabase() ) - removeFn := func(enode.ID) {} // Noop - pool := newClientPool(testStateMachine(), db, 1, defaultConnectedBias, &clock, removeFn, alwaysTrueFn) - pool.ns.Start() - defer pool.stop() - pool.setLimits(10, uint64(10)) // Total capacity limit is 10 - pool.setDefaultFactors(vfs.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, vfs.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}) + pool := NewClientPool(db, 1, defaultConnectedBias, &clock, alwaysTrueFn) + pool.Start() + defer pool.Stop() + pool.SetLimits(10, uint64(10)) // Total capacity limit is 10 + pool.SetDefaultFactors(PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}) for i := 0; i < 10; i++ { - pool.connect(newPoolTestPeer(i, nil)) + connect(pool, newPoolTestPeer(i, nil)) } - if cap, _ := pool.connect(newPoolTestPeer(11, nil)); cap != 0 { + if cap := connect(pool, newPoolTestPeer(11, nil)); cap != 0 { t.Fatalf("New free client should be rejected") } clock.Run(time.Minute) - if cap, _ := pool.connect(newPoolTestPeer(12, nil)); cap != 0 { + if cap := connect(pool, newPoolTestPeer(12, nil)); cap != 0 { t.Fatalf("New free client should be rejected") } clock.Run(time.Millisecond) clock.Run(4 * time.Minute) - if cap, _ := pool.connect(newPoolTestPeer(13, nil)); cap == 0 { + if cap := connect(pool, newPoolTestPeer(13, nil)); cap == 0 { t.Fatalf("Old client connects more than 5min should be kicked") } } @@ -381,18 +382,17 @@ func TestFreeClientKickedOut(t *testing.T) { db = rawdb.NewMemoryDatabase() kicked = make(chan int, 100) ) - removeFn := func(id enode.ID) { kicked <- int(id[0]) } - pool := newClientPool(testStateMachine(), db, 1, defaultConnectedBias, &clock, removeFn, alwaysTrueFn) - pool.ns.Start() - defer pool.stop() - pool.setLimits(10, uint64(10)) // Total capacity limit is 10 - pool.setDefaultFactors(vfs.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, vfs.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}) + pool := NewClientPool(db, 1, defaultConnectedBias, &clock, alwaysTrueFn) + pool.Start() + defer pool.Stop() + pool.SetLimits(10, uint64(10)) // Total capacity 
limit is 10 + pool.SetDefaultFactors(PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}) for i := 0; i < 10; i++ { - pool.connect(newPoolTestPeer(i, kicked)) + connect(pool, newPoolTestPeer(i, kicked)) clock.Run(time.Millisecond) } - if cap, _ := pool.connect(newPoolTestPeer(10, kicked)); cap != 0 { + if cap := connect(pool, newPoolTestPeer(10, kicked)); cap != 0 { t.Fatalf("New free client should be rejected") } select { @@ -400,10 +400,10 @@ func TestFreeClientKickedOut(t *testing.T) { case <-time.NewTimer(time.Second).C: t.Fatalf("timeout") } - pool.disconnect(newPoolTestPeer(10, kicked)) + disconnect(pool, newPoolTestPeer(10, kicked)) clock.Run(5 * time.Minute) for i := 0; i < 10; i++ { - pool.connect(newPoolTestPeer(i+10, kicked)) + connect(pool, newPoolTestPeer(i+10, kicked)) } for i := 0; i < 10; i++ { select { @@ -423,18 +423,17 @@ func TestPositiveBalanceCalculation(t *testing.T) { db = rawdb.NewMemoryDatabase() kicked = make(chan int, 10) ) - removeFn := func(id enode.ID) { kicked <- int(id[0]) } // Noop - pool := newClientPool(testStateMachine(), db, 1, defaultConnectedBias, &clock, removeFn, alwaysTrueFn) - pool.ns.Start() - defer pool.stop() - pool.setLimits(10, uint64(10)) // Total capacity limit is 10 - pool.setDefaultFactors(vfs.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, vfs.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}) + pool := NewClientPool(db, 1, defaultConnectedBias, &clock, alwaysTrueFn) + pool.Start() + defer pool.Stop() + pool.SetLimits(10, uint64(10)) // Total capacity limit is 10 + pool.SetDefaultFactors(PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}) addBalance(pool, newPoolTestPeer(0, kicked).node.ID(), int64(time.Minute*3)) testPriorityConnect(t, pool, newPoolTestPeer(0, kicked), 10, true) clock.Run(time.Minute) - pool.disconnect(newPoolTestPeer(0, kicked)) + disconnect(pool, newPoolTestPeer(0, kicked)) pb, _ := getBalance(pool, newPoolTestPeer(0, kicked)) if checkDiff(pb, uint64(time.Minute*2)) { t.Fatalf("Positive balance mismatch, want %v, got %v", uint64(time.Minute*2), pb) @@ -447,12 +446,11 @@ func TestDowngradePriorityClient(t *testing.T) { db = rawdb.NewMemoryDatabase() kicked = make(chan int, 10) ) - removeFn := func(id enode.ID) { kicked <- int(id[0]) } // Noop - pool := newClientPool(testStateMachine(), db, 1, defaultConnectedBias, &clock, removeFn, alwaysTrueFn) - pool.ns.Start() - defer pool.stop() - pool.setLimits(10, uint64(10)) // Total capacity limit is 10 - pool.setDefaultFactors(vfs.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, vfs.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}) + pool := NewClientPool(db, 1, defaultConnectedBias, &clock, alwaysTrueFn) + pool.Start() + defer pool.Stop() + pool.SetLimits(10, uint64(10)) // Total capacity limit is 10 + pool.SetDefaultFactors(PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}, PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1}) p := newPoolTestPeer(0, kicked) addBalance(pool, p.node.ID(), int64(time.Minute)) @@ -483,30 +481,31 @@ func TestNegativeBalanceCalculation(t *testing.T) { clock mclock.Simulated db = rawdb.NewMemoryDatabase() ) - pool := newClientPool(testStateMachine(), db, 1, defaultConnectedBias, &clock, func(id enode.ID) {}, alwaysTrueFn) - pool.ns.Start() - defer pool.stop() - pool.setLimits(10, uint64(10)) // 
Total capacity limit is 10 - pool.setDefaultFactors(vfs.PriceFactors{TimeFactor: 1e-3, CapacityFactor: 0, RequestFactor: 1}, vfs.PriceFactors{TimeFactor: 1e-3, CapacityFactor: 0, RequestFactor: 1}) + pool := NewClientPool(db, 1, defaultConnectedBias, &clock, alwaysTrueFn) + pool.Start() + defer pool.Stop() + pool.SetExpirationTCs(0, 3600) + pool.SetLimits(10, uint64(10)) // Total capacity limit is 10 + pool.SetDefaultFactors(PriceFactors{TimeFactor: 1e-3, CapacityFactor: 0, RequestFactor: 1}, PriceFactors{TimeFactor: 1e-3, CapacityFactor: 0, RequestFactor: 1}) for i := 0; i < 10; i++ { - pool.connect(newPoolTestPeer(i, nil)) + connect(pool, newPoolTestPeer(i, nil)) } clock.Run(time.Second) for i := 0; i < 10; i++ { - pool.disconnect(newPoolTestPeer(i, nil)) + disconnect(pool, newPoolTestPeer(i, nil)) _, nb := getBalance(pool, newPoolTestPeer(i, nil)) if nb != 0 { t.Fatalf("Short connection shouldn't be recorded") } } for i := 0; i < 10; i++ { - pool.connect(newPoolTestPeer(i, nil)) + connect(pool, newPoolTestPeer(i, nil)) } clock.Run(time.Minute) for i := 0; i < 10; i++ { - pool.disconnect(newPoolTestPeer(i, nil)) + disconnect(pool, newPoolTestPeer(i, nil)) _, nb := getBalance(pool, newPoolTestPeer(i, nil)) exp := uint64(time.Minute) / 1000 exp -= exp / 120 // correct for negative balance expiration @@ -521,10 +520,10 @@ func TestInactiveClient(t *testing.T) { clock mclock.Simulated db = rawdb.NewMemoryDatabase() ) - pool := newClientPool(testStateMachine(), db, 1, defaultConnectedBias, &clock, func(id enode.ID) {}, alwaysTrueFn) - pool.ns.Start() - defer pool.stop() - pool.setLimits(2, uint64(2)) + pool := NewClientPool(db, 1, defaultConnectedBias, &clock, alwaysTrueFn) + pool.Start() + defer pool.Stop() + pool.SetLimits(2, uint64(2)) p1 := newPoolTestPeer(1, nil) p1.inactiveAllowed = true @@ -535,15 +534,15 @@ func TestInactiveClient(t *testing.T) { addBalance(pool, p1.node.ID(), 1000*int64(time.Second)) addBalance(pool, p3.node.ID(), 2000*int64(time.Second)) // p1: 1000 p2: 0 p3: 2000 - p1.cap, _ = pool.connect(p1) + p1.cap = connect(pool, p1) if p1.cap != 1 { t.Fatalf("Failed to connect peer #1") } - p2.cap, _ = pool.connect(p2) + p2.cap = connect(pool, p2) if p2.cap != 1 { t.Fatalf("Failed to connect peer #2") } - p3.cap, _ = pool.connect(p3) + p3.cap = connect(pool, p3) if p3.cap != 1 { t.Fatalf("Failed to connect peer #3") } @@ -566,11 +565,11 @@ func TestInactiveClient(t *testing.T) { if p2.cap != 0 { t.Fatalf("Failed to deactivate peer #2") } - pool.setDefaultFactors(vfs.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 0}, vfs.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 0}) + pool.SetDefaultFactors(PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 0}, PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 0}) p4 := newPoolTestPeer(4, nil) addBalance(pool, p4.node.ID(), 1500*int64(time.Second)) // p1: 1000 p2: 500 p3: 2000 p4: 1500 - p4.cap, _ = pool.connect(p4) + p4.cap = connect(pool, p4) if p4.cap != 1 { t.Fatalf("Failed to activate peer #4") } @@ -579,8 +578,8 @@ func TestInactiveClient(t *testing.T) { } clock.Run(time.Second * 600) // manually trigger a check to avoid a long real-time wait - pool.ns.SetState(p1.node, pool.UpdateFlag, nodestate.Flags{}, 0) - pool.ns.SetState(p1.node, nodestate.Flags{}, pool.UpdateFlag, 0) + pool.ns.SetState(p1.node, btSetup.UpdateFlag, nodestate.Flags{}, 0) + pool.ns.SetState(p1.node, nodestate.Flags{}, btSetup.UpdateFlag, 0) // p1: 1000 p2: 500 p3: 2000 p4: 900 if p1.cap != 1 { t.Fatalf("Failed 
to activate peer #1") @@ -588,8 +587,8 @@ func TestInactiveClient(t *testing.T) { if p4.cap != 0 { t.Fatalf("Failed to deactivate peer #4") } - pool.disconnect(p2) - pool.disconnect(p4) + disconnect(pool, p2) + disconnect(pool, p4) addBalance(pool, p1.node.ID(), -1000*int64(time.Second)) if p1.cap != 1 { t.Fatalf("Should not deactivate peer #1") diff --git a/les/vflux/server/prioritypool.go b/les/vflux/server/prioritypool.go index e940ac7c657a..fa76252c3178 100644 --- a/les/vflux/server/prioritypool.go +++ b/les/vflux/server/prioritypool.go @@ -268,6 +268,14 @@ func (pp *PriorityPool) Active() (uint64, uint64) { return pp.activeCount, pp.activeCap } +// Limits returns the maximum allowed number and total capacity of active nodes +func (pp *PriorityPool) Limits() (uint64, uint64) { + pp.lock.Lock() + defer pp.lock.Unlock() + + return pp.maxCount, pp.maxCap +} + // inactiveSetIndex callback updates ppNodeInfo item index in inactiveQueue func inactiveSetIndex(a interface{}, index int) { a.(*ppNodeInfo).inactiveIndex = index diff --git a/les/vflux/server/service.go b/les/vflux/server/service.go index ab759ae441d0..80a0f4754372 100644 --- a/les/vflux/server/service.go +++ b/les/vflux/server/service.go @@ -40,7 +40,6 @@ type ( // Service is a service registered at the Server and identified by a string id Service interface { - ServiceInfo() (id, desc string) // only called during registration Handle(id enode.ID, address string, name string, data []byte) []byte // never called concurrently } @@ -60,9 +59,8 @@ func NewServer(delayPerRequest time.Duration) *Server { } // Register registers a Service -func (s *Server) Register(b Service) { - srv := &serviceEntry{backend: b} - srv.id, srv.desc = b.ServiceInfo() +func (s *Server) Register(b Service, id, desc string) { + srv := &serviceEntry{backend: b, id: id, desc: desc} if strings.Contains(srv.id, ":") { // srv.id + ":" will be used as a service database prefix log.Error("Service ID contains ':'", "id", srv.id) diff --git a/p2p/nodestate/nodestate.go b/p2p/nodestate/nodestate.go index d3166f1d873a..9323d53cbd46 100644 --- a/p2p/nodestate/nodestate.go +++ b/p2p/nodestate/nodestate.go @@ -858,6 +858,23 @@ func (ns *NodeStateMachine) GetField(n *enode.Node, field Field) interface{} { return nil } +// GetState retrieves the current state of the given node. Note that when used in a +// subscription callback the result can be out of sync with the state change represented +// by the callback parameters so extra safety checks might be necessary. 
+func (ns *NodeStateMachine) GetState(n *enode.Node) Flags { + ns.lock.Lock() + defer ns.lock.Unlock() + + ns.checkStarted() + if ns.closed { + return Flags{} + } + if _, node := ns.updateEnode(n); node != nil { + return Flags{mask: node.state, setup: ns.setup} + } + return Flags{} +} + // SetField sets the given field of the given node and blocks until the operation is finished func (ns *NodeStateMachine) SetField(n *enode.Node, field Field, value interface{}) error { ns.lock.Lock() From 5409e4c33f633ba840daedf5fd05888daca176db Mon Sep 17 00:00:00 2001 From: Zsolt Felfoldi Date: Tue, 16 Mar 2021 23:34:31 +0100 Subject: [PATCH 02/27] les/vflux/server: un-expose NodeBalance, remove unused fn, fix bugs --- les/api.go | 8 +- les/peer.go | 2 +- les/vflux/server/balance.go | 186 ++++++++++++++-------------- les/vflux/server/balance_test.go | 37 +----- les/vflux/server/balance_tracker.go | 30 ++--- les/vflux/server/clientpool.go | 26 ++-- les/vflux/server/clientpool_test.go | 4 +- les/vflux/server/prioritypool.go | 13 +- 8 files changed, 134 insertions(+), 172 deletions(-) diff --git a/les/api.go b/les/api.go index f570ffa98f8d..782bb31ef29a 100644 --- a/les/api.go +++ b/les/api.go @@ -88,7 +88,7 @@ func (api *PrivateLightServerAPI) ClientInfo(nodes []string) map[enode.ID]map[st if peer := api.server.peers.peer(id); peer != nil { res[id] = api.clientInfo(peer, peer.balance) } else { - api.server.clientPool.BalanceOperation(id, "", func(balance *vfs.NodeBalance) { + api.server.clientPool.BalanceOperation(id, "", func(balance vfs.AtomicBalanceOperator) { res[id] = api.clientInfo(nil, balance) }) } @@ -113,7 +113,7 @@ func (api *PrivateLightServerAPI) PriorityClientInfo(start, stop enode.ID, maxCo if peer := api.server.peers.peer(id); peer != nil { res[id] = api.clientInfo(peer, peer.balance) } else { - api.server.clientPool.BalanceOperation(id, "", func(balance *vfs.NodeBalance) { + api.server.clientPool.BalanceOperation(id, "", func(balance vfs.AtomicBalanceOperator) { res[id] = api.clientInfo(nil, balance) }) } @@ -122,7 +122,7 @@ func (api *PrivateLightServerAPI) PriorityClientInfo(start, stop enode.ID, maxCo } // clientInfo creates a client info data structure -func (api *PrivateLightServerAPI) clientInfo(peer *clientPeer, balance *vfs.NodeBalance) map[string]interface{} { +func (api *PrivateLightServerAPI) clientInfo(peer *clientPeer, balance vfs.ReadOnlyBalance) map[string]interface{} { info := make(map[string]interface{}) pb, nb := balance.GetBalance() info["isConnected"] = peer != nil @@ -242,7 +242,7 @@ func (api *PrivateLightServerAPI) AddBalance(node string, amount int64) (balance if id, err = parseNode(node); err != nil { return } - api.server.clientPool.BalanceOperation(id, "", func(nb *vfs.NodeBalance) { + api.server.clientPool.BalanceOperation(id, "", func(nb vfs.AtomicBalanceOperator) { balance[0], balance[1], err = nb.AddBalance(amount) }) return diff --git a/les/peer.go b/les/peer.go index 648ed1e18652..8c8196b08835 100644 --- a/les/peer.go +++ b/les/peer.go @@ -764,7 +764,7 @@ type clientPeer struct { responseLock sync.Mutex responseCount uint64 // Counter to generate an unique id for request processing. - balance *vfs.NodeBalance + balance vfs.ConnectedBalance // invalidLock is used for protecting invalidCount. 
invalidLock sync.RWMutex diff --git a/les/vflux/server/balance.go b/les/vflux/server/balance.go index 7a677e65bcf3..4db789b01d9d 100644 --- a/les/vflux/server/balance.go +++ b/les/vflux/server/balance.go @@ -52,10 +52,46 @@ func (p PriceFactors) timePrice(cap uint64) float64 { return p.TimeFactor + float64(cap)*p.CapacityFactor/1000000 } -// NodeBalance keeps track of the positive and negative balances of a connected +type ( + // nodePriority interface provides current and estimated future priorities on demand + nodePriority interface { + // Priority should return the current priority of the node (higher is better) + Priority(cap uint64) int64 + // EstMinPriority should return a lower estimate for the minimum of the node priority + // value starting from the current moment until the given time. If the priority goes + // under the returned estimate before the specified moment then it is the caller's + // responsibility to signal with updateFlag. + EstimatePriority(cap uint64, addBalance int64, future, bias time.Duration, update bool) int64 + } + + // ReadOnlyBalance provides read-only operations on the node balance + ReadOnlyBalance interface { + nodePriority + GetBalance() (uint64, uint64) + GetRawBalance() (utils.ExpiredValue, utils.ExpiredValue) + GetPriceFactors() (posFactor, negFactor PriceFactors) + } + + // ConnectedBalance provides operations permitted on connected nodes (non-read-only + // operations are not permitted inside a BalanceOperation) + ConnectedBalance interface { + ReadOnlyBalance + SetPriceFactors(posFactor, negFactor PriceFactors) + RequestServed(cost uint64) uint64 + } + + // AtomicBalanceOperator provides operations permitted in an atomic BalanceOperation + AtomicBalanceOperator interface { + ReadOnlyBalance + AddBalance(amount int64) (uint64, uint64, error) + SetBalance(pos, neg uint64) error + } +) + +// nodeBalance keeps track of the positive and negative balances of a connected // client and calculates actual and projected future priority values. // Implements nodePriority interface. -type NodeBalance struct { +type nodeBalance struct { bt *BalanceTracker lock sync.RWMutex node *enode.Node @@ -89,7 +125,7 @@ type balanceCallback struct { } // GetBalance returns the current positive and negative balance. -func (n *NodeBalance) GetBalance() (uint64, uint64) { +func (n *nodeBalance) GetBalance() (uint64, uint64) { n.lock.Lock() defer n.lock.Unlock() @@ -100,7 +136,7 @@ func (n *NodeBalance) GetBalance() (uint64, uint64) { // GetRawBalance returns the current positive and negative balance // but in the raw(expired value) format. -func (n *NodeBalance) GetRawBalance() (utils.ExpiredValue, utils.ExpiredValue) { +func (n *nodeBalance) GetRawBalance() (utils.ExpiredValue, utils.ExpiredValue) { n.lock.Lock() defer n.lock.Unlock() @@ -114,7 +150,7 @@ func (n *NodeBalance) GetRawBalance() (utils.ExpiredValue, utils.ExpiredValue) { // unchanged) while adding a negative amount higher than the current balance results in // zero balance. 
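[Editorial sketch, not part of the diff] The interface split introduced above is the point of this commit: *NodeBalance is no longer exported, and callers only ever see one of the three views. The fragment below illustrates the two access paths, reusing the hypothetical stubPeer from the earlier sketch and assuming a started *vfs.ClientPool and the same imports.

	func balanceAccessSketch(pool *vfs.ClientPool, peer stubPeer) {
		// While the peer is connected, Register yields a ConnectedBalance:
		// the read-only queries plus SetPriceFactors and RequestServed.
		cb := pool.Register(peer)
		cb.SetPriceFactors(
			vfs.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1},
			vfs.PriceFactors{TimeFactor: 1, CapacityFactor: 0, RequestFactor: 1},
		)
		cb.RequestServed(1000) // deduct the cost of one served request
		pos, neg := cb.GetBalance()
		_, _ = pos, neg

		// Whether the peer is connected or not, the mutating calls (AddBalance, SetBalance)
		// are only reachable through BalanceOperation, which runs the callback atomically
		// inside a NodeStateMachine operation.
		pool.BalanceOperation(peer.Node().ID(), peer.FreeClientId(), func(b vfs.AtomicBalanceOperator) {
			b.AddBalance(int64(time.Minute)) // credit one minute worth of connection time
		})
	}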
// Note: this function should run inside a NodeStateMachine operation -func (n *NodeBalance) AddBalance(amount int64) (uint64, uint64, error) { +func (n *nodeBalance) AddBalance(amount int64) (uint64, uint64, error) { var ( err error old, new uint64 @@ -159,44 +195,43 @@ func (n *NodeBalance) AddBalance(amount int64) (uint64, uint64, error) { } // SetBalance sets the positive and negative balance to the given values -func (n *NodeBalance) SetBalance(pos, neg uint64) error { +// Note: this function should run inside a NodeStateMachine operation +func (n *nodeBalance) SetBalance(pos, neg uint64) error { if pos > maxBalance || neg > maxBalance { return errBalanceOverflow } - n.bt.ns.Operation(func() { - var ( - callbacks []func() - setPriority bool - ) - n.bt.updateTotalBalance(n, func() bool { - now := n.bt.clock.Now() - n.updateBalance(now) - - var pb, nb utils.ExpiredValue - pb.Add(int64(pos), n.bt.posExp.LogOffset(now)) - nb.Add(int64(neg), n.bt.negExp.LogOffset(now)) - n.balance.pos = pb - n.balance.neg = nb - callbacks = n.checkCallbacks(now) - setPriority = n.checkPriorityStatus() - n.storeBalance(true, true) - return true - }) - for _, cb := range callbacks { - cb() - } - if n.setFlags { - if setPriority { - n.bt.ns.SetStateSub(n.node, n.bt.PriorityFlag, nodestate.Flags{}, 0) - } - n.signalPriorityUpdate() - } + var ( + callbacks []func() + setPriority bool + ) + n.bt.updateTotalBalance(n, func() bool { + now := n.bt.clock.Now() + n.updateBalance(now) + + var pb, nb utils.ExpiredValue + pb.Add(int64(pos), n.bt.posExp.LogOffset(now)) + nb.Add(int64(neg), n.bt.negExp.LogOffset(now)) + n.balance.pos = pb + n.balance.neg = nb + callbacks = n.checkCallbacks(now) + setPriority = n.checkPriorityStatus() + n.storeBalance(true, true) + return true }) + for _, cb := range callbacks { + cb() + } + if n.setFlags { + if setPriority { + n.bt.ns.SetStateSub(n.node, n.bt.PriorityFlag, nodestate.Flags{}, 0) + } + n.signalPriorityUpdate() + } return nil } // RequestServed should be called after serving a request for the given peer -func (n *NodeBalance) RequestServed(cost uint64) uint64 { +func (n *nodeBalance) RequestServed(cost uint64) uint64 { n.lock.Lock() var callbacks []func() defer func() { @@ -244,7 +279,7 @@ func (n *NodeBalance) RequestServed(cost uint64) uint64 { } // Priority returns the actual priority based on the current balance -func (n *NodeBalance) Priority(capacity uint64) int64 { +func (n *nodeBalance) Priority(capacity uint64) int64 { n.lock.Lock() defer n.lock.Unlock() @@ -257,7 +292,7 @@ func (n *NodeBalance) Priority(capacity uint64) int64 { // in the current session. // If update is true then a priority callback is added that turns UpdateFlag on and off // in case the priority goes below the estimated minimum. 
-func (n *NodeBalance) EstimatePriority(capacity uint64, addBalance int64, future, bias time.Duration, update bool) int64 { +func (n *nodeBalance) EstimatePriority(capacity uint64, addBalance int64, future, bias time.Duration, update bool) int64 { n.lock.Lock() defer n.lock.Unlock() @@ -287,50 +322,17 @@ func (n *NodeBalance) EstimatePriority(capacity uint64, addBalance int64, future } pri := n.balanceToPriority(b, capacity) if update { - n.addCallback(balanceCallbackUpdate, pri, n.signalPriorityUpdate) + // Note: always set the threshold to lower than the estimate in order to ensure + // that two nodes will not ping-pong update signals forever if both of them have + // zero estimated priority drop in the projected future + n.addCallback(balanceCallbackUpdate, pri-1, n.signalPriorityUpdate) } return pri } -// PosBalanceMissing calculates the missing amount of positive balance in order to -// connect at targetCapacity, stay connected for the given amount of time and then -// still have a priority of targetPriority -func (n *NodeBalance) PosBalanceMissing(targetPriority int64, targetCapacity uint64, after time.Duration) uint64 { - n.lock.Lock() - defer n.lock.Unlock() - - now := n.bt.clock.Now() - if targetPriority < 0 { - timePrice := n.negFactor.timePrice(targetCapacity) - timeCost := uint64(float64(after) * timePrice) - negBalance := n.balance.neg.Value(n.bt.negExp.LogOffset(now)) - if timeCost+negBalance < uint64(-targetPriority) { - return 0 - } - if uint64(-targetPriority) > negBalance && timePrice > 1e-100 { - if negTime := time.Duration(float64(uint64(-targetPriority)-negBalance) / timePrice); negTime < after { - after -= negTime - } else { - after = 0 - } - } - targetPriority = 0 - } - timePrice := n.posFactor.timePrice(targetCapacity) - posRequired := uint64(float64(targetPriority)*float64(targetCapacity)+float64(after)*timePrice) + 1 - if posRequired >= maxBalance { - return math.MaxUint64 // target not reachable - } - posBalance := n.balance.pos.Value(n.bt.posExp.LogOffset(now)) - if posRequired > posBalance { - return posRequired - posBalance - } - return 0 -} - // SetPriceFactors sets the price factors. TimeFactor is the price of a nanosecond of // connection while RequestFactor is the price of a request cost unit. -func (n *NodeBalance) SetPriceFactors(posFactor, negFactor PriceFactors) { +func (n *nodeBalance) SetPriceFactors(posFactor, negFactor PriceFactors) { n.lock.Lock() now := n.bt.clock.Now() n.updateBalance(now) @@ -347,7 +349,7 @@ func (n *NodeBalance) SetPriceFactors(posFactor, negFactor PriceFactors) { } // GetPriceFactors returns the price factors -func (n *NodeBalance) GetPriceFactors() (posFactor, negFactor PriceFactors) { +func (n *nodeBalance) GetPriceFactors() (posFactor, negFactor PriceFactors) { n.lock.Lock() defer n.lock.Unlock() @@ -355,7 +357,7 @@ func (n *NodeBalance) GetPriceFactors() (posFactor, negFactor PriceFactors) { } // activate starts time/capacity cost deduction. 
-func (n *NodeBalance) activate() { +func (n *nodeBalance) activate() { n.bt.updateTotalBalance(n, func() bool { if n.active { return false @@ -367,7 +369,7 @@ func (n *NodeBalance) activate() { } // deactivate stops time/capacity cost deduction and saves the balances in the database -func (n *NodeBalance) deactivate() { +func (n *nodeBalance) deactivate() { n.bt.updateTotalBalance(n, func() bool { if !n.active { return false @@ -384,7 +386,7 @@ func (n *NodeBalance) deactivate() { } // updateBalance updates balance based on the time factor -func (n *NodeBalance) updateBalance(now mclock.AbsTime) { +func (n *nodeBalance) updateBalance(now mclock.AbsTime) { if n.active && now > n.lastUpdate { n.balance = n.reducedBalance(n.balance, n.lastUpdate, time.Duration(now-n.lastUpdate), n.capacity, 0) n.lastUpdate = now @@ -392,7 +394,7 @@ func (n *NodeBalance) updateBalance(now mclock.AbsTime) { } // storeBalance stores the positive and/or negative balance of the node in the database -func (n *NodeBalance) storeBalance(pos, neg bool) { +func (n *nodeBalance) storeBalance(pos, neg bool) { if pos { n.bt.storeBalance(n.node.ID().Bytes(), false, n.balance.pos) } @@ -406,7 +408,7 @@ func (n *NodeBalance) storeBalance(pos, neg bool) { // immediately. // Note: should be called while n.lock is held // Note 2: the callback function runs inside a NodeStateMachine operation -func (n *NodeBalance) addCallback(id int, threshold int64, callback func()) { +func (n *nodeBalance) addCallback(id int, threshold int64, callback func()) { n.removeCallback(id) idx := 0 for idx < n.callbackCount && threshold > n.callbacks[idx].threshold { @@ -426,7 +428,7 @@ func (n *NodeBalance) addCallback(id int, threshold int64, callback func()) { // removeCallback removes the given callback and returns true if it was active // Note: should be called while n.lock is held -func (n *NodeBalance) removeCallback(id int) bool { +func (n *nodeBalance) removeCallback(id int) bool { idx := n.callbackIndex[id] if idx == -1 { return false @@ -443,7 +445,7 @@ func (n *NodeBalance) removeCallback(id int) bool { // checkCallbacks checks whether the threshold of any of the active callbacks // have been reached and returns triggered callbacks. // Note: checkCallbacks assumes that the balance has been recently updated. -func (n *NodeBalance) checkCallbacks(now mclock.AbsTime) (callbacks []func()) { +func (n *nodeBalance) checkCallbacks(now mclock.AbsTime) (callbacks []func()) { if n.callbackCount == 0 || n.capacity == 0 { return } @@ -459,7 +461,7 @@ func (n *NodeBalance) checkCallbacks(now mclock.AbsTime) (callbacks []func()) { // scheduleCheck sets up or updates a scheduled event to ensure that it will be called // again just after the next threshold has been reached. 
-func (n *NodeBalance) scheduleCheck(now mclock.AbsTime) { +func (n *nodeBalance) scheduleCheck(now mclock.AbsTime) { if n.callbackCount != 0 { d, ok := n.timeUntil(n.callbacks[n.callbackCount-1].threshold) if !ok { @@ -485,7 +487,7 @@ func (n *NodeBalance) scheduleCheck(now mclock.AbsTime) { } // updateAfter schedules a balance update and callback check in the future -func (n *NodeBalance) updateAfter(dt time.Duration) { +func (n *nodeBalance) updateAfter(dt time.Duration) { if n.updateEvent == nil || n.updateEvent.Stop() { if dt == 0 { n.updateEvent = nil @@ -513,7 +515,7 @@ func (n *NodeBalance) updateAfter(dt time.Duration) { // balanceExhausted should be called when the positive balance is exhausted (priority goes to zero/negative) // Note: this function should run inside a NodeStateMachine operation -func (n *NodeBalance) balanceExhausted() { +func (n *nodeBalance) balanceExhausted() { n.lock.Lock() n.storeBalance(true, false) n.priority = false @@ -526,7 +528,7 @@ func (n *NodeBalance) balanceExhausted() { // checkPriorityStatus checks whether the node has gained priority status and sets the priority // callback and flag if necessary. It assumes that the balance has been recently updated. // Note that the priority flag has to be set by the caller after the mutex has been released. -func (n *NodeBalance) checkPriorityStatus() bool { +func (n *nodeBalance) checkPriorityStatus() bool { if !n.priority && !n.balance.pos.IsZero() { n.priority = true n.addCallback(balanceCallbackZero, 0, func() { n.balanceExhausted() }) @@ -537,7 +539,7 @@ func (n *NodeBalance) checkPriorityStatus() bool { // signalPriorityUpdate signals that the priority fell below the previous minimum estimate // Note: this function should run inside a NodeStateMachine operation -func (n *NodeBalance) signalPriorityUpdate() { +func (n *nodeBalance) signalPriorityUpdate() { n.bt.ns.SetStateSub(n.node, n.bt.UpdateFlag, nodestate.Flags{}, 0) n.bt.ns.SetStateSub(n.node, nodestate.Flags{}, n.bt.UpdateFlag, 0) } @@ -545,7 +547,7 @@ func (n *NodeBalance) signalPriorityUpdate() { // setCapacity updates the capacity value used for priority calculation // Note: capacity should never be zero // Note 2: this function should run inside a NodeStateMachine operation -func (n *NodeBalance) setCapacity(capacity uint64) { +func (n *nodeBalance) setCapacity(capacity uint64) { n.lock.Lock() now := n.bt.clock.Now() n.updateBalance(now) @@ -560,7 +562,7 @@ func (n *NodeBalance) setCapacity(capacity uint64) { // balanceToPriority converts a balance to a priority value. Lower priority means // first to disconnect. Positive balance translates to positive priority. If positive // balance is zero then negative balance translates to a negative priority. 
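[Editorial note] A quick worked example of the conversion implemented just below: a connected client whose positive balance is currently worth 10,000,000 token units at capacity 1,000 gets priority 10,000,000 / 1,000 = 10,000, so at equal balance a client occupying twice the capacity has half the priority and is dropped sooner. Once the positive balance reaches zero, the negative balance is reflected as a negative priority, placing the node among the first candidates to disconnect.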
-func (n *NodeBalance) balanceToPriority(b balance, capacity uint64) int64 { +func (n *nodeBalance) balanceToPriority(b balance, capacity uint64) int64 { if !b.pos.IsZero() { return int64(b.pos.Value(n.bt.posExp.LogOffset(n.bt.clock.Now())) / capacity) } @@ -569,7 +571,7 @@ func (n *NodeBalance) balanceToPriority(b balance, capacity uint64) int64 { // reducedBalance estimates the reduced balance at a given time in the fututre based // on the given balance, the time factor and an estimated average request cost per time ratio -func (n *NodeBalance) reducedBalance(b balance, start mclock.AbsTime, dt time.Duration, capacity uint64, avgReqCost float64) balance { +func (n *nodeBalance) reducedBalance(b balance, start mclock.AbsTime, dt time.Duration, capacity uint64, avgReqCost float64) balance { // since the costs are applied continuously during the dt time period we calculate // the expiration offset at the middle of the period at := start + mclock.AbsTime(dt/2) @@ -596,7 +598,7 @@ func (n *NodeBalance) reducedBalance(b balance, start mclock.AbsTime, dt time.Du // reached then (0, false) is returned. // Note: the function assumes that the balance has been recently updated and // calculates the time starting from the last update. -func (n *NodeBalance) timeUntil(priority int64) (time.Duration, bool) { +func (n *nodeBalance) timeUntil(priority int64) (time.Duration, bool) { now := n.bt.clock.Now() var dt float64 if !n.balance.pos.IsZero() { diff --git a/les/vflux/server/balance_test.go b/les/vflux/server/balance_test.go index fc4311080484..6397babeb5a6 100644 --- a/les/vflux/server/balance_test.go +++ b/les/vflux/server/balance_test.go @@ -72,14 +72,14 @@ func (btc balanceTestClient) FreeClientId() string { return "" } -func (b *balanceTestSetup) newNode(capacity uint64) *NodeBalance { +func (b *balanceTestSetup) newNode(capacity uint64) *nodeBalance { node := enode.SignNull(&enr.Record{}, enode.ID{}) b.ns.SetState(node, testFlag, nodestate.Flags{}, 0) b.ns.SetField(node, btTestSetup.clientField, balanceTestClient{}) if capacity != 0 { b.ns.SetField(node, ppTestSetup.CapacityField, capacity) } - n, _ := b.ns.GetField(node, btTestSetup.BalanceField).(*NodeBalance) + n, _ := b.ns.GetField(node, btTestSetup.BalanceField).(*nodeBalance) return n } @@ -291,40 +291,11 @@ func TestEstimatedPriority(t *testing.T) { } } -func TestPosBalanceMissing(t *testing.T) { - b := newBalanceTestSetup() - defer b.stop() - node := b.newNode(1000) - node.SetPriceFactors(PriceFactors{1, 0, 1}, PriceFactors{1, 0, 1}) - - b.ns.SetField(node.node, ppTestSetup.CapacityField, uint64(1)) - var inputs = []struct { - pos, neg uint64 - priority int64 - cap uint64 - after time.Duration - expect uint64 - }{ - {uint64(time.Second * 2), 0, 0, 1, time.Second, 0}, - {uint64(time.Second * 2), 0, 0, 1, 2 * time.Second, 1}, - {uint64(time.Second * 2), 0, int64(time.Second), 1, 2 * time.Second, uint64(time.Second) + 1}, - {0, 0, int64(time.Second), 1, time.Second, uint64(2*time.Second) + 1}, - {0, 0, -int64(time.Second), 1, time.Second, 1}, - } - for _, i := range inputs { - node.SetBalance(i.pos, i.neg) - got := node.PosBalanceMissing(i.priority, i.cap, i.after) - if got != i.expect { - t.Fatalf("Missing budget mismatch, want %v, got %v", i.expect, got) - } - } -} - func TestPostiveBalanceCounting(t *testing.T) { b := newBalanceTestSetup() defer b.stop() - var nodes []*NodeBalance + var nodes []*nodeBalance for i := 0; i < 100; i += 1 { node := b.newNode(1000000) node.SetPriceFactors(PriceFactors{1, 0, 1}, PriceFactors{1, 0, 1}) @@ 
-431,7 +402,7 @@ func TestBalancePersistence(t *testing.T) { ns: ns, bt: bt, } - var nb *NodeBalance + var nb *nodeBalance exp := func(expPos, expNeg uint64) { pos, neg := nb.GetBalance() if pos != expPos { diff --git a/les/vflux/server/balance_tracker.go b/les/vflux/server/balance_tracker.go index 384b3560ba7b..437aba6e52a9 100644 --- a/les/vflux/server/balance_tracker.go +++ b/les/vflux/server/balance_tracker.go @@ -53,9 +53,9 @@ func NewBalanceTrackerSetup(setup *nodestate.Setup) BalanceTrackerSetup { // UpdateFlag set and then immediately reset if the balance has been updated and // therefore priority is suddenly changed UpdateFlag: setup.NewFlag("balanceUpdate"), - // BalanceField contains the NodeBalance struct which implements nodePriority, + // BalanceField contains the nodeBalance struct which implements nodePriority, // allowing on-demand priority calculation and future priority estimation - BalanceField: setup.NewField("balance", reflect.TypeOf(&NodeBalance{})), + BalanceField: setup.NewField("balance", reflect.TypeOf(&nodeBalance{})), } } @@ -66,7 +66,7 @@ func (bts *BalanceTrackerSetup) Connect(clientField, capacityField nodestate.Fie } // BalanceTracker tracks positive and negative balances for connected nodes. -// After clientField is set externally, a NodeBalance is created and previous +// After clientField is set externally, a nodeBalance is created and previous // balance values are loaded from the database. Both balances are exponentially expired // values. Costs are deducted from the positive balance if present, otherwise added to // the negative balance. If the capacity is non-zero then a time cost is applied @@ -114,7 +114,7 @@ func NewBalanceTracker(ns *nodestate.NodeStateMachine, setup BalanceTrackerSetup }) ns.SubscribeField(bt.capacityField, func(node *enode.Node, state nodestate.Flags, oldValue, newValue interface{}) { - n, _ := ns.GetField(node, bt.BalanceField).(*NodeBalance) + n, _ := ns.GetField(node, bt.BalanceField).(*nodeBalance) if n == nil { return } @@ -136,7 +136,7 @@ func NewBalanceTracker(ns *nodestate.NodeStateMachine, setup BalanceTrackerSetup ns.SetFieldSub(node, bt.BalanceField, bt.newNodeBalance(node, newValue.(balancePeer).FreeClientId(), true)) } else { ns.SetStateSub(node, nodestate.Flags{}, bt.PriorityFlag, 0) - if b, _ := ns.GetField(node, bt.BalanceField).(*NodeBalance); b != nil { + if b, _ := ns.GetField(node, bt.BalanceField).(*nodeBalance); b != nil { b.deactivate() } ns.SetFieldSub(node, bt.BalanceField, nil) @@ -168,7 +168,7 @@ func (bt *BalanceTracker) Stop() { bt.ndb.setExpiration(bt.posExp.LogOffset(now), bt.negExp.LogOffset(now)) close(bt.quit) bt.ns.ForEach(nodestate.Flags{}, nodestate.Flags{}, func(node *enode.Node, state nodestate.Flags) { - if n, ok := bt.ns.GetField(node, bt.BalanceField).(*NodeBalance); ok { + if n, ok := bt.ns.GetField(node, bt.BalanceField).(*nodeBalance); ok { n.lock.Lock() n.storeBalance(true, true) n.lock.Unlock() @@ -186,7 +186,7 @@ func (bt *BalanceTracker) TotalTokenAmount() uint64 { bt.balanceTimer.Update(func(_ time.Duration) bool { bt.active = utils.ExpiredValue{} bt.ns.ForEach(nodestate.Flags{}, nodestate.Flags{}, func(node *enode.Node, state nodestate.Flags) { - if n, ok := bt.ns.GetField(node, bt.BalanceField).(*NodeBalance); ok && n.active { + if n, ok := bt.ns.GetField(node, bt.BalanceField).(*nodeBalance); ok && n.active { pos, _ := n.GetRawBalance() bt.active.AddExp(pos) } @@ -232,14 +232,14 @@ func (bt *BalanceTracker) GetExpirationTCs() (pos, neg uint64) { return bt.posExpTC, 
bt.negExpTC } -// BalanceOperation allows safe operations on the balance of a node regardless of whether +// BalanceOperation allows atomic operations on the balance of a node regardless of whether // it is currently connected or not -func (bt *BalanceTracker) BalanceOperation(id enode.ID, negBalanceKey string, cb func(*NodeBalance)) { +func (bt *BalanceTracker) BalanceOperation(id enode.ID, negBalanceKey string, cb func(AtomicBalanceOperator)) { bt.ns.Operation(func() { node := bt.ns.GetNode(id) - var nb *NodeBalance + var nb *nodeBalance if node != nil { - nb, _ = bt.ns.GetField(node, bt.BalanceField).(*NodeBalance) + nb, _ = bt.ns.GetField(node, bt.BalanceField).(*nodeBalance) } else { node = enode.SignNull(&enr.Record{}, id) } @@ -250,14 +250,14 @@ func (bt *BalanceTracker) BalanceOperation(id enode.ID, negBalanceKey string, cb }) } -// newNodeBalance loads balances from the database and creates a NodeBalance instance +// newNodeBalance loads balances from the database and creates a nodeBalance instance // for the given node. It also sets the PriorityFlag and adds balanceCallbackZero if // the node has a positive balance. // Note: this function should run inside a NodeStateMachine operation -func (bt *BalanceTracker) newNodeBalance(node *enode.Node, negBalanceKey string, setFlags bool) *NodeBalance { +func (bt *BalanceTracker) newNodeBalance(node *enode.Node, negBalanceKey string, setFlags bool) *nodeBalance { pb := bt.ndb.getOrNewBalance(node.ID().Bytes(), false) nb := bt.ndb.getOrNewBalance([]byte(negBalanceKey), true) - n := &NodeBalance{ + n := &nodeBalance{ bt: bt, node: node, setFlags: setFlags, @@ -294,7 +294,7 @@ func (bt *BalanceTracker) canDropBalance(now mclock.AbsTime, neg bool, b utils.E } // updateTotalBalance adjusts the total balance after executing given callback. -func (bt *BalanceTracker) updateTotalBalance(n *NodeBalance, callback func() bool) { +func (bt *BalanceTracker) updateTotalBalance(n *nodeBalance, callback func() bool) { bt.lock.Lock() defer bt.lock.Unlock() diff --git a/les/vflux/server/clientpool.go b/les/vflux/server/clientpool.go index 781a043be169..dc862503966a 100644 --- a/les/vflux/server/clientpool.go +++ b/les/vflux/server/clientpool.go @@ -41,14 +41,14 @@ var ( ) var ( - errNotConnected = errors.New("client not connected") - errNoPriority = errors.New("priority too low to raise capacity") - errCantFindMaximum = errors.New("Unable to find maximum allowed capacity") + ErrNotConnected = errors.New("client not connected") + ErrNoPriority = errors.New("priority too low to raise capacity") + ErrCantFindMaximum = errors.New("Unable to find maximum allowed capacity") ) func init() { btSetup.Connect(clientField, ppSetup.CapacityField) - ppSetup.Connect(btSetup.BalanceField, btSetup.UpdateFlag) // NodeBalance implements nodePriority + ppSetup.Connect(btSetup.BalanceField, btSetup.UpdateFlag) // nodeBalance implements nodePriority } // ClientPool implements a client database that assigns a priority to each client @@ -144,7 +144,7 @@ func NewClientPool(balanceDb ethdb.KeyValueStore, minCap uint64, connectedBias t if newValue != nil { ns.SetStateSub(node, ppSetup.InactiveFlag, nodestate.Flags{}, 0) cp.lock.RLock() - newValue.(*NodeBalance).SetPriceFactors(cp.defaultPosFactors, cp.defaultNegFactors) + newValue.(*nodeBalance).SetPriceFactors(cp.defaultPosFactors, cp.defaultNegFactors) cp.lock.RUnlock() } }) @@ -194,9 +194,9 @@ func (cp *ClientPool) Stop() { // Register registers the peer into the client pool. 
If the peer has insufficient // priority and remains inactive for longer than the allowed timeout then it will be // disconnected by calling the Disconnect function of the clientPeer interface. -func (cp *ClientPool) Register(peer clientPeer) *NodeBalance { +func (cp *ClientPool) Register(peer clientPeer) ConnectedBalance { cp.ns.SetField(peer.Node(), clientField, clientPeerInstance{peer}) - balance, _ := cp.ns.GetField(peer.Node(), btSetup.BalanceField).(*NodeBalance) + balance, _ := cp.ns.GetField(peer.Node(), btSetup.BalanceField).(*nodeBalance) return balance } @@ -232,9 +232,9 @@ func (cp *ClientPool) SetCapacity(node *enode.Node, reqCap uint64, bias time.Dur cp.lock.RUnlock() cp.ns.Operation(func() { - balance, _ := cp.ns.GetField(node, btSetup.BalanceField).(*NodeBalance) + balance, _ := cp.ns.GetField(node, btSetup.BalanceField).(*nodeBalance) if balance == nil { - err = errNotConnected + err = ErrNotConnected return } capacity, _ = cp.ns.GetField(node, ppSetup.CapacityField).(uint64) @@ -250,7 +250,7 @@ func (cp *ClientPool) SetCapacity(node *enode.Node, reqCap uint64, bias time.Dur } if reqCap > cp.minCap { if cp.ns.GetState(node).HasNone(btSetup.PriorityFlag) && reqCap > cp.minCap { - err = errNoPriority + err = ErrNoPriority return } } @@ -292,8 +292,8 @@ func (cp *ClientPool) SetCapacity(node *enode.Node, reqCap uint64, bias time.Dur } } // we should be able to find the maximum allowed capacity in a few iterations - log.Error("Unable to find maximum allowed capacity") - err = errCantFindMaximum + log.Crit("Unable to find maximum allowed capacity") + err = ErrCantFindMaximum }) return } @@ -319,7 +319,7 @@ func (cp *ClientPool) serveCapQuery(id enode.ID, freeID string, data []byte) []b // use CapacityCurve to answer request for multiple newly bought token amounts curve := cp.GetCapacityCurve().Exclude(id) result := make(vflux.CapacityQueryReply, len(req.AddTokens)) - cp.BalanceOperation(id, freeID, func(balance *NodeBalance) { + cp.BalanceOperation(id, freeID, func(balance AtomicBalanceOperator) { pb, _ := balance.GetBalance() for i, addTokens := range req.AddTokens { add := addTokens.Int64() diff --git a/les/vflux/server/clientpool_test.go b/les/vflux/server/clientpool_test.go index b50e18869025..3eac72fff4ed 100644 --- a/les/vflux/server/clientpool_test.go +++ b/les/vflux/server/clientpool_test.go @@ -101,14 +101,14 @@ func (i *poolTestPeer) Disconnect() { } func getBalance(pool *ClientPool, p *poolTestPeer) (pos, neg uint64) { - pool.BalanceOperation(p.node.ID(), p.FreeClientId(), func(nb *NodeBalance) { + pool.BalanceOperation(p.node.ID(), p.FreeClientId(), func(nb AtomicBalanceOperator) { pos, neg = nb.GetBalance() }) return } func addBalance(pool *ClientPool, id enode.ID, amount int64) { - pool.BalanceOperation(id, "", func(nb *NodeBalance) { + pool.BalanceOperation(id, "", func(nb AtomicBalanceOperator) { nb.AddBalance(amount) }) } diff --git a/les/vflux/server/prioritypool.go b/les/vflux/server/prioritypool.go index fa76252c3178..5c0fcc0ea578 100644 --- a/les/vflux/server/prioritypool.go +++ b/les/vflux/server/prioritypool.go @@ -107,17 +107,6 @@ type PriorityPool struct { ccUpdateForced bool } -// nodePriority interface provides current and estimated future priorities on demand -type nodePriority interface { - // Priority should return the current priority of the node (higher is better) - Priority(cap uint64) int64 - // EstMinPriority should return a lower estimate for the minimum of the node priority - // value starting from the current moment until the given time. 
If the priority goes - // under the returned estimate before the specified moment then it is the caller's - // responsibility to signal with updateFlag. - EstimatePriority(cap uint64, addBalance int64, future, bias time.Duration, update bool) int64 -} - // ppNodeInfo is the internal node descriptor of PriorityPool type ppNodeInfo struct { nodePriority nodePriority @@ -247,7 +236,7 @@ func (pp *PriorityPool) SetLimits(maxCount, maxCap uint64) { updates = pp.finalizeChanges(true) } if inc { - updates = pp.tryActivate() + updates = append(updates, pp.tryActivate()...) } } From 42f5b60a28f968e7240cdd6e1dc1e484c853667a Mon Sep 17 00:00:00 2001 From: Zsolt Felfoldi Date: Tue, 16 Mar 2021 23:36:33 +0100 Subject: [PATCH 03/27] tests/fuzzers/vflux: add ClientPool fuzzer --- tests/fuzzers/vflux/clientpool-fuzzer.go | 286 +++++++++++++++++++++++ tests/fuzzers/vflux/debug/main.go | 41 ++++ 2 files changed, 327 insertions(+) create mode 100644 tests/fuzzers/vflux/clientpool-fuzzer.go create mode 100644 tests/fuzzers/vflux/debug/main.go diff --git a/tests/fuzzers/vflux/clientpool-fuzzer.go b/tests/fuzzers/vflux/clientpool-fuzzer.go new file mode 100644 index 000000000000..ba45c2ddb41d --- /dev/null +++ b/tests/fuzzers/vflux/clientpool-fuzzer.go @@ -0,0 +1,286 @@ +// Copyright 2021 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . 
+ +package vflux + +import ( + "bytes" + "encoding/binary" + "io" + "math" + "math/big" + "time" + + "github.com/ethereum/go-ethereum/common/mclock" + "github.com/ethereum/go-ethereum/ethdb/memorydb" + "github.com/ethereum/go-ethereum/les/vflux" + vfs "github.com/ethereum/go-ethereum/les/vflux/server" + "github.com/ethereum/go-ethereum/p2p/enode" + "github.com/ethereum/go-ethereum/p2p/enr" + "github.com/ethereum/go-ethereum/rlp" +) + +type fuzzer struct { + peers [256]*clientPeer + disconnectList []*clientPeer + input io.Reader + exhausted bool + activeCount, activeCap uint64 + maxCount, maxCap uint64 +} + +type clientPeer struct { + fuzzer *fuzzer + node *enode.Node + freeID string + timeout time.Duration + + balance vfs.ConnectedBalance + capacity uint64 +} + +func (p *clientPeer) Node() *enode.Node { + return p.node +} + +func (p *clientPeer) FreeClientId() string { + return p.freeID +} + +func (p *clientPeer) InactiveTimeout() time.Duration { + return p.timeout +} + +func (p *clientPeer) UpdateCapacity(newCap uint64, requested bool) { + p.fuzzer.activeCap -= p.capacity + if p.capacity != 0 { + p.fuzzer.activeCount-- + } + p.capacity = newCap + p.fuzzer.activeCap += p.capacity + if p.capacity != 0 { + p.fuzzer.activeCount++ + } +} + +func (p *clientPeer) Disconnect() { + p.fuzzer.disconnectList = append(p.fuzzer.disconnectList, p) + p.fuzzer.activeCap -= p.capacity + if p.capacity != 0 { + p.fuzzer.activeCount-- + } + p.capacity = 0 + p.balance = nil +} + +func newFuzzer(input []byte) *fuzzer { + f := &fuzzer{ + input: bytes.NewReader(input), + } + for i := range f.peers { + f.peers[i] = &clientPeer{ + fuzzer: f, + node: enode.SignNull(new(enr.Record), enode.ID{byte(i)}), + freeID: string([]byte{byte(i)}), + timeout: f.randomDelay(), + } + } + return f +} + +func (f *fuzzer) read(size int) []byte { + out := make([]byte, size) + if _, err := f.input.Read(out); err != nil { + f.exhausted = true + } + return out +} + +func (f *fuzzer) randomByte() byte { + d := f.read(1) + return d[0] +} + +func (f *fuzzer) randomBool() bool { + d := f.read(1) + return d[0]&1 == 1 +} + +func (f *fuzzer) randomInt(max int) int { + if max == 0 { + return 0 + } + if max <= 256 { + return int(f.randomByte()) % max + } + var a uint16 + if err := binary.Read(f.input, binary.LittleEndian, &a); err != nil { + f.exhausted = true + } + return int(a % uint16(max)) +} + +func (f *fuzzer) randomTokenAmount(signed bool) int64 { + x := uint64(f.randomInt(65000)) + x = x * x * x * x + + if signed && (x&1) == 1 { + if x <= math.MaxInt64 { + return -int64(x) + } + return math.MinInt64 + } + if x <= math.MaxInt64 { + return int64(x) + } + return math.MaxInt64 +} + +func (f *fuzzer) randomDelay() time.Duration { + delay := f.randomByte() + if delay < 128 { + return time.Duration(delay) * time.Second + } + return 0 +} + +func (f *fuzzer) randomFactors() vfs.PriceFactors { + return vfs.PriceFactors{ + TimeFactor: float64(f.randomByte()) / 25500, + CapacityFactor: float64(f.randomByte()) / 255, + RequestFactor: float64(f.randomByte()) / 255, + } +} + +func (f *fuzzer) connectedBalanceOp(balance vfs.ConnectedBalance) { + switch f.randomInt(3) { + case 0: + balance.RequestServed(uint64(f.randomTokenAmount(false))) + case 1: + balance.SetPriceFactors(f.randomFactors(), f.randomFactors()) + case 2: + balance.GetBalance() + balance.GetRawBalance() + balance.GetPriceFactors() + } +} + +func (f *fuzzer) atomicBalanceOp(balance vfs.AtomicBalanceOperator) { + switch f.randomInt(3) { + case 0: + 
balance.AddBalance(f.randomTokenAmount(true)) + case 1: + balance.SetBalance(uint64(f.randomTokenAmount(false)), uint64(f.randomTokenAmount(false))) + case 2: + balance.GetBalance() + balance.GetRawBalance() + balance.GetPriceFactors() + } +} + +func FuzzClientPool(input []byte) int { + if len(input) > 10000 { + return -1 + } + f := newFuzzer(input) + if f.exhausted { + return 0 + } + clock := &mclock.Simulated{} + db := memorydb.New() + pool := vfs.NewClientPool(db, 10, f.randomDelay(), clock) + pool.Start() + defer pool.Stop() + + count := 0 + for !f.exhausted && count < 1000 { + count++ + switch f.randomInt(11) { + case 0: + i := int(f.randomByte()) + f.peers[i].balance = pool.Register(f.peers[i]) + case 1: + i := int(f.randomByte()) + f.peers[i].Disconnect() + case 2: + f.maxCount = uint64(f.randomByte()) + f.maxCap = uint64(f.randomByte()) + f.maxCap *= f.maxCap + pool.SetLimits(f.maxCount, f.maxCap) + case 3: + pool.SetConnectedBias(f.randomDelay()) + case 4: + pool.SetDefaultFactors(f.randomFactors(), f.randomFactors()) + case 5: + pool.SetExpirationTCs(uint64(f.randomInt(50000)), uint64(f.randomInt(50000))) + case 6: + if _, err := pool.SetCapacity(f.peers[f.randomByte()].node, uint64(f.randomByte()), f.randomDelay(), f.randomBool()); err == vfs.ErrCantFindMaximum { + panic(nil) + } + case 7: + if balance := f.peers[f.randomByte()].balance; balance != nil { + f.connectedBalanceOp(balance) + } + case 8: + pool.BalanceOperation(f.peers[f.randomByte()].node.ID(), f.peers[f.randomByte()].freeID, func(balance vfs.AtomicBalanceOperator) { + count := f.randomInt(4) + for i := 0; i < count; i++ { + f.atomicBalanceOp(balance) + } + }) + case 9: + pool.TotalTokenAmount() + pool.GetExpirationTCs() + pool.Active() + pool.Limits() + pool.GetPosBalanceIDs(f.peers[f.randomByte()].node.ID(), f.peers[f.randomByte()].node.ID(), f.randomInt(100)) + case 10: + req := vflux.CapacityQueryReq{ + Bias: uint64(f.randomByte()), + AddTokens: make([]vflux.IntOrInf, f.randomInt(vflux.CapacityQueryMaxLen+1)), + } + for i := range req.AddTokens { + v := vflux.IntOrInf{Type: uint8(f.randomInt(4))} + if v.Type < 2 { + v.Value = *big.NewInt(f.randomTokenAmount(false)) + } + req.AddTokens[i] = v + } + reqEnc, err := rlp.EncodeToBytes(&req) + if err != nil { + panic(err) + } + p := int(f.randomByte()) + if p < len(reqEnc) { + reqEnc[p] = f.randomByte() + } + pool.Handle(f.peers[f.randomByte()].node.ID(), f.peers[f.randomByte()].freeID, vflux.CapacityQueryName, reqEnc) + } + + for _, peer := range f.disconnectList { + pool.Unregister(peer) + } + f.disconnectList = nil + if d := f.randomDelay(); d > 0 { + clock.Run(d) + } + //fmt.Println(f.activeCount, f.maxCount, f.activeCap, f.maxCap) + if f.activeCount > f.maxCount || f.activeCap > f.maxCap { + panic(nil) + } + } + return 0 +} diff --git a/tests/fuzzers/vflux/debug/main.go b/tests/fuzzers/vflux/debug/main.go new file mode 100644 index 000000000000..de0b5d41241a --- /dev/null +++ b/tests/fuzzers/vflux/debug/main.go @@ -0,0 +1,41 @@ +// Copyright 2020 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package main + +import ( + "fmt" + "io/ioutil" + "os" + + "github.com/ethereum/go-ethereum/tests/fuzzers/vflux" +) + +func main() { + if len(os.Args) != 2 { + fmt.Fprintf(os.Stderr, "Usage: debug \n") + fmt.Fprintf(os.Stderr, "Example\n") + fmt.Fprintf(os.Stderr, " $ debug ../crashers/4bbef6857c733a87ecf6fd8b9e7238f65eb9862a\n") + os.Exit(1) + } + crasher := os.Args[1] + data, err := ioutil.ReadFile(crasher) + if err != nil { + fmt.Fprintf(os.Stderr, "error loading crasher %v: %v", crasher, err) + os.Exit(1) + } + vflux.FuzzClientPool(data) +} From e84b25265cacc4cf744c3e51fc504a13ac3ceea0 Mon Sep 17 00:00:00 2001 From: Zsolt Felfoldi Date: Tue, 16 Mar 2021 23:57:24 +0100 Subject: [PATCH 04/27] les/vflux/server: fixed balance tests --- les/vflux/server/balance_test.go | 46 ++++++++++++++++++-------------- 1 file changed, 26 insertions(+), 20 deletions(-) diff --git a/les/vflux/server/balance_test.go b/les/vflux/server/balance_test.go index 6397babeb5a6..72231d6469f3 100644 --- a/les/vflux/server/balance_test.go +++ b/les/vflux/server/balance_test.go @@ -83,6 +83,20 @@ func (b *balanceTestSetup) newNode(capacity uint64) *nodeBalance { return n } +func (b *balanceTestSetup) setBalance(node *nodeBalance, pos, neg uint64) (err error) { + b.bt.BalanceOperation(node.node.ID(), node.connAddress, func(balance AtomicBalanceOperator) { + err = balance.SetBalance(pos, neg) + }) + return +} + +func (b *balanceTestSetup) addBalance(node *nodeBalance, add int64) (old, new uint64, err error) { + b.bt.BalanceOperation(node.node.ID(), node.connAddress, func(balance AtomicBalanceOperator) { + old, new, err = balance.AddBalance(add) + }) + return +} + func (b *balanceTestSetup) stop() { b.bt.Stop() b.ns.Stop() @@ -106,13 +120,7 @@ func TestAddBalance(t *testing.T) { {maxBalance, [2]uint64{0, 0}, 0, true}, } for _, i := range inputs { - var ( - old, new uint64 - err error - ) - b.ns.Operation(func() { - old, new, err = node.AddBalance(i.delta) - }) + old, new, err := b.addBalance(node, i.delta) if i.expectErr { if err == nil { t.Fatalf("Expect get error but nil") @@ -144,7 +152,7 @@ func TestSetBalance(t *testing.T) { } for _, i := range inputs { - node.SetBalance(i.pos, i.neg) + b.setBalance(node, i.pos, i.neg) pos, neg := node.GetBalance() if pos != i.pos { t.Fatalf("Positive balance mismatch, want %v, got %v", i.pos, pos) @@ -162,7 +170,7 @@ func TestBalanceTimeCost(t *testing.T) { b.ns.SetField(node.node, ppTestSetup.CapacityField, uint64(1)) node.SetPriceFactors(PriceFactors{1, 0, 1}, PriceFactors{1, 0, 1}) - node.SetBalance(uint64(time.Minute), 0) // 1 minute time allowance + b.setBalance(node, uint64(time.Minute), 0) // 1 minute time allowance var inputs = []struct { runTime time.Duration @@ -184,7 +192,7 @@ func TestBalanceTimeCost(t *testing.T) { } } - node.SetBalance(uint64(time.Minute), 0) // Refill 1 minute time allowance + b.setBalance(node, uint64(time.Minute), 0) // Refill 1 minute time allowance for _, i := range inputs { b.clock.Run(i.runTime) if pos, _ := node.GetBalance(); pos != i.expPos { @@ -203,7 +211,7 @@ func TestBalanceReqCost(t *testing.T) { node.SetPriceFactors(PriceFactors{1, 0, 
1}, PriceFactors{1, 0, 1}) b.ns.SetField(node.node, ppTestSetup.CapacityField, uint64(1)) - node.SetBalance(uint64(time.Minute), 0) // 1 minute time serving time allowance + b.setBalance(node, uint64(time.Minute), 0) // 1 minute time serving time allowance var inputs = []struct { reqCost uint64 expPos uint64 @@ -242,7 +250,7 @@ func TestBalanceToPriority(t *testing.T) { {0, 1000, -1000}, } for _, i := range inputs { - node.SetBalance(i.pos, i.neg) + b.setBalance(node, i.pos, i.neg) priority := node.Priority(1000) if priority != i.priority { t.Fatalf("Priority mismatch, want %v, got %v", i.priority, priority) @@ -257,7 +265,7 @@ func TestEstimatedPriority(t *testing.T) { node.SetPriceFactors(PriceFactors{1, 0, 1}, PriceFactors{1, 0, 1}) b.ns.SetField(node.node, ppTestSetup.CapacityField, uint64(1)) - node.SetBalance(uint64(time.Minute), 0) + b.setBalance(node, uint64(time.Minute), 0) var inputs = []struct { runTime time.Duration // time cost futureTime time.Duration // diff of future time @@ -306,9 +314,7 @@ func TestPostiveBalanceCounting(t *testing.T) { var sum uint64 for i := 0; i < 100; i += 1 { amount := int64(rand.Intn(100) + 100) - b.ns.Operation(func() { - nodes[i].AddBalance(amount) - }) + b.addBalance(nodes[i], amount) sum += uint64(amount) } if b.bt.TotalTokenAmount() != sum { @@ -348,7 +354,7 @@ func TestCallbackChecking(t *testing.T) { {0, time.Second}, {-int64(time.Second), 2 * time.Second}, } - node.SetBalance(uint64(time.Second), 0) + b.setBalance(node, uint64(time.Second), 0) for _, i := range inputs { diff, _ := node.timeUntil(i.priority) if diff != i.expDiff { @@ -365,7 +371,7 @@ func TestCallback(t *testing.T) { b.ns.SetField(node.node, ppTestSetup.CapacityField, uint64(1)) callCh := make(chan struct{}, 1) - node.SetBalance(uint64(time.Minute), 0) + b.setBalance(node, uint64(time.Minute), 0) node.addCallback(balanceCallbackZero, 0, func() { callCh <- struct{}{} }) b.clock.Run(time.Minute) @@ -375,7 +381,7 @@ func TestCallback(t *testing.T) { t.Fatalf("Callback hasn't been called yet") } - node.SetBalance(uint64(time.Minute), 0) + b.setBalance(node, uint64(time.Minute), 0) node.addCallback(balanceCallbackZero, 0, func() { callCh <- struct{}{} }) node.removeCallback(balanceCallbackZero) @@ -422,7 +428,7 @@ func TestBalancePersistence(t *testing.T) { expTotal(0) nb = bts.newNode(0) expTotal(0) - nb.SetBalance(16000000000, 16000000000) + bts.setBalance(nb, 16000000000, 16000000000) exp(16000000000, 16000000000) expTotal(16000000000) clock.Run(time.Hour * 2) From cda86d98cf4c91896b96942cf873e77b9be33686 Mon Sep 17 00:00:00 2001 From: Zsolt Felfoldi Date: Wed, 17 Mar 2021 02:29:13 +0100 Subject: [PATCH 05/27] les: rebase fix --- les/clientpool.go | 453 ----------------------- les/server.go | 3 +- les/vflux/server/clientpool.go | 32 +- les/vflux/server/clientpool_test.go | 4 + tests/fuzzers/vflux/clientpool-fuzzer.go | 2 +- 5 files changed, 36 insertions(+), 458 deletions(-) delete mode 100644 les/clientpool.go diff --git a/les/clientpool.go b/les/clientpool.go deleted file mode 100644 index 3965d54508db..000000000000 --- a/les/clientpool.go +++ /dev/null @@ -1,453 +0,0 @@ -// Copyright 2019 The go-ethereum Authors -// This file is part of the go-ethereum library. -// -// The go-ethereum library is free software: you can redistribute it and/or modify -// it under the terms of the GNU Lesser General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. 
-// -// The go-ethereum library is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Lesser General Public License for more details. -// -// You should have received a copy of the GNU Lesser General Public License -// along with the go-ethereum library. If not, see . - -package les - -import ( - "fmt" - "sync" - "time" - - "github.com/ethereum/go-ethereum/common/mclock" - "github.com/ethereum/go-ethereum/ethdb" - "github.com/ethereum/go-ethereum/les/utils" - "github.com/ethereum/go-ethereum/les/vflux" - vfs "github.com/ethereum/go-ethereum/les/vflux/server" - "github.com/ethereum/go-ethereum/log" - "github.com/ethereum/go-ethereum/p2p/enode" - "github.com/ethereum/go-ethereum/p2p/enr" - "github.com/ethereum/go-ethereum/p2p/nodestate" - "github.com/ethereum/go-ethereum/rlp" -) - -const ( - defaultNegExpTC = 3600 // default time constant (in seconds) for exponentially reducing negative balance - - // defaultConnectedBias is applied to already connected clients So that - // already connected client won't be kicked out very soon and we - // can ensure all connected clients can have enough time to request - // or sync some data. - // - // todo(rjl493456442) make it configurable. It can be the option of - // free trial time! - defaultConnectedBias = time.Minute * 3 - inactiveTimeout = time.Second * 10 -) - -// clientPool implements a client database that assigns a priority to each client -// based on a positive and negative balance. Positive balance is externally assigned -// to prioritized clients and is decreased with connection time and processed -// requests (unless the price factors are zero). If the positive balance is zero -// then negative balance is accumulated. -// -// Balance tracking and priority calculation for connected clients is done by -// balanceTracker. activeQueue ensures that clients with the lowest positive or -// highest negative balance get evicted when the total capacity allowance is full -// and new clients with a better balance want to connect. -// -// Already connected nodes receive a small bias in their favor in order to avoid -// accepting and instantly kicking out clients. In theory, we try to ensure that -// each client can have several minutes of connection time. -// -// Balances of disconnected clients are stored in nodeDB including positive balance -// and negative banalce. Boeth positive balance and negative balance will decrease -// exponentially. If the balance is low enough, then the record will be dropped. -type clientPool struct { - vfs.BalanceTrackerSetup - vfs.PriorityPoolSetup - lock sync.Mutex - clock mclock.Clock - closed bool - removePeer func(enode.ID) - synced func() bool - ns *nodestate.NodeStateMachine - pp *vfs.PriorityPool - bt *vfs.BalanceTracker - - defaultPosFactors, defaultNegFactors vfs.PriceFactors - posExpTC, negExpTC uint64 - minCap uint64 // The minimal capacity value allowed for any client - connectedBias time.Duration - capLimit uint64 -} - -// clientPoolPeer represents a client peer in the pool. -// Positive balances are assigned to node key while negative balances are assigned -// to freeClientId. Currently network IP address without port is used because -// clients have a limited access to IP addresses while new node keys can be easily -// generated so it would be useless to assign a negative value to them. 
-type clientPoolPeer interface { - Node() *enode.Node - freeClientId() string - updateCapacity(uint64) - freeze() - allowInactive() bool -} - -// clientInfo defines all information required by clientpool. -type clientInfo struct { - node *enode.Node - address string - peer clientPoolPeer - connected, priority bool - connectedAt mclock.AbsTime - balance *vfs.NodeBalance -} - -// newClientPool creates a new client pool -func newClientPool(ns *nodestate.NodeStateMachine, lesDb ethdb.Database, minCap uint64, connectedBias time.Duration, clock mclock.Clock, removePeer func(enode.ID), synced func() bool) *clientPool { - pool := &clientPool{ - ns: ns, - BalanceTrackerSetup: balanceTrackerSetup, - PriorityPoolSetup: priorityPoolSetup, - clock: clock, - minCap: minCap, - connectedBias: connectedBias, - removePeer: removePeer, - synced: synced, - } - pool.bt = vfs.NewBalanceTracker(ns, balanceTrackerSetup, lesDb, clock, &utils.Expirer{}, &utils.Expirer{}) - pool.pp = vfs.NewPriorityPool(ns, priorityPoolSetup, clock, minCap, connectedBias, 4) - - // set default expiration constants used by tests - // Note: server overwrites this if token sale is active - pool.bt.SetExpirationTCs(0, defaultNegExpTC) - - ns.SubscribeState(pool.InactiveFlag.Or(pool.PriorityFlag), func(node *enode.Node, oldState, newState nodestate.Flags) { - if newState.Equals(pool.InactiveFlag) { - ns.AddTimeout(node, pool.InactiveFlag, inactiveTimeout) - } - if oldState.Equals(pool.InactiveFlag) && newState.Equals(pool.InactiveFlag.Or(pool.PriorityFlag)) { - ns.SetStateSub(node, pool.InactiveFlag, nodestate.Flags{}, 0) // remove timeout - } - }) - - ns.SubscribeState(pool.ActiveFlag.Or(pool.PriorityFlag), func(node *enode.Node, oldState, newState nodestate.Flags) { - c, _ := ns.GetField(node, clientInfoField).(*clientInfo) - if c == nil { - return - } - c.priority = newState.HasAll(pool.PriorityFlag) - if newState.Equals(pool.ActiveFlag) { - cap, _ := ns.GetField(node, pool.CapacityField).(uint64) - if cap > minCap { - pool.pp.RequestCapacity(node, minCap, 0, true) - } - } - }) - - ns.SubscribeState(pool.InactiveFlag.Or(pool.ActiveFlag), func(node *enode.Node, oldState, newState nodestate.Flags) { - if oldState.IsEmpty() { - clientConnectedMeter.Mark(1) - log.Debug("Client connected", "id", node.ID()) - } - if oldState.Equals(pool.InactiveFlag) && newState.Equals(pool.ActiveFlag) { - clientActivatedMeter.Mark(1) - log.Debug("Client activated", "id", node.ID()) - } - if oldState.Equals(pool.ActiveFlag) && newState.Equals(pool.InactiveFlag) { - clientDeactivatedMeter.Mark(1) - log.Debug("Client deactivated", "id", node.ID()) - c, _ := ns.GetField(node, clientInfoField).(*clientInfo) - if c == nil || !c.peer.allowInactive() { - pool.removePeer(node.ID()) - } - } - if newState.IsEmpty() { - clientDisconnectedMeter.Mark(1) - log.Debug("Client disconnected", "id", node.ID()) - pool.removePeer(node.ID()) - } - }) - - var totalConnected uint64 - ns.SubscribeField(pool.CapacityField, func(node *enode.Node, state nodestate.Flags, oldValue, newValue interface{}) { - oldCap, _ := oldValue.(uint64) - newCap, _ := newValue.(uint64) - totalConnected += newCap - oldCap - totalConnectedGauge.Update(int64(totalConnected)) - c, _ := ns.GetField(node, clientInfoField).(*clientInfo) - if c != nil { - c.peer.updateCapacity(newCap) - } - }) - return pool -} - -// stop shuts the client pool down -func (f *clientPool) stop() { - f.lock.Lock() - f.closed = true - f.lock.Unlock() - f.ns.ForEach(nodestate.Flags{}, nodestate.Flags{}, func(node *enode.Node, state 
nodestate.Flags) { - // enforces saving all balances in BalanceTracker - f.disconnectNode(node) - }) - f.bt.Stop() -} - -// connect should be called after a successful handshake. If the connection was -// rejected, there is no need to call disconnect. -func (f *clientPool) connect(peer clientPoolPeer) (uint64, error) { - f.lock.Lock() - defer f.lock.Unlock() - - // Short circuit if clientPool is already closed. - if f.closed { - return 0, fmt.Errorf("Client pool is already closed") - } - // Dedup connected peers. - node, freeID := peer.Node(), peer.freeClientId() - if f.ns.GetField(node, clientInfoField) != nil { - log.Debug("Client already connected", "address", freeID, "id", node.ID().String()) - return 0, fmt.Errorf("Client already connected address=%s id=%s", freeID, node.ID().String()) - } - now := f.clock.Now() - c := &clientInfo{ - node: node, - address: freeID, - peer: peer, - connected: true, - connectedAt: now, - } - f.ns.SetField(node, clientInfoField, c) - f.ns.SetField(node, connAddressField, freeID) - if c.balance, _ = f.ns.GetField(node, f.BalanceField).(*vfs.NodeBalance); c.balance == nil { - f.disconnect(peer) - return 0, nil - } - c.balance.SetPriceFactors(f.defaultPosFactors, f.defaultNegFactors) - - f.ns.SetState(node, f.InactiveFlag, nodestate.Flags{}, 0) - var allowed bool - f.ns.Operation(func() { - _, allowed = f.pp.RequestCapacity(node, f.minCap, f.connectedBias, true) - }) - if allowed { - return f.minCap, nil - } - if !peer.allowInactive() { - f.disconnect(peer) - } - return 0, nil -} - -// setConnectedBias sets the connection bias, which is applied to already connected clients -// So that already connected client won't be kicked out very soon and we can ensure all -// connected clients can have enough time to request or sync some data. -func (f *clientPool) setConnectedBias(bias time.Duration) { - f.lock.Lock() - defer f.lock.Unlock() - - f.connectedBias = bias - f.pp.SetActiveBias(bias) -} - -// disconnect should be called when a connection is terminated. If the disconnection -// was initiated by the pool itself using disconnectFn then calling disconnect is -// not necessary but permitted. -func (f *clientPool) disconnect(p clientPoolPeer) { - f.disconnectNode(p.Node()) -} - -// disconnectNode removes node fields and flags related to connected status -func (f *clientPool) disconnectNode(node *enode.Node) { - f.ns.SetField(node, connAddressField, nil) - f.ns.SetField(node, clientInfoField, nil) -} - -// setDefaultFactors sets the default price factors applied to subsequently connected clients -func (f *clientPool) setDefaultFactors(posFactors, negFactors vfs.PriceFactors) { - f.lock.Lock() - defer f.lock.Unlock() - - f.defaultPosFactors = posFactors - f.defaultNegFactors = negFactors -} - -// capacityInfo returns the total capacity allowance, the total capacity of connected -// clients and the total capacity of connected and prioritized clients -func (f *clientPool) capacityInfo() (uint64, uint64, uint64) { - f.lock.Lock() - defer f.lock.Unlock() - - // total priority active cap will be supported when the token issuer module is added - _, activeCap := f.pp.Active() - return f.capLimit, activeCap, 0 -} - -// setLimits sets the maximum number and total capacity of connected clients, -// dropping some of them if necessary. 
-func (f *clientPool) setLimits(totalConn int, totalCap uint64) { - f.lock.Lock() - defer f.lock.Unlock() - - f.capLimit = totalCap - f.pp.SetLimits(uint64(totalConn), totalCap) -} - -// setCapacity sets the assigned capacity of a connected client -func (f *clientPool) setCapacity(node *enode.Node, freeID string, capacity uint64, bias time.Duration, setCap bool) (uint64, error) { - c, _ := f.ns.GetField(node, clientInfoField).(*clientInfo) - if c == nil { - if setCap { - return 0, fmt.Errorf("client %064x is not connected", node.ID()) - } - c = &clientInfo{node: node} - f.ns.SetField(node, clientInfoField, c) - f.ns.SetField(node, connAddressField, freeID) - if c.balance, _ = f.ns.GetField(node, f.BalanceField).(*vfs.NodeBalance); c.balance == nil { - log.Error("BalanceField is missing", "node", node.ID()) - return 0, fmt.Errorf("BalanceField of %064x is missing", node.ID()) - } - defer func() { - f.ns.SetField(node, connAddressField, nil) - f.ns.SetField(node, clientInfoField, nil) - }() - } - var ( - minPriority int64 - allowed bool - ) - f.ns.Operation(func() { - if !setCap || c.priority { - // check clientInfo.priority inside Operation to ensure thread safety - minPriority, allowed = f.pp.RequestCapacity(node, capacity, bias, setCap) - } - }) - if allowed { - return 0, nil - } - missing := c.balance.PosBalanceMissing(minPriority, capacity, bias) - if missing < 1 { - // ensure that we never return 0 missing and insufficient priority error - missing = 1 - } - return missing, errNoPriority -} - -// setCapacityLocked is the equivalent of setCapacity used when f.lock is already locked -func (f *clientPool) setCapacityLocked(node *enode.Node, freeID string, capacity uint64, minConnTime time.Duration, setCap bool) (uint64, error) { - f.lock.Lock() - defer f.lock.Unlock() - - return f.setCapacity(node, freeID, capacity, minConnTime, setCap) -} - -// forClients calls the supplied callback for either the listed node IDs or all connected -// nodes. It passes a valid clientInfo to the callback and ensures that the necessary -// fields and flags are set in order for BalanceTracker and PriorityPool to work even if -// the node is not connected. -func (f *clientPool) forClients(ids []enode.ID, cb func(client *clientInfo)) { - f.lock.Lock() - defer f.lock.Unlock() - - if len(ids) == 0 { - f.ns.ForEach(nodestate.Flags{}, nodestate.Flags{}, func(node *enode.Node, state nodestate.Flags) { - c, _ := f.ns.GetField(node, clientInfoField).(*clientInfo) - if c != nil { - cb(c) - } - }) - } else { - for _, id := range ids { - node := f.ns.GetNode(id) - if node == nil { - node = enode.SignNull(&enr.Record{}, id) - } - c, _ := f.ns.GetField(node, clientInfoField).(*clientInfo) - if c != nil { - cb(c) - } else { - c = &clientInfo{node: node} - f.ns.SetField(node, clientInfoField, c) - f.ns.SetField(node, connAddressField, "") - if c.balance, _ = f.ns.GetField(node, f.BalanceField).(*vfs.NodeBalance); c.balance != nil { - cb(c) - } else { - log.Error("BalanceField is missing") - } - f.ns.SetField(node, connAddressField, nil) - f.ns.SetField(node, clientInfoField, nil) - } - } - } -} - -// serveCapQuery serves a vflux capacity query. It receives multiple token amount values -// and a bias time value. For each given token amount it calculates the maximum achievable -// capacity in case the amount is added to the balance. 
-func (f *clientPool) serveCapQuery(id enode.ID, freeID string, data []byte) []byte { - var req vflux.CapacityQueryReq - if rlp.DecodeBytes(data, &req) != nil { - return nil - } - if l := len(req.AddTokens); l == 0 || l > vflux.CapacityQueryMaxLen { - return nil - } - result := make(vflux.CapacityQueryReply, len(req.AddTokens)) - if !f.synced() { - capacityQueryZeroMeter.Mark(1) - reply, _ := rlp.EncodeToBytes(&result) - return reply - } - - node := f.ns.GetNode(id) - if node == nil { - node = enode.SignNull(&enr.Record{}, id) - } - c, _ := f.ns.GetField(node, clientInfoField).(*clientInfo) - if c == nil { - c = &clientInfo{node: node} - f.ns.SetField(node, clientInfoField, c) - f.ns.SetField(node, connAddressField, freeID) - defer func() { - f.ns.SetField(node, connAddressField, nil) - f.ns.SetField(node, clientInfoField, nil) - }() - if c.balance, _ = f.ns.GetField(node, f.BalanceField).(*vfs.NodeBalance); c.balance == nil { - log.Error("BalanceField is missing", "node", node.ID()) - return nil - } - } - // use vfs.CapacityCurve to answer request for multiple newly bought token amounts - curve := f.pp.GetCapacityCurve().Exclude(id) - bias := time.Second * time.Duration(req.Bias) - if f.connectedBias > bias { - bias = f.connectedBias - } - pb, _ := c.balance.GetBalance() - for i, addTokens := range req.AddTokens { - add := addTokens.Int64() - result[i] = curve.MaxCapacity(func(capacity uint64) int64 { - return c.balance.EstimatePriority(capacity, add, 0, bias, false) / int64(capacity) - }) - if add <= 0 && uint64(-add) >= pb && result[i] > f.minCap { - result[i] = f.minCap - } - if result[i] < f.minCap { - result[i] = 0 - } - } - // add first result to metrics (don't care about priority client multi-queries yet) - if result[0] == 0 { - capacityQueryZeroMeter.Mark(1) - } else { - capacityQueryNonZeroMeter.Mark(1) - } - reply, _ := rlp.EncodeToBytes(&result) - return reply -} diff --git a/les/server.go b/les/server.go index 2dbcfbd0b0d7..b170521170cf 100644 --- a/les/server.go +++ b/les/server.go @@ -138,7 +138,8 @@ func NewLesServer(node *node.Node, e ethBackend, config *ethconfig.Config) (*Les } srv.fcManager.SetCapacityLimits(srv.minCapacity, srv.maxCapacity, srv.minCapacity*2) srv.clientPool = vfs.NewClientPool(lesDb, srv.minCapacity, defaultConnectedBias, mclock.System{}, issync) - srv.clientPool.AddMetrics(totalConnectedGauge, clientConnectedMeter, clientDisconnectedMeter, clientActivatedMeter, clientDeactivatedMeter) + srv.clientPool.AddMetrics(totalConnectedGauge, clientConnectedMeter, clientDisconnectedMeter, + clientActivatedMeter, clientDeactivatedMeter, capacityQueryZeroMeter, capacityQueryNonZeroMeter) srv.clientPool.Start() srv.clientPool.SetDefaultFactors(defaultPosFactors, defaultNegFactors) srv.vfluxServer.Register(srv.clientPool, "les", "Ethereum light client service") diff --git a/les/vflux/server/clientpool.go b/les/vflux/server/clientpool.go index dc862503966a..a747adf302af 100644 --- a/les/vflux/server/clientpool.go +++ b/les/vflux/server/clientpool.go @@ -75,6 +75,7 @@ type ClientPool struct { clock mclock.Clock closed bool ns *nodestate.NodeStateMachine + synced func() bool lock sync.RWMutex defaultPosFactors, defaultNegFactors PriceFactors @@ -82,6 +83,8 @@ type ClientPool struct { minCap uint64 // the minimal capacity value allowed for any client capReqNode *enode.Node // node that is requesting capacity change; only used inside NSM operation + + capacityQueryZeroMeter, capacityQueryNonZeroMeter metrics.Meter } // clientPeer represents a peer in the client pool. 
None of the callbacks should block. @@ -96,7 +99,7 @@ type clientPeer interface { type clientPeerInstance struct{ clientPeer } // the NodeStateMachine type system needs this wrapper // NewClientPool creates a new client pool -func NewClientPool(balanceDb ethdb.KeyValueStore, minCap uint64, connectedBias time.Duration, clock mclock.Clock) *ClientPool { +func NewClientPool(balanceDb ethdb.KeyValueStore, minCap uint64, connectedBias time.Duration, clock mclock.Clock, synced func() bool) *ClientPool { ns := nodestate.NewNodeStateMachine(nil, nil, clock, serverSetup) cp := &ClientPool{ ns: ns, @@ -105,6 +108,7 @@ func NewClientPool(balanceDb ethdb.KeyValueStore, minCap uint64, connectedBias t clock: clock, minCap: minCap, connectedBias: connectedBias, + synced: synced, } ns.SubscribeState(nodestate.MergeFlags(ppSetup.ActiveFlag, ppSetup.InactiveFlag, btSetup.PriorityFlag), func(node *enode.Node, oldState, newState nodestate.Flags) { @@ -160,7 +164,9 @@ func NewClientPool(balanceDb ethdb.KeyValueStore, minCap uint64, connectedBias t // AddMetrics adds metrics to the client pool. Should be called before Start(). func (cp *ClientPool) AddMetrics(totalConnectedGauge metrics.Gauge, - clientConnectedMeter, clientDisconnectedMeter, clientActivatedMeter, clientDeactivatedMeter metrics.Meter) { + clientConnectedMeter, clientDisconnectedMeter, clientActivatedMeter, clientDeactivatedMeter, + capacityQueryZeroMeter, capacityQueryNonZeroMeter metrics.Meter) { + cp.ns.SubscribeState(nodestate.MergeFlags(ppSetup.ActiveFlag, ppSetup.InactiveFlag), func(node *enode.Node, oldState, newState nodestate.Flags) { if oldState.IsEmpty() && !newState.IsEmpty() { clientConnectedMeter.Mark(1) @@ -177,6 +183,8 @@ func (cp *ClientPool) AddMetrics(totalConnectedGauge metrics.Gauge, _, connected := cp.Active() totalConnectedGauge.Update(int64(connected)) }) + cp.capacityQueryZeroMeter = capacityQueryZeroMeter + cp.capacityQueryNonZeroMeter = capacityQueryNonZeroMeter } // Start starts the client pool. Should be called before Register/Unregister. 
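From the caller's side the expected order is therefore: construct the pool, attach metrics, start it, register and unregister peers while serving them, and stop it on shutdown. A condensed sketch of that wiring, reusing identifiers from the les/server.go hunk earlier in this patch (lesDb, minCapacity, issync and the meters) and a peer value that implements the clientPeer callbacks; it is a sketch of the intended call order, not a verbatim excerpt:

pool := vfs.NewClientPool(lesDb, minCapacity, defaultConnectedBias, mclock.System{}, issync)
pool.AddMetrics(totalConnectedGauge, clientConnectedMeter, clientDisconnectedMeter,
	clientActivatedMeter, clientDeactivatedMeter, capacityQueryZeroMeter, capacityQueryNonZeroMeter)
pool.Start()
pool.SetDefaultFactors(defaultPosFactors, defaultNegFactors)

balance := pool.Register(peer) // balance handle for the peer, kept e.g. on the peer object
_ = balance
// ... peer is served ...
pool.Unregister(peer)
pool.Stop() // shut the pool down when the server stops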
@@ -309,6 +317,15 @@ func (cp *ClientPool) serveCapQuery(id enode.ID, freeID string, data []byte) []b if l := len(req.AddTokens); l == 0 || l > vflux.CapacityQueryMaxLen { return nil } + result := make(vflux.CapacityQueryReply, len(req.AddTokens)) + if !cp.synced() { + if cp.capacityQueryZeroMeter != nil { + cp.capacityQueryZeroMeter.Mark(1) + } + reply, _ := rlp.EncodeToBytes(&result) + return reply + } + bias := time.Second * time.Duration(req.Bias) cp.lock.RLock() if cp.connectedBias > bias { @@ -318,7 +335,6 @@ func (cp *ClientPool) serveCapQuery(id enode.ID, freeID string, data []byte) []b // use CapacityCurve to answer request for multiple newly bought token amounts curve := cp.GetCapacityCurve().Exclude(id) - result := make(vflux.CapacityQueryReply, len(req.AddTokens)) cp.BalanceOperation(id, freeID, func(balance AtomicBalanceOperator) { pb, _ := balance.GetBalance() for i, addTokens := range req.AddTokens { @@ -334,6 +350,16 @@ func (cp *ClientPool) serveCapQuery(id enode.ID, freeID string, data []byte) []b } } }) + // add first result to metrics (don't care about priority client multi-queries yet) + if result[0] == 0 { + if cp.capacityQueryZeroMeter != nil { + cp.capacityQueryZeroMeter.Mark(1) + } + } else { + if cp.capacityQueryNonZeroMeter != nil { + cp.capacityQueryNonZeroMeter.Mark(1) + } + } reply, _ := rlp.EncodeToBytes(&result) return reply } diff --git a/les/vflux/server/clientpool_test.go b/les/vflux/server/clientpool_test.go index 3eac72fff4ed..c65dc90a7750 100644 --- a/les/vflux/server/clientpool_test.go +++ b/les/vflux/server/clientpool_test.go @@ -130,6 +130,10 @@ func disconnect(pool *ClientPool, peer *poolTestPeer) { pool.Unregister(peer) } +func alwaysTrueFn() bool { + return true +} + func testClientPool(t *testing.T, activeLimit, clientCount, paidCount int, randomDisconnect bool) { rand.Seed(time.Now().UnixNano()) var ( diff --git a/tests/fuzzers/vflux/clientpool-fuzzer.go b/tests/fuzzers/vflux/clientpool-fuzzer.go index ba45c2ddb41d..3d71f5b7b1d8 100644 --- a/tests/fuzzers/vflux/clientpool-fuzzer.go +++ b/tests/fuzzers/vflux/clientpool-fuzzer.go @@ -201,7 +201,7 @@ func FuzzClientPool(input []byte) int { } clock := &mclock.Simulated{} db := memorydb.New() - pool := vfs.NewClientPool(db, 10, f.randomDelay(), clock) + pool := vfs.NewClientPool(db, 10, f.randomDelay(), clock, func() bool { return true }) pool.Start() defer pool.Stop() From c77a70b82fe9706dc7e1547c7f58fa1b7926e423 Mon Sep 17 00:00:00 2001 From: Zsolt Felfoldi Date: Wed, 17 Mar 2021 22:02:20 +0100 Subject: [PATCH 06/27] les/vflux/server: fixed more bugs --- les/vflux/server/balance.go | 11 ++++++----- les/vflux/server/balance_test.go | 6 ++---- les/vflux/server/clientpool.go | 4 ++-- les/vflux/server/prioritypool.go | 16 +++++++++++++--- les/vflux/server/prioritypool_test.go | 14 +++++--------- tests/fuzzers/vflux/clientpool-fuzzer.go | 3 +++ 6 files changed, 31 insertions(+), 23 deletions(-) diff --git a/les/vflux/server/balance.go b/les/vflux/server/balance.go index 4db789b01d9d..6fd2ec4245ad 100644 --- a/les/vflux/server/balance.go +++ b/les/vflux/server/balance.go @@ -320,12 +320,13 @@ func (n *nodeBalance) EstimatePriority(capacity uint64, addBalance int64, future if bias > 0 { b = n.reducedBalance(b, now+mclock.AbsTime(future), bias, capacity, 0) } - pri := n.balanceToPriority(b, capacity) + // Note: we subtract one from the estimated priority in order to ensure that biased + // estimates are always lower than actual priorities, even if the bias is very small. 
+ // This ensures that two nodes will not ping-pong update signals forever if both of + // them have zero estimated priority drop in the projected future. + pri := n.balanceToPriority(b, capacity) - 1 if update { - // Note: always set the threshold to lower than the estimate in order to ensure - // that two nodes will not ping-pong update signals forever if both of them have - // zero estimated priority drop in the projected future - n.addCallback(balanceCallbackUpdate, pri-1, n.signalPriorityUpdate) + n.addCallback(balanceCallbackUpdate, pri, n.signalPriorityUpdate) } return pri } diff --git a/les/vflux/server/balance_test.go b/les/vflux/server/balance_test.go index 72231d6469f3..b723a699c7a7 100644 --- a/les/vflux/server/balance_test.go +++ b/les/vflux/server/balance_test.go @@ -32,7 +32,6 @@ import ( ) var ( - testFlag = testSetup.NewFlag("testFlag") btClientField = testSetup.NewField("clientField", reflect.TypeOf(balanceTestClient{})) btTestSetup = NewBalanceTrackerSetup(testSetup) ) @@ -74,7 +73,6 @@ func (btc balanceTestClient) FreeClientId() string { func (b *balanceTestSetup) newNode(capacity uint64) *nodeBalance { node := enode.SignNull(&enr.Record{}, enode.ID{}) - b.ns.SetState(node, testFlag, nodestate.Flags{}, 0) b.ns.SetField(node, btTestSetup.clientField, balanceTestClient{}) if capacity != 0 { b.ns.SetField(node, ppTestSetup.CapacityField, capacity) @@ -293,8 +291,8 @@ func TestEstimatedPriority(t *testing.T) { b.clock.Run(i.runTime) node.RequestServed(i.reqCost) priority := node.EstimatePriority(1000000000, 0, i.futureTime, 0, false) - if priority != i.priority { - t.Fatalf("Estimated priority mismatch, want %v, got %v", i.priority, priority) + if priority != i.priority-1 { + t.Fatalf("Estimated priority mismatch, want %v, got %v", i.priority-1, priority) } } } diff --git a/les/vflux/server/clientpool.go b/les/vflux/server/clientpool.go index a747adf302af..de090994432c 100644 --- a/les/vflux/server/clientpool.go +++ b/les/vflux/server/clientpool.go @@ -278,7 +278,7 @@ func (cp *ClientPool) SetCapacity(node *enode.Node, reqCap uint64, bias time.Dur // estimate maximum available capacity at the current priority level and request // the estimated amount; allow a limited number of retries because individual // balances can change between the estimation and the request - for count := 0; count < 100; count++ { + for count := 0; count < 20; count++ { // apply a small extra bias to ensure that the request won't fail because of rounding errors curveBias += time.Second * 10 tryCap := reqCap @@ -300,7 +300,7 @@ func (cp *ClientPool) SetCapacity(node *enode.Node, reqCap uint64, bias time.Dur } } // we should be able to find the maximum allowed capacity in a few iterations - log.Crit("Unable to find maximum allowed capacity") + log.Error("Unable to find maximum allowed capacity") err = ErrCantFindMaximum }) return diff --git a/les/vflux/server/prioritypool.go b/les/vflux/server/prioritypool.go index 5c0fcc0ea578..7b6d32747b83 100644 --- a/les/vflux/server/prioritypool.go +++ b/les/vflux/server/prioritypool.go @@ -129,6 +129,9 @@ func NewPriorityPool(ns *nodestate.NodeStateMachine, setup PriorityPoolSetup, cl activeBias: activeBias, capacityStepDiv: capacityStepDiv, } + if pp.activeBias < time.Duration(1) { + pp.activeBias = time.Duration(1) + } pp.activeQueue = prque.NewLazyQueue(activeSetIndex, activePriority, pp.activeMaxPriority, clock, lazyQueueRefresh) ns.SubscribeField(pp.priorityField, func(node *enode.Node, state nodestate.Flags, oldValue, newValue interface{}) { @@ -213,7 
+216,7 @@ func (pp *PriorityPool) RequestCapacity(node *enode.Node, targetCap uint64, bias _, minPriority = pp.enforceLimits() // if capacity update is possible now then minPriority == math.MinInt64 // if it is not possible at all then minPriority == math.MaxInt64 - allowed = priority > minPriority + allowed = priority >= minPriority updates = pp.finalizeChanges(setCap && allowed) return } @@ -243,10 +246,17 @@ func (pp *PriorityPool) SetLimits(maxCount, maxCap uint64) { // SetActiveBias sets the bias applied when trying to activate inactive nodes func (pp *PriorityPool) SetActiveBias(bias time.Duration) { pp.lock.Lock() - defer pp.lock.Unlock() + var updates []capUpdate + defer func() { + pp.lock.Unlock() + pp.ns.Operation(func() { pp.updateFlags(updates) }) + }() pp.activeBias = bias - pp.tryActivate() + if pp.activeBias < time.Duration(1) { + pp.activeBias = time.Duration(1) + } + updates = pp.tryActivate() } // Active returns the number and total capacity of currently active nodes diff --git a/les/vflux/server/prioritypool_test.go b/les/vflux/server/prioritypool_test.go index d83ddc17679c..1b04abc7b6d4 100644 --- a/les/vflux/server/prioritypool_test.go +++ b/les/vflux/server/prioritypool_test.go @@ -30,14 +30,12 @@ import ( var ( testSetup = &nodestate.Setup{} - ppTestClientFlag = testSetup.NewFlag("ppTestClientFlag") ppTestClientField = testSetup.NewField("ppTestClient", reflect.TypeOf(&ppTestClient{})) - ppUpdateFlag = testSetup.NewFlag("ppUpdateFlag") ppTestSetup = NewPriorityPoolSetup(testSetup) ) func init() { - ppTestSetup.Connect(ppTestClientField, ppUpdateFlag) + ppTestSetup.Connect(ppTestClientField, btTestSetup.UpdateFlag) } const ( @@ -101,7 +99,6 @@ func TestPriorityPool(t *testing.T) { } sumBalance += c.balance clients[i] = c - ns.SetState(c.node, ppTestClientFlag, nodestate.Flags{}, 0) ns.SetField(c.node, ppTestSetup.priorityField, c) ns.SetState(c.node, ppTestSetup.InactiveFlag, nodestate.Flags{}, 0) raise(c) @@ -113,8 +110,8 @@ func TestPriorityPool(t *testing.T) { oldBalance := c.balance c.balance = uint64(rand.Int63n(100000000000) + 100000000000) sumBalance += c.balance - oldBalance - pp.ns.SetState(c.node, ppUpdateFlag, nodestate.Flags{}, 0) - pp.ns.SetState(c.node, nodestate.Flags{}, ppUpdateFlag, 0) + pp.ns.SetState(c.node, btTestSetup.UpdateFlag, nodestate.Flags{}, 0) + pp.ns.SetState(c.node, nodestate.Flags{}, btTestSetup.UpdateFlag, 0) if c.balance > oldBalance { raise(c) } else { @@ -162,8 +159,8 @@ func TestPriorityPool(t *testing.T) { } c.balance -= add sumBalance -= add - pp.ns.SetState(c.node, ppUpdateFlag, nodestate.Flags{}, 0) - pp.ns.SetState(c.node, nodestate.Flags{}, ppUpdateFlag, 0) + pp.ns.SetState(c.node, btTestSetup.UpdateFlag, nodestate.Flags{}, 0) + pp.ns.SetState(c.node, nodestate.Flags{}, btTestSetup.UpdateFlag, 0) for _, c := range clients { raise(c) } @@ -188,7 +185,6 @@ func TestCapacityCurve(t *testing.T) { cap: 1000000, } clients[i] = c - ns.SetState(c.node, ppTestClientFlag, nodestate.Flags{}, 0) ns.SetField(c.node, ppTestSetup.priorityField, c) ns.SetState(c.node, ppTestSetup.InactiveFlag, nodestate.Flags{}, 0) ns.Operation(func() { diff --git a/tests/fuzzers/vflux/clientpool-fuzzer.go b/tests/fuzzers/vflux/clientpool-fuzzer.go index 3d71f5b7b1d8..cd43362c08c9 100644 --- a/tests/fuzzers/vflux/clientpool-fuzzer.go +++ b/tests/fuzzers/vflux/clientpool-fuzzer.go @@ -278,6 +278,9 @@ func FuzzClientPool(input []byte) int { clock.Run(d) } //fmt.Println(f.activeCount, f.maxCount, f.activeCap, f.maxCap) + if activeCount, activeCap := 
pool.Active(); activeCount != f.activeCount || activeCap != f.activeCap { + panic(nil) + } if f.activeCount > f.maxCount || f.activeCap > f.maxCap { panic(nil) } From 89c45fac5cd4a5edf6622575d7a5b70e2e68c5dd Mon Sep 17 00:00:00 2001 From: Zsolt Felfoldi Date: Wed, 17 Mar 2021 22:47:05 +0100 Subject: [PATCH 07/27] les/vflux/server: unexported NodeStateMachine fields and flags --- les/vflux/server/balance.go | 12 ++--- les/vflux/server/balance_test.go | 20 ++++---- les/vflux/server/balance_tracker.go | 58 +++++++++++----------- les/vflux/server/clientpool.go | 44 ++++++++--------- les/vflux/server/clientpool_test.go | 6 +-- les/vflux/server/prioritypool.go | 70 +++++++++++++-------------- les/vflux/server/prioritypool_test.go | 18 +++---- 7 files changed, 114 insertions(+), 114 deletions(-) diff --git a/les/vflux/server/balance.go b/les/vflux/server/balance.go index 6fd2ec4245ad..e93e05c48ca9 100644 --- a/les/vflux/server/balance.go +++ b/les/vflux/server/balance.go @@ -184,7 +184,7 @@ func (n *nodeBalance) AddBalance(amount int64) (uint64, uint64, error) { } if n.setFlags { if setPriority { - n.bt.ns.SetStateSub(n.node, n.bt.PriorityFlag, nodestate.Flags{}, 0) + n.bt.ns.SetStateSub(n.node, n.bt.priorityFlag, nodestate.Flags{}, 0) } n.signalPriorityUpdate() } @@ -223,7 +223,7 @@ func (n *nodeBalance) SetBalance(pos, neg uint64) error { } if n.setFlags { if setPriority { - n.bt.ns.SetStateSub(n.node, n.bt.PriorityFlag, nodestate.Flags{}, 0) + n.bt.ns.SetStateSub(n.node, n.bt.priorityFlag, nodestate.Flags{}, 0) } n.signalPriorityUpdate() } @@ -290,7 +290,7 @@ func (n *nodeBalance) Priority(capacity uint64) int64 { // EstMinPriority gives a lower estimate for the priority at a given time in the future. // An average request cost per time is assumed that is twice the average cost per time // in the current session. -// If update is true then a priority callback is added that turns UpdateFlag on and off +// If update is true then a priority callback is added that turns updateFlag on and off // in case the priority goes below the estimated minimum. 
func (n *nodeBalance) EstimatePriority(capacity uint64, addBalance int64, future, bias time.Duration, update bool) int64 { n.lock.Lock() @@ -522,7 +522,7 @@ func (n *nodeBalance) balanceExhausted() { n.priority = false n.lock.Unlock() if n.setFlags { - n.bt.ns.SetStateSub(n.node, nodestate.Flags{}, n.bt.PriorityFlag, 0) + n.bt.ns.SetStateSub(n.node, nodestate.Flags{}, n.bt.priorityFlag, 0) } } @@ -541,8 +541,8 @@ func (n *nodeBalance) checkPriorityStatus() bool { // signalPriorityUpdate signals that the priority fell below the previous minimum estimate // Note: this function should run inside a NodeStateMachine operation func (n *nodeBalance) signalPriorityUpdate() { - n.bt.ns.SetStateSub(n.node, n.bt.UpdateFlag, nodestate.Flags{}, 0) - n.bt.ns.SetStateSub(n.node, nodestate.Flags{}, n.bt.UpdateFlag, 0) + n.bt.ns.SetStateSub(n.node, n.bt.updateFlag, nodestate.Flags{}, 0) + n.bt.ns.SetStateSub(n.node, nodestate.Flags{}, n.bt.updateFlag, 0) } // setCapacity updates the capacity value used for priority calculation diff --git a/les/vflux/server/balance_test.go b/les/vflux/server/balance_test.go index b723a699c7a7..b7ca5985abe7 100644 --- a/les/vflux/server/balance_test.go +++ b/les/vflux/server/balance_test.go @@ -33,11 +33,11 @@ import ( var ( btClientField = testSetup.NewField("clientField", reflect.TypeOf(balanceTestClient{})) - btTestSetup = NewBalanceTrackerSetup(testSetup) + btTestSetup = newBalanceTrackerSetup(testSetup) ) func init() { - btTestSetup.Connect(btClientField, ppTestSetup.CapacityField) + btTestSetup.connect(btClientField, ppTestSetup.capacityField) } type zeroExpirer struct{} @@ -75,9 +75,9 @@ func (b *balanceTestSetup) newNode(capacity uint64) *nodeBalance { node := enode.SignNull(&enr.Record{}, enode.ID{}) b.ns.SetField(node, btTestSetup.clientField, balanceTestClient{}) if capacity != 0 { - b.ns.SetField(node, ppTestSetup.CapacityField, capacity) + b.ns.SetField(node, ppTestSetup.capacityField, capacity) } - n, _ := b.ns.GetField(node, btTestSetup.BalanceField).(*nodeBalance) + n, _ := b.ns.GetField(node, btTestSetup.balanceField).(*nodeBalance) return n } @@ -166,7 +166,7 @@ func TestBalanceTimeCost(t *testing.T) { defer b.stop() node := b.newNode(1000) - b.ns.SetField(node.node, ppTestSetup.CapacityField, uint64(1)) + b.ns.SetField(node.node, ppTestSetup.capacityField, uint64(1)) node.SetPriceFactors(PriceFactors{1, 0, 1}, PriceFactors{1, 0, 1}) b.setBalance(node, uint64(time.Minute), 0) // 1 minute time allowance @@ -208,7 +208,7 @@ func TestBalanceReqCost(t *testing.T) { node := b.newNode(1000) node.SetPriceFactors(PriceFactors{1, 0, 1}, PriceFactors{1, 0, 1}) - b.ns.SetField(node.node, ppTestSetup.CapacityField, uint64(1)) + b.ns.SetField(node.node, ppTestSetup.capacityField, uint64(1)) b.setBalance(node, uint64(time.Minute), 0) // 1 minute time serving time allowance var inputs = []struct { reqCost uint64 @@ -262,7 +262,7 @@ func TestEstimatedPriority(t *testing.T) { node := b.newNode(1000000000) node.SetPriceFactors(PriceFactors{1, 0, 1}, PriceFactors{1, 0, 1}) - b.ns.SetField(node.node, ppTestSetup.CapacityField, uint64(1)) + b.ns.SetField(node.node, ppTestSetup.capacityField, uint64(1)) b.setBalance(node, uint64(time.Minute), 0) var inputs = []struct { runTime time.Duration // time cost @@ -322,7 +322,7 @@ func TestPostiveBalanceCounting(t *testing.T) { // Change client status for i := 0; i < 100; i += 1 { if rand.Intn(2) == 0 { - b.ns.SetField(nodes[i].node, ppTestSetup.CapacityField, uint64(1)) + b.ns.SetField(nodes[i].node, ppTestSetup.capacityField, 
uint64(1)) } } if b.bt.TotalTokenAmount() != sum { @@ -330,7 +330,7 @@ func TestPostiveBalanceCounting(t *testing.T) { } for i := 0; i < 100; i += 1 { if rand.Intn(2) == 0 { - b.ns.SetField(nodes[i].node, ppTestSetup.CapacityField, uint64(1)) + b.ns.SetField(nodes[i].node, ppTestSetup.capacityField, uint64(1)) } } if b.bt.TotalTokenAmount() != sum { @@ -366,7 +366,7 @@ func TestCallback(t *testing.T) { defer b.stop() node := b.newNode(1000) node.SetPriceFactors(PriceFactors{1, 0, 1}, PriceFactors{1, 0, 1}) - b.ns.SetField(node.node, ppTestSetup.CapacityField, uint64(1)) + b.ns.SetField(node.node, ppTestSetup.capacityField, uint64(1)) callCh := make(chan struct{}, 1) b.setBalance(node, uint64(time.Minute), 0) diff --git a/les/vflux/server/balance_tracker.go b/les/vflux/server/balance_tracker.go index 437aba6e52a9..350e354b237e 100644 --- a/les/vflux/server/balance_tracker.go +++ b/les/vflux/server/balance_tracker.go @@ -35,32 +35,32 @@ const ( persistExpirationRefresh = time.Minute * 5 // refresh period of the token expiration persistence ) -// BalanceTrackerSetup contains node state flags and fields used by BalanceTracker -type BalanceTrackerSetup struct { +// balanceTrackerSetup contains node state flags and fields used by BalanceTracker +type balanceTrackerSetup struct { // controlled by PriorityPool - PriorityFlag, UpdateFlag nodestate.Flags - BalanceField nodestate.Field + priorityFlag, updateFlag nodestate.Flags + balanceField nodestate.Field // external connections clientField, capacityField nodestate.Field } -// NewBalanceTrackerSetup creates a new BalanceTrackerSetup and initializes the fields +// newBalanceTrackerSetup creates a new balanceTrackerSetup and initializes the fields // and flags controlled by BalanceTracker -func NewBalanceTrackerSetup(setup *nodestate.Setup) BalanceTrackerSetup { - return BalanceTrackerSetup{ - // PriorityFlag is set if the node has a positive balance - PriorityFlag: setup.NewFlag("priorityNode"), - // UpdateFlag set and then immediately reset if the balance has been updated and +func newBalanceTrackerSetup(setup *nodestate.Setup) balanceTrackerSetup { + return balanceTrackerSetup{ + // priorityFlag is set if the node has a positive balance + priorityFlag: setup.NewFlag("priorityNode"), + // updateFlag set and then immediately reset if the balance has been updated and // therefore priority is suddenly changed - UpdateFlag: setup.NewFlag("balanceUpdate"), - // BalanceField contains the nodeBalance struct which implements nodePriority, + updateFlag: setup.NewFlag("balanceUpdate"), + // balanceField contains the nodeBalance struct which implements nodePriority, // allowing on-demand priority calculation and future priority estimation - BalanceField: setup.NewField("balance", reflect.TypeOf(&nodeBalance{})), + balanceField: setup.NewField("balance", reflect.TypeOf(&nodeBalance{})), } } -// Connect sets the fields used by BalanceTracker as an input -func (bts *BalanceTrackerSetup) Connect(clientField, capacityField nodestate.Field) { +// connect sets the fields used by BalanceTracker as an input +func (bts *balanceTrackerSetup) connect(clientField, capacityField nodestate.Field) { bts.clientField = clientField bts.capacityField = capacityField } @@ -74,7 +74,7 @@ func (bts *BalanceTrackerSetup) Connect(clientField, capacityField nodestate.Fie // The two balances are translated into a single priority value that also depends // on the actual capacity. 
type BalanceTracker struct { - BalanceTrackerSetup + balanceTrackerSetup clock mclock.Clock lock sync.Mutex ns *nodestate.NodeStateMachine @@ -92,11 +92,11 @@ type balancePeer interface { } // NewBalanceTracker creates a new BalanceTracker -func NewBalanceTracker(ns *nodestate.NodeStateMachine, setup BalanceTrackerSetup, db ethdb.KeyValueStore, clock mclock.Clock, posExp, negExp utils.ValueExpirer) *BalanceTracker { +func NewBalanceTracker(ns *nodestate.NodeStateMachine, setup balanceTrackerSetup, db ethdb.KeyValueStore, clock mclock.Clock, posExp, negExp utils.ValueExpirer) *BalanceTracker { ndb := newNodeDB(db, clock) bt := &BalanceTracker{ ns: ns, - BalanceTrackerSetup: setup, + balanceTrackerSetup: setup, ndb: ndb, clock: clock, posExp: posExp, @@ -114,7 +114,7 @@ func NewBalanceTracker(ns *nodestate.NodeStateMachine, setup BalanceTrackerSetup }) ns.SubscribeField(bt.capacityField, func(node *enode.Node, state nodestate.Flags, oldValue, newValue interface{}) { - n, _ := ns.GetField(node, bt.BalanceField).(*nodeBalance) + n, _ := ns.GetField(node, bt.balanceField).(*nodeBalance) if n == nil { return } @@ -133,13 +133,13 @@ func NewBalanceTracker(ns *nodestate.NodeStateMachine, setup BalanceTrackerSetup }) ns.SubscribeField(bt.clientField, func(node *enode.Node, state nodestate.Flags, oldValue, newValue interface{}) { if newValue != nil { - ns.SetFieldSub(node, bt.BalanceField, bt.newNodeBalance(node, newValue.(balancePeer).FreeClientId(), true)) + ns.SetFieldSub(node, bt.balanceField, bt.newNodeBalance(node, newValue.(balancePeer).FreeClientId(), true)) } else { - ns.SetStateSub(node, nodestate.Flags{}, bt.PriorityFlag, 0) - if b, _ := ns.GetField(node, bt.BalanceField).(*nodeBalance); b != nil { + ns.SetStateSub(node, nodestate.Flags{}, bt.priorityFlag, 0) + if b, _ := ns.GetField(node, bt.balanceField).(*nodeBalance); b != nil { b.deactivate() } - ns.SetFieldSub(node, bt.BalanceField, nil) + ns.SetFieldSub(node, bt.balanceField, nil) } }) @@ -168,11 +168,11 @@ func (bt *BalanceTracker) Stop() { bt.ndb.setExpiration(bt.posExp.LogOffset(now), bt.negExp.LogOffset(now)) close(bt.quit) bt.ns.ForEach(nodestate.Flags{}, nodestate.Flags{}, func(node *enode.Node, state nodestate.Flags) { - if n, ok := bt.ns.GetField(node, bt.BalanceField).(*nodeBalance); ok { + if n, ok := bt.ns.GetField(node, bt.balanceField).(*nodeBalance); ok { n.lock.Lock() n.storeBalance(true, true) n.lock.Unlock() - bt.ns.SetField(node, bt.BalanceField, nil) + bt.ns.SetField(node, bt.balanceField, nil) } }) bt.ndb.close() @@ -186,7 +186,7 @@ func (bt *BalanceTracker) TotalTokenAmount() uint64 { bt.balanceTimer.Update(func(_ time.Duration) bool { bt.active = utils.ExpiredValue{} bt.ns.ForEach(nodestate.Flags{}, nodestate.Flags{}, func(node *enode.Node, state nodestate.Flags) { - if n, ok := bt.ns.GetField(node, bt.BalanceField).(*nodeBalance); ok && n.active { + if n, ok := bt.ns.GetField(node, bt.balanceField).(*nodeBalance); ok && n.active { pos, _ := n.GetRawBalance() bt.active.AddExp(pos) } @@ -239,7 +239,7 @@ func (bt *BalanceTracker) BalanceOperation(id enode.ID, negBalanceKey string, cb node := bt.ns.GetNode(id) var nb *nodeBalance if node != nil { - nb, _ = bt.ns.GetField(node, bt.BalanceField).(*nodeBalance) + nb, _ = bt.ns.GetField(node, bt.balanceField).(*nodeBalance) } else { node = enode.SignNull(&enr.Record{}, id) } @@ -251,7 +251,7 @@ func (bt *BalanceTracker) BalanceOperation(id enode.ID, negBalanceKey string, cb } // newNodeBalance loads balances from the database and creates a nodeBalance instance -// 
for the given node. It also sets the PriorityFlag and adds balanceCallbackZero if +// for the given node. It also sets the priorityFlag and adds balanceCallbackZero if // the node has a positive balance. // Note: this function should run inside a NodeStateMachine operation func (bt *BalanceTracker) newNodeBalance(node *enode.Node, negBalanceKey string, setFlags bool) *nodeBalance { @@ -270,7 +270,7 @@ func (bt *BalanceTracker) newNodeBalance(node *enode.Node, negBalanceKey string, n.callbackIndex[i] = -1 } if setFlags && n.checkPriorityStatus() { - n.bt.ns.SetStateSub(n.node, n.bt.PriorityFlag, nodestate.Flags{}, 0) + n.bt.ns.SetStateSub(n.node, n.bt.priorityFlag, nodestate.Flags{}, 0) } return n } diff --git a/les/vflux/server/clientpool.go b/les/vflux/server/clientpool.go index de090994432c..e69bb931864a 100644 --- a/les/vflux/server/clientpool.go +++ b/les/vflux/server/clientpool.go @@ -36,8 +36,8 @@ import ( var ( serverSetup = &nodestate.Setup{} clientField = serverSetup.NewField("client", reflect.TypeOf(clientPeerInstance{})) - btSetup = NewBalanceTrackerSetup(serverSetup) - ppSetup = NewPriorityPoolSetup(serverSetup) + btSetup = newBalanceTrackerSetup(serverSetup) + ppSetup = newPriorityPoolSetup(serverSetup) ) var ( @@ -47,8 +47,8 @@ var ( ) func init() { - btSetup.Connect(clientField, ppSetup.CapacityField) - ppSetup.Connect(btSetup.BalanceField, btSetup.UpdateFlag) // nodeBalance implements nodePriority + btSetup.connect(clientField, ppSetup.capacityField) + ppSetup.connect(btSetup.balanceField, btSetup.updateFlag) // nodeBalance implements nodePriority } // ClientPool implements a client database that assigns a priority to each client @@ -111,28 +111,28 @@ func NewClientPool(balanceDb ethdb.KeyValueStore, minCap uint64, connectedBias t synced: synced, } - ns.SubscribeState(nodestate.MergeFlags(ppSetup.ActiveFlag, ppSetup.InactiveFlag, btSetup.PriorityFlag), func(node *enode.Node, oldState, newState nodestate.Flags) { - if newState.Equals(ppSetup.InactiveFlag) { + ns.SubscribeState(nodestate.MergeFlags(ppSetup.activeFlag, ppSetup.inactiveFlag, btSetup.priorityFlag), func(node *enode.Node, oldState, newState nodestate.Flags) { + if newState.Equals(ppSetup.inactiveFlag) { // set timeout for non-priority inactive client var timeout time.Duration if c, ok := ns.GetField(node, clientField).(clientPeer); ok { timeout = c.InactiveTimeout() } if timeout > 0 { - ns.AddTimeout(node, ppSetup.InactiveFlag, timeout) + ns.AddTimeout(node, ppSetup.inactiveFlag, timeout) } else { // Note: if capacity is immediately available then PriorityPool will set the active // flag simultaneously with removing the inactive flag and therefore this will not // initiate disconnection - ns.SetStateSub(node, nodestate.Flags{}, ppSetup.InactiveFlag, 0) + ns.SetStateSub(node, nodestate.Flags{}, ppSetup.inactiveFlag, 0) } } - if oldState.Equals(ppSetup.InactiveFlag) && newState.Equals(ppSetup.InactiveFlag.Or(btSetup.PriorityFlag)) { - ns.SetStateSub(node, ppSetup.InactiveFlag, nodestate.Flags{}, 0) // priority gained; remove timeout + if oldState.Equals(ppSetup.inactiveFlag) && newState.Equals(ppSetup.inactiveFlag.Or(btSetup.priorityFlag)) { + ns.SetStateSub(node, ppSetup.inactiveFlag, nodestate.Flags{}, 0) // priority gained; remove timeout } - if newState.Equals(ppSetup.ActiveFlag) { + if newState.Equals(ppSetup.activeFlag) { // active with no priority; limit capacity to minCap - cap, _ := ns.GetField(node, ppSetup.CapacityField).(uint64) + cap, _ := ns.GetField(node, ppSetup.capacityField).(uint64) if cap > 
minCap { cp.RequestCapacity(node, minCap, 0, true) } @@ -144,16 +144,16 @@ func NewClientPool(balanceDb ethdb.KeyValueStore, minCap uint64, connectedBias t } }) - ns.SubscribeField(btSetup.BalanceField, func(node *enode.Node, state nodestate.Flags, oldValue, newValue interface{}) { + ns.SubscribeField(btSetup.balanceField, func(node *enode.Node, state nodestate.Flags, oldValue, newValue interface{}) { if newValue != nil { - ns.SetStateSub(node, ppSetup.InactiveFlag, nodestate.Flags{}, 0) + ns.SetStateSub(node, ppSetup.inactiveFlag, nodestate.Flags{}, 0) cp.lock.RLock() newValue.(*nodeBalance).SetPriceFactors(cp.defaultPosFactors, cp.defaultNegFactors) cp.lock.RUnlock() } }) - ns.SubscribeField(ppSetup.CapacityField, func(node *enode.Node, state nodestate.Flags, oldValue, newValue interface{}) { + ns.SubscribeField(ppSetup.capacityField, func(node *enode.Node, state nodestate.Flags, oldValue, newValue interface{}) { if c, ok := ns.GetField(node, clientField).(clientPeer); ok { newCap, _ := newValue.(uint64) c.UpdateCapacity(newCap, node == cp.capReqNode) @@ -167,17 +167,17 @@ func (cp *ClientPool) AddMetrics(totalConnectedGauge metrics.Gauge, clientConnectedMeter, clientDisconnectedMeter, clientActivatedMeter, clientDeactivatedMeter, capacityQueryZeroMeter, capacityQueryNonZeroMeter metrics.Meter) { - cp.ns.SubscribeState(nodestate.MergeFlags(ppSetup.ActiveFlag, ppSetup.InactiveFlag), func(node *enode.Node, oldState, newState nodestate.Flags) { + cp.ns.SubscribeState(nodestate.MergeFlags(ppSetup.activeFlag, ppSetup.inactiveFlag), func(node *enode.Node, oldState, newState nodestate.Flags) { if oldState.IsEmpty() && !newState.IsEmpty() { clientConnectedMeter.Mark(1) } if !oldState.IsEmpty() && newState.IsEmpty() { clientDisconnectedMeter.Mark(1) } - if oldState.HasNone(ppSetup.ActiveFlag) && oldState.HasAll(ppSetup.ActiveFlag) { + if oldState.HasNone(ppSetup.activeFlag) && oldState.HasAll(ppSetup.activeFlag) { clientActivatedMeter.Mark(1) } - if oldState.HasAll(ppSetup.ActiveFlag) && oldState.HasNone(ppSetup.ActiveFlag) { + if oldState.HasAll(ppSetup.activeFlag) && oldState.HasNone(ppSetup.activeFlag) { clientDeactivatedMeter.Mark(1) } _, connected := cp.Active() @@ -204,7 +204,7 @@ func (cp *ClientPool) Stop() { // disconnected by calling the Disconnect function of the clientPeer interface. 
func (cp *ClientPool) Register(peer clientPeer) ConnectedBalance { cp.ns.SetField(peer.Node(), clientField, clientPeerInstance{peer}) - balance, _ := cp.ns.GetField(peer.Node(), btSetup.BalanceField).(*nodeBalance) + balance, _ := cp.ns.GetField(peer.Node(), btSetup.balanceField).(*nodeBalance) return balance } @@ -240,12 +240,12 @@ func (cp *ClientPool) SetCapacity(node *enode.Node, reqCap uint64, bias time.Dur cp.lock.RUnlock() cp.ns.Operation(func() { - balance, _ := cp.ns.GetField(node, btSetup.BalanceField).(*nodeBalance) + balance, _ := cp.ns.GetField(node, btSetup.balanceField).(*nodeBalance) if balance == nil { err = ErrNotConnected return } - capacity, _ = cp.ns.GetField(node, ppSetup.CapacityField).(uint64) + capacity, _ = cp.ns.GetField(node, ppSetup.capacityField).(uint64) if capacity == 0 { // if the client is inactive then it has insufficient priority for the minimal capacity // (will be activated automatically with minCap when possible) @@ -257,7 +257,7 @@ func (cp *ClientPool) SetCapacity(node *enode.Node, reqCap uint64, bias time.Dur reqCap = cp.minCap } if reqCap > cp.minCap { - if cp.ns.GetState(node).HasNone(btSetup.PriorityFlag) && reqCap > cp.minCap { + if cp.ns.GetState(node).HasNone(btSetup.priorityFlag) && reqCap > cp.minCap { err = ErrNoPriority return } diff --git a/les/vflux/server/clientpool_test.go b/les/vflux/server/clientpool_test.go index c65dc90a7750..296c025d4bc8 100644 --- a/les/vflux/server/clientpool_test.go +++ b/les/vflux/server/clientpool_test.go @@ -274,7 +274,7 @@ func TestConnectPaidClientToSmallPool(t *testing.T) { // Add balance for an external client and mark it as paid client addBalance(pool, newPoolTestPeer(0, nil).node.ID(), int64(time.Minute)) - // Connect a fat paid client to pool, should reject it. + // connect a fat paid client to pool, should reject it. testPriorityConnect(t, pool, newPoolTestPeer(0, nil), 100, false) } @@ -582,8 +582,8 @@ func TestInactiveClient(t *testing.T) { } clock.Run(time.Second * 600) // manually trigger a check to avoid a long real-time wait - pool.ns.SetState(p1.node, btSetup.UpdateFlag, nodestate.Flags{}, 0) - pool.ns.SetState(p1.node, nodestate.Flags{}, btSetup.UpdateFlag, 0) + pool.ns.SetState(p1.node, btSetup.updateFlag, nodestate.Flags{}, 0) + pool.ns.SetState(p1.node, nodestate.Flags{}, btSetup.updateFlag, 0) // p1: 1000 p2: 500 p3: 2000 p4: 900 if p1.cap != 1 { t.Fatalf("Failed to activate peer #1") diff --git a/les/vflux/server/prioritypool.go b/les/vflux/server/prioritypool.go index 7b6d32747b83..3894d6dd3576 100644 --- a/les/vflux/server/prioritypool.go +++ b/les/vflux/server/prioritypool.go @@ -33,31 +33,31 @@ const ( lazyQueueRefresh = time.Second * 10 // refresh period of the active queue ) -// PriorityPoolSetup contains node state flags and fields used by PriorityPool -// Note: ActiveFlag and InactiveFlag can be controlled both externally and by the pool, +// priorityPoolSetup contains node state flags and fields used by PriorityPool +// Note: activeFlag and inactiveFlag can be controlled both externally and by the pool, // see PriorityPool description for details. 
-type PriorityPoolSetup struct { +type priorityPoolSetup struct { // controlled by PriorityPool - ActiveFlag, InactiveFlag nodestate.Flags - CapacityField, ppNodeInfoField nodestate.Field + activeFlag, inactiveFlag nodestate.Flags + capacityField, ppNodeInfoField nodestate.Field // external connections updateFlag nodestate.Flags priorityField nodestate.Field } -// NewPriorityPoolSetup creates a new PriorityPoolSetup and initializes the fields +// newPriorityPoolSetup creates a new priorityPoolSetup and initializes the fields // and flags controlled by PriorityPool -func NewPriorityPoolSetup(setup *nodestate.Setup) PriorityPoolSetup { - return PriorityPoolSetup{ - ActiveFlag: setup.NewFlag("active"), - InactiveFlag: setup.NewFlag("inactive"), - CapacityField: setup.NewField("capacity", reflect.TypeOf(uint64(0))), +func newPriorityPoolSetup(setup *nodestate.Setup) priorityPoolSetup { + return priorityPoolSetup{ + activeFlag: setup.NewFlag("active"), + inactiveFlag: setup.NewFlag("inactive"), + capacityField: setup.NewField("capacity", reflect.TypeOf(uint64(0))), ppNodeInfoField: setup.NewField("ppNodeInfo", reflect.TypeOf(&ppNodeInfo{})), } } -// Connect sets the fields and flags used by PriorityPool as an input -func (pps *PriorityPoolSetup) Connect(priorityField nodestate.Field, updateFlag nodestate.Flags) { +// connect sets the fields and flags used by PriorityPool as an input +func (pps *priorityPoolSetup) connect(priorityField nodestate.Field, updateFlag nodestate.Flags) { pps.priorityField = priorityField // should implement nodePriority pps.updateFlag = updateFlag // triggers an immediate priority update } @@ -79,17 +79,17 @@ func (pps *PriorityPoolSetup) Connect(priorityField nodestate.Field, updateFlag // This time bias can be interpreted as minimum expected active time at the given // capacity (if the threshold priority stays the same). // -// Nodes in the pool always have either InactiveFlag or ActiveFlag set. A new node is -// added to the pool by externally setting InactiveFlag. PriorityPool can switch a node -// between InactiveFlag and ActiveFlag at any time. Nodes can be removed from the pool -// by externally resetting both flags. ActiveFlag should not be set externally. +// Nodes in the pool always have either inactiveFlag or activeFlag set. A new node is +// added to the pool by externally setting inactiveFlag. PriorityPool can switch a node +// between inactiveFlag and activeFlag at any time. Nodes can be removed from the pool +// by externally resetting both flags. activeFlag should not be set externally. // // The highest priority nodes in "inactive" state are moved to "active" state as soon as // the minimum capacity can be granted for them. The capacity of lower priority active // nodes is reduced or they are demoted to "inactive" state if their priority is // insufficient even at minimal capacity. 
type PriorityPool struct { - PriorityPoolSetup + priorityPoolSetup ns *nodestate.NodeStateMachine clock mclock.Clock lock sync.Mutex @@ -119,10 +119,10 @@ type ppNodeInfo struct { } // NewPriorityPool creates a new PriorityPool -func NewPriorityPool(ns *nodestate.NodeStateMachine, setup PriorityPoolSetup, clock mclock.Clock, minCap uint64, activeBias time.Duration, capacityStepDiv uint64) *PriorityPool { +func NewPriorityPool(ns *nodestate.NodeStateMachine, setup priorityPoolSetup, clock mclock.Clock, minCap uint64, activeBias time.Duration, capacityStepDiv uint64) *PriorityPool { pp := &PriorityPool{ ns: ns, - PriorityPoolSetup: setup, + priorityPoolSetup: setup, clock: clock, inactiveQueue: prque.New(inactiveSetIndex), minCap: minCap, @@ -144,15 +144,15 @@ func NewPriorityPool(ns *nodestate.NodeStateMachine, setup PriorityPoolSetup, cl } ns.SetFieldSub(node, pp.ppNodeInfoField, c) } else { - ns.SetStateSub(node, nodestate.Flags{}, pp.ActiveFlag.Or(pp.InactiveFlag), 0) + ns.SetStateSub(node, nodestate.Flags{}, pp.activeFlag.Or(pp.inactiveFlag), 0) if n, _ := pp.ns.GetField(node, pp.ppNodeInfoField).(*ppNodeInfo); n != nil { pp.disconnectedNode(n) } - ns.SetFieldSub(node, pp.CapacityField, nil) + ns.SetFieldSub(node, pp.capacityField, nil) ns.SetFieldSub(node, pp.ppNodeInfoField, nil) } }) - ns.SubscribeState(pp.ActiveFlag.Or(pp.InactiveFlag), func(node *enode.Node, oldState, newState nodestate.Flags) { + ns.SubscribeState(pp.activeFlag.Or(pp.inactiveFlag), func(node *enode.Node, oldState, newState nodestate.Flags) { if c, _ := pp.ns.GetField(node, pp.ppNodeInfoField).(*ppNodeInfo); c != nil { if oldState.IsEmpty() { pp.connectedNode(c) @@ -178,7 +178,7 @@ func NewPriorityPool(ns *nodestate.NodeStateMachine, setup PriorityPoolSetup, cl // If setCap and allowed are both true then the caller can assume that the change was // successful. // Note: priorityField should always be set before calling RequestCapacity. If setCap -// is false then both InactiveFlag and ActiveFlag can be unset and they are not changed +// is false then both inactiveFlag and activeFlag can be unset and they are not changed // by this function call either. // Note 2: this function should run inside a NodeStateMachine operation func (pp *PriorityPool) RequestCapacity(node *enode.Node, targetCap uint64, bias time.Duration, setCap bool) (minPriority int64, allowed bool) { @@ -325,7 +325,7 @@ func (pp *PriorityPool) inactivePriority(p *ppNodeInfo) int64 { return p.nodePriority.Priority(pp.minCap) } -// connectedNode is called when a new node has been added to the pool (InactiveFlag set) +// connectedNode is called when a new node has been added to the pool (inactiveFlag set) // Note: this function should run inside a NodeStateMachine operation func (pp *PriorityPool) connectedNode(c *ppNodeInfo) { pp.lock.Lock() @@ -344,8 +344,8 @@ func (pp *PriorityPool) connectedNode(c *ppNodeInfo) { updates = pp.tryActivate() } -// disconnectedNode is called when a node has been removed from the pool (both InactiveFlag -// and ActiveFlag reset) +// disconnectedNode is called when a node has been removed from the pool (both inactiveFlag +// and activeFlag reset) // Note: this function should run inside a NodeStateMachine operation func (pp *PriorityPool) disconnectedNode(c *ppNodeInfo) { pp.lock.Lock() @@ -370,8 +370,8 @@ func (pp *PriorityPool) disconnectedNode(c *ppNodeInfo) { // markForChange internally puts a node in a temporary state that can either be reverted // or confirmed later. 
This temporary state allows changing the capacity of a node and -// moving it between the active and inactive queue. ActiveFlag/InactiveFlag and -// CapacityField are not changed while the changes are still temporary. +// moving it between the active and inactive queue. activeFlag/inactiveFlag and +// capacityField are not changed while the changes are still temporary. func (pp *PriorityPool) markForChange(c *ppNodeInfo) { if c.changed { return @@ -458,25 +458,25 @@ func (pp *PriorityPool) finalizeChanges(commit bool) (updates []capUpdate) { return } -// capUpdate describes a CapacityField and ActiveFlag/InactiveFlag update +// capUpdate describes a capacityField and activeFlag/inactiveFlag update type capUpdate struct { node *enode.Node oldCap, newCap uint64 } -// updateFlags performs CapacityField and ActiveFlag/InactiveFlag updates while the +// updateFlags performs capacityField and activeFlag/inactiveFlag updates while the // pool mutex is not held // Note: this function should run inside a NodeStateMachine operation func (pp *PriorityPool) updateFlags(updates []capUpdate) { for _, f := range updates { if f.oldCap == 0 { - pp.ns.SetStateSub(f.node, pp.ActiveFlag, pp.InactiveFlag, 0) + pp.ns.SetStateSub(f.node, pp.activeFlag, pp.inactiveFlag, 0) } if f.newCap == 0 { - pp.ns.SetStateSub(f.node, pp.InactiveFlag, pp.ActiveFlag, 0) - pp.ns.SetFieldSub(f.node, pp.CapacityField, nil) + pp.ns.SetStateSub(f.node, pp.inactiveFlag, pp.activeFlag, 0) + pp.ns.SetFieldSub(f.node, pp.capacityField, nil) } else { - pp.ns.SetFieldSub(f.node, pp.CapacityField, f.newCap) + pp.ns.SetFieldSub(f.node, pp.capacityField, f.newCap) } } } diff --git a/les/vflux/server/prioritypool_test.go b/les/vflux/server/prioritypool_test.go index 1b04abc7b6d4..816affb6ec27 100644 --- a/les/vflux/server/prioritypool_test.go +++ b/les/vflux/server/prioritypool_test.go @@ -31,11 +31,11 @@ import ( var ( testSetup = &nodestate.Setup{} ppTestClientField = testSetup.NewField("ppTestClient", reflect.TypeOf(&ppTestClient{})) - ppTestSetup = NewPriorityPoolSetup(testSetup) + ppTestSetup = newPriorityPoolSetup(testSetup) ) func init() { - ppTestSetup.Connect(ppTestClientField, btTestSetup.UpdateFlag) + ppTestSetup.connect(ppTestClientField, btTestSetup.updateFlag) } const ( @@ -61,7 +61,7 @@ func TestPriorityPool(t *testing.T) { clock := &mclock.Simulated{} ns := nodestate.NewNodeStateMachine(nil, nil, clock, testSetup) - ns.SubscribeField(ppTestSetup.CapacityField, func(node *enode.Node, state nodestate.Flags, oldValue, newValue interface{}) { + ns.SubscribeField(ppTestSetup.capacityField, func(node *enode.Node, state nodestate.Flags, oldValue, newValue interface{}) { if n := ns.GetField(node, ppTestSetup.priorityField); n != nil { c := n.(*ppTestClient) c.cap = newValue.(uint64) @@ -100,7 +100,7 @@ func TestPriorityPool(t *testing.T) { sumBalance += c.balance clients[i] = c ns.SetField(c.node, ppTestSetup.priorityField, c) - ns.SetState(c.node, ppTestSetup.InactiveFlag, nodestate.Flags{}, 0) + ns.SetState(c.node, ppTestSetup.inactiveFlag, nodestate.Flags{}, 0) raise(c) check(c) } @@ -110,8 +110,8 @@ func TestPriorityPool(t *testing.T) { oldBalance := c.balance c.balance = uint64(rand.Int63n(100000000000) + 100000000000) sumBalance += c.balance - oldBalance - pp.ns.SetState(c.node, btTestSetup.UpdateFlag, nodestate.Flags{}, 0) - pp.ns.SetState(c.node, nodestate.Flags{}, btTestSetup.UpdateFlag, 0) + pp.ns.SetState(c.node, btTestSetup.updateFlag, nodestate.Flags{}, 0) + pp.ns.SetState(c.node, nodestate.Flags{}, 
btTestSetup.updateFlag, 0) if c.balance > oldBalance { raise(c) } else { @@ -159,8 +159,8 @@ func TestPriorityPool(t *testing.T) { } c.balance -= add sumBalance -= add - pp.ns.SetState(c.node, btTestSetup.UpdateFlag, nodestate.Flags{}, 0) - pp.ns.SetState(c.node, nodestate.Flags{}, btTestSetup.UpdateFlag, 0) + pp.ns.SetState(c.node, btTestSetup.updateFlag, nodestate.Flags{}, 0) + pp.ns.SetState(c.node, nodestate.Flags{}, btTestSetup.updateFlag, 0) for _, c := range clients { raise(c) } @@ -186,7 +186,7 @@ func TestCapacityCurve(t *testing.T) { } clients[i] = c ns.SetField(c.node, ppTestSetup.priorityField, c) - ns.SetState(c.node, ppTestSetup.InactiveFlag, nodestate.Flags{}, 0) + ns.SetState(c.node, ppTestSetup.inactiveFlag, nodestate.Flags{}, 0) ns.Operation(func() { pp.RequestCapacity(c.node, c.cap, 0, true) }) From ed69bfb59faa6065e809a8c936065cccdbc1249f Mon Sep 17 00:00:00 2001 From: Zsolt Felfoldi Date: Wed, 17 Mar 2021 23:12:37 +0100 Subject: [PATCH 08/27] les/vflux/server: unexport all internal components and functions --- les/vflux/server/balance.go | 24 +++--- les/vflux/server/balance_test.go | 20 ++--- les/vflux/server/balance_tracker.go | 40 ++++----- les/vflux/server/clientpool.go | 34 ++++---- les/vflux/server/prioritypool.go | 118 +++++++++++++------------- les/vflux/server/prioritypool_test.go | 30 +++---- 6 files changed, 131 insertions(+), 135 deletions(-) diff --git a/les/vflux/server/balance.go b/les/vflux/server/balance.go index e93e05c48ca9..2b9dae5cbff7 100644 --- a/les/vflux/server/balance.go +++ b/les/vflux/server/balance.go @@ -55,13 +55,13 @@ func (p PriceFactors) timePrice(cap uint64) float64 { type ( // nodePriority interface provides current and estimated future priorities on demand nodePriority interface { - // Priority should return the current priority of the node (higher is better) - Priority(cap uint64) int64 - // EstMinPriority should return a lower estimate for the minimum of the node priority + // priority should return the current priority of the node (higher is better) + priority(cap uint64) int64 + // estimatePriority should return a lower estimate for the minimum of the node priority // value starting from the current moment until the given time. If the priority goes // under the returned estimate before the specified moment then it is the caller's // responsibility to signal with updateFlag. - EstimatePriority(cap uint64, addBalance int64, future, bias time.Duration, update bool) int64 + estimatePriority(cap uint64, addBalance int64, future, bias time.Duration, update bool) int64 } // ReadOnlyBalance provides read-only operations on the node balance @@ -92,11 +92,11 @@ type ( // client and calculates actual and projected future priority values. // Implements nodePriority interface. type nodeBalance struct { - bt *BalanceTracker + bt *balanceTracker lock sync.RWMutex node *enode.Node connAddress string - active, priority, setFlags bool + active, hasPriority, setFlags bool capacity uint64 balance balance posFactor, negFactor PriceFactors @@ -278,8 +278,8 @@ func (n *nodeBalance) RequestServed(cost uint64) uint64 { return n.balance.pos.Value(posExp) } -// Priority returns the actual priority based on the current balance -func (n *nodeBalance) Priority(capacity uint64) int64 { +// priority returns the actual priority based on the current balance +func (n *nodeBalance) priority(capacity uint64) int64 { n.lock.Lock() defer n.lock.Unlock() @@ -292,7 +292,7 @@ func (n *nodeBalance) Priority(capacity uint64) int64 { // in the current session. 
// If update is true then a priority callback is added that turns updateFlag on and off // in case the priority goes below the estimated minimum. -func (n *nodeBalance) EstimatePriority(capacity uint64, addBalance int64, future, bias time.Duration, update bool) int64 { +func (n *nodeBalance) estimatePriority(capacity uint64, addBalance int64, future, bias time.Duration, update bool) int64 { n.lock.Lock() defer n.lock.Unlock() @@ -519,7 +519,7 @@ func (n *nodeBalance) updateAfter(dt time.Duration) { func (n *nodeBalance) balanceExhausted() { n.lock.Lock() n.storeBalance(true, false) - n.priority = false + n.hasPriority = false n.lock.Unlock() if n.setFlags { n.bt.ns.SetStateSub(n.node, nodestate.Flags{}, n.bt.priorityFlag, 0) @@ -530,8 +530,8 @@ func (n *nodeBalance) balanceExhausted() { // callback and flag if necessary. It assumes that the balance has been recently updated. // Note that the priority flag has to be set by the caller after the mutex has been released. func (n *nodeBalance) checkPriorityStatus() bool { - if !n.priority && !n.balance.pos.IsZero() { - n.priority = true + if !n.hasPriority && !n.balance.pos.IsZero() { + n.hasPriority = true n.addCallback(balanceCallbackZero, 0, func() { n.balanceExhausted() }) return true } diff --git a/les/vflux/server/balance_test.go b/les/vflux/server/balance_test.go index b7ca5985abe7..dce51ad1884b 100644 --- a/les/vflux/server/balance_test.go +++ b/les/vflux/server/balance_test.go @@ -49,14 +49,14 @@ func (z zeroExpirer) LogOffset(now mclock.AbsTime) utils.Fixed64 { type balanceTestSetup struct { clock *mclock.Simulated ns *nodestate.NodeStateMachine - bt *BalanceTracker + bt *balanceTracker } func newBalanceTestSetup() *balanceTestSetup { clock := &mclock.Simulated{} ns := nodestate.NewNodeStateMachine(nil, nil, clock, testSetup) db := memorydb.New() - bt := NewBalanceTracker(ns, btTestSetup, db, clock, zeroExpirer{}, zeroExpirer{}) + bt := newBalanceTracker(ns, btTestSetup, db, clock, zeroExpirer{}, zeroExpirer{}) ns.Start() return &balanceTestSetup{ clock: clock, @@ -96,7 +96,7 @@ func (b *balanceTestSetup) addBalance(node *nodeBalance, add int64) (old, new ui } func (b *balanceTestSetup) stop() { - b.bt.Stop() + b.bt.stop() b.ns.Stop() } @@ -249,9 +249,9 @@ func TestBalanceToPriority(t *testing.T) { } for _, i := range inputs { b.setBalance(node, i.pos, i.neg) - priority := node.Priority(1000) + priority := node.priority(1000) if priority != i.priority { - t.Fatalf("Priority mismatch, want %v, got %v", i.priority, priority) + t.Fatalf("priority mismatch, want %v, got %v", i.priority, priority) } } } @@ -290,7 +290,7 @@ func TestEstimatedPriority(t *testing.T) { for _, i := range inputs { b.clock.Run(i.runTime) node.RequestServed(i.reqCost) - priority := node.EstimatePriority(1000000000, 0, i.futureTime, 0, false) + priority := node.estimatePriority(1000000000, 0, i.futureTime, 0, false) if priority != i.priority-1 { t.Fatalf("Estimated priority mismatch, want %v, got %v", i.priority-1, priority) } @@ -399,7 +399,7 @@ func TestBalancePersistence(t *testing.T) { negExp := &utils.Expirer{} posExp.SetRate(clock.Now(), math.Log(2)/float64(time.Hour*2)) // halves every two hours negExp.SetRate(clock.Now(), math.Log(2)/float64(time.Hour)) // halves every hour - bt := NewBalanceTracker(ns, btTestSetup, db, clock, posExp, negExp) + bt := newBalanceTracker(ns, btTestSetup, db, clock, posExp, negExp) ns.Start() bts := &balanceTestSetup{ clock: clock, @@ -432,7 +432,7 @@ func TestBalancePersistence(t *testing.T) { clock.Run(time.Hour * 2) 
exp(8000000000, 4000000000) expTotal(8000000000) - bt.Stop() + bt.stop() ns.Stop() clock = &mclock.Simulated{} @@ -441,7 +441,7 @@ func TestBalancePersistence(t *testing.T) { negExp = &utils.Expirer{} posExp.SetRate(clock.Now(), math.Log(2)/float64(time.Hour*2)) // halves every two hours negExp.SetRate(clock.Now(), math.Log(2)/float64(time.Hour)) // halves every hour - bt = NewBalanceTracker(ns, btTestSetup, db, clock, posExp, negExp) + bt = newBalanceTracker(ns, btTestSetup, db, clock, posExp, negExp) ns.Start() bts = &balanceTestSetup{ clock: clock, @@ -455,6 +455,6 @@ func TestBalancePersistence(t *testing.T) { clock.Run(time.Hour * 2) exp(4000000000, 1000000000) expTotal(4000000000) - bt.Stop() + bt.stop() ns.Stop() } diff --git a/les/vflux/server/balance_tracker.go b/les/vflux/server/balance_tracker.go index 350e354b237e..39dd0130439b 100644 --- a/les/vflux/server/balance_tracker.go +++ b/les/vflux/server/balance_tracker.go @@ -35,9 +35,9 @@ const ( persistExpirationRefresh = time.Minute * 5 // refresh period of the token expiration persistence ) -// balanceTrackerSetup contains node state flags and fields used by BalanceTracker +// balanceTrackerSetup contains node state flags and fields used by balanceTracker type balanceTrackerSetup struct { - // controlled by PriorityPool + // controlled by priorityPool priorityFlag, updateFlag nodestate.Flags balanceField nodestate.Field // external connections @@ -45,7 +45,7 @@ type balanceTrackerSetup struct { } // newBalanceTrackerSetup creates a new balanceTrackerSetup and initializes the fields -// and flags controlled by BalanceTracker +// and flags controlled by balanceTracker func newBalanceTrackerSetup(setup *nodestate.Setup) balanceTrackerSetup { return balanceTrackerSetup{ // priorityFlag is set if the node has a positive balance @@ -59,13 +59,13 @@ func newBalanceTrackerSetup(setup *nodestate.Setup) balanceTrackerSetup { } } -// connect sets the fields used by BalanceTracker as an input +// connect sets the fields used by balanceTracker as an input func (bts *balanceTrackerSetup) connect(clientField, capacityField nodestate.Field) { bts.clientField = clientField bts.capacityField = capacityField } -// BalanceTracker tracks positive and negative balances for connected nodes. +// balanceTracker tracks positive and negative balances for connected nodes. // After clientField is set externally, a nodeBalance is created and previous // balance values are loaded from the database. Both balances are exponentially expired // values. Costs are deducted from the positive balance if present, otherwise added to @@ -73,7 +73,7 @@ func (bts *balanceTrackerSetup) connect(clientField, capacityField nodestate.Fie // continuously while individual request costs are applied immediately. // The two balances are translated into a single priority value that also depends // on the actual capacity. 
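// Illustrative sketch, not part of the patch: one plausible way the two balances can be
// folded into a single, capacity-dependent priority as described above. The helper name
// is hypothetical; the real mapping lives in nodeBalance and additionally applies value
// expiration and price factors. Capacity is assumed to be at least the pool's minimum
// (never zero).
func sketchPriority(pos, neg, capacity uint64) int64 {
	if pos > 0 {
		// remaining positive balance per unit of capacity: higher means longer service
		return int64(pos / capacity)
	}
	// no positive balance left: accumulated negative balance pushes the node below zero
	return -int64(neg)
}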
-type BalanceTracker struct { +type balanceTracker struct { balanceTrackerSetup clock mclock.Clock lock sync.Mutex @@ -91,10 +91,10 @@ type balancePeer interface { FreeClientId() string } -// NewBalanceTracker creates a new BalanceTracker -func NewBalanceTracker(ns *nodestate.NodeStateMachine, setup balanceTrackerSetup, db ethdb.KeyValueStore, clock mclock.Clock, posExp, negExp utils.ValueExpirer) *BalanceTracker { +// newBalanceTracker creates a new balanceTracker +func newBalanceTracker(ns *nodestate.NodeStateMachine, setup balanceTrackerSetup, db ethdb.KeyValueStore, clock mclock.Clock, posExp, negExp utils.ValueExpirer) *balanceTracker { ndb := newNodeDB(db, clock) - bt := &BalanceTracker{ + bt := &balanceTracker{ ns: ns, balanceTrackerSetup: setup, ndb: ndb, @@ -162,8 +162,8 @@ func NewBalanceTracker(ns *nodestate.NodeStateMachine, setup balanceTrackerSetup return bt } -// Stop saves expiration offset and unsaved node balances and shuts BalanceTracker down -func (bt *BalanceTracker) Stop() { +// Stop saves expiration offset and unsaved node balances and shuts balanceTracker down +func (bt *balanceTracker) stop() { now := bt.clock.Now() bt.ndb.setExpiration(bt.posExp.LogOffset(now), bt.negExp.LogOffset(now)) close(bt.quit) @@ -179,7 +179,7 @@ func (bt *BalanceTracker) Stop() { } // TotalTokenAmount returns the current total amount of service tokens in existence -func (bt *BalanceTracker) TotalTokenAmount() uint64 { +func (bt *balanceTracker) TotalTokenAmount() uint64 { bt.lock.Lock() defer bt.lock.Unlock() @@ -199,13 +199,13 @@ func (bt *BalanceTracker) TotalTokenAmount() uint64 { } // GetPosBalanceIDs lists node IDs with an associated positive balance -func (bt *BalanceTracker) GetPosBalanceIDs(start, stop enode.ID, maxCount int) (result []enode.ID) { +func (bt *balanceTracker) GetPosBalanceIDs(start, stop enode.ID, maxCount int) (result []enode.ID) { return bt.ndb.getPosBalanceIDs(start, stop, maxCount) } // SetExpirationTCs sets positive and negative token expiration time constants. // Specified in seconds, 0 means infinite (no expiration). -func (bt *BalanceTracker) SetExpirationTCs(pos, neg uint64) { +func (bt *balanceTracker) SetExpirationTCs(pos, neg uint64) { bt.lock.Lock() defer bt.lock.Unlock() @@ -225,7 +225,7 @@ func (bt *BalanceTracker) SetExpirationTCs(pos, neg uint64) { // GetExpirationTCs returns the current positive and negative token expiration // time constants -func (bt *BalanceTracker) GetExpirationTCs() (pos, neg uint64) { +func (bt *balanceTracker) GetExpirationTCs() (pos, neg uint64) { bt.lock.Lock() defer bt.lock.Unlock() @@ -234,7 +234,7 @@ func (bt *BalanceTracker) GetExpirationTCs() (pos, neg uint64) { // BalanceOperation allows atomic operations on the balance of a node regardless of whether // it is currently connected or not -func (bt *BalanceTracker) BalanceOperation(id enode.ID, negBalanceKey string, cb func(AtomicBalanceOperator)) { +func (bt *balanceTracker) BalanceOperation(id enode.ID, negBalanceKey string, cb func(AtomicBalanceOperator)) { bt.ns.Operation(func() { node := bt.ns.GetNode(id) var nb *nodeBalance @@ -254,7 +254,7 @@ func (bt *BalanceTracker) BalanceOperation(id enode.ID, negBalanceKey string, cb // for the given node. It also sets the priorityFlag and adds balanceCallbackZero if // the node has a positive balance. 
// Note: this function should run inside a NodeStateMachine operation -func (bt *BalanceTracker) newNodeBalance(node *enode.Node, negBalanceKey string, setFlags bool) *nodeBalance { +func (bt *balanceTracker) newNodeBalance(node *enode.Node, negBalanceKey string, setFlags bool) *nodeBalance { pb := bt.ndb.getOrNewBalance(node.ID().Bytes(), false) nb := bt.ndb.getOrNewBalance([]byte(negBalanceKey), true) n := &nodeBalance{ @@ -276,7 +276,7 @@ func (bt *BalanceTracker) newNodeBalance(node *enode.Node, negBalanceKey string, } // storeBalance stores either a positive or a negative balance in the database -func (bt *BalanceTracker) storeBalance(id []byte, neg bool, value utils.ExpiredValue) { +func (bt *balanceTracker) storeBalance(id []byte, neg bool, value utils.ExpiredValue) { if bt.canDropBalance(bt.clock.Now(), neg, value) { bt.ndb.delBalance(id, neg) // balance is small enough, drop it directly. } else { @@ -286,7 +286,7 @@ func (bt *BalanceTracker) storeBalance(id []byte, neg bool, value utils.ExpiredV // canDropBalance tells whether a positive or negative balance is below the threshold // and therefore can be dropped from the database -func (bt *BalanceTracker) canDropBalance(now mclock.AbsTime, neg bool, b utils.ExpiredValue) bool { +func (bt *balanceTracker) canDropBalance(now mclock.AbsTime, neg bool, b utils.ExpiredValue) bool { if neg { return b.Value(bt.negExp.LogOffset(now)) <= negThreshold } @@ -294,7 +294,7 @@ func (bt *BalanceTracker) canDropBalance(now mclock.AbsTime, neg bool, b utils.E } // updateTotalBalance adjusts the total balance after executing given callback. -func (bt *BalanceTracker) updateTotalBalance(n *nodeBalance, callback func() bool) { +func (bt *balanceTracker) updateTotalBalance(n *nodeBalance, callback func() bool) { bt.lock.Lock() defer bt.lock.Unlock() diff --git a/les/vflux/server/clientpool.go b/les/vflux/server/clientpool.go index e69bb931864a..5a357f13300a 100644 --- a/les/vflux/server/clientpool.go +++ b/les/vflux/server/clientpool.go @@ -58,7 +58,7 @@ func init() { // then negative balance is accumulated. // // Balance tracking and priority calculation for connected clients is done by -// BalanceTracker. activeQueue ensures that clients with the lowest positive or +// balanceTracker. activeQueue ensures that clients with the lowest positive or // highest negative balance get evicted when the total capacity allowance is full // and new clients with a better balance want to connect. // @@ -70,8 +70,8 @@ func init() { // and negative banalce. Boeth positive balance and negative balance will decrease // exponentially. If the balance is low enough, then the record will be dropped. 
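// Illustrative sketch, not part of the patch (uses the standard "math" and "time"
// packages): both balances are stored as expiring values whose effective amount decays
// exponentially. With the rate convention used with utils.Expirer.SetRate in
// TestBalancePersistence above (math.Log(2)/float64(2*time.Hour) halves a value every
// two hours), the decay can be approximated as below; the helper name is hypothetical.
func sketchDecayedBalance(value, rate float64, elapsed time.Duration) float64 {
	// e.g. 16e9 decays to roughly 4e9 after 4h at a 2h half-life
	return value * math.Exp(-rate*float64(elapsed))
}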
type ClientPool struct { - *PriorityPool - *BalanceTracker + *priorityPool + *balanceTracker clock mclock.Clock closed bool ns *nodestate.NodeStateMachine @@ -103,8 +103,8 @@ func NewClientPool(balanceDb ethdb.KeyValueStore, minCap uint64, connectedBias t ns := nodestate.NewNodeStateMachine(nil, nil, clock, serverSetup) cp := &ClientPool{ ns: ns, - BalanceTracker: NewBalanceTracker(ns, btSetup, balanceDb, clock, &utils.Expirer{}, &utils.Expirer{}), - PriorityPool: NewPriorityPool(ns, ppSetup, clock, minCap, connectedBias, 4), + balanceTracker: newBalanceTracker(ns, btSetup, balanceDb, clock, &utils.Expirer{}, &utils.Expirer{}), + priorityPool: newPriorityPool(ns, ppSetup, clock, minCap, connectedBias, 4), clock: clock, minCap: minCap, connectedBias: connectedBias, @@ -121,7 +121,7 @@ func NewClientPool(balanceDb ethdb.KeyValueStore, minCap uint64, connectedBias t if timeout > 0 { ns.AddTimeout(node, ppSetup.inactiveFlag, timeout) } else { - // Note: if capacity is immediately available then PriorityPool will set the active + // Note: if capacity is immediately available then priorityPool will set the active // flag simultaneously with removing the inactive flag and therefore this will not // initiate disconnection ns.SetStateSub(node, nodestate.Flags{}, ppSetup.inactiveFlag, 0) @@ -134,7 +134,7 @@ func NewClientPool(balanceDb ethdb.KeyValueStore, minCap uint64, connectedBias t // active with no priority; limit capacity to minCap cap, _ := ns.GetField(node, ppSetup.capacityField).(uint64) if cap > minCap { - cp.RequestCapacity(node, minCap, 0, true) + cp.requestCapacity(node, minCap, 0, true) } } if newState.Equals(nodestate.Flags{}) { @@ -195,7 +195,7 @@ func (cp *ClientPool) Start() { // Stop shuts the client pool down. The clientPeer interface callbacks will not be called // after Stop. Register calls will return nil. 
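// Hypothetical usage sketch, not part of the patch: the expected lifecycle around the
// entry points visible in this file. "pool" and "peer" are placeholders for an already
// constructed *ClientPool and a clientPeer implementation; the matching unregister path
// on peer disconnection is assumed but not shown here.
func sketchLifecycle(pool *ClientPool, peer clientPeer) {
	pool.Start()
	balance := pool.Register(peer)
	if balance == nil {
		// per the comment above, Register returns nil once the pool has been stopped
		return
	}
	// ... serve the peer; request costs are charged through the returned balance ...
	pool.Stop() // after this, clientPeer callbacks are no longer invoked
}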
func (cp *ClientPool) Stop() { - cp.BalanceTracker.Stop() + cp.balanceTracker.stop() cp.ns.Stop() } @@ -227,7 +227,7 @@ func (cp *ClientPool) SetDefaultFactors(posFactors, negFactors PriceFactors) { func (cp *ClientPool) SetConnectedBias(bias time.Duration) { cp.lock.Lock() cp.connectedBias = bias - cp.SetActiveBias(bias) + cp.setActiveBias(bias) cp.lock.Unlock() } @@ -283,9 +283,9 @@ func (cp *ClientPool) SetCapacity(node *enode.Node, reqCap uint64, bias time.Dur curveBias += time.Second * 10 tryCap := reqCap if reqCap > capacity { - curve := cp.GetCapacityCurve().Exclude(node.ID()) - tryCap = curve.MaxCapacity(func(capacity uint64) int64 { - return balance.EstimatePriority(capacity, 0, 0, curveBias, false) + curve := cp.getCapacityCurve().exclude(node.ID()) + tryCap = curve.maxCapacity(func(capacity uint64) int64 { + return balance.estimatePriority(capacity, 0, 0, curveBias, false) }) if tryCap <= capacity { return @@ -294,7 +294,7 @@ func (cp *ClientPool) SetCapacity(node *enode.Node, reqCap uint64, bias time.Dur tryCap = reqCap } } - if _, allowed := cp.RequestCapacity(node, tryCap, bias, true); allowed { + if _, allowed := cp.requestCapacity(node, tryCap, bias, true); allowed { capacity = tryCap return } @@ -333,14 +333,14 @@ func (cp *ClientPool) serveCapQuery(id enode.ID, freeID string, data []byte) []b } cp.lock.RUnlock() - // use CapacityCurve to answer request for multiple newly bought token amounts - curve := cp.GetCapacityCurve().Exclude(id) + // use capacityCurve to answer request for multiple newly bought token amounts + curve := cp.getCapacityCurve().exclude(id) cp.BalanceOperation(id, freeID, func(balance AtomicBalanceOperator) { pb, _ := balance.GetBalance() for i, addTokens := range req.AddTokens { add := addTokens.Int64() - result[i] = curve.MaxCapacity(func(capacity uint64) int64 { - return balance.EstimatePriority(capacity, add, 0, bias, false) / int64(capacity) + result[i] = curve.maxCapacity(func(capacity uint64) int64 { + return balance.estimatePriority(capacity, add, 0, bias, false) / int64(capacity) }) if add <= 0 && uint64(-add) >= pb && result[i] > cp.minCap { result[i] = cp.minCap diff --git a/les/vflux/server/prioritypool.go b/les/vflux/server/prioritypool.go index 3894d6dd3576..8c59ee6443ec 100644 --- a/les/vflux/server/prioritypool.go +++ b/les/vflux/server/prioritypool.go @@ -33,11 +33,11 @@ const ( lazyQueueRefresh = time.Second * 10 // refresh period of the active queue ) -// priorityPoolSetup contains node state flags and fields used by PriorityPool +// priorityPoolSetup contains node state flags and fields used by priorityPool // Note: activeFlag and inactiveFlag can be controlled both externally and by the pool, -// see PriorityPool description for details. +// see priorityPool description for details. 
type priorityPoolSetup struct { - // controlled by PriorityPool + // controlled by priorityPool activeFlag, inactiveFlag nodestate.Flags capacityField, ppNodeInfoField nodestate.Field // external connections @@ -46,7 +46,7 @@ type priorityPoolSetup struct { } // newPriorityPoolSetup creates a new priorityPoolSetup and initializes the fields -// and flags controlled by PriorityPool +// and flags controlled by priorityPool func newPriorityPoolSetup(setup *nodestate.Setup) priorityPoolSetup { return priorityPoolSetup{ activeFlag: setup.NewFlag("active"), @@ -56,13 +56,13 @@ func newPriorityPoolSetup(setup *nodestate.Setup) priorityPoolSetup { } } -// connect sets the fields and flags used by PriorityPool as an input +// connect sets the fields and flags used by priorityPool as an input func (pps *priorityPoolSetup) connect(priorityField nodestate.Field, updateFlag nodestate.Flags) { pps.priorityField = priorityField // should implement nodePriority pps.updateFlag = updateFlag // triggers an immediate priority update } -// PriorityPool handles a set of nodes where each node has a capacity (a scalar value) +// priorityPool handles a set of nodes where each node has a capacity (a scalar value) // and a priority (which can change over time and can also depend on the capacity). // A node is active if it has at least the necessary minimal amount of capacity while // inactive nodes have 0 capacity (values between 0 and the minimum are not allowed). @@ -80,7 +80,7 @@ func (pps *priorityPoolSetup) connect(priorityField nodestate.Field, updateFlag // capacity (if the threshold priority stays the same). // // Nodes in the pool always have either inactiveFlag or activeFlag set. A new node is -// added to the pool by externally setting inactiveFlag. PriorityPool can switch a node +// added to the pool by externally setting inactiveFlag. priorityPool can switch a node // between inactiveFlag and activeFlag at any time. Nodes can be removed from the pool // by externally resetting both flags. activeFlag should not be set externally. // @@ -88,7 +88,7 @@ func (pps *priorityPoolSetup) connect(priorityField nodestate.Field, updateFlag // the minimum capacity can be granted for them. The capacity of lower priority active // nodes is reduced or they are demoted to "inactive" state if their priority is // insufficient even at minimal capacity. 
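// Illustrative sketch, not part of the patch: how an external component hands a node to
// the pool according to the description above, mirroring the tests in this package. The
// function name is hypothetical. The priority source must be set before inactiveFlag,
// and activeFlag is only ever set by the pool itself.
func sketchAddToPool(ns *nodestate.NodeStateMachine, setup priorityPoolSetup, node *enode.Node, pri nodePriority) {
	ns.SetField(node, setup.priorityField, pri)                 // e.g. a *nodeBalance
	ns.SetState(node, setup.inactiveFlag, nodestate.Flags{}, 0) // the pool may activate it later
	// removing the node from the pool means resetting both flags externally:
	// ns.SetState(node, nodestate.Flags{}, setup.activeFlag.Or(setup.inactiveFlag), 0)
}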
-type PriorityPool struct { +type priorityPool struct { priorityPoolSetup ns *nodestate.NodeStateMachine clock mclock.Clock @@ -102,12 +102,12 @@ type PriorityPool struct { activeBias time.Duration capacityStepDiv uint64 - cachedCurve *CapacityCurve + cachedCurve *capacityCurve ccUpdatedAt mclock.AbsTime ccUpdateForced bool } -// ppNodeInfo is the internal node descriptor of PriorityPool +// ppNodeInfo is the internal node descriptor of priorityPool type ppNodeInfo struct { nodePriority nodePriority node *enode.Node @@ -118,9 +118,9 @@ type ppNodeInfo struct { activeIndex, inactiveIndex int } -// NewPriorityPool creates a new PriorityPool -func NewPriorityPool(ns *nodestate.NodeStateMachine, setup priorityPoolSetup, clock mclock.Clock, minCap uint64, activeBias time.Duration, capacityStepDiv uint64) *PriorityPool { - pp := &PriorityPool{ +// newPriorityPool creates a new priorityPool +func newPriorityPool(ns *nodestate.NodeStateMachine, setup priorityPoolSetup, clock mclock.Clock, minCap uint64, activeBias time.Duration, capacityStepDiv uint64) *priorityPool { + pp := &priorityPool{ ns: ns, priorityPoolSetup: setup, clock: clock, @@ -170,18 +170,18 @@ func NewPriorityPool(ns *nodestate.NodeStateMachine, setup priorityPoolSetup, cl return pp } -// RequestCapacity checks whether changing the capacity of a node to the given target +// requestCapacity checks whether changing the capacity of a node to the given target // is possible (bias is applied in favor of other active nodes if the target is higher // than the current capacity). // If setCap is true then it also performs the change if possible. The function returns // the minimum priority needed to do the change and whether it is currently allowed. // If setCap and allowed are both true then the caller can assume that the change was // successful. -// Note: priorityField should always be set before calling RequestCapacity. If setCap +// Note: priorityField should always be set before calling requestCapacity. If setCap // is false then both inactiveFlag and activeFlag can be unset and they are not changed // by this function call either. 
// Note 2: this function should run inside a NodeStateMachine operation -func (pp *PriorityPool) RequestCapacity(node *enode.Node, targetCap uint64, bias time.Duration, setCap bool) (minPriority int64, allowed bool) { +func (pp *priorityPool) requestCapacity(node *enode.Node, targetCap uint64, bias time.Duration, setCap bool) (minPriority int64, allowed bool) { pp.lock.Lock() pp.activeQueue.Refresh() var updates []capUpdate @@ -198,14 +198,14 @@ func (pp *PriorityPool) RequestCapacity(node *enode.Node, targetCap uint64, bias } c, _ := pp.ns.GetField(node, pp.ppNodeInfoField).(*ppNodeInfo) if c == nil { - log.Error("RequestCapacity called for unknown node", "id", node.ID()) + log.Error("requestCapacity called for unknown node", "id", node.ID()) return math.MaxInt64, false } var priority int64 if targetCap > c.capacity { - priority = c.nodePriority.EstimatePriority(targetCap, 0, 0, bias, false) + priority = c.nodePriority.estimatePriority(targetCap, 0, 0, bias, false) } else { - priority = c.nodePriority.Priority(targetCap) + priority = c.nodePriority.priority(targetCap) } pp.markForChange(c) pp.setCapacity(c, targetCap) @@ -222,7 +222,7 @@ func (pp *PriorityPool) RequestCapacity(node *enode.Node, targetCap uint64, bias } // SetLimits sets the maximum number and total capacity of simultaneously active nodes -func (pp *PriorityPool) SetLimits(maxCount, maxCap uint64) { +func (pp *priorityPool) SetLimits(maxCount, maxCap uint64) { pp.lock.Lock() pp.activeQueue.Refresh() var updates []capUpdate @@ -243,8 +243,8 @@ func (pp *PriorityPool) SetLimits(maxCount, maxCap uint64) { } } -// SetActiveBias sets the bias applied when trying to activate inactive nodes -func (pp *PriorityPool) SetActiveBias(bias time.Duration) { +// setActiveBias sets the bias applied when trying to activate inactive nodes +func (pp *priorityPool) setActiveBias(bias time.Duration) { pp.lock.Lock() var updates []capUpdate defer func() { @@ -260,7 +260,7 @@ func (pp *PriorityPool) SetActiveBias(bias time.Duration) { } // Active returns the number and total capacity of currently active nodes -func (pp *PriorityPool) Active() (uint64, uint64) { +func (pp *priorityPool) Active() (uint64, uint64) { pp.lock.Lock() defer pp.lock.Unlock() @@ -268,7 +268,7 @@ func (pp *PriorityPool) Active() (uint64, uint64) { } // Limits returns the maximum allowed number and total capacity of active nodes -func (pp *PriorityPool) Limits() (uint64, uint64) { +func (pp *priorityPool) Limits() (uint64, uint64) { pp.lock.Lock() defer pp.lock.Unlock() @@ -301,14 +301,14 @@ func activePriority(a interface{}) int64 { return math.MinInt64 } if c.bias == 0 { - return invertPriority(c.nodePriority.Priority(c.capacity)) + return invertPriority(c.nodePriority.priority(c.capacity)) } else { - return invertPriority(c.nodePriority.EstimatePriority(c.capacity, 0, 0, c.bias, true)) + return invertPriority(c.nodePriority.estimatePriority(c.capacity, 0, 0, c.bias, true)) } } // activeMaxPriority callback returns estimated maximum priority of ppNodeInfo item in activeQueue -func (pp *PriorityPool) activeMaxPriority(a interface{}, until mclock.AbsTime) int64 { +func (pp *priorityPool) activeMaxPriority(a interface{}, until mclock.AbsTime) int64 { c := a.(*ppNodeInfo) if c.forced { return math.MinInt64 @@ -317,17 +317,17 @@ func (pp *PriorityPool) activeMaxPriority(a interface{}, until mclock.AbsTime) i if future < 0 { future = 0 } - return invertPriority(c.nodePriority.EstimatePriority(c.capacity, 0, future, c.bias, false)) + return 
invertPriority(c.nodePriority.estimatePriority(c.capacity, 0, future, c.bias, false)) } // inactivePriority callback returns actual priority of ppNodeInfo item in inactiveQueue -func (pp *PriorityPool) inactivePriority(p *ppNodeInfo) int64 { - return p.nodePriority.Priority(pp.minCap) +func (pp *priorityPool) inactivePriority(p *ppNodeInfo) int64 { + return p.nodePriority.priority(pp.minCap) } // connectedNode is called when a new node has been added to the pool (inactiveFlag set) // Note: this function should run inside a NodeStateMachine operation -func (pp *PriorityPool) connectedNode(c *ppNodeInfo) { +func (pp *priorityPool) connectedNode(c *ppNodeInfo) { pp.lock.Lock() pp.activeQueue.Refresh() var updates []capUpdate @@ -347,7 +347,7 @@ func (pp *PriorityPool) connectedNode(c *ppNodeInfo) { // disconnectedNode is called when a node has been removed from the pool (both inactiveFlag // and activeFlag reset) // Note: this function should run inside a NodeStateMachine operation -func (pp *PriorityPool) disconnectedNode(c *ppNodeInfo) { +func (pp *priorityPool) disconnectedNode(c *ppNodeInfo) { pp.lock.Lock() pp.activeQueue.Refresh() var updates []capUpdate @@ -372,7 +372,7 @@ func (pp *PriorityPool) disconnectedNode(c *ppNodeInfo) { // or confirmed later. This temporary state allows changing the capacity of a node and // moving it between the active and inactive queue. activeFlag/inactiveFlag and // capacityField are not changed while the changes are still temporary. -func (pp *PriorityPool) markForChange(c *ppNodeInfo) { +func (pp *priorityPool) markForChange(c *ppNodeInfo) { if c.changed { return } @@ -384,7 +384,7 @@ func (pp *PriorityPool) markForChange(c *ppNodeInfo) { // setCapacity changes the capacity of a node and adjusts activeCap and activeCount // accordingly. Note that this change is performed in the temporary state so it should // be called after markForChange and before finalizeChanges. -func (pp *PriorityPool) setCapacity(n *ppNodeInfo, cap uint64) { +func (pp *priorityPool) setCapacity(n *ppNodeInfo, cap uint64) { pp.activeCap += cap - n.capacity if n.capacity == 0 { pp.activeCount++ @@ -398,7 +398,7 @@ func (pp *PriorityPool) setCapacity(n *ppNodeInfo, cap uint64) { // enforceLimits enforces active node count and total capacity limits. It returns the // lowest active node priority. Note that this function is performed on the temporary // internal state. -func (pp *PriorityPool) enforceLimits() (*ppNodeInfo, int64) { +func (pp *priorityPool) enforceLimits() (*ppNodeInfo, int64) { if pp.activeCap <= pp.maxCap && pp.activeCount <= pp.maxCount { return nil, math.MinInt64 } @@ -428,7 +428,7 @@ func (pp *PriorityPool) enforceLimits() (*ppNodeInfo, int64) { // finalizeChanges either commits or reverts temporary changes. The necessary capacity // field and according flag updates are not performed here but returned in a list because // they should be performed while the mutex is not held. 
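// Illustrative sketch, not part of the patch: the locking pattern used by the callers of
// finalizeChanges (connectedNode, disconnectedNode, updatePriority). State is mutated
// while holding pp.lock, but the returned capacity/flag updates are only applied after
// the mutex has been released, because updateFlags goes back into the NodeStateMachine
// and must not run with the pool mutex held. The wrapper name is hypothetical and the
// caller is assumed to already run inside a NodeStateMachine operation.
func (pp *priorityPool) sketchChange(change func() []capUpdate) {
	pp.lock.Lock()
	var updates []capUpdate
	defer func() {
		pp.lock.Unlock()
		pp.updateFlags(updates) // SetStateSub/SetFieldSub happen outside the lock
	}()
	updates = change() // e.g. markForChange + setCapacity + finalizeChanges(true)
}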
-func (pp *PriorityPool) finalizeChanges(commit bool) (updates []capUpdate) { +func (pp *priorityPool) finalizeChanges(commit bool) (updates []capUpdate) { for _, c := range pp.changed { // always remove and push back in order to update biased/forced priority pp.activeQueue.Remove(c.activeIndex) @@ -467,7 +467,7 @@ type capUpdate struct { // updateFlags performs capacityField and activeFlag/inactiveFlag updates while the // pool mutex is not held // Note: this function should run inside a NodeStateMachine operation -func (pp *PriorityPool) updateFlags(updates []capUpdate) { +func (pp *priorityPool) updateFlags(updates []capUpdate) { for _, f := range updates { if f.oldCap == 0 { pp.ns.SetStateSub(f.node, pp.activeFlag, pp.inactiveFlag, 0) @@ -482,7 +482,7 @@ func (pp *PriorityPool) updateFlags(updates []capUpdate) { } // tryActivate tries to activate inactive nodes if possible -func (pp *PriorityPool) tryActivate() []capUpdate { +func (pp *priorityPool) tryActivate() []capUpdate { var commit bool for pp.inactiveQueue.Size() > 0 { c := pp.inactiveQueue.PopItem().(*ppNodeInfo) @@ -504,7 +504,7 @@ func (pp *PriorityPool) tryActivate() []capUpdate { // updatePriority gets the current priority value of the given node from the nodePriority // interface and performs the necessary changes. It is triggered by updateFlag. // Note: this function should run inside a NodeStateMachine operation -func (pp *PriorityPool) updatePriority(node *enode.Node) { +func (pp *priorityPool) updatePriority(node *enode.Node) { pp.lock.Lock() pp.activeQueue.Refresh() var updates []capUpdate @@ -527,12 +527,12 @@ func (pp *PriorityPool) updatePriority(node *enode.Node) { updates = pp.tryActivate() } -// CapacityCurve is a snapshot of the priority pool contents in a format that can efficiently +// capacityCurve is a snapshot of the priority pool contents in a format that can efficiently // estimate how much capacity could be granted to a given node at a given priority level. 
-type CapacityCurve struct { +type capacityCurve struct { points []curvePoint // curve points sorted in descending order of priority index map[enode.ID][]int // curve point indexes belonging to each node - exclude []int // curve point indexes of excluded node + excludeList []int // curve point indexes of excluded node excludeFirst bool // true if activeCount == maxCount } @@ -541,8 +541,8 @@ type curvePoint struct { nextPri int64 // next priority level where more capacity will be available } -// GetCapacityCurve returns a new or recently cached CapacityCurve based on the contents of the pool -func (pp *PriorityPool) GetCapacityCurve() *CapacityCurve { +// getCapacityCurve returns a new or recently cached capacityCurve based on the contents of the pool +func (pp *priorityPool) getCapacityCurve() *capacityCurve { pp.lock.Lock() defer pp.lock.Unlock() @@ -554,7 +554,7 @@ func (pp *PriorityPool) GetCapacityCurve() *CapacityCurve { pp.ccUpdateForced = false pp.ccUpdatedAt = now - curve := &CapacityCurve{ + curve := &capacityCurve{ index: make(map[enode.ID][]int), } pp.cachedCurve = curve @@ -579,7 +579,7 @@ func (pp *PriorityPool) GetCapacityCurve() *CapacityCurve { next, cp.nextPri = pp.enforceLimits() pp.activeCap -= tempCap if next == nil { - log.Error("GetCapacityCurve: cannot remove next element from the priority queue") + log.Error("getCapacityCurve: cannot remove next element from the priority queue") break } id := next.node.ID() @@ -602,34 +602,34 @@ func (pp *PriorityPool) GetCapacityCurve() *CapacityCurve { nextPri: math.MaxInt64, }) if curve.excludeFirst { - curve.exclude = curve.index[excludeID] + curve.excludeList = curve.index[excludeID] } return curve } -// Exclude returns a CapacityCurve with the given node excluded from the original curve -func (cc *CapacityCurve) Exclude(id enode.ID) *CapacityCurve { - if exclude, ok := cc.index[id]; ok { +// exclude returns a capacityCurve with the given node excluded from the original curve +func (cc *capacityCurve) exclude(id enode.ID) *capacityCurve { + if excludeList, ok := cc.index[id]; ok { // return a new version of the curve (only one excluded node can be selected) // Note: if the first node was excluded by default (excludeFirst == true) then // we can forget about that and exclude the node with the given id instead. - return &CapacityCurve{ - points: cc.points, - index: cc.index, - exclude: exclude, + return &capacityCurve{ + points: cc.points, + index: cc.index, + excludeList: excludeList, } } return cc } -func (cc *CapacityCurve) getPoint(i int) curvePoint { +func (cc *capacityCurve) getPoint(i int) curvePoint { cp := cc.points[i] if i == 0 && cc.excludeFirst { cp.freeCap = 0 return cp } - for ii := len(cc.exclude) - 1; ii >= 0; ii-- { - ei := cc.exclude[ii] + for ii := len(cc.excludeList) - 1; ii >= 0; ii-- { + ei := cc.excludeList[ii] if ei < i { break } @@ -639,11 +639,11 @@ func (cc *CapacityCurve) getPoint(i int) curvePoint { return cp } -// MaxCapacity calculates the maximum capacity available for a node with a given +// maxCapacity calculates the maximum capacity available for a node with a given // (monotonically decreasing) priority vs. capacity function. Note that if the requesting // node is already in the pool then it should be excluded from the curve in order to get // the correct result. 
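// Hypothetical usage sketch, not part of the patch, mirroring how SetCapacity and
// serveCapQuery above use the curve: snapshot the pool, exclude the querying node, then
// let maxCapacity binary-search the largest capacity at which the node's estimated
// priority still exceeds that of the active nodes it would displace. The callback must
// be monotonically decreasing in its capacity argument; the function name is hypothetical.
func sketchMaxCapacityFor(cp *ClientPool, id enode.ID, freeID string) uint64 {
	var result uint64
	curve := cp.getCapacityCurve().exclude(id)
	cp.BalanceOperation(id, freeID, func(balance AtomicBalanceOperator) {
		result = curve.maxCapacity(func(cap uint64) int64 {
			return balance.estimatePriority(cap, 0, 0, 0, false) / int64(cap)
		})
	})
	return result
}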
-func (cc *CapacityCurve) MaxCapacity(priority func(cap uint64) int64) uint64 { +func (cc *capacityCurve) maxCapacity(priority func(cap uint64) int64) uint64 { min, max := 0, len(cc.points)-1 // the curve always has at least one point for min < max { mid := (min + max) / 2 diff --git a/les/vflux/server/prioritypool_test.go b/les/vflux/server/prioritypool_test.go index 816affb6ec27..31f3a011ae24 100644 --- a/les/vflux/server/prioritypool_test.go +++ b/les/vflux/server/prioritypool_test.go @@ -49,11 +49,11 @@ type ppTestClient struct { balance, cap uint64 } -func (c *ppTestClient) Priority(cap uint64) int64 { +func (c *ppTestClient) priority(cap uint64) int64 { return int64(c.balance / cap) } -func (c *ppTestClient) EstimatePriority(cap uint64, addBalance int64, future, bias time.Duration, update bool) int64 { +func (c *ppTestClient) estimatePriority(cap uint64, addBalance int64, future, bias time.Duration, update bool) int64 { return int64(c.balance / cap) } @@ -67,7 +67,7 @@ func TestPriorityPool(t *testing.T) { c.cap = newValue.(uint64) } }) - pp := NewPriorityPool(ns, ppTestSetup, clock, testMinCap, 0, testCapacityStepDiv) + pp := newPriorityPool(ns, ppTestSetup, clock, testMinCap, 0, testCapacityStepDiv) ns.Start() pp.SetLimits(100, 1000000) clients := make([]*ppTestClient, 100) @@ -75,7 +75,7 @@ func TestPriorityPool(t *testing.T) { for { var ok bool ns.Operation(func() { - _, ok = pp.RequestCapacity(c.node, c.cap+c.cap/testCapacityStepDiv, 0, true) + _, ok = pp.requestCapacity(c.node, c.cap+c.cap/testCapacityStepDiv, 0, true) }) if !ok { return @@ -126,32 +126,28 @@ func TestPriorityPool(t *testing.T) { if count%10 == 0 { // test available capacity calculation with capacity curve c = clients[rand.Intn(len(clients))] - curve := pp.GetCapacityCurve().Exclude(c.node.ID()) + curve := pp.getCapacityCurve().exclude(c.node.ID()) add := uint64(rand.Int63n(10000000000000)) c.balance += add sumBalance += add - expCap := curve.MaxCapacity(func(cap uint64) int64 { + expCap := curve.maxCapacity(func(cap uint64) int64 { return int64(c.balance / cap) }) - //fmt.Println(expCap, c.balance, sumBalance) - /*for i, cp := range curve.points { - fmt.Println("cp", i, cp, "ex", curve.getPoint(i)) - }*/ var ok bool expFail := expCap + 1 if expFail < testMinCap { expFail = testMinCap } ns.Operation(func() { - _, ok = pp.RequestCapacity(c.node, expFail, 0, true) + _, ok = pp.requestCapacity(c.node, expFail, 0, true) }) if ok { t.Errorf("Request for more than expected available capacity succeeded") } if expCap >= testMinCap { ns.Operation(func() { - _, ok = pp.RequestCapacity(c.node, expCap, 0, true) + _, ok = pp.requestCapacity(c.node, expCap, 0, true) }) if !ok { t.Errorf("Request for expected available capacity failed") @@ -173,7 +169,7 @@ func TestPriorityPool(t *testing.T) { func TestCapacityCurve(t *testing.T) { clock := &mclock.Simulated{} ns := nodestate.NewNodeStateMachine(nil, nil, clock, testSetup) - pp := NewPriorityPool(ns, ppTestSetup, clock, 400000, 0, 2) + pp := newPriorityPool(ns, ppTestSetup, clock, 400000, 0, 2) ns.Start() pp.SetLimits(10, 10000000) clients := make([]*ppTestClient, 10) @@ -188,13 +184,13 @@ func TestCapacityCurve(t *testing.T) { ns.SetField(c.node, ppTestSetup.priorityField, c) ns.SetState(c.node, ppTestSetup.inactiveFlag, nodestate.Flags{}, 0) ns.Operation(func() { - pp.RequestCapacity(c.node, c.cap, 0, true) + pp.requestCapacity(c.node, c.cap, 0, true) }) } - curve := pp.GetCapacityCurve() + curve := pp.getCapacityCurve() check := func(balance, expCap uint64) { - cap := 
curve.MaxCapacity(func(cap uint64) int64 { + cap := curve.maxCapacity(func(cap uint64) int64 { return int64(balance / cap) }) var fail bool @@ -222,7 +218,7 @@ func TestCapacityCurve(t *testing.T) { check(1000000000000, 2500000) pp.SetLimits(11, 10000000) - curve = pp.GetCapacityCurve() + curve = pp.getCapacityCurve() check(0, 0) check(10000000000, 100000) From f8700b587f072b2b154c5cd7792daab0d8c8e110 Mon Sep 17 00:00:00 2001 From: Zsolt Felfoldi Date: Wed, 17 Mar 2021 23:20:35 +0100 Subject: [PATCH 09/27] les/vflux/server: fixed priorityPool test --- les/vflux/server/prioritypool_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/les/vflux/server/prioritypool_test.go b/les/vflux/server/prioritypool_test.go index 31f3a011ae24..c7aa6bdf5169 100644 --- a/les/vflux/server/prioritypool_test.go +++ b/les/vflux/server/prioritypool_test.go @@ -135,7 +135,7 @@ func TestPriorityPool(t *testing.T) { return int64(c.balance / cap) }) var ok bool - expFail := expCap + 1 + expFail := expCap + 10 if expFail < testMinCap { expFail = testMinCap } From fc9790eba61d05bb07e8b28c5c3af1e114bf5c35 Mon Sep 17 00:00:00 2001 From: rjl493456442 Date: Wed, 24 Mar 2021 13:56:05 +0800 Subject: [PATCH 10/27] les/vflux/server: polish balance --- les/vflux/server/balance.go | 265 ++++++++++++++++------------ les/vflux/server/balance_test.go | 3 - les/vflux/server/balance_tracker.go | 2 +- 3 files changed, 154 insertions(+), 116 deletions(-) diff --git a/les/vflux/server/balance.go b/les/vflux/server/balance.go index 2b9dae5cbff7..37cd9c280f7c 100644 --- a/les/vflux/server/balance.go +++ b/les/vflux/server/balance.go @@ -47,9 +47,10 @@ type PriceFactors struct { TimeFactor, CapacityFactor, RequestFactor float64 } -// timePrice returns the price of connection per nanosecond at the given capacity -func (p PriceFactors) timePrice(cap uint64) float64 { - return p.TimeFactor + float64(cap)*p.CapacityFactor/1000000 +// costPrice returns the price of connection per nanosecond at the given capacity +// and the estimated average request cost. +func (p PriceFactors) costPrice(cap uint64, avgReqCost float64) float64 { + return p.TimeFactor + float64(cap)*p.CapacityFactor/1000000 + p.RequestFactor*avgReqCost } type ( @@ -113,7 +114,57 @@ type nodeBalance struct { // balance represents a pair of positive and negative balances type balance struct { - pos, neg utils.ExpiredValue + pos, neg utils.ExpiredValue + posExp, negExp utils.ValueExpirer +} + +// value returns the value of balance at a given timestamp. +func (b balance) value(now mclock.AbsTime) (uint64, uint64) { + return b.pos.Value(b.posExp.LogOffset(now)), b.neg.Value(b.negExp.LogOffset(now)) +} + +// add adds the value of a given amount to the balance. The original value and +// updated value will also be returned if the addtion is successful. +// Returns the error if the given value is too large and the value overflows. 
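Before the implementation below, the intended call-site semantics in short (a hedged sketch; b, now and amount are assumed to be in scope):

    old, new, net, err := b.add(now, amount, true, false) // positive side, no forcing
    if err == errBalanceOverflow {
        // the addition would have exceeded maxBalance and force was false,
        // so the stored balance is unchanged and old still holds its value
    }
    _, _, _ = old, new, net // value before, value after, amount actually applied
    // with force == true the call never fails: the balance is clamped to maxBalance instead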
+func (b *balance) add(now mclock.AbsTime, amount int64, pos bool, force bool) (uint64, uint64, int64, error) { + var ( + val utils.ExpiredValue + offset utils.Fixed64 + ) + if pos { + offset, val = b.posExp.LogOffset(now), b.pos + } else { + offset, val = b.negExp.LogOffset(now), b.neg + } + old := val.Value(offset) + if amount > 0 && (amount > maxBalance || old > maxBalance-uint64(amount)) { + if !force { + return old, 0, 0, errBalanceOverflow + } + val = utils.ExpiredValue{} + amount = maxBalance + } + net := val.Add(amount, offset) + if pos { + b.pos = val + } else { + b.neg = val + } + return old, val.Value(offset), net, nil +} + +// setValue sets the internal balance amount to the given values. Returns the +// error if the given value is too large. +func (b *balance) setValue(now mclock.AbsTime, pos uint64, neg uint64) error { + if pos > maxBalance || neg > maxBalance { + return errBalanceOverflow + } + var pb, nb utils.ExpiredValue + pb.Add(int64(pos), b.posExp.LogOffset(now)) + nb.Add(int64(neg), b.negExp.LogOffset(now)) + b.pos = pb + b.neg = nb + return nil } // balanceCallback represents a single callback that is activated when client priority @@ -131,7 +182,7 @@ func (n *nodeBalance) GetBalance() (uint64, uint64) { now := n.bt.clock.Now() n.updateBalance(now) - return n.balance.pos.Value(n.bt.posExp.LogOffset(now)), n.balance.neg.Value(n.bt.negExp.LogOffset(now)) + return n.balance.value(now) } // GetRawBalance returns the current positive and negative balance @@ -152,33 +203,26 @@ func (n *nodeBalance) GetRawBalance() (utils.ExpiredValue, utils.ExpiredValue) { // Note: this function should run inside a NodeStateMachine operation func (n *nodeBalance) AddBalance(amount int64) (uint64, uint64, error) { var ( - err error - old, new uint64 - ) - var ( + err error + old, new uint64 + now = n.bt.clock.Now() callbacks []func() setPriority bool ) + // Operation with holding the lock n.bt.updateTotalBalance(n, func() bool { - now := n.bt.clock.Now() n.updateBalance(now) - - // Ensure the given amount is valid to apply. - offset := n.bt.posExp.LogOffset(now) - old = n.balance.pos.Value(offset) - if amount > 0 && (amount > maxBalance || old > maxBalance-uint64(amount)) { - err = errBalanceOverflow + if old, new, _, err = n.balance.add(now, amount, true, false); err != nil { return false } - - // Update the total positive balance counter. 
- n.balance.pos.Add(amount, offset) - callbacks = n.checkCallbacks(now) - setPriority = n.checkPriorityStatus() - new = n.balance.pos.Value(offset) + callbacks, setPriority = n.checkCallbacks(now), n.checkPriorityStatus() n.storeBalance(true, false) return true }) + if err != nil { + return old, old, err + } + // Operation without holding the lock for _, cb := range callbacks { cb() } @@ -188,36 +232,28 @@ func (n *nodeBalance) AddBalance(amount int64) (uint64, uint64, error) { } n.signalPriorityUpdate() } - if err != nil { - return old, old, err - } return old, new, nil } // SetBalance sets the positive and negative balance to the given values // Note: this function should run inside a NodeStateMachine operation func (n *nodeBalance) SetBalance(pos, neg uint64) error { - if pos > maxBalance || neg > maxBalance { - return errBalanceOverflow - } var ( + now = n.bt.clock.Now() callbacks []func() setPriority bool ) + // Operation with holding the lock n.bt.updateTotalBalance(n, func() bool { - now := n.bt.clock.Now() n.updateBalance(now) - - var pb, nb utils.ExpiredValue - pb.Add(int64(pos), n.bt.posExp.LogOffset(now)) - nb.Add(int64(neg), n.bt.negExp.LogOffset(now)) - n.balance.pos = pb - n.balance.neg = nb - callbacks = n.checkCallbacks(now) - setPriority = n.checkPriorityStatus() + if err := n.balance.setValue(now, pos, neg); err != nil { + return false + } + callbacks, setPriority = n.checkCallbacks(now), n.checkPriorityStatus() n.storeBalance(true, true) return true }) + // Operation without holding the lock for _, cb := range callbacks { cb() } @@ -233,49 +269,46 @@ func (n *nodeBalance) SetBalance(pos, neg uint64) error { // RequestServed should be called after serving a request for the given peer func (n *nodeBalance) RequestServed(cost uint64) uint64 { n.lock.Lock() - var callbacks []func() - defer func() { - n.lock.Unlock() - if callbacks != nil { - n.bt.ns.Operation(func() { - for _, cb := range callbacks { - cb() - } - }) - } - }() - now := n.bt.clock.Now() + var ( + check bool + fcost = float64(cost) + now = n.bt.clock.Now() + ) n.updateBalance(now) - fcost := float64(cost) - - posExp := n.bt.posExp.LogOffset(now) - var check bool if !n.balance.pos.IsZero() { - if n.posFactor.RequestFactor != 0 { - c := -int64(fcost * n.posFactor.RequestFactor) - cc := n.balance.pos.Add(c, posExp) - if c == cc { + posCost := -int64(fcost * n.posFactor.RequestFactor) + if posCost == 0 { + fcost = 0 + } else { + _, _, net, _ := n.balance.add(now, posCost, true, false) + if posCost == net { fcost = 0 } else { - fcost *= 1 - float64(cc)/float64(c) + fcost *= 1 - float64(net)/float64(posCost) } check = true - } else { - fcost = 0 } } - if fcost > 0 { - if n.negFactor.RequestFactor != 0 { - n.balance.neg.Add(int64(fcost*n.negFactor.RequestFactor), n.bt.negExp.LogOffset(now)) - check = true - } + if fcost > 0 && n.negFactor.RequestFactor != 0 { + n.balance.add(now, int64(fcost*n.negFactor.RequestFactor), false, false) + check = true } + n.sumReqCost += cost + pos, _ := n.balance.value(now) + n.lock.Unlock() + if check { - callbacks = n.checkCallbacks(now) + callbacks := n.checkCallbacks(now) + if callbacks != nil { + n.bt.ns.Operation(func() { + for _, cb := range callbacks { + cb() + } + }) + } } - n.sumReqCost += cost - return n.balance.pos.Value(posExp) + return pos } // priority returns the actual priority based on the current balance @@ -283,8 +316,9 @@ func (n *nodeBalance) priority(capacity uint64) int64 { n.lock.Lock() defer n.lock.Unlock() - n.updateBalance(n.bt.clock.Now()) - return 
n.balanceToPriority(n.balance, capacity) + now := n.bt.clock.Now() + n.updateBalance(now) + return n.balanceToPriority(now, n.balance, capacity) } // EstMinPriority gives a lower estimate for the priority at a given time in the future. @@ -298,16 +332,10 @@ func (n *nodeBalance) estimatePriority(capacity uint64, addBalance int64, future now := n.bt.clock.Now() n.updateBalance(now) - b := n.balance + + b := n.balance // copy the balance if addBalance != 0 { - offset := n.bt.posExp.LogOffset(now) - old := n.balance.pos.Value(offset) - if addBalance > 0 && (addBalance > maxBalance || old > maxBalance-uint64(addBalance)) { - b.pos = utils.ExpiredValue{} - b.pos.Add(maxBalance, offset) - } else { - b.pos.Add(addBalance, offset) - } + b.add(now, addBalance, true, true) } if future > 0 { var avgReqCost float64 @@ -324,7 +352,7 @@ func (n *nodeBalance) estimatePriority(capacity uint64, addBalance int64, future // estimates are always lower than actual priorities, even if the bias is very small. // This ensures that two nodes will not ping-pong update signals forever if both of // them have zero estimated priority drop in the projected future. - pri := n.balanceToPriority(b, capacity) - 1 + pri := n.balanceToPriority(now, b, capacity) - 1 if update { n.addCallback(balanceCallbackUpdate, pri, n.signalPriorityUpdate) } @@ -450,7 +478,7 @@ func (n *nodeBalance) checkCallbacks(now mclock.AbsTime) (callbacks []func()) { if n.callbackCount == 0 || n.capacity == 0 { return } - pri := n.balanceToPriority(n.balance, n.capacity) + pri := n.balanceToPriority(now, n.balance, n.capacity) for n.callbackCount != 0 && n.callbacks[n.callbackCount-1].threshold >= pri { n.callbackCount-- n.callbackIndex[n.callbacks[n.callbackCount].id] = -1 @@ -563,11 +591,22 @@ func (n *nodeBalance) setCapacity(capacity uint64) { // balanceToPriority converts a balance to a priority value. Lower priority means // first to disconnect. Positive balance translates to positive priority. If positive // balance is zero then negative balance translates to a negative priority. -func (n *nodeBalance) balanceToPriority(b balance, capacity uint64) int64 { - if !b.pos.IsZero() { - return int64(b.pos.Value(n.bt.posExp.LogOffset(n.bt.clock.Now())) / capacity) +func (n *nodeBalance) balanceToPriority(now mclock.AbsTime, b balance, capacity uint64) int64 { + pos, neg := b.value(now) + if pos > 0 { + return int64(pos / capacity) } - return -int64(b.neg.Value(n.bt.negExp.LogOffset(n.bt.clock.Now()))) + return -int64(neg) +} + +// priorityToBalance converts a target priority to a requested balance value. +// If the priority is negative, then minimal negative balance is returned; +// otherwise the minimal positive balance is returned. 
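As a quick numeric check of the two conversions (illustrative values only): with a capacity of 1000 and a positive balance of 5,000,000 the priority is 5,000,000 / 1000 = 5000, and converting that priority back yields the minimal positive balance 5000 * 1000 = 5,000,000; with zero positive balance and a negative balance of 42 the priority is -42, which maps back to a minimal negative balance of 42. The two functions are thus inverses of each other up to integer truncation.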
+func (n *nodeBalance) priorityToBalance(priority int64, capacity uint64) (uint64, uint64) { + if priority > 0 { + return uint64(priority) * n.capacity, 0 + } + return 0, uint64(-priority) } // reducedBalance estimates the reduced balance at a given time in the fututre based @@ -575,21 +614,23 @@ func (n *nodeBalance) balanceToPriority(b balance, capacity uint64) int64 { func (n *nodeBalance) reducedBalance(b balance, start mclock.AbsTime, dt time.Duration, capacity uint64, avgReqCost float64) balance { // since the costs are applied continuously during the dt time period we calculate // the expiration offset at the middle of the period - at := start + mclock.AbsTime(dt/2) - dtf := float64(dt) + var ( + at = start + mclock.AbsTime(dt/2) + dtf = float64(dt) + ) if !b.pos.IsZero() { - factor := n.posFactor.timePrice(capacity) + n.posFactor.RequestFactor*avgReqCost + factor := n.posFactor.costPrice(capacity, avgReqCost) diff := -int64(dtf * factor) - dd := b.pos.Add(diff, n.bt.posExp.LogOffset(at)) - if dd == diff { + _, _, net, _ := b.add(at, diff, true, false) + if net == diff { dtf = 0 } else { - dtf += float64(dd) / factor + dtf += float64(net) / factor } } - if dt > 0 { - factor := n.negFactor.timePrice(capacity) + n.negFactor.RequestFactor*avgReqCost - b.neg.Add(int64(dtf*factor), n.bt.negExp.LogOffset(at)) + if dtf > 0 { + factor := n.negFactor.costPrice(capacity, avgReqCost) + b.add(at, int64(dtf*factor), false, false) } return b } @@ -600,37 +641,37 @@ func (n *nodeBalance) reducedBalance(b balance, start mclock.AbsTime, dt time.Du // Note: the function assumes that the balance has been recently updated and // calculates the time starting from the last update. func (n *nodeBalance) timeUntil(priority int64) (time.Duration, bool) { - now := n.bt.clock.Now() - var dt float64 - if !n.balance.pos.IsZero() { - posBalance := n.balance.pos.Value(n.bt.posExp.LogOffset(now)) - timePrice := n.posFactor.timePrice(n.capacity) + var ( + now = n.bt.clock.Now() + pos, neg = n.balance.value(now) + targetPos, targetNeg = n.priorityToBalance(priority, n.capacity) + diffTime float64 + ) + if pos > 0 { + timePrice := n.posFactor.costPrice(n.capacity, 0) if timePrice < 1e-100 { return 0, false } - if priority > 0 { - newBalance := uint64(priority) * n.capacity - if newBalance > posBalance { + if targetPos > 0 { + if targetPos > pos { return 0, false } - dt = float64(posBalance-newBalance) / timePrice - return time.Duration(dt), true + diffTime = float64(pos-targetPos) / timePrice + return time.Duration(diffTime), true } else { - dt = float64(posBalance) / timePrice + diffTime = float64(pos) / timePrice } } else { - if priority > 0 { + if targetPos > 0 { return 0, false } } - // if we have a positive balance then dt equals the time needed to get it to zero - negBalance := n.balance.neg.Value(n.bt.negExp.LogOffset(now)) - timePrice := n.negFactor.timePrice(n.capacity) - if uint64(-priority) > negBalance { + if targetNeg > neg { + timePrice := n.negFactor.costPrice(n.capacity, 0) if timePrice < 1e-100 { return 0, false } - dt += float64(uint64(-priority)-negBalance) / timePrice + diffTime += float64(targetNeg-neg) / timePrice } - return time.Duration(dt), true + return time.Duration(diffTime), true } diff --git a/les/vflux/server/balance_test.go b/les/vflux/server/balance_test.go index dce51ad1884b..964b1d21a7bc 100644 --- a/les/vflux/server/balance_test.go +++ b/les/vflux/server/balance_test.go @@ -148,7 +148,6 @@ func TestSetBalance(t *testing.T) { {0, 1000}, {1000, 1000}, } - for _, i := range inputs { 
b.setBalance(node, i.pos, i.neg) pos, neg := node.GetBalance() @@ -261,8 +260,6 @@ func TestEstimatedPriority(t *testing.T) { defer b.stop() node := b.newNode(1000000000) node.SetPriceFactors(PriceFactors{1, 0, 1}, PriceFactors{1, 0, 1}) - - b.ns.SetField(node.node, ppTestSetup.capacityField, uint64(1)) b.setBalance(node, uint64(time.Minute), 0) var inputs = []struct { runTime time.Duration // time cost diff --git a/les/vflux/server/balance_tracker.go b/les/vflux/server/balance_tracker.go index 39dd0130439b..5924d6e5f397 100644 --- a/les/vflux/server/balance_tracker.go +++ b/les/vflux/server/balance_tracker.go @@ -262,7 +262,7 @@ func (bt *balanceTracker) newNodeBalance(node *enode.Node, negBalanceKey string, node: node, setFlags: setFlags, connAddress: negBalanceKey, - balance: balance{pos: pb, neg: nb}, + balance: balance{pos: pb, neg: nb, posExp: bt.posExp, negExp: bt.negExp}, initTime: bt.clock.Now(), lastUpdate: bt.clock.Now(), } From b63f1234a9c91dc90368e205db7ad48960990521 Mon Sep 17 00:00:00 2001 From: Zsolt Felfoldi Date: Wed, 24 Mar 2021 21:35:33 +0100 Subject: [PATCH 11/27] les/vflux/server: fixed mutex locking error --- les/vflux/server/balance.go | 20 +++++++++++--------- 1 file changed, 11 insertions(+), 9 deletions(-) diff --git a/les/vflux/server/balance.go b/les/vflux/server/balance.go index 37cd9c280f7c..bfc5b9e4ff83 100644 --- a/les/vflux/server/balance.go +++ b/les/vflux/server/balance.go @@ -296,17 +296,19 @@ func (n *nodeBalance) RequestServed(cost uint64) uint64 { } n.sumReqCost += cost pos, _ := n.balance.value(now) - n.lock.Unlock() + var callbacks []func() if check { - callbacks := n.checkCallbacks(now) - if callbacks != nil { - n.bt.ns.Operation(func() { - for _, cb := range callbacks { - cb() - } - }) - } + callbacks = n.checkCallbacks(now) + } + n.lock.Unlock() + + if callbacks != nil { + n.bt.ns.Operation(func() { + for _, cb := range callbacks { + cb() + } + }) } return pos } From 42d08dc28a628fceb129b86a99f289783a9aa9cb Mon Sep 17 00:00:00 2001 From: Zsolt Felfoldi Date: Thu, 25 Mar 2021 03:36:51 +0100 Subject: [PATCH 12/27] les/vflux/server: priorityPool bug fixed --- les/vflux/server/prioritypool.go | 1 + 1 file changed, 1 insertion(+) diff --git a/les/vflux/server/prioritypool.go b/les/vflux/server/prioritypool.go index 8c59ee6443ec..3ac87359fdf3 100644 --- a/les/vflux/server/prioritypool.go +++ b/les/vflux/server/prioritypool.go @@ -493,6 +493,7 @@ func (pp *priorityPool) tryActivate() []capUpdate { pp.enforceLimits() if c.capacity > 0 { commit = true + c.bias = 0 } else { break } From 23ab832cd31efc093d5baab3257d310fb7bc6527 Mon Sep 17 00:00:00 2001 From: Zsolt Felfoldi Date: Thu, 25 Mar 2021 03:49:35 +0100 Subject: [PATCH 13/27] common/prque: make Prque wrap-around priority handling optional --- common/prque/lazyqueue.go | 6 +++--- common/prque/prque.go | 7 ++++++- common/prque/sstack.go | 20 +++++++++++++------- common/prque/sstack_test.go | 6 +++--- les/flowcontrol/manager.go | 2 +- les/servingqueue.go | 4 ++-- 6 files changed, 28 insertions(+), 17 deletions(-) diff --git a/common/prque/lazyqueue.go b/common/prque/lazyqueue.go index c74faab7e674..37c2f3bd42af 100644 --- a/common/prque/lazyqueue.go +++ b/common/prque/lazyqueue.go @@ -55,7 +55,7 @@ type ( // NewLazyQueue creates a new lazy queue func NewLazyQueue(setIndex SetIndexCallback, priority PriorityCallback, maxPriority MaxPriorityCallback, clock mclock.Clock, refreshPeriod time.Duration) *LazyQueue { q := &LazyQueue{ - popQueue: newSstack(nil), + popQueue: newSstack(nil, false), setIndex: 
setIndex, priority: priority, maxPriority: maxPriority, @@ -71,8 +71,8 @@ func NewLazyQueue(setIndex SetIndexCallback, priority PriorityCallback, maxPrior // Reset clears the contents of the queue func (q *LazyQueue) Reset() { - q.queue[0] = newSstack(q.setIndex0) - q.queue[1] = newSstack(q.setIndex1) + q.queue[0] = newSstack(q.setIndex0, false) + q.queue[1] = newSstack(q.setIndex1, false) } // Refresh performs queue re-evaluation if necessary diff --git a/common/prque/prque.go b/common/prque/prque.go index 3cc5a1adaf15..54c78b5fc2ba 100755 --- a/common/prque/prque.go +++ b/common/prque/prque.go @@ -28,7 +28,12 @@ type Prque struct { // New creates a new priority queue. func New(setIndex SetIndexCallback) *Prque { - return &Prque{newSstack(setIndex)} + return &Prque{newSstack(setIndex, false)} +} + +// NewWrapAround creates a new priority queue with wrap-around priority handling. +func NewWrapAround(setIndex SetIndexCallback) *Prque { + return &Prque{newSstack(setIndex, true)} } // Pushes a value with a given priority into the queue, expanding if necessary. diff --git a/common/prque/sstack.go b/common/prque/sstack.go index 8518af54ff1a..b06a95413df0 100755 --- a/common/prque/sstack.go +++ b/common/prque/sstack.go @@ -31,22 +31,24 @@ type SetIndexCallback func(data interface{}, index int) // the stack (heap) functionality and the Len, Less and Swap methods for the // sortability requirements of the heaps. type sstack struct { - setIndex SetIndexCallback - size int - capacity int - offset int + setIndex SetIndexCallback + size int + capacity int + offset int + wrapAround bool blocks [][]*item active []*item } // Creates a new, empty stack. -func newSstack(setIndex SetIndexCallback) *sstack { +func newSstack(setIndex SetIndexCallback, wrapAround bool) *sstack { result := new(sstack) result.setIndex = setIndex result.active = make([]*item, blockSize) result.blocks = [][]*item{result.active} result.capacity = blockSize + result.wrapAround = wrapAround return result } @@ -94,7 +96,11 @@ func (s *sstack) Len() int { // Compares the priority of two elements of the stack (higher is first). // Required by sort.Interface. func (s *sstack) Less(i, j int) bool { - return (s.blocks[i/blockSize][i%blockSize].priority - s.blocks[j/blockSize][j%blockSize].priority) > 0 + a, b := s.blocks[i/blockSize][i%blockSize].priority, s.blocks[j/blockSize][j%blockSize].priority + if s.wrapAround { + return a-b > 0 + } + return a > b } // Swaps two elements in the stack. Required by sort.Interface. @@ -110,5 +116,5 @@ func (s *sstack) Swap(i, j int) { // Resets the stack, effectively clearing its contents. 
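The only behavioural difference in wrap-around mode is the comparison above: a-b > 0 instead of a > b, which treats the int64 priority space as circular so that the ordering stays correct after priorities overflow (the servingQueue change below relies on the same trick). A small standalone illustration, assuming fmt and math are imported:

    a := int64(math.MinInt64) + 5 // a priority that has wrapped past MaxInt64
    b := int64(math.MaxInt64) - 5
    fmt.Println(a > b)   // false: a plain comparison ranks b higher
    fmt.Println(a-b > 0) // true: the wrap-around comparison ranks a higher,
                         // since it is 11 steps "ahead" of b on the circular scale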
func (s *sstack) Reset() { - *s = *newSstack(s.setIndex) + *s = *newSstack(s.setIndex, false) } diff --git a/common/prque/sstack_test.go b/common/prque/sstack_test.go index 2ff093579da9..bc6298979cbc 100644 --- a/common/prque/sstack_test.go +++ b/common/prque/sstack_test.go @@ -21,7 +21,7 @@ func TestSstack(t *testing.T) { for i := 0; i < size; i++ { data[i] = &item{rand.Int(), rand.Int63()} } - stack := newSstack(nil) + stack := newSstack(nil, false) for rep := 0; rep < 2; rep++ { // Push all the data into the stack, pop out every second secs := []*item{} @@ -55,7 +55,7 @@ func TestSstackSort(t *testing.T) { data[i] = &item{rand.Int(), int64(i)} } // Push all the data into the stack - stack := newSstack(nil) + stack := newSstack(nil, false) for _, val := range data { stack.Push(val) } @@ -76,7 +76,7 @@ func TestSstackReset(t *testing.T) { for i := 0; i < size; i++ { data[i] = &item{rand.Int(), rand.Int63()} } - stack := newSstack(nil) + stack := newSstack(nil, false) for rep := 0; rep < 2; rep++ { // Push all the data into the stack, pop out every second secs := []*item{} diff --git a/les/flowcontrol/manager.go b/les/flowcontrol/manager.go index d6d0b1adde5a..c9e681c1440a 100644 --- a/les/flowcontrol/manager.go +++ b/les/flowcontrol/manager.go @@ -108,7 +108,7 @@ type ClientManager struct { func NewClientManager(curve PieceWiseLinear, clock mclock.Clock) *ClientManager { cm := &ClientManager{ clock: clock, - rcQueue: prque.New(func(a interface{}, i int) { a.(*ClientNode).queueIndex = i }), + rcQueue: prque.NewWrapAround(func(a interface{}, i int) { a.(*ClientNode).queueIndex = i }), capLastUpdate: clock.Now(), stop: make(chan chan struct{}), } diff --git a/les/servingqueue.go b/les/servingqueue.go index 9db84e6159cf..16e064cb3f8a 100644 --- a/les/servingqueue.go +++ b/les/servingqueue.go @@ -123,7 +123,7 @@ func (t *servingTask) waitOrStop() bool { // newServingQueue returns a new servingQueue func newServingQueue(suspendBias int64, utilTarget float64) *servingQueue { sq := &servingQueue{ - queue: prque.New(nil), + queue: prque.NewWrapAround(nil), suspendBias: suspendBias, queueAddCh: make(chan *servingTask, 100), queueBestCh: make(chan *servingTask), @@ -279,7 +279,7 @@ func (sq *servingQueue) updateRecentTime() { func (sq *servingQueue) addTask(task *servingTask) { if sq.best == nil { sq.best = task - } else if task.priority > sq.best.priority { + } else if task.priority-sq.best.priority > 0 { sq.queue.Push(sq.best, sq.best.priority) sq.best = task } else { From 31dd954031cc6f695d2ced910b8cf1bbf82186d3 Mon Sep 17 00:00:00 2001 From: Zsolt Felfoldi Date: Thu, 25 Mar 2021 04:15:23 +0100 Subject: [PATCH 14/27] les/vflux/server: rename funcs, small optimizations --- les/vflux/server/balance.go | 37 ++++++++++++++++++++----------------- 1 file changed, 20 insertions(+), 17 deletions(-) diff --git a/les/vflux/server/balance.go b/les/vflux/server/balance.go index bfc5b9e4ff83..af16aeca0b35 100644 --- a/les/vflux/server/balance.go +++ b/les/vflux/server/balance.go @@ -47,9 +47,9 @@ type PriceFactors struct { TimeFactor, CapacityFactor, RequestFactor float64 } -// costPrice returns the price of connection per nanosecond at the given capacity +// connectionPrice returns the price of connection per nanosecond at the given capacity // and the estimated average request cost. 
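For a sense of the magnitudes (factor values here are made up): with TimeFactor = 1, CapacityFactor = 500 and RequestFactor = 2, a client at 1,000,000 capacity units with an average request cost of 10 is charged 1 + 1,000,000*500/1,000,000 + 2*10 = 521 balance units per nanosecond of connection.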
-func (p PriceFactors) costPrice(cap uint64, avgReqCost float64) float64 { +func (p PriceFactors) connectionPrice(cap uint64, avgReqCost float64) float64 { return p.TimeFactor + float64(cap)*p.CapacityFactor/1000000 + p.RequestFactor*avgReqCost } @@ -124,9 +124,9 @@ func (b balance) value(now mclock.AbsTime) (uint64, uint64) { } // add adds the value of a given amount to the balance. The original value and -// updated value will also be returned if the addtion is successful. +// updated value will also be returned if the addition is successful. // Returns the error if the given value is too large and the value overflows. -func (b *balance) add(now mclock.AbsTime, amount int64, pos bool, force bool) (uint64, uint64, int64, error) { +func (b *balance) addPosValue(now mclock.AbsTime, amount int64, pos bool, force bool) (uint64, uint64, int64, error) { var ( val utils.ExpiredValue offset utils.Fixed64 @@ -212,7 +212,7 @@ func (n *nodeBalance) AddBalance(amount int64) (uint64, uint64, error) { // Operation with holding the lock n.bt.updateTotalBalance(n, func() bool { n.updateBalance(now) - if old, new, _, err = n.balance.add(now, amount, true, false); err != nil { + if old, new, _, err = n.balance.addPosValue(now, amount, true, false); err != nil { return false } callbacks, setPriority = n.checkCallbacks(now), n.checkPriorityStatus() @@ -230,6 +230,7 @@ func (n *nodeBalance) AddBalance(amount int64) (uint64, uint64, error) { if setPriority { n.bt.ns.SetStateSub(n.node, n.bt.priorityFlag, nodestate.Flags{}, 0) } + // Note: priority flag is automatically removed by the zero priority callback if necessary n.signalPriorityUpdate() } return old, new, nil @@ -261,13 +262,14 @@ func (n *nodeBalance) SetBalance(pos, neg uint64) error { if setPriority { n.bt.ns.SetStateSub(n.node, n.bt.priorityFlag, nodestate.Flags{}, 0) } + // Note: priority flag is automatically removed by the zero priority callback if necessary n.signalPriorityUpdate() } return nil } // RequestServed should be called after serving a request for the given peer -func (n *nodeBalance) RequestServed(cost uint64) uint64 { +func (n *nodeBalance) RequestServed(cost uint64) (newBalance uint64) { n.lock.Lock() var ( @@ -280,8 +282,10 @@ func (n *nodeBalance) RequestServed(cost uint64) uint64 { posCost := -int64(fcost * n.posFactor.RequestFactor) if posCost == 0 { fcost = 0 + newBalance, _ = n.balance.value(now) } else { - _, _, net, _ := n.balance.add(now, posCost, true, false) + var net int64 + _, newBalance, net, _ = n.balance.addPosValue(now, posCost, true, false) if posCost == net { fcost = 0 } else { @@ -291,11 +295,10 @@ func (n *nodeBalance) RequestServed(cost uint64) uint64 { } } if fcost > 0 && n.negFactor.RequestFactor != 0 { - n.balance.add(now, int64(fcost*n.negFactor.RequestFactor), false, false) + n.balance.addPosValue(now, int64(fcost*n.negFactor.RequestFactor), false, false) check = true } n.sumReqCost += cost - pos, _ := n.balance.value(now) var callbacks []func() if check { @@ -310,7 +313,7 @@ func (n *nodeBalance) RequestServed(cost uint64) uint64 { } }) } - return pos + return } // priority returns the actual priority based on the current balance @@ -337,7 +340,7 @@ func (n *nodeBalance) estimatePriority(capacity uint64, addBalance int64, future b := n.balance // copy the balance if addBalance != 0 { - b.add(now, addBalance, true, true) + b.addPosValue(now, addBalance, true, true) } if future > 0 { var avgReqCost float64 @@ -621,9 +624,9 @@ func (n *nodeBalance) reducedBalance(b balance, start mclock.AbsTime, dt time.Du 
dtf = float64(dt) ) if !b.pos.IsZero() { - factor := n.posFactor.costPrice(capacity, avgReqCost) + factor := n.posFactor.connectionPrice(capacity, avgReqCost) diff := -int64(dtf * factor) - _, _, net, _ := b.add(at, diff, true, false) + _, _, net, _ := b.addPosValue(at, diff, true, false) if net == diff { dtf = 0 } else { @@ -631,8 +634,8 @@ func (n *nodeBalance) reducedBalance(b balance, start mclock.AbsTime, dt time.Du } } if dtf > 0 { - factor := n.negFactor.costPrice(capacity, avgReqCost) - b.add(at, int64(dtf*factor), false, false) + factor := n.negFactor.connectionPrice(capacity, avgReqCost) + b.addPosValue(at, int64(dtf*factor), false, false) } return b } @@ -650,7 +653,7 @@ func (n *nodeBalance) timeUntil(priority int64) (time.Duration, bool) { diffTime float64 ) if pos > 0 { - timePrice := n.posFactor.costPrice(n.capacity, 0) + timePrice := n.posFactor.connectionPrice(n.capacity, 0) if timePrice < 1e-100 { return 0, false } @@ -669,7 +672,7 @@ func (n *nodeBalance) timeUntil(priority int64) (time.Duration, bool) { } } if targetNeg > neg { - timePrice := n.negFactor.costPrice(n.capacity, 0) + timePrice := n.negFactor.connectionPrice(n.capacity, 0) if timePrice < 1e-100 { return 0, false } From 9332261f8da17f36ee20f6d461886a291744a6e9 Mon Sep 17 00:00:00 2001 From: Zsolt Felfoldi Date: Thu, 25 Mar 2021 04:17:46 +0100 Subject: [PATCH 15/27] les/vflux/server: fixed timeUntil --- les/vflux/server/balance.go | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/les/vflux/server/balance.go b/les/vflux/server/balance.go index af16aeca0b35..88c1804fcc52 100644 --- a/les/vflux/server/balance.go +++ b/les/vflux/server/balance.go @@ -642,7 +642,8 @@ func (n *nodeBalance) reducedBalance(b balance, start mclock.AbsTime, dt time.Du // timeUntil calculates the remaining time needed to reach a given priority level // assuming that no requests are processed until then. If the given level is never -// reached then (0, false) is returned. +// reached then (0, false) is returned. If it has already been reached then (0, true) +// is returned. // Note: the function assumes that the balance has been recently updated and // calculates the time starting from the last update. func (n *nodeBalance) timeUntil(priority int64) (time.Duration, bool) { @@ -659,7 +660,7 @@ func (n *nodeBalance) timeUntil(priority int64) (time.Duration, bool) { } if targetPos > 0 { if targetPos > pos { - return 0, false + return 0, true } diffTime = float64(pos-targetPos) / timePrice return time.Duration(diffTime), true @@ -668,7 +669,7 @@ func (n *nodeBalance) timeUntil(priority int64) (time.Duration, bool) { } } else { if targetPos > 0 { - return 0, false + return 0, true } } if targetNeg > neg { From 380d626309d91dbb3e6c400a2a9c527dc7ef347c Mon Sep 17 00:00:00 2001 From: Zsolt Felfoldi Date: Thu, 25 Mar 2021 04:21:46 +0100 Subject: [PATCH 16/27] les/vflux/server: separated balance.posValue and negValue --- les/vflux/server/balance.go | 22 ++++++++++++++-------- 1 file changed, 14 insertions(+), 8 deletions(-) diff --git a/les/vflux/server/balance.go b/les/vflux/server/balance.go index 88c1804fcc52..1463d850f6e1 100644 --- a/les/vflux/server/balance.go +++ b/les/vflux/server/balance.go @@ -118,9 +118,14 @@ type balance struct { posExp, negExp utils.ValueExpirer } -// value returns the value of balance at a given timestamp. 
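Looping back to the timeUntil fix above, a worked example of the calculation (numbers are made up): with capacity 1000, TimeFactor 1 and CapacityFactor 0 the time price is 1 balance unit per nanosecond; a node holding a positive balance of 10,000,000 that asks for the time until priority 4000 has a target balance of 4000 * 1000 = 4,000,000, so the answer is (10,000,000 - 4,000,000) / 1 = 6,000,000 ns. If the requested priority exceeded the current pos/capacity ratio the target would already have been passed, which is exactly the case the fix now reports as (0, true).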
-func (b balance) value(now mclock.AbsTime) (uint64, uint64) { - return b.pos.Value(b.posExp.LogOffset(now)), b.neg.Value(b.negExp.LogOffset(now)) +// posValue returns the value of positive balance at a given timestamp. +func (b balance) posValue(now mclock.AbsTime) uint64 { + return b.pos.Value(b.posExp.LogOffset(now)) +} + +// negValue returns the value of negative balance at a given timestamp. +func (b balance) negValue(now mclock.AbsTime) uint64 { + return b.neg.Value(b.negExp.LogOffset(now)) } // add adds the value of a given amount to the balance. The original value and @@ -182,7 +187,7 @@ func (n *nodeBalance) GetBalance() (uint64, uint64) { now := n.bt.clock.Now() n.updateBalance(now) - return n.balance.value(now) + return n.balance.posValue(now), n.balance.negValue(now) } // GetRawBalance returns the current positive and negative balance @@ -282,7 +287,7 @@ func (n *nodeBalance) RequestServed(cost uint64) (newBalance uint64) { posCost := -int64(fcost * n.posFactor.RequestFactor) if posCost == 0 { fcost = 0 - newBalance, _ = n.balance.value(now) + newBalance = n.balance.posValue(now) } else { var net int64 _, newBalance, net, _ = n.balance.addPosValue(now, posCost, true, false) @@ -597,11 +602,11 @@ func (n *nodeBalance) setCapacity(capacity uint64) { // first to disconnect. Positive balance translates to positive priority. If positive // balance is zero then negative balance translates to a negative priority. func (n *nodeBalance) balanceToPriority(now mclock.AbsTime, b balance, capacity uint64) int64 { - pos, neg := b.value(now) + pos := b.posValue(now) if pos > 0 { return int64(pos / capacity) } - return -int64(neg) + return -int64(b.negValue(now)) } // priorityToBalance converts a target priority to a requested balance value. @@ -649,7 +654,7 @@ func (n *nodeBalance) reducedBalance(b balance, start mclock.AbsTime, dt time.Du func (n *nodeBalance) timeUntil(priority int64) (time.Duration, bool) { var ( now = n.bt.clock.Now() - pos, neg = n.balance.value(now) + pos = n.balance.posValue(now) targetPos, targetNeg = n.priorityToBalance(priority, n.capacity) diffTime float64 ) @@ -672,6 +677,7 @@ func (n *nodeBalance) timeUntil(priority int64) (time.Duration, bool) { return 0, true } } + neg := n.balance.negValue(now) if targetNeg > neg { timePrice := n.negFactor.connectionPrice(n.capacity, 0) if timePrice < 1e-100 { From b914785dd8fcf72bb359e9b451cd7740c56c812e Mon Sep 17 00:00:00 2001 From: rjl493456442 Date: Thu, 25 Mar 2021 15:28:53 +0800 Subject: [PATCH 17/27] les/vflux/server: polish setup --- les/vflux/server/balance.go | 10 +- les/vflux/server/balance_test.go | 140 +++++++++++--------------- les/vflux/server/balance_tracker.go | 99 ++++++------------ les/vflux/server/clientpool.go | 73 ++++++-------- les/vflux/server/clientpool_test.go | 4 +- les/vflux/server/prioritypool.go | 78 +++++--------- les/vflux/server/prioritypool_test.go | 43 ++++---- les/vflux/server/status.go | 59 +++++++++++ 8 files changed, 234 insertions(+), 272 deletions(-) create mode 100644 les/vflux/server/status.go diff --git a/les/vflux/server/balance.go b/les/vflux/server/balance.go index 1463d850f6e1..b63aaf81aa4d 100644 --- a/les/vflux/server/balance.go +++ b/les/vflux/server/balance.go @@ -233,7 +233,7 @@ func (n *nodeBalance) AddBalance(amount int64) (uint64, uint64, error) { } if n.setFlags { if setPriority { - n.bt.ns.SetStateSub(n.node, n.bt.priorityFlag, nodestate.Flags{}, 0) + n.bt.ns.SetStateSub(n.node, n.bt.setup.priorityFlag, nodestate.Flags{}, 0) } // Note: priority flag is automatically 
removed by the zero priority callback if necessary n.signalPriorityUpdate() @@ -265,7 +265,7 @@ func (n *nodeBalance) SetBalance(pos, neg uint64) error { } if n.setFlags { if setPriority { - n.bt.ns.SetStateSub(n.node, n.bt.priorityFlag, nodestate.Flags{}, 0) + n.bt.ns.SetStateSub(n.node, n.bt.setup.priorityFlag, nodestate.Flags{}, 0) } // Note: priority flag is automatically removed by the zero priority callback if necessary n.signalPriorityUpdate() @@ -560,7 +560,7 @@ func (n *nodeBalance) balanceExhausted() { n.hasPriority = false n.lock.Unlock() if n.setFlags { - n.bt.ns.SetStateSub(n.node, nodestate.Flags{}, n.bt.priorityFlag, 0) + n.bt.ns.SetStateSub(n.node, nodestate.Flags{}, n.bt.setup.priorityFlag, 0) } } @@ -579,8 +579,8 @@ func (n *nodeBalance) checkPriorityStatus() bool { // signalPriorityUpdate signals that the priority fell below the previous minimum estimate // Note: this function should run inside a NodeStateMachine operation func (n *nodeBalance) signalPriorityUpdate() { - n.bt.ns.SetStateSub(n.node, n.bt.updateFlag, nodestate.Flags{}, 0) - n.bt.ns.SetStateSub(n.node, nodestate.Flags{}, n.bt.updateFlag, 0) + n.bt.ns.SetStateSub(n.node, n.bt.setup.updateFlag, nodestate.Flags{}, 0) + n.bt.ns.SetStateSub(n.node, nodestate.Flags{}, n.bt.setup.updateFlag, 0) } // setCapacity updates the capacity value used for priority calculation diff --git a/les/vflux/server/balance_test.go b/les/vflux/server/balance_test.go index 964b1d21a7bc..66f0d1f30123 100644 --- a/les/vflux/server/balance_test.go +++ b/les/vflux/server/balance_test.go @@ -24,6 +24,7 @@ import ( "time" "github.com/ethereum/go-ethereum/common/mclock" + "github.com/ethereum/go-ethereum/ethdb" "github.com/ethereum/go-ethereum/ethdb/memorydb" "github.com/ethereum/go-ethereum/les/utils" "github.com/ethereum/go-ethereum/p2p/enode" @@ -31,53 +32,58 @@ import ( "github.com/ethereum/go-ethereum/p2p/nodestate" ) -var ( - btClientField = testSetup.NewField("clientField", reflect.TypeOf(balanceTestClient{})) - btTestSetup = newBalanceTrackerSetup(testSetup) -) - -func init() { - btTestSetup.connect(btClientField, ppTestSetup.capacityField) -} - type zeroExpirer struct{} func (z zeroExpirer) SetRate(now mclock.AbsTime, rate float64) {} func (z zeroExpirer) SetLogOffset(now mclock.AbsTime, logOffset utils.Fixed64) {} func (z zeroExpirer) LogOffset(now mclock.AbsTime) utils.Fixed64 { return 0 } +type balanceTestClient struct{} + +func (client balanceTestClient) FreeClientId() string { return "" } + type balanceTestSetup struct { clock *mclock.Simulated + db ethdb.KeyValueStore ns *nodestate.NodeStateMachine + setup *serverSetup bt *balanceTracker } -func newBalanceTestSetup() *balanceTestSetup { +func newBalanceTestSetup(db ethdb.KeyValueStore, posExp, negExp utils.ValueExpirer) *balanceTestSetup { + // Initialize and customize the setup for the balance testing clock := &mclock.Simulated{} - ns := nodestate.NewNodeStateMachine(nil, nil, clock, testSetup) - db := memorydb.New() - bt := newBalanceTracker(ns, btTestSetup, db, clock, zeroExpirer{}, zeroExpirer{}) + setup := newServerSetup() + setup.clientField = setup.setup.NewField("balancTestClient", reflect.TypeOf(balanceTestClient{})) + + ns := nodestate.NewNodeStateMachine(nil, nil, clock, setup.setup) + if posExp == nil { + posExp = zeroExpirer{} + } + if negExp == nil { + negExp = zeroExpirer{} + } + if db == nil { + db = memorydb.New() + } + bt := newBalanceTracker(ns, setup, db, clock, posExp, negExp) ns.Start() return &balanceTestSetup{ clock: clock, + db: db, ns: ns, + setup: 
setup, bt: bt, } } -type balanceTestClient struct{} - -func (btc balanceTestClient) FreeClientId() string { - return "" -} - func (b *balanceTestSetup) newNode(capacity uint64) *nodeBalance { node := enode.SignNull(&enr.Record{}, enode.ID{}) - b.ns.SetField(node, btTestSetup.clientField, balanceTestClient{}) + b.ns.SetField(node, b.setup.clientField, balanceTestClient{}) if capacity != 0 { - b.ns.SetField(node, ppTestSetup.capacityField, capacity) + b.ns.SetField(node, b.setup.capacityField, capacity) } - n, _ := b.ns.GetField(node, btTestSetup.balanceField).(*nodeBalance) + n, _ := b.ns.GetField(node, b.setup.balanceField).(*nodeBalance) return n } @@ -101,7 +107,7 @@ func (b *balanceTestSetup) stop() { } func TestAddBalance(t *testing.T) { - b := newBalanceTestSetup() + b := newBalanceTestSetup(nil, nil, nil) defer b.stop() node := b.newNode(1000) @@ -137,7 +143,7 @@ func TestAddBalance(t *testing.T) { } func TestSetBalance(t *testing.T) { - b := newBalanceTestSetup() + b := newBalanceTestSetup(nil, nil, nil) defer b.stop() node := b.newNode(1000) @@ -161,11 +167,10 @@ func TestSetBalance(t *testing.T) { } func TestBalanceTimeCost(t *testing.T) { - b := newBalanceTestSetup() + b := newBalanceTestSetup(nil, nil, nil) defer b.stop() node := b.newNode(1000) - b.ns.SetField(node.node, ppTestSetup.capacityField, uint64(1)) node.SetPriceFactors(PriceFactors{1, 0, 1}, PriceFactors{1, 0, 1}) b.setBalance(node, uint64(time.Minute), 0) // 1 minute time allowance @@ -202,12 +207,11 @@ func TestBalanceTimeCost(t *testing.T) { } func TestBalanceReqCost(t *testing.T) { - b := newBalanceTestSetup() + b := newBalanceTestSetup(nil, nil, nil) defer b.stop() node := b.newNode(1000) node.SetPriceFactors(PriceFactors{1, 0, 1}, PriceFactors{1, 0, 1}) - b.ns.SetField(node.node, ppTestSetup.capacityField, uint64(1)) b.setBalance(node, uint64(time.Minute), 0) // 1 minute time serving time allowance var inputs = []struct { reqCost uint64 @@ -231,7 +235,7 @@ func TestBalanceReqCost(t *testing.T) { } func TestBalanceToPriority(t *testing.T) { - b := newBalanceTestSetup() + b := newBalanceTestSetup(nil, nil, nil) defer b.stop() node := b.newNode(1000) node.SetPriceFactors(PriceFactors{1, 0, 1}, PriceFactors{1, 0, 1}) @@ -256,7 +260,7 @@ func TestBalanceToPriority(t *testing.T) { } func TestEstimatedPriority(t *testing.T) { - b := newBalanceTestSetup() + b := newBalanceTestSetup(nil, nil, nil) defer b.stop() node := b.newNode(1000000000) node.SetPriceFactors(PriceFactors{1, 0, 1}, PriceFactors{1, 0, 1}) @@ -295,7 +299,7 @@ func TestEstimatedPriority(t *testing.T) { } func TestPostiveBalanceCounting(t *testing.T) { - b := newBalanceTestSetup() + b := newBalanceTestSetup(nil, nil, nil) defer b.stop() var nodes []*nodeBalance @@ -319,7 +323,7 @@ func TestPostiveBalanceCounting(t *testing.T) { // Change client status for i := 0; i < 100; i += 1 { if rand.Intn(2) == 0 { - b.ns.SetField(nodes[i].node, ppTestSetup.capacityField, uint64(1)) + b.ns.SetField(nodes[i].node, b.setup.capacityField, uint64(1)) } } if b.bt.TotalTokenAmount() != sum { @@ -327,7 +331,7 @@ func TestPostiveBalanceCounting(t *testing.T) { } for i := 0; i < 100; i += 1 { if rand.Intn(2) == 0 { - b.ns.SetField(nodes[i].node, ppTestSetup.capacityField, uint64(1)) + b.ns.SetField(nodes[i].node, b.setup.capacityField, uint64(1)) } } if b.bt.TotalTokenAmount() != sum { @@ -336,7 +340,7 @@ func TestPostiveBalanceCounting(t *testing.T) { } func TestCallbackChecking(t *testing.T) { - b := newBalanceTestSetup() + b := newBalanceTestSetup(nil, nil, nil) defer 
b.stop() node := b.newNode(1000000) node.SetPriceFactors(PriceFactors{1, 0, 1}, PriceFactors{1, 0, 1}) @@ -359,11 +363,10 @@ func TestCallbackChecking(t *testing.T) { } func TestCallback(t *testing.T) { - b := newBalanceTestSetup() + b := newBalanceTestSetup(nil, nil, nil) defer b.stop() node := b.newNode(1000) node.SetPriceFactors(PriceFactors{1, 0, 1}, PriceFactors{1, 0, 1}) - b.ns.SetField(node.node, ppTestSetup.capacityField, uint64(1)) callCh := make(chan struct{}, 1) b.setBalance(node, uint64(time.Minute), 0) @@ -389,23 +392,14 @@ func TestCallback(t *testing.T) { } func TestBalancePersistence(t *testing.T) { - clock := &mclock.Simulated{} - ns := nodestate.NewNodeStateMachine(nil, nil, clock, testSetup) - db := memorydb.New() posExp := &utils.Expirer{} negExp := &utils.Expirer{} - posExp.SetRate(clock.Now(), math.Log(2)/float64(time.Hour*2)) // halves every two hours - negExp.SetRate(clock.Now(), math.Log(2)/float64(time.Hour)) // halves every hour - bt := newBalanceTracker(ns, btTestSetup, db, clock, posExp, negExp) - ns.Start() - bts := &balanceTestSetup{ - clock: clock, - ns: ns, - bt: bt, - } - var nb *nodeBalance - exp := func(expPos, expNeg uint64) { - pos, neg := nb.GetBalance() + posExp.SetRate(0, math.Log(2)/float64(time.Hour*2)) // halves every two hours + negExp.SetRate(0, math.Log(2)/float64(time.Hour)) // halves every hour + setup := newBalanceTestSetup(nil, posExp, negExp) + + exp := func(balance *nodeBalance, expPos, expNeg uint64) { + pos, neg := balance.GetBalance() if pos != expPos { t.Fatalf("Positive balance incorrect, want %v, got %v", expPos, pos) } @@ -414,44 +408,32 @@ func TestBalancePersistence(t *testing.T) { } } expTotal := func(expTotal uint64) { - total := bt.TotalTokenAmount() + total := setup.bt.TotalTokenAmount() if total != expTotal { t.Fatalf("Total token amount incorrect, want %v, got %v", expTotal, total) } } expTotal(0) - nb = bts.newNode(0) + balance := setup.newNode(0) expTotal(0) - bts.setBalance(nb, 16000000000, 16000000000) - exp(16000000000, 16000000000) + setup.setBalance(balance, 16000000000, 16000000000) + exp(balance, 16000000000, 16000000000) expTotal(16000000000) - clock.Run(time.Hour * 2) - exp(8000000000, 4000000000) + + setup.clock.Run(time.Hour * 2) + exp(balance, 8000000000, 4000000000) expTotal(8000000000) - bt.stop() - ns.Stop() - - clock = &mclock.Simulated{} - ns = nodestate.NewNodeStateMachine(nil, nil, clock, testSetup) - posExp = &utils.Expirer{} - negExp = &utils.Expirer{} - posExp.SetRate(clock.Now(), math.Log(2)/float64(time.Hour*2)) // halves every two hours - negExp.SetRate(clock.Now(), math.Log(2)/float64(time.Hour)) // halves every hour - bt = newBalanceTracker(ns, btTestSetup, db, clock, posExp, negExp) - ns.Start() - bts = &balanceTestSetup{ - clock: clock, - ns: ns, - bt: bt, - } + setup.stop() + + // Test the functionalities after restart + setup = newBalanceTestSetup(setup.db, posExp, negExp) expTotal(8000000000) - nb = bts.newNode(0) - exp(8000000000, 4000000000) + balance = setup.newNode(0) + exp(balance, 8000000000, 4000000000) expTotal(8000000000) - clock.Run(time.Hour * 2) - exp(4000000000, 1000000000) + setup.clock.Run(time.Hour * 2) + exp(balance, 4000000000, 1000000000) expTotal(4000000000) - bt.stop() - ns.Stop() + setup.stop() } diff --git a/les/vflux/server/balance_tracker.go b/les/vflux/server/balance_tracker.go index 5924d6e5f397..9460c2f19ca9 100644 --- a/les/vflux/server/balance_tracker.go +++ b/les/vflux/server/balance_tracker.go @@ -17,7 +17,6 @@ package server import ( - "reflect" "sync" 
"time" @@ -35,36 +34,6 @@ const ( persistExpirationRefresh = time.Minute * 5 // refresh period of the token expiration persistence ) -// balanceTrackerSetup contains node state flags and fields used by balanceTracker -type balanceTrackerSetup struct { - // controlled by priorityPool - priorityFlag, updateFlag nodestate.Flags - balanceField nodestate.Field - // external connections - clientField, capacityField nodestate.Field -} - -// newBalanceTrackerSetup creates a new balanceTrackerSetup and initializes the fields -// and flags controlled by balanceTracker -func newBalanceTrackerSetup(setup *nodestate.Setup) balanceTrackerSetup { - return balanceTrackerSetup{ - // priorityFlag is set if the node has a positive balance - priorityFlag: setup.NewFlag("priorityNode"), - // updateFlag set and then immediately reset if the balance has been updated and - // therefore priority is suddenly changed - updateFlag: setup.NewFlag("balanceUpdate"), - // balanceField contains the nodeBalance struct which implements nodePriority, - // allowing on-demand priority calculation and future priority estimation - balanceField: setup.NewField("balance", reflect.TypeOf(&nodeBalance{})), - } -} - -// connect sets the fields used by balanceTracker as an input -func (bts *balanceTrackerSetup) connect(clientField, capacityField nodestate.Field) { - bts.clientField = clientField - bts.capacityField = capacityField -} - // balanceTracker tracks positive and negative balances for connected nodes. // After clientField is set externally, a nodeBalance is created and previous // balance values are loaded from the database. Both balances are exponentially expired @@ -74,7 +43,7 @@ func (bts *balanceTrackerSetup) connect(clientField, capacityField nodestate.Fie // The two balances are translated into a single priority value that also depends // on the actual capacity. type balanceTracker struct { - balanceTrackerSetup + setup *serverSetup clock mclock.Clock lock sync.Mutex ns *nodestate.NodeStateMachine @@ -87,34 +56,32 @@ type balanceTracker struct { quit chan struct{} } -type balancePeer interface { - FreeClientId() string -} - // newBalanceTracker creates a new balanceTracker -func newBalanceTracker(ns *nodestate.NodeStateMachine, setup balanceTrackerSetup, db ethdb.KeyValueStore, clock mclock.Clock, posExp, negExp utils.ValueExpirer) *balanceTracker { +func newBalanceTracker(ns *nodestate.NodeStateMachine, setup *serverSetup, db ethdb.KeyValueStore, clock mclock.Clock, posExp, negExp utils.ValueExpirer) *balanceTracker { ndb := newNodeDB(db, clock) bt := &balanceTracker{ - ns: ns, - balanceTrackerSetup: setup, - ndb: ndb, - clock: clock, - posExp: posExp, - negExp: negExp, - balanceTimer: utils.NewUpdateTimer(clock, time.Second*10), - quit: make(chan struct{}), + ns: ns, + setup: setup, + ndb: ndb, + clock: clock, + posExp: posExp, + negExp: negExp, + balanceTimer: utils.NewUpdateTimer(clock, time.Second*10), + quit: make(chan struct{}), } posOffset, negOffset := bt.ndb.getExpiration() posExp.SetLogOffset(clock.Now(), posOffset) negExp.SetLogOffset(clock.Now(), negOffset) + // Load all persisted balance entries of priority nodes, + // calculate the total number of issued service tokens. 
bt.ndb.forEachBalance(false, func(id enode.ID, balance utils.ExpiredValue) bool { bt.inactive.AddExp(balance) return true }) - ns.SubscribeField(bt.capacityField, func(node *enode.Node, state nodestate.Flags, oldValue, newValue interface{}) { - n, _ := ns.GetField(node, bt.balanceField).(*nodeBalance) + ns.SubscribeField(bt.setup.capacityField, func(node *enode.Node, state nodestate.Flags, oldValue, newValue interface{}) { + n, _ := ns.GetField(node, bt.setup.balanceField).(*nodeBalance) if n == nil { return } @@ -131,15 +98,18 @@ func newBalanceTracker(ns *nodestate.NodeStateMachine, setup balanceTrackerSetup n.deactivate() } }) - ns.SubscribeField(bt.clientField, func(node *enode.Node, state nodestate.Flags, oldValue, newValue interface{}) { + ns.SubscribeField(bt.setup.clientField, func(node *enode.Node, state nodestate.Flags, oldValue, newValue interface{}) { + type peer interface { + FreeClientId() string + } if newValue != nil { - ns.SetFieldSub(node, bt.balanceField, bt.newNodeBalance(node, newValue.(balancePeer).FreeClientId(), true)) + ns.SetFieldSub(node, bt.setup.balanceField, bt.newNodeBalance(node, newValue.(peer).FreeClientId(), true)) } else { - ns.SetStateSub(node, nodestate.Flags{}, bt.priorityFlag, 0) - if b, _ := ns.GetField(node, bt.balanceField).(*nodeBalance); b != nil { + ns.SetStateSub(node, nodestate.Flags{}, bt.setup.priorityFlag, 0) + if b, _ := ns.GetField(node, bt.setup.balanceField).(*nodeBalance); b != nil { b.deactivate() } - ns.SetFieldSub(node, bt.balanceField, nil) + ns.SetFieldSub(node, bt.setup.balanceField, nil) } }) @@ -168,11 +138,11 @@ func (bt *balanceTracker) stop() { bt.ndb.setExpiration(bt.posExp.LogOffset(now), bt.negExp.LogOffset(now)) close(bt.quit) bt.ns.ForEach(nodestate.Flags{}, nodestate.Flags{}, func(node *enode.Node, state nodestate.Flags) { - if n, ok := bt.ns.GetField(node, bt.balanceField).(*nodeBalance); ok { + if n, ok := bt.ns.GetField(node, bt.setup.balanceField).(*nodeBalance); ok { n.lock.Lock() n.storeBalance(true, true) n.lock.Unlock() - bt.ns.SetField(node, bt.balanceField, nil) + bt.ns.SetField(node, bt.setup.balanceField, nil) } }) bt.ndb.close() @@ -186,7 +156,7 @@ func (bt *balanceTracker) TotalTokenAmount() uint64 { bt.balanceTimer.Update(func(_ time.Duration) bool { bt.active = utils.ExpiredValue{} bt.ns.ForEach(nodestate.Flags{}, nodestate.Flags{}, func(node *enode.Node, state nodestate.Flags) { - if n, ok := bt.ns.GetField(node, bt.balanceField).(*nodeBalance); ok && n.active { + if n, ok := bt.ns.GetField(node, bt.setup.balanceField).(*nodeBalance); ok && n.active { pos, _ := n.GetRawBalance() bt.active.AddExp(pos) } @@ -234,17 +204,14 @@ func (bt *balanceTracker) GetExpirationTCs() (pos, neg uint64) { // BalanceOperation allows atomic operations on the balance of a node regardless of whether // it is currently connected or not -func (bt *balanceTracker) BalanceOperation(id enode.ID, negBalanceKey string, cb func(AtomicBalanceOperator)) { +func (bt *balanceTracker) BalanceOperation(id enode.ID, connAddress string, cb func(AtomicBalanceOperator)) { bt.ns.Operation(func() { - node := bt.ns.GetNode(id) var nb *nodeBalance - if node != nil { - nb, _ = bt.ns.GetField(node, bt.balanceField).(*nodeBalance) + if node := bt.ns.GetNode(id); node != nil { + nb, _ = bt.ns.GetField(node, bt.setup.balanceField).(*nodeBalance) } else { node = enode.SignNull(&enr.Record{}, id) - } - if nb == nil { - nb = bt.newNodeBalance(node, negBalanceKey, false) + nb = bt.newNodeBalance(node, connAddress, false) } cb(nb) }) @@ -254,14 +221,14 
@@ func (bt *balanceTracker) BalanceOperation(id enode.ID, negBalanceKey string, cb // for the given node. It also sets the priorityFlag and adds balanceCallbackZero if // the node has a positive balance. // Note: this function should run inside a NodeStateMachine operation -func (bt *balanceTracker) newNodeBalance(node *enode.Node, negBalanceKey string, setFlags bool) *nodeBalance { +func (bt *balanceTracker) newNodeBalance(node *enode.Node, connAddress string, setFlags bool) *nodeBalance { pb := bt.ndb.getOrNewBalance(node.ID().Bytes(), false) - nb := bt.ndb.getOrNewBalance([]byte(negBalanceKey), true) + nb := bt.ndb.getOrNewBalance([]byte(connAddress), true) n := &nodeBalance{ bt: bt, node: node, setFlags: setFlags, - connAddress: negBalanceKey, + connAddress: connAddress, balance: balance{pos: pb, neg: nb, posExp: bt.posExp, negExp: bt.negExp}, initTime: bt.clock.Now(), lastUpdate: bt.clock.Now(), @@ -270,7 +237,7 @@ func (bt *balanceTracker) newNodeBalance(node *enode.Node, negBalanceKey string, n.callbackIndex[i] = -1 } if setFlags && n.checkPriorityStatus() { - n.bt.ns.SetStateSub(n.node, n.bt.priorityFlag, nodestate.Flags{}, 0) + n.bt.ns.SetStateSub(n.node, n.bt.setup.priorityFlag, nodestate.Flags{}, 0) } return n } diff --git a/les/vflux/server/clientpool.go b/les/vflux/server/clientpool.go index 5a357f13300a..6bf4e923d482 100644 --- a/les/vflux/server/clientpool.go +++ b/les/vflux/server/clientpool.go @@ -18,7 +18,6 @@ package server import ( "errors" - "reflect" "sync" "time" @@ -33,24 +32,12 @@ import ( "github.com/ethereum/go-ethereum/rlp" ) -var ( - serverSetup = &nodestate.Setup{} - clientField = serverSetup.NewField("client", reflect.TypeOf(clientPeerInstance{})) - btSetup = newBalanceTrackerSetup(serverSetup) - ppSetup = newPriorityPoolSetup(serverSetup) -) - var ( ErrNotConnected = errors.New("client not connected") ErrNoPriority = errors.New("priority too low to raise capacity") ErrCantFindMaximum = errors.New("Unable to find maximum allowed capacity") ) -func init() { - btSetup.connect(clientField, ppSetup.capacityField) - ppSetup.connect(btSetup.balanceField, btSetup.updateFlag) // nodeBalance implements nodePriority -} - // ClientPool implements a client database that assigns a priority to each client // based on a positive and negative balance. Positive balance is externally assigned // to prioritized clients and is decreased with connection time and processed @@ -58,7 +45,7 @@ func init() { // then negative balance is accumulated. // // Balance tracking and priority calculation for connected clients is done by -// balanceTracker. activeQueue ensures that clients with the lowest positive or +// balanceTracker. PriorityQueue ensures that clients with the lowest positive or // highest negative balance get evicted when the total capacity allowance is full // and new clients with a better balance want to connect. 
// @@ -72,6 +59,8 @@ func init() { type ClientPool struct { *priorityPool *balanceTracker + + setup *serverSetup clock mclock.Clock closed bool ns *nodestate.NodeStateMachine @@ -96,65 +85,65 @@ type clientPeer interface { Disconnect() // initiates disconnection (Unregister should always be called) } -type clientPeerInstance struct{ clientPeer } // the NodeStateMachine type system needs this wrapper - // NewClientPool creates a new client pool func NewClientPool(balanceDb ethdb.KeyValueStore, minCap uint64, connectedBias time.Duration, clock mclock.Clock, synced func() bool) *ClientPool { - ns := nodestate.NewNodeStateMachine(nil, nil, clock, serverSetup) + setup := newServerSetup() + ns := nodestate.NewNodeStateMachine(nil, nil, clock, setup.setup) cp := &ClientPool{ + priorityPool: newPriorityPool(ns, setup, clock, minCap, connectedBias, 4), + balanceTracker: newBalanceTracker(ns, setup, balanceDb, clock, &utils.Expirer{}, &utils.Expirer{}), + setup: setup, ns: ns, - balanceTracker: newBalanceTracker(ns, btSetup, balanceDb, clock, &utils.Expirer{}, &utils.Expirer{}), - priorityPool: newPriorityPool(ns, ppSetup, clock, minCap, connectedBias, 4), clock: clock, minCap: minCap, connectedBias: connectedBias, synced: synced, } - ns.SubscribeState(nodestate.MergeFlags(ppSetup.activeFlag, ppSetup.inactiveFlag, btSetup.priorityFlag), func(node *enode.Node, oldState, newState nodestate.Flags) { - if newState.Equals(ppSetup.inactiveFlag) { + ns.SubscribeState(nodestate.MergeFlags(setup.activeFlag, setup.inactiveFlag, setup.priorityFlag), func(node *enode.Node, oldState, newState nodestate.Flags) { + if newState.Equals(setup.inactiveFlag) { // set timeout for non-priority inactive client var timeout time.Duration - if c, ok := ns.GetField(node, clientField).(clientPeer); ok { + if c, ok := ns.GetField(node, setup.clientField).(clientPeer); ok { timeout = c.InactiveTimeout() } if timeout > 0 { - ns.AddTimeout(node, ppSetup.inactiveFlag, timeout) + ns.AddTimeout(node, setup.inactiveFlag, timeout) } else { // Note: if capacity is immediately available then priorityPool will set the active // flag simultaneously with removing the inactive flag and therefore this will not // initiate disconnection - ns.SetStateSub(node, nodestate.Flags{}, ppSetup.inactiveFlag, 0) + ns.SetStateSub(node, nodestate.Flags{}, setup.inactiveFlag, 0) } } - if oldState.Equals(ppSetup.inactiveFlag) && newState.Equals(ppSetup.inactiveFlag.Or(btSetup.priorityFlag)) { - ns.SetStateSub(node, ppSetup.inactiveFlag, nodestate.Flags{}, 0) // priority gained; remove timeout + if oldState.Equals(setup.inactiveFlag) && newState.Equals(setup.inactiveFlag.Or(setup.priorityFlag)) { + ns.SetStateSub(node, setup.inactiveFlag, nodestate.Flags{}, 0) // priority gained; remove timeout } - if newState.Equals(ppSetup.activeFlag) { + if newState.Equals(setup.activeFlag) { // active with no priority; limit capacity to minCap - cap, _ := ns.GetField(node, ppSetup.capacityField).(uint64) + cap, _ := ns.GetField(node, setup.capacityField).(uint64) if cap > minCap { cp.requestCapacity(node, minCap, 0, true) } } if newState.Equals(nodestate.Flags{}) { - if c, ok := ns.GetField(node, clientField).(clientPeer); ok { + if c, ok := ns.GetField(node, setup.clientField).(clientPeer); ok { c.Disconnect() } } }) - ns.SubscribeField(btSetup.balanceField, func(node *enode.Node, state nodestate.Flags, oldValue, newValue interface{}) { + ns.SubscribeField(setup.balanceField, func(node *enode.Node, state nodestate.Flags, oldValue, newValue interface{}) { if newValue != 
nil { - ns.SetStateSub(node, ppSetup.inactiveFlag, nodestate.Flags{}, 0) + ns.SetStateSub(node, setup.inactiveFlag, nodestate.Flags{}, 0) cp.lock.RLock() newValue.(*nodeBalance).SetPriceFactors(cp.defaultPosFactors, cp.defaultNegFactors) cp.lock.RUnlock() } }) - ns.SubscribeField(ppSetup.capacityField, func(node *enode.Node, state nodestate.Flags, oldValue, newValue interface{}) { - if c, ok := ns.GetField(node, clientField).(clientPeer); ok { + ns.SubscribeField(setup.capacityField, func(node *enode.Node, state nodestate.Flags, oldValue, newValue interface{}) { + if c, ok := ns.GetField(node, setup.clientField).(clientPeer); ok { newCap, _ := newValue.(uint64) c.UpdateCapacity(newCap, node == cp.capReqNode) } @@ -167,17 +156,17 @@ func (cp *ClientPool) AddMetrics(totalConnectedGauge metrics.Gauge, clientConnectedMeter, clientDisconnectedMeter, clientActivatedMeter, clientDeactivatedMeter, capacityQueryZeroMeter, capacityQueryNonZeroMeter metrics.Meter) { - cp.ns.SubscribeState(nodestate.MergeFlags(ppSetup.activeFlag, ppSetup.inactiveFlag), func(node *enode.Node, oldState, newState nodestate.Flags) { + cp.ns.SubscribeState(nodestate.MergeFlags(cp.setup.activeFlag, cp.setup.inactiveFlag), func(node *enode.Node, oldState, newState nodestate.Flags) { if oldState.IsEmpty() && !newState.IsEmpty() { clientConnectedMeter.Mark(1) } if !oldState.IsEmpty() && newState.IsEmpty() { clientDisconnectedMeter.Mark(1) } - if oldState.HasNone(ppSetup.activeFlag) && oldState.HasAll(ppSetup.activeFlag) { + if oldState.HasNone(cp.setup.activeFlag) && oldState.HasAll(cp.setup.activeFlag) { clientActivatedMeter.Mark(1) } - if oldState.HasAll(ppSetup.activeFlag) && oldState.HasNone(ppSetup.activeFlag) { + if oldState.HasAll(cp.setup.activeFlag) && oldState.HasNone(cp.setup.activeFlag) { clientDeactivatedMeter.Mark(1) } _, connected := cp.Active() @@ -203,14 +192,14 @@ func (cp *ClientPool) Stop() { // priority and remains inactive for longer than the allowed timeout then it will be // disconnected by calling the Disconnect function of the clientPeer interface. 
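// Editor's sketch (not part of the patch): a minimal clientPeer implementation and the
// Register/Unregister flow, assumed to sit in package server next to this code. The type
// name, field names and the zero InactiveTimeout value are illustrative; only the interface
// methods and the ConnectedBalance return value come from the patch. (A later patch in this
// series renames InactiveTimeout to InactiveAllowance.)
package server

import (
	"time"

	"github.com/ethereum/go-ethereum/p2p/enode"
)

type sketchPeer struct {
	node   *enode.Node
	freeID string
	cap    uint64
}

func (p *sketchPeer) Node() *enode.Node                            { return p.node }
func (p *sketchPeer) FreeClientId() string                         { return p.freeID }
func (p *sketchPeer) InactiveTimeout() time.Duration               { return 0 } // non-priority peers are dropped as soon as they become inactive
func (p *sketchPeer) UpdateCapacity(newCap uint64, requested bool) { p.cap = newCap }
func (p *sketchPeer) Disconnect()                                  { /* tear down the p2p connection here */ }

func serveSketchPeer(pool *ClientPool, p *sketchPeer) {
	balance := pool.Register(p) // may be nil if the pool is no longer running
	_ = balance                 // handle used for balance queries/updates while connected
	// ... serve requests for p ...
	pool.Unregister(p) // must always be called when the peer goes away
}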
func (cp *ClientPool) Register(peer clientPeer) ConnectedBalance { - cp.ns.SetField(peer.Node(), clientField, clientPeerInstance{peer}) - balance, _ := cp.ns.GetField(peer.Node(), btSetup.balanceField).(*nodeBalance) + cp.ns.SetField(peer.Node(), cp.setup.clientField, peerWrapper{peer}) + balance, _ := cp.ns.GetField(peer.Node(), cp.setup.balanceField).(*nodeBalance) return balance } // Unregister removes the peer from the client pool func (cp *ClientPool) Unregister(peer clientPeer) { - cp.ns.SetField(peer.Node(), clientField, nil) + cp.ns.SetField(peer.Node(), cp.setup.clientField, nil) } // SetDefaultFactors sets the default price factors applied to subsequently connected clients @@ -240,12 +229,12 @@ func (cp *ClientPool) SetCapacity(node *enode.Node, reqCap uint64, bias time.Dur cp.lock.RUnlock() cp.ns.Operation(func() { - balance, _ := cp.ns.GetField(node, btSetup.balanceField).(*nodeBalance) + balance, _ := cp.ns.GetField(node, cp.setup.balanceField).(*nodeBalance) if balance == nil { err = ErrNotConnected return } - capacity, _ = cp.ns.GetField(node, ppSetup.capacityField).(uint64) + capacity, _ = cp.ns.GetField(node, cp.setup.capacityField).(uint64) if capacity == 0 { // if the client is inactive then it has insufficient priority for the minimal capacity // (will be activated automatically with minCap when possible) @@ -257,7 +246,7 @@ func (cp *ClientPool) SetCapacity(node *enode.Node, reqCap uint64, bias time.Dur reqCap = cp.minCap } if reqCap > cp.minCap { - if cp.ns.GetState(node).HasNone(btSetup.priorityFlag) && reqCap > cp.minCap { + if cp.ns.GetState(node).HasNone(cp.setup.priorityFlag) && reqCap > cp.minCap { err = ErrNoPriority return } diff --git a/les/vflux/server/clientpool_test.go b/les/vflux/server/clientpool_test.go index 296c025d4bc8..36d919213566 100644 --- a/les/vflux/server/clientpool_test.go +++ b/les/vflux/server/clientpool_test.go @@ -582,8 +582,8 @@ func TestInactiveClient(t *testing.T) { } clock.Run(time.Second * 600) // manually trigger a check to avoid a long real-time wait - pool.ns.SetState(p1.node, btSetup.updateFlag, nodestate.Flags{}, 0) - pool.ns.SetState(p1.node, nodestate.Flags{}, btSetup.updateFlag, 0) + pool.ns.SetState(p1.node, pool.setup.updateFlag, nodestate.Flags{}, 0) + pool.ns.SetState(p1.node, nodestate.Flags{}, pool.setup.updateFlag, 0) // p1: 1000 p2: 500 p3: 2000 p4: 900 if p1.cap != 1 { t.Fatalf("Failed to activate peer #1") diff --git a/les/vflux/server/prioritypool.go b/les/vflux/server/prioritypool.go index 3ac87359fdf3..edcda1486dda 100644 --- a/les/vflux/server/prioritypool.go +++ b/les/vflux/server/prioritypool.go @@ -18,7 +18,6 @@ package server import ( "math" - "reflect" "sync" "time" @@ -33,35 +32,6 @@ const ( lazyQueueRefresh = time.Second * 10 // refresh period of the active queue ) -// priorityPoolSetup contains node state flags and fields used by priorityPool -// Note: activeFlag and inactiveFlag can be controlled both externally and by the pool, -// see priorityPool description for details. 
-type priorityPoolSetup struct { - // controlled by priorityPool - activeFlag, inactiveFlag nodestate.Flags - capacityField, ppNodeInfoField nodestate.Field - // external connections - updateFlag nodestate.Flags - priorityField nodestate.Field -} - -// newPriorityPoolSetup creates a new priorityPoolSetup and initializes the fields -// and flags controlled by priorityPool -func newPriorityPoolSetup(setup *nodestate.Setup) priorityPoolSetup { - return priorityPoolSetup{ - activeFlag: setup.NewFlag("active"), - inactiveFlag: setup.NewFlag("inactive"), - capacityField: setup.NewField("capacity", reflect.TypeOf(uint64(0))), - ppNodeInfoField: setup.NewField("ppNodeInfo", reflect.TypeOf(&ppNodeInfo{})), - } -} - -// connect sets the fields and flags used by priorityPool as an input -func (pps *priorityPoolSetup) connect(priorityField nodestate.Field, updateFlag nodestate.Flags) { - pps.priorityField = priorityField // should implement nodePriority - pps.updateFlag = updateFlag // triggers an immediate priority update -} - // priorityPool handles a set of nodes where each node has a capacity (a scalar value) // and a priority (which can change over time and can also depend on the capacity). // A node is active if it has at least the necessary minimal amount of capacity while @@ -89,7 +59,7 @@ func (pps *priorityPoolSetup) connect(priorityField nodestate.Field, updateFlag // nodes is reduced or they are demoted to "inactive" state if their priority is // insufficient even at minimal capacity. type priorityPool struct { - priorityPoolSetup + setup *serverSetup ns *nodestate.NodeStateMachine clock mclock.Clock lock sync.Mutex @@ -119,22 +89,22 @@ type ppNodeInfo struct { } // newPriorityPool creates a new priorityPool -func newPriorityPool(ns *nodestate.NodeStateMachine, setup priorityPoolSetup, clock mclock.Clock, minCap uint64, activeBias time.Duration, capacityStepDiv uint64) *priorityPool { +func newPriorityPool(ns *nodestate.NodeStateMachine, setup *serverSetup, clock mclock.Clock, minCap uint64, activeBias time.Duration, capacityStepDiv uint64) *priorityPool { pp := &priorityPool{ - ns: ns, - priorityPoolSetup: setup, - clock: clock, - inactiveQueue: prque.New(inactiveSetIndex), - minCap: minCap, - activeBias: activeBias, - capacityStepDiv: capacityStepDiv, + setup: setup, + ns: ns, + clock: clock, + inactiveQueue: prque.New(inactiveSetIndex), + minCap: minCap, + activeBias: activeBias, + capacityStepDiv: capacityStepDiv, } if pp.activeBias < time.Duration(1) { pp.activeBias = time.Duration(1) } pp.activeQueue = prque.NewLazyQueue(activeSetIndex, activePriority, pp.activeMaxPriority, clock, lazyQueueRefresh) - ns.SubscribeField(pp.priorityField, func(node *enode.Node, state nodestate.Flags, oldValue, newValue interface{}) { + ns.SubscribeField(pp.setup.balanceField, func(node *enode.Node, state nodestate.Flags, oldValue, newValue interface{}) { if newValue != nil { c := &ppNodeInfo{ node: node, @@ -142,18 +112,18 @@ func newPriorityPool(ns *nodestate.NodeStateMachine, setup priorityPoolSetup, cl activeIndex: -1, inactiveIndex: -1, } - ns.SetFieldSub(node, pp.ppNodeInfoField, c) + ns.SetFieldSub(node, pp.setup.queueField, c) } else { - ns.SetStateSub(node, nodestate.Flags{}, pp.activeFlag.Or(pp.inactiveFlag), 0) - if n, _ := pp.ns.GetField(node, pp.ppNodeInfoField).(*ppNodeInfo); n != nil { + ns.SetStateSub(node, nodestate.Flags{}, pp.setup.activeFlag.Or(pp.setup.inactiveFlag), 0) + if n, _ := pp.ns.GetField(node, pp.setup.queueField).(*ppNodeInfo); n != nil { pp.disconnectedNode(n) } - 
ns.SetFieldSub(node, pp.capacityField, nil) - ns.SetFieldSub(node, pp.ppNodeInfoField, nil) + ns.SetFieldSub(node, pp.setup.capacityField, nil) + ns.SetFieldSub(node, pp.setup.queueField, nil) } }) - ns.SubscribeState(pp.activeFlag.Or(pp.inactiveFlag), func(node *enode.Node, oldState, newState nodestate.Flags) { - if c, _ := pp.ns.GetField(node, pp.ppNodeInfoField).(*ppNodeInfo); c != nil { + ns.SubscribeState(pp.setup.activeFlag.Or(pp.setup.inactiveFlag), func(node *enode.Node, oldState, newState nodestate.Flags) { + if c, _ := pp.ns.GetField(node, pp.setup.queueField).(*ppNodeInfo); c != nil { if oldState.IsEmpty() { pp.connectedNode(c) } @@ -162,7 +132,7 @@ func newPriorityPool(ns *nodestate.NodeStateMachine, setup priorityPoolSetup, cl } } }) - ns.SubscribeState(pp.updateFlag, func(node *enode.Node, oldState, newState nodestate.Flags) { + ns.SubscribeState(pp.setup.updateFlag, func(node *enode.Node, oldState, newState nodestate.Flags) { if !newState.IsEmpty() { pp.updatePriority(node) } @@ -196,7 +166,7 @@ func (pp *priorityPool) requestCapacity(node *enode.Node, targetCap uint64, bias if bias < pp.activeBias { bias = pp.activeBias } - c, _ := pp.ns.GetField(node, pp.ppNodeInfoField).(*ppNodeInfo) + c, _ := pp.ns.GetField(node, pp.setup.queueField).(*ppNodeInfo) if c == nil { log.Error("requestCapacity called for unknown node", "id", node.ID()) return math.MaxInt64, false @@ -470,13 +440,13 @@ type capUpdate struct { func (pp *priorityPool) updateFlags(updates []capUpdate) { for _, f := range updates { if f.oldCap == 0 { - pp.ns.SetStateSub(f.node, pp.activeFlag, pp.inactiveFlag, 0) + pp.ns.SetStateSub(f.node, pp.setup.activeFlag, pp.setup.inactiveFlag, 0) } if f.newCap == 0 { - pp.ns.SetStateSub(f.node, pp.inactiveFlag, pp.activeFlag, 0) - pp.ns.SetFieldSub(f.node, pp.capacityField, nil) + pp.ns.SetStateSub(f.node, pp.setup.inactiveFlag, pp.setup.activeFlag, 0) + pp.ns.SetFieldSub(f.node, pp.setup.capacityField, nil) } else { - pp.ns.SetFieldSub(f.node, pp.capacityField, f.newCap) + pp.ns.SetFieldSub(f.node, pp.setup.capacityField, f.newCap) } } } @@ -514,7 +484,7 @@ func (pp *priorityPool) updatePriority(node *enode.Node) { pp.updateFlags(updates) }() - c, _ := pp.ns.GetField(node, pp.ppNodeInfoField).(*ppNodeInfo) + c, _ := pp.ns.GetField(node, pp.setup.queueField).(*ppNodeInfo) if c == nil || !c.connected { return } diff --git a/les/vflux/server/prioritypool_test.go b/les/vflux/server/prioritypool_test.go index c7aa6bdf5169..6c2257157f5a 100644 --- a/les/vflux/server/prioritypool_test.go +++ b/les/vflux/server/prioritypool_test.go @@ -28,16 +28,6 @@ import ( "github.com/ethereum/go-ethereum/p2p/nodestate" ) -var ( - testSetup = &nodestate.Setup{} - ppTestClientField = testSetup.NewField("ppTestClient", reflect.TypeOf(&ppTestClient{})) - ppTestSetup = newPriorityPoolSetup(testSetup) -) - -func init() { - ppTestSetup.connect(ppTestClientField, btTestSetup.updateFlag) -} - const ( testCapacityStepDiv = 100 testCapacityToleranceDiv = 10 @@ -59,15 +49,17 @@ func (c *ppTestClient) estimatePriority(cap uint64, addBalance int64, future, bi func TestPriorityPool(t *testing.T) { clock := &mclock.Simulated{} - ns := nodestate.NewNodeStateMachine(nil, nil, clock, testSetup) + setup := newServerSetup() + setup.balanceField = setup.setup.NewField("ppTestClient", reflect.TypeOf(&ppTestClient{})) + ns := nodestate.NewNodeStateMachine(nil, nil, clock, setup.setup) - ns.SubscribeField(ppTestSetup.capacityField, func(node *enode.Node, state nodestate.Flags, oldValue, newValue interface{}) { - if n := 
ns.GetField(node, ppTestSetup.priorityField); n != nil { + ns.SubscribeField(setup.capacityField, func(node *enode.Node, state nodestate.Flags, oldValue, newValue interface{}) { + if n := ns.GetField(node, setup.balanceField); n != nil { c := n.(*ppTestClient) c.cap = newValue.(uint64) } }) - pp := newPriorityPool(ns, ppTestSetup, clock, testMinCap, 0, testCapacityStepDiv) + pp := newPriorityPool(ns, setup, clock, testMinCap, 0, testCapacityStepDiv) ns.Start() pp.SetLimits(100, 1000000) clients := make([]*ppTestClient, 100) @@ -99,8 +91,8 @@ func TestPriorityPool(t *testing.T) { } sumBalance += c.balance clients[i] = c - ns.SetField(c.node, ppTestSetup.priorityField, c) - ns.SetState(c.node, ppTestSetup.inactiveFlag, nodestate.Flags{}, 0) + ns.SetField(c.node, setup.balanceField, c) + ns.SetState(c.node, setup.inactiveFlag, nodestate.Flags{}, 0) raise(c) check(c) } @@ -110,8 +102,8 @@ func TestPriorityPool(t *testing.T) { oldBalance := c.balance c.balance = uint64(rand.Int63n(100000000000) + 100000000000) sumBalance += c.balance - oldBalance - pp.ns.SetState(c.node, btTestSetup.updateFlag, nodestate.Flags{}, 0) - pp.ns.SetState(c.node, nodestate.Flags{}, btTestSetup.updateFlag, 0) + pp.ns.SetState(c.node, setup.updateFlag, nodestate.Flags{}, 0) + pp.ns.SetState(c.node, nodestate.Flags{}, setup.updateFlag, 0) if c.balance > oldBalance { raise(c) } else { @@ -155,8 +147,8 @@ func TestPriorityPool(t *testing.T) { } c.balance -= add sumBalance -= add - pp.ns.SetState(c.node, btTestSetup.updateFlag, nodestate.Flags{}, 0) - pp.ns.SetState(c.node, nodestate.Flags{}, btTestSetup.updateFlag, 0) + pp.ns.SetState(c.node, setup.updateFlag, nodestate.Flags{}, 0) + pp.ns.SetState(c.node, nodestate.Flags{}, setup.updateFlag, 0) for _, c := range clients { raise(c) } @@ -168,8 +160,11 @@ func TestPriorityPool(t *testing.T) { func TestCapacityCurve(t *testing.T) { clock := &mclock.Simulated{} - ns := nodestate.NewNodeStateMachine(nil, nil, clock, testSetup) - pp := newPriorityPool(ns, ppTestSetup, clock, 400000, 0, 2) + setup := newServerSetup() + setup.balanceField = setup.setup.NewField("ppTestClient", reflect.TypeOf(&ppTestClient{})) + ns := nodestate.NewNodeStateMachine(nil, nil, clock, setup.setup) + + pp := newPriorityPool(ns, setup, clock, 400000, 0, 2) ns.Start() pp.SetLimits(10, 10000000) clients := make([]*ppTestClient, 10) @@ -181,8 +176,8 @@ func TestCapacityCurve(t *testing.T) { cap: 1000000, } clients[i] = c - ns.SetField(c.node, ppTestSetup.priorityField, c) - ns.SetState(c.node, ppTestSetup.inactiveFlag, nodestate.Flags{}, 0) + ns.SetField(c.node, setup.balanceField, c) + ns.SetState(c.node, setup.inactiveFlag, nodestate.Flags{}, 0) ns.Operation(func() { pp.requestCapacity(c.node, c.cap, 0, true) }) diff --git a/les/vflux/server/status.go b/les/vflux/server/status.go new file mode 100644 index 000000000000..469190777b25 --- /dev/null +++ b/les/vflux/server/status.go @@ -0,0 +1,59 @@ +// Copyright 2021 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . + +package server + +import ( + "reflect" + + "github.com/ethereum/go-ethereum/p2p/nodestate" +) + +type peerWrapper struct{ clientPeer } // the NodeStateMachine type system needs this wrapper + +// serverSetup is a wrapper of the node state machine setup, which contains +// all the created flags and fields used in the vflux server side. +type serverSetup struct { + setup *nodestate.Setup + clientField nodestate.Field // Field contains the client peer handler + + // Flags and fields controlled by balance tracker. BalanceTracker + // is responsible for setting/deleting these flags or fields. + priorityFlag nodestate.Flags // Flag is set if the node has a positive balance + updateFlag nodestate.Flags // Flag is set whenever the node balance is changed(priority changed) + balanceField nodestate.Field // Field contains the client balance for priority calculation + + // Flags and fields controlled by priority queue. Priority queue + // is responsible for setting/deleting these flags or fields. + activeFlag nodestate.Flags // Flag is set if the node is active + inactiveFlag nodestate.Flags // Flag is set if the node is inactive + capacityField nodestate.Field // Field contains the capacity of the node + queueField nodestate.Field // Field contains the infomration in the priority queue +} + +// newServerSetup initializes the setup for state machine and returns the flags/fields group. +func newServerSetup() *serverSetup { + setup := &serverSetup{setup: &nodestate.Setup{}} + setup.clientField = setup.setup.NewField("client", reflect.TypeOf(peerWrapper{})) + setup.priorityFlag = setup.setup.NewFlag("priority") + setup.updateFlag = setup.setup.NewFlag("update") + setup.balanceField = setup.setup.NewField("balance", reflect.TypeOf(&nodeBalance{})) + setup.activeFlag = setup.setup.NewFlag("active") + setup.inactiveFlag = setup.setup.NewFlag("inactive") + setup.capacityField = setup.setup.NewField("capacity", reflect.TypeOf(uint64(0))) + setup.queueField = setup.setup.NewField("queue", reflect.TypeOf(&ppNodeInfo{})) + return setup +} From 0db4ff42cc4548ebf266a5153f00c341f00659fe Mon Sep 17 00:00:00 2001 From: Zsolt Felfoldi Date: Mon, 29 Mar 2021 14:59:53 +0200 Subject: [PATCH 18/27] les/vflux/server: enforce capacity curve monotonicity --- les/vflux/server/prioritypool.go | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/les/vflux/server/prioritypool.go b/les/vflux/server/prioritypool.go index edcda1486dda..99912a7737d8 100644 --- a/les/vflux/server/prioritypool.go +++ b/les/vflux/server/prioritypool.go @@ -534,6 +534,7 @@ func (pp *priorityPool) getCapacityCurve() *capacityCurve { excludeFirst := pp.maxCount == pp.activeCount // reduce node capacities or remove nodes until nothing is left in the queue; // record the available capacity and the necessary priority after each step + lastPri := int64(math.MinInt64) for pp.activeCap > 0 { cp := curvePoint{} if pp.activeCap > pp.maxCap { @@ -548,6 +549,12 @@ func (pp *priorityPool) getCapacityCurve() *capacityCurve { // enforceLimits removes the lowest priority node if it has minimal capacity, // otherwise reduces its capacity next, cp.nextPri = pp.enforceLimits() + if cp.nextPri < lastPri { + // enforce monotonicity which may be broken by continuously changing priorities + cp.nextPri = lastPri + } else { + lastPri = cp.nextPri + } pp.activeCap -= tempCap 
if next == nil { log.Error("getCapacityCurve: cannot remove next element from the priority queue") From 7032e9db10f4334e7606f8b3ed2c10fbb2fc6d87 Mon Sep 17 00:00:00 2001 From: Zsolt Felfoldi Date: Mon, 29 Mar 2021 16:14:55 +0200 Subject: [PATCH 19/27] les/vflux/server: simplified requestCapacity --- les/vflux/server/clientpool.go | 4 +-- les/vflux/server/prioritypool.go | 35 +++++++++------------------ les/vflux/server/prioritypool_test.go | 8 +++--- 3 files changed, 17 insertions(+), 30 deletions(-) diff --git a/les/vflux/server/clientpool.go b/les/vflux/server/clientpool.go index 6bf4e923d482..ff7b20dbc948 100644 --- a/les/vflux/server/clientpool.go +++ b/les/vflux/server/clientpool.go @@ -123,7 +123,7 @@ func NewClientPool(balanceDb ethdb.KeyValueStore, minCap uint64, connectedBias t // active with no priority; limit capacity to minCap cap, _ := ns.GetField(node, setup.capacityField).(uint64) if cap > minCap { - cp.requestCapacity(node, minCap, 0, true) + cp.requestCapacity(node, minCap, 0) } } if newState.Equals(nodestate.Flags{}) { @@ -283,7 +283,7 @@ func (cp *ClientPool) SetCapacity(node *enode.Node, reqCap uint64, bias time.Dur tryCap = reqCap } } - if _, allowed := cp.requestCapacity(node, tryCap, bias, true); allowed { + if cp.requestCapacity(node, tryCap, bias) { capacity = tryCap return } diff --git a/les/vflux/server/prioritypool.go b/les/vflux/server/prioritypool.go index 99912a7737d8..beea05bd8817 100644 --- a/les/vflux/server/prioritypool.go +++ b/les/vflux/server/prioritypool.go @@ -84,7 +84,7 @@ type ppNodeInfo struct { connected bool capacity, origCap uint64 bias time.Duration - forced, changed bool + changed bool activeIndex, inactiveIndex int } @@ -151,7 +151,7 @@ func newPriorityPool(ns *nodestate.NodeStateMachine, setup *serverSetup, clock m // is false then both inactiveFlag and activeFlag can be unset and they are not changed // by this function call either. 
// Note 2: this function should run inside a NodeStateMachine operation -func (pp *priorityPool) requestCapacity(node *enode.Node, targetCap uint64, bias time.Duration, setCap bool) (minPriority int64, allowed bool) { +func (pp *priorityPool) requestCapacity(node *enode.Node, targetCap uint64, bias time.Duration) bool { pp.lock.Lock() pp.activeQueue.Refresh() var updates []capUpdate @@ -169,26 +169,20 @@ func (pp *priorityPool) requestCapacity(node *enode.Node, targetCap uint64, bias c, _ := pp.ns.GetField(node, pp.setup.queueField).(*ppNodeInfo) if c == nil { log.Error("requestCapacity called for unknown node", "id", node.ID()) - return math.MaxInt64, false - } - var priority int64 - if targetCap > c.capacity { - priority = c.nodePriority.estimatePriority(targetCap, 0, 0, bias, false) - } else { - priority = c.nodePriority.priority(targetCap) + return false } pp.markForChange(c) pp.setCapacity(c, targetCap) - c.forced = true + if targetCap > c.capacity { + c.bias = bias + } pp.activeQueue.Remove(c.activeIndex) pp.inactiveQueue.Remove(c.inactiveIndex) pp.activeQueue.Push(c) - _, minPriority = pp.enforceLimits() - // if capacity update is possible now then minPriority == math.MinInt64 - // if it is not possible at all then minPriority == math.MaxInt64 - allowed = priority >= minPriority - updates = pp.finalizeChanges(setCap && allowed) - return + pp.enforceLimits() + success := c.capacity == targetCap + updates = pp.finalizeChanges(success) + return success } // SetLimits sets the maximum number and total capacity of simultaneously active nodes @@ -267,9 +261,6 @@ func invertPriority(p int64) int64 { // activePriority callback returns actual priority of ppNodeInfo item in activeQueue func activePriority(a interface{}) int64 { c := a.(*ppNodeInfo) - if c.forced { - return math.MinInt64 - } if c.bias == 0 { return invertPriority(c.nodePriority.priority(c.capacity)) } else { @@ -280,9 +271,6 @@ func activePriority(a interface{}) int64 { // activeMaxPriority callback returns estimated maximum priority of ppNodeInfo item in activeQueue func (pp *priorityPool) activeMaxPriority(a interface{}, until mclock.AbsTime) int64 { c := a.(*ppNodeInfo) - if c.forced { - return math.MinInt64 - } future := time.Duration(until - pp.clock.Now()) if future < 0 { future = 0 @@ -400,11 +388,10 @@ func (pp *priorityPool) enforceLimits() (*ppNodeInfo, int64) { // they should be performed while the mutex is not held. 
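// Editor's sketch (not part of the patch): after this simplification requestCapacity either
// applies the target capacity and returns true, or leaves the node untouched and returns
// false; callers no longer interpret a (minPriority, allowed) pair. As in the updated tests,
// it has to run inside a NodeStateMachine operation. The helper name is illustrative, and
// the next patch in the series changes the signature again to a min/max target range.
package server

import (
	"time"

	"github.com/ethereum/go-ethereum/p2p/enode"
	"github.com/ethereum/go-ethereum/p2p/nodestate"
)

func tryRaiseCapacity(pp *priorityPool, ns *nodestate.NodeStateMachine, node *enode.Node, target uint64, bias time.Duration) bool {
	var ok bool
	ns.Operation(func() {
		// bias only matters when raising above the current capacity; it favours
		// the already active nodes over the one being promoted
		ok = pp.requestCapacity(node, target, bias)
	})
	return ok
}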
func (pp *priorityPool) finalizeChanges(commit bool) (updates []capUpdate) { for _, c := range pp.changed { - // always remove and push back in order to update biased/forced priority + // always remove and push back in order to update biased priority pp.activeQueue.Remove(c.activeIndex) pp.inactiveQueue.Remove(c.inactiveIndex) c.bias = 0 - c.forced = false c.changed = false if !commit { pp.setCapacity(c, c.origCap) diff --git a/les/vflux/server/prioritypool_test.go b/les/vflux/server/prioritypool_test.go index 6c2257157f5a..af411fb6f008 100644 --- a/les/vflux/server/prioritypool_test.go +++ b/les/vflux/server/prioritypool_test.go @@ -67,7 +67,7 @@ func TestPriorityPool(t *testing.T) { for { var ok bool ns.Operation(func() { - _, ok = pp.requestCapacity(c.node, c.cap+c.cap/testCapacityStepDiv, 0, true) + ok = pp.requestCapacity(c.node, c.cap+c.cap/testCapacityStepDiv, 0) }) if !ok { return @@ -132,14 +132,14 @@ func TestPriorityPool(t *testing.T) { expFail = testMinCap } ns.Operation(func() { - _, ok = pp.requestCapacity(c.node, expFail, 0, true) + ok = pp.requestCapacity(c.node, expFail, 0) }) if ok { t.Errorf("Request for more than expected available capacity succeeded") } if expCap >= testMinCap { ns.Operation(func() { - _, ok = pp.requestCapacity(c.node, expCap, 0, true) + ok = pp.requestCapacity(c.node, expCap, 0) }) if !ok { t.Errorf("Request for expected available capacity failed") @@ -179,7 +179,7 @@ func TestCapacityCurve(t *testing.T) { ns.SetField(c.node, setup.balanceField, c) ns.SetState(c.node, setup.inactiveFlag, nodestate.Flags{}, 0) ns.Operation(func() { - pp.requestCapacity(c.node, c.cap, 0, true) + pp.requestCapacity(c.node, c.cap, 0) }) } From f7b4531195b97c3abe53b90df68fc34b6e3576a3 Mon Sep 17 00:00:00 2001 From: Zsolt Felfoldi Date: Mon, 29 Mar 2021 18:12:11 +0200 Subject: [PATCH 20/27] les/vflux/server: requestCapacity with target range, no iterations in SetCapacity --- les/vflux/server/clientpool.go | 55 +++++++++------- les/vflux/server/prioritypool.go | 95 ++++++++++++++------------- les/vflux/server/prioritypool_test.go | 13 ++-- 3 files changed, 89 insertions(+), 74 deletions(-) diff --git a/les/vflux/server/clientpool.go b/les/vflux/server/clientpool.go index ff7b20dbc948..4fa30b2a3ca5 100644 --- a/les/vflux/server/clientpool.go +++ b/les/vflux/server/clientpool.go @@ -90,7 +90,7 @@ func NewClientPool(balanceDb ethdb.KeyValueStore, minCap uint64, connectedBias t setup := newServerSetup() ns := nodestate.NewNodeStateMachine(nil, nil, clock, setup.setup) cp := &ClientPool{ - priorityPool: newPriorityPool(ns, setup, clock, minCap, connectedBias, 4), + priorityPool: newPriorityPool(ns, setup, clock, minCap, connectedBias, 4, 100), balanceTracker: newBalanceTracker(ns, setup, balanceDb, clock, &utils.Expirer{}, &utils.Expirer{}), setup: setup, ns: ns, @@ -123,7 +123,7 @@ func NewClientPool(balanceDb ethdb.KeyValueStore, minCap uint64, connectedBias t // active with no priority; limit capacity to minCap cap, _ := ns.GetField(node, setup.capacityField).(uint64) if cap > minCap { - cp.requestCapacity(node, minCap, 0) + cp.requestCapacity(node, minCap, minCap, 0) } } if newState.Equals(nodestate.Flags{}) { @@ -254,7 +254,6 @@ func (cp *ClientPool) SetCapacity(node *enode.Node, reqCap uint64, bias time.Dur if reqCap == capacity { return } - curveBias := bias if requested { // mark the requested node so that the UpdateCapacity callback can signal // whether the update is the direct result of a SetCapacity call on the given node @@ -264,29 +263,37 @@ func (cp *ClientPool) 
SetCapacity(node *enode.Node, reqCap uint64, bias time.Dur }() } - // estimate maximum available capacity at the current priority level and request - // the estimated amount; allow a limited number of retries because individual - // balances can change between the estimation and the request - for count := 0; count < 20; count++ { - // apply a small extra bias to ensure that the request won't fail because of rounding errors - curveBias += time.Second * 10 - tryCap := reqCap - if reqCap > capacity { - curve := cp.getCapacityCurve().exclude(node.ID()) - tryCap = curve.maxCapacity(func(capacity uint64) int64 { - return balance.estimatePriority(capacity, 0, 0, curveBias, false) - }) - if tryCap <= capacity { - return - } - if tryCap > reqCap { - tryCap = reqCap - } - } - if cp.requestCapacity(node, tryCap, bias) { - capacity = tryCap + var minTarget, maxTarget uint64 + if reqCap > capacity { + // Estimate maximum available capacity at the current priority level and request + // the estimated amount. + // Note: requestCapacity could find the highest available capacity between the + // current and the requested capacity but it could cost a lot of iterations with + // fine step adjustment if the requested capacity is very high. By doing a quick + // estimation of the maximum available capacity based on the capacity curve we + // can limit the number of required iterations. + curve := cp.getCapacityCurve().exclude(node.ID()) + maxTarget = curve.maxCapacity(func(capacity uint64) int64 { + return balance.estimatePriority(capacity, 0, 0, bias, false) + }) + if maxTarget <= capacity { return } + if maxTarget > reqCap { + maxTarget = reqCap + } + // Specify a narrow target range that allows a limited number of fine step + // iterations + minTarget = maxTarget - maxTarget/20 + if minTarget < capacity { + minTarget = capacity + } + } else { + minTarget, maxTarget = reqCap, reqCap + } + if newCap := cp.requestCapacity(node, minTarget, maxTarget, bias); newCap >= minTarget && newCap <= maxTarget { + capacity = newCap + return } // we should be able to find the maximum allowed capacity in a few iterations log.Error("Unable to find maximum allowed capacity") diff --git a/les/vflux/server/prioritypool.go b/les/vflux/server/prioritypool.go index beea05bd8817..f00448fe670f 100644 --- a/les/vflux/server/prioritypool.go +++ b/les/vflux/server/prioritypool.go @@ -59,18 +59,18 @@ const ( // nodes is reduced or they are demoted to "inactive" state if their priority is // insufficient even at minimal capacity. 
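// Editor's sketch (not part of the patch): the request-range computation above in isolation,
// with a worked example. The numbers are made up; only the formula — roughly a 5% window
// below the capacity-curve estimate, clamped to [current capacity, requested capacity] —
// comes from the patch, and only the raise branch is shown.
package server

func targetWindow(capacity, reqCap, curveEstimate uint64) (minTarget, maxTarget uint64, ok bool) {
	maxTarget = curveEstimate
	if maxTarget <= capacity {
		return 0, 0, false // no raise is possible at the current priority level
	}
	if maxTarget > reqCap {
		maxTarget = reqCap
	}
	minTarget = maxTarget - maxTarget/20 // narrow range => few fine-step iterations in requestCapacity
	if minTarget < capacity {
		minTarget = capacity
	}
	return minTarget, maxTarget, true
}

// Example: capacity = 100000, reqCap = 1000000, curveEstimate = 400000
//   => minTarget = 380000, maxTarget = 400000
// requestCapacity then only has to search inside this narrow range instead of stepping
// all the way down from the requested 1000000.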
type priorityPool struct { - setup *serverSetup - ns *nodestate.NodeStateMachine - clock mclock.Clock - lock sync.Mutex - activeQueue *prque.LazyQueue - inactiveQueue *prque.Prque - changed []*ppNodeInfo - activeCount, activeCap uint64 - maxCount, maxCap uint64 - minCap uint64 - activeBias time.Duration - capacityStepDiv uint64 + setup *serverSetup + ns *nodestate.NodeStateMachine + clock mclock.Clock + lock sync.Mutex + activeQueue *prque.LazyQueue + inactiveQueue *prque.Prque + changed []*ppNodeInfo + activeCount, activeCap uint64 + maxCount, maxCap uint64 + minCap uint64 + activeBias time.Duration + capacityStepDiv, fineStepDiv uint64 cachedCurve *capacityCurve ccUpdatedAt mclock.AbsTime @@ -79,17 +79,17 @@ type priorityPool struct { // ppNodeInfo is the internal node descriptor of priorityPool type ppNodeInfo struct { - nodePriority nodePriority - node *enode.Node - connected bool - capacity, origCap uint64 - bias time.Duration - changed bool - activeIndex, inactiveIndex int + nodePriority nodePriority + node *enode.Node + connected bool + capacity, origCap, minTarget, stepDiv uint64 + bias time.Duration + changed bool + activeIndex, inactiveIndex int } // newPriorityPool creates a new priorityPool -func newPriorityPool(ns *nodestate.NodeStateMachine, setup *serverSetup, clock mclock.Clock, minCap uint64, activeBias time.Duration, capacityStepDiv uint64) *priorityPool { +func newPriorityPool(ns *nodestate.NodeStateMachine, setup *serverSetup, clock mclock.Clock, minCap uint64, activeBias time.Duration, capacityStepDiv, fineStepDiv uint64) *priorityPool { pp := &priorityPool{ setup: setup, ns: ns, @@ -98,6 +98,7 @@ func newPriorityPool(ns *nodestate.NodeStateMachine, setup *serverSetup, clock m minCap: minCap, activeBias: activeBias, capacityStepDiv: capacityStepDiv, + fineStepDiv: fineStepDiv, } if pp.activeBias < time.Duration(1) { pp.activeBias = time.Duration(1) @@ -111,6 +112,8 @@ func newPriorityPool(ns *nodestate.NodeStateMachine, setup *serverSetup, clock m nodePriority: newValue.(nodePriority), activeIndex: -1, inactiveIndex: -1, + minTarget: pp.minCap, + stepDiv: pp.capacityStepDiv, } ns.SetFieldSub(node, pp.setup.queueField, c) } else { @@ -140,18 +143,12 @@ func newPriorityPool(ns *nodestate.NodeStateMachine, setup *serverSetup, clock m return pp } -// requestCapacity checks whether changing the capacity of a node to the given target -// is possible (bias is applied in favor of other active nodes if the target is higher -// than the current capacity). -// If setCap is true then it also performs the change if possible. The function returns -// the minimum priority needed to do the change and whether it is currently allowed. -// If setCap and allowed are both true then the caller can assume that the change was -// successful. -// Note: priorityField should always be set before calling requestCapacity. If setCap -// is false then both inactiveFlag and activeFlag can be unset and they are not changed -// by this function call either. -// Note 2: this function should run inside a NodeStateMachine operation -func (pp *priorityPool) requestCapacity(node *enode.Node, targetCap uint64, bias time.Duration) bool { +// requestCapacity tries to set the capacity of a connected node to the highest possible +// value inside the given target range. If maxTarget is not reachable then the capacity is +// iteratively reduced in fine steps based on the fineStepDiv parameter until minTarget is reached. +// The function returns the new capacity if successful and the original capacity otherwise. 
+// Note: this function should run inside a NodeStateMachine operation +func (pp *priorityPool) requestCapacity(node *enode.Node, minTarget, maxTarget uint64, bias time.Duration) uint64 { pp.lock.Lock() pp.activeQueue.Refresh() var updates []capUpdate @@ -160,8 +157,11 @@ func (pp *priorityPool) requestCapacity(node *enode.Node, targetCap uint64, bias pp.updateFlags(updates) }() - if targetCap < pp.minCap { - targetCap = pp.minCap + if minTarget < pp.minCap { + minTarget = pp.minCap + } + if maxTarget < minTarget { + maxTarget = minTarget } if bias < pp.activeBias { bias = pp.activeBias @@ -169,20 +169,22 @@ func (pp *priorityPool) requestCapacity(node *enode.Node, targetCap uint64, bias c, _ := pp.ns.GetField(node, pp.setup.queueField).(*ppNodeInfo) if c == nil { log.Error("requestCapacity called for unknown node", "id", node.ID()) - return false + return 0 } pp.markForChange(c) - pp.setCapacity(c, targetCap) - if targetCap > c.capacity { + if maxTarget > c.capacity { c.bias = bias + c.stepDiv = pp.fineStepDiv } + oldCapacity := c.capacity + pp.setCapacity(c, maxTarget) + c.minTarget = minTarget pp.activeQueue.Remove(c.activeIndex) pp.inactiveQueue.Remove(c.inactiveIndex) pp.activeQueue.Push(c) pp.enforceLimits() - success := c.capacity == targetCap - updates = pp.finalizeChanges(success) - return success + updates = pp.finalizeChanges(c.capacity >= minTarget && c.capacity <= maxTarget && c.capacity != oldCapacity) + return c.capacity } // SetLimits sets the maximum number and total capacity of simultaneously active nodes @@ -368,12 +370,15 @@ func (pp *priorityPool) enforceLimits() (*ppNodeInfo, int64) { c = data.(*ppNodeInfo) pp.markForChange(c) maxActivePriority = priority - if c.capacity == pp.minCap || pp.activeCount > pp.maxCount { + if c.capacity == c.minTarget || pp.activeCount > pp.maxCount { pp.setCapacity(c, 0) } else { - sub := c.capacity / pp.capacityStepDiv - if c.capacity-sub < pp.minCap { - sub = c.capacity - pp.minCap + sub := c.capacity / c.stepDiv + if sub == 0 { + sub = 1 + } + if c.capacity-sub < c.minTarget { + sub = c.capacity - c.minTarget } pp.setCapacity(c, c.capacity-sub) pp.activeQueue.Push(c) @@ -392,6 +397,8 @@ func (pp *priorityPool) finalizeChanges(commit bool) (updates []capUpdate) { pp.activeQueue.Remove(c.activeIndex) pp.inactiveQueue.Remove(c.inactiveIndex) c.bias = 0 + c.stepDiv = pp.capacityStepDiv + c.minTarget = pp.minCap c.changed = false if !commit { pp.setCapacity(c, c.origCap) diff --git a/les/vflux/server/prioritypool_test.go b/les/vflux/server/prioritypool_test.go index af411fb6f008..51523121160b 100644 --- a/les/vflux/server/prioritypool_test.go +++ b/les/vflux/server/prioritypool_test.go @@ -59,7 +59,7 @@ func TestPriorityPool(t *testing.T) { c.cap = newValue.(uint64) } }) - pp := newPriorityPool(ns, setup, clock, testMinCap, 0, testCapacityStepDiv) + pp := newPriorityPool(ns, setup, clock, testMinCap, 0, testCapacityStepDiv, testCapacityStepDiv) ns.Start() pp.SetLimits(100, 1000000) clients := make([]*ppTestClient, 100) @@ -67,7 +67,8 @@ func TestPriorityPool(t *testing.T) { for { var ok bool ns.Operation(func() { - ok = pp.requestCapacity(c.node, c.cap+c.cap/testCapacityStepDiv, 0) + newCap := c.cap + c.cap/testCapacityStepDiv + ok = pp.requestCapacity(c.node, newCap, newCap, 0) == newCap }) if !ok { return @@ -132,14 +133,14 @@ func TestPriorityPool(t *testing.T) { expFail = testMinCap } ns.Operation(func() { - ok = pp.requestCapacity(c.node, expFail, 0) + ok = pp.requestCapacity(c.node, expFail, expFail, 0) == expFail }) if ok { 
t.Errorf("Request for more than expected available capacity succeeded") } if expCap >= testMinCap { ns.Operation(func() { - ok = pp.requestCapacity(c.node, expCap, 0) + ok = pp.requestCapacity(c.node, expCap, expCap, 0) == expCap }) if !ok { t.Errorf("Request for expected available capacity failed") @@ -164,7 +165,7 @@ func TestCapacityCurve(t *testing.T) { setup.balanceField = setup.setup.NewField("ppTestClient", reflect.TypeOf(&ppTestClient{})) ns := nodestate.NewNodeStateMachine(nil, nil, clock, setup.setup) - pp := newPriorityPool(ns, setup, clock, 400000, 0, 2) + pp := newPriorityPool(ns, setup, clock, 400000, 0, 2, 2) ns.Start() pp.SetLimits(10, 10000000) clients := make([]*ppTestClient, 10) @@ -179,7 +180,7 @@ func TestCapacityCurve(t *testing.T) { ns.SetField(c.node, setup.balanceField, c) ns.SetState(c.node, setup.inactiveFlag, nodestate.Flags{}, 0) ns.Operation(func() { - pp.requestCapacity(c.node, c.cap, 0) + pp.requestCapacity(c.node, c.cap, c.cap, 0) }) } From e294ffffbe09501edcf551ab5ab3e45e148b3a62 Mon Sep 17 00:00:00 2001 From: Zsolt Felfoldi Date: Mon, 29 Mar 2021 18:48:09 +0200 Subject: [PATCH 21/27] les/vflux/server: minor changes --- les/peer.go | 4 ++-- les/vflux/server/balance.go | 16 ++++++++-------- les/vflux/server/clientpool.go | 12 +++++------- les/vflux/server/clientpool_test.go | 2 +- les/vflux/server/prioritypool.go | 10 +++------- tests/fuzzers/vflux/clientpool-fuzzer.go | 2 +- 6 files changed, 20 insertions(+), 26 deletions(-) diff --git a/les/peer.go b/les/peer.go index 8c8196b08835..f6cc94dfad22 100644 --- a/les/peer.go +++ b/les/peer.go @@ -930,8 +930,8 @@ func (p *clientPeer) sendAnnounce(request announceData) error { return p2p.Send(p.rw, AnnounceMsg, request) } -// InactiveTimeout implements vfs.clientPeer -func (p *clientPeer) InactiveTimeout() time.Duration { +// InactiveAllowance implements vfs.clientPeer +func (p *clientPeer) InactiveAllowance() time.Duration { return 0 // will return more than zero for les/5 clients } diff --git a/les/vflux/server/balance.go b/les/vflux/server/balance.go index b63aaf81aa4d..01e645a16a5d 100644 --- a/les/vflux/server/balance.go +++ b/les/vflux/server/balance.go @@ -128,10 +128,10 @@ func (b balance) negValue(now mclock.AbsTime) uint64 { return b.neg.Value(b.negExp.LogOffset(now)) } -// add adds the value of a given amount to the balance. The original value and +// addValue adds the value of a given amount to the balance. The original value and // updated value will also be returned if the addition is successful. // Returns the error if the given value is too large and the value overflows. 
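// Editor's sketch (not part of the patch): how the four return values of the renamed
// addValue are consumed by the call sites changed below. The helper is illustrative and
// ignores the locking that the real callers (AddBalance, RequestServed) perform around it;
// the (old, new, net, err) meanings are taken from those call sites.
package server

import (
	"github.com/ethereum/go-ethereum/common/mclock"
)

func applyPosAdjustment(n *nodeBalance, now mclock.AbsTime, amount int64) (uint64, error) {
	oldValue, newValue, net, err := n.balance.addValue(now, amount, true, false)
	if err != nil {
		return oldValue, err // the amount would overflow the positive balance
	}
	if net != amount {
		// a negative amount larger than the remaining positive balance was only partly
		// absorbed; RequestServed charges the remainder to the negative balance
	}
	return newValue, nil
}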
-func (b *balance) addPosValue(now mclock.AbsTime, amount int64, pos bool, force bool) (uint64, uint64, int64, error) { +func (b *balance) addValue(now mclock.AbsTime, amount int64, pos bool, force bool) (uint64, uint64, int64, error) { var ( val utils.ExpiredValue offset utils.Fixed64 @@ -217,7 +217,7 @@ func (n *nodeBalance) AddBalance(amount int64) (uint64, uint64, error) { // Operation with holding the lock n.bt.updateTotalBalance(n, func() bool { n.updateBalance(now) - if old, new, _, err = n.balance.addPosValue(now, amount, true, false); err != nil { + if old, new, _, err = n.balance.addValue(now, amount, true, false); err != nil { return false } callbacks, setPriority = n.checkCallbacks(now), n.checkPriorityStatus() @@ -290,7 +290,7 @@ func (n *nodeBalance) RequestServed(cost uint64) (newBalance uint64) { newBalance = n.balance.posValue(now) } else { var net int64 - _, newBalance, net, _ = n.balance.addPosValue(now, posCost, true, false) + _, newBalance, net, _ = n.balance.addValue(now, posCost, true, false) if posCost == net { fcost = 0 } else { @@ -300,7 +300,7 @@ func (n *nodeBalance) RequestServed(cost uint64) (newBalance uint64) { } } if fcost > 0 && n.negFactor.RequestFactor != 0 { - n.balance.addPosValue(now, int64(fcost*n.negFactor.RequestFactor), false, false) + n.balance.addValue(now, int64(fcost*n.negFactor.RequestFactor), false, false) check = true } n.sumReqCost += cost @@ -345,7 +345,7 @@ func (n *nodeBalance) estimatePriority(capacity uint64, addBalance int64, future b := n.balance // copy the balance if addBalance != 0 { - b.addPosValue(now, addBalance, true, true) + b.addValue(now, addBalance, true, true) } if future > 0 { var avgReqCost float64 @@ -631,7 +631,7 @@ func (n *nodeBalance) reducedBalance(b balance, start mclock.AbsTime, dt time.Du if !b.pos.IsZero() { factor := n.posFactor.connectionPrice(capacity, avgReqCost) diff := -int64(dtf * factor) - _, _, net, _ := b.addPosValue(at, diff, true, false) + _, _, net, _ := b.addValue(at, diff, true, false) if net == diff { dtf = 0 } else { @@ -640,7 +640,7 @@ func (n *nodeBalance) reducedBalance(b balance, start mclock.AbsTime, dt time.Du } if dtf > 0 { factor := n.negFactor.connectionPrice(capacity, avgReqCost) - b.addPosValue(at, int64(dtf*factor), false, false) + b.addValue(at, int64(dtf*factor), false, false) } return b } diff --git a/les/vflux/server/clientpool.go b/les/vflux/server/clientpool.go index 4fa30b2a3ca5..e6e9a2c4e165 100644 --- a/les/vflux/server/clientpool.go +++ b/les/vflux/server/clientpool.go @@ -80,7 +80,7 @@ type ClientPool struct { type clientPeer interface { Node() *enode.Node FreeClientId() string // unique id for non-priority clients (typically a prefix of the network address) - InactiveTimeout() time.Duration // disconnection timeout for inactive non-priority peers + InactiveAllowance() time.Duration // disconnection timeout for inactive non-priority peers UpdateCapacity(newCap uint64, requested bool) // signals a capacity update (requested is true if it is a result of a SetCapacity call on the given peer Disconnect() // initiates disconnection (Unregister should always be called) } @@ -105,7 +105,7 @@ func NewClientPool(balanceDb ethdb.KeyValueStore, minCap uint64, connectedBias t // set timeout for non-priority inactive client var timeout time.Duration if c, ok := ns.GetField(node, setup.clientField).(clientPeer); ok { - timeout = c.InactiveTimeout() + timeout = c.InactiveAllowance() } if timeout > 0 { ns.AddTimeout(node, setup.inactiveFlag, timeout) @@ -245,11 +245,9 @@ func (cp 
*ClientPool) SetCapacity(node *enode.Node, reqCap uint64, bias time.Dur // performed by the server automatically as soon as necessary/possible reqCap = cp.minCap } - if reqCap > cp.minCap { - if cp.ns.GetState(node).HasNone(cp.setup.priorityFlag) && reqCap > cp.minCap { - err = ErrNoPriority - return - } + if reqCap > cp.minCap && cp.ns.GetState(node).HasNone(cp.setup.priorityFlag) { + err = ErrNoPriority + return } if reqCap == capacity { return diff --git a/les/vflux/server/clientpool_test.go b/les/vflux/server/clientpool_test.go index 36d919213566..950312169724 100644 --- a/les/vflux/server/clientpool_test.go +++ b/les/vflux/server/clientpool_test.go @@ -81,7 +81,7 @@ func (i *poolTestPeer) FreeClientId() string { return fmt.Sprintf("addr #%d", i.index) } -func (i *poolTestPeer) InactiveTimeout() time.Duration { +func (i *poolTestPeer) InactiveAllowance() time.Duration { if i.inactiveAllowed { return time.Second * 10 } diff --git a/les/vflux/server/prioritypool.go b/les/vflux/server/prioritypool.go index f00448fe670f..d3584ea63b18 100644 --- a/les/vflux/server/prioritypool.go +++ b/les/vflux/server/prioritypool.go @@ -212,17 +212,13 @@ func (pp *priorityPool) SetLimits(maxCount, maxCap uint64) { // setActiveBias sets the bias applied when trying to activate inactive nodes func (pp *priorityPool) setActiveBias(bias time.Duration) { pp.lock.Lock() - var updates []capUpdate - defer func() { - pp.lock.Unlock() - pp.ns.Operation(func() { pp.updateFlags(updates) }) - }() - pp.activeBias = bias if pp.activeBias < time.Duration(1) { pp.activeBias = time.Duration(1) } - updates = pp.tryActivate() + updates := pp.tryActivate() + pp.lock.Unlock() + pp.ns.Operation(func() { pp.updateFlags(updates) }) } // Active returns the number and total capacity of currently active nodes diff --git a/tests/fuzzers/vflux/clientpool-fuzzer.go b/tests/fuzzers/vflux/clientpool-fuzzer.go index cd43362c08c9..41b862734884 100644 --- a/tests/fuzzers/vflux/clientpool-fuzzer.go +++ b/tests/fuzzers/vflux/clientpool-fuzzer.go @@ -60,7 +60,7 @@ func (p *clientPeer) FreeClientId() string { return p.freeID } -func (p *clientPeer) InactiveTimeout() time.Duration { +func (p *clientPeer) InactiveAllowance() time.Duration { return p.timeout } From 50ddf0ee113fcf646a455e3a2338af4a7fa31897 Mon Sep 17 00:00:00 2001 From: Zsolt Felfoldi Date: Mon, 29 Mar 2021 19:48:27 +0200 Subject: [PATCH 22/27] les/vflux/server: moved default factors to balanceTracker --- les/vflux/server/balance_tracker.go | 30 +++++++++++++++++++++-------- les/vflux/server/clientpool.go | 16 ++------------- 2 files changed, 24 insertions(+), 22 deletions(-) diff --git a/les/vflux/server/balance_tracker.go b/les/vflux/server/balance_tracker.go index 9460c2f19ca9..746697a8c7c7 100644 --- a/les/vflux/server/balance_tracker.go +++ b/les/vflux/server/balance_tracker.go @@ -43,13 +43,15 @@ const ( // The two balances are translated into a single priority value that also depends // on the actual capacity. 
type balanceTracker struct { - setup *serverSetup - clock mclock.Clock - lock sync.Mutex - ns *nodestate.NodeStateMachine - ndb *nodeDB - posExp, negExp utils.ValueExpirer - posExpTC, negExpTC uint64 + setup *serverSetup + clock mclock.Clock + lock sync.Mutex + ns *nodestate.NodeStateMachine + ndb *nodeDB + posExp, negExp utils.ValueExpirer + + posExpTC, negExpTC uint64 + defaultPosFactors, defaultNegFactors PriceFactors active, inactive utils.ExpiredValue balanceTimer *utils.UpdateTimer @@ -103,7 +105,11 @@ func newBalanceTracker(ns *nodestate.NodeStateMachine, setup *serverSetup, db et FreeClientId() string } if newValue != nil { - ns.SetFieldSub(node, bt.setup.balanceField, bt.newNodeBalance(node, newValue.(peer).FreeClientId(), true)) + n := bt.newNodeBalance(node, newValue.(peer).FreeClientId(), true) + bt.lock.Lock() + n.SetPriceFactors(bt.defaultPosFactors, bt.defaultNegFactors) + bt.lock.Unlock() + ns.SetFieldSub(node, bt.setup.balanceField, n) } else { ns.SetStateSub(node, nodestate.Flags{}, bt.setup.priorityFlag, 0) if b, _ := ns.GetField(node, bt.setup.balanceField).(*nodeBalance); b != nil { @@ -173,6 +179,14 @@ func (bt *balanceTracker) GetPosBalanceIDs(start, stop enode.ID, maxCount int) ( return bt.ndb.getPosBalanceIDs(start, stop, maxCount) } +// SetDefaultFactors sets the default price factors applied to subsequently connected clients +func (bt *balanceTracker) SetDefaultFactors(posFactors, negFactors PriceFactors) { + bt.lock.Lock() + bt.defaultPosFactors = posFactors + bt.defaultNegFactors = negFactors + bt.lock.Unlock() +} + // SetExpirationTCs sets positive and negative token expiration time constants. // Specified in seconds, 0 means infinite (no expiration). func (bt *balanceTracker) SetExpirationTCs(pos, neg uint64) { diff --git a/les/vflux/server/clientpool.go b/les/vflux/server/clientpool.go index e6e9a2c4e165..cd2ab6ad7c7b 100644 --- a/les/vflux/server/clientpool.go +++ b/les/vflux/server/clientpool.go @@ -66,9 +66,8 @@ type ClientPool struct { ns *nodestate.NodeStateMachine synced func() bool - lock sync.RWMutex - defaultPosFactors, defaultNegFactors PriceFactors - connectedBias time.Duration + lock sync.RWMutex + connectedBias time.Duration minCap uint64 // the minimal capacity value allowed for any client capReqNode *enode.Node // node that is requesting capacity change; only used inside NSM operation @@ -136,9 +135,6 @@ func NewClientPool(balanceDb ethdb.KeyValueStore, minCap uint64, connectedBias t ns.SubscribeField(setup.balanceField, func(node *enode.Node, state nodestate.Flags, oldValue, newValue interface{}) { if newValue != nil { ns.SetStateSub(node, setup.inactiveFlag, nodestate.Flags{}, 0) - cp.lock.RLock() - newValue.(*nodeBalance).SetPriceFactors(cp.defaultPosFactors, cp.defaultNegFactors) - cp.lock.RUnlock() } }) @@ -202,14 +198,6 @@ func (cp *ClientPool) Unregister(peer clientPeer) { cp.ns.SetField(peer.Node(), cp.setup.clientField, nil) } -// SetDefaultFactors sets the default price factors applied to subsequently connected clients -func (cp *ClientPool) SetDefaultFactors(posFactors, negFactors PriceFactors) { - cp.lock.Lock() - cp.defaultPosFactors = posFactors - cp.defaultNegFactors = negFactors - cp.lock.Unlock() -} - // setConnectedBias sets the connection bias, which is applied to already connected clients // So that already connected client won't be kicked out very soon and we can ensure all // connected clients can have enough time to request or sync some data. 
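// Editor's sketch (not part of the patch): because ClientPool embeds *balanceTracker, moving
// SetDefaultFactors (and the default factor fields) into balanceTracker keeps the external
// API unchanged — callers such as les/server.go still invoke it on the pool through Go
// method promotion. The factor values below are illustrative.
package server

func configureDefaults(pool *ClientPool) {
	pool.SetDefaultFactors(
		PriceFactors{TimeFactor: 0, CapacityFactor: 1, RequestFactor: 1}, // positive balance pricing
		PriceFactors{TimeFactor: 0, CapacityFactor: 1, RequestFactor: 1}, // negative balance pricing
	)
	// Each nodeBalance created afterwards picks these defaults up in the balanceField
	// subscription added above, so the client pool no longer applies them per connection.
}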
From f1000d6e52f885bdd0bfb8cd2146896bd974f9f2 Mon Sep 17 00:00:00 2001 From: Zsolt Felfoldi Date: Mon, 29 Mar 2021 19:54:04 +0200 Subject: [PATCH 23/27] les/vflux/server: set inactiveFlag in priorityPool --- les/vflux/server/clientpool.go | 6 ------ les/vflux/server/prioritypool.go | 1 + 2 files changed, 1 insertion(+), 6 deletions(-) diff --git a/les/vflux/server/clientpool.go b/les/vflux/server/clientpool.go index cd2ab6ad7c7b..a2adeef66cb5 100644 --- a/les/vflux/server/clientpool.go +++ b/les/vflux/server/clientpool.go @@ -132,12 +132,6 @@ func NewClientPool(balanceDb ethdb.KeyValueStore, minCap uint64, connectedBias t } }) - ns.SubscribeField(setup.balanceField, func(node *enode.Node, state nodestate.Flags, oldValue, newValue interface{}) { - if newValue != nil { - ns.SetStateSub(node, setup.inactiveFlag, nodestate.Flags{}, 0) - } - }) - ns.SubscribeField(setup.capacityField, func(node *enode.Node, state nodestate.Flags, oldValue, newValue interface{}) { if c, ok := ns.GetField(node, setup.clientField).(clientPeer); ok { newCap, _ := newValue.(uint64) diff --git a/les/vflux/server/prioritypool.go b/les/vflux/server/prioritypool.go index d3584ea63b18..ace23f11511d 100644 --- a/les/vflux/server/prioritypool.go +++ b/les/vflux/server/prioritypool.go @@ -116,6 +116,7 @@ func newPriorityPool(ns *nodestate.NodeStateMachine, setup *serverSetup, clock m stepDiv: pp.capacityStepDiv, } ns.SetFieldSub(node, pp.setup.queueField, c) + ns.SetStateSub(node, setup.inactiveFlag, nodestate.Flags{}, 0) } else { ns.SetStateSub(node, nodestate.Flags{}, pp.setup.activeFlag.Or(pp.setup.inactiveFlag), 0) if n, _ := pp.ns.GetField(node, pp.setup.queueField).(*ppNodeInfo); n != nil { From 4a56ebe14519bf25f080d38c9cf64b3a59b557d3 Mon Sep 17 00:00:00 2001 From: Zsolt Felfoldi Date: Mon, 29 Mar 2021 20:02:03 +0200 Subject: [PATCH 24/27] les/vflux/server: moved related metrics to vfs package --- les/metrics.go | 17 +++++------------ les/server.go | 2 -- les/vflux/server/clientpool.go | 26 +++++--------------------- les/vflux/server/metrics.go | 33 +++++++++++++++++++++++++++++++++ 4 files changed, 43 insertions(+), 35 deletions(-) create mode 100644 les/vflux/server/metrics.go diff --git a/les/metrics.go b/les/metrics.go index 5a8d4bbe0212..d356326b76ef 100644 --- a/les/metrics.go +++ b/les/metrics.go @@ -73,12 +73,9 @@ var ( serverConnectionGauge = metrics.NewRegisteredGauge("les/connection/server", nil) clientConnectionGauge = metrics.NewRegisteredGauge("les/connection/client", nil) - totalCapacityGauge = metrics.NewRegisteredGauge("les/server/totalCapacity", nil) - totalRechargeGauge = metrics.NewRegisteredGauge("les/server/totalRecharge", nil) - totalConnectedGauge = metrics.NewRegisteredGauge("les/server/totalConnected", nil) - blockProcessingTimer = metrics.NewRegisteredTimer("les/server/blockProcessingTime", nil) - capacityQueryZeroMeter = metrics.NewRegisteredMeter("les/server/capQueryZero", nil) - capacityQueryNonZeroMeter = metrics.NewRegisteredMeter("les/server/capQueryNonZero", nil) + totalCapacityGauge = metrics.NewRegisteredGauge("les/server/totalCapacity", nil) + totalRechargeGauge = metrics.NewRegisteredGauge("les/server/totalRecharge", nil) + blockProcessingTimer = metrics.NewRegisteredTimer("les/server/blockProcessingTime", nil) requestServedMeter = metrics.NewRegisteredMeter("les/server/req/avgServedTime", nil) requestServedTimer = metrics.NewRegisteredTimer("les/server/req/servedTime", nil) @@ -100,12 +97,8 @@ var ( sqServedGauge = metrics.NewRegisteredGauge("les/server/servingQueue/served", 
nil) sqQueuedGauge = metrics.NewRegisteredGauge("les/server/servingQueue/queued", nil) - clientConnectedMeter = metrics.NewRegisteredMeter("les/server/clientEvent/connected", nil) - clientActivatedMeter = metrics.NewRegisteredMeter("les/server/clientEvent/activated", nil) - clientDeactivatedMeter = metrics.NewRegisteredMeter("les/server/clientEvent/deactivated", nil) - clientDisconnectedMeter = metrics.NewRegisteredMeter("les/server/clientEvent/disconnected", nil) - clientFreezeMeter = metrics.NewRegisteredMeter("les/server/clientEvent/freeze", nil) - clientErrorMeter = metrics.NewRegisteredMeter("les/server/clientEvent/error", nil) + clientFreezeMeter = metrics.NewRegisteredMeter("les/server/clientEvent/freeze", nil) + clientErrorMeter = metrics.NewRegisteredMeter("les/server/clientEvent/error", nil) requestRTT = metrics.NewRegisteredTimer("les/client/req/rtt", nil) requestSendDelay = metrics.NewRegisteredTimer("les/client/req/sendDelay", nil) diff --git a/les/server.go b/les/server.go index b170521170cf..d44b1b57d414 100644 --- a/les/server.go +++ b/les/server.go @@ -138,8 +138,6 @@ func NewLesServer(node *node.Node, e ethBackend, config *ethconfig.Config) (*Les } srv.fcManager.SetCapacityLimits(srv.minCapacity, srv.maxCapacity, srv.minCapacity*2) srv.clientPool = vfs.NewClientPool(lesDb, srv.minCapacity, defaultConnectedBias, mclock.System{}, issync) - srv.clientPool.AddMetrics(totalConnectedGauge, clientConnectedMeter, clientDisconnectedMeter, - clientActivatedMeter, clientDeactivatedMeter, capacityQueryZeroMeter, capacityQueryNonZeroMeter) srv.clientPool.Start() srv.clientPool.SetDefaultFactors(defaultPosFactors, defaultNegFactors) srv.vfluxServer.Register(srv.clientPool, "les", "Ethereum light client service") diff --git a/les/vflux/server/clientpool.go b/les/vflux/server/clientpool.go index a2adeef66cb5..2e5fdd0ee796 100644 --- a/les/vflux/server/clientpool.go +++ b/les/vflux/server/clientpool.go @@ -26,7 +26,6 @@ import ( "github.com/ethereum/go-ethereum/les/utils" "github.com/ethereum/go-ethereum/les/vflux" "github.com/ethereum/go-ethereum/log" - "github.com/ethereum/go-ethereum/metrics" "github.com/ethereum/go-ethereum/p2p/enode" "github.com/ethereum/go-ethereum/p2p/nodestate" "github.com/ethereum/go-ethereum/rlp" @@ -71,8 +70,6 @@ type ClientPool struct { minCap uint64 // the minimal capacity value allowed for any client capReqNode *enode.Node // node that is requesting capacity change; only used inside NSM operation - - capacityQueryZeroMeter, capacityQueryNonZeroMeter metrics.Meter } // clientPeer represents a peer in the client pool. None of the callbacks should block. @@ -138,14 +135,8 @@ func NewClientPool(balanceDb ethdb.KeyValueStore, minCap uint64, connectedBias t c.UpdateCapacity(newCap, node == cp.capReqNode) } }) - return cp -} - -// AddMetrics adds metrics to the client pool. Should be called before Start(). 
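// Editor's sketch (not part of the patch): with AddMetrics removed, the meters are registered
// unconditionally by the new les/vflux/server/metrics.go under "vflux/server/..." names, so
// anything that watched the old "les/server/clientEvent/..." names needs updating. The lookup
// below assumes the usual go-ethereum metrics registry API (DefaultRegistry.Get and the Meter
// interface); treat it as an assumption rather than code from this patch.
package server

import (
	"github.com/ethereum/go-ethereum/metrics"
)

func connectedClientEvents() int64 {
	if m, ok := metrics.DefaultRegistry.Get("vflux/server/clientEvent/connected").(metrics.Meter); ok {
		return m.Count() // total number of connection events since startup
	}
	return 0
}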
-func (cp *ClientPool) AddMetrics(totalConnectedGauge metrics.Gauge, - clientConnectedMeter, clientDisconnectedMeter, clientActivatedMeter, clientDeactivatedMeter, - capacityQueryZeroMeter, capacityQueryNonZeroMeter metrics.Meter) { + // add metrics cp.ns.SubscribeState(nodestate.MergeFlags(cp.setup.activeFlag, cp.setup.inactiveFlag), func(node *enode.Node, oldState, newState nodestate.Flags) { if oldState.IsEmpty() && !newState.IsEmpty() { clientConnectedMeter.Mark(1) @@ -162,8 +153,7 @@ func (cp *ClientPool) AddMetrics(totalConnectedGauge metrics.Gauge, _, connected := cp.Active() totalConnectedGauge.Update(int64(connected)) }) - cp.capacityQueryZeroMeter = capacityQueryZeroMeter - cp.capacityQueryNonZeroMeter = capacityQueryNonZeroMeter + return cp } // Start starts the client pool. Should be called before Register/Unregister. @@ -295,9 +285,7 @@ func (cp *ClientPool) serveCapQuery(id enode.ID, freeID string, data []byte) []b } result := make(vflux.CapacityQueryReply, len(req.AddTokens)) if !cp.synced() { - if cp.capacityQueryZeroMeter != nil { - cp.capacityQueryZeroMeter.Mark(1) - } + capacityQueryZeroMeter.Mark(1) reply, _ := rlp.EncodeToBytes(&result) return reply } @@ -328,13 +316,9 @@ func (cp *ClientPool) serveCapQuery(id enode.ID, freeID string, data []byte) []b }) // add first result to metrics (don't care about priority client multi-queries yet) if result[0] == 0 { - if cp.capacityQueryZeroMeter != nil { - cp.capacityQueryZeroMeter.Mark(1) - } + capacityQueryZeroMeter.Mark(1) } else { - if cp.capacityQueryNonZeroMeter != nil { - cp.capacityQueryNonZeroMeter.Mark(1) - } + capacityQueryNonZeroMeter.Mark(1) } reply, _ := rlp.EncodeToBytes(&result) return reply diff --git a/les/vflux/server/metrics.go b/les/vflux/server/metrics.go new file mode 100644 index 000000000000..307b8347afe6 --- /dev/null +++ b/les/vflux/server/metrics.go @@ -0,0 +1,33 @@ +// Copyright 2021 The go-ethereum Authors +// This file is part of the go-ethereum library. +// +// The go-ethereum library is free software: you can redistribute it and/or modify +// it under the terms of the GNU Lesser General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. +// +// The go-ethereum library is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Lesser General Public License for more details. +// +// You should have received a copy of the GNU Lesser General Public License +// along with the go-ethereum library. If not, see . 
+ +package server + +import ( + "github.com/ethereum/go-ethereum/metrics" +) + +var ( + totalConnectedGauge = metrics.NewRegisteredGauge("vflux/server/totalConnected", nil) + + clientConnectedMeter = metrics.NewRegisteredMeter("vflux/server/clientEvent/connected", nil) + clientActivatedMeter = metrics.NewRegisteredMeter("vflux/server/clientEvent/activated", nil) + clientDeactivatedMeter = metrics.NewRegisteredMeter("vflux/server/clientEvent/deactivated", nil) + clientDisconnectedMeter = metrics.NewRegisteredMeter("vflux/server/clientEvent/disconnected", nil) + + capacityQueryZeroMeter = metrics.NewRegisteredMeter("vflux/server/capQueryZero", nil) + capacityQueryNonZeroMeter = metrics.NewRegisteredMeter("vflux/server/capQueryNonZero", nil) +) From 590a83416e946b57ff1222d175e106b852180cb1 Mon Sep 17 00:00:00 2001 From: Zsolt Felfoldi Date: Thu, 1 Apr 2021 11:16:05 +0200 Subject: [PATCH 25/27] les/vflux/client: make priorityPool temp state logic cleaner --- les/vflux/server/prioritypool.go | 132 +++++++++++++++++-------------- 1 file changed, 73 insertions(+), 59 deletions(-) diff --git a/les/vflux/server/prioritypool.go b/les/vflux/server/prioritypool.go index ace23f11511d..9183c021be88 100644 --- a/les/vflux/server/prioritypool.go +++ b/les/vflux/server/prioritypool.go @@ -63,10 +63,7 @@ type priorityPool struct { ns *nodestate.NodeStateMachine clock mclock.Clock lock sync.Mutex - activeQueue *prque.LazyQueue inactiveQueue *prque.Prque - changed []*ppNodeInfo - activeCount, activeCap uint64 maxCount, maxCap uint64 minCap uint64 activeBias time.Duration @@ -75,17 +72,27 @@ type priorityPool struct { cachedCurve *capacityCurve ccUpdatedAt mclock.AbsTime ccUpdateForced bool + + tempState []*ppNodeInfo // nodes currently in temporary state + // the following fields represent the temporary state if tempState is not empty + activeCount, activeCap uint64 + activeQueue *prque.LazyQueue } // ppNodeInfo is the internal node descriptor of priorityPool type ppNodeInfo struct { - nodePriority nodePriority - node *enode.Node - connected bool - capacity, origCap, minTarget, stepDiv uint64 - bias time.Duration - changed bool - activeIndex, inactiveIndex int + nodePriority nodePriority + node *enode.Node + connected bool + capacity uint64 // only changed when temporary state is committed + activeIndex, inactiveIndex int + + tempState bool // should only be true while the priorityPool lock is held + tempCapacity uint64 // equals capacity when tempState is false + // the following fields only affect the temporary state and they are set to their + // default value when entering the temp state + minTarget, stepDiv uint64 + bias time.Duration } // newPriorityPool creates a new priorityPool @@ -112,8 +119,6 @@ func newPriorityPool(ns *nodestate.NodeStateMachine, setup *serverSetup, clock m nodePriority: newValue.(nodePriority), activeIndex: -1, inactiveIndex: -1, - minTarget: pp.minCap, - stepDiv: pp.capacityStepDiv, } ns.SetFieldSub(node, pp.setup.queueField, c) ns.SetStateSub(node, setup.inactiveFlag, nodestate.Flags{}, 0) @@ -172,19 +177,18 @@ func (pp *priorityPool) requestCapacity(node *enode.Node, minTarget, maxTarget u log.Error("requestCapacity called for unknown node", "id", node.ID()) return 0 } - pp.markForChange(c) + pp.setTempState(c) if maxTarget > c.capacity { c.bias = bias c.stepDiv = pp.fineStepDiv } - oldCapacity := c.capacity - pp.setCapacity(c, maxTarget) + pp.setTempCapacity(c, maxTarget) c.minTarget = minTarget pp.activeQueue.Remove(c.activeIndex) pp.inactiveQueue.Remove(c.inactiveIndex) 
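	// The node's requested capacity now exists only in its temporary state
	// (tempCapacity == maxTarget). Re-insert it into the active queue and let
	// enforceLimits reduce temporary capacities (possibly including this node's)
	// while the pool limits are exceeded; finalizeChanges then commits the change
	// only if the resulting tempCapacity still lies in [minTarget, maxTarget] and
	// actually differs from the committed capacity.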
pp.activeQueue.Push(c) pp.enforceLimits() - updates = pp.finalizeChanges(c.capacity >= minTarget && c.capacity <= maxTarget && c.capacity != oldCapacity) + updates = pp.finalizeChanges(c.tempCapacity >= minTarget && c.tempCapacity <= maxTarget && c.tempCapacity != c.capacity) return c.capacity } @@ -206,7 +210,7 @@ func (pp *priorityPool) SetLimits(maxCount, maxCap uint64) { updates = pp.finalizeChanges(true) } if inc { - updates = append(updates, pp.tryActivate()...) + updates = append(updates, pp.tryActivate(false)...) } } @@ -217,7 +221,7 @@ func (pp *priorityPool) setActiveBias(bias time.Duration) { if pp.activeBias < time.Duration(1) { pp.activeBias = time.Duration(1) } - updates := pp.tryActivate() + updates := pp.tryActivate(false) pp.lock.Unlock() pp.ns.Operation(func() { pp.updateFlags(updates) }) } @@ -261,9 +265,9 @@ func invertPriority(p int64) int64 { func activePriority(a interface{}) int64 { c := a.(*ppNodeInfo) if c.bias == 0 { - return invertPriority(c.nodePriority.priority(c.capacity)) + return invertPriority(c.nodePriority.priority(c.tempCapacity)) } else { - return invertPriority(c.nodePriority.estimatePriority(c.capacity, 0, 0, c.bias, true)) + return invertPriority(c.nodePriority.estimatePriority(c.tempCapacity, 0, 0, c.bias, true)) } } @@ -274,7 +278,7 @@ func (pp *priorityPool) activeMaxPriority(a interface{}, until mclock.AbsTime) i if future < 0 { future = 0 } - return invertPriority(c.nodePriority.estimatePriority(c.capacity, 0, future, c.bias, false)) + return invertPriority(c.nodePriority.estimatePriority(c.tempCapacity, 0, future, c.bias, false)) } // inactivePriority callback returns actual priority of ppNodeInfo item in inactiveQueue @@ -298,7 +302,7 @@ func (pp *priorityPool) connectedNode(c *ppNodeInfo) { } c.connected = true pp.inactiveQueue.Push(c, pp.inactivePriority(c)) - updates = pp.tryActivate() + updates = pp.tryActivate(false) } // disconnectedNode is called when a node has been removed from the pool (both inactiveFlag @@ -320,36 +324,45 @@ func (pp *priorityPool) disconnectedNode(c *ppNodeInfo) { pp.activeQueue.Remove(c.activeIndex) pp.inactiveQueue.Remove(c.inactiveIndex) if c.capacity != 0 { - pp.setCapacity(c, 0) - updates = pp.tryActivate() + pp.setTempState(c) + pp.setTempCapacity(c, 0) + updates = pp.tryActivate(true) } } -// markForChange internally puts a node in a temporary state that can either be reverted +// setTempState internally puts a node in a temporary state that can either be reverted // or confirmed later. This temporary state allows changing the capacity of a node and // moving it between the active and inactive queue. activeFlag/inactiveFlag and // capacityField are not changed while the changes are still temporary. -func (pp *priorityPool) markForChange(c *ppNodeInfo) { - if c.changed { +func (pp *priorityPool) setTempState(c *ppNodeInfo) { + if c.tempState { return } - c.changed = true - c.origCap = c.capacity - pp.changed = append(pp.changed, c) + c.tempState = true + if c.tempCapacity != c.capacity { // should never happen + log.Crit("tempCapacity != capacity when entering tempState") + } + c.minTarget = pp.minCap + c.stepDiv = pp.capacityStepDiv + pp.tempState = append(pp.tempState, c) } -// setCapacity changes the capacity of a node and adjusts activeCap and activeCount -// accordingly. Note that this change is performed in the temporary state so it should -// be called after markForChange and before finalizeChanges. 
-func (pp *priorityPool) setCapacity(n *ppNodeInfo, cap uint64) { - pp.activeCap += cap - n.capacity - if n.capacity == 0 { +// setTempCapacity changes the capacity of a node in the temporary state and adjusts +// activeCap and activeCount accordingly. Since this change is performed in the temporary +// state it should be called after setTempState and before finalizeChanges. +func (pp *priorityPool) setTempCapacity(n *ppNodeInfo, cap uint64) { + if !n.tempState { // should never happen + log.Crit("Node is not in temporary state") + return + } + pp.activeCap += cap - n.tempCapacity + if n.tempCapacity == 0 { pp.activeCount++ } if cap == 0 { pp.activeCount-- } - n.capacity = cap + n.tempCapacity = cap } // enforceLimits enforces active node count and total capacity limits. It returns the @@ -365,19 +378,19 @@ func (pp *priorityPool) enforceLimits() (*ppNodeInfo, int64) { ) pp.activeQueue.MultiPop(func(data interface{}, priority int64) bool { c = data.(*ppNodeInfo) - pp.markForChange(c) + pp.setTempState(c) maxActivePriority = priority - if c.capacity == c.minTarget || pp.activeCount > pp.maxCount { - pp.setCapacity(c, 0) + if c.tempCapacity == c.minTarget || pp.activeCount > pp.maxCount { + pp.setTempCapacity(c, 0) } else { - sub := c.capacity / c.stepDiv + sub := c.tempCapacity / c.stepDiv if sub == 0 { sub = 1 } - if c.capacity-sub < c.minTarget { - sub = c.capacity - c.minTarget + if c.tempCapacity-sub < c.minTarget { + sub = c.tempCapacity - c.minTarget } - pp.setCapacity(c, c.capacity-sub) + pp.setTempCapacity(c, c.tempCapacity-sub) pp.activeQueue.Push(c) } return pp.activeCap > pp.maxCap || pp.activeCount > pp.maxCount @@ -389,30 +402,32 @@ func (pp *priorityPool) enforceLimits() (*ppNodeInfo, int64) { // field and according flag updates are not performed here but returned in a list because // they should be performed while the mutex is not held. 
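// Note that reverted changes are undone through setTempCapacity with the node's
// committed capacity, so activeCap and activeCount are rolled back together with
// the node's temporary capacity.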
func (pp *priorityPool) finalizeChanges(commit bool) (updates []capUpdate) { - for _, c := range pp.changed { + for _, c := range pp.tempState { // always remove and push back in order to update biased priority pp.activeQueue.Remove(c.activeIndex) pp.inactiveQueue.Remove(c.inactiveIndex) + oldCapacity := c.capacity + if commit { + c.capacity = c.tempCapacity + } else { + pp.setTempCapacity(c, c.capacity) // revert activeCount/activeCap + } + c.tempState = false c.bias = 0 c.stepDiv = pp.capacityStepDiv c.minTarget = pp.minCap - c.changed = false - if !commit { - pp.setCapacity(c, c.origCap) - } if c.connected { if c.capacity != 0 { pp.activeQueue.Push(c) } else { pp.inactiveQueue.Push(c, pp.inactivePriority(c)) } - if c.capacity != c.origCap && commit { - updates = append(updates, capUpdate{c.node, c.origCap, c.capacity}) + if c.capacity != oldCapacity { + updates = append(updates, capUpdate{c.node, oldCapacity, c.capacity}) } } - c.origCap = 0 } - pp.changed = nil + pp.tempState = nil if commit { pp.ccUpdateForced = true } @@ -443,16 +458,15 @@ func (pp *priorityPool) updateFlags(updates []capUpdate) { } // tryActivate tries to activate inactive nodes if possible -func (pp *priorityPool) tryActivate() []capUpdate { - var commit bool +func (pp *priorityPool) tryActivate(commit bool) []capUpdate { for pp.inactiveQueue.Size() > 0 { c := pp.inactiveQueue.PopItem().(*ppNodeInfo) - pp.markForChange(c) - pp.setCapacity(c, pp.minCap) + pp.setTempState(c) + pp.setTempCapacity(c, pp.minCap) c.bias = pp.activeBias pp.activeQueue.Push(c) pp.enforceLimits() - if c.capacity > 0 { + if c.tempCapacity > 0 { commit = true c.bias = 0 } else { @@ -486,7 +500,7 @@ func (pp *priorityPool) updatePriority(node *enode.Node) { } else { pp.inactiveQueue.Push(c, pp.inactivePriority(c)) } - updates = pp.tryActivate() + updates = pp.tryActivate(false) } // capacityCurve is a snapshot of the priority pool contents in a format that can efficiently From 46a3a27a5e716462f536724746b88f18cad362c8 Mon Sep 17 00:00:00 2001 From: Zsolt Felfoldi Date: Thu, 1 Apr 2021 13:32:58 +0200 Subject: [PATCH 26/27] les/vflux/server: changed log.Crit to log.Error --- les/vflux/server/prioritypool.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/les/vflux/server/prioritypool.go b/les/vflux/server/prioritypool.go index 9183c021be88..573a3570a4ee 100644 --- a/les/vflux/server/prioritypool.go +++ b/les/vflux/server/prioritypool.go @@ -340,7 +340,7 @@ func (pp *priorityPool) setTempState(c *ppNodeInfo) { } c.tempState = true if c.tempCapacity != c.capacity { // should never happen - log.Crit("tempCapacity != capacity when entering tempState") + log.Error("tempCapacity != capacity when entering tempState") } c.minTarget = pp.minCap c.stepDiv = pp.capacityStepDiv @@ -352,7 +352,7 @@ func (pp *priorityPool) setTempState(c *ppNodeInfo) { // state it should be called after setTempState and before finalizeChanges. 
func (pp *priorityPool) setTempCapacity(n *ppNodeInfo, cap uint64) { if !n.tempState { // should never happen - log.Crit("Node is not in temporary state") + log.Error("Node is not in temporary state") return } pp.activeCap += cap - n.tempCapacity From aca9e3a9e165f31334d45d6e5de8f300f7a4adde Mon Sep 17 00:00:00 2001 From: Zsolt Felfoldi Date: Thu, 1 Apr 2021 13:48:29 +0200 Subject: [PATCH 27/27] add vflux fuzzer to oss-fuzz --- oss-fuzz.sh | 1 + 1 file changed, 1 insertion(+) diff --git a/oss-fuzz.sh b/oss-fuzz.sh index ac93a5a4670e..f8152f0fad01 100644 --- a/oss-fuzz.sh +++ b/oss-fuzz.sh @@ -102,6 +102,7 @@ compile_fuzzer tests/fuzzers/stacktrie Fuzz fuzzStackTrie compile_fuzzer tests/fuzzers/difficulty Fuzz fuzzDifficulty compile_fuzzer tests/fuzzers/abi Fuzz fuzzAbi compile_fuzzer tests/fuzzers/les Fuzz fuzzLes +compile_fuzzer tests/fuzzers/vflux FuzzClientPool fuzzClientPool compile_fuzzer tests/fuzzers/bls12381 FuzzG1Add fuzz_g1_add compile_fuzzer tests/fuzzers/bls12381 FuzzG1Mul fuzz_g1_mul
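
For reference, each compile_fuzzer line takes the package path, the exported fuzz entry point and the name used for the resulting fuzzer, so the new line builds a fuzzClientPool target from tests/fuzzers/vflux. Below is a minimal sketch of the go-fuzz style harness shape such an entry point implies; the package name, the input size bound and the commented-out event-decoding helper are illustrative assumptions, not the actual harness added by this patch series.

package vflux

// FuzzClientPool is the exported go-fuzz entry point referenced by oss-fuzz.sh.
// A real harness would decode the input bytes into a sequence of client pool
// events (connect, disconnect, balance and capacity changes) and replay them
// against a vflux/server ClientPool backed by an in-memory database.
func FuzzClientPool(input []byte) int {
	if len(input) > 10000 {
		return -1 // oversized input: don't add it to the corpus
	}
	// fuzzClientPool(input) // hypothetical helper driving the pool
	return 0 // valid input, keep with normal priority
}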