Skip to content

Commit

Permalink
feat: use CommonService in peerConnector
Browse files Browse the repository at this point in the history
  • Loading branch information
harsh-98 committed Sep 14, 2023
1 parent 020ab99 commit 70f555e
Show file tree
Hide file tree
Showing 2 changed files with 54 additions and 77 deletions.
129 changes: 53 additions & 76 deletions waku/v2/peermanager/peer_connector.go
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,7 @@ import (
"context"
"errors"
"math/rand"
"sync"
"sync/atomic"
"time"

"github.com/ethereum/go-ethereum/p2p/enode"
Expand All @@ -17,8 +17,7 @@ import (
"github.com/libp2p/go-libp2p/p2p/discovery/backoff"
"github.com/waku-org/go-waku/logging"
wps "github.com/waku-org/go-waku/waku/v2/peerstore"

"sync/atomic"
"github.com/waku-org/go-waku/waku/v2/protocol"

"go.uber.org/zap"

Expand All @@ -35,22 +34,16 @@ type PeerData struct {
// PeerConnectionStrategy is a utility to connect to peers,
// but only if we have not recently tried connecting to them already
type PeerConnectionStrategy struct {
sync.RWMutex

cache *lru.TwoQueueCache
host host.Host
pm *PeerManager
cancel context.CancelFunc

paused atomic.Bool
cache *lru.TwoQueueCache
host host.Host
pm *PeerManager

wg sync.WaitGroup
dialTimeout time.Duration
dialCh chan peer.AddrInfo
paused atomic.Bool
dialTimeout time.Duration
*protocol.CommonService[peer.AddrInfo]
subscriptions []<-chan PeerData

backoff backoff.BackoffFactory
mux sync.Mutex
logger *zap.Logger
}

Expand All @@ -77,12 +70,12 @@ func NewPeerConnectionStrategy(pm *PeerManager,
}
//
pc := &PeerConnectionStrategy{
cache: cache,
wg: sync.WaitGroup{},
dialTimeout: dialTimeout,
pm: pm,
backoff: getBackOff(),
logger: logger.Named("discovery-connector"),
cache: cache,
dialTimeout: dialTimeout,
CommonService: protocol.NewCommonService[peer.AddrInfo](),
pm: pm,
backoff: getBackOff(),
logger: logger.Named("discovery-connector"),
}
pm.SetPeerConnector(pc)
return pc, nil
Expand All @@ -95,36 +88,40 @@ type connCacheData struct {

// Subscribe receives channels on which discovered peers should be pushed
func (c *PeerConnectionStrategy) Subscribe(ctx context.Context, ch <-chan PeerData) {
if c.cancel != nil {
c.wg.Add(1)
go func() {
defer c.wg.Done()
c.consumeSubscription(ctx, ch)
}()
} else {
// if not running yet, store the subscription and return
if err := c.ErrOnNotRunning(); err != nil {
c.Lock()
c.subscriptions = append(c.subscriptions, ch)
c.Unlock()
return
}
// if running start a goroutine to consume the subscription
c.WaitGroup().Add(1)
go func() {
defer c.WaitGroup().Done()
c.consumeSubscription(ch)
}()
}

func (c *PeerConnectionStrategy) consumeSubscription(ctx context.Context, ch <-chan PeerData) {
func (c *PeerConnectionStrategy) consumeSubscription(ch <-chan PeerData) {
for {
// Exit the loop promptly once the peerConnector's context is cancelled.
select {
case <-ctx.Done():
case <-c.Context().Done():
return
default:
}
//
if !c.isPaused() {
select {
case <-ctx.Done():
case <-c.Context().Done():
return
case p, ok := <-ch:
if !ok {
return
}
c.pm.AddDiscoveredPeer(p)
c.publishWork(ctx, p.AddrInfo)
c.PushToChan(p.AddrInfo)
case <-time.After(1 * time.Second):
// This timeout prevents the goroutine from blocking indefinitely on the
// channel receive while the connector is running.
break
Expand All @@ -143,48 +140,36 @@ func (c *PeerConnectionStrategy) SetHost(h host.Host) {
// Start attempts to connect to the peers passed in by peerCh.
// Will not connect to peers if they are within the backoff period.
func (c *PeerConnectionStrategy) Start(ctx context.Context) error {
if c.cancel != nil {
return errors.New("already started")
}

ctx, cancel := context.WithCancel(ctx)
c.cancel = cancel
c.dialCh = make(chan peer.AddrInfo)
return c.CommonService.Start(ctx, c.start)

c.wg.Add(2)
go c.shouldDialPeers(ctx)
go c.dialPeers(ctx)
}
func (c *PeerConnectionStrategy) start() error {
c.WaitGroup().Add(2)
go c.shouldDialPeers()
go c.dialPeers()

c.consumeSubscriptions(ctx)
c.consumeSubscriptions()

return nil
}

// Stop terminates the peer-connector
func (c *PeerConnectionStrategy) Stop() {
if c.cancel == nil {
return
}

c.cancel()
c.cancel = nil
c.wg.Wait()

close(c.dialCh)
c.CommonService.Stop(func() {})
}

// isPaused reports whether the peer connector is currently paused.
// It only reads the atomic flag, so it is safe to call from any goroutine
// without holding a lock.
func (c *PeerConnectionStrategy) isPaused() bool {
return c.paused.Load()
}

func (c *PeerConnectionStrategy) shouldDialPeers(ctx context.Context) {
defer c.wg.Done()
func (c *PeerConnectionStrategy) shouldDialPeers() {
defer c.WaitGroup().Done()

ticker := time.NewTicker(1 * time.Second)
defer ticker.Stop()
for {
select {
case <-ctx.Done():
case <-c.Context().Done():
return
case <-ticker.C:
_, outRelayPeers := c.pm.getRelayPeers()
Expand All @@ -194,32 +179,24 @@ func (c *PeerConnectionStrategy) shouldDialPeers(ctx context.Context) {
}

// It can happen that Subscribe is called before the peerConnector has started, so such subscriptions are stored in the subscriptions array and consumed here once the connector starts.
func (c *PeerConnectionStrategy) consumeSubscriptions(ctx context.Context) {
func (c *PeerConnectionStrategy) consumeSubscriptions() {
for _, subs := range c.subscriptions {
c.wg.Add(1)
c.WaitGroup().Add(1)
go func(s <-chan PeerData) {
defer c.wg.Done()
c.consumeSubscription(ctx, s)
defer c.WaitGroup().Done()
c.consumeSubscription(s)
}(subs)
}
c.subscriptions = nil
}

// publishWork hands a discovered peer's address info to the dialing
// goroutine via dialCh. The send blocks until a consumer is ready;
// if ctx is cancelled first, the peer is dropped and the call returns.
func (c *PeerConnectionStrategy) publishWork(ctx context.Context, p peer.AddrInfo) {
select {
case c.dialCh <- p:
case <-ctx.Done():
return
}
}

const maxActiveDials = 5

// c.cache is thread safe
// only reason why mutex is used: if canDialPeer is queried twice for the same peer.
func (c *PeerConnectionStrategy) canDialPeer(pi peer.AddrInfo) bool {
c.mux.Lock()
defer c.mux.Unlock()
c.Lock()
defer c.Unlock()
val, ok := c.cache.Get(pi.ID)
var cachedPeer *connCacheData
if ok {
Expand All @@ -239,7 +216,7 @@ func (c *PeerConnectionStrategy) canDialPeer(pi peer.AddrInfo) bool {
}

func (c *PeerConnectionStrategy) dialPeers(ctx context.Context) {
defer c.wg.Done()
defer c.WaitGroup().Done()

maxGoRoutines := c.pm.OutRelayPeersTarget
if maxGoRoutines > maxActiveDials {
Expand All @@ -250,7 +227,7 @@ func (c *PeerConnectionStrategy) dialPeers(ctx context.Context) {

for {
select {
case pi, ok := <-c.dialCh:
case pi, ok := <-c.GetListeningChan():
if !ok {
return
}
Expand All @@ -262,18 +239,18 @@ func (c *PeerConnectionStrategy) dialPeers(ctx context.Context) {

if c.canDialPeer(pi) {
sem <- struct{}{}
c.wg.Add(1)
go c.dialPeer(ctx, pi, sem)
c.WaitGroup().Add(1)
go c.dialPeer(pi, sem)
}
case <-ctx.Done():
return
}
}
}

func (c *PeerConnectionStrategy) dialPeer(ctx context.Context, pi peer.AddrInfo, sem chan struct{}) {
defer c.wg.Done()
ctx, cancel := context.WithTimeout(ctx, c.dialTimeout)
func (c *PeerConnectionStrategy) dialPeer(pi peer.AddrInfo, sem chan struct{}) {
defer c.WaitGroup().Done()
ctx, cancel := context.WithTimeout(c.Context(), c.dialTimeout)
defer cancel()
err := c.host.Connect(ctx, pi)
if err != nil && !errors.Is(err, context.Canceled) {
Expand Down
2 changes: 1 addition & 1 deletion waku/v2/peermanager/peer_manager.go
Original file line number Diff line number Diff line change
Expand Up @@ -180,7 +180,7 @@ func (pm *PeerManager) connectToPeers(peers peer.IDSlice) {
ID: peerID,
Addrs: pm.host.Peerstore().Addrs(peerID),
}
pm.peerConnector.publishWork(pm.ctx, peerInfo)
pm.peerConnector.PushToChan(peerInfo)
}
}

Expand Down

0 comments on commit 70f555e

Please sign in to comment.