from ast import literal_eval
import asyncio
import logging
import random
from typing import Any, Dict, Iterable, List, Sequence, Set

from libp2p.network.stream.exceptions import StreamClosed
from libp2p.peer.id import ID
from libp2p.pubsub import floodsub
from libp2p.typing import TProtocol
from libp2p.utils import encode_varint_prefixed

from .mcache import MessageCache
from .pb import rpc_pb2
from .pubsub import Pubsub
from .pubsub_router_interface import IPubsubRouter

PROTOCOL_ID = TProtocol("/meshsub/1.0.0")

logger = logging.getLogger("libp2p.pubsub.gossipsub")


class GossipSub(IPubsubRouter):
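    """GossipSub router for the ``/meshsub/1.0.0`` pubsub protocol.

    Each joined topic is backed by a ``mesh`` of grafted peers kept between
    ``degree_low`` and ``degree_high`` in size; topics we publish to without
    joining are tracked in ``fanout``. A periodic heartbeat rebalances the
    mesh and fanout and gossips recent message ids (IHAVE/IWANT) to peers
    outside the mesh.

    A construction sketch (the parameter values here are illustrative, not
    defaults mandated by this class):

        gossipsub = GossipSub(
            protocols=[PROTOCOL_ID],
            degree=6,
            degree_low=4,
            degree_high=12,
            time_to_live=30,
        )
    """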

    protocols: List[TProtocol]
    pubsub: Pubsub

    degree: int
    degree_high: int
    degree_low: int

    time_to_live: int

    mesh: Dict[str, List[ID]]
    fanout: Dict[str, List[ID]]

    peers_to_protocol: Dict[ID, TProtocol]

    time_since_last_publish: Dict[str, int]

    peers_gossipsub: List[ID]
    peers_floodsub: List[ID]

    mcache: MessageCache

    heartbeat_initial_delay: float
    heartbeat_interval: int

    def __init__(
        self,
        protocols: Sequence[TProtocol],
        degree: int,
        degree_low: int,
        degree_high: int,
        time_to_live: int,
        gossip_window: int = 3,
        gossip_history: int = 5,
        heartbeat_initial_delay: float = 0.1,
        heartbeat_interval: int = 120,
    ) -> None:
        self.protocols = list(protocols)
        self.pubsub = None

        # Store target degree, upper degree bound, and lower degree bound
        self.degree = degree
        self.degree_low = degree_low
        self.degree_high = degree_high

        # Store time to live (for topics in fanout)
        self.time_to_live = time_to_live

        # Create topic --> list of peers mappings
        self.mesh = {}
        self.fanout = {}

        # Create peer --> protocol mapping
        self.peers_to_protocol = {}

        # Create topic --> time since last publish map
        self.time_since_last_publish = {}

        self.peers_gossipsub = []
        self.peers_floodsub = []

        # Create message cache
        self.mcache = MessageCache(gossip_window, gossip_history)

        # Create heartbeat timer
        self.heartbeat_initial_delay = heartbeat_initial_delay
        self.heartbeat_interval = heartbeat_interval

    # Interface functions

    def get_protocols(self) -> List[TProtocol]:
        """
        :return: the list of protocols supported by the router
        """
        return self.protocols

    def attach(self, pubsub: Pubsub) -> None:
        """
        Attach is invoked by the PubSub constructor to attach the router to a
        freshly initialized PubSub instance.

        :param pubsub: pubsub instance to attach to
        """
        self.pubsub = pubsub

        logger.debug("attached to pubsub")

        # Start heartbeat now that we have a pubsub instance
        asyncio.ensure_future(self.heartbeat())

    def add_peer(self, peer_id: ID, protocol_id: TProtocol) -> None:
        """
        Notifies the router that a new peer has been connected.

        :param peer_id: id of peer to add
        :param protocol_id: router protocol the peer speaks, e.g., floodsub, gossipsub
        """
        logger.debug("adding peer %s with protocol %s", peer_id, protocol_id)

        if protocol_id == PROTOCOL_ID:
            self.peers_gossipsub.append(peer_id)
        elif protocol_id == floodsub.PROTOCOL_ID:
            self.peers_floodsub.append(peer_id)
        else:
            # We should never reach here: `protocol_id` was registered with
            # multistream-select by our pubsub instance, yet it is not a protocol
            # gossipsub supports. If this happens, gossipsub was probably
            # registered under a wrong `protocol_id` (or a wrong version)
            # in multistream-select.
            raise Exception(
                f"This should not happen. Protocol={protocol_id} is not supported."
            )
        self.peers_to_protocol[peer_id] = protocol_id

    def remove_peer(self, peer_id: ID) -> None:
        """
        Notifies the router that a peer has been disconnected.

        :param peer_id: id of peer to remove
        """
        logger.debug("removing peer %s", peer_id)

        if peer_id in self.peers_gossipsub:
            self.peers_gossipsub.remove(peer_id)
        elif peer_id in self.peers_floodsub:
            self.peers_floodsub.remove(peer_id)

        # Iterate over copies, since entries may be deleted as we go
        for topic in list(self.mesh):
            if peer_id in self.mesh[topic]:
                # Delete the entry if no other peers left
                if len(self.mesh[topic]) == 1:
                    del self.mesh[topic]
                else:
                    self.mesh[topic].remove(peer_id)
        for topic in list(self.fanout):
            if peer_id in self.fanout[topic]:
                # Delete the entry if no other peers left
                if len(self.fanout[topic]) == 1:
                    del self.fanout[topic]
                else:
                    self.fanout[topic].remove(peer_id)

        self.peers_to_protocol.pop(peer_id, None)

    async def handle_rpc(self, rpc: rpc_pb2.RPC, sender_peer_id: ID) -> None:
        """
        Invoked to process control messages in the RPC envelope. It is invoked
        after subscriptions and payload messages have been processed.

        :param rpc: RPC message
        :param sender_peer_id: id of the peer who sent the message
        """
        control_message = rpc.control

        # Relay each rpc control message to the appropriate handler
        if control_message.ihave:
            for ihave in control_message.ihave:
                await self.handle_ihave(ihave, sender_peer_id)
        if control_message.iwant:
            for iwant in control_message.iwant:
                await self.handle_iwant(iwant, sender_peer_id)
        if control_message.graft:
            for graft in control_message.graft:
                await self.handle_graft(graft, sender_peer_id)
        if control_message.prune:
            for prune in control_message.prune:
                await self.handle_prune(prune, sender_peer_id)

    async def publish(self, msg_forwarder: ID, pubsub_msg: rpc_pb2.Message) -> None:
        """Invoked to forward a new message that has been validated."""
        self.mcache.put(pubsub_msg)

        peers_gen = self._get_peers_to_send(
            pubsub_msg.topicIDs,
            msg_forwarder=msg_forwarder,
            origin=ID(pubsub_msg.from_id),
        )
        rpc_msg = rpc_pb2.RPC(publish=[pubsub_msg])

        logger.debug("publishing message %s", pubsub_msg)

        for peer_id in peers_gen:
            stream = self.pubsub.peers[peer_id]
            # FIXME: We should add a `WriteMsg` helper to write delimited messages.
            # Ref: https://github.com/libp2p/go-libp2p-pubsub/blob/master/comm.go#L107
            try:
                await stream.write(encode_varint_prefixed(rpc_msg.SerializeToString()))
            except StreamClosed:
                logger.debug("Failed to publish message to %s: stream closed", peer_id)
                self.pubsub._handle_dead_peer(peer_id)

    def _get_peers_to_send(
        self, topic_ids: Iterable[str], msg_forwarder: ID, origin: ID
    ) -> Iterable[ID]:
        """
        Get the eligible peers to send the data to.

        :param msg_forwarder: the peer id of the peer who forwarded the message to us.
        :param origin: peer id of the peer the message originated from.
        :return: a generator of the peer ids we send data to.
        """
        send_to: Set[ID] = set()
        for topic in topic_ids:
            if topic not in self.pubsub.peer_topics:
                continue

            # floodsub peers
            for peer_id in self.pubsub.peer_topics[topic]:
                # FIXME: `gossipsub.peers_floodsub` can be changed to `gossipsub.peers` in go.
                #   This will improve the efficiency when searching for a peer's protocol id.
                if peer_id in self.peers_floodsub:
                    send_to.add(peer_id)

            # gossipsub peers
            in_topic_gossipsub_peers: List[ID]
            if topic in self.mesh:
                in_topic_gossipsub_peers = self.mesh[topic]
            else:
                if (topic not in self.fanout) or (len(self.fanout[topic]) == 0):
                    # If no peers in fanout, choose some peers from gossipsub peers in topic.
                    self.fanout[topic] = self._get_in_topic_gossipsub_peers_from_minus(
                        topic, self.degree, []
                    )
                in_topic_gossipsub_peers = self.fanout[topic]
            for peer_id in in_topic_gossipsub_peers:
                send_to.add(peer_id)
        # Exclude `msg_forwarder` and `origin`
        yield from send_to.difference([msg_forwarder, origin])

    async def join(self, topic: str) -> None:
        # Note: the comments here are the near-exact algorithm description from the spec
        """
        Join notifies the router that we want to receive and forward messages
        in a topic. It is invoked after the subscription announcement.

        :param topic: topic to join
        """
        logger.debug("joining topic %s", topic)

        if topic in self.mesh:
            return
        # Create mesh[topic] if it does not yet exist
        self.mesh[topic] = []

        topic_in_fanout: bool = topic in self.fanout
        fanout_peers: List[ID] = self.fanout[topic] if topic_in_fanout else []
        fanout_size = len(fanout_peers)
        if not topic_in_fanout or fanout_size < self.degree:
            # There are fewer than D peers (let this number be x)
            # in the fanout for a topic (or the topic is not in the fanout).
            # Select the remaining number of peers (D - x) from peers.gossipsub[topic].
            if topic in self.pubsub.peer_topics:
                selected_peers = self._get_in_topic_gossipsub_peers_from_minus(
                    topic, self.degree - fanout_size, fanout_peers
                )
                # Combine fanout peers with selected peers
                fanout_peers += selected_peers

        # Add fanout peers to mesh and notify them with a GRAFT(topic) control message.
        for peer in fanout_peers:
            if peer not in self.mesh[topic]:
                self.mesh[topic].append(peer)
                await self.emit_graft(topic, peer)

        self.fanout.pop(topic, None)

    async def leave(self, topic: str) -> None:
        # Note: the comments here are the near-exact algorithm description from the spec
        """
        Leave notifies the router that we are no longer interested in a topic.
        It is invoked after the unsubscription announcement.

        :param topic: topic to leave
        """
        logger.debug("leaving topic %s", topic)

        if topic not in self.mesh:
            return
        # Notify the peers in mesh[topic] with a PRUNE(topic) message
        for peer in self.mesh[topic]:
            await self.emit_prune(topic, peer)

        # Forget mesh[topic]
        del self.mesh[topic]

    # Heartbeat
    async def heartbeat(self) -> None:
        """
        Call individual heartbeats.

        Note: the heartbeats are called with awaits because each heartbeat depends on the
        state changes in the preceding heartbeat.
        """
        # Start after a delay. Ref: https://github.com/libp2p/go-libp2p-pubsub/blob/01b9825fbee1848751d90a8469e3f5f43bac8466/gossipsub.go#L410  # noqa: E501
        await asyncio.sleep(self.heartbeat_initial_delay)
        while True:
            await self.mesh_heartbeat()
            await self.fanout_heartbeat()
            await self.gossip_heartbeat()

            await asyncio.sleep(self.heartbeat_interval)

    async def mesh_heartbeat(self) -> None:
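        """Maintain the mesh for each joined topic: GRAFT new peers when the
        mesh falls below ``degree_low``, and PRUNE back down to ``degree``
        when it exceeds ``degree_high``."""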
        # Note: the comments here are the exact pseudocode from the spec
        for topic in self.mesh:
            # Skip if no peers have subscribed to the topic
            if topic not in self.pubsub.peer_topics:
                continue

            num_mesh_peers_in_topic = len(self.mesh[topic])
            if num_mesh_peers_in_topic < self.degree_low:
                # Select D - |mesh[topic]| peers from peers.gossipsub[topic] - mesh[topic]
                selected_peers = self._get_in_topic_gossipsub_peers_from_minus(
                    topic, self.degree - num_mesh_peers_in_topic, self.mesh[topic]
                )

                for peer in selected_peers:
                    # Add peer to mesh[topic]
                    self.mesh[topic].append(peer)

                    # Emit GRAFT(topic) control message to peer
                    await self.emit_graft(topic, peer)

            if num_mesh_peers_in_topic > self.degree_high:
                # Select |mesh[topic]| - D peers from mesh[topic]
                selected_peers = GossipSub.select_from_minus(
                    num_mesh_peers_in_topic - self.degree, self.mesh[topic], []
                )
                for peer in selected_peers:
                    # Remove peer from mesh[topic]
                    self.mesh[topic].remove(peer)

                    # Emit PRUNE(topic) control message to peer
                    await self.emit_prune(topic, peer)

    async def fanout_heartbeat(self) -> None:
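        """Expire fanout topics that no longer have subscribers or whose
        time-to-live has elapsed, and top the remaining fanout sets back up
        to ``degree`` peers."""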
        # Note: the comments here are the exact pseudocode from the spec
        # Iterate over a copy, since topic entries may be deleted as we go
        for topic in list(self.fanout):
            # Delete topic entry if it's not in `pubsub.peer_topics`
            # or if its time-since-last-published > ttl
            # TODO: there's no way time_since_last_publish gets set anywhere yet
            if (
                topic not in self.pubsub.peer_topics
                or self.time_since_last_publish.get(topic, 0) > self.time_to_live
            ):
                # Remove topic from fanout
                del self.fanout[topic]
                self.time_since_last_publish.pop(topic, None)
            else:
                # Check whether our peers are still in the topic
                # ref: https://github.com/libp2p/go-libp2p-pubsub/blob/01b9825fbee1848751d90a8469e3f5f43bac8466/gossipsub.go#L498-L504  # noqa: E501
                # Iterate over a copy, since peers may be removed as we go
                for peer in list(self.fanout[topic]):
                    if peer not in self.pubsub.peer_topics[topic]:
                        self.fanout[topic].remove(peer)
                num_fanout_peers_in_topic = len(self.fanout[topic])

                # If |fanout[topic]| < D
                if num_fanout_peers_in_topic < self.degree:
                    # Select D - |fanout[topic]| peers from peers.gossipsub[topic] - fanout[topic]
                    selected_peers = self._get_in_topic_gossipsub_peers_from_minus(
                        topic,
                        self.degree - num_fanout_peers_in_topic,
                        self.fanout[topic],
                    )
                    # Add the peers to fanout[topic]
                    self.fanout[topic].extend(selected_peers)

    async def gossip_heartbeat(self) -> None:
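        """Gossip recently seen message ids: for each topic in the mesh (and
        fanout), emit IHAVE for the current mcache window to up to ``degree``
        peers outside the mesh (or fanout), then shift the cache."""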
        for topic in self.mesh:
            msg_ids = self.mcache.window(topic)
            if msg_ids:
                # Get all pubsub peers in a topic and only add them if they are gossipsub peers too
                if topic in self.pubsub.peer_topics:
                    # Select D peers from peers.gossipsub[topic]
                    peers_to_emit_ihave_to = self._get_in_topic_gossipsub_peers_from_minus(
                        topic, self.degree, []
                    )

                    for peer in peers_to_emit_ihave_to:
                        if peer not in self.mesh[topic]:
                            msg_id_strs = [str(msg_id) for msg_id in msg_ids]
                            await self.emit_ihave(topic, msg_id_strs, peer)

        # TODO: Refactor and dedup. This section is roughly the same as the above.
        # Do the same for fanout, for all topics not already hit in mesh
        for topic in self.fanout:
            if topic not in self.mesh:
                msg_ids = self.mcache.window(topic)
                if msg_ids:
                    # Get all pubsub peers in topic and only add if they are gossipsub peers also
                    if topic in self.pubsub.peer_topics:
                        # Select D peers from peers.gossipsub[topic]
                        peers_to_emit_ihave_to = self._get_in_topic_gossipsub_peers_from_minus(
                            topic, self.degree, []
                        )
                        for peer in peers_to_emit_ihave_to:
                            if peer not in self.fanout[topic]:
                                msg_id_strs = [str(msg_id) for msg_id in msg_ids]
                                await self.emit_ihave(topic, msg_id_strs, peer)

        self.mcache.shift()

    @staticmethod
    def select_from_minus(
        num_to_select: int, pool: Sequence[Any], minus: Sequence[Any]
    ) -> List[Any]:
        """
        Randomly select at most ``num_to_select`` elements from the set (pool - minus).

        :param num_to_select: number of elements to randomly select
        :param pool: list of items to select from (excluding elements in minus)
        :param minus: elements to be excluded from selection pool
        :return: list of selected elements
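
        Example (illustrative): ``select_from_minus(2, [1, 2, 3, 4], [2])``
        randomly draws two elements from ``[1, 3, 4]``, e.g. ``[4, 1]``.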
        """
        # Create selection pool, which is selection_pool = pool - minus
        if minus:
            # Create a new selection pool by removing elements of minus
            selection_pool: List[Any] = [x for x in pool if x not in minus]
        else:
            # Don't create a new selection_pool if we are not subtracting anything
            selection_pool = list(pool)

        # If num_to_select > size(selection_pool), then return selection_pool (which has the most
        # possible elements s.t. the number of elements is less than num_to_select)
        if num_to_select > len(selection_pool):
            return selection_pool

        # Random selection
        selection: List[Any] = random.sample(selection_pool, num_to_select)

        return selection

    def _get_in_topic_gossipsub_peers_from_minus(
        self, topic: str, num_to_select: int, minus: Sequence[ID]
    ) -> List[ID]:
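        """Select up to ``num_to_select`` gossipsub peers subscribed to
        ``topic``, excluding those in ``minus``."""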
        gossipsub_peers_in_topic = [
            peer_id
            for peer_id in self.pubsub.peer_topics[topic]
            if peer_id in self.peers_gossipsub
        ]
        return self.select_from_minus(
            num_to_select, gossipsub_peers_in_topic, list(minus)
        )

    # RPC handlers

    async def handle_ihave(
        self, ihave_msg: rpc_pb2.ControlIHave, sender_peer_id: ID
    ) -> None:
        """Checks the seen set and requests unknown messages with an IWANT
        message."""
        # Get the (seqno, from) tuples of all seen messages from the seen_messages cache
        seen_seqnos_and_peers = list(self.pubsub.seen_messages.keys())

        # Add all unknown message ids (ids that appear in ihave_msg but not in seen_seqnos) to list
        # of messages we want to request
        # FIXME: Update type of message ID
        msg_ids_wanted: List[Any] = [
            msg_id
            for msg_id in ihave_msg.messageIDs
            if literal_eval(msg_id) not in seen_seqnos_and_peers
        ]

        # Request messages with IWANT message
        if msg_ids_wanted:
            await self.emit_iwant(msg_ids_wanted, sender_peer_id)

    async def handle_iwant(
        self, iwant_msg: rpc_pb2.ControlIWant, sender_peer_id: ID
    ) -> None:
        """Forwards all requested messages that are present in mcache to the
        requesting peer."""
        # FIXME: Update type of message ID
        # FIXME: Find a better way to parse the msg ids
        msg_ids: List[Any] = [literal_eval(msg) for msg in iwant_msg.messageIDs]
        msgs_to_forward: List[rpc_pb2.Message] = []
        for msg_id_iwant in msg_ids:
            # Check if the wanted message ID is present in mcache
            msg: rpc_pb2.Message = self.mcache.get(msg_id_iwant)

            # Cache hit
            if msg:
                # Add message to list of messages to forward to requesting peers
                msgs_to_forward.append(msg)

        # Forward messages to requesting peer.
        # Should this just be publishing? No, because then the messages would be
        # forwarded to peers in the topics contained in the messages.
        # We should:
        # 1) Package these messages into a single packet
        packet: rpc_pb2.RPC = rpc_pb2.RPC()

        packet.publish.extend(msgs_to_forward)

        # 2) Serialize that packet
        rpc_msg: bytes = packet.SerializeToString()

        # 3) Get the stream to this peer
        peer_stream = self.pubsub.peers[sender_peer_id]

        # 4) And write the packet to the stream
        try:
            await peer_stream.write(encode_varint_prefixed(rpc_msg))
        except StreamClosed:
            logger.debug(
                "Failed to respond to iwant request from %s: stream closed",
                sender_peer_id,
            )
            self.pubsub._handle_dead_peer(sender_peer_id)

    async def handle_graft(
        self, graft_msg: rpc_pb2.ControlGraft, sender_peer_id: ID
    ) -> None:
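        """Add the grafting peer to ``mesh[topic]``, or answer with PRUNE if
        we are not in that topic's mesh ourselves."""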
        topic: str = graft_msg.topicID

        # Add peer to mesh for topic
        if topic in self.mesh:
            if sender_peer_id not in self.mesh[topic]:
                self.mesh[topic].append(sender_peer_id)
        else:
            # Respond with PRUNE if not subscribed to the topic
            await self.emit_prune(topic, sender_peer_id)

    async def handle_prune(
        self, prune_msg: rpc_pb2.ControlPrune, sender_peer_id: ID
    ) -> None:
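        """Remove the pruning peer from ``mesh[topic]`` if it is present."""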
        topic: str = prune_msg.topicID

        # Remove peer from mesh for topic, if peer is in topic
        if topic in self.mesh and sender_peer_id in self.mesh[topic]:
            self.mesh[topic].remove(sender_peer_id)

    # RPC emitters

    async def emit_ihave(self, topic: str, msg_ids: Any, to_peer: ID) -> None:
        """Emit ihave message, sent to to_peer, for topic and msg_ids."""

        ihave_msg: rpc_pb2.ControlIHave = rpc_pb2.ControlIHave()
        ihave_msg.messageIDs.extend(msg_ids)
        ihave_msg.topicID = topic

        control_msg: rpc_pb2.ControlMessage = rpc_pb2.ControlMessage()
        control_msg.ihave.extend([ihave_msg])

        await self.emit_control_message(control_msg, to_peer)

    async def emit_iwant(self, msg_ids: Any, to_peer: ID) -> None:
        """Emit iwant message, sent to to_peer, for msg_ids."""

        iwant_msg: rpc_pb2.ControlIWant = rpc_pb2.ControlIWant()
        iwant_msg.messageIDs.extend(msg_ids)

        control_msg: rpc_pb2.ControlMessage = rpc_pb2.ControlMessage()
        control_msg.iwant.extend([iwant_msg])

        await self.emit_control_message(control_msg, to_peer)

    async def emit_graft(self, topic: str, to_peer: ID) -> None:
        """Emit graft message, sent to to_peer, for topic."""

        graft_msg: rpc_pb2.ControlGraft = rpc_pb2.ControlGraft()
        graft_msg.topicID = topic

        control_msg: rpc_pb2.ControlMessage = rpc_pb2.ControlMessage()
        control_msg.graft.extend([graft_msg])

        await self.emit_control_message(control_msg, to_peer)

    async def emit_prune(self, topic: str, to_peer: ID) -> None:
        """Emit prune message, sent to to_peer, for topic."""

        prune_msg: rpc_pb2.ControlPrune = rpc_pb2.ControlPrune()
        prune_msg.topicID = topic

        control_msg: rpc_pb2.ControlMessage = rpc_pb2.ControlMessage()
        control_msg.prune.extend([prune_msg])

        await self.emit_control_message(control_msg, to_peer)

    async def emit_control_message(
        self, control_msg: rpc_pb2.ControlMessage, to_peer: ID
    ) -> None:
        # Add control message to packet
        packet: rpc_pb2.RPC = rpc_pb2.RPC()
        packet.control.CopyFrom(control_msg)

        rpc_msg: bytes = packet.SerializeToString()

        # Get stream for peer from pubsub
        peer_stream = self.pubsub.peers[to_peer]

        # Write rpc to stream
        try:
            await peer_stream.write(encode_varint_prefixed(rpc_msg))
        except StreamClosed:
            logger.debug("Failed to emit control message to %s: stream closed", to_peer)
            self.pubsub._handle_dead_peer(to_peer)