import asyncio
import random

import pytest

from libp2p.peer.id import ID
from libp2p.tools.constants import GOSSIPSUB_PARAMS, GossipsubParams
from libp2p.tools.pubsub.utils import dense_connect, one_to_all_connect
from libp2p.tools.utils import connect
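

# NOTE: `hosts` and `pubsubs_gsub` are assumed to be pytest fixtures supplied
# by the suite's conftest: they spin up `num_hosts` libp2p hosts and wrap each
# one in a gossipsub-routed Pubsub instance configured with the parametrized
# `gossipsub_params`. Judging only from the fields exercised in this module,
# `GossipsubParams` has roughly this shape:
#
#     class GossipsubParams(NamedTuple):
#         degree: int
#         degree_low: int
#         degree_high: int
#         gossip_window: int
#         gossip_history: int
#         heartbeat_initial_delay: float
#         heartbeat_interval: float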


@pytest.mark.parametrize(
    "num_hosts, gossipsub_params",
    ((4, GossipsubParams(degree=4, degree_low=3, degree_high=5)),),
)
@pytest.mark.asyncio
async def test_join(num_hosts, hosts, pubsubs_gsub):
    gossipsubs = tuple(pubsub.router for pubsub in pubsubs_gsub)
    hosts_indices = list(range(num_hosts))

    topic = "test_join"
    central_node_index = 0
    # Remove the central host's index from the indices
    hosts_indices.remove(central_node_index)
    num_subscribed_peer = 2
    subscribed_peer_indices = random.sample(hosts_indices, num_subscribed_peer)

    # All pubsubs except the central node's subscribe to the topic
    for i in subscribed_peer_indices:
        await pubsubs_gsub[i].subscribe(topic)

    # Connect the central host to all other hosts
    await one_to_all_connect(hosts, central_node_index)

    # Wait 2 seconds for the heartbeat to allow the mesh to connect
    await asyncio.sleep(2)

    # Central node publishes to the topic, so that the topic
    # is added to the central node's fanout
    await pubsubs_gsub[central_node_index].publish(topic, b"data")

    # Check that the gossipsub of the central node has a fanout for the topic
    assert topic in gossipsubs[central_node_index].fanout
    # Check that the gossipsub of the central node does not have a mesh for the topic
    assert topic not in gossipsubs[central_node_index].mesh
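    # In gossipsub, `fanout` tracks topics we publish to without being
    # subscribed, while `mesh` tracks topics we have joined, so publishing
    # alone should populate only the fanout.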

    # Central node subscribes to the topic
    await pubsubs_gsub[central_node_index].subscribe(topic)

    await asyncio.sleep(2)

    # Check that the gossipsub of the central node no longer has a fanout
    # for the topic
    assert topic not in gossipsubs[central_node_index].fanout

    for i in hosts_indices:
        if i in subscribed_peer_indices:
            assert hosts[i].get_id() in gossipsubs[central_node_index].mesh[topic]
            assert hosts[central_node_index].get_id() in gossipsubs[i].mesh[topic]
        else:
            assert hosts[i].get_id() not in gossipsubs[central_node_index].mesh[topic]
            assert topic not in gossipsubs[i].mesh
@pytest.mark.parametrize("num_hosts", (1,))
|
2019-07-19 20:16:53 +08:00
|
|
|
@pytest.mark.asyncio
|
2019-08-01 00:09:09 +08:00
|
|
|
async def test_leave(pubsubs_gsub):
|
|
|
|
gossipsub = pubsubs_gsub[0].router
|
2019-07-19 20:16:53 +08:00
|
|
|
topic = "test_leave"
|
|
|
|
|
2019-07-26 18:35:25 +08:00
|
|
|
assert topic not in gossipsub.mesh
|
|
|
|
|
2019-07-19 20:16:53 +08:00
|
|
|
await gossipsub.join(topic)
|
|
|
|
assert topic in gossipsub.mesh
|
|
|
|
|
|
|
|
await gossipsub.leave(topic)
|
|
|
|
assert topic not in gossipsub.mesh
|
|
|
|
|
|
|
|
# Test re-leave
|
|
|
|
await gossipsub.leave(topic)
|
|
|
|
|
|
|
|
|
2019-08-01 00:09:09 +08:00
|
|
|
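

# NOTE: GRAFT and PRUNE are gossipsub's mesh-control messages: GRAFT asks a
# peer to add us to its mesh for a topic, and PRUNE asks it to drop us. The
# next two tests drive them directly via `emit_graft` and `emit_prune`.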
@pytest.mark.parametrize("num_hosts", (2,))
|
2019-07-21 21:22:20 +08:00
|
|
|
@pytest.mark.asyncio
|
2019-09-03 16:49:00 +08:00
|
|
|
async def test_handle_graft(pubsubs_gsub, hosts, event_loop, monkeypatch):
|
|
|
|
gossipsubs = tuple(pubsub.router for pubsub in pubsubs_gsub)
|
|
|
|
|
2019-07-21 21:22:20 +08:00
|
|
|
index_alice = 0
|
2019-08-01 12:05:28 +08:00
|
|
|
id_alice = hosts[index_alice].get_id()
|
2019-07-21 21:22:20 +08:00
|
|
|
index_bob = 1
|
2019-08-01 12:05:28 +08:00
|
|
|
id_bob = hosts[index_bob].get_id()
|
2019-08-01 00:09:09 +08:00
|
|
|
await connect(hosts[index_alice], hosts[index_bob])
|
2019-07-21 21:22:20 +08:00
|
|
|
|
|
|
|
# Wait 2 seconds for heartbeat to allow mesh to connect
|
|
|
|
await asyncio.sleep(2)
|
|
|
|
|
|
|
|
topic = "test_handle_graft"
|
|
|
|
# Only lice subscribe to the topic
|
|
|
|
await gossipsubs[index_alice].join(topic)
|
|
|
|
|
|
|
|
# Monkey patch bob's `emit_prune` function so we can
|
|
|
|
# check if it is called in `handle_graft`
|
|
|
|
event_emit_prune = asyncio.Event()
|
2019-08-01 06:00:12 +08:00
|
|
|
|
2019-07-21 21:22:20 +08:00
|
|
|
async def emit_prune(topic, sender_peer_id):
|
|
|
|
event_emit_prune.set()
|
|
|
|
|
2019-08-01 06:00:12 +08:00
|
|
|
monkeypatch.setattr(gossipsubs[index_bob], "emit_prune", emit_prune)
|
2019-07-21 21:22:20 +08:00
|
|
|
|
|
|
|
# Check that alice is bob's peer but not his mesh peer
|
|
|
|
assert id_alice in gossipsubs[index_bob].peers_gossipsub
|
|
|
|
assert topic not in gossipsubs[index_bob].mesh
|
2019-07-21 22:28:43 +08:00
|
|
|
|
2019-07-21 21:22:20 +08:00
|
|
|
await gossipsubs[index_alice].emit_graft(topic, id_bob)
|
|
|
|
|
|
|
|
# Check that `emit_prune` is called
|
2019-08-01 06:00:12 +08:00
|
|
|
await asyncio.wait_for(event_emit_prune.wait(), timeout=1, loop=event_loop)
|
2019-07-21 21:22:20 +08:00
|
|
|
assert event_emit_prune.is_set()
|
|
|
|
|
|
|
|
# Check that bob is alice's peer but not her mesh peer
|
|
|
|
assert topic in gossipsubs[index_alice].mesh
|
|
|
|
assert id_bob not in gossipsubs[index_alice].mesh[topic]
|
|
|
|
assert id_bob in gossipsubs[index_alice].peers_gossipsub
|
2019-07-21 22:28:43 +08:00
|
|
|
|
2019-07-21 21:22:20 +08:00
|
|
|
await gossipsubs[index_bob].emit_graft(topic, id_alice)
|
|
|
|
|
|
|
|
await asyncio.sleep(1)
|
|
|
|
|
|
|
|
# Check that bob is now alice's mesh peer
|
|
|
|
assert id_bob in gossipsubs[index_alice].mesh[topic]
|
|
|
|
|
|
|
|
|
2019-08-01 00:09:09 +08:00
|
|
|


@pytest.mark.parametrize(
    "num_hosts, gossipsub_params", ((2, GossipsubParams(heartbeat_interval=3)),)
)
@pytest.mark.asyncio
async def test_handle_prune(pubsubs_gsub, hosts):
    gossipsubs = tuple(pubsub.router for pubsub in pubsubs_gsub)

    index_alice = 0
    id_alice = hosts[index_alice].get_id()
    index_bob = 1
    id_bob = hosts[index_bob].get_id()

    topic = "test_handle_prune"
    for pubsub in pubsubs_gsub:
        await pubsub.subscribe(topic)

    await connect(hosts[index_alice], hosts[index_bob])

    # Wait for the heartbeat to allow the mesh to connect
    await asyncio.sleep(1)

    # Check that they are each other's mesh peer
    assert id_alice in gossipsubs[index_bob].mesh[topic]
    assert id_bob in gossipsubs[index_alice].mesh[topic]

    # Alice emits a prune message to bob; alice should be removed
    # from bob's mesh peers
    await gossipsubs[index_alice].emit_prune(topic, id_bob)
    # `emit_prune` does not remove bob from alice's mesh peers
    assert id_bob in gossipsubs[index_alice].mesh[topic]

    # NOTE: We increase `heartbeat_interval` to 3 seconds so that bob will not
    # add alice back to his mesh after the heartbeat.
    # Wait for bob to `handle_prune`
    await asyncio.sleep(0.1)

    # Check that alice is no longer bob's mesh peer
    assert id_alice not in gossipsubs[index_bob].mesh[topic]
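

# NOTE: `dense_connect` (from `libp2p.tools.pubsub.utils`) is assumed to wire
# the hosts into a well-connected random graph, giving the heartbeat enough
# links to build a mesh for the topic.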
@pytest.mark.parametrize("num_hosts", (10,))
|
2019-05-07 11:44:13 +08:00
|
|
|
@pytest.mark.asyncio
|
2019-08-01 00:09:09 +08:00
|
|
|
async def test_dense(num_hosts, pubsubs_gsub, hosts):
|
2019-05-07 11:44:13 +08:00
|
|
|
num_msgs = 5
|
|
|
|
|
|
|
|
# All pubsub subscribe to foobar
|
|
|
|
queues = []
|
2019-08-01 00:09:09 +08:00
|
|
|
for pubsub in pubsubs_gsub:
|
2019-05-07 11:44:13 +08:00
|
|
|
q = await pubsub.subscribe("foobar")
|
|
|
|
|
|
|
|
# Add each blocking queue to an array of blocking queues
|
|
|
|
queues.append(q)
|
|
|
|
|
2019-08-01 00:09:09 +08:00
|
|
|
# Densely connect libp2p hosts in a random way
|
|
|
|
await dense_connect(hosts)
|
2019-05-07 11:44:13 +08:00
|
|
|
|
|
|
|
# Wait 2 seconds for heartbeat to allow mesh to connect
|
|
|
|
await asyncio.sleep(2)
|
|
|
|
|
|
|
|
for i in range(num_msgs):
|
2019-08-01 06:00:12 +08:00
|
|
|
msg_content = b"foo " + i.to_bytes(1, "big")
|
2019-05-07 11:44:13 +08:00
|
|
|
|
|
|
|
# randomly pick a message origin
|
|
|
|
origin_idx = random.randint(0, num_hosts - 1)
|
|
|
|
|
|
|
|
# publish from the randomly chosen host
|
2019-08-01 00:09:09 +08:00
|
|
|
await pubsubs_gsub[origin_idx].publish("foobar", msg_content)
|
2019-05-07 11:44:13 +08:00
|
|
|
|
2019-12-03 22:14:45 +08:00
|
|
|
await asyncio.sleep(0.5)
|
2019-05-07 11:44:13 +08:00
|
|
|
# Assert that all blocking queues receive the message
|
|
|
|
for queue in queues:
|
|
|
|
msg = await queue.get()
|
2019-07-26 18:35:25 +08:00
|
|
|
assert msg.data == msg_content
|
2019-05-07 11:44:13 +08:00
|
|
|
|
2019-07-26 18:35:25 +08:00
|
|
|
|
2019-08-01 00:09:09 +08:00
|
|
|
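

# NOTE: Publishing to a topic we are not subscribed to is routed via fanout
# peers rather than mesh peers; the next two tests exercise that path.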
@pytest.mark.parametrize("num_hosts", (10,))
|
2019-05-07 11:44:13 +08:00
|
|
|
@pytest.mark.asyncio
|
2019-08-01 00:09:09 +08:00
|
|
|
async def test_fanout(hosts, pubsubs_gsub):
|
2019-05-07 11:44:13 +08:00
|
|
|
num_msgs = 5
|
|
|
|
|
2019-08-01 00:09:09 +08:00
|
|
|
# All pubsub subscribe to foobar except for `pubsubs_gsub[0]`
|
2019-05-07 11:44:13 +08:00
|
|
|
queues = []
|
2019-08-01 00:09:09 +08:00
|
|
|
for i in range(1, len(pubsubs_gsub)):
|
|
|
|
q = await pubsubs_gsub[i].subscribe("foobar")
|
2019-05-07 11:44:13 +08:00
|
|
|
|
|
|
|
# Add each blocking queue to an array of blocking queues
|
|
|
|
queues.append(q)
|
|
|
|
|
|
|
|
# Sparsely connect libp2p hosts in random way
|
2019-08-01 00:09:09 +08:00
|
|
|
await dense_connect(hosts)
|
2019-05-07 11:44:13 +08:00
|
|
|
|
|
|
|
# Wait 2 seconds for heartbeat to allow mesh to connect
|
|
|
|
await asyncio.sleep(2)
|
|
|
|
|
2019-07-26 18:35:25 +08:00
|
|
|
topic = "foobar"
|
2019-05-07 11:44:13 +08:00
|
|
|
# Send messages with origin not subscribed
|
|
|
|
for i in range(num_msgs):
|
2019-07-26 18:35:25 +08:00
|
|
|
msg_content = b"foo " + i.to_bytes(1, "big")
|
2019-05-07 11:44:13 +08:00
|
|
|
|
|
|
|
# Pick the message origin to the node that is not subscribed to 'foobar'
|
|
|
|
origin_idx = 0
|
|
|
|
|
|
|
|
# publish from the randomly chosen host
|
2019-08-01 00:09:09 +08:00
|
|
|
await pubsubs_gsub[origin_idx].publish(topic, msg_content)
|
2019-05-07 11:44:13 +08:00
|
|
|
|
|
|
|
await asyncio.sleep(0.5)
|
|
|
|
# Assert that all blocking queues receive the message
|
|
|
|
for queue in queues:
|
|
|
|
msg = await queue.get()
|
2019-07-26 18:35:25 +08:00
|
|
|
assert msg.data == msg_content
|
2019-05-07 11:44:13 +08:00
|
|
|
|
|
|
|
# Subscribe message origin
|
2019-08-01 00:09:09 +08:00
|
|
|
queues.insert(0, await pubsubs_gsub[0].subscribe(topic))
|
2019-05-07 11:44:13 +08:00
|
|
|
|
|
|
|
# Send messages again
|
|
|
|
for i in range(num_msgs):
|
2019-08-01 06:00:12 +08:00
|
|
|
msg_content = b"bar " + i.to_bytes(1, "big")
|
2019-05-07 11:44:13 +08:00
|
|
|
|
|
|
|
# Pick the message origin to the node that is not subscribed to 'foobar'
|
|
|
|
origin_idx = 0
|
|
|
|
|
|
|
|
# publish from the randomly chosen host
|
2019-08-01 00:09:09 +08:00
|
|
|
await pubsubs_gsub[origin_idx].publish(topic, msg_content)
|
2019-05-07 11:44:13 +08:00
|
|
|
|
|
|
|
await asyncio.sleep(0.5)
|
|
|
|
# Assert that all blocking queues receive the message
|
|
|
|
for queue in queues:
|
|
|
|
msg = await queue.get()
|
2019-07-26 18:35:25 +08:00
|
|
|
assert msg.data == msg_content
|
2019-05-07 11:44:13 +08:00
|
|
|
|
2019-08-01 06:00:12 +08:00
|
|
|
|
2019-08-01 00:09:09 +08:00
|
|
|
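

# NOTE: This test exercises fanout upkeep: after every node unsubscribes and
# the non-origin nodes resubscribe, messages published by the never-subscribed
# origin must still reach everyone.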
@pytest.mark.parametrize("num_hosts", (10,))
|
2019-05-07 11:44:13 +08:00
|
|
|
@pytest.mark.asyncio
|
2019-08-03 07:48:43 +08:00
|
|
|
@pytest.mark.slow
|
2019-08-01 00:09:09 +08:00
|
|
|
async def test_fanout_maintenance(hosts, pubsubs_gsub):
|
2019-05-07 11:44:13 +08:00
|
|
|
num_msgs = 5
|
|
|
|
|
|
|
|
# All pubsub subscribe to foobar
|
|
|
|
queues = []
|
2019-07-26 18:35:25 +08:00
|
|
|
topic = "foobar"
|
2019-08-01 00:09:09 +08:00
|
|
|
for i in range(1, len(pubsubs_gsub)):
|
|
|
|
q = await pubsubs_gsub[i].subscribe(topic)
|
2019-05-07 11:44:13 +08:00
|
|
|
|
|
|
|
# Add each blocking queue to an array of blocking queues
|
|
|
|
queues.append(q)
|
|
|
|
|
|
|
|
# Sparsely connect libp2p hosts in random way
|
2019-08-01 00:09:09 +08:00
|
|
|
await dense_connect(hosts)
|
2019-05-07 11:44:13 +08:00
|
|
|
|
|
|
|
# Wait 2 seconds for heartbeat to allow mesh to connect
|
|
|
|
await asyncio.sleep(2)
|
|
|
|
|
|
|
|
# Send messages with origin not subscribed
|
|
|
|
for i in range(num_msgs):
|
2019-08-01 06:00:12 +08:00
|
|
|
msg_content = b"foo " + i.to_bytes(1, "big")
|
2019-05-07 11:44:13 +08:00
|
|
|
|
|
|
|
# Pick the message origin to the node that is not subscribed to 'foobar'
|
|
|
|
origin_idx = 0
|
|
|
|
|
|
|
|
# publish from the randomly chosen host
|
2019-08-01 00:09:09 +08:00
|
|
|
await pubsubs_gsub[origin_idx].publish(topic, msg_content)
|
2019-05-07 11:44:13 +08:00
|
|
|
|
|
|
|
await asyncio.sleep(0.5)
|
|
|
|
# Assert that all blocking queues receive the message
|
|
|
|
for queue in queues:
|
|
|
|
msg = await queue.get()
|
2019-07-26 18:35:25 +08:00
|
|
|
assert msg.data == msg_content
|
2019-05-07 11:44:13 +08:00
|
|
|
|
2019-08-01 00:09:09 +08:00
|
|
|
for sub in pubsubs_gsub:
|
2019-07-26 18:35:25 +08:00
|
|
|
await sub.unsubscribe(topic)
|
2019-05-07 11:44:13 +08:00
|
|
|
|
|
|
|
queues = []
|
|
|
|
|
|
|
|
await asyncio.sleep(2)
|
|
|
|
|
|
|
|
# Resub and repeat
|
2019-08-01 00:09:09 +08:00
|
|
|
for i in range(1, len(pubsubs_gsub)):
|
|
|
|
q = await pubsubs_gsub[i].subscribe(topic)
|
2019-05-07 11:44:13 +08:00
|
|
|
|
|
|
|
# Add each blocking queue to an array of blocking queues
|
|
|
|
queues.append(q)
|
|
|
|
|
|
|
|
await asyncio.sleep(2)
|
|
|
|
|
|
|
|
# Check messages can still be sent
|
|
|
|
for i in range(num_msgs):
|
2019-08-01 06:00:12 +08:00
|
|
|
msg_content = b"bar " + i.to_bytes(1, "big")
|
2019-05-07 11:44:13 +08:00
|
|
|
|
|
|
|
# Pick the message origin to the node that is not subscribed to 'foobar'
|
|
|
|
origin_idx = 0
|
|
|
|
|
|
|
|
# publish from the randomly chosen host
|
2019-08-01 00:09:09 +08:00
|
|
|
await pubsubs_gsub[origin_idx].publish(topic, msg_content)
|
2019-05-07 11:44:13 +08:00
|
|
|
|
|
|
|
await asyncio.sleep(0.5)
|
|
|
|
# Assert that all blocking queues receive the message
|
|
|
|
for queue in queues:
|
|
|
|
msg = await queue.get()
|
2019-07-26 18:35:25 +08:00
|
|
|
assert msg.data == msg_content
|
2019-05-07 11:44:13 +08:00
|
|
|
|
2019-07-26 18:35:25 +08:00
|
|
|
|
2019-08-01 00:09:09 +08:00
|
|
|


@pytest.mark.parametrize(
    "num_hosts, gossipsub_params",
    (
        (
            2,
            GossipsubParams(
                degree=1,
                degree_low=0,
                degree_high=2,
                gossip_window=50,
                gossip_history=100,
            ),
        ),
    ),
)
@pytest.mark.asyncio
async def test_gossip_propagation(hosts, pubsubs_gsub):
    topic = "foo"
    await pubsubs_gsub[0].subscribe(topic)

    # Node 0 publishes to the topic before node 1 is subscribed or connected
    msg_content = b"foo_msg"
    await pubsubs_gsub[0].publish(topic, msg_content)

    # Now node 1 subscribes
    queue_1 = await pubsubs_gsub[1].subscribe(topic)

    await connect(hosts[0], hosts[1])

    # Wait for the gossip heartbeat
    await asyncio.sleep(2)

    # Node 1 should be able to read the message, delivered via gossip
    msg = await queue_1.get()
    assert msg.data == msg_content
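

# NOTE: The two heartbeat tests below call the heartbeat helpers directly on
# monkeypatched router state; `heartbeat_initial_delay=100` is presumably set
# so that the real heartbeat loop never fires and mutates that state mid-test.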


@pytest.mark.parametrize(
    "num_hosts, gossipsub_params", ((1, GossipsubParams(heartbeat_initial_delay=100)),)
)
@pytest.mark.parametrize("initial_mesh_peer_count", (7, 10, 13))
@pytest.mark.asyncio
async def test_mesh_heartbeat(
    num_hosts, initial_mesh_peer_count, pubsubs_gsub, hosts, monkeypatch
):
    # It is difficult to set up the initial peer-subscription condition.
    # Ideally I would like the initial mesh peer count to be below `degree`
    # so I can test whether `mesh_heartbeat` returns the correct peers to GRAFT.
    # The problem is that I cannot set it up so that peers subscribe to the
    # topic without being part of our mesh peers (these are exactly the peers
    # to GRAFT). So I monkeypatch the peer subscriptions and our mesh peers.
    total_peer_count = 14
    topic = "TEST_MESH_HEARTBEAT"

    fake_peer_ids = [
        ID((i).to_bytes(2, byteorder="big")) for i in range(total_peer_count)
    ]
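    # The two-byte encodings of 0..total_peer_count-1 above give distinct,
    # deterministic `ID`s to stand in for real peers.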
    monkeypatch.setattr(pubsubs_gsub[0].router, "peers_gossipsub", fake_peer_ids)

    peer_topics = {topic: fake_peer_ids}
    # Monkeypatch the peer subscriptions
    monkeypatch.setattr(pubsubs_gsub[0], "peer_topics", peer_topics)

    mesh_peer_indices = random.sample(range(total_peer_count), initial_mesh_peer_count)
    mesh_peers = [fake_peer_ids[i] for i in mesh_peer_indices]
    router_mesh = {topic: list(mesh_peers)}
    # Monkeypatch our mesh peers
    monkeypatch.setattr(pubsubs_gsub[0].router, "mesh", router_mesh)

    peers_to_graft, peers_to_prune = pubsubs_gsub[0].router.mesh_heartbeat()
    if initial_mesh_peer_count > GOSSIPSUB_PARAMS.degree:
        # If there are more initial mesh peers than `degree`, the heartbeat
        # should PRUNE mesh peers down to `degree`
        assert len(peers_to_graft) == 0
        assert len(peers_to_prune) == initial_mesh_peer_count - GOSSIPSUB_PARAMS.degree
        for peer in peers_to_prune:
            assert peer in mesh_peers
    elif initial_mesh_peer_count < GOSSIPSUB_PARAMS.degree:
        # If there are fewer initial mesh peers than `degree`, the heartbeat
        # should GRAFT more peers
        assert len(peers_to_prune) == 0
        assert len(peers_to_graft) == GOSSIPSUB_PARAMS.degree - initial_mesh_peer_count
        for peer in peers_to_graft:
            assert peer not in mesh_peers
    else:
        assert len(peers_to_prune) == 0 and len(peers_to_graft) == 0


@pytest.mark.parametrize(
    "num_hosts, gossipsub_params", ((1, GossipsubParams(heartbeat_initial_delay=100)),)
)
@pytest.mark.parametrize("initial_peer_count", (1, 4, 7))
@pytest.mark.asyncio
async def test_gossip_heartbeat(
    num_hosts, initial_peer_count, pubsubs_gsub, hosts, monkeypatch
):
    # As in `test_mesh_heartbeat`, I cannot set it up so that peers subscribe
    # to the topic without being part of our mesh peers, so I monkeypatch the
    # peer subscriptions, our mesh peers, and our fanout peers.
    total_peer_count = 28
    topic_mesh = "TEST_GOSSIP_HEARTBEAT_1"
    topic_fanout = "TEST_GOSSIP_HEARTBEAT_2"

    fake_peer_ids = [
        ID((i).to_bytes(2, byteorder="big")) for i in range(total_peer_count)
    ]
    monkeypatch.setattr(pubsubs_gsub[0].router, "peers_gossipsub", fake_peer_ids)

    topic_mesh_peer_count = 14
    # Split the fake peers into mesh peers and fanout peers
    peer_topics = {
        topic_mesh: fake_peer_ids[:topic_mesh_peer_count],
        topic_fanout: fake_peer_ids[topic_mesh_peer_count:],
    }
    # Monkeypatch the peer subscriptions
    monkeypatch.setattr(pubsubs_gsub[0], "peer_topics", peer_topics)

    mesh_peer_indices = random.sample(range(topic_mesh_peer_count), initial_peer_count)
    mesh_peers = [fake_peer_ids[i] for i in mesh_peer_indices]
    router_mesh = {topic_mesh: list(mesh_peers)}
    # Monkeypatch our mesh peers
    monkeypatch.setattr(pubsubs_gsub[0].router, "mesh", router_mesh)
    fanout_peer_indices = random.sample(
        range(topic_mesh_peer_count, total_peer_count), initial_peer_count
    )
    fanout_peers = [fake_peer_ids[i] for i in fanout_peer_indices]
    router_fanout = {topic_fanout: list(fanout_peers)}
    # Monkeypatch our fanout peers
    monkeypatch.setattr(pubsubs_gsub[0].router, "fanout", router_fanout)

    def window(topic):
        if topic == topic_mesh:
            return [topic_mesh]
        elif topic == topic_fanout:
            return [topic_fanout]
        else:
            return []
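
    # `mcache.window(topic)` is assumed to return the IDs of recently seen
    # messages for the topic; stubbing it to a non-empty list guarantees that
    # there is always something to advertise via IHAVE gossip.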
    # Monkeypatch the message cache's window
    monkeypatch.setattr(pubsubs_gsub[0].router.mcache, "window", window)

    peers_to_gossip = pubsubs_gsub[0].router.gossip_heartbeat()
    # If the number of peers subscribed to a topic, excluding our mesh peers,
    # is less than `degree`, we gossip to all of them; otherwise we gossip to
    # `degree` of them.
    if topic_mesh_peer_count - initial_peer_count < GOSSIPSUB_PARAMS.degree:
        # The same goes for fanout, so it is twice the number of peers to gossip
        assert len(peers_to_gossip) == 2 * (topic_mesh_peer_count - initial_peer_count)
    elif topic_mesh_peer_count - initial_peer_count >= GOSSIPSUB_PARAMS.degree:
        assert len(peers_to_gossip) == 2 * GOSSIPSUB_PARAMS.degree

    for peer in peers_to_gossip:
        if peer in peer_topics[topic_mesh]:
            # Check that the peer to gossip to is not one of our mesh peers
            assert peer not in mesh_peers
            assert topic_mesh in peers_to_gossip[peer]
        elif peer in peer_topics[topic_fanout]:
            # Check that the peer to gossip to is not one of our fanout peers
            assert peer not in fanout_peers
            assert topic_fanout in peers_to_gossip[peer]