Compare commits: master...bee-movie- (13 commits)

Commits: fa292ae7c8, 4c2bf6873a, 7cc9ddda75, 3b3ff61755, 853be062a2, 7b67c9cb2f, f4fb71e0cf, c1d011ac64, 41b2a0c6d5, 437e9e9ee6, b31773f00b, 2a5aa14c42, 21bcf742a4
examples/bee_movie/__init__.py (new file, 0 lines)

examples/bee_movie/msg_ordering_node.py (new file, 80 lines)

@@ -0,0 +1,80 @@
import asyncio

import multiaddr

from tests.pubsub.utils import message_id_generator, generate_RPC_packet
from libp2p import new_node
from libp2p.pubsub.pubsub import Pubsub
from libp2p.pubsub.floodsub import FloodSub
from .ordered_queue import OrderedQueue

SUPPORTED_PUBSUB_PROTOCOLS = ["/floodsub/1.0.0"]
BEE_MOVIE_TOPIC = "bee_movie"


class MsgOrderingNode():

    def __init__(self):
        self.balances = {}
        self.next_msg_id_func = message_id_generator(0)
        self.priority_queue = OrderedQueue()

        self.libp2p_node = None
        self.floodsub = None
        self.pubsub = None
        self.queue = None  # subscription queue, set in setup_crypto_networking

    @classmethod
    async def create(cls):
        """
        Create a new MsgOrderingNode and attach a libp2p node, a floodsub
        instance, and a pubsub instance to this new node

        We use create, as it serves as a factory function and allows us
        to use async/await, unlike __init__
        """
        self = MsgOrderingNode()

        libp2p_node = await new_node(transport_opt=["/ip4/127.0.0.1/tcp/0"])
        await libp2p_node.get_network().listen(multiaddr.Multiaddr("/ip4/127.0.0.1/tcp/0"))

        self.libp2p_node = libp2p_node

        self.floodsub = FloodSub(SUPPORTED_PUBSUB_PROTOCOLS)
        self.pubsub = Pubsub(self.libp2p_node, self.floodsub, "a")
        return self

    async def handle_incoming_msgs(self):
        """
        Handle all incoming messages on the BEE_MOVIE_TOPIC from peers
        """
        while True:
            incoming = await self.queue.get()
            seqno = int.from_bytes(incoming.seqno, byteorder='big')
            word = incoming.data.decode('utf-8')

            await self.handle_bee_movie_word(seqno, word)

    async def setup_crypto_networking(self):
        """
        Subscribe to BEE_MOVIE_TOPIC and spawn the task that handles
        all incoming messages on that topic
        """
        self.queue = await self.pubsub.subscribe(BEE_MOVIE_TOPIC)

        asyncio.ensure_future(self.handle_incoming_msgs())

    async def publish_bee_movie_word(self, word, msg_id=None):
        # Publish a bee movie word to all peers
        my_id = str(self.libp2p_node.get_id())
        if msg_id is None:
            msg_id = self.next_msg_id_func()
        packet = generate_RPC_packet(my_id, [BEE_MOVIE_TOPIC], word, msg_id)
        await self.floodsub.publish(my_id, packet.SerializeToString())

    async def handle_bee_movie_word(self, seqno, word):
        # Handle a received bee movie word
        await self.priority_queue.put((seqno, word))

    async def get_next_word_in_bee_movie(self):
        # Get just the word (and not the seqno) and return the word
        next_word = (await self.priority_queue.get())[1]
        return next_word
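A minimal, hypothetical driver for MsgOrderingNode, sketched from the connect/setup helpers in test_bee_movie.py below; the two-node wiring and sleep timings here are assumptions, not part of the committed code:

import asyncio

from examples.bee_movie.msg_ordering_node import MsgOrderingNode
from libp2p.peer.peerinfo import info_from_p2p_addr

async def demo():
    sender = await MsgOrderingNode.create()
    receiver = await MsgOrderingNode.create()

    # Dial the receiver from the sender (hypothetical wiring)
    info = info_from_p2p_addr(receiver.libp2p_node.get_addrs()[0])
    await sender.libp2p_node.connect(info)

    # Subscribe both nodes to BEE_MOVIE_TOPIC and start their handlers
    await sender.setup_crypto_networking()
    await receiver.setup_crypto_networking()
    await asyncio.sleep(0.25)  # allow the subscriptions to propagate

    await sender.publish_bee_movie_word("hello")
    await asyncio.sleep(0.25)  # allow the publish to propagate
    print(await receiver.get_next_word_in_bee_movie())  # "hello"

asyncio.run(demo())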
examples/bee_movie/ordered_queue.py (new file, 51 lines)

@@ -0,0 +1,51 @@
import asyncio


class OrderedQueue():
    """
    asyncio.Queue wrapper that delivers items in order of consecutive sequence
    numbers. For example, if messages 1 and 3 have been received and three get()
    calls occur, the queue delivers message 1, waits until message 2 is received
    to deliver message 2, and then delivers message 3
    """

    def __init__(self):
        self.last_gotten_seqno = 0
        self.queue = asyncio.PriorityQueue()
        self.task = None

    async def put(self, item):
        """
        :param item: put item tuple (seqno, data) onto queue
        """
        seqno = item[0]
        await self.queue.put(item)
        if self.last_gotten_seqno + 1 == seqno and self.task is not None:
            # Allow the pending get() to return now that the next item has arrived
            self.task.set()

    async def get(self):
        """
        Get the item with seqno equal to last_gotten_seqno + 1 from the queue
        :return: (seqno, data)
        """
        if self.queue.qsize() > 0:
            front_item = await self.queue.get()

            if front_item[0] == self.last_gotten_seqno + 1:
                self.last_gotten_seqno += 1
                return front_item
            # Put the element back, as it should not be delivered yet
            await self.queue.put(front_item)

        # Wait until the item with the subsequent seqno is put on the queue
        self.task = asyncio.Event()
        await self.task.wait()
        item = await self.queue.get()

        # Remove the event
        self.task = None

        self.last_gotten_seqno += 1

        return item
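A self-contained sketch of the reordering behavior, runnable without any libp2p machinery (the import path assumes the repository root is on sys.path):

import asyncio

from examples.bee_movie.ordered_queue import OrderedQueue

async def demo():
    queue = OrderedQueue()
    await queue.put((2, "world"))  # seqno 2 arrives first, out of order
    await queue.put((1, "hello"))
    print(await queue.get())  # (1, 'hello'): seqno 1 is delivered first
    print(await queue.get())  # (2, 'world')

asyncio.run(demo())

Had only (2, "world") been put, the first get() would block until an item with seqno 1 arrived, which is exactly the behavior the message-ordering tests below rely on.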
examples/bee_movie/test_bee_movie.py (new file, 427 lines)

@@ -0,0 +1,427 @@
import asyncio
from threading import Thread
import struct
import pytest
import urllib.request

from libp2p.peer.peerinfo import info_from_p2p_addr
from .msg_ordering_node import MsgOrderingNode
from tests.utils import cleanup

# pylint: disable=too-many-locals

"""
Test cases demonstrating how to create nodes that continuously stream data
and how to ensure that data is delivered to each node in a pre-determined order.
The ordering is such that if a peer A sends publish 1 and publish 2 with seqno=1
and seqno=2 respectively, then even if publish 2 (with seqno=2) reaches the peers
first, it will not be processed until seqno=1 is received (and publish 1 with
seqno=1 must be processed before publish 2 with seqno=2 is).

This concept is demonstrated by streaming the script of the entire Bee Movie
to several nodes.
"""

async def connect(node1, node2):
    # node1 connects to node2
    addr = node2.get_addrs()[0]
    info = info_from_p2p_addr(addr)
    await node1.connect(info)

def create_setup_in_new_thread_func(dummy_node):
    def setup_in_new_thread():
        asyncio.ensure_future(dummy_node.setup_crypto_networking())
    return setup_in_new_thread

async def perform_test(num_nodes, adjacency_map, action_func, assertion_func):
    """
    Helper function to allow for easy construction of custom tests for msg ordering nodes
    in various network topologies
    :param num_nodes: number of nodes in the test
    :param adjacency_map: adjacency map defining each node and its list of neighbors
    :param action_func: function to execute that includes actions by the nodes
    :param assertion_func: assertions for testing that the results of the actions are correct
    """

    # Create nodes
    dummy_nodes = []
    for _ in range(num_nodes):
        dummy_nodes.append(await MsgOrderingNode.create())

    # Create network
    for source_num in adjacency_map:
        target_nums = adjacency_map[source_num]
        for target_num in target_nums:
            await connect(dummy_nodes[source_num].libp2p_node, \
                dummy_nodes[target_num].libp2p_node)

    # Allow time for network creation to take place
    await asyncio.sleep(0.25)

    # Start a thread for each node so that each node can listen and respond
    # to messages on its own thread, which will avoid waiting indefinitely
    # on the main thread. On this thread, call the setup func for the node,
    # which subscribes the node to the BEE_MOVIE_TOPIC topic
    # (NOTE: Thread.run() executes the target in the calling thread rather
    # than starting a new one, so ensure_future lands on the main event loop)
    for dummy_node in dummy_nodes:
        thread = Thread(target=create_setup_in_new_thread_func(dummy_node))
        thread.run()

    # Allow time for nodes to subscribe to the BEE_MOVIE_TOPIC topic
    await asyncio.sleep(0.25)

    # Perform action function
    await action_func(dummy_nodes)

    # Allow time for the action function to be performed (i.e. messages to propagate)
    await asyncio.sleep(1)

    # Perform assertion function
    for dummy_node in dummy_nodes:
        await assertion_func(dummy_node)

    # Success, terminate pending tasks.
    await cleanup()

@pytest.mark.asyncio
async def test_simple_two_nodes_one_word():
    num_nodes = 2
    adj_map = {0: [1]}

    async def action_func(dummy_nodes):
        await dummy_nodes[0].publish_bee_movie_word("aspyn")
        # await asyncio.sleep(0.25)
        await dummy_nodes[0].publish_bee_movie_word("hello")
        # await asyncio.sleep(0.25)

    async def assertion_func(dummy_node):
        next_word = await dummy_node.get_next_word_in_bee_movie()
        assert next_word == "aspyn"
        next_word = await dummy_node.get_next_word_in_bee_movie()
        assert next_word == "hello"

    await perform_test(num_nodes, adj_map, action_func, assertion_func)

@pytest.mark.asyncio
async def test_simple_two_nodes_ten_words():
    num_nodes = 2
    adj_map = {0: [1]}

    words = ["aspyn", "is", "so", "good", "at", "writing", "code", "XD", ":)", "foobar"]

    async def action_func(dummy_nodes):
        for word in words:
            await dummy_nodes[0].publish_bee_movie_word(word)
            # await asyncio.sleep(0.25)

    async def assertion_func(dummy_node):
        for word in words:
            assert await dummy_node.get_next_word_in_bee_movie() == word

    await perform_test(num_nodes, adj_map, action_func, assertion_func)

@pytest.mark.asyncio
async def test_simple_two_nodes_two_words_out_of_order_ids():
    num_nodes = 2
    adj_map = {0: [1]}

    async def action_func(dummy_nodes):
        await dummy_nodes[0].publish_bee_movie_word("word 2", struct.pack('>I', 2))
        word, _, _ = await asyncio.gather(dummy_nodes[0].get_next_word_in_bee_movie(), \
            asyncio.sleep(0.25), \
            dummy_nodes[0].publish_bee_movie_word("word 1", struct.pack('>I', 1)))
        assert word == "word 1"
        assert await dummy_nodes[0].get_next_word_in_bee_movie() == "word 2"

    async def assertion_func(dummy_node):
        pass

    await perform_test(num_nodes, adj_map, action_func, assertion_func)

@pytest.mark.asyncio
async def test_simple_two_nodes_two_words_read_then_publish_out_of_order_ids():
    num_nodes = 2
    adj_map = {0: [1]}
    collected = None

    async def collect_all_words(expected_len, dummy_node):
        collected_words = []
        while True:
            word = await dummy_node.get_next_word_in_bee_movie()
            collected_words.append(word)
            if len(collected_words) == expected_len:
                return collected_words

    async def action_func(dummy_nodes):
        words, _, _, _ = await asyncio.gather(collect_all_words(2, dummy_nodes[0]), \
            asyncio.sleep(0.25), \
            dummy_nodes[0].publish_bee_movie_word("word 2", struct.pack('>I', 2)), \
            dummy_nodes[0].publish_bee_movie_word("word 1", struct.pack('>I', 1)))

        # Store collected words to be checked in assertion func
        nonlocal collected
        collected = words

    async def assertion_func(dummy_node):
        assert collected[0] == "word 1"
        assert collected[1] == "word 2"

    await perform_test(num_nodes, adj_map, action_func, assertion_func)

@pytest.mark.asyncio
async def test_simple_two_nodes_ten_words_out_of_order_ids():
    num_nodes = 2
    adj_map = {0: [1]}
    collected = None

    async def collect_all_words(expected_len, dummy_node):
        collected_words = []
        while True:
            word = await dummy_node.get_next_word_in_bee_movie()
            collected_words.append(word)
            if len(collected_words) == expected_len:
                return collected_words

    async def action_func(dummy_nodes):
        words = ["e", "b", "d", "i", "a", "h", "c", "f", "g", "j"]
        msg_id_nums = [5, 2, 4, 9, 1, 8, 3, 6, 7, 10]
        msg_ids = []
        tasks = []
        for msg_id_num in msg_id_nums:
            msg_ids.append(struct.pack('>I', msg_id_num))

        tasks.append(collect_all_words(len(words), dummy_nodes[0]))
        tasks.append(asyncio.sleep(0.25))

        for i in range(len(words)):
            tasks.append(dummy_nodes[0].publish_bee_movie_word(words[i], msg_ids[i]))

        res = await asyncio.gather(*tasks)

        nonlocal collected
        collected = res[0]

    async def assertion_func(dummy_node):
        correct_words = ["a", "b", "c", "d", "e", "f", "g", "h", "i", "j"]
        for i in range(len(correct_words)):
            assert collected[i] == correct_words[i]
        assert len(collected) == len(correct_words)

    await perform_test(num_nodes, adj_map, action_func, assertion_func)

@pytest.mark.asyncio
async def test_simple_five_nodes_rings_words_out_of_order_ids():
    num_nodes = 5
    adj_map = {0: [1], 1: [2], 2: [3], 3: [4], 4: [0]}
    collected = []

    async def collect_all_words(expected_len, dummy_node):
        collected_words = []
        while True:
            word = await dummy_node.get_next_word_in_bee_movie()
            collected_words.append(word)
            if len(collected_words) == expected_len:
                return collected_words

    async def action_func(dummy_nodes):
        words = ["e", "b", "d", "i", "a", "h", "c", "f", "g", "j"]
        msg_id_nums = [5, 2, 4, 9, 1, 8, 3, 6, 7, 10]
        msg_ids = []
        tasks = []
        for msg_id_num in msg_id_nums:
            msg_ids.append(struct.pack('>I', msg_id_num))

        for i in range(num_nodes):
            tasks.append(collect_all_words(len(words), dummy_nodes[i]))

        tasks.append(asyncio.sleep(0.25))

        for i in range(len(words)):
            tasks.append(dummy_nodes[0].publish_bee_movie_word(words[i], msg_ids[i]))

        res = await asyncio.gather(*tasks)

        nonlocal collected
        for i in range(num_nodes):
            collected.append(res[i])

    async def assertion_func(dummy_node):
        correct_words = ["a", "b", "c", "d", "e", "f", "g", "h", "i", "j"]
        for i in range(num_nodes):
            assert collected[i] == correct_words
            assert len(collected[i]) == len(correct_words)

    await perform_test(num_nodes, adj_map, action_func, assertion_func)

@pytest.mark.asyncio
async def test_simple_seven_nodes_tree_words_out_of_order_ids():
    num_nodes = 7
    adj_map = {0: [1, 2], 1: [3, 4], 2: [5, 6]}
    collected = []

    async def collect_all_words(expected_len, dummy_node):
        collected_words = []
        while True:
            word = await dummy_node.get_next_word_in_bee_movie()
            collected_words.append(word)
            if len(collected_words) == expected_len:
                return collected_words

    async def action_func(dummy_nodes):
        words = ["e", "b", "d", "i", "a", "h", "c", "f", "g", "j"]
        msg_id_nums = [5, 2, 4, 9, 1, 8, 3, 6, 7, 10]
        msg_ids = []
        tasks = []
        for msg_id_num in msg_id_nums:
            msg_ids.append(struct.pack('>I', msg_id_num))

        for i in range(num_nodes):
            tasks.append(collect_all_words(len(words), dummy_nodes[i]))

        tasks.append(asyncio.sleep(0.25))

        for i in range(len(words)):
            tasks.append(dummy_nodes[0].publish_bee_movie_word(words[i], msg_ids[i]))

        res = await asyncio.gather(*tasks)

        nonlocal collected
        for i in range(num_nodes):
            collected.append(res[i])

    async def assertion_func(dummy_node):
        correct_words = ["a", "b", "c", "d", "e", "f", "g", "h", "i", "j"]
        for i in range(num_nodes):
            assert collected[i] == correct_words
            assert len(collected[i]) == len(correct_words)

    await perform_test(num_nodes, adj_map, action_func, assertion_func)

def download_bee_movie():
    url = "https://gist.githubusercontent.com/stuckinaboot/c531823814af1f6785f75ed7eedf60cb/raw/5107c5e6c2fda2ff54cfcc9803bbb297a53db71b/bee_movie.txt"
    response = urllib.request.urlopen(url)
    data = response.read()  # a `bytes` object
    text = data.decode('utf-8')  # a `str`; this step can't be used if data is binary
    return text

@pytest.mark.asyncio
async def test_simple_two_nodes_bee_movie():
    print("Downloading Bee Movie")
    bee_movie_script = download_bee_movie()
    print("Downloaded Bee Movie")
    bee_movie_words = bee_movie_script.split(" ")
    print("Bee Movie script split on spaces, # words = " + str(len(bee_movie_words)))

    num_nodes = 2
    adj_map = {0: [1]}
    collected = []

    async def collect_all_words(expected_len, dummy_node, log_nodes=[]):
        collected_words = []
        while True:
            word = await dummy_node.get_next_word_in_bee_movie()
            collected_words.append(word)

            # Log if needed
            if dummy_node in log_nodes:
                print(word + "| " + str(len(collected_words)) + "/" + str(expected_len))

            if len(collected_words) == expected_len:
                print("Returned collected words")
                return collected_words

    async def action_func(dummy_nodes):
        print("Start action function")
        words = bee_movie_words
        tasks = []

        print("Add collect all words")
        log_nodes = [dummy_nodes[0]]
        for i in range(num_nodes):
            tasks.append(collect_all_words(len(words), dummy_nodes[i], log_nodes))

        print("Add sleep")
        tasks.append(asyncio.sleep(0.25))

        print("Add publish")
        for i in range(len(words)):
            tasks.append(dummy_nodes[0].publish_bee_movie_word(words[i]))

        print("Perform gather")
        res = await asyncio.gather(*tasks)

        print("Filling collected")
        nonlocal collected
        for i in range(num_nodes):
            collected.append(res[i])
        print("Filled collected")

    async def assertion_func(dummy_node):
        print("Perform assertion")
        correct_words = bee_movie_words
        for i in range(num_nodes):
            assert collected[i] == correct_words
            assert len(collected[i]) == len(correct_words)
        print("Assertion performed")

    await perform_test(num_nodes, adj_map, action_func, assertion_func)

@pytest.mark.asyncio
async def test_simple_seven_nodes_tree_bee_movie():
    print("Downloading Bee Movie")
    bee_movie_script = download_bee_movie()
    print("Downloaded Bee Movie")
    bee_movie_words = bee_movie_script.split(" ")
    print("Bee Movie script split on spaces, # words = " + str(len(bee_movie_words)))

    num_nodes = 7
    adj_map = {0: [1, 2], 1: [3, 4], 2: [5, 6]}
    collected = []

    async def collect_all_words(expected_len, dummy_node, log_nodes=[]):
        collected_words = []
        while True:
            word = await dummy_node.get_next_word_in_bee_movie()
            collected_words.append(word)

            # Log if needed
            if dummy_node in log_nodes:
                print(word + "| " + str(len(collected_words)) + "/" + str(expected_len))

            if len(collected_words) == expected_len:
                print("Returned collected words")
                return collected_words

    async def action_func(dummy_nodes):
        print("Start action function")
        words = bee_movie_words
        tasks = []

        print("Add collect all words")
        log_nodes = [dummy_nodes[0]]
        for i in range(num_nodes):
            tasks.append(collect_all_words(len(words), dummy_nodes[i], log_nodes))

        print("Add sleep")
        tasks.append(asyncio.sleep(0.25))

        print("Add publish")
        for i in range(len(words)):
            tasks.append(dummy_nodes[0].publish_bee_movie_word(words[i]))

        print("Perform gather")
        res = await asyncio.gather(*tasks, return_exceptions=True)

        print("Filling collected")
        nonlocal collected
        for i in range(num_nodes):
            collected.append(res[i])
        print("Filled collected")

    async def assertion_func(dummy_node):
        print("Perform assertion")
        correct_words = bee_movie_words
        for i in range(num_nodes):
            assert collected[i] == correct_words
            assert len(collected[i]) == len(correct_words)
        print("Assertion performed")

    await perform_test(num_nodes, adj_map, action_func, assertion_func)
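The out-of-order tests above inject explicit sequence numbers by packing them as 4-byte big-endian message ids, the same representation that handle_incoming_msgs decodes. A quick sanity check of that round trip:

import struct

seqno_bytes = struct.pack('>I', 2)  # b'\x00\x00\x00\x02'
assert int.from_bytes(seqno_bytes, byteorder='big') == 2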
@@ -78,9 +78,6 @@ class FloodSub(IPubsubRouter):
             new_packet = rpc_pb2.RPC()
             new_packet.publish.extend([message])
             await stream.write(new_packet.SerializeToString())
-        else:
-            # Implies publish did not write
-            print("publish did not write")

     def join(self, topic):
         """
@@ -60,7 +60,7 @@ class Mplex(IMuxedConn):
         # TODO: pass down timeout from user and use that
         if stream_id in self.buffers:
             try:
-                data = await asyncio.wait_for(self.buffers[stream_id].get(), timeout=8)
+                data = await asyncio.wait_for(self.buffers[stream_id].get(), timeout=5000)
                 return data
             except asyncio.TimeoutError:
                 return None
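One thing worth noting when reading this change: asyncio.wait_for takes its timeout in seconds, so the new value waits up to 5000 seconds (not 5). A minimal illustration of the read-with-timeout pattern used here:

import asyncio

async def read_with_timeout(queue, timeout_s):
    # Return the next item from the queue, or None if nothing
    # arrives within timeout_s seconds
    try:
        return await asyncio.wait_for(queue.get(), timeout=timeout_s)
    except asyncio.TimeoutError:
        return None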
@@ -5,3 +5,4 @@ pytest-asyncio
 pylint
 grpcio
 grpcio-tools
+pyvis
@@ -176,8 +176,13 @@ async def perform_test_from_obj(obj):
     topics_in_msgs_ordered = []
     messages = obj["messages"]
     tasks_publish = []
+<<<<<<< HEAD
+    next_msg_id_func = message_id_generator(0)
+=======
+    all_actual_msgs = {}
+>>>>>>> Modify perform_test_obj to handle messages received in any order
     for msg in messages:
         topics = msg["topics"]

@@ -199,7 +204,10 @@ async def perform_test_from_obj(obj):
         # TODO: Update message sender to be correct message sender before
         # adding msg_talk to this list
         for topic in topics:
-            topics_in_msgs_ordered.append((topic, msg_talk))
+            if topic in all_actual_msgs:
+                all_actual_msgs[topic].append(msg_talk.publish[0].SerializeToString())
+            else:
+                all_actual_msgs[topic] = [msg_talk.publish[0].SerializeToString()]

     # Allow time for publishing before continuing
     # await asyncio.sleep(0.4)

@@ -207,15 +215,22 @@ async def perform_test_from_obj(obj):
     await asyncio.gather(*tasks_publish)

     # Step 4) Check that all messages were received correctly.
-    # TODO: Check message sender too
-    for i in range(len(topics_in_msgs_ordered)):
-        topic, actual_msg = topics_in_msgs_ordered[i]
-        # Get message from subscription queue
-        msg_on_node_str = await queues_map[node_id][topic].get()
-        assert actual_msg.publish[0].SerializeToString() == msg_on_node_str.SerializeToString()
+    # Look at each node in each topic
+    for topic in all_actual_msgs:
+        for node_id in topic_map[topic]:
+            all_received_msgs_in_topic = []
+
+            # Add all messages to message received list for given node in given topic
+            while (queues_map[node_id][topic].qsize() > 0):
+                # Get message from subscription queue
+                msg_on_node = (await queues_map[node_id][topic].get()).SerializeToString()
+                all_received_msgs_in_topic.append(msg_on_node)
+
+            # Ensure each message received was the same as one sent
+            for msg_on_node in all_received_msgs_in_topic:
+                assert msg_on_node in all_actual_msgs[topic]
+
+            # Ensure same number of messages received as sent
+            assert len(all_received_msgs_in_topic) == len(all_actual_msgs[topic])

     # Success, terminate pending tasks.
     await cleanup()
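The reworked Step 4 compares what each node received against what was sent as an unordered collection per topic. The same membership-and-count check in miniature, with made-up byte strings:

sent = [b"msg-a", b"msg-b"]
received = [b"msg-b", b"msg-a"]  # arrival order no longer matters

for msg in received:
    assert msg in sent  # every received message was actually sent
assert len(received) == len(sent)  # same number received as sent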
tests/pubsub/test_random_topology.py (new file, 383 lines)

@@ -0,0 +1,383 @@
import asyncio
import multiaddr
import pytest
import random
import pprint

from pyvis.network import Network
from tests.utils import cleanup
from libp2p import new_node
from libp2p.peer.peerinfo import info_from_p2p_addr
from libp2p.pubsub.pb import rpc_pb2
from libp2p.pubsub.pubsub import Pubsub
from libp2p.pubsub.floodsub import FloodSub
from utils import generate_message_id, generate_RPC_packet

# pylint: disable=too-many-locals

async def connect(node1, node2):
    """
    Connect node1 to node2
    """
    addr = node2.get_addrs()[0]
    info = info_from_p2p_addr(addr)
    await node1.connect(info)

async def perform_test_from_obj(obj, timeout_len=2):
    """
    Perform a floodsub test from a test obj.
    Test objs are composed as follows:

    {
        "supported_protocols": ["supported/protocol/1.0.0",...],
        "adj_list": {
            "node1": ["neighbor1_of_node1", "neighbor2_of_node1", ...],
            "node2": ["neighbor1_of_node2", "neighbor2_of_node2", ...],
            ...
        },
        "topic_map": {
            "topic1": ["node1_subscribed_to_topic1", "node2_subscribed_to_topic1", ...]
        },
        "messages": [
            {
                "topics": ["topic1_for_message", "topic2_for_message", ...],
                "data": "some contents of the message (newlines are not supported)",
                "node_id": "message sender node id"
            },
            ...
        ]
    }
    NOTE: In adj_list, for any two neighbors A and B, list only A as a neighbor
    of B or B as a neighbor of A, not both. Listing both A: ["B"] and B: ["A"]
    is undefined behavior (even if it may work)
    """

    # Step 1) Create graph
    adj_list = obj["adj_list"]
    node_map = {}
    floodsub_map = {}
    pubsub_map = {}

    supported_protocols = obj["supported_protocols"]

    tasks_connect = []
    for start_node_id in adj_list:
        # Create node if node does not yet exist
        if start_node_id not in node_map:
            node = await new_node(transport_opt=["/ip4/127.0.0.1/tcp/0"])
            await node.get_network().listen(multiaddr.Multiaddr("/ip4/127.0.0.1/tcp/0"))

            node_map[start_node_id] = node

            floodsub = FloodSub(supported_protocols)
            floodsub_map[start_node_id] = floodsub
            pubsub = Pubsub(node, floodsub, start_node_id)
            pubsub_map[start_node_id] = pubsub

        # For each neighbor of start_node, create the neighbor if it does not
        # yet exist, then connect start_node to the neighbor
        for neighbor_id in adj_list[start_node_id]:
            # Create neighbor if neighbor does not yet exist
            if neighbor_id not in node_map:
                neighbor_node = await new_node(transport_opt=["/ip4/127.0.0.1/tcp/0"])
                await neighbor_node.get_network().listen(multiaddr.Multiaddr("/ip4/127.0.0.1/tcp/0"))

                node_map[neighbor_id] = neighbor_node

                floodsub = FloodSub(supported_protocols)
                floodsub_map[neighbor_id] = floodsub
                pubsub = Pubsub(neighbor_node, floodsub, neighbor_id)
                pubsub_map[neighbor_id] = pubsub

            # Connect node and neighbor
            # await connect(node_map[start_node_id], node_map[neighbor_id])
            tasks_connect.append(asyncio.ensure_future(connect(node_map[start_node_id], node_map[neighbor_id])))
    tasks_connect.append(asyncio.sleep(2))
    await asyncio.gather(*tasks_connect)

    # Allow time for graph creation before continuing
    # await asyncio.sleep(0.25)

    # Step 2) Subscribe to topics
    queues_map = {}
    topic_map = obj["topic_map"]

    tasks_topic = []
    tasks_topic_data = []
    for topic in topic_map:
        for node_id in topic_map[topic]:
            """
            # Subscribe node to topic
            q = await pubsub_map[node_id].subscribe(topic)

            # Create topic-queue map for node_id if one does not yet exist
            if node_id not in queues_map:
                queues_map[node_id] = {}

            # Store queue in topic-queue map for node
            queues_map[node_id][topic] = q
            """
            tasks_topic.append(asyncio.ensure_future(pubsub_map[node_id].subscribe(topic)))
            tasks_topic_data.append((node_id, topic))
    tasks_topic.append(asyncio.sleep(2))

    # Gather is like Promise.all
    responses = await asyncio.gather(*tasks_topic, return_exceptions=True)
    for i in range(len(responses) - 1):
        q = responses[i]
        node_id, topic = tasks_topic_data[i]
        if node_id not in queues_map:
            queues_map[node_id] = {}

        # Store queue in topic-queue map for node
        queues_map[node_id][topic] = q

    # Allow time for subscribing before continuing
    # await asyncio.sleep(0.01)

    # Step 3) Publish messages
    topics_in_msgs_ordered = []
    messages = obj["messages"]
    tasks_publish = []

    all_actual_msgs = {}
    for msg in messages:
        topics = msg["topics"]

        data = msg["data"]
        node_id = msg["node_id"]

        # Get actual id for sender node (not the id from the test obj)
        actual_node_id = str(node_map[node_id].get_id())

        # Create correctly formatted message
        msg_talk = generate_RPC_packet(actual_node_id, topics, data, generate_message_id())

        # Publish message
        # await floodsub_map[node_id].publish(actual_node_id, msg_talk.to_str())
        tasks_publish.append(asyncio.ensure_future(floodsub_map[node_id].publish(\
            actual_node_id, msg_talk.SerializeToString())))

        # For each topic in topics, add (topic, msg_talk) tuple to ordered test list
        # TODO: Update message sender to be correct message sender before
        # adding msg_talk to this list
        for topic in topics:
            if topic in all_actual_msgs:
                all_actual_msgs[topic].append(msg_talk.publish[0].SerializeToString())
            else:
                all_actual_msgs[topic] = [msg_talk.publish[0].SerializeToString()]

    # Allow time for publishing before continuing
    # await asyncio.sleep(0.4)
    tasks_publish.append(asyncio.sleep(2))
    await asyncio.gather(*tasks_publish)

    # Step 4) Check that all messages were received correctly.
    for topic in all_actual_msgs:
        for node_id in topic_map[topic]:
            all_received_msgs_in_topic = []

            # Add all messages to message received list for given node in given topic
            while (queues_map[node_id][topic].qsize() > 0):
                # Get message from subscription queue
                msg_on_node = (await queues_map[node_id][topic].get()).SerializeToString()
                all_received_msgs_in_topic.append(msg_on_node)

            # Ensure each message received was the same as one sent
            for msg_on_node in all_received_msgs_in_topic:
                assert msg_on_node in all_actual_msgs[topic]

            # Ensure same number of messages received as sent
            assert len(all_received_msgs_in_topic) == len(all_actual_msgs[topic])

def generate_test_obj_with_random_params(params):
    return {
        "num_nodes": random.randint(params["min_num_nodes"], params["max_num_nodes"]),
        "density": random.uniform(0.01, params["max_density"]),
        "num_topics": random.randint(1, params["max_num_topics"]),
        "max_nodes_per_topic": random.randint(params["min_max_nodes_per_topic"], params["max_max_nodes_per_topic"]),
        "max_msgs_per_topic": random.randint(params["min_max_msgs_per_topic"], params["max_max_msgs_per_topic"])
    }

def generate_random_topology(num_nodes, density, num_topics, max_nodes_per_topic, max_msgs_per_topic):
    # Give nodes string labels so that perform_test_with_obj works correctly
    # Note: "n" is prepended so that visualizations work properly ('00' caused issues)
    nodes = ["n" + str(i).zfill(2) for i in range(0, num_nodes)]

    # Adjust max_nodes_per_topic if it exceeds the number of nodes
    max_nodes_per_topic = min(max_nodes_per_topic, num_nodes)

    # 1) Generate random graph structure

    # Create initial graph by connecting each node to its previous node
    # This ensures the graph is connected
    graph = {}

    graph[nodes[0]] = []

    max_num_edges = num_nodes * (num_nodes - 1) / 2
    num_edges = 0

    for i in range(1, len(nodes)):
        prev = nodes[i - 1]
        curr = nodes[i]

        graph[curr] = [prev]
        graph[prev].append(curr)
        num_edges += 1

    # Add random edges until density is hit
    while num_edges / max_num_edges < density:
        selected_nodes = random.sample(nodes, 2)

        # Only add the nodes as neighbors if they are not already neighbors
        if selected_nodes[0] not in graph[selected_nodes[1]]:
            graph[selected_nodes[0]].append(selected_nodes[1])
            graph[selected_nodes[1]].append(selected_nodes[0])
            num_edges += 1

    # 2) Pick num_topics random nodes to perform random walks from
    nodes_to_start_topics_from = random.sample(nodes, num_topics)

    nodes_in_topic_list = []
    for node in nodes_to_start_topics_from:
        nodes_walked = []
        curr = node
        nodes_walked.append(curr)

        # TODO: Pick random num of nodes per topic
        while len(nodes_walked) < max_nodes_per_topic:
            # Pick a random neighbor of curr to walk to
            neighbors = graph[curr]
            rand_num = random.randint(0, len(neighbors) - 1)
            neighbor = neighbors[rand_num]
            curr = neighbor
            if curr not in nodes_walked:
                nodes_walked.append(curr)

        nodes_in_topic_list.append(nodes_walked)

    # 3) Start creating test_obj
    test_obj = {"supported_protocols": ["/floodsub/1.0.0"]}
    test_obj["adj_list"] = graph
    test_obj["topic_map"] = {}
    for i in range(len(nodes_in_topic_list)):
        test_obj["topic_map"][str(i)] = nodes_in_topic_list[i]

    # 4) Finish creating test_obj by adding messages at random start nodes in each topic
    test_obj["messages"] = []
    for i in range(len(nodes_in_topic_list)):
        nodes_in_topic = nodes_in_topic_list[i]
        rand_num = random.randint(0, len(nodes_in_topic) - 1)
        start_node = nodes_in_topic[rand_num]
        for j in range(max_msgs_per_topic):
            test_obj["messages"].append({
                "topics": [str(i)],
                "data": str(random.randint(0, 100000)),
                "node_id": str(start_node)
            })

    # 5) Return completed test_obj
    return test_obj

def create_graph(test_obj):
    net = Network()
    net.barnes_hut()

    adj_list = test_obj["adj_list"]
    # print(list(adj_list.keys()))
    nodes_to_add = list(adj_list.keys())
    net.add_nodes(nodes_to_add)
    for node in adj_list:
        neighbors = adj_list[node]
        for neighbor in neighbors:
            net.add_edge(node, neighbor)

    net.show("random_topology.html")

@pytest.mark.asyncio
async def test_simple_random():
    num_nodes = 5
    density = 1
    num_topics = 2
    max_nodes_per_topic = 5
    max_msgs_per_topic = 5
    print("Generating random topology")
    topology_test_obj = generate_random_topology(num_nodes, density, num_topics, \
        max_nodes_per_topic, max_msgs_per_topic)

    print("*****Topology Summary*****")
    print("# nodes: " + str(num_nodes))
    print("Density: " + str(density))
    print("# topics: " + str(num_topics))
    print("Nodes per topic: " + str(max_nodes_per_topic))
    print("Msgs per topic: " + str(max_msgs_per_topic))
    print("**************************")

    print("Performing Test")
    await perform_test_from_obj(topology_test_obj, timeout_len=20)
    print("Test Completed")
    print("Generating Graph")
    create_graph(topology_test_obj)
    print("Graph Generated")

    # Success, terminate pending tasks.
    await cleanup()

@pytest.mark.asyncio
async def test_random_10():
    min_num_nodes = 8
    max_num_nodes = 10
    max_density = 0.4
    max_num_topics = 5
    min_max_nodes_per_topic = 10
    max_max_nodes_per_topic = 20
    min_max_msgs_per_topic = 10
    max_max_msgs_per_topic = 20

    num_random_tests = 10

    summaries = []

    params_to_generate_random_params = {
        "min_num_nodes": min_num_nodes,
        "max_num_nodes": max_num_nodes,
        "max_density": max_density,
        "max_num_topics": max_num_topics,
        "min_max_nodes_per_topic": min_max_nodes_per_topic,
        "max_max_nodes_per_topic": max_max_nodes_per_topic,
        "min_max_msgs_per_topic": min_max_msgs_per_topic,
        "max_max_msgs_per_topic": max_max_msgs_per_topic
    }

    for i in range(0, num_random_tests):
        random_params = generate_test_obj_with_random_params(params_to_generate_random_params)

        print("Generating random topology")
        topology_test_obj = generate_random_topology(random_params["num_nodes"], random_params["density"], \
            random_params["num_topics"], random_params["max_nodes_per_topic"], \
            random_params["max_msgs_per_topic"])

        summary = {
            "num_nodes": random_params["num_nodes"],
            "density": random_params["density"],
            "num_topics": random_params["num_topics"],
            "nodes_per_topics": random_params["max_nodes_per_topic"],
            "msgs_per_topics": random_params["max_msgs_per_topic"]
        }
        summaries.append(pprint.pformat(summary, indent=4))

        print("Performing Test")
        await perform_test_from_obj(topology_test_obj, timeout_len=30)
        print("Test Completed")
        # print("Generating Graph")
        # create_graph(topology_test_obj)
        # print("Graph Generated")
        print("***Test " + str(i + 1) + "/" + str(num_random_tests) + " Completed***")

    with open('summaries.rand_test', 'a') as out_file:
        out_file.write(pprint.pformat(summaries, indent=4))

    # Success, terminate pending tasks.
    await cleanup()
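For reference, a minimal hand-written test obj matching the schema documented in perform_test_from_obj (the node and topic names here are arbitrary):

minimal_test_obj = {
    "supported_protocols": ["/floodsub/1.0.0"],
    "adj_list": {
        "n00": ["n01"]  # n01 is created implicitly as n00's neighbor
    },
    "topic_map": {
        "0": ["n00", "n01"]  # both nodes subscribe to topic "0"
    },
    "messages": [
        {"topics": ["0"], "data": "42", "node_id": "n00"}
    ]
}

# Usage inside an async test: await perform_test_from_obj(minimal_test_obj, timeout_len=2)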