py-libp2p/tests/libp2p/test_notify.py

"""
Test Notify and Notifee by ensuring that the proper events get
2019-03-01 08:17:11 +08:00
called, and that the stream passed into opened_stream is correct
2019-03-01 08:08:13 +08:00
Note: Listen event does not get hit because MyNotifee is passed
into network after network has already started listening
2019-03-01 08:17:11 +08:00
TODO: Add tests for closed_stream disconnected, listen_close when those
features are implemented in swarm
2019-03-01 08:08:13 +08:00
"""
2019-03-01 08:17:11 +08:00
2019-03-03 20:54:04 +08:00
import pytest
import multiaddr

from tests.utils import cleanup, echo_stream_handler, \
    perform_two_host_set_up_custom_handler

from libp2p import new_node, initialize_default_swarm
from libp2p.network.notifee_interface import INotifee
from libp2p.host.basic_host import BasicHost

# pylint: disable=too-many-locals


class MyNotifee(INotifee):
# pylint: disable=too-many-instance-attributes, cell-var-from-loop
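    """
    Notifee that records every event it receives as
    [<event name> + <suffix>, <object passed to the callback>]
    in the shared events list, so tests can assert on both the order
    of events and the identity of the stream/conn involved.
    """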
    def __init__(self, events, val_to_append_to_event):
        self.events = events
        self.val_to_append_to_event = val_to_append_to_event

    async def opened_stream(self, network, stream):
        self.events.append(["opened_stream" + \
            self.val_to_append_to_event, stream])

    async def closed_stream(self, network, stream):
        pass

    async def connected(self, network, conn):
        self.events.append(["connected" + self.val_to_append_to_event, \
            conn])

    async def disconnected(self, network, conn):
        pass

    async def listen(self, network, _multiaddr):
        self.events.append(["listened" + self.val_to_append_to_event, \
            _multiaddr])

    async def listen_close(self, network, _multiaddr):
        pass


class InvalidNotifee():
# pylint: disable=too-many-instance-attributes, cell-var-from-loop
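    """
    Does not implement INotifee; every callback simply asserts False,
    so the test fails if the swarm ever invokes it. Used to verify that
    notify() rejects objects that do not implement the interface.
    """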
    def __init__(self):
        pass

    async def opened_stream(self):
        assert False

    async def closed_stream(self):
        assert False

    async def connected(self):
        assert False

    async def disconnected(self):
        assert False

    async def listen(self):
        assert False


async def perform_two_host_simple_set_up():
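    """
    Create two hosts on ephemeral TCP ports, install an inline
    "/echo/1.0.0" ack handler on node_b, and point node_a's peerstore
    at node_b's addresses.
    """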
    node_a = await new_node(transport_opt=["/ip4/127.0.0.1/tcp/0"])
    node_b = await new_node(transport_opt=["/ip4/127.0.0.1/tcp/0"])

    async def my_stream_handler(stream):
        while True:
            read_string = (await stream.read()).decode()
            resp = "ack:" + read_string
            await stream.write(resp.encode())

    node_b.set_stream_handler("/echo/1.0.0", my_stream_handler)

    # Associate the peer with local ip address (see default parameters of Libp2p())
    node_a.get_peerstore().add_addrs(node_b.get_id(), node_b.get_addrs(), 10)

    return node_a, node_b


async def perform_two_host_simple_set_up_custom_handler(handler):
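    """
    Same two-host setup as above, but the "/echo/1.0.0" handler on
    node_b is supplied by the caller.
    """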
    node_a = await new_node(transport_opt=["/ip4/127.0.0.1/tcp/0"])
    node_b = await new_node(transport_opt=["/ip4/127.0.0.1/tcp/0"])

    node_b.set_stream_handler("/echo/1.0.0", handler)

    # Associate the peer with local ip address (see default parameters of Libp2p())
    node_a.get_peerstore().add_addrs(node_b.get_id(), node_b.get_addrs(), 10)

    return node_a, node_b


@pytest.mark.asyncio
async def test_one_notifier():
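    """
    Register a single MyNotifee on node_a, open a stream to node_b's
    echo handler, and check that connected and opened_stream fired in
    order with the matching conn and stream.
    """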
node_a, node_b = await perform_two_host_set_up_custom_handler(echo_stream_handler)
# Add notifee for node_a
events = []
assert node_a.get_network().notify(MyNotifee(events, "0"))
stream = await node_a.new_stream(node_b.get_id(), ["/echo/1.0.0"])
# Ensure the connected and opened_stream events were hit in MyNotifee obj
# and that stream passed into opened_stream matches the stream created on
# node_a
assert events == [["connected0", stream.mplex_conn], \
["opened_stream0", stream]]
messages = ["hello", "hello"]
for message in messages:
await stream.write(message.encode())
response = (await stream.read()).decode()
assert response == ("ack:" + message)
# Success, terminate pending tasks.
    await cleanup()


@pytest.mark.asyncio
async def test_one_notifier_on_two_nodes():
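    """
    Register a MyNotifee on each host; node_b's events are checked
    inside its stream handler, node_a's after the stream is opened.
    """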
events_b = []
async def my_stream_handler(stream):
# Ensure the connected and opened_stream events were hit in Notifee obj
# and that the stream passed into opened_stream matches the stream created on
# node_b
assert events_b == [["connectedb", stream.mplex_conn], \
["opened_streamb", stream]]
while True:
read_string = (await stream.read()).decode()
resp = "ack:" + read_string
            await stream.write(resp.encode())

    node_a, node_b = await perform_two_host_set_up_custom_handler(my_stream_handler)

    # Add notifee for node_a
events_a = []
assert node_a.get_network().notify(MyNotifee(events_a, "a"))
# Add notifee for node_b
assert node_b.get_network().notify(MyNotifee(events_b, "b"))
stream = await node_a.new_stream(node_b.get_id(), ["/echo/1.0.0"])
# Ensure the connected and opened_stream events were hit in MyNotifee obj
# and that stream passed into opened_stream matches the stream created on
# node_a
assert events_a == [["connecteda", stream.mplex_conn], \
["opened_streama", stream]]
messages = ["hello", "hello"]
for message in messages:
await stream.write(message.encode())
response = (await stream.read()).decode()
assert response == ("ack:" + message)
# Success, terminate pending tasks.
    await cleanup()


@pytest.mark.asyncio
async def test_one_notifier_on_two_nodes_with_listen():
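    """
    Build node_b from a bare swarm so its notifee can be registered
    before listen() is called; this is why the listened event appears
    in events_b here.
    """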
events_b = []
node_a_transport_opt = ["/ip4/127.0.0.1/tcp/0"]
node_a = await new_node(transport_opt=node_a_transport_opt)
await node_a.get_network().listen(multiaddr.Multiaddr(node_a_transport_opt[0]))
# Set up node_b swarm to pass into host
node_b_transport_opt = ["/ip4/127.0.0.1/tcp/0"]
node_b_multiaddr = multiaddr.Multiaddr(node_b_transport_opt[0])
node_b_swarm = initialize_default_swarm(transport_opt=node_b_transport_opt)
node_b = BasicHost(node_b_swarm)
async def my_stream_handler(stream):
# Ensure the listened, connected and opened_stream events were hit in Notifee obj
# and that the stream passed into opened_stream matches the stream created on
# node_b
assert events_b == [
["listenedb", node_b_multiaddr], \
["connectedb", stream.mplex_conn], \
["opened_streamb", stream]
]
while True:
read_string = (await stream.read()).decode()
resp = "ack:" + read_string
            await stream.write(resp.encode())

    # Add notifee for node_a
events_a = []
assert node_a.get_network().notify(MyNotifee(events_a, "a"))
# Add notifee for node_b
assert node_b.get_network().notify(MyNotifee(events_b, "b"))
# start listen on node_b_swarm
await node_b.get_network().listen(node_b_multiaddr)
node_b.set_stream_handler("/echo/1.0.0", my_stream_handler)
# Associate the peer with local ip address (see default parameters of Libp2p())
node_a.get_peerstore().add_addrs(node_b.get_id(), node_b.get_addrs(), 10)
stream = await node_a.new_stream(node_b.get_id(), ["/echo/1.0.0"])
# Ensure the connected and opened_stream events were hit in MyNotifee obj
# and that stream passed into opened_stream matches the stream created on
# node_a
assert events_a == [
["connecteda", stream.mplex_conn], \
["opened_streama", stream]
]
messages = ["hello", "hello"]
for message in messages:
await stream.write(message.encode())
response = (await stream.read()).decode()
assert response == ("ack:" + message)
# Success, terminate pending tasks.
    await cleanup()


@pytest.mark.asyncio
async def test_two_notifiers():
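    """
    Register two MyNotifee instances on node_a and check that both
    record the same connected/opened_stream sequence.
    """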
node_a, node_b = await perform_two_host_set_up_custom_handler(echo_stream_handler)
# Add notifee for node_a
events0 = []
assert node_a.get_network().notify(MyNotifee(events0, "0"))
events1 = []
assert node_a.get_network().notify(MyNotifee(events1, "1"))
stream = await node_a.new_stream(node_b.get_id(), ["/echo/1.0.0"])
# Ensure the connected and opened_stream events were hit in both Notifee objs
# and that the stream passed into opened_stream matches the stream created on
# node_a
assert events0 == [["connected0", stream.mplex_conn], ["opened_stream0", stream]]
assert events1 == [["connected1", stream.mplex_conn], ["opened_stream1", stream]]
messages = ["hello", "hello"]
for message in messages:
await stream.write(message.encode())
response = (await stream.read()).decode()
assert response == ("ack:" + message)
# Success, terminate pending tasks.
    await cleanup()


@pytest.mark.asyncio
async def test_ten_notifiers():
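    """
    Same as test_two_notifiers, but with ten notifees on node_a.
    """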
num_notifiers = 10
node_a, node_b = await perform_two_host_set_up_custom_handler(echo_stream_handler)
# Add notifee for node_a
events_lst = []
for i in range(num_notifiers):
events_lst.append([])
assert node_a.get_network().notify(MyNotifee(events_lst[i], str(i)))
stream = await node_a.new_stream(node_b.get_id(), ["/echo/1.0.0"])
# Ensure the connected and opened_stream events were hit in both Notifee objs
# and that the stream passed into opened_stream matches the stream created on
# node_a
for i in range(num_notifiers):
        assert events_lst[i] == [["connected" + str(i), stream.mplex_conn], \
            ["opened_stream" + str(i), stream]]
messages = ["hello", "hello"]
for message in messages:
await stream.write(message.encode())
response = (await stream.read()).decode()
assert response == ("ack:" + message)
# Success, terminate pending tasks.
    await cleanup()


@pytest.mark.asyncio
async def test_ten_notifiers_on_two_nodes():
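    """
    Register ten MyNotifee instances on each host; node_b's are checked
    inside its stream handler, node_a's after the stream is opened.
    """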
num_notifiers = 10
events_lst_b = []
async def my_stream_handler(stream):
# Ensure the connected and opened_stream events were hit in all Notifee objs
# and that the stream passed into opened_stream matches the stream created on
# node_b
for i in range(num_notifiers):
assert events_lst_b[i] == [["connectedb" + str(i), stream.mplex_conn], \
["opened_streamb" + str(i), stream]]
while True:
read_string = (await stream.read()).decode()
resp = "ack:" + read_string
            await stream.write(resp.encode())

    node_a, node_b = await perform_two_host_set_up_custom_handler(my_stream_handler)

    # Add notifee for node_a and node_b
events_lst_a = []
for i in range(num_notifiers):
events_lst_a.append([])
events_lst_b.append([])
assert node_a.get_network().notify(MyNotifee(events_lst_a[i], "a" + str(i)))
assert node_b.get_network().notify(MyNotifee(events_lst_b[i], "b" + str(i)))
stream = await node_a.new_stream(node_b.get_id(), ["/echo/1.0.0"])
# Ensure the connected and opened_stream events were hit in all Notifee objs
# and that the stream passed into opened_stream matches the stream created on
# node_a
for i in range(num_notifiers):
assert events_lst_a[i] == [["connecteda" + str(i), stream.mplex_conn], \
["opened_streama" + str(i), stream]]
messages = ["hello", "hello"]
for message in messages:
await stream.write(message.encode())
response = (await stream.read()).decode()
assert response == ("ack:" + message)
# Success, terminate pending tasks.
    await cleanup()


@pytest.mark.asyncio
async def test_invalid_notifee():
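    """
    notify() should return a falsy value for an object that does not
    implement INotifee, and none of InvalidNotifee's callbacks (which
    all assert False) should ever run.
    """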
num_notifiers = 10
node_a, node_b = await perform_two_host_set_up_custom_handler(echo_stream_handler)
# Add notifee for node_a
events_lst = []
for _ in range(num_notifiers):
events_lst.append([])
assert not node_a.get_network().notify(InvalidNotifee())
stream = await node_a.new_stream(node_b.get_id(), ["/echo/1.0.0"])
    # If this point is reached, none of InvalidNotifee's callbacks were invoked
    # (each one would assert False), which is correct given that InvalidNotifee
    # should never have been added as a notifee.
messages = ["hello", "hello"]
for message in messages:
await stream.write(message.encode())
response = (await stream.read()).decode()
assert response == ("ack:" + message)
# Success, terminate pending tasks.
await cleanup()