Fix tests in protocol_muxer and libp2p

mhchia 2019-12-01 17:43:14 +08:00
parent 62e47080f5
commit 31bf774a16
6 changed files with 325 additions and 398 deletions

View File

@@ -1,4 +1,4 @@
-from typing import List, Sequence, Tuple
+from typing import Callable, List, Sequence, Tuple

 import multiaddr
 import trio
@@ -12,7 +12,6 @@ from libp2p.network.swarm import Swarm
 from libp2p.peer.peerinfo import info_from_p2p_addr
 from libp2p.routing.interfaces import IPeerRouting
 from libp2p.routing.kademlia.kademlia_peer_router import KadmeliaPeerRouter
-from libp2p.typing import StreamHandlerFn, TProtocol

 from .constants import MAX_READ_LEN
@@ -79,22 +78,12 @@ async def set_up_routers(
     return routers


-async def echo_stream_handler(stream: INetStream) -> None:
-    while True:
-        read_string = (await stream.read(MAX_READ_LEN)).decode()
-        resp = "ack:" + read_string
-        await stream.write(resp.encode())
-
-
-async def perform_two_host_set_up(
-    handler: StreamHandlerFn = echo_stream_handler
-) -> Tuple[BasicHost, BasicHost]:
-    transport_opt_list = [["/ip4/127.0.0.1/tcp/0"], ["/ip4/127.0.0.1/tcp/0"]]
-    (node_a, node_b) = await set_up_nodes_by_transport_opt(transport_opt_list)
-    node_b.set_stream_handler(TProtocol("/echo/1.0.0"), handler)
-    # Associate the peer with local ip address (see default parameters of Libp2p())
-    node_a.get_peerstore().add_addrs(node_b.get_id(), node_b.get_addrs(), 10)
-    return node_a, node_b
+def create_echo_stream_handler(ack_prefix: str) -> Callable[[INetStream], None]:
+    async def echo_stream_handler(stream: INetStream) -> None:
+        while True:
+            read_string = (await stream.read(MAX_READ_LEN)).decode()
+            resp = ack_prefix + read_string
+            await stream.write(resp.encode())
+
+    return echo_stream_handler
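
For orientation, a minimal usage sketch of the new factory follows; the helper name, protocol ID, and ack prefix below are illustrative and not taken from this commit:

    from libp2p.host.basic_host import BasicHost
    from libp2p.tools.utils import create_echo_stream_handler
    from libp2p.typing import TProtocol


    def register_echo_handler(host: BasicHost, ack_prefix: str) -> None:
        # Hypothetical helper: each test can now pick its own ack prefix
        # instead of the fixed "ack:" baked into the old echo_stream_handler.
        handler = create_echo_stream_handler(ack_prefix)
        host.set_stream_handler(TProtocol("/echo/1.0.0"), handler)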

View File

@@ -1,107 +1,92 @@
 import multiaddr
 import pytest
+import trio

 from libp2p.peer.peerinfo import info_from_p2p_addr
 from libp2p.tools.constants import MAX_READ_LEN
-from libp2p.tools.utils import set_up_nodes_by_transport_opt
+from libp2p.tools.factories import HostFactory
+from libp2p.tools.utils import create_echo_stream_handler
+from libp2p.typing import TProtocol
+
+PROTOCOL_ID_0 = TProtocol("/echo/0")
+PROTOCOL_ID_1 = TProtocol("/echo/1")
+PROTOCOL_ID_2 = TProtocol("/echo/2")
+PROTOCOL_ID_3 = TProtocol("/echo/3")
+
+ACK_STR_0 = "ack_0:"
+ACK_STR_1 = "ack_1:"
+ACK_STR_2 = "ack_2:"
+ACK_STR_3 = "ack_3:"


 @pytest.mark.trio
-async def test_simple_messages(nursery):
-    transport_opt_list = [["/ip4/127.0.0.1/tcp/0"], ["/ip4/127.0.0.1/tcp/0"]]
-    (node_a, node_b) = await set_up_nodes_by_transport_opt(transport_opt_list, nursery)
-
-    async def stream_handler(stream):
-        while True:
-            read_string = (await stream.read(MAX_READ_LEN)).decode()
-            response = "ack:" + read_string
-            await stream.write(response.encode())
-
-    node_b.set_stream_handler("/echo/1.0.0", stream_handler)
+async def test_simple_messages(is_host_secure):
+    async with HostFactory.create_batch_and_listen(is_host_secure, 2) as hosts:
+        hosts[1].set_stream_handler(
+            PROTOCOL_ID_0, create_echo_stream_handler(ACK_STR_0)
+        )

         # Associate the peer with local ip address (see default parameters of Libp2p())
-    node_a.get_peerstore().add_addrs(node_b.get_id(), node_b.get_addrs(), 10)
+        hosts[0].get_peerstore().add_addrs(hosts[1].get_id(), hosts[1].get_addrs(), 10)

-    stream = await node_a.new_stream(node_b.get_id(), ["/echo/1.0.0"])
+        stream = await hosts[0].new_stream(hosts[1].get_id(), [PROTOCOL_ID_0])

         messages = ["hello" + str(x) for x in range(10)]
         for message in messages:
             await stream.write(message.encode())

             response = (await stream.read(MAX_READ_LEN)).decode()

-        assert response == ("ack:" + message)
-
-    # Success, terminate pending tasks.
+            assert response == (ACK_STR_0 + message)


-@pytest.mark.asyncio
-async def test_double_response():
-    transport_opt_list = [["/ip4/127.0.0.1/tcp/0"], ["/ip4/127.0.0.1/tcp/0"]]
-    (node_a, node_b) = await set_up_nodes_by_transport_opt(transport_opt_list)
-
-    async def stream_handler(stream):
+@pytest.mark.trio
+async def test_double_response(is_host_secure):
+    async with HostFactory.create_batch_and_listen(is_host_secure, 2) as hosts:
+
+        async def double_response_stream_handler(stream):
             while True:
                 read_string = (await stream.read(MAX_READ_LEN)).decode()
-            response = "ack1:" + read_string
+                response = ACK_STR_0 + read_string
                 await stream.write(response.encode())
-            response = "ack2:" + read_string
+                response = ACK_STR_1 + read_string
                 await stream.write(response.encode())

-    node_b.set_stream_handler("/echo/1.0.0", stream_handler)
+        hosts[1].set_stream_handler(PROTOCOL_ID_0, double_response_stream_handler)

         # Associate the peer with local ip address (see default parameters of Libp2p())
-    node_a.get_peerstore().add_addrs(node_b.get_id(), node_b.get_addrs(), 10)
+        hosts[0].get_peerstore().add_addrs(hosts[1].get_id(), hosts[1].get_addrs(), 10)

-    stream = await node_a.new_stream(node_b.get_id(), ["/echo/1.0.0"])
+        stream = await hosts[0].new_stream(hosts[1].get_id(), [PROTOCOL_ID_0])

         messages = ["hello" + str(x) for x in range(10)]
         for message in messages:
             await stream.write(message.encode())

             response1 = (await stream.read(MAX_READ_LEN)).decode()
-        assert response1 == ("ack1:" + message)
+            assert response1 == (ACK_STR_0 + message)

             response2 = (await stream.read(MAX_READ_LEN)).decode()
-        assert response2 == ("ack2:" + message)
-
-    # Success, terminate pending tasks.
+            assert response2 == (ACK_STR_1 + message)


-@pytest.mark.asyncio
-async def test_multiple_streams():
-    # Node A should be able to open a stream with node B and then vice versa.
+@pytest.mark.trio
+async def test_multiple_streams(is_host_secure):
+    # hosts[0] should be able to open a stream with hosts[1] and then vice versa.
     # Stream IDs should be generated uniquely so that the stream state is not overwritten
-    transport_opt_list = [["/ip4/127.0.0.1/tcp/0"], ["/ip4/127.0.0.1/tcp/0"]]
-    (node_a, node_b) = await set_up_nodes_by_transport_opt(transport_opt_list)
-
-    async def stream_handler_a(stream):
-        while True:
-            read_string = (await stream.read(MAX_READ_LEN)).decode()
-
-            response = "ack_a:" + read_string
-            await stream.write(response.encode())
-
-    async def stream_handler_b(stream):
-        while True:
-            read_string = (await stream.read(MAX_READ_LEN)).decode()
-            response = "ack_b:" + read_string
-            await stream.write(response.encode())
-
-    node_a.set_stream_handler("/echo_a/1.0.0", stream_handler_a)
-    node_b.set_stream_handler("/echo_b/1.0.0", stream_handler_b)
+    async with HostFactory.create_batch_and_listen(is_host_secure, 2) as hosts:
+        hosts[0].set_stream_handler(
+            PROTOCOL_ID_0, create_echo_stream_handler(ACK_STR_0)
+        )
+        hosts[1].set_stream_handler(
+            PROTOCOL_ID_1, create_echo_stream_handler(ACK_STR_1)
+        )

         # Associate the peer with local ip address (see default parameters of Libp2p())
-    node_a.get_peerstore().add_addrs(node_b.get_id(), node_b.get_addrs(), 10)
-    node_b.get_peerstore().add_addrs(node_a.get_id(), node_a.get_addrs(), 10)
+        hosts[0].get_peerstore().add_addrs(hosts[1].get_id(), hosts[1].get_addrs(), 10)
+        hosts[1].get_peerstore().add_addrs(hosts[0].get_id(), hosts[0].get_addrs(), 10)

-    stream_a = await node_a.new_stream(node_b.get_id(), ["/echo_b/1.0.0"])
-    stream_b = await node_b.new_stream(node_a.get_id(), ["/echo_a/1.0.0"])
+        stream_a = await hosts[0].new_stream(hosts[1].get_id(), [PROTOCOL_ID_1])
+        stream_b = await hosts[1].new_stream(hosts[0].get_id(), [PROTOCOL_ID_0])

         # A writes to /echo_b via stream_a, and B writes to /echo_a via stream_b
         messages = ["hello" + str(x) for x in range(10)]
@@ -115,51 +100,33 @@ async def test_multiple_streams():
         response_a = (await stream_a.read(MAX_READ_LEN)).decode()
         response_b = (await stream_b.read(MAX_READ_LEN)).decode()
-        assert response_a == ("ack_b:" + a_message) and response_b == (
-            "ack_a:" + b_message
+        assert response_a == (ACK_STR_1 + a_message) and response_b == (
+            ACK_STR_0 + b_message
         )

-    # Success, terminate pending tasks.
-
-
-@pytest.mark.asyncio
-async def test_multiple_streams_same_initiator_different_protocols():
-    transport_opt_list = [["/ip4/127.0.0.1/tcp/0"], ["/ip4/127.0.0.1/tcp/0"]]
-    (node_a, node_b) = await set_up_nodes_by_transport_opt(transport_opt_list)
-
-    async def stream_handler_a1(stream):
-        while True:
-            read_string = (await stream.read(MAX_READ_LEN)).decode()
-
-            response = "ack_a1:" + read_string
-            await stream.write(response.encode())
-
-    async def stream_handler_a2(stream):
-        while True:
-            read_string = (await stream.read(MAX_READ_LEN)).decode()
-            response = "ack_a2:" + read_string
-            await stream.write(response.encode())
-
-    async def stream_handler_a3(stream):
-        while True:
-            read_string = (await stream.read(MAX_READ_LEN)).decode()
-            response = "ack_a3:" + read_string
-            await stream.write(response.encode())
-
-    node_b.set_stream_handler("/echo_a1/1.0.0", stream_handler_a1)
-    node_b.set_stream_handler("/echo_a2/1.0.0", stream_handler_a2)
-    node_b.set_stream_handler("/echo_a3/1.0.0", stream_handler_a3)
+
+@pytest.mark.trio
+async def test_multiple_streams_same_initiator_different_protocols(is_host_secure):
+    async with HostFactory.create_batch_and_listen(is_host_secure, 2) as hosts:
+        hosts[1].set_stream_handler(
+            PROTOCOL_ID_0, create_echo_stream_handler(ACK_STR_0)
+        )
+        hosts[1].set_stream_handler(
+            PROTOCOL_ID_1, create_echo_stream_handler(ACK_STR_1)
+        )
+        hosts[1].set_stream_handler(
+            PROTOCOL_ID_2, create_echo_stream_handler(ACK_STR_2)
+        )

         # Associate the peer with local ip address (see default parameters of Libp2p())
-    node_a.get_peerstore().add_addrs(node_b.get_id(), node_b.get_addrs(), 10)
-    node_b.get_peerstore().add_addrs(node_a.get_id(), node_a.get_addrs(), 10)
-
-    # Open streams to node_b over echo_a1 echo_a2 echo_a3 protocols
-    stream_a1 = await node_a.new_stream(node_b.get_id(), ["/echo_a1/1.0.0"])
-    stream_a2 = await node_a.new_stream(node_b.get_id(), ["/echo_a2/1.0.0"])
-    stream_a3 = await node_a.new_stream(node_b.get_id(), ["/echo_a3/1.0.0"])
+        hosts[0].get_peerstore().add_addrs(hosts[1].get_id(), hosts[1].get_addrs(), 10)
+        hosts[1].get_peerstore().add_addrs(hosts[0].get_id(), hosts[0].get_addrs(), 10)
+
+        # Open streams to hosts[1] over echo_a1 echo_a2 echo_a3 protocols
+        stream_a1 = await hosts[0].new_stream(hosts[1].get_id(), [PROTOCOL_ID_0])
+        stream_a2 = await hosts[0].new_stream(hosts[1].get_id(), [PROTOCOL_ID_1])
+        stream_a3 = await hosts[0].new_stream(hosts[1].get_id(), [PROTOCOL_ID_2])

         messages = ["hello" + str(x) for x in range(10)]
         for message in messages:
@@ -176,62 +143,40 @@ async def test_multiple_streams_same_initiator_different_protocols():
         response_a3 = (await stream_a3.read(MAX_READ_LEN)).decode()

         assert (
-        response_a1 == ("ack_a1:" + a1_message)
-        and response_a2 == ("ack_a2:" + a2_message)
-        and response_a3 == ("ack_a3:" + a3_message)
+            response_a1 == (ACK_STR_0 + a1_message)
+            and response_a2 == (ACK_STR_1 + a2_message)
+            and response_a3 == (ACK_STR_2 + a3_message)
         )

         # Success, terminate pending tasks.


-@pytest.mark.asyncio
-async def test_multiple_streams_two_initiators():
-    transport_opt_list = [["/ip4/127.0.0.1/tcp/0"], ["/ip4/127.0.0.1/tcp/0"]]
-    (node_a, node_b) = await set_up_nodes_by_transport_opt(transport_opt_list)
-
-    async def stream_handler_a1(stream):
-        while True:
-            read_string = (await stream.read(MAX_READ_LEN)).decode()
-
-            response = "ack_a1:" + read_string
-            await stream.write(response.encode())
-
-    async def stream_handler_a2(stream):
-        while True:
-            read_string = (await stream.read(MAX_READ_LEN)).decode()
-            response = "ack_a2:" + read_string
-            await stream.write(response.encode())
-
-    async def stream_handler_b1(stream):
-        while True:
-            read_string = (await stream.read(MAX_READ_LEN)).decode()
-            response = "ack_b1:" + read_string
-            await stream.write(response.encode())
-
-    async def stream_handler_b2(stream):
-        while True:
-            read_string = (await stream.read(MAX_READ_LEN)).decode()
-            response = "ack_b2:" + read_string
-            await stream.write(response.encode())
-
-    node_a.set_stream_handler("/echo_b1/1.0.0", stream_handler_b1)
-    node_a.set_stream_handler("/echo_b2/1.0.0", stream_handler_b2)
-    node_b.set_stream_handler("/echo_a1/1.0.0", stream_handler_a1)
-    node_b.set_stream_handler("/echo_a2/1.0.0", stream_handler_a2)
+@pytest.mark.trio
+async def test_multiple_streams_two_initiators(is_host_secure):
+    async with HostFactory.create_batch_and_listen(is_host_secure, 2) as hosts:
+        hosts[0].set_stream_handler(
+            PROTOCOL_ID_2, create_echo_stream_handler(ACK_STR_2)
+        )
+        hosts[0].set_stream_handler(
+            PROTOCOL_ID_3, create_echo_stream_handler(ACK_STR_3)
+        )
+        hosts[1].set_stream_handler(
+            PROTOCOL_ID_0, create_echo_stream_handler(ACK_STR_0)
+        )
+        hosts[1].set_stream_handler(
+            PROTOCOL_ID_1, create_echo_stream_handler(ACK_STR_1)
+        )

         # Associate the peer with local ip address (see default parameters of Libp2p())
-    node_a.get_peerstore().add_addrs(node_b.get_id(), node_b.get_addrs(), 10)
-    node_b.get_peerstore().add_addrs(node_a.get_id(), node_a.get_addrs(), 10)
-
-    stream_a1 = await node_a.new_stream(node_b.get_id(), ["/echo_a1/1.0.0"])
-    stream_a2 = await node_a.new_stream(node_b.get_id(), ["/echo_a2/1.0.0"])
-    stream_b1 = await node_b.new_stream(node_a.get_id(), ["/echo_b1/1.0.0"])
-    stream_b2 = await node_b.new_stream(node_a.get_id(), ["/echo_b2/1.0.0"])
+        hosts[0].get_peerstore().add_addrs(hosts[1].get_id(), hosts[1].get_addrs(), 10)
+        hosts[1].get_peerstore().add_addrs(hosts[0].get_id(), hosts[0].get_addrs(), 10)
+
+        stream_a1 = await hosts[0].new_stream(hosts[1].get_id(), [PROTOCOL_ID_0])
+        stream_a2 = await hosts[0].new_stream(hosts[1].get_id(), [PROTOCOL_ID_1])
+        stream_b1 = await hosts[1].new_stream(hosts[0].get_id(), [PROTOCOL_ID_2])
+        stream_b2 = await hosts[1].new_stream(hosts[0].get_id(), [PROTOCOL_ID_3])

         # A writes to /echo_b via stream_a, and B writes to /echo_a via stream_b
         messages = ["hello" + str(x) for x in range(10)]
@@ -255,97 +200,81 @@ async def test_multiple_streams_two_initiators():
         response_b2 = (await stream_b2.read(MAX_READ_LEN)).decode()

         assert (
-        response_a1 == ("ack_a1:" + a1_message)
-        and response_a2 == ("ack_a2:" + a2_message)
-        and response_b1 == ("ack_b1:" + b1_message)
-        and response_b2 == ("ack_b2:" + b2_message)
+            response_a1 == (ACK_STR_0 + a1_message)
+            and response_a2 == (ACK_STR_1 + a2_message)
+            and response_b1 == (ACK_STR_2 + b1_message)
+            and response_b2 == (ACK_STR_3 + b2_message)
         )

-    # Success, terminate pending tasks.
-
-
-@pytest.mark.asyncio
-async def test_triangle_nodes_connection():
-    transport_opt_list = [
-        ["/ip4/127.0.0.1/tcp/0"],
-        ["/ip4/127.0.0.1/tcp/0"],
-        ["/ip4/127.0.0.1/tcp/0"],
-    ]
-    (node_a, node_b, node_c) = await set_up_nodes_by_transport_opt(transport_opt_list)
-
-    async def stream_handler(stream):
-        while True:
-            read_string = (await stream.read(MAX_READ_LEN)).decode()
-            response = "ack:" + read_string
-            await stream.write(response.encode())
-
-    node_a.set_stream_handler("/echo/1.0.0", stream_handler)
-    node_b.set_stream_handler("/echo/1.0.0", stream_handler)
-    node_c.set_stream_handler("/echo/1.0.0", stream_handler)
+
+@pytest.mark.trio
+async def test_triangle_nodes_connection(is_host_secure):
+    async with HostFactory.create_batch_and_listen(is_host_secure, 3) as hosts:
+        hosts[0].set_stream_handler(
+            PROTOCOL_ID_0, create_echo_stream_handler(ACK_STR_0)
+        )
+        hosts[1].set_stream_handler(
+            PROTOCOL_ID_0, create_echo_stream_handler(ACK_STR_0)
+        )
+        hosts[2].set_stream_handler(
+            PROTOCOL_ID_0, create_echo_stream_handler(ACK_STR_0)
+        )

         # Associate the peer with local ip address (see default parameters of Libp2p())
         # Associate all permutations
-    node_a.get_peerstore().add_addrs(node_b.get_id(), node_b.get_addrs(), 10)
-    node_a.get_peerstore().add_addrs(node_c.get_id(), node_c.get_addrs(), 10)
-    node_b.get_peerstore().add_addrs(node_a.get_id(), node_a.get_addrs(), 10)
-    node_b.get_peerstore().add_addrs(node_c.get_id(), node_c.get_addrs(), 10)
-    node_c.get_peerstore().add_addrs(node_a.get_id(), node_a.get_addrs(), 10)
-    node_c.get_peerstore().add_addrs(node_b.get_id(), node_b.get_addrs(), 10)
-
-    stream_a_to_b = await node_a.new_stream(node_b.get_id(), ["/echo/1.0.0"])
-    stream_a_to_c = await node_a.new_stream(node_c.get_id(), ["/echo/1.0.0"])
-    stream_b_to_a = await node_b.new_stream(node_a.get_id(), ["/echo/1.0.0"])
-    stream_b_to_c = await node_b.new_stream(node_c.get_id(), ["/echo/1.0.0"])
-    stream_c_to_a = await node_c.new_stream(node_a.get_id(), ["/echo/1.0.0"])
-    stream_c_to_b = await node_c.new_stream(node_b.get_id(), ["/echo/1.0.0"])
+        hosts[0].get_peerstore().add_addrs(hosts[1].get_id(), hosts[1].get_addrs(), 10)
+        hosts[0].get_peerstore().add_addrs(hosts[2].get_id(), hosts[2].get_addrs(), 10)
+        hosts[1].get_peerstore().add_addrs(hosts[0].get_id(), hosts[0].get_addrs(), 10)
+        hosts[1].get_peerstore().add_addrs(hosts[2].get_id(), hosts[2].get_addrs(), 10)
+        hosts[2].get_peerstore().add_addrs(hosts[0].get_id(), hosts[0].get_addrs(), 10)
+        hosts[2].get_peerstore().add_addrs(hosts[1].get_id(), hosts[1].get_addrs(), 10)
+
+        stream_0_to_1 = await hosts[0].new_stream(hosts[1].get_id(), [PROTOCOL_ID_0])
+        stream_0_to_2 = await hosts[0].new_stream(hosts[2].get_id(), [PROTOCOL_ID_0])
+        stream_1_to_0 = await hosts[1].new_stream(hosts[0].get_id(), [PROTOCOL_ID_0])
+        stream_1_to_2 = await hosts[1].new_stream(hosts[2].get_id(), [PROTOCOL_ID_0])
+        stream_2_to_0 = await hosts[2].new_stream(hosts[0].get_id(), [PROTOCOL_ID_0])
+        stream_2_to_1 = await hosts[2].new_stream(hosts[1].get_id(), [PROTOCOL_ID_0])

         messages = ["hello" + str(x) for x in range(5)]
         streams = [
-        stream_a_to_b,
-        stream_a_to_c,
-        stream_b_to_a,
-        stream_b_to_c,
-        stream_c_to_a,
-        stream_c_to_b,
+            stream_0_to_1,
+            stream_0_to_2,
+            stream_1_to_0,
+            stream_1_to_2,
+            stream_2_to_0,
+            stream_2_to_1,
         ]

         for message in messages:
             for stream in streams:
                 await stream.write(message.encode())

                 response = (await stream.read(MAX_READ_LEN)).decode()

-            assert response == ("ack:" + message)
-
-    # Success, terminate pending tasks.
+                assert response == (ACK_STR_0 + message)


-@pytest.mark.asyncio
-async def test_host_connect():
-    transport_opt_list = [["/ip4/127.0.0.1/tcp/0"], ["/ip4/127.0.0.1/tcp/0"]]
-    (node_a, node_b) = await set_up_nodes_by_transport_opt(transport_opt_list)
-
-    assert not node_a.get_peerstore().peer_ids()
-
-    addr = node_b.get_addrs()[0]
+@pytest.mark.trio
+async def test_host_connect(is_host_secure):
+    async with HostFactory.create_batch_and_listen(is_host_secure, 2) as hosts:
+        assert not hosts[0].get_peerstore().peer_ids()
+
+        addr = hosts[1].get_addrs()[0]
         info = info_from_p2p_addr(addr)
-    await node_a.connect(info)
-
-    assert len(node_a.get_peerstore().peer_ids()) == 1
-
-    await node_a.connect(info)
+        await hosts[0].connect(info)
+
+        assert len(hosts[0].get_peerstore().peer_ids()) == 1
+
+        await hosts[0].connect(info)

         # make sure we don't do double connection
-    assert len(node_a.get_peerstore().peer_ids()) == 1
-
-    assert node_b.get_id() in node_a.get_peerstore().peer_ids()
-    ma_node_b = multiaddr.Multiaddr("/p2p/%s" % node_b.get_id().pretty())
-    for addr in node_a.get_peerstore().addrs(node_b.get_id()):
-        assert addr.encapsulate(ma_node_b) in node_b.get_addrs()
-
-    # Success, terminate pending tasks.
+        assert len(hosts[0].get_peerstore().peer_ids()) == 1
+
+        assert hosts[1].get_id() in hosts[0].get_peerstore().peer_ids()
+        ma_node_b = multiaddr.Multiaddr("/p2p/%s" % hosts[1].get_id().pretty())
+        for addr in hosts[0].get_peerstore().addrs(hosts[1].get_id()):
+            assert addr.encapsulate(ma_node_b) in hosts[1].get_addrs()
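
The rewritten tests above take an is_host_secure argument that is provided as a pytest fixture defined elsewhere in the test suite (not shown in this diff). A minimal sketch of what such a fixture could look like, assuming it simply parametrizes each test over insecure and secured hosts:

    import pytest


    @pytest.fixture(params=[False, True])
    def is_host_secure(request):
        # Assumed sketch: run each test once with plaintext hosts and once with
        # secured hosts; the real fixture lives outside this diff.
        return request.param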

View File

@@ -1,6 +1,5 @@
-import trio
 import pytest
+import trio

 from libp2p.network.stream.exceptions import StreamClosed, StreamEOF, StreamReset
 from libp2p.tools.constants import MAX_READ_LEN

View File

@@ -8,12 +8,11 @@ into network after network has already started listening
 TODO: Add tests for closed_stream, listen_close when those
 features are implemented in swarm
 """
-import trio
 import enum

-import pytest
 from async_service import background_trio_service
+import pytest
+import trio

 from libp2p.network.notifee_interface import INotifee
 from libp2p.tools.constants import LISTEN_MADDR

View File

@@ -1,6 +1,5 @@
-import trio
 import pytest
+import trio


 @pytest.mark.trio

View File

@@ -1,30 +1,33 @@
 import pytest

 from libp2p.host.exceptions import StreamFailure
-from libp2p.tools.utils import echo_stream_handler, set_up_nodes_by_transport_opt
+from libp2p.tools.factories import HostFactory
+from libp2p.tools.utils import create_echo_stream_handler

-# TODO: Add tests for multiple streams being opened on different
-# protocols through the same connection
-# Note: async issues occurred when using the same port
-# so that's why I use different ports here.
-# TODO: modify tests so that those async issues don't occur
-# when using the same ports across tests
+PROTOCOL_ECHO = "/echo/1.0.0"
+PROTOCOL_POTATO = "/potato/1.0.0"
+PROTOCOL_FOO = "/foo/1.0.0"
+PROTOCOL_ROCK = "/rock/1.0.0"
+
+ACK_PREFIX = "ack:"


 async def perform_simple_test(
-    expected_selected_protocol, protocols_for_client, protocols_with_handlers
+    expected_selected_protocol,
+    protocols_for_client,
+    protocols_with_handlers,
+    is_host_secure,
 ):
-    transport_opt_list = [["/ip4/127.0.0.1/tcp/0"], ["/ip4/127.0.0.1/tcp/0"]]
-    (node_a, node_b) = await set_up_nodes_by_transport_opt(transport_opt_list)
-
+    async with HostFactory.create_batch_and_listen(is_host_secure, 2) as hosts:
         for protocol in protocols_with_handlers:
-        node_b.set_stream_handler(protocol, echo_stream_handler)
+            hosts[1].set_stream_handler(
+                protocol, create_echo_stream_handler(ACK_PREFIX)
+            )

         # Associate the peer with local ip address (see default parameters of Libp2p())
-    node_a.get_peerstore().add_addrs(node_b.get_id(), node_b.get_addrs(), 10)
-
-    stream = await node_a.new_stream(node_b.get_id(), protocols_for_client)
+        hosts[0].get_peerstore().add_addrs(hosts[1].get_id(), hosts[1].get_addrs(), 10)
+
+        stream = await hosts[0].new_stream(hosts[1].get_id(), protocols_for_client)

         messages = ["hello" + str(x) for x in range(10)]
         for message in messages:
             expected_resp = "ack:" + message
@@ -34,50 +37,59 @@ async def perform_simple_test(
     assert expected_selected_protocol == stream.get_protocol()

-    # Success, terminate pending tasks.
-
-
-@pytest.mark.asyncio
-async def test_single_protocol_succeeds():
-    expected_selected_protocol = "/echo/1.0.0"
+
+@pytest.mark.trio
+async def test_single_protocol_succeeds(is_host_secure):
+    expected_selected_protocol = PROTOCOL_ECHO
     await perform_simple_test(
-        expected_selected_protocol, ["/echo/1.0.0"], ["/echo/1.0.0"]
+        expected_selected_protocol,
+        [expected_selected_protocol],
+        [expected_selected_protocol],
+        is_host_secure,
     )


-@pytest.mark.asyncio
-async def test_single_protocol_fails():
+@pytest.mark.trio
+async def test_single_protocol_fails(is_host_secure):
     with pytest.raises(StreamFailure):
-        await perform_simple_test("", ["/echo/1.0.0"], ["/potato/1.0.0"])
+        await perform_simple_test(
+            "", [PROTOCOL_ECHO], [PROTOCOL_POTATO], is_host_secure
+        )

     # Cleanup not reached on error


-@pytest.mark.asyncio
-async def test_multiple_protocol_first_is_valid_succeeds():
-    expected_selected_protocol = "/echo/1.0.0"
-    protocols_for_client = ["/echo/1.0.0", "/potato/1.0.0"]
-    protocols_for_listener = ["/foo/1.0.0", "/echo/1.0.0"]
+@pytest.mark.trio
+async def test_multiple_protocol_first_is_valid_succeeds(is_host_secure):
+    expected_selected_protocol = PROTOCOL_ECHO
+    protocols_for_client = [PROTOCOL_ECHO, PROTOCOL_POTATO]
+    protocols_for_listener = [PROTOCOL_FOO, PROTOCOL_ECHO]
     await perform_simple_test(
-        expected_selected_protocol, protocols_for_client, protocols_for_listener
+        expected_selected_protocol,
+        protocols_for_client,
+        protocols_for_listener,
+        is_host_secure,
     )


-@pytest.mark.asyncio
-async def test_multiple_protocol_second_is_valid_succeeds():
-    expected_selected_protocol = "/foo/1.0.0"
-    protocols_for_client = ["/rock/1.0.0", "/foo/1.0.0"]
-    protocols_for_listener = ["/foo/1.0.0", "/echo/1.0.0"]
+@pytest.mark.trio
+async def test_multiple_protocol_second_is_valid_succeeds(is_host_secure):
+    expected_selected_protocol = PROTOCOL_FOO
+    protocols_for_client = [PROTOCOL_ROCK, PROTOCOL_FOO]
+    protocols_for_listener = [PROTOCOL_FOO, PROTOCOL_ECHO]
     await perform_simple_test(
-        expected_selected_protocol, protocols_for_client, protocols_for_listener
+        expected_selected_protocol,
+        protocols_for_client,
+        protocols_for_listener,
+        is_host_secure,
     )


-@pytest.mark.asyncio
-async def test_multiple_protocol_fails():
-    protocols_for_client = ["/rock/1.0.0", "/foo/1.0.0", "/bar/1.0.0"]
+@pytest.mark.trio
+async def test_multiple_protocol_fails(is_host_secure):
+    protocols_for_client = [PROTOCOL_ROCK, PROTOCOL_FOO, "/bar/1.0.0"]
     protocols_for_listener = ["/aspyn/1.0.0", "/rob/1.0.0", "/zx/1.0.0", "/alex/1.0.0"]
     with pytest.raises(StreamFailure):
-        await perform_simple_test("", protocols_for_client, protocols_for_listener)
-
-    # Cleanup not reached on error
+        await perform_simple_test(
+            "", protocols_for_client, protocols_for_listener, is_host_secure
+        )