# libp2p/network/swarm.py — Swarm network implementation for py-libp2p.

import asyncio
import logging
from typing import Callable, Dict, List, Sequence

from multiaddr import Multiaddr

from libp2p.peer.id import ID
from libp2p.peer.peerstore import PeerStoreError
from libp2p.peer.peerstore_interface import IPeerStore
from libp2p.protocol_muxer.multiselect import Multiselect
from libp2p.protocol_muxer.multiselect_client import MultiselectClient
from libp2p.protocol_muxer.multiselect_communicator import MultiselectCommunicator
from libp2p.routing.interfaces import IPeerRouting
from libp2p.stream_muxer.abc import IMuxedConn, IMuxedStream
from libp2p.transport.exceptions import MuxerUpgradeFailure, SecurityUpgradeFailure
from libp2p.transport.listener_interface import IListener
from libp2p.transport.transport_interface import ITransport
from libp2p.transport.upgrader import TransportUpgrader
from libp2p.typing import StreamHandlerFn, TProtocol

from .connection.raw_connection import RawConnection
from .exceptions import SwarmException
from .network_interface import INetwork
from .notifee_interface import INotifee
from .stream.net_stream import NetStream
from .stream.net_stream_interface import INetStream
from .typing import GenericProtocolHandlerFn

logger = logging.getLogger("libp2p.network.swarm")
# NOTE: Do not call `logger.setLevel(...)` here. Hard-coding DEBUG in a
# library module overrides the host application's logging configuration;
# the effective level should be chosen by the application, per the Python
# Logging HOWTO's guidance on configuring logging for libraries.
2019-07-29 12:42:13 +08:00
2018-10-15 13:52:25 +08:00
class Swarm(INetwork):
    """
    The Swarm ties together the local peer's transport, connection
    upgrader (security + muxing), protocol negotiation (multiselect),
    and event notification (notifees). It owns the table of open muxed
    connections (one per remote peer) and the set of active listeners.
    """

    self_id: ID
    peerstore: IPeerStore
    upgrader: TransportUpgrader
    transport: ITransport
    router: IPeerRouting
    # TODO: Connection and `peer_id` are 1-1 mapping in our implementation,
    # whereas in Go one `peer_id` may point to multiple connections.
    connections: Dict[ID, IMuxedConn]
    # Listeners are keyed by the string form of the multiaddr they bind.
    listeners: Dict[str, IListener]
    stream_handlers: Dict[INetStream, Callable[[INetStream], None]]
    multiselect: Multiselect
    multiselect_client: MultiselectClient
    notifees: List[INotifee]

    def __init__(
        self,
        peer_id: ID,
        peerstore: IPeerStore,
        upgrader: TransportUpgrader,
        transport: ITransport,
        router: IPeerRouting,
    ):
        self.self_id = peer_id
        self.peerstore = peerstore
        self.upgrader = upgrader
        self.transport = transport
        self.router = router
        self.connections = {}
        self.listeners = {}
        self.stream_handlers = {}

        # Protocol muxing
        self.multiselect = Multiselect()
        self.multiselect_client = MultiselectClient()

        # Registered notifees, informed of connection/stream/listen events.
        self.notifees = []

        # Entry point handed to the muxer for every new inbound stream.
        self.generic_protocol_handler = create_generic_protocol_handler(self)

    def get_peer_id(self) -> ID:
        """Return the local peer's ID."""
        return self.self_id

    def set_stream_handler(
        self, protocol_id: TProtocol, stream_handler: StreamHandlerFn
    ) -> None:
        """
        Register a handler for a protocol, used when negotiating inbound streams.

        :param protocol_id: protocol id used on stream
        :param stream_handler: a stream handler instance
        """
        self.multiselect.add_handler(protocol_id, stream_handler)

    async def dial_peer(self, peer_id: ID) -> IMuxedConn:
        """
        dial_peer try to create a connection to peer_id

        :param peer_id: peer if we want to dial
        :raises SwarmException: raised when an error occurs
        :return: muxed connection
        """
        if peer_id in self.connections:
            # Reuse the existing muxed connection for this peer.
            return self.connections[peer_id]

        logger.debug("attempting to dial peer %s", peer_id)

        try:
            # Get peer info from peer store
            addrs = self.peerstore.addrs(peer_id)
        except PeerStoreError:
            raise SwarmException(f"No known addresses to peer {peer_id}")

        if not addrs:
            raise SwarmException(f"No known addresses to peer {peer_id}")

        if not self.router:
            multiaddr = addrs[0]
        else:
            # NOTE(review): when a router is present the peerstore addrs are
            # discarded and `find_peer` is assumed to return a dialable
            # multiaddr — confirm against the IPeerRouting implementation.
            multiaddr = self.router.find_peer(peer_id)

        # Dial peer (connection to peer does not yet exist):
        # transport dials peer (gets back a raw conn)
        raw_conn = await self.transport.dial(multiaddr)

        logger.debug("dialed peer %s over base transport", peer_id)

        # Per, https://discuss.libp2p.io/t/multistream-security/130, we first secure
        # the conn and then mux the conn
        try:
            secured_conn = await self.upgrader.upgrade_security(raw_conn, peer_id, True)
        except SecurityUpgradeFailure as error:
            error_msg = "fail to upgrade security for peer %s"
            logger.debug(error_msg, peer_id)
            # Release the raw transport connection before surfacing the error.
            await raw_conn.close()
            raise SwarmException(error_msg % peer_id) from error

        logger.debug("upgraded security for peer %s", peer_id)

        try:
            muxed_conn = await self.upgrader.upgrade_connection(
                secured_conn, self.generic_protocol_handler, peer_id
            )
        except MuxerUpgradeFailure as error:
            error_msg = "fail to upgrade mux for peer %s"
            logger.debug(error_msg, peer_id)
            await secured_conn.close()
            raise SwarmException(error_msg % peer_id) from error

        logger.debug("upgraded mux for peer %s", peer_id)

        # Store muxed connection in connections
        self.connections[peer_id] = muxed_conn

        # Call notifiers since event occurred
        for notifee in self.notifees:
            await notifee.connected(self, muxed_conn)

        logger.debug("successfully dialed peer %s", peer_id)

        return muxed_conn

    async def new_stream(
        self, peer_id: ID, protocol_ids: Sequence[TProtocol]
    ) -> NetStream:
        """
        Open a new stream to ``peer_id``, negotiating one of ``protocol_ids``.

        :param peer_id: peer_id of destination
        :param protocol_ids: available protocol ids to use for stream
        :return: net stream instance
        """
        logger.debug(
            "attempting to open a stream to peer %s, over one of the protocols %s",
            peer_id,
            protocol_ids,
        )

        # Dials (or reuses) the muxed connection to the peer.
        muxed_conn = await self.dial_peer(peer_id)

        # Use muxed conn to open stream, which returns a muxed stream
        muxed_stream = await muxed_conn.open_stream()

        # Perform protocol muxing to determine protocol to use
        selected_protocol = await self.multiselect_client.select_one_of(
            list(protocol_ids), MultiselectCommunicator(muxed_stream)
        )

        # Create a net stream with the selected protocol
        net_stream = NetStream(muxed_stream)
        net_stream.set_protocol(selected_protocol)

        logger.debug(
            "successfully opened a stream to peer %s, over protocol %s",
            peer_id,
            selected_protocol,
        )

        # Call notifiers since event occurred
        for notifee in self.notifees:
            await notifee.opened_stream(self, net_stream)

        return net_stream

    async def listen(self, *multiaddrs: Multiaddr) -> bool:
        """
        :param multiaddrs: one or many multiaddrs to start listening on
        :return: true if at least one success

        For each multiaddr
            Check if a listener for multiaddr exists already
            If listener already exists, continue
            Otherwise:
                Capture multiaddr in conn handler
                Have conn handler delegate to stream handler
                Call listener listen with the multiaddr
                Map multiaddr to listener
        """
        for maddr in multiaddrs:
            if str(maddr) in self.listeners:
                # NOTE(review): an already-bound multiaddr is treated as an
                # overall success and short-circuits the remaining
                # multiaddrs, even though the docstring says "continue" —
                # confirm the intended semantics.
                return True

            async def conn_handler(
                reader: asyncio.StreamReader, writer: asyncio.StreamWriter
            ) -> None:
                # Inbound path: secure, identify, then mux the raw connection.
                connection_info = writer.get_extra_info("peername")
                # TODO make a proper multiaddr
                peer_addr = f"/ip4/{connection_info[0]}/tcp/{connection_info[1]}"
                logger.debug("inbound connection at %s", peer_addr)

                raw_conn = RawConnection(reader, writer, False)

                # Per, https://discuss.libp2p.io/t/multistream-security/130, we first secure
                # the conn and then mux the conn
                try:
                    # FIXME: This dummy `ID(b"")` for the remote peer is useless.
                    secured_conn = await self.upgrader.upgrade_security(
                        raw_conn, ID(b""), False
                    )
                except SecurityUpgradeFailure as error:
                    error_msg = "fail to upgrade security for peer at %s"
                    logger.debug(error_msg, peer_addr)
                    await raw_conn.close()
                    raise SwarmException(error_msg % peer_addr) from error
                # The remote's real identity is only known after the handshake.
                peer_id = secured_conn.get_remote_peer()

                logger.debug("upgraded security for peer at %s", peer_addr)
                logger.debug("identified peer at %s as %s", peer_addr, peer_id)

                try:
                    muxed_conn = await self.upgrader.upgrade_connection(
                        secured_conn, self.generic_protocol_handler, peer_id
                    )
                except MuxerUpgradeFailure as error:
                    error_msg = "fail to upgrade mux for peer %s"
                    logger.debug(error_msg, peer_id)
                    await secured_conn.close()
                    raise SwarmException(error_msg % peer_id) from error

                logger.debug("upgraded mux for peer %s", peer_id)

                # Store muxed_conn with peer id
                self.connections[peer_id] = muxed_conn

                # Call notifiers since event occurred
                for notifee in self.notifees:
                    await notifee.connected(self, muxed_conn)

                logger.debug("successfully opened connection to peer %s", peer_id)

            try:
                # Success: register the listener before binding so it is
                # discoverable under its multiaddr key.
                listener = self.transport.create_listener(conn_handler)
                self.listeners[str(maddr)] = listener
                await listener.listen(maddr)

                # Call notifiers since event occurred
                for notifee in self.notifees:
                    await notifee.listen(self, maddr)

                # Only the first multiaddr that binds successfully is used.
                return True
            except OSError:
                # Failed; try the next multiaddr. (`IOError` is an exact
                # alias of `OSError` on Python 3, so behavior is unchanged.)
                logger.debug("fail to listen on: %s", maddr)

        # No maddr succeeded
        return False

    def notify(self, notifee: INotifee) -> bool:
        """
        :param notifee: object implementing Notifee interface
        :return: true if notifee registered successfully, false otherwise
        """
        if isinstance(notifee, INotifee):
            self.notifees.append(notifee)
            return True
        return False

    def add_router(self, router: IPeerRouting) -> None:
        """Set the peer router used by ``dial_peer`` to resolve addresses."""
        self.router = router

    async def close(self) -> None:
        """Close every listener and every open connection held by this swarm."""
        # TODO: Prevent from new listeners and conns being added.
        # Reference: https://github.com/libp2p/go-libp2p-swarm/blob/8be680aef8dea0a4497283f2f98470c2aeae6b65/swarm.go#L124-L134  # noqa: E501

        # Close listeners
        await asyncio.gather(
            *[listener.close() for listener in self.listeners.values()]
        )
        # Close connections
        await asyncio.gather(
            *[connection.close() for connection in self.connections.values()]
        )

        logger.debug("swarm successfully closed")

    async def close_peer(self, peer_id: ID) -> None:
        """Close and forget the connection to ``peer_id``; no-op if absent."""
        if peer_id not in self.connections:
            return
        # Remove from the table before closing so concurrent dials re-dial.
        connection = self.connections.pop(peer_id)
        await connection.close()

        logger.debug("successfully close the connection to peer %s", peer_id)
2019-07-22 18:12:54 +08:00
2019-07-28 14:06:29 +08:00
def create_generic_protocol_handler(swarm: Swarm) -> GenericProtocolHandlerFn:
    """
    Build the coroutine the muxer invokes for every new muxed stream.

    The returned handler negotiates a protocol over the stream using the
    swarm's multiselect module, wraps the stream in a ``NetStream`` tagged
    with the negotiated protocol, notifies the swarm's notifees that a
    stream was opened, and finally schedules the protocol's registered
    handler as a background task.
    """

    async def generic_protocol_handler(muxed_stream: IMuxedStream) -> None:
        # Negotiate which protocol this inbound stream will speak.
        communicator = MultiselectCommunicator(muxed_stream)
        protocol, handler = await swarm.multiselect.negotiate(communicator)

        wrapped_stream = NetStream(muxed_stream)
        wrapped_stream.set_protocol(protocol)

        # Inform every registered notifee that a stream was opened.
        for subscriber in swarm.notifees:
            await subscriber.opened_stream(swarm, wrapped_stream)

        # Hand the stream off to its protocol handler without blocking.
        asyncio.ensure_future(handler(wrapped_stream))

    return generic_protocol_handler