py-libp2p/libp2p/network/swarm.py
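
"""The libp2p swarm ("network") implementation, built on trio and async-service."""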

import logging
from typing import Dict, List, Optional

from async_service import Service
from multiaddr import Multiaddr
import trio

from libp2p.io.abc import ReadWriteCloser
from libp2p.network.connection.net_connection_interface import INetConn
from libp2p.peer.id import ID
from libp2p.peer.peerstore import PeerStoreError
from libp2p.peer.peerstore_interface import IPeerStore
from libp2p.stream_muxer.abc import IMuxedConn
from libp2p.transport.exceptions import (
    MuxerUpgradeFailure,
    OpenConnectionError,
    SecurityUpgradeFailure,
)
from libp2p.transport.listener_interface import IListener
from libp2p.transport.transport_interface import ITransport
from libp2p.transport.upgrader import TransportUpgrader
from libp2p.typing import StreamHandlerFn

from .connection.raw_connection import RawConnection
from .connection.swarm_connection import SwarmConn
from .exceptions import SwarmException
from .network_interface import INetwork
from .notifee_interface import INotifee
from .stream.net_stream_interface import INetStream

logger = logging.getLogger("libp2p.network.swarm")


class Swarm(INetwork, Service):
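    """An ``INetwork`` implementation backed by a transport and a transport upgrader.

    A ``Swarm`` dials peers over its transport, upgrades raw connections
    (security first, then stream muxing), listens on multiaddrs, and notifies
    registered notifees about network events.

    Rough usage sketch (``listen_maddr`` and ``remote_peer_id`` are placeholders;
    ``background_trio_service`` comes from ``async_service``)::

        swarm = Swarm(peer_id, peerstore, upgrader, transport)
        async with background_trio_service(swarm):
            await swarm.listen(listen_maddr)
            stream = await swarm.new_stream(remote_peer_id)
    """
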
    self_id: ID
    peerstore: IPeerStore
    upgrader: TransportUpgrader
    transport: ITransport
    # TODO: `Connection` and `peer_id` are a 1-1 mapping in our implementation,
    #   whereas in Go one `peer_id` may point to multiple connections.
    connections: Dict[ID, INetConn]
    listeners: Dict[str, IListener]
    common_stream_handler: Optional[StreamHandlerFn]
    notifees: List[INotifee]

    def __init__(
        self,
        peer_id: ID,
        peerstore: IPeerStore,
        upgrader: TransportUpgrader,
        transport: ITransport,
    ):
        self.self_id = peer_id
        self.peerstore = peerstore
        self.upgrader = upgrader
        self.transport = transport
        self.connections = dict()
        self.listeners = dict()

        # Create the list of notifees
        self.notifees = []
        self.common_stream_handler = None

    async def run(self) -> None:
        # No main loop of its own: just wait until the swarm is stopped
        # (see `close`).
        await self.manager.wait_finished()

    def get_peer_id(self) -> ID:
        return self.self_id

    def set_stream_handler(self, stream_handler: StreamHandlerFn) -> None:
        self.common_stream_handler = stream_handler

    async def dial_peer(self, peer_id: ID) -> INetConn:
        """
        Try to create a connection to peer_id.

        :param peer_id: peer ID of the peer we want to dial
        :raises SwarmException: raised when an error occurs
        :return: muxed connection
        """
        if peer_id in self.connections:
            # A muxed connection to this peer already exists; reuse it.
            return self.connections[peer_id]

        logger.debug("attempting to dial peer %s", peer_id)

        try:
            # Get peer info from peer store
            addrs = self.peerstore.addrs(peer_id)
        except PeerStoreError as error:
            raise SwarmException(f"No known addresses to peer {peer_id}") from error

        if not addrs:
            raise SwarmException(f"No known addresses to peer {peer_id}")

        multiaddr = addrs[0]

        # Dial peer (connection to peer does not yet exist)
        # Transport dials peer (gets back a raw conn)
        try:
            raw_conn = await self.transport.dial(multiaddr)
        except OpenConnectionError as error:
            logger.debug("failed to dial peer %s over base transport", peer_id)
            raise SwarmException(
                f"failed to open connection to peer {peer_id}"
            ) from error

        logger.debug("dialed peer %s over base transport", peer_id)

        # Per https://discuss.libp2p.io/t/multistream-security/130, we first secure
        # the conn and then mux the conn
        try:
            secured_conn = await self.upgrader.upgrade_security(raw_conn, peer_id, True)
        except SecurityUpgradeFailure as error:
            error_msg = "failed to upgrade security for peer %s"
            logger.debug(error_msg, peer_id)
            await raw_conn.close()
            raise SwarmException(error_msg % peer_id) from error

        logger.debug("upgraded security for peer %s", peer_id)

        try:
            muxed_conn = await self.upgrader.upgrade_connection(secured_conn, peer_id)
            self.manager.run_child_service(muxed_conn)
        except MuxerUpgradeFailure as error:
            error_msg = "failed to upgrade mux for peer %s"
            logger.debug(error_msg, peer_id)
            await secured_conn.close()
            raise SwarmException(error_msg % peer_id) from error

        logger.debug("upgraded mux for peer %s", peer_id)

        swarm_conn = await self.add_conn(muxed_conn)

        logger.debug("successfully dialed peer %s", peer_id)
        return swarm_conn

    async def new_stream(self, peer_id: ID) -> INetStream:
        """
        :param peer_id: peer_id of destination
        :raises SwarmException: raised when an error occurs
        :return: net stream instance
        """
        logger.debug("attempting to open a stream to peer %s", peer_id)

        swarm_conn = await self.dial_peer(peer_id)

        net_stream = await swarm_conn.new_stream()
        logger.debug("successfully opened a stream to peer %s", peer_id)
        return net_stream

    async def listen(self, *multiaddrs: Multiaddr) -> bool:
        """
        :param multiaddrs: one or many multiaddrs to start listening on
        :return: True if at least one multiaddr was listened on successfully

        For each multiaddr:
            Check if a listener for the multiaddr already exists
            If one already exists, return True
            Otherwise:
                Capture the multiaddr in a conn handler
                Have the conn handler delegate to the stream handler
                Call the listener's listen with the multiaddr
                Map the multiaddr to the listener
        """
        for maddr in multiaddrs:
            if str(maddr) in self.listeners:
                return True

            async def conn_handler(read_write_closer: ReadWriteCloser) -> None:
                raw_conn = RawConnection(read_write_closer, False)

                # Per https://discuss.libp2p.io/t/multistream-security/130, we first
                # secure the conn and then mux the conn
                try:
                    # FIXME: This dummy `ID(b"")` for the remote peer is useless.
                    secured_conn = await self.upgrader.upgrade_security(
                        raw_conn, ID(b""), False
                    )
                except SecurityUpgradeFailure as error:
                    await raw_conn.close()
                    raise SwarmException() from error
                peer_id = secured_conn.get_remote_peer()

                try:
                    muxed_conn = await self.upgrader.upgrade_connection(
                        secured_conn, peer_id
                    )
                    self.manager.run_child_service(muxed_conn)
                except MuxerUpgradeFailure as error:
                    error_msg = "failed to upgrade mux for peer %s"
                    logger.debug(error_msg, peer_id)
                    await secured_conn.close()
                    raise SwarmException(error_msg % peer_id) from error

                logger.debug("upgraded mux for peer %s", peer_id)

                await self.add_conn(muxed_conn)

                logger.debug("successfully opened connection to peer %s", peer_id)

                # FIXME: This is an intentional barrier to prevent the handler from
                #   exiting and closing the connection. Probably change to
                #   `Service.manager.wait_finished`?
                await trio.sleep_forever()

            try:
                # Success
                listener = self.transport.create_listener(conn_handler)
                self.listeners[str(maddr)] = listener
                # FIXME: Hack
                await listener.listen(maddr, self.manager._task_nursery)

                # Call notifiers since event occurred
                await self.notify_listen(maddr)

                return True
            except IOError:
                # Failed. Continue looping.
                logger.debug("failed to listen on %s", maddr)

        # No maddr succeeded
        return False

    async def close(self) -> None:
        # TODO: Prevent new listeners and connections from being added.
        #   Reference: https://github.com/libp2p/go-libp2p-swarm/blob/8be680aef8dea0a4497283f2f98470c2aeae6b65/swarm.go#L124-L134  # noqa: E501
        await self.manager.stop()
        await self.manager.wait_finished()

        logger.debug("swarm successfully closed")

    async def close_peer(self, peer_id: ID) -> None:
        if peer_id not in self.connections:
            return
        # TODO: Should be changed to close multiple connections,
        #   if we have several connections per peer in the future.
        connection = self.connections[peer_id]
        # NOTE: `connection.close` will perform `del self.connections[peer_id]`
        #   and `notify_disconnected` for us.
        await connection.close()

        logger.debug("successfully closed the connection to peer %s", peer_id)

    async def add_conn(self, muxed_conn: IMuxedConn) -> SwarmConn:
        """Add an `IMuxedConn` to `Swarm` as a `SwarmConn`, notify "connected",
        and start to monitor the connection for its new streams and
        disconnection."""
        swarm_conn = SwarmConn(muxed_conn, self)
        manager = self.manager.run_child_service(swarm_conn)
        # Store muxed_conn with peer id
        self.connections[muxed_conn.peer_id] = swarm_conn
        # Call notifiers since event occurred
        self.manager.run_task(self.notify_connected, swarm_conn)
        await manager.wait_started()
        return swarm_conn

    def remove_conn(self, swarm_conn: SwarmConn) -> None:
        """Simply remove the connection from Swarm's records, without closing
        the connection."""
        peer_id = swarm_conn.muxed_conn.peer_id
        if peer_id not in self.connections:
            return
        # TODO: Should be changed to remove the exact connection,
        #   if we have several connections per peer in the future.
        del self.connections[peer_id]

    # Notifee
    # TODO: Remember the spawned notifying tasks and clean them up when closing.

    def register_notifee(self, notifee: INotifee) -> None:
        """
        :param notifee: object implementing the Notifee interface
        """
        self.notifees.append(notifee)
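
    # NOTE: Each notify_* helper below fans the event out to every registered
    #   notifee concurrently in a trio nursery and returns once all of the
    #   notifee callbacks have finished.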
    async def notify_opened_stream(self, stream: INetStream) -> None:
        async with trio.open_nursery() as nursery:
            for notifee in self.notifees:
                nursery.start_soon(notifee.opened_stream, self, stream)

    # TODO: `notify_closed_stream`

    async def notify_connected(self, conn: INetConn) -> None:
        async with trio.open_nursery() as nursery:
            for notifee in self.notifees:
                nursery.start_soon(notifee.connected, self, conn)

    async def notify_disconnected(self, conn: INetConn) -> None:
        async with trio.open_nursery() as nursery:
            for notifee in self.notifees:
                nursery.start_soon(notifee.disconnected, self, conn)

    async def notify_listen(self, multiaddr: Multiaddr) -> None:
        async with trio.open_nursery() as nursery:
            for notifee in self.notifees:
                nursery.start_soon(notifee.listen, self, multiaddr)

    # TODO: `notify_listen_close`