py-libp2p/libp2p/stream_muxer/mplex/mplex_stream.py

import asyncio
from typing import Optional, TYPE_CHECKING

from libp2p.stream_muxer.abc import IMuxedStream

from .constants import HeaderTags
from .datastructures import StreamID
from .exceptions import MplexStreamClosed, MplexStreamEOF, MplexStreamReset

if TYPE_CHECKING:
    from libp2p.stream_muxer.mplex.mplex import Mplex


class MplexStream(IMuxedStream):
    """
    reference: https://github.com/libp2p/go-mplex/blob/master/stream.go
    """

    name: str
    stream_id: StreamID
    muxed_conn: "Mplex"
    read_deadline: Optional[int]
    write_deadline: Optional[int]
    close_lock: asyncio.Lock
    # NOTE: `dataIn` is a channel with a buffer size of 8 in the Go implementation.
    incoming_data: "asyncio.Queue[bytes]"
    event_local_closed: asyncio.Event
    event_remote_closed: asyncio.Event
    event_reset: asyncio.Event
    _buf: bytearray

    def __init__(self, name: str, stream_id: StreamID, muxed_conn: "Mplex") -> None:
"""
create new MuxedStream in muxer.
2019-08-28 21:43:34 +08:00
:param stream_id: stream id of this stream
:param muxed_conn: muxed connection of this muxed_stream
2018-11-12 07:03:04 +08:00
"""
self.name = name
2018-11-13 02:02:49 +08:00
self.stream_id = stream_id
self.muxed_conn = muxed_conn
2018-11-13 02:02:49 +08:00
self.read_deadline = None
self.write_deadline = None
self.event_local_closed = asyncio.Event()
self.event_remote_closed = asyncio.Event()
self.event_reset = asyncio.Event()
self.close_lock = asyncio.Lock()
2019-09-05 22:29:33 +08:00
self.incoming_data = asyncio.Queue()
self._buf = bytearray()
2018-11-12 07:03:04 +08:00
2019-08-28 21:43:34 +08:00
@property
def is_initiator(self) -> bool:
return self.stream_id.is_initiator
2019-09-06 01:08:42 +08:00
async def _wait_for_data(self) -> None:
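        """
        Wait until something actionable happens on this stream: if incoming
        data arrives it is appended to `self._buf` and we return; if the
        stream has been reset we raise `MplexStreamReset`; if the remote side
        has closed we raise `MplexStreamEOF`.
        """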
        task_event_reset = asyncio.ensure_future(self.event_reset.wait())
        task_incoming_data_get = asyncio.ensure_future(self.incoming_data.get())
        task_event_remote_closed = asyncio.ensure_future(
            self.event_remote_closed.wait()
        )
        done, pending = await asyncio.wait(  # type: ignore
            [  # type: ignore
                task_event_reset,
                task_incoming_data_get,
                task_event_remote_closed,
            ],
            return_when=asyncio.FIRST_COMPLETED,
        )
        # Cancel whichever tasks did not finish, so they do not linger
        # (in particular, so a pending `Queue.get` cannot swallow data later).
        for fut in pending:
            fut.cancel()
        if task_event_reset in done:
            if self.event_reset.is_set():
                raise MplexStreamReset
            else:
                # It is abnormal for `Event.wait` to return without its flag
                # being set; the task was probably cancelled.
                raise Exception(
                    "Should not enter here. "
                    f"It is probably because {task_event_reset} is cancelled."
                )
        if task_incoming_data_get in done:
            data = task_incoming_data_get.result()
            self._buf.extend(data)
            return
        if task_event_remote_closed in done:
            if self.event_remote_closed.is_set():
                raise MplexStreamEOF
            else:
                # It is abnormal for `Event.wait` to return without its flag
                # being set; the task was probably cancelled.
                raise Exception(
                    "Should not enter here. "
                    f"It is probably because {task_event_remote_closed} is cancelled."
                )

    # TODO: Handle timeout when deadline is used.
    async def _read_until_eof(self) -> bytes:
while True:
try:
await self._wait_for_data()
except MplexStreamEOF:
break
payload = self._buf
self._buf = self._buf[len(payload) :]
        return bytes(payload)

    async def read(self, n: int = -1) -> bytes:
        """
        Read up to `n` bytes. `read` may return fewer than `n` bytes if there
        are not enough bytes in the Mplex buffer. If `n == -1`, read until EOF.

        :param n: number of bytes to read
        :return: bytes actually read
        """
        if n < 0 and n != -1:
            raise ValueError(
                "the number of bytes to read `n` must be non-negative or "
                "-1 to indicate reading until EOF"
            )
        if self.event_reset.is_set():
            raise MplexStreamReset
        if n == -1:
            return await self._read_until_eof()
        if len(self._buf) == 0 and self.incoming_data.empty():
            await self._wait_for_data()
        # Now we are sure we have something to read.
        # Try to put enough incoming data into `self._buf`.
        while len(self._buf) < n:
            try:
                self._buf.extend(self.incoming_data.get_nowait())
            except asyncio.QueueEmpty:
                break
        payload = self._buf[:n]
        self._buf = self._buf[len(payload) :]
        return bytes(payload)
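
    # A minimal usage sketch, assuming `stream` is an `MplexStream` obtained
    # from an `Mplex` connection (not shown here):
    #
    #     data = await stream.read(1024)  # up to 1024 bytes, possibly fewer
    #     rest = await stream.read()      # n == -1: read until EOF
    #
    # A fixed-size read raises `MplexStreamEOF` once the remote side has
    # closed and all buffered data has been consumed, and `MplexStreamReset`
    # if the stream has been reset.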

    async def write(self, data: bytes) -> int:
        """
        Write to the stream.

        :return: number of bytes written
        """
        if self.event_local_closed.is_set():
            raise MplexStreamClosed(f"cannot write to closed stream: data={data!r}")
        flag = (
            HeaderTags.MessageInitiator
            if self.is_initiator
            else HeaderTags.MessageReceiver
        )
        return await self.muxed_conn.send_message(flag, data, self.stream_id)
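
    # Note: the wire framing itself is done by `Mplex.send_message`; per the
    # mplex spec each frame carries a varint header of `(stream_id << 3) | flag`,
    # a varint payload length, and then the payload bytes.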

    async def close(self) -> None:
        """Closing a stream closes it for writing and closes the remote end
        for reading, but allows writing in the other direction (the remote
        side may still send us data until it closes as well)."""
        # TODO error handling with timeout
        async with self.close_lock:
            if self.event_local_closed.is_set():
                return

        flag = (
            HeaderTags.CloseInitiator if self.is_initiator else HeaderTags.CloseReceiver
        )
        # TODO: Raise when `muxed_conn.send_message` fails and `Mplex` isn't shutdown.
        await self.muxed_conn.send_message(flag, None, self.stream_id)

        _is_remote_closed: bool
        async with self.close_lock:
            self.event_local_closed.set()
            _is_remote_closed = self.event_remote_closed.is_set()

        if _is_remote_closed:
            # Both sides are closed, we can safely remove the buffer from the dict.
            async with self.muxed_conn.streams_lock:
                self.muxed_conn.streams.pop(self.stream_id, None)
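
    # Half-close in practice: after `await stream.close()`, `stream.write(...)`
    # raises `MplexStreamClosed`, while reads keep returning whatever the remote
    # side sends until it closes its end too, after which fixed-size reads raise
    # `MplexStreamEOF`.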

    async def reset(self) -> None:
        """Closes both ends of the stream and tells the remote side to hang up."""
        async with self.close_lock:
            # Both sides have been closed already; no need to reset.
            if self.event_remote_closed.is_set() and self.event_local_closed.is_set():
                return
            if self.event_reset.is_set():
                return
            self.event_reset.set()

            if not self.event_remote_closed.is_set():
                flag = (
                    HeaderTags.ResetInitiator
                    if self.is_initiator
                    else HeaderTags.ResetReceiver
                )
                # Fire-and-forget the reset frame, then yield once so the send
                # task gets a chance to run.
                asyncio.ensure_future(
                    self.muxed_conn.send_message(flag, None, self.stream_id)
                )
                await asyncio.sleep(0)

            self.event_local_closed.set()
            self.event_remote_closed.set()

        async with self.muxed_conn.streams_lock:
            if self.muxed_conn.streams is not None:
                self.muxed_conn.streams.pop(self.stream_id, None)

    # TODO deadline not in use
    def set_deadline(self, ttl: int) -> bool:
        """
        Set the deadline for the muxed stream.

        :return: True if successful
        """
        self.read_deadline = ttl
        self.write_deadline = ttl
        return True

    def set_read_deadline(self, ttl: int) -> bool:
        """
        Set the read deadline for the muxed stream.

        :return: True if successful
        """
        self.read_deadline = ttl
        return True

    def set_write_deadline(self, ttl: int) -> bool:
        """
        Set the write deadline for the muxed stream.

        :return: True if successful
        """
        self.write_deadline = ttl
        return True
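

# The block below is an illustrative, self-contained sketch (not part of the
# library API) of the waiting pattern `_wait_for_data` relies on: race several
# awaitables with `asyncio.wait(..., return_when=FIRST_COMPLETED)` and cancel
# whatever did not finish. The queue, event, and helper names are made up for
# this demo only.
if __name__ == "__main__":

    async def _demo() -> None:
        incoming: "asyncio.Queue[bytes]" = asyncio.Queue()
        remote_closed = asyncio.Event()

        async def _feed() -> None:
            # Simulate the connection delivering a frame a little later.
            await asyncio.sleep(0.1)
            await incoming.put(b"hello")

        asyncio.ensure_future(_feed())

        task_get = asyncio.ensure_future(incoming.get())
        task_closed = asyncio.ensure_future(remote_closed.wait())
        done, pending = await asyncio.wait(
            [task_get, task_closed], return_when=asyncio.FIRST_COMPLETED
        )
        # Cancel the loser of the race, just as `_wait_for_data` does.
        for fut in pending:
            fut.cancel()
        if task_get in done:
            print("got data:", task_get.result())
        else:
            print("remote closed before any data arrived")

    asyncio.run(_demo())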