import asyncio
from typing import Optional, TYPE_CHECKING

from libp2p.stream_muxer.abc import IMuxedStream

from .constants import HeaderTags
from .datastructures import StreamID
from .exceptions import MplexStreamClosed, MplexStreamEOF, MplexStreamReset

if TYPE_CHECKING:
    from libp2p.stream_muxer.mplex.mplex import Mplex
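
# Rough usage sketch (illustrative only): `MplexStream` objects are normally
# created by an `Mplex` muxed connection rather than instantiated directly.
# Assuming `stream` is an `MplexStream` handed out by such a connection:
#
#     await stream.write(b"ping")   # frame the payload and send it to the peer
#     data = await stream.read(4)   # read up to 4 bytes from the stream buffer
#     await stream.close()          # half-close: stop writing on our side
#     # ...or, to abort both directions at once:
#     await stream.reset()
#
# How the stream is obtained is an assumption here; only the methods shown are
# defined in this module.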


class MplexStream(IMuxedStream):
    """
    reference: https://github.com/libp2p/go-mplex/blob/master/stream.go
    """

    name: str
    stream_id: StreamID
    mplex_conn: "Mplex"
    read_deadline: Optional[int]
    write_deadline: Optional[int]

    close_lock: asyncio.Lock

    # Data frames received from the remote side, waiting to be consumed by `read`.
    incoming_data: "asyncio.Queue[bytes]"

    event_local_closed: asyncio.Event
    event_remote_closed: asyncio.Event
    event_reset: asyncio.Event

    # Bytes already taken off `incoming_data` but not yet returned to the caller.
    _buf: bytearray
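
    # State summary of the three events declared above: `event_local_closed`
    # means we have closed our writing side, so further `write` calls raise
    # `MplexStreamClosed`; `event_remote_closed` means the peer has closed its
    # writing side, so once the buffer is drained `read` raises
    # `MplexStreamEOF` (`read(-1)` instead returns the remaining bytes);
    # `event_reset` means the stream was aborted and `read` raises
    # `MplexStreamReset`.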

    def __init__(self, name: str, stream_id: StreamID, mplex_conn: "Mplex") -> None:
        """
        Create a new muxed stream on top of a muxed connection.
        :param name: name of this stream
        :param stream_id: stream id of this stream
        :param mplex_conn: muxed connection this stream belongs to
        """
        self.name = name
        self.stream_id = stream_id
        self.mplex_conn = mplex_conn
        self.read_deadline = None
        self.write_deadline = None
        self.event_local_closed = asyncio.Event()
        self.event_remote_closed = asyncio.Event()
        self.event_reset = asyncio.Event()
        self.close_lock = asyncio.Lock()
        self.incoming_data = asyncio.Queue()
        self._buf = bytearray()

    @property
    def is_initiator(self) -> bool:
        return self.stream_id.is_initiator
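
    # `_wait_for_data` below races three awaitables with
    # `asyncio.wait(..., return_when=FIRST_COMPLETED)`: the reset event, the
    # remote-close event, and `incoming_data.get()`. The pending tasks are
    # cancelled, and the finished one is identified by its coroutine qualname;
    # only `Queue.get` carries a payload, while the two events are translated
    # into `MplexStreamReset` / `MplexStreamEOF` for the caller.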
    async def _wait_for_data(self) -> None:
        done, pending = await asyncio.wait(  # type: ignore
            [
                self.event_reset.wait(),
                self.incoming_data.get(),
                self.event_remote_closed.wait(),
            ],
            return_when=asyncio.FIRST_COMPLETED,
        )
        for fut in pending:
            fut.cancel()

        if self.event_reset.is_set():
            raise MplexStreamReset

        if len(done) != 1:
            raise Exception(f"Should be exactly 1 finished task in {done}.")
        done_task = tuple(done)[0]
        # NOTE: Ignore the type check because the typeshed for `asyncio.Task` does not
        #   expose the private field `_coro`.
        coro_qualname = done_task._coro.__qualname__  # type: ignore
        # If `coro_qualname == "Queue.get"`, there is incoming data; add it to the buffer.
        if coro_qualname == "Queue.get":
            data = done_task.result()
            self._buf.extend(data)
            return

        if self.event_remote_closed.is_set():
            raise MplexStreamEOF

        # If the finished task is not `Queue.get`, it must be one of the `Event.wait`
        #   calls. It is abnormal for `Event.wait` to return without either event
        #   (remote_closed or reset) being set, so the task was most likely cancelled.
        raise Exception(
            "Should not enter here. "
            f"It is highly possible that `done_task` was cancelled. `done_task`={done_task}"
        )
        # TODO: Handle timeout when deadline is used.

    async def _read_until_eof(self) -> bytes:
        while True:
            try:
                await self._wait_for_data()
            except MplexStreamEOF:
                break
        payload = self._buf
        self._buf = self._buf[len(payload) :]
        return bytes(payload)

    async def read(self, n: int = -1) -> bytes:
        """
        Read up to `n` bytes. The call may return fewer than `n` bytes
        if there are not enough bytes in the Mplex buffer.
        If `n == -1`, read until EOF.
        :param n: number of bytes to read
        :return: bytes actually read
        """
        # TODO: Add exceptions and handle/raise them in this class.
        if n < 0 and n != -1:
            raise ValueError(
                "the number of bytes to read `n` must be non-negative or -1 to indicate read until EOF"
            )
        if self.event_reset.is_set():
            raise MplexStreamReset
        if n == -1:
            return await self._read_until_eof()
        if len(self._buf) == 0 and self.incoming_data.empty():
            await self._wait_for_data()
        # Now we are sure there is something to read.
        # Try to move enough incoming data into `self._buf`.
        while len(self._buf) < n:
            try:
                self._buf.extend(self.incoming_data.get_nowait())
            except asyncio.QueueEmpty:
                break
        payload = self._buf[:n]
        self._buf = self._buf[len(payload) :]
        return bytes(payload)
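
    # Example of the `read` contract above (illustrative only; `stream` is a
    # hypothetical `MplexStream` whose buffer currently holds b"hello"):
    #
    #     await stream.read(2)     # -> b"he"  (at most 2 bytes)
    #     await stream.read(100)   # -> b"llo" (fewer bytes than requested)
    #     await stream.read(-1)    # -> b""    (once the remote side has closed)
    #
    # A reset stream raises `MplexStreamReset` instead of returning data.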

    async def write(self, data: bytes) -> int:
        """
        Write to the stream.
        :return: number of bytes written
        """
        if self.event_local_closed.is_set():
            raise MplexStreamClosed(f"cannot write to closed stream: data={data}")
        flag = (
            HeaderTags.MessageInitiator
            if self.is_initiator
            else HeaderTags.MessageReceiver
        )
        return await self.mplex_conn.send_message(flag, data, self.stream_id)

    async def close(self) -> None:
        """
        Closing a stream closes it for writing and closes the remote end for
        reading, but still allows writing in the other direction.
        """
        # TODO: error handling with timeout

        async with self.close_lock:
            if self.event_local_closed.is_set():
                return

        flag = (
            HeaderTags.CloseInitiator if self.is_initiator else HeaderTags.CloseReceiver
        )
        # TODO: Raise when `mplex_conn.send_message` fails and `Mplex` isn't shutdown.
        await self.mplex_conn.send_message(flag, None, self.stream_id)

        _is_remote_closed: bool
        async with self.close_lock:
            self.event_local_closed.set()
            _is_remote_closed = self.event_remote_closed.is_set()

        if _is_remote_closed:
            # Both sides are closed, so we can safely remove the buffer from the dict.
            async with self.mplex_conn.streams_lock:
                del self.mplex_conn.streams[self.stream_id]
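
    # Note the contrast with `reset` below: `close` is a half-close, so the
    # remote peer may keep sending and `read` continues to work until the peer
    # signals EOF, whereas `reset` aborts both directions at once.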

    async def reset(self) -> None:
        """
        Close both ends of the stream and tell the remote side to hang up.
        """
        async with self.close_lock:
            # Both sides have already been closed: no need to send a reset.
            if self.event_remote_closed.is_set() and self.event_local_closed.is_set():
                return
            if self.event_reset.is_set():
                return
            self.event_reset.set()

            if not self.event_remote_closed.is_set():
                flag = (
                    HeaderTags.ResetInitiator
                    if self.is_initiator
                    else HeaderTags.ResetReceiver
                )
                # Fire and forget: send the reset frame without blocking on the result,
                # then yield once to the event loop so the send task can start.
                asyncio.ensure_future(
                    self.mplex_conn.send_message(flag, None, self.stream_id)
                )
                await asyncio.sleep(0)

            self.event_local_closed.set()
            self.event_remote_closed.set()

        async with self.mplex_conn.streams_lock:
            del self.mplex_conn.streams[self.stream_id]

    # TODO: deadline not in use
    def set_deadline(self, ttl: int) -> bool:
        """
        Set both read and write deadlines for the muxed stream.
        :return: True if successful
        """
        self.read_deadline = ttl
        self.write_deadline = ttl
        return True

    def set_read_deadline(self, ttl: int) -> bool:
        """
        Set the read deadline for the muxed stream.
        :return: True if successful
        """
        self.read_deadline = ttl
        return True

    def set_write_deadline(self, ttl: int) -> bool:
        """
        Set the write deadline for the muxed stream.
        :return: True if successful
        """
        self.write_deadline = ttl
        return True