Handle exceptions inside read_message

This removes the need to check for `None` on every message read.
This commit is contained in:
mhchia 2019-09-14 14:16:40 +08:00
parent f62f07bb9f
commit b51c2939a8
No known key found for this signature in database
GPG Key ID: 389EFBEA1362589A

View File

@ -178,107 +178,101 @@ class Mplex(IMuxedConn):
""" """
Read a message off of the secured connection and add it to the corresponding message buffer Read a message off of the secured connection and add it to the corresponding message buffer
""" """
# TODO Deal with other types of messages using flag (currently _)
while True: while True:
try: try:
channel_id, flag, message = await self._wait_until_shutting_down_or_closed( channel_id, flag, message = await self._wait_until_shutting_down_or_closed(
self.read_message() self.read_message()
) )
except ( except MplexUnavailable as error:
MplexUnavailable,
ConnectionResetError,
IncompleteReadError,
) as error:
print(f"!@# handle_incoming: read_message: exception={error}") print(f"!@# handle_incoming: read_message: exception={error}")
break break
if channel_id is not None and flag is not None and message is not None: stream_id = StreamID(channel_id=channel_id, is_initiator=bool(flag & 1))
stream_id = StreamID(channel_id=channel_id, is_initiator=bool(flag & 1)) is_stream_id_seen: bool
is_stream_id_seen: bool stream: MplexStream
stream: MplexStream async with self.streams_lock:
async with self.streams_lock: is_stream_id_seen = stream_id in self.streams
is_stream_id_seen = stream_id in self.streams if is_stream_id_seen:
if is_stream_id_seen: stream = self.streams[stream_id]
stream = self.streams[stream_id] if flag == HeaderTags.NewStream.value:
# Other consequent stream message should wait until the stream get accepted if is_stream_id_seen:
# TODO: Handle more tags, and refactor `HeaderTags` # `NewStream` for the same id is received twice...
if flag == HeaderTags.NewStream.value: # TODO: Shutdown
if is_stream_id_seen: pass
# `NewStream` for the same id is received twice... mplex_stream = await self._initialize_stream(
# TODO: Shutdown stream_id, message.decode()
pass )
mplex_stream = await self._initialize_stream( try:
stream_id, message.decode() await self._wait_until_shutting_down_or_closed(
self.new_stream_queue.put(mplex_stream)
) )
try: except MplexUnavailable:
await self._wait_until_shutting_down_or_closed( break
self.new_stream_queue.put(mplex_stream) elif flag in (
) HeaderTags.MessageInitiator.value,
except MplexUnavailable: HeaderTags.MessageReceiver.value,
break ):
elif flag in ( if not is_stream_id_seen:
HeaderTags.MessageInitiator.value, # We receive a message of the stream `stream_id` which is not accepted
HeaderTags.MessageReceiver.value, # before. It is abnormal. Possibly disconnect?
): # TODO: Warn and emit logs about this.
if not is_stream_id_seen: continue
# We receive a message of the stream `stream_id` which is not accepted async with stream.close_lock:
# before. It is abnormal. Possibly disconnect? if stream.event_remote_closed.is_set():
# TODO: Warn and emit logs about this. # TODO: Warn "Received data from remote after stream was closed by them. (len = %d)" # noqa: E501
continue continue
async with stream.close_lock: try:
if stream.event_remote_closed.is_set(): await self._wait_until_shutting_down_or_closed(
# TODO: Warn "Received data from remote after stream was closed by them. (len = %d)" # noqa: E501 stream.incoming_data.put(message)
continue )
try: except MplexUnavailable:
await self._wait_until_shutting_down_or_closed( break
stream.incoming_data.put(message) elif flag in (
) HeaderTags.CloseInitiator.value,
except MplexUnavailable: HeaderTags.CloseReceiver.value,
break ):
elif flag in ( if not is_stream_id_seen:
HeaderTags.CloseInitiator.value, continue
HeaderTags.CloseReceiver.value, # NOTE: If remote is already closed, then return: Technically a bug
): # on the other side. We should consider killing the connection.
if not is_stream_id_seen: async with stream.close_lock:
if stream.event_remote_closed.is_set():
continue continue
# NOTE: If remote is already closed, then return: Technically a bug is_local_closed: bool
# on the other side. We should consider killing the connection. async with stream.close_lock:
async with stream.close_lock: stream.event_remote_closed.set()
if stream.event_remote_closed.is_set(): is_local_closed = stream.event_local_closed.is_set()
continue # If local is also closed, both sides are closed. Then, we should clean up
is_local_closed: bool # the entry of this stream, to avoid others from accessing it.
async with stream.close_lock: if is_local_closed:
stream.event_remote_closed.set()
is_local_closed = stream.event_local_closed.is_set()
# If local is also closed, both sides are closed. Then, we should clean up
# the entry of this stream, to avoid others from accessing it.
if is_local_closed:
async with self.streams_lock:
del self.streams[stream_id]
elif flag in (
HeaderTags.ResetInitiator.value,
HeaderTags.ResetReceiver.value,
):
if not is_stream_id_seen:
# This is *ok*. We forget the stream on reset.
continue
async with stream.close_lock:
if not stream.event_remote_closed.is_set():
stream.event_reset.set()
stream.event_remote_closed.set()
# If local is not closed, we should close it.
if not stream.event_local_closed.is_set():
stream.event_local_closed.set()
async with self.streams_lock: async with self.streams_lock:
del self.streams[stream_id] del self.streams[stream_id]
else: elif flag in (
# TODO: logging HeaderTags.ResetInitiator.value,
if is_stream_id_seen: HeaderTags.ResetReceiver.value,
await stream.reset() ):
if not is_stream_id_seen:
# This is *ok*. We forget the stream on reset.
continue
async with stream.close_lock:
if not stream.event_remote_closed.is_set():
stream.event_reset.set()
stream.event_remote_closed.set()
# If local is not closed, we should close it.
if not stream.event_local_closed.is_set():
stream.event_local_closed.set()
async with self.streams_lock:
del self.streams[stream_id]
else:
# TODO: logging
if is_stream_id_seen:
await stream.reset()
# Force context switch # Force context switch
await asyncio.sleep(0) await asyncio.sleep(0)
# If we enter here, it means this connection is shutting down.
# We should clean the things up.
await self._cleanup() await self._cleanup()
async def read_message(self) -> Tuple[int, int, bytes]: async def read_message(self) -> Tuple[int, int, bytes]:
@ -290,15 +284,19 @@ class Mplex(IMuxedConn):
# FIXME: No timeout is used in Go implementation. # FIXME: No timeout is used in Go implementation.
# Timeout is set to a relatively small value to alleviate wait time to exit # Timeout is set to a relatively small value to alleviate wait time to exit
# loop in handle_incoming # loop in handle_incoming
header = await decode_uvarint_from_stream(self.secured_conn)
# TODO: Handle the case of EOF and other exceptions?
try: try:
header = await decode_uvarint_from_stream(self.secured_conn)
message = await asyncio.wait_for( message = await asyncio.wait_for(
read_varint_prefixed_bytes(self.secured_conn), timeout=5 read_varint_prefixed_bytes(self.secured_conn), timeout=5
) )
except asyncio.TimeoutError: except (ConnectionResetError, IncompleteReadError) as error:
# TODO: Investigate what we should do if time is out. raise MplexUnavailable(
return None, None, None "failed to read messages correctly from the underlying connection"
) from error
except asyncio.TimeoutError as error:
raise MplexUnavailable(
"failed to read more message body within the timeout"
) from error
flag = header & 0x07 flag = header & 0x07
channel_id = header >> 3 channel_id = header >> 3