import asyncio
from .raw_connection_interface import IRawConnection
class RawConnection(IRawConnection):
    """
    A raw, unsecured, unmultiplexed connection built on top of an asyncio
    stream pair.

    Exposes thin ``write``/``read``/``close`` wrappers around the underlying
    ``StreamReader``/``StreamWriter`` and hands out stream ids for the
    multiplexer layered above it.
    """

    # Remote address this connection is associated with.
    conn_ip: str
    conn_port: str
    reader: asyncio.StreamReader
    writer: asyncio.StreamWriter
    # True when the local peer dialed (opened) this connection.
    initiator: bool
    # Serializes drain() calls; see note in ``write``.
    _drain_lock: asyncio.Lock
    # Next stream id to hand out: initiators use even ids, responders odd,
    # so the two sides can never collide.
    _next_id: int

    def __init__(
        self,
        ip: str,
        port: str,
        reader: asyncio.StreamReader,
        writer: asyncio.StreamWriter,
        initiator: bool,
    ) -> None:
        """
        :param ip: remote IP address
        :param port: remote port (kept as a string)
        :param reader: stream to read inbound bytes from
        :param writer: stream to write outbound bytes to
        :param initiator: whether the local peer opened this connection
        """
        self.conn_ip = ip
        self.conn_port = port
        self.reader = reader
        self.writer = writer
        self.initiator = initiator
        self._drain_lock = asyncio.Lock()
        self._next_id = 0 if initiator else 1

    async def write(self, data: bytes) -> None:
        """Write ``data`` to the underlying stream and wait for it to drain."""
        self.writer.write(data)
        # Reference: https://github.com/ethereum/lahja/blob/93610b2eb46969ff1797e0748c7ac2595e130aef/lahja/asyncio/endpoint.py#L99-L102 # noqa: E501
        # Use a lock to serialize drain() calls. Circumvents this bug:
        # https://bugs.python.org/issue29930
        async with self._drain_lock:
            await self.writer.drain()

    async def read(self, n: int = -1) -> bytes:
        """
        Read up to ``n`` bytes from the underlying stream.
        This call is delegated directly to the underlying ``self.reader``.
        """
        return await self.reader.read(n)

    def close(self) -> None:
        """Close the writer side of the connection.

        NOTE(review): this does not await ``writer.wait_closed()``; callers
        needing a confirmed close must do so themselves.
        """
        self.writer.close()

    def next_stream_id(self) -> int:
        """
        Get next available stream id
        :return: next available stream id for the connection
        """
        next_id = self._next_id
        # Advance by 2 to stay on this side's parity (even/odd) lane.
        self._next_id += 2
        return next_id