Initial commit

This commit is contained in:
2026-02-01 09:31:38 +01:00
commit e02db93960
4396 changed files with 1511612 additions and 0 deletions

View File

@@ -0,0 +1,23 @@
"""
Connection Adapters
===================
Pika provides multiple adapters to connect to RabbitMQ:
- adapters.asyncio_connection.AsyncioConnection: Native Python3 AsyncIO use
- adapters.blocking_connection.BlockingConnection: Enables blocking,
synchronous operation on top of library for simple uses.
- adapters.gevent_connection.GeventConnection: Connection adapter for use with
Gevent.
- adapters.select_connection.SelectConnection: A native event based connection
adapter that implements select, kqueue, poll and epoll.
- adapters.tornado_connection.TornadoConnection: Connection adapter for use
with the Tornado web framework.
- adapters.twisted_connection.TwistedProtocolConnection: Connection adapter for
use with the Twisted framework
"""
from pika.adapters.base_connection import BaseConnection
from pika.adapters.blocking_connection import BlockingConnection
from pika.adapters.select_connection import SelectConnection
from pika.adapters.select_connection import IOLoop

View File

@@ -0,0 +1,283 @@
"""Use pika with the Asyncio EventLoop"""
import asyncio
import logging
import sys
from pika.adapters import base_connection
from pika.adapters.utils import nbio_interface, io_services_utils
LOGGER = logging.getLogger(__name__)
if sys.platform == 'win32':
asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())
class AsyncioConnection(base_connection.BaseConnection):
    """Pika connection adapter that runs on top of asyncio's event loop."""

    def __init__(self,
                 parameters=None,
                 on_open_callback=None,
                 on_open_error_callback=None,
                 on_close_callback=None,
                 custom_ioloop=None,
                 internal_connection_workflow=True):
        """Create a new AsyncioConnection instance, connecting to RabbitMQ
        automatically.

        :param pika.connection.Parameters parameters: Connection parameters
        :param callable on_open_callback: The method to call when the connection
            is open
        :param None | method on_open_error_callback: Called if the connection
            can't be established or connection establishment is interrupted by
            `Connection.close()`: on_open_error_callback(Connection, exception).
        :param None | method on_close_callback: Called when a previously fully
            open connection is closed:
            `on_close_callback(Connection, exception)`, where `exception` is
            either an instance of `exceptions.ConnectionClosed` if closed by
            user or broker, or an exception of another type that describes the
            cause of connection failure.
        :param None | asyncio.AbstractEventLoop |
            nbio_interface.AbstractIOServices custom_ioloop:
                Defaults to asyncio.get_event_loop().
        :param bool internal_connection_workflow: True for autonomous connection
            establishment which is default; False for externally-managed
            connection workflow via the `create_connection()` factory.
        """
        # Accept either a ready-made async-services facade or a raw asyncio
        # loop (or None); wrap the latter in our adapter.
        nbio = (custom_ioloop
                if isinstance(custom_ioloop, nbio_interface.AbstractIOServices)
                else _AsyncioIOServicesAdapter(custom_ioloop))

        super().__init__(
            parameters,
            on_open_callback,
            on_open_error_callback,
            on_close_callback,
            nbio,
            internal_connection_workflow=internal_connection_workflow)

    @classmethod
    def create_connection(cls,
                          connection_configs,
                          on_done,
                          custom_ioloop=None,
                          workflow=None):
        """Implement
        :py:classmethod::`pika.adapters.BaseConnection.create_connection()`.
        """
        nbio = _AsyncioIOServicesAdapter(custom_ioloop)

        def connection_factory(params):
            """Build one externally-managed connection per workflow attempt."""
            if params is None:
                raise ValueError('Expected pika.connection.Parameters '
                                 'instance, but got None in params arg.')
            return cls(parameters=params,
                       custom_ioloop=nbio,
                       internal_connection_workflow=False)

        return cls._start_connection_workflow(
            connection_configs=connection_configs,
            connection_factory=connection_factory,
            nbio=nbio,
            workflow=workflow,
            on_done=on_done)
class _AsyncioIOServicesAdapter(io_services_utils.SocketConnectionMixin,
                                io_services_utils.StreamingConnectionMixin,
                                nbio_interface.AbstractIOServices,
                                nbio_interface.AbstractFileDescriptorServices):
    """Adapts `asyncio` to the
    :py:class:`.utils.nbio_interface.AbstractIOServices` interface.

    NOTE: the
    :py:class:`.utils.nbio_interface.AbstractFileDescriptorServices`
    interface is required only by the mixins.
    """

    def __init__(self, loop=None):
        """
        :param asyncio.AbstractEventLoop | None loop: If None, gets default
            event loop from asyncio.
        """
        self._loop = loop if loop else asyncio.get_event_loop()

    def get_native_ioloop(self):
        """Implement
        :py:meth:`.utils.nbio_interface.AbstractIOServices.get_native_ioloop()`.
        """
        return self._loop

    def close(self):
        """Implement
        :py:meth:`.utils.nbio_interface.AbstractIOServices.close()`.
        """
        self._loop.close()

    def run(self):
        """Implement :py:meth:`.utils.nbio_interface.AbstractIOServices.run()`.
        """
        self._loop.run_forever()

    def stop(self):
        """Implement :py:meth:`.utils.nbio_interface.AbstractIOServices.stop()`.
        """
        self._loop.stop()

    def add_callback_threadsafe(self, callback):
        """Implement
        :py:meth:`.utils.nbio_interface.AbstractIOServices.add_callback_threadsafe()`.
        """
        self._loop.call_soon_threadsafe(callback)

    def call_later(self, delay, callback):
        """Implement
        :py:meth:`.utils.nbio_interface.AbstractIOServices.call_later()`.
        """
        handle = self._loop.call_later(delay, callback)
        return _TimerHandle(handle)

    def getaddrinfo(self,
                    host,
                    port,
                    on_done,
                    family=0,
                    socktype=0,
                    proto=0,
                    flags=0):
        """Implement
        :py:meth:`.utils.nbio_interface.AbstractIOServices.getaddrinfo()`.
        """
        lookup = self._loop.getaddrinfo(host,
                                        port,
                                        family=family,
                                        type=socktype,
                                        proto=proto,
                                        flags=flags)
        return self._schedule_and_wrap_in_io_ref(lookup, on_done)

    def set_reader(self, fd, on_readable):
        """Implement
        :py:meth:`.utils.nbio_interface.AbstractFileDescriptorServices.set_reader()`.
        """
        self._loop.add_reader(fd, on_readable)
        LOGGER.debug('set_reader(%s, _)', fd)

    def remove_reader(self, fd):
        """Implement
        :py:meth:`.utils.nbio_interface.AbstractFileDescriptorServices.remove_reader()`.
        """
        LOGGER.debug('remove_reader(%s)', fd)
        return self._loop.remove_reader(fd)

    def set_writer(self, fd, on_writable):
        """Implement
        :py:meth:`.utils.nbio_interface.AbstractFileDescriptorServices.set_writer()`.
        """
        self._loop.add_writer(fd, on_writable)
        LOGGER.debug('set_writer(%s, _)', fd)

    def remove_writer(self, fd):
        """Implement
        :py:meth:`.utils.nbio_interface.AbstractFileDescriptorServices.remove_writer()`.
        """
        LOGGER.debug('remove_writer(%s)', fd)
        return self._loop.remove_writer(fd)

    def _schedule_and_wrap_in_io_ref(self, coro, on_done):
        """Schedule the coroutine to run and return _AsyncioIOReference

        :param coroutine-obj coro:
        :param callable on_done: user callback that takes the completion result
            or exception as its only arg. It will not be called if the operation
            was cancelled.
        :rtype: _AsyncioIOReference which is derived from
            nbio_interface.AbstractIOReference
        """
        if not callable(on_done):
            raise TypeError(
                'on_done arg must be callable, but got {!r}'.format(on_done))

        future = asyncio.ensure_future(coro, loop=self._loop)
        return _AsyncioIOReference(future, on_done)
class _TimerHandle(nbio_interface.AbstractTimerReference):
    """Adapts `asyncio.Handle` to
    `nbio_interface.AbstractTimerReference`.
    """

    def __init__(self, handle):
        """
        :param asyncio.Handle handle:
        """
        self._handle = handle

    def cancel(self):
        """Cancel the timer; subsequent calls are no-ops."""
        handle, self._handle = self._handle, None
        if handle is not None:
            handle.cancel()
class _AsyncioIOReference(nbio_interface.AbstractIOReference):
    """Adapts `asyncio.Future` to
    `nbio_interface.AbstractIOReference`.
    """

    def __init__(self, future, on_done):
        """
        :param asyncio.Future future:
        :param callable on_done: user callback that takes the completion result
            or exception as its only arg. It will not be called if the operation
            was cancelled.
        """
        if not callable(on_done):
            raise TypeError(
                'on_done arg must be callable, but got {!r}'.format(on_done))

        self._future = future

        def _dispatch(completed):
            """Relay the future's outcome to the user callback."""
            # Asyncio schedules callbacks even for cancelled futures, but
            # pika does not want cancelled operations reported.
            if completed.cancelled():
                return
            on_done(completed.exception() or completed.result())

        future.add_done_callback(_dispatch)

    def cancel(self):
        """Cancel pending operation

        :returns: False if was already done or cancelled; True otherwise
        :rtype: bool
        """
        return self._future.cancel()

View File

@@ -0,0 +1,501 @@
"""Base class extended by connection adapters. This extends the
connection.Connection class to encapsulate connection behavior but still
isolate socket and low level communication.
"""
import abc
import functools
import logging
import pika.compat
import pika.exceptions
import pika.tcp_socket_opts
from pika.adapters.utils import connection_workflow, nbio_interface
from pika import connection
LOGGER = logging.getLogger(__name__)
class BaseConnection(connection.Connection):
    """BaseConnection class that should be extended by connection adapters.
    This class abstracts I/O loop and transport services from pika core.
    """

    def __init__(self, parameters, on_open_callback, on_open_error_callback,
                 on_close_callback, nbio, internal_connection_workflow):
        """Create a new instance of the Connection object.

        :param None|pika.connection.Parameters parameters: Connection parameters
        :param None|method on_open_callback: Method to call on connection open
        :param None | method on_open_error_callback: Called if the connection
            can't be established or connection establishment is interrupted by
            `Connection.close()`: on_open_error_callback(Connection, exception).
        :param None | method on_close_callback: Called when a previously fully
            open connection is closed:
            `on_close_callback(Connection, exception)`, where `exception` is
            either an instance of `exceptions.ConnectionClosed` if closed by
            user or broker or exception of another type that describes the cause
            of connection failure.
        :param pika.adapters.utils.nbio_interface.AbstractIOServices nbio:
            asynchronous services
        :param bool internal_connection_workflow: True for autonomous connection
            establishment which is default; False for externally-managed
            connection workflow via the `create_connection()` factory.
        :raises: RuntimeError
        :raises: ValueError
        """
        if parameters and not isinstance(parameters, connection.Parameters):
            raise ValueError(
                'Expected instance of Parameters, not %r' % (parameters,))

        # Async services facade (I/O loop, timers, transports).
        self._nbio = nbio

        # Workflow driving connection establishment; only set while connecting.
        self._connection_workflow = None  # type: connection_workflow.AMQPConnectionWorkflow

        # Streaming transport; None until the stream is connected.
        self._transport = None  # type: pika.adapters.utils.nbio_interface.AbstractStreamTransport

        self._got_eof = False  # transport indicated EOF (connection reset)

        super(BaseConnection, self).__init__(
            parameters,
            on_open_callback,
            on_open_error_callback,
            on_close_callback,
            internal_connection_workflow=internal_connection_workflow)

    def _init_connection_state(self):
        """Initialize or reset all of our internal state variables for a given
        connection. If we disconnect and reconnect, all of our state needs to
        be wiped.
        """
        super(BaseConnection, self)._init_connection_state()

        self._connection_workflow = None
        self._transport = None
        self._got_eof = False

    def __repr__(self):
        """Return a debug representation with state, transport and params."""

        # def get_socket_repr(sock):
        #     """Return socket info suitable for use in repr"""
        #     if sock is None:
        #         return None
        #
        #     sockname = None
        #     peername = None
        #     try:
        #         sockname = sock.getsockname()
        #     except pika.compat.SOCKET_ERROR:
        #         # closed?
        #         pass
        #     else:
        #         try:
        #             peername = sock.getpeername()
        #         except pika.compat.SOCKET_ERROR:
        #             # not connected?
        #             pass
        #
        #     return '%s->%s' % (sockname, peername)
        # TODO need helpful __repr__ in transports
        return ('<%s %s transport=%s params=%s>' % (
            self.__class__.__name__, self._STATE_NAMES[self.connection_state],
            self._transport, self.params))

    @classmethod
    @abc.abstractmethod
    def create_connection(cls,
                          connection_configs,
                          on_done,
                          custom_ioloop=None,
                          workflow=None):
        """Asynchronously create a connection to an AMQP broker using the given
        configurations. Will attempt to connect using each config in the given
        order, including all compatible resolved IP addresses of the hostname
        supplied in each config, until one is established or all attempts fail.

        See also `_start_connection_workflow()`.

        :param sequence connection_configs: A sequence of one or more
            `pika.connection.Parameters`-based objects.
        :param callable on_done: as defined in
            `connection_workflow.AbstractAMQPConnectionWorkflow.start()`.
        :param object | None custom_ioloop: Provide a custom I/O loop that is
            native to the specific adapter implementation; if None, the adapter
            will use a default loop instance, which is typically a singleton.
        :param connection_workflow.AbstractAMQPConnectionWorkflow | None workflow:
            Pass an instance of an implementation of the
            `connection_workflow.AbstractAMQPConnectionWorkflow` interface;
            defaults to a `connection_workflow.AMQPConnectionWorkflow` instance
            with default values for optional args.
        :returns: Connection workflow instance in use. The user should limit
            their interaction with this object only to it's `abort()` method.
        :rtype: connection_workflow.AbstractAMQPConnectionWorkflow
        """
        raise NotImplementedError

    @classmethod
    def _start_connection_workflow(cls, connection_configs, connection_factory,
                                   nbio, workflow, on_done):
        """Helper function for custom implementations of `create_connection()`.

        :param sequence connection_configs: A sequence of one or more
            `pika.connection.Parameters`-based objects.
        :param callable connection_factory: A function that takes
            `pika.connection.Parameters` as its only arg and returns a brand new
            `pika.connection.Connection`-based adapter instance each time it is
            called. The factory must instantiate the connection with
            `internal_connection_workflow=False`.
        :param pika.adapters.utils.nbio_interface.AbstractIOServices nbio:
        :param connection_workflow.AbstractAMQPConnectionWorkflow | None workflow:
            Pass an instance of an implementation of the
            `connection_workflow.AbstractAMQPConnectionWorkflow` interface;
            defaults to a `connection_workflow.AMQPConnectionWorkflow` instance
            with default values for optional args.
        :param callable on_done: as defined in
            :py:meth:`connection_workflow.AbstractAMQPConnectionWorkflow.start()`.
        :returns: Connection workflow instance in use. The user should limit
            their interaction with this object only to it's `abort()` method.
        :rtype: connection_workflow.AbstractAMQPConnectionWorkflow
        """
        if workflow is None:
            workflow = connection_workflow.AMQPConnectionWorkflow()
            LOGGER.debug('Created default connection workflow %r', workflow)

        # Only the default workflow implementation is guaranteed to accept
        # our I/O services; custom workflows manage their own.
        if isinstance(workflow, connection_workflow.AMQPConnectionWorkflow):
            workflow.set_io_services(nbio)

        def create_connector():
            """`AMQPConnector` factory."""
            return connection_workflow.AMQPConnector(
                lambda params: _StreamingProtocolShim(
                    connection_factory(params)),
                nbio)

        workflow.start(
            connection_configs=connection_configs,
            connector_factory=create_connector,
            native_loop=nbio.get_native_ioloop(),
            # Unwrap the shim before handing the result to the user callback.
            on_done=functools.partial(cls._unshim_connection_workflow_callback,
                                      on_done))

        return workflow

    @property
    def ioloop(self):
        """
        :returns: the native I/O loop instance underlying async services selected
            by user or the default selected by the specialized connection
            adapter (e.g., Twisted reactor, `asyncio.SelectorEventLoop`,
            `select_connection.IOLoop`, etc.)
        :rtype: object
        """
        return self._nbio.get_native_ioloop()

    def _adapter_call_later(self, delay, callback):
        """Implement
        :py:meth:`pika.connection.Connection._adapter_call_later()`.
        """
        return self._nbio.call_later(delay, callback)

    def _adapter_remove_timeout(self, timeout_id):
        """Implement
        :py:meth:`pika.connection.Connection._adapter_remove_timeout()`.
        """
        timeout_id.cancel()

    def _adapter_add_callback_threadsafe(self, callback):
        """Implement
        :py:meth:`pika.connection.Connection._adapter_add_callback_threadsafe()`.
        """
        if not callable(callback):
            raise TypeError(
                'callback must be a callable, but got %r' % (callback,))

        self._nbio.add_callback_threadsafe(callback)

    def _adapter_connect_stream(self):
        """Initiate full-stack connection establishment asynchronously for
        internally-initiated connection bring-up.

        Upon failed completion, we will invoke
        `Connection._on_stream_terminated()`. NOTE: On success,
        the stack will be up already, so there is no corresponding callback.
        """
        self._connection_workflow = connection_workflow.AMQPConnectionWorkflow(
            _until_first_amqp_attempt=True)

        self._connection_workflow.set_io_services(self._nbio)

        def create_connector():
            """`AMQPConnector` factory"""
            return connection_workflow.AMQPConnector(
                lambda _params: _StreamingProtocolShim(self), self._nbio)

        self._connection_workflow.start(
            [self.params],
            connector_factory=create_connector,
            native_loop=self._nbio.get_native_ioloop(),
            on_done=functools.partial(self._unshim_connection_workflow_callback,
                                      self._on_connection_workflow_done))

    @staticmethod
    def _unshim_connection_workflow_callback(user_on_done, shim_or_exc):
        """Strip the `_StreamingProtocolShim` wrapper before invoking the
        user's workflow-completion callback.

        :param callable user_on_done: user's `on_done` callback as defined in
            :py:meth:`connection_workflow.AbstractAMQPConnectionWorkflow.start()`.
        :param _StreamingProtocolShim | Exception shim_or_exc:
        """
        result = shim_or_exc
        if isinstance(result, _StreamingProtocolShim):
            result = result.conn

        user_on_done(result)

    def _abort_connection_workflow(self):
        """Asynchronously abort connection workflow. Upon
        completion, `Connection._on_stream_terminated()` will be called with None
        as the error argument.

        Assumption: may be called only while connection is opening.
        """
        assert not self._opened, (
            '_abort_connection_workflow() may be called only when '
            'connection is opening.')

        if self._transport is None:
            # NOTE: this is possible only when user calls Connection.close() to
            # interrupt internally-initiated connection establishment.
            # self._connection_workflow.abort() would not call
            # Connection.close() before pairing of connection with transport.
            assert self._internal_connection_workflow, (
                'Unexpected _abort_connection_workflow() call with '
                'no transport in external connection workflow mode.')

            # This will result in call to _on_connection_workflow_done() upon
            # completion
            self._connection_workflow.abort()
        else:
            # NOTE: we can't use self._connection_workflow.abort() in this case,
            # because it would result in infinite recursion as we're called
            # from Connection.close() and _connection_workflow.abort() calls
            # Connection.close() to abort a connection that's already been
            # paired with a transport. During internally-initiated connection
            # establishment, AMQPConnectionWorkflow will discover that user
            # aborted the connection when it receives
            # pika.exceptions.ConnectionOpenAborted.

            # This completes asynchronously, culminating in call to our method
            # `connection_lost()`
            self._transport.abort()

    def _on_connection_workflow_done(self, conn_or_exc):
        """`AMQPConnectionWorkflow` completion callback.

        :param BaseConnection | Exception conn_or_exc: Our own connection
            instance on success; exception on failure. See
            `AbstractAMQPConnectionWorkflow.start()` for details.
        """
        LOGGER.debug('Full-stack connection workflow completed: %r',
                     conn_or_exc)

        self._connection_workflow = None

        # Notify protocol of failure
        if isinstance(conn_or_exc, Exception):
            self._transport = None
            if isinstance(conn_or_exc,
                          connection_workflow.AMQPConnectionWorkflowAborted):
                LOGGER.info('Full-stack connection workflow aborted: %r',
                            conn_or_exc)
                # So that _handle_connection_workflow_failure() will know it's
                # not a failure
                conn_or_exc = None
            else:
                LOGGER.error('Full-stack connection workflow failed: %r',
                             conn_or_exc)
                # Translate a socket-connect failure into the exception type
                # that callers of pika historically catch.
                if (isinstance(conn_or_exc,
                               connection_workflow.AMQPConnectionWorkflowFailed)
                        and isinstance(
                            conn_or_exc.exceptions[-1], connection_workflow.
                            AMQPConnectorSocketConnectError)):
                    conn_or_exc = pika.exceptions.AMQPConnectionError(
                        conn_or_exc)

            self._handle_connection_workflow_failure(conn_or_exc)
        else:
            # NOTE: On success, the stack will be up already, so there is no
            # corresponding callback.
            assert conn_or_exc is self, \
                'Expected self conn={!r} from workflow, but got {!r}.'.format(
                    self, conn_or_exc)

    def _handle_connection_workflow_failure(self, error):
        """Handle failure of self-initiated stack bring-up and call
        `Connection._on_stream_terminated()` if connection is not in closed state
        yet. Called by adapter layer when the full-stack connection workflow
        fails.

        :param Exception | None error: exception instance describing the reason
            for failure or None if the connection workflow was aborted.
        """
        if error is None:
            LOGGER.info('Self-initiated stack bring-up aborted.')
        else:
            LOGGER.error('Self-initiated stack bring-up failed: %r', error)

        if not self.is_closed:
            self._on_stream_terminated(error)
        else:
            # This may happen when AMQP layer bring up was started but did not
            # complete
            LOGGER.debug('_handle_connection_workflow_failure(): '
                         'suppressing - connection already closed.')

    def _adapter_disconnect_stream(self):
        """Asynchronously bring down the streaming transport layer and invoke
        `Connection._on_stream_terminated()` asynchronously when complete.
        """
        if not self._opened:
            self._abort_connection_workflow()
        else:
            # This completes asynchronously, culminating in call to our method
            # `connection_lost()`
            self._transport.abort()

    def _adapter_emit_data(self, data):
        """Take ownership of data and send it to AMQP server as soon as
        possible.

        :param bytes data:
        """
        self._transport.write(data)

    def _proto_connection_made(self, transport):
        """Introduces transport to protocol after transport is connected.

        :py:class:`.utils.nbio_interface.AbstractStreamProtocol` implementation.

        :param nbio_interface.AbstractStreamTransport transport:
        :raises Exception: Exception-based exception on error
        """
        self._transport = transport

        # Let connection know that stream is available
        self._on_stream_connected()

    def _proto_connection_lost(self, error):
        """Called upon loss or closing of TCP connection.

        :py:class:`.utils.nbio_interface.AbstractStreamProtocol` implementation.

        NOTE: `connection_made()` and `connection_lost()` are each called just
        once and in that order. All other callbacks are called between them.

        :param BaseException | None error: An exception (check for
            `BaseException`) indicates connection failure. None indicates that
            connection was closed on this side, such as when it's aborted or
            when `AbstractStreamProtocol.eof_received()` returns a falsy result.
        :raises Exception: Exception-based exception on error
        """
        self._transport = None

        if error is None:
            # Either result of `eof_received()` or abort
            if self._got_eof:
                error = pika.exceptions.StreamLostError(
                    'Transport indicated EOF')
        else:
            error = pika.exceptions.StreamLostError(
                'Stream connection lost: {!r}'.format(error))

        # Deliberate abort (error still None) is logged at DEBUG; everything
        # else is an ERROR.
        LOGGER.log(logging.DEBUG if error is None else logging.ERROR,
                   'connection_lost: %r', error)

        self._on_stream_terminated(error)

    def _proto_eof_received(self):  # pylint: disable=R0201
        """Called after the remote peer shuts its write end of the connection.

        :py:class:`.utils.nbio_interface.AbstractStreamProtocol` implementation.

        :returns: A falsy value (including None) will cause the transport to
            close itself, resulting in an eventual `connection_lost()` call
            from the transport. If a truthy value is returned, it will be the
            protocol's responsibility to close/abort the transport.
        :rtype: falsy|truthy
        :raises Exception: Exception-based exception on error
        """
        LOGGER.error('Transport indicated EOF.')

        self._got_eof = True

        # This is how a reset connection will typically present itself
        # when we have nothing to send to the server over plaintext stream.
        #
        # Have transport tear down the connection and invoke our
        # `connection_lost` method
        return False

    def _proto_data_received(self, data):
        """Called to deliver incoming data from the server to the protocol.

        :py:class:`.utils.nbio_interface.AbstractStreamProtocol` implementation.

        :param data: Non-empty data bytes.
        :raises Exception: Exception-based exception on error
        """
        self._on_data_available(data)
class _StreamingProtocolShim(nbio_interface.AbstractStreamProtocol):
    """Shim for callbacks from transport so that we BaseConnection can
    delegate to private methods, thus avoiding contamination of API with
    methods that look public, but aren't.
    """

    # Neutralize the abstract methods so the shim can be instantiated; real
    # implementations are bound per-instance in __init__.
    connection_made = None
    connection_lost = None
    eof_received = None
    data_received = None

    def __init__(self, conn):
        """
        :param BaseConnection conn:
        """
        self.conn = conn
        # Bind each protocol callback to the connection's corresponding
        # private `_proto_*` method.
        # pylint: disable=W0212
        for name in ('connection_made', 'connection_lost',
                     'eof_received', 'data_received'):
            setattr(self, name, getattr(conn, '_proto_' + name))

    def __getattr__(self, name):
        """Proxy inexistent attribute requests to our connection instance
        so that AMQPConnectionWorkflow/AMQPConnector may treat the shim as an
        actual connection.
        """
        return getattr(self.conn, name)

    def __repr__(self):
        return '{}: {!r}'.format(self.__class__.__name__, self.conn)

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,469 @@
"""Use pika with the Gevent IOLoop."""
import functools
import logging
import os
import threading
import weakref
try:
import queue
except ImportError: # Python <= v2.7
import Queue as queue
import gevent
import gevent.hub
import gevent.socket
import pika.compat
from pika.adapters.base_connection import BaseConnection
from pika.adapters.utils.io_services_utils import check_callback_arg
from pika.adapters.utils.nbio_interface import (
AbstractIOReference,
AbstractIOServices,
)
from pika.adapters.utils.selector_ioloop_adapter import (
AbstractSelectorIOLoop,
SelectorIOServicesAdapter,
)
LOGGER = logging.getLogger(__name__)
class GeventConnection(BaseConnection):
    """Implementation of pika's ``BaseConnection``.

    An async selector-based connection which integrates with Gevent.
    """

    def __init__(self,
                 parameters=None,
                 on_open_callback=None,
                 on_open_error_callback=None,
                 on_close_callback=None,
                 custom_ioloop=None,
                 internal_connection_workflow=True):
        """Create a new GeventConnection instance and connect to RabbitMQ on
        Gevent's event-loop.

        :param pika.connection.Parameters|None parameters: The connection
            parameters
        :param callable|None on_open_callback: The method to call when the
            connection is open
        :param callable|None on_open_error_callback: Called if the connection
            can't be established or connection establishment is interrupted by
            `Connection.close()`:
            on_open_error_callback(Connection, exception)
        :param callable|None on_close_callback: Called when a previously fully
            open connection is closed:
            `on_close_callback(Connection, exception)`, where `exception` is
            either an instance of `exceptions.ConnectionClosed` if closed by
            user or broker or exception of another type that describes the
            cause of connection failure
        :param gevent._interfaces.ILoop|nbio_interface.AbstractIOServices|None
            custom_ioloop: Use a custom Gevent ILoop.
        :param bool internal_connection_workflow: True for autonomous connection
            establishment which is default; False for externally-managed
            connection workflow via the `create_connection()` factory
        """
        # Gevent's selector-based loop relies on POSIX fd semantics.
        if pika.compat.ON_WINDOWS:
            raise RuntimeError('GeventConnection is not supported on Windows.')

        ioloop = custom_ioloop or _GeventSelectorIOLoop(gevent.get_hub())
        nbio = (ioloop if isinstance(ioloop, AbstractIOServices)
                else _GeventSelectorIOServicesAdapter(ioloop))

        super(GeventConnection, self).__init__(
            parameters,
            on_open_callback,
            on_open_error_callback,
            on_close_callback,
            nbio,
            internal_connection_workflow=internal_connection_workflow)

    @classmethod
    def create_connection(cls,
                          connection_configs,
                          on_done,
                          custom_ioloop=None,
                          workflow=None):
        """Implement
        :py:classmethod::`pika.adapters.BaseConnection.create_connection()`.
        """
        ioloop = custom_ioloop or _GeventSelectorIOLoop(gevent.get_hub())
        nbio = _GeventSelectorIOServicesAdapter(ioloop)

        def connection_factory(params):
            """Build one externally-managed connection per workflow attempt."""
            if params is None:
                raise ValueError('Expected pika.connection.Parameters '
                                 'instance, but got None in params arg.')
            return cls(parameters=params,
                       custom_ioloop=nbio,
                       internal_connection_workflow=False)

        return cls._start_connection_workflow(
            connection_configs=connection_configs,
            connection_factory=connection_factory,
            nbio=nbio,
            workflow=workflow,
            on_done=on_done)
class _TSafeCallbackQueue(object):
    """Dispatch callbacks posted from arbitrary threads so that they execute
    in the main thread, waking the IO loop via a self-pipe.
    """

    def __init__(self):
        """Set up the queue and the wake-up pipe."""
        # FIFO that hands callbacks between threads; Queue is thread-safe.
        self._queue = queue.Queue()
        # Self-pipe: the read end is registered with the IO loop; a byte
        # written to the write end signals that a callback is pending.
        self._read_fd, self._write_fd = os.pipe()
        # Serialize writes to the pipe in case some platform/implementation
        # requires it.
        self._write_lock = threading.RLock()

    @property
    def fd(self):
        """The file-descriptor to register for READ events in the IO loop."""
        return self._read_fd

    def add_callback_threadsafe(self, callback):
        """Enqueue a callback from any thread; the main thread will be woken
        to invoke it.

        :param callback: Callable to run in the main thread.
        """
        self._queue.put(callback)
        with self._write_lock:
            # The value written is not important.
            os.write(self._write_fd, b'\xFF')

    def run_next_callback(self):
        """Invoke the next queued callback.

        MUST run in the main thread, and only when the pipe is ready for
        reading — the blocking pipe READ would otherwise stall the IO loop.
        """
        try:
            callback = self._queue.get_nowait()
        except queue.Empty:
            # Should never happen.
            LOGGER.warning("Callback queue was empty.")
        else:
            # Consume the wake-up byte so the event doesn't re-fire.
            os.read(self._read_fd, 1)
            callback()
class _GeventSelectorIOLoop(AbstractSelectorIOLoop):
"""Implementation of `AbstractSelectorIOLoop` using the Gevent event loop.
Required by implementations of `SelectorIOServicesAdapter`.
"""
# Gevent's READ and WRITE masks are defined as 1 and 2 respectively. No
# ERROR mask is defined.
# See http://www.gevent.org/api/gevent.hub.html#gevent._interfaces.ILoop.io
READ = 1
WRITE = 2
ERROR = 0
def __init__(self, gevent_hub=None):
"""
:param gevent._interfaces.ILoop gevent_loop:
"""
self._hub = gevent_hub or gevent.get_hub()
self._io_watchers_by_fd = {}
# Used to start/stop the loop.
self._waiter = gevent.hub.Waiter()
# For adding callbacks from other threads. See `add_callback(..)`.
self._callback_queue = _TSafeCallbackQueue()
def run_callback_in_main_thread(fd, events):
"""Swallow the fd and events arguments."""
del fd
del events
self._callback_queue.run_next_callback()
self.add_handler(self._callback_queue.fd, run_callback_in_main_thread,
self.READ)
def close(self):
"""Release the loop's resources."""
self._hub.loop.destroy()
self._hub = None
def start(self):
"""Run the I/O loop. It will loop until requested to exit. See `stop()`.
"""
LOGGER.debug("Passing control to Gevent's IOLoop")
self._waiter.get() # Block until 'stop()' is called.
LOGGER.debug("Control was passed back from Gevent's IOLoop")
self._waiter.clear()
def stop(self):
"""Request exit from the ioloop. The loop is NOT guaranteed to
stop before this method returns.
To invoke `stop()` safely from a thread other than this IOLoop's thread,
call it via `add_callback_threadsafe`; e.g.,
`ioloop.add_callback(ioloop.stop)`
"""
self._waiter.switch(None)
def add_callback(self, callback):
"""Requests a call to the given function as soon as possible in the
context of this IOLoop's thread.
NOTE: This is the only thread-safe method in IOLoop. All other
manipulations of IOLoop must be performed from the IOLoop's thread.
For example, a thread may request a call to the `stop` method of an
ioloop that is running in a different thread via
`ioloop.add_callback_threadsafe(ioloop.stop)`
:param callable callback: The callback method
"""
if gevent.get_hub() == self._hub:
# We're in the main thread; just add the callback.
LOGGER.debug("Adding callback from main thread")
self._hub.loop.run_callback(callback)
else:
# This isn't the main thread and Gevent's hub/loop don't provide
# any thread-safety so enqueue the callback for it to be registered
# in the main thread.
LOGGER.debug("Adding callback from another thread")
callback = functools.partial(self._hub.loop.run_callback, callback)
self._callback_queue.add_callback_threadsafe(callback)
def call_later(self, delay, callback):
    """Schedule `callback` to be invoked once, `delay` seconds from now, on
    a best-effort basis.

    :param float delay: The number of seconds to wait before invoking
        `callback`
    :param callable callback: The callback method
    :returns: handle to the created timeout that may be passed to
        `remove_timeout()`
    :rtype: object
    """
    pending = self._hub.loop.timer(delay)
    pending.start(callback)
    return pending
def remove_timeout(self, timeout_handle):
    """Cancel and release a timer previously created via `call_later()`.

    :param timeout_handle: Handle of timeout to remove
    """
    timeout_handle.close()
def add_handler(self, fd, handler, events):
    """Begin watching `fd`; `handler(fd, events)` fires when the requested
    events occur.

    :param int fd: The file descriptor
    :param callable handler: called as `handler(fd, events)`
    :param int events: The event mask (READ|WRITE)
    """
    watcher = self._hub.loop.io(fd, events)
    # Track it so update_handler()/remove_handler() can find it later.
    self._io_watchers_by_fd[fd] = watcher
    watcher.start(handler, fd, events)
def update_handler(self, fd, events):
    """Re-arm the watcher on `fd` with a new event mask.

    A gevent io watcher cannot change its mask in place, so the old
    watcher is closed and a fresh one is registered with the original
    callback and the new events.

    :param int fd: The file descriptor
    :param int events: The new event mask (READ|WRITE)
    """
    stale_watcher = self._io_watchers_by_fd.pop(fd)
    saved_callback = stale_watcher.callback
    stale_watcher.close()
    self.add_handler(fd, saved_callback, events)
def remove_handler(self, fd):
    """Stop watching `fd` and discard its watcher.

    :param int fd: The file descriptor
    """
    self._io_watchers_by_fd.pop(fd).close()
class _GeventSelectorIOServicesAdapter(SelectorIOServicesAdapter):
    """`SelectorIOServicesAdapter` variant that resolves host names through
    Gevent's configured DNS resolver instead of the blocking stdlib call.
    """

    def getaddrinfo(self,
                    host,
                    port,
                    on_done,
                    family=0,
                    socktype=0,
                    proto=0,
                    flags=0):
        """Implement :py:meth:`.nbio_interface.AbstractIOServices.getaddrinfo()`.
        """
        lookup = _GeventAddressResolver(native_loop=self._loop,
                                        host=host,
                                        port=port,
                                        family=family,
                                        socktype=socktype,
                                        proto=proto,
                                        flags=flags,
                                        on_done=on_done)
        lookup.start()
        # Wrap in an `AbstractIOReference` so the caller can cancel it.
        return _GeventIOLoopIOHandle(lookup)
class _GeventIOLoopIOHandle(AbstractIOReference):
    """`AbstractIOReference` wrapper used only for `_GeventAddressResolver`.
    """

    def __init__(self, subject):
        """
        :param subject: object exposing the `cancel()` method this handle
            forwards to
        """
        # Bind the method directly; the subject itself isn't needed.
        self._cancel = subject.cancel

    def cancel(self):
        """Cancel pending operation

        :returns: False if was already done or cancelled; True otherwise
        :rtype: bool
        """
        return self._cancel()
class _GeventAddressResolver(object):
    """Performs getaddrinfo asynchronously using Gevent's configured resolver
    in a separate greenlet, invoking the provided callback with the result.

    See: http://www.gevent.org/dns.html
    """
    # __slots__ keeps the many short-lived resolver instances small and
    # catches typo'd attribute names.
    __slots__ = (
        '_loop',
        '_on_done',
        '_greenlet',
        # getaddrinfo(..) args:
        '_ga_host',
        '_ga_port',
        '_ga_family',
        '_ga_socktype',
        '_ga_proto',
        '_ga_flags')

    def __init__(self, native_loop, host, port, family, socktype, proto, flags,
                 on_done):
        """Initialize the `_GeventAddressResolver`.

        :param AbstractSelectorIOLoop native_loop:
        :param host: `see socket.getaddrinfo()`
        :param port: `see socket.getaddrinfo()`
        :param family: `see socket.getaddrinfo()`
        :param socktype: `see socket.getaddrinfo()`
        :param proto: `see socket.getaddrinfo()`
        :param flags: `see socket.getaddrinfo()`
        :param on_done: on_done(records|BaseException) callback for reporting
            result from the given I/O loop. The single arg will be either an
            exception object (check for `BaseException`) in case of failure or
            the result returned by `socket.getaddrinfo()`.
        """
        check_callback_arg(on_done, 'on_done')

        self._loop = native_loop
        self._on_done = on_done
        # Reference to the greenlet performing `getaddrinfo`.
        self._greenlet = None
        # getaddrinfo(..) args.
        self._ga_host = host
        self._ga_port = port
        self._ga_family = family
        self._ga_socktype = socktype
        self._ga_proto = proto
        self._ga_flags = flags

    def start(self):
        """Start an asynchronous getaddrinfo invocation."""
        if self._greenlet is None:
            # spawn_raw is cheaper than spawn; the greenlet is never joined.
            self._greenlet = gevent.spawn_raw(self._resolve)
        else:
            LOGGER.warning("_GeventAddressResolver already started")

    def cancel(self):
        """Cancel the pending resolver.

        :returns: True if a greenlet was pending and has been stopped;
            False if resolution had already finished or was cancelled.
        """
        changed = False
        if self._greenlet is not None:
            changed = True
            self._stop_greenlet()
        self._cleanup()
        return changed

    def _cleanup(self):
        """Stop the resolver and release any resources."""
        self._stop_greenlet()
        # Drop references to help garbage collection.
        self._loop = None
        self._on_done = None

    def _stop_greenlet(self):
        """Stop the greenlet performing getaddrinfo if running.

        Otherwise, this is a no-op.
        """
        if self._greenlet is not None:
            gevent.kill(self._greenlet)
            self._greenlet = None

    def _resolve(self):
        """Call `getaddrinfo()` and return result via user's callback
        function on the configured IO loop.
        """
        try:
            # NOTE(JG): Can't use kwargs with getaddrinfo on Python <= v2.7.
            result = gevent.socket.getaddrinfo(self._ga_host, self._ga_port,
                                               self._ga_family,
                                               self._ga_socktype,
                                               self._ga_proto, self._ga_flags)
        except Exception as exc:  # pylint: disable=broad-except
            LOGGER.error('Address resolution failed: %r', exc)
            # Failure is reported as the exception object itself.
            result = exc

        # Hop back onto the I/O loop before invoking user code.
        callback = functools.partial(self._dispatch_callback, result)
        self._loop.add_callback(callback)

    def _dispatch_callback(self, result):
        """Invoke the configured completion callback and any subsequent cleanup.

        :param result: result from getaddrinfo, or the exception if raised.
        """
        try:
            LOGGER.debug(
                'Invoking async getaddrinfo() completion callback; host=%r',
                self._ga_host)
            self._on_done(result)
        finally:
            # Always release resources, even if the user callback raises.
            self._cleanup()

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,90 @@
"""Use pika with the Tornado IOLoop
"""
import logging
from tornado import ioloop
from pika.adapters.utils import nbio_interface, selector_ioloop_adapter
from pika.adapters import base_connection
LOGGER = logging.getLogger(__name__)
class TornadoConnection(base_connection.BaseConnection):
    """Pika connection adapter that runs on a Tornado IOLoop."""

    def __init__(self,
                 parameters=None,
                 on_open_callback=None,
                 on_open_error_callback=None,
                 on_close_callback=None,
                 custom_ioloop=None,
                 internal_connection_workflow=True):
        """Create a new TornadoConnection instance, connecting to RabbitMQ
        automatically.

        :param pika.connection.Parameters|None parameters: The connection
            parameters
        :param callable|None on_open_callback: Invoked once the connection
            is open
        :param callable|None on_open_error_callback: Invoked if the connection
            can't be established or establishment is interrupted by
            `Connection.close()`: on_open_error_callback(Connection, exception)
        :param callable|None on_close_callback: Invoked when a previously
            fully-open connection closes:
            `on_close_callback(Connection, exception)`, where `exception` is
            an `exceptions.ConnectionClosed` when closed by user or broker,
            otherwise an exception describing the failure cause
        :param ioloop.IOLoop|nbio_interface.AbstractIOServices|None custom_ioloop:
            Override using the global IOLoop in Tornado
        :param bool internal_connection_workflow: True (default) for
            autonomous connection establishment; False for an
            externally-managed workflow via the `create_connection()` factory
        """
        if isinstance(custom_ioloop, nbio_interface.AbstractIOServices):
            # Caller already supplied an async-services adapter; use as-is.
            nbio = custom_ioloop
        else:
            # NOTE(review): `IOLoop.instance()` is deprecated in modern
            # Tornado in favor of `IOLoop.current()` — confirm the supported
            # Tornado versions before changing.
            tornado_loop = custom_ioloop or ioloop.IOLoop.instance()
            nbio = selector_ioloop_adapter.SelectorIOServicesAdapter(
                tornado_loop)
        super(TornadoConnection, self).__init__(
            parameters,
            on_open_callback,
            on_open_error_callback,
            on_close_callback,
            nbio,
            internal_connection_workflow=internal_connection_workflow)

    @classmethod
    def create_connection(cls,
                          connection_configs,
                          on_done,
                          custom_ioloop=None,
                          workflow=None):
        """Implement
        :py:classmethod:`pika.adapters.BaseConnection.create_connection()`.
        """
        nbio = selector_ioloop_adapter.SelectorIOServicesAdapter(
            custom_ioloop or ioloop.IOLoop.instance())

        def connection_factory(params):
            """Build one TornadoConnection for the workflow."""
            if params is None:
                raise ValueError('Expected pika.connection.Parameters '
                                 'instance, but got None in params arg.')
            return cls(
                parameters=params,
                custom_ioloop=nbio,
                internal_connection_workflow=False)

        return cls._start_connection_workflow(
            connection_configs=connection_configs,
            connection_factory=connection_factory,
            nbio=nbio,
            workflow=workflow,
            on_done=on_done)

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,866 @@
"""Implements `AMQPConnectionWorkflow` - the default workflow of performing
multiple TCP/[SSL]/AMQP connection attempts with timeouts and retries until one
succeeds or all attempts fail.
Defines the interface `AbstractAMQPConnectionWorkflow` that facilitates
implementing custom connection workflows.
"""
import functools
import logging
import socket
import pika.compat
import pika.exceptions
import pika.tcp_socket_opts
from pika import __version__
_LOG = logging.getLogger(__name__)
class AMQPConnectorException(Exception):
    """Base class for all connector/workflow exceptions in this module."""
class AMQPConnectorStackTimeout(AMQPConnectorException):
    """Overall TCP/[SSL]/AMQP stack connection attempt timed out.

    Typically wrapped inside one of the phase-specific errors to indicate
    which bring-up phase was in progress when the deadline expired.
    """
class AMQPConnectorAborted(AMQPConnectorException):
    """Asynchronous request was aborted; reported to `on_done` after a
    client calls `AMQPConnector.abort()`.
    """
class AMQPConnectorWrongState(AMQPConnectorException):
    """AMQPConnector operation requested in wrong state, such as aborting after
    completion was reported. This signals API misuse, not a network failure.
    """
class AMQPConnectorPhaseErrorBase(AMQPConnectorException):
    """Carrier for an exception raised during one particular bring-up phase.

    Subclasses identify the phase (TCP connect, transport setup, AMQP
    handshake); the original error is preserved on `self.exception`.
    """

    def __init__(self, exception, *args):
        """
        :param BaseException exception: error that occurred while waiting for a
            subclass-specific protocol bring-up phase to complete.
        :param args: positional args forwarded to the parent class
        """
        super(AMQPConnectorPhaseErrorBase, self).__init__(*args)
        # Keep the phase-specific root cause available to callers.
        self.exception = exception

    def __repr__(self):
        return '{}: {!r}'.format(type(self).__name__, self.exception)
class AMQPConnectorSocketConnectError(AMQPConnectorPhaseErrorBase):
    """Error connecting TCP socket to remote peer; the underlying socket or
    timeout error is available on `self.exception`.
    """
class AMQPConnectorTransportSetupError(AMQPConnectorPhaseErrorBase):
    """Error setting up transport after TCP connected but before AMQP handshake;
    the underlying error is available on `self.exception`.
    """
class AMQPConnectorAMQPHandshakeError(AMQPConnectorPhaseErrorBase):
    """Error during AMQP handshake; the underlying error is available on
    `self.exception`.
    """
class AMQPConnectionWorkflowAborted(AMQPConnectorException):
    """AMQP Connection workflow was aborted at the client's request."""
class AMQPConnectionWorkflowWrongState(AMQPConnectorException):
    """AMQP Connection Workflow operation requested in wrong state, such as
    aborting after completion was reported. This signals API misuse.
    """
class AMQPConnectionWorkflowFailed(AMQPConnectorException):
    """Signals that every attempt in the AMQP connection workflow failed."""

    def __init__(self, exceptions, *args):
        """
        :param sequence exceptions: Exceptions that occurred during the
            workflow.
        :param args: positional args forwarded to the base class
        """
        super(AMQPConnectionWorkflowFailed, self).__init__(*args)
        # Immutable snapshot of every failure, in chronological order.
        self.exceptions = tuple(exceptions)

    def __repr__(self):
        # Show the most recent failure, plus the first one when there were
        # several; with a single failure the "first" slot reads None.
        first = self.exceptions[0] if len(self.exceptions) > 1 else None
        return ('{}: {} exceptions in all; last exception - {!r}; first '
                'exception - {!r}').format(type(self).__name__,
                                           len(self.exceptions),
                                           self.exceptions[-1], first)
class AMQPConnector(object):
    """Performs a single TCP/[SSL]/AMQP connection workflow.
    """

    # Linear state machine for one connection attempt; transitions are driven
    # by I/O completions, timers, and client-initiated abort().
    _STATE_INIT = 0  # start() hasn't been called yet
    _STATE_TCP = 1  # TCP/IP connection establishment
    _STATE_TRANSPORT = 2  # [SSL] and transport linkup
    _STATE_AMQP = 3  # AMQP connection handshake
    _STATE_TIMEOUT = 4  # overall TCP/[SSL]/AMQP timeout
    _STATE_ABORTING = 5  # abort() called - aborting workflow
    _STATE_DONE = 6  # result reported to client
def __init__(self, conn_factory, nbio):
    """
    :param callable conn_factory: A function that takes
        `pika.connection.Parameters` as its only arg and returns a brand new
        `pika.connection.Connection`-based adapter instance each time it is
        called. The factory must instantiate the connection with
        `internal_connection_workflow=False`.
    :param pika.adapters.utils.nbio_interface.AbstractIOServices nbio:
    """
    self._conn_factory = conn_factory
    self._nbio = nbio
    # Remaining attributes are populated by start() and by the asynchronous
    # completion handlers as the workflow progresses.
    self._addr_record = None  # type: tuple
    self._conn_params = None  # type: pika.connection.Parameters
    self._on_done = None  # will be provided via start()
    # TCP connection timeout
    # pylint: disable=C0301
    self._tcp_timeout_ref = None  # type: pika.adapters.utils.nbio_interface.AbstractTimerReference
    # Overall TCP/[SSL]/AMQP timeout
    self._stack_timeout_ref = None  # type: pika.adapters.utils.nbio_interface.AbstractTimerReference
    # Current task
    self._task_ref = None  # type: pika.adapters.utils.nbio_interface.AbstractIOReference
    self._sock = None  # type: socket.socket
    self._amqp_conn = None  # type: pika.connection.Connection
    self._state = self._STATE_INIT
def start(self, addr_record, conn_params, on_done):
    """Asynchronously perform a single TCP/[SSL]/AMQP connection attempt.

    :param tuple addr_record: a single resolved address record compatible
        with `socket.getaddrinfo()` format.
    :param pika.connection.Parameters conn_params:
    :param callable on_done: Function to call upon completion of the
        workflow: `on_done(pika.connection.Connection | BaseException)`. If
        exception, it's going to be one of the following:
            `AMQPConnectorSocketConnectError`
            `AMQPConnectorTransportSetupError`
            `AMQPConnectorAMQPHandshakeError`
            `AMQPConnectorAborted`

    :raises AMQPConnectorWrongState: if called more than once.
    """
    if self._state != self._STATE_INIT:
        raise AMQPConnectorWrongState(
            'Already in progress or finished; state={}'.format(self._state))

    self._addr_record = addr_record
    self._conn_params = conn_params
    self._on_done = on_done

    # Create socket and initiate TCP/IP connection
    self._state = self._STATE_TCP
    # addr_record[:3] is (family, socktype, proto) per getaddrinfo().
    self._sock = socket.socket(*self._addr_record[:3])
    # Disable Nagle's algorithm: AMQP frames are latency-sensitive.
    self._sock.setsockopt(pika.compat.SOL_TCP, socket.TCP_NODELAY, 1)
    pika.tcp_socket_opts.set_sock_opts(self._conn_params.tcp_options,
                                       self._sock)
    # Non-blocking so connect() proceeds asynchronously via the I/O loop.
    self._sock.setblocking(False)

    addr = self._addr_record[4]
    _LOG.info('Pika version %s connecting to %r', __version__, addr)
    self._task_ref = self._nbio.connect_socket(
        self._sock, addr, on_done=self._on_tcp_connection_done)

    # Start socket connection timeout timer
    self._tcp_timeout_ref = None
    if self._conn_params.socket_timeout is not None:
        self._tcp_timeout_ref = self._nbio.call_later(
            self._conn_params.socket_timeout,
            self._on_tcp_connection_timeout)

    # Start overall TCP/[SSL]/AMQP stack connection timeout timer
    self._stack_timeout_ref = None
    if self._conn_params.stack_timeout is not None:
        self._stack_timeout_ref = self._nbio.call_later(
            self._conn_params.stack_timeout, self._on_overall_timeout)
def abort(self):
    """Abort the workflow asynchronously. The completion callback will be
    called with an instance of AMQPConnectorAborted.

    NOTE: we can't cancel/close synchronously because aborting pika
    Connection and its transport requires an asynchronous operation.

    :raises AMQPConnectorWrongState: If called after completion has been
        reported or the workflow not started yet.
    """
    if self._state == self._STATE_INIT:
        raise AMQPConnectorWrongState('Cannot abort before starting.')

    if self._state == self._STATE_DONE:
        raise AMQPConnectorWrongState(
            'Cannot abort after completion was reported')

    self._state = self._STATE_ABORTING
    # NOTE(review): _deactivate() asserts self._amqp_conn is None, yet the
    # else-branch below handles a non-None self._amqp_conn — with
    # assertions enabled, aborting a live connection appears to trip that
    # assert. Confirm intended behavior.
    self._deactivate()

    _LOG.info(
        'AMQPConnector: beginning client-initiated asynchronous '
        'abort; %r/%s', self._conn_params.host, self._addr_record)

    if self._amqp_conn is None:
        # Nothing asynchronous to wind down; just report completion from
        # the I/O loop.
        _LOG.debug('AMQPConnector.abort(): no connection, so just '
                   'scheduling completion report via I/O loop.')
        self._nbio.add_callback_threadsafe(
            functools.partial(self._report_completion_and_cleanup,
                              AMQPConnectorAborted()))
    else:
        if not self._amqp_conn.is_closing:
            # Initiate close of AMQP connection and wait for asynchronous
            # callback from the Connection instance before reporting
            # completion to client
            _LOG.debug('AMQPConnector.abort(): closing Connection.')
            self._amqp_conn.close(
                320, 'Client-initiated abort of AMQP Connection Workflow.')
        else:
            # It's already closing, must be due to our timeout processing,
            # so we'll just piggy back on the callback it registered
            _LOG.debug('AMQPConnector.abort(): closing of Connection was '
                       'already initiated.')
            # NOTE(review): _state was set to _STATE_ABORTING above, so
            # this assertion fails whenever this branch runs with
            # assertions enabled — verify the intended invariant.
            assert self._state == self._STATE_TIMEOUT, \
                ('Connection is closing, but not in TIMEOUT state; state={}'
                 .format(self._state))
def _close(self):
    """Cancel outstanding asynchronous work, release references to assist
    garbage collection, and transition to _STATE_DONE.
    """
    self._deactivate()

    sock = self._sock
    if sock is not None:
        sock.close()
        self._sock = None

    # Drop everything else we hold so it can be collected.
    self._conn_factory = None
    self._nbio = None
    self._addr_record = None
    self._on_done = None

    self._state = self._STATE_DONE
def _deactivate(self):
    """Cancel all cancelable asynchronous tasks: both timeout timers and
    the currently-pending I/O task, if any.

    NOTE: self._amqp_conn needs special handling elsewhere, since a pika
    Connection does not support synchronous closing.
    """
    assert self._amqp_conn is None, \
        '_deactivate called with self._amqp_conn not None; state={}'.format(
            self._state)

    # Same cancel-then-clear treatment for each cancelable reference.
    for attr_name in ('_tcp_timeout_ref', '_stack_timeout_ref', '_task_ref'):
        ref = getattr(self, attr_name)
        if ref is not None:
            ref.cancel()
            setattr(self, attr_name, None)
def _report_completion_and_cleanup(self, result):
    """Release resources, then hand `result` to the client's `on_done`.

    :param pika.connection.Connection | BaseException result: value to pass
        to user's `on_done` callback.
    """
    if isinstance(result, BaseException):
        _LOG.error('AMQPConnector - reporting failure: %r', result)
    else:
        _LOG.info('AMQPConnector - reporting success: %r', result)

    # Grab the callback before _close() wipes our references.
    callback = self._on_done
    self._close()
    callback(result)
def _on_tcp_connection_timeout(self):
    """Handle TCP connection timeout

    Reports AMQPConnectorSocketConnectError with socket.timeout inside.
    """
    # Timer already fired; nothing left to cancel.
    self._tcp_timeout_ref = None

    error = AMQPConnectorSocketConnectError(
        socket.timeout('TCP connection attempt timed out: {!r}/{}'.format(
            self._conn_params.host, self._addr_record)))

    self._report_completion_and_cleanup(error)
def _on_overall_timeout(self):
    """Handle overall TCP/[SSL]/AMQP connection attempt timeout by reporting
    `Timeout` error to the client.

    Reports AMQPConnectorSocketConnectError if timeout occurred during
        socket TCP connection attempt.
    Reports AMQPConnectorTransportSetupError if timeout occurred during
        transport [SSL] setup attempt.
    Reports AMQPConnectorAMQPHandshakeError if timeout occurred during
        AMQP handshake.
    """
    # Timer already fired; nothing left to cancel.
    self._stack_timeout_ref = None

    # Capture which phase we were in, then mark the timeout state.
    prev_state = self._state
    self._state = self._STATE_TIMEOUT
    if prev_state == self._STATE_AMQP:
        msg = ('Timeout while setting up AMQP to {!r}/{}; ssl={}'.format(
            self._conn_params.host, self._addr_record,
            bool(self._conn_params.ssl_options)))
        _LOG.error(msg)
        # Initiate close of AMQP connection and wait for asynchronous
        # callback from the Connection instance before reporting completion
        # to client
        assert not self._amqp_conn.is_open, \
            'Unexpected open state of {!r}'.format(self._amqp_conn)
        if not self._amqp_conn.is_closing:
            self._amqp_conn.close(320, msg)
        return

    if prev_state == self._STATE_TCP:
        error = AMQPConnectorSocketConnectError(
            AMQPConnectorStackTimeout(
                'Timeout while connecting socket to {!r}/{}'.format(
                    self._conn_params.host, self._addr_record)))
    else:
        assert prev_state == self._STATE_TRANSPORT
        error = AMQPConnectorTransportSetupError(
            AMQPConnectorStackTimeout(
                'Timeout while setting up transport to {!r}/{}; ssl={}'.
                format(self._conn_params.host, self._addr_record,
                       bool(self._conn_params.ssl_options))))

    self._report_completion_and_cleanup(error)
def _on_tcp_connection_done(self, exc):
    """Handle completion of asynchronous socket connection attempt.

    Reports AMQPConnectorSocketConnectError if TCP socket connection
    failed.

    :param None|BaseException exc: None on success; exception object on
        failure
    """
    self._task_ref = None
    # TCP either connected or failed, so the TCP-connect timer is moot.
    if self._tcp_timeout_ref is not None:
        self._tcp_timeout_ref.cancel()
        self._tcp_timeout_ref = None

    if exc is not None:
        _LOG.error('TCP Connection attempt failed: %r; dest=%r', exc,
                   self._addr_record)
        self._report_completion_and_cleanup(
            AMQPConnectorSocketConnectError(exc))
        return

    # We succeeded in making a TCP/IP connection to the server
    _LOG.debug('TCP connection to broker established: %r.', self._sock)

    # Now set up the transport
    self._state = self._STATE_TRANSPORT

    ssl_context = server_hostname = None
    if self._conn_params.ssl_options is not None:
        ssl_context = self._conn_params.ssl_options.context
        server_hostname = self._conn_params.ssl_options.server_hostname
        if server_hostname is None:
            # Fall back to the configured host for SNI/cert matching.
            server_hostname = self._conn_params.host

    self._task_ref = self._nbio.create_streaming_connection(
        protocol_factory=functools.partial(self._conn_factory,
                                           self._conn_params),
        sock=self._sock,
        ssl_context=ssl_context,
        server_hostname=server_hostname,
        on_done=self._on_transport_establishment_done)

    self._sock = None  # create_streaming_connection() takes ownership
def _on_transport_establishment_done(self, result):
    """Handle asynchronous completion of
    `AbstractIOServices.create_streaming_connection()`

    Reports AMQPConnectorTransportSetupError if transport ([SSL]) setup
    failed.

    :param sequence|BaseException result: On success, a two-tuple
        (transport, protocol); on failure, exception instance.
    """
    self._task_ref = None

    if isinstance(result, BaseException):
        _LOG.error(
            'Attempt to create the streaming transport failed: %r; '
            '%r/%s; ssl=%s', result, self._conn_params.host,
            self._addr_record, bool(self._conn_params.ssl_options))
        self._report_completion_and_cleanup(
            AMQPConnectorTransportSetupError(result))
        return

    # We succeeded in setting up the streaming transport!
    # result is a two-tuple (transport, protocol)
    _LOG.info('Streaming transport linked up: %r.', result)
    _transport, self._amqp_conn = result

    # AMQP handshake is in progress - initiated during transport link-up
    self._state = self._STATE_AMQP
    # We explicitly remove default handler because it raises an exception.
    self._amqp_conn.add_on_open_error_callback(
        self._on_amqp_handshake_done, remove_default=True)
    self._amqp_conn.add_on_open_callback(self._on_amqp_handshake_done)
def _on_amqp_handshake_done(self, connection, error=None):
    """Handle completion of AMQP connection handshake attempt.

    NOTE: we handle two types of callbacks - success with just connection
    arg as well as the open-error callback with connection and error

    Reports AMQPConnectorAMQPHandshakeError if AMQP handshake failed.

    :param pika.connection.Connection connection:
    :param BaseException | None error: None on success, otherwise
        failure
    """
    _LOG.debug(
        'AMQPConnector: AMQP handshake attempt completed; state=%s; '
        'error=%r; %r/%s', self._state, error, self._conn_params.host,
        self._addr_record)

    # Don't need it any more; and _deactivate() checks that it's None
    self._amqp_conn = None

    if self._state == self._STATE_ABORTING:
        # Client-initiated abort takes precedence over timeout
        result = AMQPConnectorAborted()
    elif self._state == self._STATE_TIMEOUT:
        # BUGFIX: restored the missing space between "handshake" and the
        # host in the timeout message format string.
        result = AMQPConnectorAMQPHandshakeError(
            AMQPConnectorStackTimeout(
                'Timeout during AMQP handshake {!r}/{}; ssl={}'.format(
                    self._conn_params.host, self._addr_record,
                    bool(self._conn_params.ssl_options))))
    elif self._state == self._STATE_AMQP:
        if error is None:
            _LOG.debug(
                'AMQPConnector: AMQP connection established for %r/%s: %r',
                self._conn_params.host, self._addr_record, connection)
            result = connection
        else:
            _LOG.debug(
                'AMQPConnector: AMQP connection handshake failed for '
                '%r/%s: %r', self._conn_params.host, self._addr_record,
                error)
            result = AMQPConnectorAMQPHandshakeError(error)
    else:
        # We timed out or aborted and initiated closing of the connection,
        # but this callback snuck in
        _LOG.debug(
            'AMQPConnector: Ignoring AMQP handshake completion '
            'notification due to wrong state=%s; error=%r; conn=%r',
            self._state, error, connection)
        return

    self._report_completion_and_cleanup(result)
class AbstractAMQPConnectionWorkflow(pika.compat.AbstractBase):
    """Interface for implementing a custom TCP/[SSL]/AMQP connection workflow.
    """

    def start(self, connection_configs, connector_factory, native_loop,
              on_done):
        """Asynchronously perform the workflow until success or all retries
        are exhausted. Called by the adapter.

        :param sequence connection_configs: A sequence of one or more
            `pika.connection.Parameters`-based objects. Will attempt to connect
            using each config in the given order.
        :param callable connector_factory: call it without args to obtain a new
            instance of `AMQPConnector` for each connection attempt.
            See `AMQPConnector` for details.
        :param native_loop: Native I/O loop passed by app to the adapter or
            obtained by the adapter by default.
        :param callable on_done: Function to call upon completion of the
            workflow:
            `on_done(pika.connection.Connection |
                     AMQPConnectionWorkflowFailed |
                     AMQPConnectionWorkflowAborted)`.
            `Connection`-based adapter on success,
            `AMQPConnectionWorkflowFailed` on failure,
            `AMQPConnectionWorkflowAborted` if workflow was aborted.

        :raises AMQPConnectionWorkflowWrongState: If called in wrong state, such
            as after starting the workflow.
        """
        raise NotImplementedError

    def abort(self):
        """Abort the workflow asynchronously. The completion callback will be
        called with an instance of AMQPConnectionWorkflowAborted.

        NOTE: we can't cancel/close synchronously because aborting pika
        Connection and its transport requires an asynchronous operation.

        :raises AMQPConnectionWorkflowWrongState: If called in wrong state, such
            as before starting or after completion has been reported.
        """
        raise NotImplementedError
class AMQPConnectionWorkflow(AbstractAMQPConnectionWorkflow):
    """Implements Pika's default workflow for performing multiple TCP/[SSL]/AMQP
    connection attempts with timeouts and retries until one succeeds or all
    attempts fail.

    The workflow:
        while not success and retries remain:
            1. For each given config (pika.connection.Parameters object):
                A. Perform DNS resolution of the config's host.
                B. Attempt to establish TCP/[SSL]/AMQP for each resolved address
                   until one succeeds, in which case we're done.
            2. If all configs failed but retries remain, resume from beginning
               after the given retry pause. NOTE: failure of DNS resolution
               is equivalent to one cycle and will be retried after the pause
               if retries remain.
    """
    # Fixed socket type/protocol passed to getaddrinfo: TCP streams only.
    _SOCK_TYPE = socket.SOCK_STREAM
    _IPPROTO = socket.IPPROTO_TCP

    # Workflow lifecycle states.
    _STATE_INIT = 0
    _STATE_ACTIVE = 1
    _STATE_ABORTING = 2
    _STATE_DONE = 3
def __init__(self, _until_first_amqp_attempt=False):
    """
    NOTE: the overall attempt count and the pause between retry cycles are
    not constructor args; `start()` takes them from the last config's
    `connection_attempts` and `retry_delay` values.

    :param bool _until_first_amqp_attempt: INTERNAL USE ONLY; ends workflow
        after first AMQP handshake attempt, regardless of outcome (success
        or failure). The automatic connection logic in
        `pika.connection.Connection` enables this because it's not
        designed/tested to reset all state properly to handle more than one
        AMQP handshake attempt.

    TODO: Do we need getaddrinfo timeout?
    TODO: Would it be useful to implement exponential back-off?
    """
    self._attempts_remaining = None  # supplied by start()
    self._retry_pause = None  # supplied by start()
    self._until_first_amqp_attempt = _until_first_amqp_attempt

    # Provided by set_io_services()
    # pylint: disable=C0301
    self._nbio = None  # type: pika.adapters.utils.nbio_interface.AbstractIOServices

    # Current index within `_connection_configs`; initialized when
    # starting a new connection sequence.
    self._current_config_index = None

    self._connection_configs = None  # supplied by start()
    self._connector_factory = None  # supplied by start()
    self._on_done = None  # supplied by start()

    self._connector = None  # type: AMQPConnector

    self._task_ref = None  # current cancelable asynchronous task or timer
    self._addrinfo_iter = None

    # Exceptions from all failed connection attempts in this workflow
    self._connection_errors = []

    self._state = self._STATE_INIT
def set_io_services(self, nbio):
    """Called by the connection adapter only on pika's
    `AMQPConnectionWorkflow` instance to provide it the adapter-specific
    `AbstractIOServices` object before calling the `start()` method.

    NOTE: Custom workflow implementations should use the native I/O loop
    directly because `AbstractIOServices` is private to Pika
    implementation and its interface may change without notice.

    :param pika.adapters.utils.nbio_interface.AbstractIOServices nbio:
    """
    self._nbio = nbio
def start(
        self,
        connection_configs,
        connector_factory,
        native_loop,  # pylint: disable=W0613
        on_done):
    """Override `AbstractAMQPConnectionWorkflow.start()`.

    NOTE: This implementation uses `connection_attempts` and `retry_delay`
    values from the last element of the given `connection_configs` sequence
    as the overall number of connection attempts of the entire
    `connection_configs` sequence and pause between each sequence.
    """
    if self._state != self._STATE_INIT:
        # NOTE(review): raises AMQPConnectorWrongState although the
        # abstract contract documents AMQPConnectionWorkflowWrongState;
        # confirm which type callers expect before changing.
        raise AMQPConnectorWrongState(
            'Already in progress or finished; state={}'.format(self._state))

    try:
        iter(connection_configs)
    except Exception as error:
        raise TypeError(
            'connection_configs does not support iteration: {!r}'.format(
                error))
    if not connection_configs:
        raise ValueError(
            'connection_configs is empty: {!r}.'.format(connection_configs))

    self._connection_configs = connection_configs
    self._connector_factory = connector_factory
    self._on_done = on_done

    # Overall attempt budget and inter-cycle pause come from the last config.
    self._attempts_remaining = connection_configs[-1].connection_attempts
    self._retry_pause = connection_configs[-1].retry_delay

    self._state = self._STATE_ACTIVE

    _LOG.debug('Starting AMQP Connection workflow asynchronously.')

    # Begin from our own I/O loop context to avoid calling back into client
    # from client's call here
    self._task_ref = self._nbio.call_later(
        0, functools.partial(self._start_new_cycle_async, first=True))
def abort(self):
    """Override `AbstractAMQPConnectionWorkflow.abort()`.
    """
    if self._state == self._STATE_INIT:
        # NOTE(review): raises AMQPConnectorWrongState although the
        # abstract contract documents AMQPConnectionWorkflowWrongState;
        # confirm which type callers expect before changing.
        raise AMQPConnectorWrongState('Cannot abort before starting.')
    elif self._state == self._STATE_DONE:
        raise AMQPConnectorWrongState(
            'Cannot abort after completion was reported')

    self._state = self._STATE_ABORTING
    self._deactivate()

    _LOG.info('AMQPConnectionWorkflow: beginning client-initiated '
              'asynchronous abort.')

    if self._connector is None:
        # No connection attempt in flight; just schedule the completion
        # report on the I/O loop.
        _LOG.debug('AMQPConnectionWorkflow.abort(): no connector, so just '
                   'scheduling completion report via I/O loop.')
        self._nbio.add_callback_threadsafe(
            functools.partial(self._report_completion_and_cleanup,
                              AMQPConnectionWorkflowAborted()))
    else:
        # Delegate to the in-flight connector; its completion callback
        # surfaces the abort result.
        _LOG.debug('AMQPConnectionWorkflow.abort(): requesting '
                   'connector.abort().')
        self._connector.abort()
def _close(self):
    """Cancel asynchronous tasks, drop references to assist garbage
    collection, and transition to _STATE_DONE.
    """
    self._deactivate()

    # Null out everything we hold so it can be collected.
    for attr_name in ('_connection_configs', '_nbio', '_connector_factory',
                      '_on_done', '_connector', '_addrinfo_iter',
                      '_connection_errors'):
        setattr(self, attr_name, None)

    self._state = self._STATE_DONE
def _deactivate(self):
    """Cancel the pending timer/asynchronous task, if any."""
    task = self._task_ref
    if task is not None:
        task.cancel()
        self._task_ref = None
def _report_completion_and_cleanup(self, result):
    """Release resources, then hand `result` to the client's `on_done`.

    :param pika.connection.Connection | AMQPConnectionWorkflowFailed result:
        value to pass to user's `on_done` callback.
    """
    if isinstance(result, BaseException):
        _LOG.error('AMQPConnectionWorkflow - reporting failure: %r', result)
    else:
        _LOG.info('AMQPConnectionWorkflow - reporting success: %r', result)

    # Grab the callback before _close() wipes our references.
    callback = self._on_done
    self._close()
    callback(result)
def _start_new_cycle_async(self, first):
    """Start a new workflow cycle (if any more attempts are left) beginning
    with the first Parameters object in self._connection_configs. If out of
    attempts, report `AMQPConnectionWorkflowFailed`.

    :param bool first: if True, don't delay; otherwise delay next attempt by
        `self._retry_pause` seconds.
    """
    self._task_ref = None

    assert self._attempts_remaining >= 0, self._attempts_remaining

    if self._attempts_remaining <= 0:
        # Budget exhausted: surface every collected error to the client.
        error = AMQPConnectionWorkflowFailed(self._connection_errors)
        _LOG.error('AMQP connection workflow failed: %r.', error)
        self._report_completion_and_cleanup(error)
        return

    self._attempts_remaining -= 1
    _LOG.debug(
        'Beginning a new AMQP connection workflow cycle; attempts '
        'remaining after this: %s', self._attempts_remaining)

    # Reset the config cursor; _try_next_config_async() starts at index 0.
    self._current_config_index = None

    self._task_ref = self._nbio.call_later(
        0 if first else self._retry_pause, self._try_next_config_async)
def _try_next_config_async(self):
"""Attempt to connect using the next Parameters config. If there are no
more configs, start a new cycle.
"""
self._task_ref = None
if self._current_config_index is None:
self._current_config_index = 0
else:
self._current_config_index += 1
if self._current_config_index >= len(self._connection_configs):
_LOG.debug('_try_next_config_async: starting a new cycle.')
self._start_new_cycle_async(first=False)
return
params = self._connection_configs[self._current_config_index]
_LOG.debug('_try_next_config_async: %r:%s', params.host, params.port)
# Begin with host address resolution
assert self._task_ref is None
self._task_ref = self._nbio.getaddrinfo(
host=params.host,
port=params.port,
socktype=self._SOCK_TYPE,
proto=self._IPPROTO,
on_done=self._on_getaddrinfo_async_done)
def _on_getaddrinfo_async_done(self, addrinfos_or_exc):
"""Handles completion callback from asynchronous `getaddrinfo()`.
:param list | BaseException addrinfos_or_exc: resolved address records
returned by `getaddrinfo()` or an exception object from failure.
"""
self._task_ref = None
if isinstance(addrinfos_or_exc, BaseException):
_LOG.error('getaddrinfo failed: %r.', addrinfos_or_exc)
self._connection_errors.append(addrinfos_or_exc)
self._start_new_cycle_async(first=False)
return
_LOG.debug('getaddrinfo returned %s records', len(addrinfos_or_exc))
self._addrinfo_iter = iter(addrinfos_or_exc)
self._try_next_resolved_address()
def _try_next_resolved_address(self):
"""Try connecting using next resolved address. If there aren't any left,
continue with next Parameters config.
"""
try:
addr_record = next(self._addrinfo_iter)
except StopIteration:
_LOG.debug(
'_try_next_resolved_address: continuing with next config.')
self._try_next_config_async()
return
_LOG.debug('Attempting to connect using address record %r', addr_record)
self._connector = self._connector_factory() # type: AMQPConnector
self._connector.start(
addr_record=addr_record,
conn_params=self._connection_configs[self._current_config_index],
on_done=self._on_connector_done)
def _on_connector_done(self, conn_or_exc):
"""Handle completion of connection attempt by `AMQPConnector`.
:param pika.connection.Connection | BaseException conn_or_exc: See
`AMQPConnector.start()` for exception details.
"""
self._connector = None
_LOG.debug('Connection attempt completed with %r', conn_or_exc)
if isinstance(conn_or_exc, BaseException):
self._connection_errors.append(conn_or_exc)
if isinstance(conn_or_exc, AMQPConnectorAborted):
assert self._state == self._STATE_ABORTING, \
'Expected _STATE_ABORTING, but got {!r}'.format(self._state)
self._report_completion_and_cleanup(
AMQPConnectionWorkflowAborted())
elif (self._until_first_amqp_attempt and
isinstance(conn_or_exc, AMQPConnectorAMQPHandshakeError)):
_LOG.debug('Ending AMQP connection workflow after first failed '
'AMQP handshake due to _until_first_amqp_attempt.')
if isinstance(conn_or_exc.exception,
pika.exceptions.ConnectionOpenAborted):
error = AMQPConnectionWorkflowAborted
else:
error = AMQPConnectionWorkflowFailed(
self._connection_errors)
self._report_completion_and_cleanup(error)
else:
self._try_next_resolved_address()
else:
# Success!
self._report_completion_and_cleanup(conn_or_exc)

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,441 @@
"""Non-blocking I/O interface for pika connection adapters.
I/O interface expected by `pika.adapters.base_connection.BaseConnection`
NOTE: This API is modeled after asyncio in python3 for a couple of reasons
1. It's a sensible API
2. To make it easy to implement at least on top of the built-in asyncio
Furthermore, the API caters to the needs of pika core and lack of generalization
is intentional for the sake of reducing complexity of the implementation and
testing and lessening the maintenance burden.
"""
import abc
import pika.compat
class AbstractIOServices(pika.compat.AbstractBase):
    """Interface to I/O services required by `pika.adapters.BaseConnection` and
    related utilities.
    NOTE: This is not a public API. Pika users should rely on the native I/O
    loop APIs (e.g., asyncio event loop, tornado ioloop, twisted reactor, etc.)
    that corresponds to the chosen Connection adapter.
    """
    @abc.abstractmethod
    def get_native_ioloop(self):
        """Returns the native I/O loop instance, such as Twisted reactor,
        asyncio's or tornado's event loop
        """
        raise NotImplementedError
    @abc.abstractmethod
    def close(self):
        """Release IOLoop's resources.
        the `close()` method is intended to be called by Pika's own test
        code only after `start()` returns. After calling `close()`, no other
        interaction with the closed instance of `IOLoop` should be performed.
        NOTE: This method is provided for Pika's own test scripts that need to
        be able to run I/O loops generically to test multiple Connection Adapter
        implementations. Pika users should use the native I/O loop's API
        instead.
        """
        raise NotImplementedError
    @abc.abstractmethod
    def run(self):
        """Run the I/O loop. It will loop until requested to exit. See `stop()`.
        NOTE: the outcome of restarting an instance that had been stopped is
        UNDEFINED!
        NOTE: This method is provided for Pika's own test scripts that need to
        be able to run I/O loops generically to test multiple Connection Adapter
        implementations (not all of the supported I/O Loop frameworks have
        methods named start/stop). Pika users should use the native I/O loop's
        API instead.
        """
        raise NotImplementedError
    @abc.abstractmethod
    def stop(self):
        """Request exit from the ioloop. The loop is NOT guaranteed to
        stop before this method returns.
        NOTE: The outcome of calling `stop()` on a non-running instance is
        UNDEFINED!
        NOTE: This method is provided for Pika's own test scripts that need to
        be able to run I/O loops generically to test multiple Connection Adapter
        implementations (not all of the supported I/O Loop frameworks have
        methods named start/stop). Pika users should use the native I/O loop's
        API instead.
        To invoke `stop()` safely from a thread other than this IOLoop's thread,
        call it via `add_callback_threadsafe`; e.g.,
        `ioloop.add_callback_threadsafe(ioloop.stop)`
        """
        raise NotImplementedError
    @abc.abstractmethod
    def add_callback_threadsafe(self, callback):
        """Requests a call to the given function as soon as possible. It will be
        called from this IOLoop's thread.
        NOTE: This is the only thread-safe method offered by the IOLoop adapter.
        All other manipulations of the IOLoop adapter and objects governed
        by it must be performed from the IOLoop's thread.
        NOTE: if you know that the requester is running on the same thread as
        the connection it is more efficient to use the
        `ioloop.call_later()` method with a delay of 0.
        :param callable callback: The callback method; must be callable.
        """
        raise NotImplementedError
    @abc.abstractmethod
    def call_later(self, delay, callback):
        """Add the callback to the IOLoop timer to be called after delay seconds
        from the time of call on best-effort basis. Returns a handle to the
        timeout.
        If two are scheduled for the same time, it's undefined which one will
        be called first.
        :param float delay: The number of seconds to wait to call callback
        :param callable callback: The callback method
        :returns: A handle that can be used to cancel the request.
        :rtype: AbstractTimerReference
        """
        raise NotImplementedError
    @abc.abstractmethod
    def getaddrinfo(self,
                    host,
                    port,
                    on_done,
                    family=0,
                    socktype=0,
                    proto=0,
                    flags=0):
        """Perform the equivalent of `socket.getaddrinfo()` asynchronously.
        See `socket.getaddrinfo()` for the standard args.
        :param callable on_done: user callback that takes the return value of
            `socket.getaddrinfo()` upon successful completion or exception upon
            failure (check for `BaseException`) as its only arg. It will not be
            called if the operation was cancelled.
        :rtype: AbstractIOReference
        """
        raise NotImplementedError
    @abc.abstractmethod
    def connect_socket(self, sock, resolved_addr, on_done):
        """Perform the equivalent of `socket.connect()` on a previously-resolved
        address asynchronously.
        IMPLEMENTATION NOTE: Pika's connection logic resolves the addresses
            prior to making socket connections, so we don't need to burden the
            implementations of this method with the extra logic of asynchronous
            DNS resolution. Implementations can use `socket.inet_pton()` to
            verify the address.
        :param socket.socket sock: non-blocking socket that needs to be
            connected via `socket.socket.connect()`
        :param tuple resolved_addr: resolved destination address/port two-tuple
            as per `socket.socket.connect()`, except that the first element must
            be an actual IP address that's consistent with the given socket's
            address family.
        :param callable on_done: user callback that takes None upon successful
            completion or exception (check for `BaseException`) upon error as
            its only arg. It will not be called if the operation was cancelled.
        :rtype: AbstractIOReference
        :raises ValueError: if host portion of `resolved_addr` is not an IP
            address or is inconsistent with the socket's address family as
            validated via `socket.inet_pton()`
        """
        raise NotImplementedError
    @abc.abstractmethod
    def create_streaming_connection(self,
                                    protocol_factory,
                                    sock,
                                    on_done,
                                    ssl_context=None,
                                    server_hostname=None):
        """Perform SSL session establishment, if requested, on the already-
        connected socket and link the streaming transport/protocol pair.
        NOTE: This method takes ownership of the socket.
        :param callable protocol_factory: called without args, returns an
            instance with the `AbstractStreamProtocol` interface. The protocol's
            `connection_made(transport)` method will be called to link it to
            the transport after remaining connection activity (e.g., SSL session
            establishment), if any, is completed successfully.
        :param socket.socket sock: Already-connected, non-blocking
            `socket.SOCK_STREAM` socket to be used by the transport. We take
            ownership of this socket.
        :param callable on_done: User callback
            `on_done(BaseException | (transport, protocol))` to be notified when
            the asynchronous operation completes. An exception arg indicates
            failure (check for `BaseException`); otherwise the two-tuple will
            contain the linked transport/protocol pair having
            AbstractStreamTransport and AbstractStreamProtocol interfaces
            respectively.
        :param None | ssl.SSLContext ssl_context: if None, this will proceed as
            a plaintext connection; otherwise, if not None, SSL session
            establishment will be performed prior to linking the transport and
            protocol.
        :param str | None server_hostname: For use during SSL session
            establishment to match against the target server's certificate. The
            value `None` disables this check (which is a huge security risk)
        :rtype: AbstractIOReference
        """
        raise NotImplementedError
class AbstractFileDescriptorServices(pika.compat.AbstractBase):
    """Interface definition of common non-blocking file descriptor services
    required by some utility implementations.
    NOTE: This is not a public API. Pika users should rely on the native I/O
    loop APIs (e.g., asyncio event loop, tornado ioloop, twisted reactor, etc.)
    that corresponds to the chosen Connection adapter.
    """
    @abc.abstractmethod
    def set_reader(self, fd, on_readable):
        """Call the given callback when the file descriptor is readable.
        Replace prior reader, if any, for the given file descriptor.
        :param fd: file descriptor
        :param callable on_readable: a callback taking no args to be notified
            when fd becomes readable.
        """
        raise NotImplementedError
    @abc.abstractmethod
    def remove_reader(self, fd):
        """Stop watching the given file descriptor for readability
        :param fd: file descriptor
        :returns: True if reader was removed; False if none was registered.
        :rtype: bool
        """
        raise NotImplementedError
    @abc.abstractmethod
    def set_writer(self, fd, on_writable):
        """Call the given callback whenever the file descriptor is writable.
        Replace prior writer callback, if any, for the given file descriptor.
        IMPLEMENTATION NOTE: For portability, implementations of
            `set_writer()` should also watch for indication of error on the
            socket and treat it as equivalent to the writable indication (e.g.,
            also adding the socket to the `exceptfds` arg of `socket.select()`
            and calling the `on_writable` callback if `select.select()`
            indicates that the socket is in error state). Specifically, Windows
            (unlike POSIX) only indicates error on the socket (but not writable)
            when connection establishment fails.
        :param fd: file descriptor
        :param callable on_writable: a callback taking no args to be notified
            when fd becomes writable.
        """
        raise NotImplementedError
    @abc.abstractmethod
    def remove_writer(self, fd):
        """Stop watching the given file descriptor for writability
        :param fd: file descriptor
        :returns: True if writer was removed; False if none was registered.
        :rtype: bool
        """
        raise NotImplementedError
class AbstractTimerReference(pika.compat.AbstractBase):
    """Reference to asynchronous operation"""
    @abc.abstractmethod
    def cancel(self):
        """Cancel callback. If already cancelled, has no effect.
        """
        raise NotImplementedError
class AbstractIOReference(pika.compat.AbstractBase):
    """Reference to asynchronous I/O operation.
    Returned by `AbstractIOServices` request methods (e.g., `getaddrinfo()`,
    `connect_socket()`, `create_streaming_connection()`).
    """
    @abc.abstractmethod
    def cancel(self):
        """Cancel pending operation
        :returns: False if was already done or cancelled; True otherwise
        :rtype: bool
        """
        raise NotImplementedError
class AbstractStreamProtocol(pika.compat.AbstractBase):
    """Stream protocol interface. It's compatible with a subset of
    `asyncio.protocols.Protocol` for compatibility with asyncio-based
    `AbstractIOServices` implementation.
    """
    @abc.abstractmethod
    def connection_made(self, transport):
        """Introduces transport to protocol after transport is connected.
        :param AbstractStreamTransport transport:
        :raises Exception: Exception-based exception on error
        """
        raise NotImplementedError
    @abc.abstractmethod
    def connection_lost(self, error):
        """Called upon loss or closing of connection.
        NOTE: `connection_made()` and `connection_lost()` are each called just
        once and in that order. All other callbacks are called between them.
        :param BaseException | None error: An exception (check for
            `BaseException`) indicates connection failure. None indicates that
            connection was closed on this side, such as when it's aborted or
            when `AbstractStreamProtocol.eof_received()` returns a result that
            doesn't evaluate to True.
        :raises Exception: Exception-based exception on error
        """
        raise NotImplementedError
    @abc.abstractmethod
    def eof_received(self):
        """Called after the remote peer shuts its write end of the connection.
        :returns: A falsy value (including None) will cause the transport to
            close itself, resulting in an eventual `connection_lost()` call
            from the transport. If a truthy value is returned, it will be the
            protocol's responsibility to close/abort the transport.
        :rtype: falsy|truthy
        :raises Exception: Exception-based exception on error
        """
        raise NotImplementedError
    @abc.abstractmethod
    def data_received(self, data):
        """Called to deliver incoming data to the protocol.
        :param data: Non-empty data bytes.
        :raises Exception: Exception-based exception on error
        """
        raise NotImplementedError
    # pylint: disable=W0511
    # TODO Undecided whether we need write flow-control yet, although it seems
    # like a good idea.
    # @abc.abstractmethod
    # def pause_writing(self):
    #     """Called when the transport's write buffer size becomes greater than or
    #     equal to the transport's high-water mark. It won't be called again until
    #     the transport's write buffer gets back to its low-water mark and then
    #     returns to/past the high-water mark again.
    #     """
    #     raise NotImplementedError
    #
    # @abc.abstractmethod
    # def resume_writing(self):
    #     """Called when the transport's write buffer size becomes less than or
    #     equal to the transport's low-water mark.
    #     """
    #     raise NotImplementedError
class AbstractStreamTransport(pika.compat.AbstractBase):
    """Stream transport interface. It's compatible with a subset of
    `asyncio.transports.Transport` for compatibility with asyncio-based
    `AbstractIOServices` implementation.
    """
    @abc.abstractmethod
    def abort(self):
        """Close connection abruptly without waiting for pending I/O to
        complete. Will invoke the corresponding protocol's `connection_lost()`
        method asynchronously (not in context of the abort() call).
        :raises Exception: Exception-based exception on error
        """
        raise NotImplementedError
    @abc.abstractmethod
    def get_protocol(self):
        """Return the protocol linked to this transport.
        :rtype: AbstractStreamProtocol
        :raises Exception: Exception-based exception on error
        """
        raise NotImplementedError
    @abc.abstractmethod
    def write(self, data):
        """Buffer the given data until it can be sent asynchronously.
        :param bytes data:
        :raises ValueError: if called with empty data
        :raises Exception: Exception-based exception on error
        """
        raise NotImplementedError
    @abc.abstractmethod
    def get_write_buffer_size(self):
        """
        :returns: Current size of output data buffered by the transport
        :rtype: int
        """
        raise NotImplementedError
    # pylint: disable=W0511
    # TODO Undecided whether we need write flow-control yet, although it seems
    # like a good idea.
    # @abc.abstractmethod
    # def set_write_buffer_limits(self, high, low):
    #     """Set thresholds for calling the protocol's `pause_writing()`
    #     and `resume_writing()` methods. `low` must be less than or equal to
    #     `high`.
    #
    #     NOTE The unintuitive order of the args is preserved to match the
    #     corresponding method in `asyncio.WriteTransport`. I would expect `low`
    #     to be the first arg, especially since
    #     `asyncio.WriteTransport.get_write_buffer_limits()` returns them in the
    #     opposite order. This seems error-prone.
    #
    #     See `asyncio.WriteTransport.get_write_buffer_limits()` for more details
    #     about the args.
    #
    #     :param int high: non-negative high-water mark.
    #     :param int low: non-negative low-water mark.
    #     """
    #     raise NotImplementedError

View File

@@ -0,0 +1,600 @@
"""
Implementation of `nbio_interface.AbstractIOServices` on top of a
selector-based I/O loop, such as tornado's and our home-grown
select_connection's I/O loops.
"""
import abc
import logging
import socket
import threading
from pika.adapters.utils import nbio_interface, io_services_utils
from pika.adapters.utils.io_services_utils import (check_callback_arg,
check_fd_arg)
LOGGER = logging.getLogger(__name__)
class AbstractSelectorIOLoop(object):
    """Selector-based I/O loop interface expected by
    `selector_ioloop_adapter.SelectorIOServicesAdapter`
    NOTE: this interface follows the corresponding methods and attributes
    of `tornado.ioloop.IOLoop` in order to avoid additional adapter layering
    when wrapping tornado's IOLoop.
    """
    @property
    @abc.abstractmethod
    def READ(self):  # pylint: disable=C0103
        """The value of the I/O loop's READ flag; READ/WRITE/ERROR may be used
        with bitwise operators as expected.
        Implementation note: the implementations can simply replace these
        READ/WRITE/ERROR properties with class-level attributes
        """
    @property
    @abc.abstractmethod
    def WRITE(self):  # pylint: disable=C0103
        """The value of the I/O loop's WRITE flag; READ/WRITE/ERROR may be used
        with bitwise operators as expected
        """
    @property
    @abc.abstractmethod
    def ERROR(self):  # pylint: disable=C0103
        """The value of the I/O loop's ERROR flag; READ/WRITE/ERROR may be used
        with bitwise operators as expected
        """
    @abc.abstractmethod
    def close(self):
        """Release IOLoop's resources.
        the `close()` method is intended to be called by the application or test
        code only after `start()` returns. After calling `close()`, no other
        interaction with the closed instance of `IOLoop` should be performed.
        """
    @abc.abstractmethod
    def start(self):
        """Run the I/O loop. It will loop until requested to exit. See `stop()`.
        """
    @abc.abstractmethod
    def stop(self):
        """Request exit from the ioloop. The loop is NOT guaranteed to
        stop before this method returns.
        To invoke `stop()` safely from a thread other than this IOLoop's thread,
        call it via `add_callback`; e.g.,
        `ioloop.add_callback(ioloop.stop)`
        """
    @abc.abstractmethod
    def call_later(self, delay, callback):
        """Add the callback to the IOLoop timer to be called after delay seconds
        from the time of call on best-effort basis. Returns a handle to the
        timeout.
        :param float delay: The number of seconds to wait to call callback
        :param callable callback: The callback method
        :returns: handle to the created timeout that may be passed to
            `remove_timeout()`
        :rtype: object
        """
    @abc.abstractmethod
    def remove_timeout(self, timeout_handle):
        """Remove a timeout
        :param timeout_handle: Handle of timeout to remove
        """
    @abc.abstractmethod
    def add_callback(self, callback):
        """Requests a call to the given function as soon as possible in the
        context of this IOLoop's thread.
        NOTE: This is the only thread-safe method in IOLoop. All other
        manipulations of IOLoop must be performed from the IOLoop's thread.
        For example, a thread may request a call to the `stop` method of an
        ioloop that is running in a different thread via
        `ioloop.add_callback(ioloop.stop)`
        :param callable callback: The callback method
        """
    @abc.abstractmethod
    def add_handler(self, fd, handler, events):
        """Start watching the given file descriptor for events
        :param int fd: The file descriptor
        :param callable handler: When requested event(s) occur,
            `handler(fd, events)` will be called.
        :param int events: The event mask using READ, WRITE, ERROR.
        """
    @abc.abstractmethod
    def update_handler(self, fd, events):
        """Changes the events we watch for
        :param int fd: The file descriptor
        :param int events: The event mask using READ, WRITE, ERROR
        """
    @abc.abstractmethod
    def remove_handler(self, fd):
        """Stop watching the given file descriptor for events
        :param int fd: The file descriptor
        """
class SelectorIOServicesAdapter(io_services_utils.SocketConnectionMixin,
                                io_services_utils.StreamingConnectionMixin,
                                nbio_interface.AbstractIOServices,
                                nbio_interface.AbstractFileDescriptorServices):
    """Implements the
    :py:class:`.nbio_interface.AbstractIOServices` interface
    on top of selector-style native loop having the
    :py:class:`AbstractSelectorIOLoop` interface, such as
    :py:class:`pika.adapters.select_connection.IOLoop` and
    :py:class:`tornado.IOLoop`.
    NOTE:
    :py:class:`.nbio_interface.AbstractFileDescriptorServices`
    interface is only required by the mixins.
    """
    def __init__(self, native_loop):
        """
        :param AbstractSelectorIOLoop native_loop: An instance compatible with
            the `AbstractSelectorIOLoop` interface, but not necessarily derived
            from it.
        """
        self._loop = native_loop
        # Active watchers: maps file descriptors to `_FileDescriptorCallbacks`
        self._watchers = dict()
        # Native loop-specific event masks of interest
        self._readable_mask = self._loop.READ
        # NOTE: tying ERROR to WRITE is particularly handy for Windows, whose
        # `select.select()` differs from Posix by reporting
        # connection-establishment failure only through exceptfds (ERROR event),
        # while the typical application workflow is to wait for the socket to
        # become writable when waiting for socket connection to be established.
        self._writable_mask = self._loop.WRITE | self._loop.ERROR
    def get_native_ioloop(self):
        """Implement
        :py:meth:`.nbio_interface.AbstractIOServices.get_native_ioloop()`.
        """
        return self._loop
    def close(self):
        """Implement :py:meth:`.nbio_interface.AbstractIOServices.close()`.
        """
        self._loop.close()
    def run(self):
        """Implement :py:meth:`.nbio_interface.AbstractIOServices.run()`.
        """
        self._loop.start()
    def stop(self):
        """Implement :py:meth:`.nbio_interface.AbstractIOServices.stop()`.
        """
        self._loop.stop()
    def add_callback_threadsafe(self, callback):
        """Implement
        :py:meth:`.nbio_interface.AbstractIOServices.add_callback_threadsafe()`.
        """
        self._loop.add_callback(callback)
    def call_later(self, delay, callback):
        """Implement :py:meth:`.nbio_interface.AbstractIOServices.call_later()`.
        """
        return _TimerHandle(self._loop.call_later(delay, callback), self._loop)
    def getaddrinfo(self,
                    host,
                    port,
                    on_done,
                    family=0,
                    socktype=0,
                    proto=0,
                    flags=0):
        """Implement :py:meth:`.nbio_interface.AbstractIOServices.getaddrinfo()`.
        """
        return _SelectorIOLoopIOHandle(
            _AddressResolver(
                native_loop=self._loop,
                host=host,
                port=port,
                family=family,
                socktype=socktype,
                proto=proto,
                flags=flags,
                on_done=on_done).start())
    def set_reader(self, fd, on_readable):
        """Implement
        :py:meth:`.nbio_interface.AbstractFileDescriptorServices.set_reader()`.
        """
        LOGGER.debug('SelectorIOServicesAdapter.set_reader(%s, %r)', fd,
                     on_readable)
        check_fd_arg(fd)
        check_callback_arg(on_readable, 'on_readable')
        try:
            callbacks = self._watchers[fd]
        except KeyError:
            # fd not watched yet: register for readability only.
            self._loop.add_handler(fd, self._on_reader_writer_fd_events,
                                   self._readable_mask)
            self._watchers[fd] = _FileDescriptorCallbacks(reader=on_readable)
            LOGGER.debug('set_reader(%s, _) added handler Rd', fd)
        else:
            if callbacks.reader is None:
                # fd was watched only for writability: extend the event mask.
                assert callbacks.writer is not None
                self._loop.update_handler(
                    fd, self._readable_mask | self._writable_mask)
                LOGGER.debug('set_reader(%s, _) updated handler RdWr', fd)
            else:
                LOGGER.debug('set_reader(%s, _) replacing reader', fd)
            callbacks.reader = on_readable
    def remove_reader(self, fd):
        """Implement
        :py:meth:`.nbio_interface.AbstractFileDescriptorServices.remove_reader()`.
        """
        LOGGER.debug('SelectorIOServicesAdapter.remove_reader(%s)', fd)
        check_fd_arg(fd)
        try:
            callbacks = self._watchers[fd]
        except KeyError:
            LOGGER.debug('remove_reader(%s) neither was set', fd)
            return False
        if callbacks.reader is None:
            assert callbacks.writer is not None
            LOGGER.debug('remove_reader(%s) reader wasn\'t set Wr', fd)
            return False
        callbacks.reader = None
        if callbacks.writer is None:
            # Neither callback remains: unregister the fd entirely.
            del self._watchers[fd]
            self._loop.remove_handler(fd)
            LOGGER.debug('remove_reader(%s) removed handler', fd)
        else:
            self._loop.update_handler(fd, self._writable_mask)
            LOGGER.debug('remove_reader(%s) updated handler Wr', fd)
        return True
    def set_writer(self, fd, on_writable):
        """Implement
        :py:meth:`.nbio_interface.AbstractFileDescriptorServices.set_writer()`.
        """
        LOGGER.debug('SelectorIOServicesAdapter.set_writer(%s, %r)', fd,
                     on_writable)
        check_fd_arg(fd)
        check_callback_arg(on_writable, 'on_writable')
        try:
            callbacks = self._watchers[fd]
        except KeyError:
            self._loop.add_handler(fd, self._on_reader_writer_fd_events,
                                   self._writable_mask)
            self._watchers[fd] = _FileDescriptorCallbacks(writer=on_writable)
            LOGGER.debug('set_writer(%s, _) added handler Wr', fd)
        else:
            if callbacks.writer is None:
                assert callbacks.reader is not None
                # NOTE: Set the writer func before setting the mask!
                # Otherwise a race condition can occur where ioloop tries to
                # call writer when it is still None.
                callbacks.writer = on_writable
                self._loop.update_handler(
                    fd, self._readable_mask | self._writable_mask)
                LOGGER.debug('set_writer(%s, _) updated handler RdWr', fd)
            else:
                LOGGER.debug('set_writer(%s, _) replacing writer', fd)
                callbacks.writer = on_writable
    def remove_writer(self, fd):
        """Implement
        :py:meth:`.nbio_interface.AbstractFileDescriptorServices.remove_writer()`.
        """
        LOGGER.debug('SelectorIOServicesAdapter.remove_writer(%s)', fd)
        check_fd_arg(fd)
        try:
            callbacks = self._watchers[fd]
        except KeyError:
            LOGGER.debug('remove_writer(%s) neither was set.', fd)
            return False
        if callbacks.writer is None:
            assert callbacks.reader is not None
            LOGGER.debug('remove_writer(%s) writer wasn\'t set Rd', fd)
            return False
        callbacks.writer = None
        if callbacks.reader is None:
            # Neither callback remains: unregister the fd entirely.
            del self._watchers[fd]
            self._loop.remove_handler(fd)
            LOGGER.debug('remove_writer(%s) removed handler', fd)
        else:
            self._loop.update_handler(fd, self._readable_mask)
            LOGGER.debug('remove_writer(%s) updated handler Rd', fd)
        return True
    def _on_reader_writer_fd_events(self, fd, events):
        """Handle indicated file descriptor events requested via `set_reader()`
        and `set_writer()`.
        :param fd: file descriptor
        :param events: event mask using native loop's READ/WRITE/ERROR. NOTE:
            depending on the underlying poller mechanism, ERROR may be indicated
            upon certain file description state even though we don't request it.
            We ignore ERROR here since `set_reader()`/`set_writer()` don't
            request for it.
        """
        callbacks = self._watchers[fd]
        if events & self._readable_mask and callbacks.reader is None:
            # NOTE: we check for consistency here ahead of the writer callback
            # because the writer callback, if any, can change the events being
            # watched
            LOGGER.warning(
                'READ indicated on fd=%s, but reader callback is None; '
                'events=%s', fd, bin(events))
        # Writer is dispatched before reader; the reader check below re-tests
        # the callback because the writer callback may have removed it.
        if events & self._writable_mask:
            if callbacks.writer is not None:
                callbacks.writer()
            else:
                LOGGER.warning(
                    'WRITE indicated on fd=%s, but writer callback is None; '
                    'events=%s', fd, bin(events))
        if events & self._readable_mask:
            if callbacks.reader is not None:
                callbacks.reader()
            else:
                # Reader callback might have been removed in the scope of writer
                # callback.
                pass
class _FileDescriptorCallbacks(object):
"""Holds reader and writer callbacks for a file descriptor"""
__slots__ = ('reader', 'writer')
def __init__(self, reader=None, writer=None):
self.reader = reader
self.writer = writer
class _TimerHandle(nbio_interface.AbstractTimerReference):
    """This module's adaptation of `nbio_interface.AbstractTimerReference`;
    wraps the native loop's opaque timeout handle so it can be canceled via
    the common interface.
    """
    def __init__(self, handle, loop):
        """
        :param opaque handle: timer handle from the underlying loop
            implementation that may be passed to its `remove_timeout()` method
        :param AbstractSelectorIOLoop loop: the I/O loop instance that created
            the timeout.
        """
        self._handle = handle
        self._loop = loop
    def cancel(self):
        # Detach references first so a repeat cancel() is a no-op.
        loop, self._loop = self._loop, None
        if loop is not None:
            loop.remove_timeout(self._handle)
        self._handle = None
class _SelectorIOLoopIOHandle(nbio_interface.AbstractIOReference):
    """This module's adaptation of `nbio_interface.AbstractIOReference`;
    forwards cancellation to the wrapped subject's `cancel()` method.
    """
    def __init__(self, subject):
        """
        :param subject: subject of the reference containing a `cancel()` method
        """
        # Capture the bound method; the subject itself is not retained.
        self._cancel = subject.cancel
    def cancel(self):
        """Cancel pending operation
        :returns: False if was already done or cancelled; True otherwise
        :rtype: bool
        """
        return self._cancel()
class _AddressResolver(object):
    """Performs getaddrinfo asynchronously using a thread, then reports result
    via callback from the given I/O loop.
    NOTE: at this stage, we're using a thread per request, which may prove
    inefficient and even prohibitive if the app performs many of these
    operations concurrently.
    """
    # Lifecycle states; transitions: NOT_STARTED -> ACTIVE -> CANCELED or
    # COMPLETED.
    NOT_STARTED = 0
    ACTIVE = 1
    CANCELED = 2
    COMPLETED = 3
    def __init__(self, native_loop, host, port, family, socktype, proto, flags,
                 on_done):
        """
        :param AbstractSelectorIOLoop native_loop:
        :param host: `see socket.getaddrinfo()`
        :param port: `see socket.getaddrinfo()`
        :param family: `see socket.getaddrinfo()`
        :param socktype: `see socket.getaddrinfo()`
        :param proto: `see socket.getaddrinfo()`
        :param flags: `see socket.getaddrinfo()`
        :param on_done: on_done(records|BaseException) callback for reporting
            result from the given I/O loop. The single arg will be either an
            exception object (check for `BaseException`) in case of failure or
            the result returned by `socket.getaddrinfo()`.
        """
        check_callback_arg(on_done, 'on_done')
        self._state = self.NOT_STARTED
        self._result = None
        self._loop = native_loop
        self._host = host
        self._port = port
        self._family = family
        self._socktype = socktype
        self._proto = proto
        self._flags = flags
        self._on_done = on_done
        # Serializes `_state` access between the resolver thread and the
        # caller's/loop's thread.
        self._mutex = threading.Lock()
        self._threading_timer = None
    def _cleanup(self):
        """Release resources
        """
        self._loop = None
        self._threading_timer = None
        self._on_done = None
    def start(self):
        """Start asynchronous DNS lookup.
        :rtype: nbio_interface.AbstractIOReference
        """
        assert self._state == self.NOT_STARTED, self._state
        self._state = self.ACTIVE
        # threading.Timer with zero delay runs `_resolve` on a worker thread.
        self._threading_timer = threading.Timer(0, self._resolve)
        self._threading_timer.start()
        return _SelectorIOLoopIOHandle(self)
    def cancel(self):
        """Cancel the pending resolver
        :returns: False if was already done or cancelled; True otherwise
        :rtype: bool
        """
        # Try to cancel, but no guarantees
        with self._mutex:
            if self._state == self.ACTIVE:
                LOGGER.debug('Canceling resolver for %s:%s', self._host,
                             self._port)
                self._state = self.CANCELED
                # Attempt to cancel, but not guaranteed
                self._threading_timer.cancel()
                self._cleanup()
                return True
            else:
                LOGGER.debug(
                    'Ignoring _AddressResolver cancel request when not ACTIVE; '
                    '(%s:%s); state=%s', self._host, self._port, self._state)
                return False
    def _resolve(self):
        """Call `socket.getaddrinfo()` and return result via user's callback
        function on the given I/O loop
        NOTE: runs on the `threading.Timer` worker thread, not the I/O loop's
        thread.
        """
        try:
            # NOTE: on python 2.x, can't pass keyword args to getaddrinfo()
            result = socket.getaddrinfo(self._host, self._port, self._family,
                                        self._socktype, self._proto,
                                        self._flags)
        except Exception as exc:  # pylint: disable=W0703
            LOGGER.error('Address resolution failed: %r', exc)
            result = exc
        self._result = result
        # Schedule result to be returned to user via user's event loop
        with self._mutex:
            if self._state == self.ACTIVE:
                self._loop.add_callback(self._dispatch_result)
            else:
                LOGGER.debug(
                    'Asynchronous getaddrinfo cancellation detected; '
                    'in thread; host=%r', self._host)
    def _dispatch_result(self):
        """This is called from the user's I/O loop to pass the result to the
        user via the user's on_done callback
        """
        if self._state == self.ACTIVE:
            self._state = self.COMPLETED
            try:
                LOGGER.debug(
                    'Invoking asynchronous getaddrinfo() completion callback; '
                    'host=%r', self._host)
                self._on_done(self._result)
            finally:
                self._cleanup()
        else:
            LOGGER.debug(
                'Asynchronous getaddrinfo cancellation detected; '
                'in I/O loop context; host=%r', self._host)