From 5a45c5d75ce3ae109b521cd06c9b0e1e750c25ab Mon Sep 17 00:00:00 2001
From: Varac <varac@varac.net>
Date: Tue, 19 Mar 2019 20:52:22 +0100
Subject: [PATCH] Use ansible_mitogen plugin to speedup deploys
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

mitogen speeds up an ansible run, according to the docs:
" Expect a 1.25x - 7x speedup and a CPU usage reduction of at least 2x, …"
The problem is that we need to ship a bundled version of it since, at the
moment, you need to provide the path to the python module, which varies
depending on how people install it (venv, pip user vs. system installation, etc.).
See https://github.com/dw/mitogen/issues/568 for details.
---
 ansible/ansible.cfg                           |    2 +
 ansible/plugins/mitogen-0.2.6/LICENSE         |   26 +
 ansible/plugins/mitogen-0.2.6/MANIFEST.in     |    1 +
 ansible/plugins/mitogen-0.2.6/PKG-INFO        |   23 +
 ansible/plugins/mitogen-0.2.6/README.md       |   13 +
 .../mitogen-0.2.6/ansible_mitogen/__init__.py |    0
 .../mitogen-0.2.6/ansible_mitogen/affinity.py |  257 ++
 .../ansible_mitogen/compat/__init__.py        |    0
 .../compat/simplejson/__init__.py             |  318 ++
 .../compat/simplejson/decoder.py              |  354 ++
 .../compat/simplejson/encoder.py              |  440 +++
 .../compat/simplejson/scanner.py              |   65 +
 .../ansible_mitogen/connection.py             | 1000 +++++
 .../mitogen-0.2.6/ansible_mitogen/loaders.py  |   48 +
 .../mitogen-0.2.6/ansible_mitogen/logging.py  |  127 +
 .../mitogen-0.2.6/ansible_mitogen/mixins.py   |  432 +++
 .../ansible_mitogen/module_finder.py          |  157 +
 .../mitogen-0.2.6/ansible_mitogen/parsing.py  |   84 +
 .../mitogen-0.2.6/ansible_mitogen/planner.py  |  498 +++
 .../ansible_mitogen/plugins/__init__.py       |    0
 .../plugins/action/__init__.py                |    0
 .../plugins/action/mitogen_get_stack.py       |   54 +
 .../plugins/connection/__init__.py            |    0
 .../plugins/connection/mitogen_doas.py        |   44 +
 .../plugins/connection/mitogen_docker.py      |   51 +
 .../plugins/connection/mitogen_jail.py        |   44 +
 .../plugins/connection/mitogen_kubectl.py     |   71 +
 .../plugins/connection/mitogen_local.py       |   86 +
 .../plugins/connection/mitogen_lxc.py         |   44 +
 .../plugins/connection/mitogen_lxd.py         |   44 +
 .../plugins/connection/mitogen_machinectl.py  |   44 +
 .../plugins/connection/mitogen_setns.py       |   44 +
 .../plugins/connection/mitogen_ssh.py         |   65 +
 .../plugins/connection/mitogen_su.py          |   44 +
 .../plugins/connection/mitogen_sudo.py        |   44 +
 .../plugins/strategy/__init__.py              |    0
 .../plugins/strategy/mitogen.py               |   61 +
 .../plugins/strategy/mitogen_free.py          |   62 +
 .../plugins/strategy/mitogen_host_pinned.py   |   67 +
 .../plugins/strategy/mitogen_linear.py        |   62 +
 .../mitogen-0.2.6/ansible_mitogen/process.py  |  358 ++
 .../mitogen-0.2.6/ansible_mitogen/runner.py   |  928 +++++
 .../mitogen-0.2.6/ansible_mitogen/services.py |  537 +++
 .../mitogen-0.2.6/ansible_mitogen/strategy.py |  257 ++
 .../mitogen-0.2.6/ansible_mitogen/target.py   |  777 ++++
 .../ansible_mitogen/transport_config.py       |  621 +++
 .../mitogen-0.2.6/mitogen.egg-info/PKG-INFO   |   23 +
 .../mitogen.egg-info/SOURCES.txt              |   78 +
 .../mitogen.egg-info/dependency_links.txt     |    1 +
 .../mitogen.egg-info/not-zip-safe             |    1 +
 .../mitogen.egg-info/top_level.txt            |    2 +
 .../plugins/mitogen-0.2.6/mitogen/__init__.py |  120 +
 .../mitogen-0.2.6/mitogen/compat/__init__.py  |    0
 .../mitogen-0.2.6/mitogen/compat/pkgutil.py   |  593 +++
 .../mitogen-0.2.6/mitogen/compat/tokenize.py  |  453 +++
 ansible/plugins/mitogen-0.2.6/mitogen/core.py | 3409 +++++++++++++++++
 .../plugins/mitogen-0.2.6/mitogen/debug.py    |  236 ++
 ansible/plugins/mitogen-0.2.6/mitogen/doas.py |  113 +
 .../plugins/mitogen-0.2.6/mitogen/docker.py   |   81 +
 .../plugins/mitogen-0.2.6/mitogen/fakessh.py  |  461 +++
 ansible/plugins/mitogen-0.2.6/mitogen/fork.py |  223 ++
 ansible/plugins/mitogen-0.2.6/mitogen/jail.py |   65 +
 .../plugins/mitogen-0.2.6/mitogen/kubectl.py  |   65 +
 ansible/plugins/mitogen-0.2.6/mitogen/lxc.py  |   75 +
 ansible/plugins/mitogen-0.2.6/mitogen/lxd.py  |   77 +
 .../plugins/mitogen-0.2.6/mitogen/master.py   | 1173 ++++++
 .../plugins/mitogen-0.2.6/mitogen/minify.py   |  139 +
 .../plugins/mitogen-0.2.6/mitogen/os_fork.py  |  183 +
 .../plugins/mitogen-0.2.6/mitogen/parent.py   | 2330 +++++++++++
 .../plugins/mitogen-0.2.6/mitogen/profiler.py |  166 +
 .../plugins/mitogen-0.2.6/mitogen/select.py   |  333 ++
 .../plugins/mitogen-0.2.6/mitogen/service.py  | 1085 ++++++
 .../plugins/mitogen-0.2.6/mitogen/setns.py    |  238 ++
 ansible/plugins/mitogen-0.2.6/mitogen/ssh.py  |  317 ++
 ansible/plugins/mitogen-0.2.6/mitogen/su.py   |  128 +
 ansible/plugins/mitogen-0.2.6/mitogen/sudo.py |  277 ++
 ansible/plugins/mitogen-0.2.6/mitogen/unix.py |  168 +
 .../plugins/mitogen-0.2.6/mitogen/utils.py    |  227 ++
 ansible/plugins/mitogen-0.2.6/setup.cfg       |   15 +
 ansible/plugins/mitogen-0.2.6/setup.py        |   67 +
 test/plugins                                  |    1 +
 81 files changed, 21177 insertions(+)
 create mode 100644 ansible/plugins/mitogen-0.2.6/LICENSE
 create mode 100644 ansible/plugins/mitogen-0.2.6/MANIFEST.in
 create mode 100644 ansible/plugins/mitogen-0.2.6/PKG-INFO
 create mode 100644 ansible/plugins/mitogen-0.2.6/README.md
 create mode 100644 ansible/plugins/mitogen-0.2.6/ansible_mitogen/__init__.py
 create mode 100644 ansible/plugins/mitogen-0.2.6/ansible_mitogen/affinity.py
 create mode 100644 ansible/plugins/mitogen-0.2.6/ansible_mitogen/compat/__init__.py
 create mode 100644 ansible/plugins/mitogen-0.2.6/ansible_mitogen/compat/simplejson/__init__.py
 create mode 100644 ansible/plugins/mitogen-0.2.6/ansible_mitogen/compat/simplejson/decoder.py
 create mode 100644 ansible/plugins/mitogen-0.2.6/ansible_mitogen/compat/simplejson/encoder.py
 create mode 100644 ansible/plugins/mitogen-0.2.6/ansible_mitogen/compat/simplejson/scanner.py
 create mode 100644 ansible/plugins/mitogen-0.2.6/ansible_mitogen/connection.py
 create mode 100644 ansible/plugins/mitogen-0.2.6/ansible_mitogen/loaders.py
 create mode 100644 ansible/plugins/mitogen-0.2.6/ansible_mitogen/logging.py
 create mode 100644 ansible/plugins/mitogen-0.2.6/ansible_mitogen/mixins.py
 create mode 100644 ansible/plugins/mitogen-0.2.6/ansible_mitogen/module_finder.py
 create mode 100644 ansible/plugins/mitogen-0.2.6/ansible_mitogen/parsing.py
 create mode 100644 ansible/plugins/mitogen-0.2.6/ansible_mitogen/planner.py
 create mode 100644 ansible/plugins/mitogen-0.2.6/ansible_mitogen/plugins/__init__.py
 create mode 100644 ansible/plugins/mitogen-0.2.6/ansible_mitogen/plugins/action/__init__.py
 create mode 100644 ansible/plugins/mitogen-0.2.6/ansible_mitogen/plugins/action/mitogen_get_stack.py
 create mode 100644 ansible/plugins/mitogen-0.2.6/ansible_mitogen/plugins/connection/__init__.py
 create mode 100644 ansible/plugins/mitogen-0.2.6/ansible_mitogen/plugins/connection/mitogen_doas.py
 create mode 100644 ansible/plugins/mitogen-0.2.6/ansible_mitogen/plugins/connection/mitogen_docker.py
 create mode 100644 ansible/plugins/mitogen-0.2.6/ansible_mitogen/plugins/connection/mitogen_jail.py
 create mode 100644 ansible/plugins/mitogen-0.2.6/ansible_mitogen/plugins/connection/mitogen_kubectl.py
 create mode 100644 ansible/plugins/mitogen-0.2.6/ansible_mitogen/plugins/connection/mitogen_local.py
 create mode 100644 ansible/plugins/mitogen-0.2.6/ansible_mitogen/plugins/connection/mitogen_lxc.py
 create mode 100644 ansible/plugins/mitogen-0.2.6/ansible_mitogen/plugins/connection/mitogen_lxd.py
 create mode 100644 ansible/plugins/mitogen-0.2.6/ansible_mitogen/plugins/connection/mitogen_machinectl.py
 create mode 100644 ansible/plugins/mitogen-0.2.6/ansible_mitogen/plugins/connection/mitogen_setns.py
 create mode 100644 ansible/plugins/mitogen-0.2.6/ansible_mitogen/plugins/connection/mitogen_ssh.py
 create mode 100644 ansible/plugins/mitogen-0.2.6/ansible_mitogen/plugins/connection/mitogen_su.py
 create mode 100644 ansible/plugins/mitogen-0.2.6/ansible_mitogen/plugins/connection/mitogen_sudo.py
 create mode 100644 ansible/plugins/mitogen-0.2.6/ansible_mitogen/plugins/strategy/__init__.py
 create mode 100644 ansible/plugins/mitogen-0.2.6/ansible_mitogen/plugins/strategy/mitogen.py
 create mode 100644 ansible/plugins/mitogen-0.2.6/ansible_mitogen/plugins/strategy/mitogen_free.py
 create mode 100644 ansible/plugins/mitogen-0.2.6/ansible_mitogen/plugins/strategy/mitogen_host_pinned.py
 create mode 100644 ansible/plugins/mitogen-0.2.6/ansible_mitogen/plugins/strategy/mitogen_linear.py
 create mode 100644 ansible/plugins/mitogen-0.2.6/ansible_mitogen/process.py
 create mode 100644 ansible/plugins/mitogen-0.2.6/ansible_mitogen/runner.py
 create mode 100644 ansible/plugins/mitogen-0.2.6/ansible_mitogen/services.py
 create mode 100644 ansible/plugins/mitogen-0.2.6/ansible_mitogen/strategy.py
 create mode 100644 ansible/plugins/mitogen-0.2.6/ansible_mitogen/target.py
 create mode 100644 ansible/plugins/mitogen-0.2.6/ansible_mitogen/transport_config.py
 create mode 100644 ansible/plugins/mitogen-0.2.6/mitogen.egg-info/PKG-INFO
 create mode 100644 ansible/plugins/mitogen-0.2.6/mitogen.egg-info/SOURCES.txt
 create mode 100644 ansible/plugins/mitogen-0.2.6/mitogen.egg-info/dependency_links.txt
 create mode 100644 ansible/plugins/mitogen-0.2.6/mitogen.egg-info/not-zip-safe
 create mode 100644 ansible/plugins/mitogen-0.2.6/mitogen.egg-info/top_level.txt
 create mode 100644 ansible/plugins/mitogen-0.2.6/mitogen/__init__.py
 create mode 100644 ansible/plugins/mitogen-0.2.6/mitogen/compat/__init__.py
 create mode 100644 ansible/plugins/mitogen-0.2.6/mitogen/compat/pkgutil.py
 create mode 100644 ansible/plugins/mitogen-0.2.6/mitogen/compat/tokenize.py
 create mode 100644 ansible/plugins/mitogen-0.2.6/mitogen/core.py
 create mode 100644 ansible/plugins/mitogen-0.2.6/mitogen/debug.py
 create mode 100644 ansible/plugins/mitogen-0.2.6/mitogen/doas.py
 create mode 100644 ansible/plugins/mitogen-0.2.6/mitogen/docker.py
 create mode 100644 ansible/plugins/mitogen-0.2.6/mitogen/fakessh.py
 create mode 100644 ansible/plugins/mitogen-0.2.6/mitogen/fork.py
 create mode 100644 ansible/plugins/mitogen-0.2.6/mitogen/jail.py
 create mode 100644 ansible/plugins/mitogen-0.2.6/mitogen/kubectl.py
 create mode 100644 ansible/plugins/mitogen-0.2.6/mitogen/lxc.py
 create mode 100644 ansible/plugins/mitogen-0.2.6/mitogen/lxd.py
 create mode 100644 ansible/plugins/mitogen-0.2.6/mitogen/master.py
 create mode 100644 ansible/plugins/mitogen-0.2.6/mitogen/minify.py
 create mode 100644 ansible/plugins/mitogen-0.2.6/mitogen/os_fork.py
 create mode 100644 ansible/plugins/mitogen-0.2.6/mitogen/parent.py
 create mode 100644 ansible/plugins/mitogen-0.2.6/mitogen/profiler.py
 create mode 100644 ansible/plugins/mitogen-0.2.6/mitogen/select.py
 create mode 100644 ansible/plugins/mitogen-0.2.6/mitogen/service.py
 create mode 100644 ansible/plugins/mitogen-0.2.6/mitogen/setns.py
 create mode 100644 ansible/plugins/mitogen-0.2.6/mitogen/ssh.py
 create mode 100644 ansible/plugins/mitogen-0.2.6/mitogen/su.py
 create mode 100644 ansible/plugins/mitogen-0.2.6/mitogen/sudo.py
 create mode 100644 ansible/plugins/mitogen-0.2.6/mitogen/unix.py
 create mode 100644 ansible/plugins/mitogen-0.2.6/mitogen/utils.py
 create mode 100644 ansible/plugins/mitogen-0.2.6/setup.cfg
 create mode 100644 ansible/plugins/mitogen-0.2.6/setup.py
 create mode 120000 test/plugins

diff --git a/ansible/ansible.cfg b/ansible/ansible.cfg
index 2bc469894..d3e6f1784 100644
--- a/ansible/ansible.cfg
+++ b/ansible/ansible.cfg
@@ -3,3 +3,5 @@ callback_whitelist = profile_tasks, timer
 inventory = inventory.yml
 nocows = 1
 stdout_callback = yaml
+strategy_plugins = plugins/mitogen-0.2.6/ansible_mitogen/plugins/strategy
+strategy = mitogen_linear
diff --git a/ansible/plugins/mitogen-0.2.6/LICENSE b/ansible/plugins/mitogen-0.2.6/LICENSE
new file mode 100644
index 000000000..70e43a944
--- /dev/null
+++ b/ansible/plugins/mitogen-0.2.6/LICENSE
@@ -0,0 +1,26 @@
+Copyright 2019, David Wilson
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+1. Redistributions of source code must retain the above copyright notice, this
+list of conditions and the following disclaimer.
+
+2. Redistributions in binary form must reproduce the above copyright notice,
+this list of conditions and the following disclaimer in the documentation
+and/or other materials provided with the distribution.
+
+3. Neither the name of the copyright holder nor the names of its contributors
+may be used to endorse or promote products derived from this software without
+specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/ansible/plugins/mitogen-0.2.6/MANIFEST.in b/ansible/plugins/mitogen-0.2.6/MANIFEST.in
new file mode 100644
index 000000000..1aba38f67
--- /dev/null
+++ b/ansible/plugins/mitogen-0.2.6/MANIFEST.in
@@ -0,0 +1 @@
+include LICENSE
diff --git a/ansible/plugins/mitogen-0.2.6/PKG-INFO b/ansible/plugins/mitogen-0.2.6/PKG-INFO
new file mode 100644
index 000000000..d5ec33a7b
--- /dev/null
+++ b/ansible/plugins/mitogen-0.2.6/PKG-INFO
@@ -0,0 +1,23 @@
+Metadata-Version: 1.1
+Name: mitogen
+Version: 0.2.6
+Summary: Library for writing distributed self-replicating programs.
+Home-page: https://github.com/dw/mitogen/
+Author: David Wilson
+Author-email: UNKNOWN
+License: New BSD
+Description: UNKNOWN
+Platform: UNKNOWN
+Classifier: Environment :: Console
+Classifier: Intended Audience :: System Administrators
+Classifier: License :: OSI Approved :: BSD License
+Classifier: Operating System :: POSIX
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 2.4
+Classifier: Programming Language :: Python :: 2.5
+Classifier: Programming Language :: Python :: 2.6
+Classifier: Programming Language :: Python :: 2.7
+Classifier: Programming Language :: Python :: 3.6
+Classifier: Programming Language :: Python :: Implementation :: CPython
+Classifier: Topic :: System :: Distributed Computing
+Classifier: Topic :: System :: Systems Administration
diff --git a/ansible/plugins/mitogen-0.2.6/README.md b/ansible/plugins/mitogen-0.2.6/README.md
new file mode 100644
index 000000000..5ef2447f5
--- /dev/null
+++ b/ansible/plugins/mitogen-0.2.6/README.md
@@ -0,0 +1,13 @@
+
+# Mitogen
+
+<!-- [![Build Status](https://travis-ci.org/dw/mitogen.png?branch=master)](https://travis-ci.org/dw/mitogen}) -->
+<a href="https://mitogen.readthedocs.io/">Please see the documentation</a>.
+
+![](https://i.imgur.com/eBM6LhJ.gif)
+
+[![Total alerts](https://img.shields.io/lgtm/alerts/g/dw/mitogen.svg?logo=lgtm&logoWidth=18)](https://lgtm.com/projects/g/dw/mitogen/alerts/)
+
+[![Build Status](https://travis-ci.org/dw/mitogen.svg?branch=master)](https://travis-ci.org/dw/mitogen)
+
+[![Pipelines Status](https://dev.azure.com/dw-mitogen/Mitogen/_apis/build/status/dw.mitogen?branchName=master)](https://dev.azure.com/dw-mitogen/Mitogen/_build/latest?definitionId=1?branchName=master)
diff --git a/ansible/plugins/mitogen-0.2.6/ansible_mitogen/__init__.py b/ansible/plugins/mitogen-0.2.6/ansible_mitogen/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/ansible/plugins/mitogen-0.2.6/ansible_mitogen/affinity.py b/ansible/plugins/mitogen-0.2.6/ansible_mitogen/affinity.py
new file mode 100644
index 000000000..57926516a
--- /dev/null
+++ b/ansible/plugins/mitogen-0.2.6/ansible_mitogen/affinity.py
@@ -0,0 +1,257 @@
+# Copyright 2019, David Wilson
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its contributors
+# may be used to endorse or promote products derived from this software without
+# specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+
+"""
+As Mitogen separates asynchronous IO out to a broker thread, communication
+necessarily involves context switching and waking that thread. When application
+threads and the broker share a CPU, this can be almost invisibly fast - around
+25 microseconds for a full A->B->A round-trip.
+
+However when threads are scheduled on different CPUs, round-trip delays
+regularly vary wildly, and easily into milliseconds. Many contributing factors
+exist, not least scenarios like:
+
+1. A is preempted immediately after waking B, but before releasing the GIL.
+2. B wakes from IO wait only to immediately enter futex wait.
+3. A may wait 10ms or more for another timeslice, as the scheduler on its CPU
+   runs threads unrelated to its transaction (i.e. not B), wake only to release
+   its GIL, before entering IO sleep waiting for a reply from B, which cannot
+   exist yet.
+4. B wakes, acquires GIL, performs work, and sends reply to A, causing it to
+   wake. B is preempted before releasing GIL.
+5. A wakes from IO wait only to immediately enter futex wait.
+6. B may wait 10ms or more for another timeslice, wake only to release its GIL,
+   before sleeping again.
+7. A wakes, acquires GIL, finally receives reply.
+
+Per above if we are unlucky, on an even moderately busy machine it is possible
+to lose milliseconds just in scheduling delay, and the effect is compounded
+when pairs of threads in process A are communicating with pairs of threads in
+process B using the same scheme, such as when Ansible WorkerProcess is
+communicating with ContextService in the connection multiplexer. In the worst
+case it could involve 4 threads working in lockstep spread across 4 busy CPUs.
+
+Since multithreading in Python is essentially useless except for waiting on IO
+due to the presence of the GIL, at least in Ansible there is no good reason for
+threads in the same process to run on distinct CPUs - they always operate in
+lockstep due to the GIL, and are thus vulnerable to issues like above.
+
+Linux lacks any natural API to describe what we want, it only permits
+individual threads to be constrained to run on specific CPUs, and for that
+constraint to be inherited by new threads and forks of the constrained thread.
+
+This module therefore implements a CPU pinning policy for Ansible processes,
+providing methods that should be called early in any new process, either to
+rebalance which CPU it is pinned to, or in the case of subprocesses, to remove
+the pinning entirely. It is likely to require ongoing tweaking, since pinning
+necessarily involves preventing the scheduler from making load balancing
+decisions.
+"""
+
+import ctypes
+import mmap
+import multiprocessing
+import os
+import struct
+
+import mitogen.parent
+
+
+try:
+    _libc = ctypes.CDLL(None, use_errno=True)
+    _strerror = _libc.strerror
+    _strerror.restype = ctypes.c_char_p
+    _pthread_mutex_init = _libc.pthread_mutex_init
+    _pthread_mutex_lock = _libc.pthread_mutex_lock
+    _pthread_mutex_unlock = _libc.pthread_mutex_unlock
+    _sched_setaffinity = _libc.sched_setaffinity
+except (OSError, AttributeError):
+    _libc = None
+    _strerror = None
+    _pthread_mutex_init = None
+    _pthread_mutex_lock = None
+    _pthread_mutex_unlock = None
+    _sched_setaffinity = None
+
+
+class pthread_mutex_t(ctypes.Structure):
+    """
+    Wrap pthread_mutex_t to allow storing a lock in shared memory.
+    """
+    _fields_ = [
+        ('data', ctypes.c_uint8 * 512),
+    ]
+
+    def init(self):
+        if _pthread_mutex_init(self.data, 0):
+            raise Exception(_strerror(ctypes.get_errno()))
+
+    def acquire(self):
+        if _pthread_mutex_lock(self.data):
+            raise Exception(_strerror(ctypes.get_errno()))
+
+    def release(self):
+        if _pthread_mutex_unlock(self.data):
+            raise Exception(_strerror(ctypes.get_errno()))
+
+
+class State(ctypes.Structure):
+    """
+    Contents of shared memory segment. This allows :meth:`Manager.assign` to be
+    called from any child, since affinity assignment must happen from within
+    the context of the new child process.
+    """
+    _fields_ = [
+        ('lock', pthread_mutex_t),
+        ('counter', ctypes.c_uint8),
+    ]
+
+
+class Policy(object):
+    """
+    Process affinity policy.
+    """
+    def assign_controller(self):
+        """
+        Assign the Ansible top-level policy to this process.
+        """
+
+    def assign_muxprocess(self):
+        """
+        Assign the MuxProcess policy to this process.
+        """
+
+    def assign_worker(self):
+        """
+        Assign the WorkerProcess policy to this process.
+        """
+
+    def assign_subprocess(self):
+        """
+        Assign the helper subprocess policy to this process.
+        """
+
+class FixedPolicy(Policy):
+    """
+    :class:`Policy` for machines where the only control method available is
+    fixed CPU placement. The scheme here was tested on an otherwise idle 16
+    thread machine.
+
+    - The connection multiplexer is pinned to CPU 0.
+    - The Ansible top-level (strategy) is pinned to CPU 1.
+    - WorkerProcesses are pinned sequentually to 2..N, wrapping around when no
+      more CPUs exist.
+    - Children such as SSH may be scheduled on any CPU except 0/1.
+
+    If the machine has less than 4 cores available, the top-level and workers
+    are pinned between CPU 2..N, i.e. no CPU is reserved for the top-level
+    process.
+
+    This could at least be improved by having workers pinned to independent
+    cores, before reusing the second hyperthread of an existing core.
+
+    A hook is installed that causes :meth:`reset` to run in the child of any
+    process created with :func:`mitogen.parent.detach_popen`, ensuring
+    CPU-intensive children like SSH are not forced to share the same core as
+    the (otherwise potentially very busy) parent.
+    """
+    def __init__(self, cpu_count=None):
+        #: For tests.
+        self.cpu_count = cpu_count or multiprocessing.cpu_count()
+        self.mem = mmap.mmap(-1, 4096)
+        self.state = State.from_buffer(self.mem)
+        self.state.lock.init()
+
+        if self.cpu_count < 2:
+            # uniprocessor
+            self._reserve_mux = False
+            self._reserve_controller = False
+            self._reserve_mask = 0
+            self._reserve_shift = 0
+        elif self.cpu_count < 4:
+            # small SMP
+            self._reserve_mux = True
+            self._reserve_controller = False
+            self._reserve_mask = 1
+            self._reserve_shift = 1
+        else:
+            # big SMP
+            self._reserve_mux = True
+            self._reserve_controller = True
+            self._reserve_mask = 3
+            self._reserve_shift = 2
+
+    def _set_affinity(self, mask):
+        mitogen.parent._preexec_hook = self._clear
+        self._set_cpu_mask(mask)
+
+    def _balance(self):
+        self.state.lock.acquire()
+        try:
+            n = self.state.counter
+            self.state.counter += 1
+        finally:
+            self.state.lock.release()
+
+        self._set_cpu(self._reserve_shift + (
+            (n % (self.cpu_count - self._reserve_shift))
+        ))
+
+    def _set_cpu(self, cpu):
+        self._set_affinity(1 << cpu)
+
+    def _clear(self):
+        all_cpus = (1 << self.cpu_count) - 1
+        self._set_affinity(all_cpus & ~self._reserve_mask)
+
+    def assign_controller(self):
+        if self._reserve_controller:
+            self._set_cpu(1)
+        else:
+            self._balance()
+
+    def assign_muxprocess(self):
+        self._set_cpu(0)
+
+    def assign_worker(self):
+        self._balance()
+
+    def assign_subprocess(self):
+        self._clear()
+
+
+class LinuxPolicy(FixedPolicy):
+    def _set_cpu_mask(self, mask):
+        s = struct.pack('L', mask)
+        _sched_setaffinity(os.getpid(), len(s), s)
+
+
+if _sched_setaffinity is not None:
+    policy = LinuxPolicy()
+else:
+    policy = Policy()
diff --git a/ansible/plugins/mitogen-0.2.6/ansible_mitogen/compat/__init__.py b/ansible/plugins/mitogen-0.2.6/ansible_mitogen/compat/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/ansible/plugins/mitogen-0.2.6/ansible_mitogen/compat/simplejson/__init__.py b/ansible/plugins/mitogen-0.2.6/ansible_mitogen/compat/simplejson/__init__.py
new file mode 100644
index 000000000..d5b4d3991
--- /dev/null
+++ b/ansible/plugins/mitogen-0.2.6/ansible_mitogen/compat/simplejson/__init__.py
@@ -0,0 +1,318 @@
+r"""JSON (JavaScript Object Notation) <http://json.org> is a subset of
+JavaScript syntax (ECMA-262 3rd edition) used as a lightweight data
+interchange format.
+
+:mod:`simplejson` exposes an API familiar to users of the standard library
+:mod:`marshal` and :mod:`pickle` modules. It is the externally maintained
+version of the :mod:`json` library contained in Python 2.6, but maintains
+compatibility with Python 2.4 and Python 2.5 and (currently) has
+significant performance advantages, even without using the optional C
+extension for speedups.
+
+Encoding basic Python object hierarchies::
+
+    >>> import simplejson as json
+    >>> json.dumps(['foo', {'bar': ('baz', None, 1.0, 2)}])
+    '["foo", {"bar": ["baz", null, 1.0, 2]}]'
+    >>> print json.dumps("\"foo\bar")
+    "\"foo\bar"
+    >>> print json.dumps(u'\u1234')
+    "\u1234"
+    >>> print json.dumps('\\')
+    "\\"
+    >>> print json.dumps({"c": 0, "b": 0, "a": 0}, sort_keys=True)
+    {"a": 0, "b": 0, "c": 0}
+    >>> from StringIO import StringIO
+    >>> io = StringIO()
+    >>> json.dump(['streaming API'], io)
+    >>> io.getvalue()
+    '["streaming API"]'
+
+Compact encoding::
+
+    >>> import simplejson as json
+    >>> json.dumps([1,2,3,{'4': 5, '6': 7}], separators=(',',':'))
+    '[1,2,3,{"4":5,"6":7}]'
+
+Pretty printing::
+
+    >>> import simplejson as json
+    >>> s = json.dumps({'4': 5, '6': 7}, sort_keys=True, indent=4)
+    >>> print '\n'.join([l.rstrip() for l in  s.splitlines()])
+    {
+        "4": 5,
+        "6": 7
+    }
+
+Decoding JSON::
+
+    >>> import simplejson as json
+    >>> obj = [u'foo', {u'bar': [u'baz', None, 1.0, 2]}]
+    >>> json.loads('["foo", {"bar":["baz", null, 1.0, 2]}]') == obj
+    True
+    >>> json.loads('"\\"foo\\bar"') == u'"foo\x08ar'
+    True
+    >>> from StringIO import StringIO
+    >>> io = StringIO('["streaming API"]')
+    >>> json.load(io)[0] == 'streaming API'
+    True
+
+Specializing JSON object decoding::
+
+    >>> import simplejson as json
+    >>> def as_complex(dct):
+    ...     if '__complex__' in dct:
+    ...         return complex(dct['real'], dct['imag'])
+    ...     return dct
+    ...
+    >>> json.loads('{"__complex__": true, "real": 1, "imag": 2}',
+    ...     object_hook=as_complex)
+    (1+2j)
+    >>> import decimal
+    >>> json.loads('1.1', parse_float=decimal.Decimal) == decimal.Decimal('1.1')
+    True
+
+Specializing JSON object encoding::
+
+    >>> import simplejson as json
+    >>> def encode_complex(obj):
+    ...     if isinstance(obj, complex):
+    ...         return [obj.real, obj.imag]
+    ...     raise TypeError(repr(o) + " is not JSON serializable")
+    ...
+    >>> json.dumps(2 + 1j, default=encode_complex)
+    '[2.0, 1.0]'
+    >>> json.JSONEncoder(default=encode_complex).encode(2 + 1j)
+    '[2.0, 1.0]'
+    >>> ''.join(json.JSONEncoder(default=encode_complex).iterencode(2 + 1j))
+    '[2.0, 1.0]'
+
+
+Using simplejson.tool from the shell to validate and pretty-print::
+
+    $ echo '{"json":"obj"}' | python -m simplejson.tool
+    {
+        "json": "obj"
+    }
+    $ echo '{ 1.2:3.4}' | python -m simplejson.tool
+    Expecting property name: line 1 column 2 (char 2)
+"""
+__version__ = '2.0.9'
+__all__ = [
+    'dump', 'dumps', 'load', 'loads',
+    'JSONDecoder', 'JSONEncoder',
+]
+
+__author__ = 'Bob Ippolito <bob@redivi.com>'
+
+from decoder import JSONDecoder
+from encoder import JSONEncoder
+
+_default_encoder = JSONEncoder(
+    skipkeys=False,
+    ensure_ascii=True,
+    check_circular=True,
+    allow_nan=True,
+    indent=None,
+    separators=None,
+    encoding='utf-8',
+    default=None,
+)
+
+def dump(obj, fp, skipkeys=False, ensure_ascii=True, check_circular=True,
+        allow_nan=True, cls=None, indent=None, separators=None,
+        encoding='utf-8', default=None, **kw):
+    """Serialize ``obj`` as a JSON formatted stream to ``fp`` (a
+    ``.write()``-supporting file-like object).
+
+    If ``skipkeys`` is true then ``dict`` keys that are not basic types
+    (``str``, ``unicode``, ``int``, ``long``, ``float``, ``bool``, ``None``)
+    will be skipped instead of raising a ``TypeError``.
+
+    If ``ensure_ascii`` is false, then the some chunks written to ``fp``
+    may be ``unicode`` instances, subject to normal Python ``str`` to
+    ``unicode`` coercion rules. Unless ``fp.write()`` explicitly
+    understands ``unicode`` (as in ``codecs.getwriter()``) this is likely
+    to cause an error.
+
+    If ``check_circular`` is false, then the circular reference check
+    for container types will be skipped and a circular reference will
+    result in an ``OverflowError`` (or worse).
+
+    If ``allow_nan`` is false, then it will be a ``ValueError`` to
+    serialize out of range ``float`` values (``nan``, ``inf``, ``-inf``)
+    in strict compliance of the JSON specification, instead of using the
+    JavaScript equivalents (``NaN``, ``Infinity``, ``-Infinity``).
+
+    If ``indent`` is a non-negative integer, then JSON array elements and object
+    members will be pretty-printed with that indent level. An indent level
+    of 0 will only insert newlines. ``None`` is the most compact representation.
+
+    If ``separators`` is an ``(item_separator, dict_separator)`` tuple
+    then it will be used instead of the default ``(', ', ': ')`` separators.
+    ``(',', ':')`` is the most compact JSON representation.
+
+    ``encoding`` is the character encoding for str instances, default is UTF-8.
+
+    ``default(obj)`` is a function that should return a serializable version
+    of obj or raise TypeError. The default simply raises TypeError.
+
+    To use a custom ``JSONEncoder`` subclass (e.g. one that overrides the
+    ``.default()`` method to serialize additional types), specify it with
+    the ``cls`` kwarg.
+
+    """
+    # cached encoder
+    if (not skipkeys and ensure_ascii and
+        check_circular and allow_nan and
+        cls is None and indent is None and separators is None and
+        encoding == 'utf-8' and default is None and not kw):
+        iterable = _default_encoder.iterencode(obj)
+    else:
+        if cls is None:
+            cls = JSONEncoder
+        iterable = cls(skipkeys=skipkeys, ensure_ascii=ensure_ascii,
+            check_circular=check_circular, allow_nan=allow_nan, indent=indent,
+            separators=separators, encoding=encoding,
+            default=default, **kw).iterencode(obj)
+    # could accelerate with writelines in some versions of Python, at
+    # a debuggability cost
+    for chunk in iterable:
+        fp.write(chunk)
+
+
+def dumps(obj, skipkeys=False, ensure_ascii=True, check_circular=True,
+        allow_nan=True, cls=None, indent=None, separators=None,
+        encoding='utf-8', default=None, **kw):
+    """Serialize ``obj`` to a JSON formatted ``str``.
+
+    If ``skipkeys`` is false then ``dict`` keys that are not basic types
+    (``str``, ``unicode``, ``int``, ``long``, ``float``, ``bool``, ``None``)
+    will be skipped instead of raising a ``TypeError``.
+
+    If ``ensure_ascii`` is false, then the return value will be a
+    ``unicode`` instance subject to normal Python ``str`` to ``unicode``
+    coercion rules instead of being escaped to an ASCII ``str``.
+
+    If ``check_circular`` is false, then the circular reference check
+    for container types will be skipped and a circular reference will
+    result in an ``OverflowError`` (or worse).
+
+    If ``allow_nan`` is false, then it will be a ``ValueError`` to
+    serialize out of range ``float`` values (``nan``, ``inf``, ``-inf``) in
+    strict compliance of the JSON specification, instead of using the
+    JavaScript equivalents (``NaN``, ``Infinity``, ``-Infinity``).
+
+    If ``indent`` is a non-negative integer, then JSON array elements and
+    object members will be pretty-printed with that indent level. An indent
+    level of 0 will only insert newlines. ``None`` is the most compact
+    representation.
+
+    If ``separators`` is an ``(item_separator, dict_separator)`` tuple
+    then it will be used instead of the default ``(', ', ': ')`` separators.
+    ``(',', ':')`` is the most compact JSON representation.
+
+    ``encoding`` is the character encoding for str instances, default is UTF-8.
+
+    ``default(obj)`` is a function that should return a serializable version
+    of obj or raise TypeError. The default simply raises TypeError.
+
+    To use a custom ``JSONEncoder`` subclass (e.g. one that overrides the
+    ``.default()`` method to serialize additional types), specify it with
+    the ``cls`` kwarg.
+
+    """
+    # cached encoder
+    if (not skipkeys and ensure_ascii and
+        check_circular and allow_nan and
+        cls is None and indent is None and separators is None and
+        encoding == 'utf-8' and default is None and not kw):
+        return _default_encoder.encode(obj)
+    if cls is None:
+        cls = JSONEncoder
+    return cls(
+        skipkeys=skipkeys, ensure_ascii=ensure_ascii,
+        check_circular=check_circular, allow_nan=allow_nan, indent=indent,
+        separators=separators, encoding=encoding, default=default,
+        **kw).encode(obj)
+
+
+_default_decoder = JSONDecoder(encoding=None, object_hook=None)
+
+
+def load(fp, encoding=None, cls=None, object_hook=None, parse_float=None,
+        parse_int=None, parse_constant=None, **kw):
+    """Deserialize ``fp`` (a ``.read()``-supporting file-like object containing
+    a JSON document) to a Python object.
+
+    If the contents of ``fp`` is encoded with an ASCII based encoding other
+    than utf-8 (e.g. latin-1), then an appropriate ``encoding`` name must
+    be specified. Encodings that are not ASCII based (such as UCS-2) are
+    not allowed, and should be wrapped with
+    ``codecs.getreader(fp)(encoding)``, or simply decoded to a ``unicode``
+    object and passed to ``loads()``
+
+    ``object_hook`` is an optional function that will be called with the
+    result of any object literal decode (a ``dict``). The return value of
+    ``object_hook`` will be used instead of the ``dict``. This feature
+    can be used to implement custom decoders (e.g. JSON-RPC class hinting).
+
+    To use a custom ``JSONDecoder`` subclass, specify it with the ``cls``
+    kwarg.
+
+    """
+    return loads(fp.read(),
+        encoding=encoding, cls=cls, object_hook=object_hook,
+        parse_float=parse_float, parse_int=parse_int,
+        parse_constant=parse_constant, **kw)
+
+
+def loads(s, encoding=None, cls=None, object_hook=None, parse_float=None,
+        parse_int=None, parse_constant=None, **kw):
+    """Deserialize ``s`` (a ``str`` or ``unicode`` instance containing a JSON
+    document) to a Python object.
+
+    If ``s`` is a ``str`` instance and is encoded with an ASCII based encoding
+    other than utf-8 (e.g. latin-1) then an appropriate ``encoding`` name
+    must be specified. Encodings that are not ASCII based (such as UCS-2)
+    are not allowed and should be decoded to ``unicode`` first.
+
+    ``object_hook`` is an optional function that will be called with the
+    result of any object literal decode (a ``dict``). The return value of
+    ``object_hook`` will be used instead of the ``dict``. This feature
+    can be used to implement custom decoders (e.g. JSON-RPC class hinting).
+
+    ``parse_float``, if specified, will be called with the string
+    of every JSON float to be decoded. By default this is equivalent to
+    float(num_str). This can be used to use another datatype or parser
+    for JSON floats (e.g. decimal.Decimal).
+
+    ``parse_int``, if specified, will be called with the string
+    of every JSON int to be decoded. By default this is equivalent to
+    int(num_str). This can be used to use another datatype or parser
+    for JSON integers (e.g. float).
+
+    ``parse_constant``, if specified, will be called with one of the
+    following strings: -Infinity, Infinity, NaN, null, true, false.
+    This can be used to raise an exception if invalid JSON numbers
+    are encountered.
+
+    To use a custom ``JSONDecoder`` subclass, specify it with the ``cls``
+    kwarg.
+
+    """
+    if (cls is None and encoding is None and object_hook is None and
+            parse_int is None and parse_float is None and
+            parse_constant is None and not kw):
+        return _default_decoder.decode(s)
+    if cls is None:
+        cls = JSONDecoder
+    if object_hook is not None:
+        kw['object_hook'] = object_hook
+    if parse_float is not None:
+        kw['parse_float'] = parse_float
+    if parse_int is not None:
+        kw['parse_int'] = parse_int
+    if parse_constant is not None:
+        kw['parse_constant'] = parse_constant
+    return cls(encoding=encoding, **kw).decode(s)
diff --git a/ansible/plugins/mitogen-0.2.6/ansible_mitogen/compat/simplejson/decoder.py b/ansible/plugins/mitogen-0.2.6/ansible_mitogen/compat/simplejson/decoder.py
new file mode 100644
index 000000000..b769ea486
--- /dev/null
+++ b/ansible/plugins/mitogen-0.2.6/ansible_mitogen/compat/simplejson/decoder.py
@@ -0,0 +1,354 @@
+"""Implementation of JSONDecoder
+"""
+import re
+import sys
+import struct
+
+from simplejson.scanner import make_scanner
+try:
+    from simplejson._speedups import scanstring as c_scanstring
+except ImportError:
+    c_scanstring = None
+
+__all__ = ['JSONDecoder']
+
+FLAGS = re.VERBOSE | re.MULTILINE | re.DOTALL
+
+def _floatconstants():
+    _BYTES = '7FF80000000000007FF0000000000000'.decode('hex')
+    if sys.byteorder != 'big':
+        _BYTES = _BYTES[:8][::-1] + _BYTES[8:][::-1]
+    nan, inf = struct.unpack('dd', _BYTES)
+    return nan, inf, -inf
+
+NaN, PosInf, NegInf = _floatconstants()
+
+
+def linecol(doc, pos):
+    lineno = doc.count('\n', 0, pos) + 1
+    if lineno == 1:
+        colno = pos
+    else:
+        colno = pos - doc.rindex('\n', 0, pos)
+    return lineno, colno
+
+
+def errmsg(msg, doc, pos, end=None):
+    # Note that this function is called from _speedups
+    lineno, colno = linecol(doc, pos)
+    if end is None:
+        #fmt = '{0}: line {1} column {2} (char {3})'
+        #return fmt.format(msg, lineno, colno, pos)
+        fmt = '%s: line %d column %d (char %d)'
+        return fmt % (msg, lineno, colno, pos)
+    endlineno, endcolno = linecol(doc, end)
+    #fmt = '{0}: line {1} column {2} - line {3} column {4} (char {5} - {6})'
+    #return fmt.format(msg, lineno, colno, endlineno, endcolno, pos, end)
+    fmt = '%s: line %d column %d - line %d column %d (char %d - %d)'
+    return fmt % (msg, lineno, colno, endlineno, endcolno, pos, end)
+
+
+_CONSTANTS = {
+    '-Infinity': NegInf,
+    'Infinity': PosInf,
+    'NaN': NaN,
+}
+
+STRINGCHUNK = re.compile(r'(.*?)(["\\\x00-\x1f])', FLAGS)
+BACKSLASH = {
+    '"': u'"', '\\': u'\\', '/': u'/',
+    'b': u'\b', 'f': u'\f', 'n': u'\n', 'r': u'\r', 't': u'\t',
+}
+
+DEFAULT_ENCODING = "utf-8"
+
+def py_scanstring(s, end, encoding=None, strict=True, _b=BACKSLASH, _m=STRINGCHUNK.match):
+    """Scan the string s for a JSON string. End is the index of the
+    character in s after the quote that started the JSON string.
+    Unescapes all valid JSON string escape sequences and raises ValueError
+    on attempt to decode an invalid string. If strict is False then literal
+    control characters are allowed in the string.
+    
+    Returns a tuple of the decoded string and the index of the character in s
+    after the end quote."""
+    if encoding is None:
+        encoding = DEFAULT_ENCODING
+    chunks = []
+    _append = chunks.append
+    begin = end - 1
+    while 1:
+        chunk = _m(s, end)
+        if chunk is None:
+            raise ValueError(
+                errmsg("Unterminated string starting at", s, begin))
+        end = chunk.end()
+        content, terminator = chunk.groups()
+        # Content is contains zero or more unescaped string characters
+        if content:
+            if not isinstance(content, unicode):
+                content = unicode(content, encoding)
+            _append(content)
+        # Terminator is the end of string, a literal control character,
+        # or a backslash denoting that an escape sequence follows
+        if terminator == '"':
+            break
+        elif terminator != '\\':
+            if strict:
+                msg = "Invalid control character %r at" % (terminator,)
+                #msg = "Invalid control character {0!r} at".format(terminator)
+                raise ValueError(errmsg(msg, s, end))
+            else:
+                _append(terminator)
+                continue
+        try:
+            esc = s[end]
+        except IndexError:
+            raise ValueError(
+                errmsg("Unterminated string starting at", s, begin))
+        # If not a unicode escape sequence, must be in the lookup table
+        if esc != 'u':
+            try:
+                char = _b[esc]
+            except KeyError:
+                msg = "Invalid \\escape: " + repr(esc)
+                raise ValueError(errmsg(msg, s, end))
+            end += 1
+        else:
+            # Unicode escape sequence
+            esc = s[end + 1:end + 5]
+            next_end = end + 5
+            if len(esc) != 4:
+                msg = "Invalid \\uXXXX escape"
+                raise ValueError(errmsg(msg, s, end))
+            uni = int(esc, 16)
+            # Check for surrogate pair on UCS-4 systems
+            if 0xd800 <= uni <= 0xdbff and sys.maxunicode > 65535:
+                msg = "Invalid \\uXXXX\\uXXXX surrogate pair"
+                if not s[end + 5:end + 7] == '\\u':
+                    raise ValueError(errmsg(msg, s, end))
+                esc2 = s[end + 7:end + 11]
+                if len(esc2) != 4:
+                    raise ValueError(errmsg(msg, s, end))
+                uni2 = int(esc2, 16)
+                uni = 0x10000 + (((uni - 0xd800) << 10) | (uni2 - 0xdc00))
+                next_end += 6
+            char = unichr(uni)
+            end = next_end
+        # Append the unescaped character
+        _append(char)
+    return u''.join(chunks), end
+
+
+# Use speedup if available
+scanstring = c_scanstring or py_scanstring
+
+WHITESPACE = re.compile(r'[ \t\n\r]*', FLAGS)
+WHITESPACE_STR = ' \t\n\r'
+
+def JSONObject((s, end), encoding, strict, scan_once, object_hook, _w=WHITESPACE.match, _ws=WHITESPACE_STR):
+    pairs = {}
+    # Use a slice to prevent IndexError from being raised, the following
+    # check will raise a more specific ValueError if the string is empty
+    nextchar = s[end:end + 1]
+    # Normally we expect nextchar == '"'
+    if nextchar != '"':
+        if nextchar in _ws:
+            end = _w(s, end).end()
+            nextchar = s[end:end + 1]
+        # Trivial empty object
+        if nextchar == '}':
+            return pairs, end + 1
+        elif nextchar != '"':
+            raise ValueError(errmsg("Expecting property name", s, end))
+    end += 1
+    while True:
+        key, end = scanstring(s, end, encoding, strict)
+
+        # To skip some function call overhead we optimize the fast paths where
+        # the JSON key separator is ": " or just ":".
+        if s[end:end + 1] != ':':
+            end = _w(s, end).end()
+            if s[end:end + 1] != ':':
+                raise ValueError(errmsg("Expecting : delimiter", s, end))
+
+        end += 1
+
+        try:
+            if s[end] in _ws:
+                end += 1
+                if s[end] in _ws:
+                    end = _w(s, end + 1).end()
+        except IndexError:
+            pass
+
+        try:
+            value, end = scan_once(s, end)
+        except StopIteration:
+            raise ValueError(errmsg("Expecting object", s, end))
+        pairs[key] = value
+
+        try:
+            nextchar = s[end]
+            if nextchar in _ws:
+                end = _w(s, end + 1).end()
+                nextchar = s[end]
+        except IndexError:
+            nextchar = ''
+        end += 1
+
+        if nextchar == '}':
+            break
+        elif nextchar != ',':
+            raise ValueError(errmsg("Expecting , delimiter", s, end - 1))
+
+        try:
+            nextchar = s[end]
+            if nextchar in _ws:
+                end += 1
+                nextchar = s[end]
+                if nextchar in _ws:
+                    end = _w(s, end + 1).end()
+                    nextchar = s[end]
+        except IndexError:
+            nextchar = ''
+
+        end += 1
+        if nextchar != '"':
+            raise ValueError(errmsg("Expecting property name", s, end - 1))
+
+    if object_hook is not None:
+        pairs = object_hook(pairs)
+    return pairs, end
+
+def JSONArray((s, end), scan_once, _w=WHITESPACE.match, _ws=WHITESPACE_STR):
+    values = []
+    nextchar = s[end:end + 1]
+    if nextchar in _ws:
+        end = _w(s, end + 1).end()
+        nextchar = s[end:end + 1]
+    # Look-ahead for trivial empty array
+    if nextchar == ']':
+        return values, end + 1
+    _append = values.append
+    while True:
+        try:
+            value, end = scan_once(s, end)
+        except StopIteration:
+            raise ValueError(errmsg("Expecting object", s, end))
+        _append(value)
+        nextchar = s[end:end + 1]
+        if nextchar in _ws:
+            end = _w(s, end + 1).end()
+            nextchar = s[end:end + 1]
+        end += 1
+        if nextchar == ']':
+            break
+        elif nextchar != ',':
+            raise ValueError(errmsg("Expecting , delimiter", s, end))
+
+        try:
+            if s[end] in _ws:
+                end += 1
+                if s[end] in _ws:
+                    end = _w(s, end + 1).end()
+        except IndexError:
+            pass
+
+    return values, end
+
+class JSONDecoder(object):
+    """Simple JSON <http://json.org> decoder
+
+    Performs the following translations in decoding by default:
+
+    +---------------+-------------------+
+    | JSON          | Python            |
+    +===============+===================+
+    | object        | dict              |
+    +---------------+-------------------+
+    | array         | list              |
+    +---------------+-------------------+
+    | string        | unicode           |
+    +---------------+-------------------+
+    | number (int)  | int, long         |
+    +---------------+-------------------+
+    | number (real) | float             |
+    +---------------+-------------------+
+    | true          | True              |
+    +---------------+-------------------+
+    | false         | False             |
+    +---------------+-------------------+
+    | null          | None              |
+    +---------------+-------------------+
+
+    It also understands ``NaN``, ``Infinity``, and ``-Infinity`` as
+    their corresponding ``float`` values, which is outside the JSON spec.
+
+    """
+
+    def __init__(self, encoding=None, object_hook=None, parse_float=None,
+            parse_int=None, parse_constant=None, strict=True):
+        """``encoding`` determines the encoding used to interpret any ``str``
+        objects decoded by this instance (utf-8 by default).  It has no
+        effect when decoding ``unicode`` objects.
+
+        Note that currently only encodings that are a superset of ASCII work,
+        strings of other encodings should be passed in as ``unicode``.
+
+        ``object_hook``, if specified, will be called with the result
+        of every JSON object decoded and its return value will be used in
+        place of the given ``dict``.  This can be used to provide custom
+        deserializations (e.g. to support JSON-RPC class hinting).
+
+        ``parse_float``, if specified, will be called with the string
+        of every JSON float to be decoded. By default this is equivalent to
+        float(num_str). This can be used to use another datatype or parser
+        for JSON floats (e.g. decimal.Decimal).
+
+        ``parse_int``, if specified, will be called with the string
+        of every JSON int to be decoded. By default this is equivalent to
+        int(num_str). This can be used to use another datatype or parser
+        for JSON integers (e.g. float).
+
+        ``parse_constant``, if specified, will be called with one of the
+        following strings: -Infinity, Infinity, NaN.
+        This can be used to raise an exception if invalid JSON numbers
+        are encountered.
+
+        """
+        self.encoding = encoding
+        self.object_hook = object_hook
+        self.parse_float = parse_float or float
+        self.parse_int = parse_int or int
+        self.parse_constant = parse_constant or _CONSTANTS.__getitem__
+        self.strict = strict
+        self.parse_object = JSONObject
+        self.parse_array = JSONArray
+        self.parse_string = scanstring
+        self.scan_once = make_scanner(self)
+
+    def decode(self, s, _w=WHITESPACE.match):
+        """Return the Python representation of ``s`` (a ``str`` or ``unicode``
+        instance containing a JSON document)
+
+        """
+        obj, end = self.raw_decode(s, idx=_w(s, 0).end())
+        end = _w(s, end).end()
+        if end != len(s):
+            raise ValueError(errmsg("Extra data", s, end, len(s)))
+        return obj
+
+    def raw_decode(self, s, idx=0):
+        """Decode a JSON document from ``s`` (a ``str`` or ``unicode`` beginning
+        with a JSON document) and return a 2-tuple of the Python
+        representation and the index in ``s`` where the document ended.
+
+        This can be used to decode a JSON document from a string that may
+        have extraneous data at the end.
+
+        """
+        try:
+            obj, end = self.scan_once(s, idx)
+        except StopIteration:
+            raise ValueError("No JSON object could be decoded")
+        return obj, end
diff --git a/ansible/plugins/mitogen-0.2.6/ansible_mitogen/compat/simplejson/encoder.py b/ansible/plugins/mitogen-0.2.6/ansible_mitogen/compat/simplejson/encoder.py
new file mode 100644
index 000000000..cf5829036
--- /dev/null
+++ b/ansible/plugins/mitogen-0.2.6/ansible_mitogen/compat/simplejson/encoder.py
@@ -0,0 +1,440 @@
+"""Implementation of JSONEncoder
+"""
+import re
+
+try:
+    from simplejson._speedups import encode_basestring_ascii as c_encode_basestring_ascii
+except ImportError:
+    c_encode_basestring_ascii = None
+try:
+    from simplejson._speedups import make_encoder as c_make_encoder
+except ImportError:
+    c_make_encoder = None
+
+ESCAPE = re.compile(r'[\x00-\x1f\\"\b\f\n\r\t]')  # chars that must always be escaped in a JSON string
+ESCAPE_ASCII = re.compile(r'([\\"]|[^\ -~])')  # additionally escape everything outside printable ASCII
+HAS_UTF8 = re.compile(r'[\x80-\xff]')  # detects non-ASCII bytes that need a utf-8 decode first
+ESCAPE_DCT = {  # short-form escape translations
+    '\\': '\\\\',
+    '"': '\\"',
+    '\b': '\\b',
+    '\f': '\\f',
+    '\n': '\\n',
+    '\r': '\\r',
+    '\t': '\\t',
+}
+for i in range(0x20):  # remaining control chars fall back to \uXXXX escapes
+    #ESCAPE_DCT.setdefault(chr(i), '\\u{0:04x}'.format(i))
+    ESCAPE_DCT.setdefault(chr(i), '\\u%04x' % (i,))
+
+# Assume this produces an infinity on all machines (probably not guaranteed)
+INFINITY = float('1e66666')
+FLOAT_REPR = repr
+
+def encode_basestring(s):
+    """Return a JSON representation of a Python string
+
+    """
+    def replace(match):  # map each escapable char through ESCAPE_DCT
+        return ESCAPE_DCT[match.group(0)]
+    return '"' + ESCAPE.sub(replace, s) + '"'
+
+
+def py_encode_basestring_ascii(s):
+    """Return an ASCII-only JSON representation of a Python string
+
+    """
+    if isinstance(s, str) and HAS_UTF8.search(s) is not None:  # Python 2 byte string with non-ASCII data
+        s = s.decode('utf-8')
+    def replace(match):
+        s = match.group(0)
+        try:
+            return ESCAPE_DCT[s]
+        except KeyError:  # no short escape; emit \uXXXX form(s)
+            n = ord(s)
+            if n < 0x10000:
+                #return '\\u{0:04x}'.format(n)
+                return '\\u%04x' % (n,)
+            else:
+                # surrogate pair
+                n -= 0x10000
+                s1 = 0xd800 | ((n >> 10) & 0x3ff)
+                s2 = 0xdc00 | (n & 0x3ff)
+                #return '\\u{0:04x}\\u{1:04x}'.format(s1, s2)
+                return '\\u%04x\\u%04x' % (s1, s2)
+    return '"' + str(ESCAPE_ASCII.sub(replace, s)) + '"'
+
+
+encode_basestring_ascii = c_encode_basestring_ascii or py_encode_basestring_ascii  # prefer the C speedup when available
+
+class JSONEncoder(object):
+    """Extensible JSON <http://json.org> encoder for Python data structures.
+
+    Supports the following objects and types by default:
+
+    +-------------------+---------------+
+    | Python            | JSON          |
+    +===================+===============+
+    | dict              | object        |
+    +-------------------+---------------+
+    | list, tuple       | array         |
+    +-------------------+---------------+
+    | str, unicode      | string        |
+    +-------------------+---------------+
+    | int, long, float  | number        |
+    +-------------------+---------------+
+    | True              | true          |
+    +-------------------+---------------+
+    | False             | false         |
+    +-------------------+---------------+
+    | None              | null          |
+    +-------------------+---------------+
+
+    To extend this to recognize other objects, subclass and implement a
+    ``.default()`` method with another method that returns a serializable
+    object for ``o`` if possible, otherwise it should call the superclass
+    implementation (to raise ``TypeError``).
+
+    """
+    item_separator = ', '
+    key_separator = ': '
+    def __init__(self, skipkeys=False, ensure_ascii=True,
+            check_circular=True, allow_nan=True, sort_keys=False,
+            indent=None, separators=None, encoding='utf-8', default=None):
+        """Constructor for JSONEncoder, with sensible defaults.
+
+        If skipkeys is false, then it is a TypeError to attempt
+        encoding of keys that are not str, int, long, float or None.  If
+        skipkeys is True, such items are simply skipped.
+
+        If ensure_ascii is true, the output is guaranteed to be str
+        objects with all incoming unicode characters escaped.  If
+        ensure_ascii is false, the output will be unicode object.
+
+        If check_circular is true, then lists, dicts, and custom encoded
+        objects will be checked for circular references during encoding to
+        prevent an infinite recursion (which would cause an OverflowError).
+        Otherwise, no such check takes place.
+
+        If allow_nan is true, then NaN, Infinity, and -Infinity will be
+        encoded as such.  This behavior is not JSON specification compliant,
+        but is consistent with most JavaScript based encoders and decoders.
+        Otherwise, it will be a ValueError to encode such floats.
+
+        If sort_keys is true, then the output of dictionaries will be
+        sorted by key; this is useful for regression tests to ensure
+        that JSON serializations can be compared on a day-to-day basis.
+
+        If indent is a non-negative integer, then JSON array
+        elements and object members will be pretty-printed with that
+        indent level.  An indent level of 0 will only insert newlines.
+        None is the most compact representation.
+
+        If specified, separators should be a (item_separator, key_separator)
+        tuple.  The default is (', ', ': ').  To get the most compact JSON
+        representation you should specify (',', ':') to eliminate whitespace.
+
+        If specified, default is a function that gets called for objects
+        that can't otherwise be serialized.  It should return a JSON encodable
+        version of the object or raise a ``TypeError``.
+
+        If encoding is not None, then all input strings will be
+        transformed into unicode using that encoding prior to JSON-encoding.
+        The default is UTF-8.
+
+        """
+
+        self.skipkeys = skipkeys
+        self.ensure_ascii = ensure_ascii
+        self.check_circular = check_circular
+        self.allow_nan = allow_nan
+        self.sort_keys = sort_keys
+        self.indent = indent
+        if separators is not None:  # override the class-level separator defaults
+            self.item_separator, self.key_separator = separators
+        if default is not None:  # shadow the default() method with the supplied callable
+            self.default = default
+        self.encoding = encoding
+
+    def default(self, o):
+        """Implement this method in a subclass such that it returns
+        a serializable object for ``o``, or calls the base implementation
+        (to raise a ``TypeError``).
+
+        For example, to support arbitrary iterators, you could
+        implement default like this::
+
+            def default(self, o):
+                try:
+                    iterable = iter(o)
+                except TypeError:
+                    pass
+                else:
+                    return list(iterable)
+                return JSONEncoder.default(self, o)
+
+        """
+        raise TypeError(repr(o) + " is not JSON serializable")
+
+    def encode(self, o):
+        """Return a JSON string representation of a Python data structure.
+
+        >>> JSONEncoder().encode({"foo": ["bar", "baz"]})
+        '{"foo": ["bar", "baz"]}'
+
+        """
+        # This is for extremely simple cases and benchmarks.
+        if isinstance(o, basestring):  # fast path: a lone string needs no iterencode machinery
+            if isinstance(o, str):
+                _encoding = self.encoding
+                if (_encoding is not None
+                        and not (_encoding == 'utf-8')):  # byte strings in other encodings must be decoded first
+                    o = o.decode(_encoding)
+            if self.ensure_ascii:
+                return encode_basestring_ascii(o)
+            else:
+                return encode_basestring(o)
+        # This doesn't pass the iterator directly to ''.join() because the
+        # exceptions aren't as detailed.  The list call should be roughly
+        # equivalent to the PySequence_Fast that ''.join() would do.
+        chunks = self.iterencode(o, _one_shot=True)
+        if not isinstance(chunks, (list, tuple)):
+            chunks = list(chunks)
+        return ''.join(chunks)
+
+    def iterencode(self, o, _one_shot=False):
+        """Encode the given object and yield each string
+        representation as available.
+
+        For example::
+
+            for chunk in JSONEncoder().iterencode(bigobject):
+                mysocket.write(chunk)
+
+        """
+        if self.check_circular:
+            markers = {}
+        else:
+            markers = None
+        if self.ensure_ascii:
+            _encoder = encode_basestring_ascii
+        else:
+            _encoder = encode_basestring
+        if self.encoding != 'utf-8':  # wrap the encoder to decode byte strings before escaping
+            def _encoder(o, _orig_encoder=_encoder, _encoding=self.encoding):
+                if isinstance(o, str):
+                    o = o.decode(_encoding)
+                return _orig_encoder(o)
+
+        def floatstr(o, allow_nan=self.allow_nan, _repr=FLOAT_REPR, _inf=INFINITY, _neginf=-INFINITY):
+            # Check for specials.  Note that this type of test is processor- and/or
+            # platform-specific, so do tests which don't depend on the internals.
+
+            if o != o:  # NaN is the only float unequal to itself
+                text = 'NaN'
+            elif o == _inf:
+                text = 'Infinity'
+            elif o == _neginf:
+                text = '-Infinity'
+            else:
+                return _repr(o)
+
+            if not allow_nan:
+                raise ValueError(
+                    "Out of range float values are not JSON compliant: " +
+                    repr(o))
+
+            return text
+
+
+        if _one_shot and c_make_encoder is not None and not self.indent and not self.sort_keys:  # C encoder handles only the compact, unsorted case
+            _iterencode = c_make_encoder(
+                markers, self.default, _encoder, self.indent,
+                self.key_separator, self.item_separator, self.sort_keys,
+                self.skipkeys, self.allow_nan)
+        else:
+            _iterencode = _make_iterencode(
+                markers, self.default, _encoder, self.indent, floatstr,
+                self.key_separator, self.item_separator, self.sort_keys,
+                self.skipkeys, _one_shot)
+        return _iterencode(o, 0)
+
+def _make_iterencode(markers, _default, _encoder, _indent, _floatstr, _key_separator, _item_separator, _sort_keys, _skipkeys, _one_shot,
+        ## HACK: hand-optimized bytecode; turn globals into locals
+        False=False,
+        True=True,
+        ValueError=ValueError,
+        basestring=basestring,
+        dict=dict,
+        float=float,
+        id=id,
+        int=int,
+        isinstance=isinstance,
+        list=list,
+        long=long,
+        str=str,
+        tuple=tuple,
+    ):
+
+    def _iterencode_list(lst, _current_indent_level):
+        if not lst:
+            yield '[]'
+            return
+        if markers is not None:  # circular-reference tracking enabled
+            markerid = id(lst)
+            if markerid in markers:
+                raise ValueError("Circular reference detected")
+            markers[markerid] = lst
+        buf = '['
+        if _indent is not None:
+            _current_indent_level += 1
+            newline_indent = '\n' + (' ' * (_indent * _current_indent_level))
+            separator = _item_separator + newline_indent
+            buf += newline_indent
+        else:
+            newline_indent = None
+            separator = _item_separator
+        first = True
+        for value in lst:
+            if first:
+                first = False
+            else:
+                buf = separator  # after the first item the pending prefix is just the separator
+            if isinstance(value, basestring):
+                yield buf + _encoder(value)
+            elif value is None:
+                yield buf + 'null'
+            elif value is True:
+                yield buf + 'true'
+            elif value is False:
+                yield buf + 'false'
+            elif isinstance(value, (int, long)):
+                yield buf + str(value)
+            elif isinstance(value, float):
+                yield buf + _floatstr(value)
+            else:  # containers and unknown types recurse
+                yield buf
+                if isinstance(value, (list, tuple)):
+                    chunks = _iterencode_list(value, _current_indent_level)
+                elif isinstance(value, dict):
+                    chunks = _iterencode_dict(value, _current_indent_level)
+                else:
+                    chunks = _iterencode(value, _current_indent_level)
+                for chunk in chunks:
+                    yield chunk
+        if newline_indent is not None:
+            _current_indent_level -= 1
+            yield '\n' + (' ' * (_indent * _current_indent_level))
+        yield ']'
+        if markers is not None:  # done with this container; allow it elsewhere
+            del markers[markerid]
+
+    def _iterencode_dict(dct, _current_indent_level):
+        if not dct:
+            yield '{}'
+            return
+        if markers is not None:
+            markerid = id(dct)
+            if markerid in markers:
+                raise ValueError("Circular reference detected")
+            markers[markerid] = dct
+        yield '{'
+        if _indent is not None:
+            _current_indent_level += 1
+            newline_indent = '\n' + (' ' * (_indent * _current_indent_level))
+            item_separator = _item_separator + newline_indent
+            yield newline_indent
+        else:
+            newline_indent = None
+            item_separator = _item_separator
+        first = True
+        if _sort_keys:
+            items = dct.items()
+            items.sort(key=lambda kv: kv[0])  # Python 2: items() is a list, sorted in place
+        else:
+            items = dct.iteritems()  # Python 2 iterator
+        for key, value in items:
+            if isinstance(key, basestring):
+                pass
+            # JavaScript is weakly typed for these, so it makes sense to
+            # also allow them.  Many encoders seem to do something like this.
+            elif isinstance(key, float):
+                key = _floatstr(key)
+            elif key is True:
+                key = 'true'
+            elif key is False:
+                key = 'false'
+            elif key is None:
+                key = 'null'
+            elif isinstance(key, (int, long)):
+                key = str(key)
+            elif _skipkeys:  # silently drop unserializable keys when requested
+                continue
+            else:
+                raise TypeError("key " + repr(key) + " is not a string")
+            if first:
+                first = False
+            else:
+                yield item_separator
+            yield _encoder(key)
+            yield _key_separator
+            if isinstance(value, basestring):
+                yield _encoder(value)
+            elif value is None:
+                yield 'null'
+            elif value is True:
+                yield 'true'
+            elif value is False:
+                yield 'false'
+            elif isinstance(value, (int, long)):
+                yield str(value)
+            elif isinstance(value, float):
+                yield _floatstr(value)
+            else:
+                if isinstance(value, (list, tuple)):
+                    chunks = _iterencode_list(value, _current_indent_level)
+                elif isinstance(value, dict):
+                    chunks = _iterencode_dict(value, _current_indent_level)
+                else:
+                    chunks = _iterencode(value, _current_indent_level)
+                for chunk in chunks:
+                    yield chunk
+        if newline_indent is not None:
+            _current_indent_level -= 1
+            yield '\n' + (' ' * (_indent * _current_indent_level))
+        yield '}'
+        if markers is not None:
+            del markers[markerid]
+
+    def _iterencode(o, _current_indent_level):
+        if isinstance(o, basestring):
+            yield _encoder(o)
+        elif o is None:
+            yield 'null'
+        elif o is True:
+            yield 'true'
+        elif o is False:
+            yield 'false'
+        elif isinstance(o, (int, long)):
+            yield str(o)
+        elif isinstance(o, float):
+            yield _floatstr(o)
+        elif isinstance(o, (list, tuple)):
+            for chunk in _iterencode_list(o, _current_indent_level):
+                yield chunk
+        elif isinstance(o, dict):
+            for chunk in _iterencode_dict(o, _current_indent_level):
+                yield chunk
+        else:
+            if markers is not None:
+                markerid = id(o)
+                if markerid in markers:
+                    raise ValueError("Circular reference detected")
+                markers[markerid] = o
+            o = _default(o)  # last resort: user-supplied conversion hook
+            for chunk in _iterencode(o, _current_indent_level):
+                yield chunk
+            if markers is not None:
+                del markers[markerid]
+
+    return _iterencode
diff --git a/ansible/plugins/mitogen-0.2.6/ansible_mitogen/compat/simplejson/scanner.py b/ansible/plugins/mitogen-0.2.6/ansible_mitogen/compat/simplejson/scanner.py
new file mode 100644
index 000000000..adbc6ec97
--- /dev/null
+++ b/ansible/plugins/mitogen-0.2.6/ansible_mitogen/compat/simplejson/scanner.py
@@ -0,0 +1,65 @@
+"""JSON token scanner
+"""
+import re
+try:
+    from simplejson._speedups import make_scanner as c_make_scanner
+except ImportError:
+    c_make_scanner = None
+
+__all__ = ['make_scanner']
+
+NUMBER_RE = re.compile(
+    r'(-?(?:0|[1-9]\d*))(\.\d+)?([eE][-+]?\d+)?',
+    (re.VERBOSE | re.MULTILINE | re.DOTALL))  # groups: integer part, fraction, exponent
+
+def py_make_scanner(context):
+    # Build a closure scanning one JSON value at a time, with the decoder's
+    # settings hoisted into locals for speed.
+    parse_object = context.parse_object
+    parse_array = context.parse_array
+    parse_string = context.parse_string
+    match_number = NUMBER_RE.match
+    encoding = context.encoding
+    strict = context.strict
+    parse_float = context.parse_float
+    parse_int = context.parse_int
+    parse_constant = context.parse_constant
+    object_hook = context.object_hook
+
+    def _scan_once(string, idx):
+        try:
+            nextchar = string[idx]
+        except IndexError:  # out of input; caller converts to ValueError
+            raise StopIteration
+
+        if nextchar == '"':
+            return parse_string(string, idx + 1, encoding, strict)
+        elif nextchar == '{':
+            return parse_object((string, idx + 1), encoding, strict, _scan_once, object_hook)
+        elif nextchar == '[':
+            return parse_array((string, idx + 1), _scan_once)
+        elif nextchar == 'n' and string[idx:idx + 4] == 'null':
+            return None, idx + 4
+        elif nextchar == 't' and string[idx:idx + 4] == 'true':
+            return True, idx + 4
+        elif nextchar == 'f' and string[idx:idx + 5] == 'false':
+            return False, idx + 5
+
+        m = match_number(string, idx)
+        if m is not None:
+            integer, frac, exp = m.groups()
+            if frac or exp:  # any fraction or exponent makes it a float
+                res = parse_float(integer + (frac or '') + (exp or ''))
+            else:
+                res = parse_int(integer)
+            return res, m.end()
+        elif nextchar == 'N' and string[idx:idx + 3] == 'NaN':
+            return parse_constant('NaN'), idx + 3
+        elif nextchar == 'I' and string[idx:idx + 8] == 'Infinity':
+            return parse_constant('Infinity'), idx + 8
+        elif nextchar == '-' and string[idx:idx + 9] == '-Infinity':
+            return parse_constant('-Infinity'), idx + 9
+        else:
+            raise StopIteration  # not a recognizable JSON token
+
+    return _scan_once
+
+make_scanner = c_make_scanner or py_make_scanner  # prefer the C speedup when available
diff --git a/ansible/plugins/mitogen-0.2.6/ansible_mitogen/connection.py b/ansible/plugins/mitogen-0.2.6/ansible_mitogen/connection.py
new file mode 100644
index 000000000..bd4330ff2
--- /dev/null
+++ b/ansible/plugins/mitogen-0.2.6/ansible_mitogen/connection.py
@@ -0,0 +1,1000 @@
+# Copyright 2019, David Wilson
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its contributors
+# may be used to endorse or promote products derived from this software without
+# specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+
+from __future__ import absolute_import
+from __future__ import unicode_literals
+
+import errno
+import logging
+import os
+import pprint
+import stat
+import sys
+import time
+
+import jinja2.runtime
+import ansible.constants as C
+import ansible.errors
+import ansible.plugins.connection
+import ansible.utils.shlex
+
+import mitogen.core
+import mitogen.fork
+import mitogen.unix
+import mitogen.utils
+
+import ansible_mitogen.parsing
+import ansible_mitogen.process
+import ansible_mitogen.services
+import ansible_mitogen.target
+import ansible_mitogen.transport_config
+
+
+LOG = logging.getLogger(__name__)
+
+
+def optional_int(value):
+    """
+    Convert `value` to an integer, returning :data:`None` if `value` is
+    :data:`None` or cannot be parsed as an integer.
+    """
+    try:
+        return int(value)
+    except (TypeError, ValueError):
+        return None
+
+
+def convert_bool(obj):
+    if isinstance(obj, bool):  # already a bool; pass through unchanged
+        return obj
+    if str(obj).lower() in ('no', 'false', '0'):
+        return False
+    if str(obj).lower() not in ('yes', 'true', '1'):  # anything unrecognized is a hard error
+        raise ansible.errors.AnsibleConnectionFailure(
+            'expected yes/no/true/false/0/1, got %r' % (obj,)
+        )
+    return True
+
+
+def default(value, default):
+    """
+    Return `default` if `value` is :data:`None`, otherwise return `value`.
+    """
+    if value is None:
+        return default
+    return value
+
+
+def _connect_local(spec):
+    """
+    Return ContextService arguments for a local connection.
+    """
+    return {
+        'method': 'local',
+        'kwargs': {
+            'python_path': spec.python_path(),  # interpreter to run on the target
+        }
+    }
+
+
+def _connect_ssh(spec):
+    """
+    Return ContextService arguments for an SSH connection.
+    """
+    if C.HOST_KEY_CHECKING:  # mirror Ansible's host_key_checking setting
+        check_host_keys = 'enforce'
+    else:
+        check_host_keys = 'ignore'
+
+    # #334: tilde-expand private_key_file to avoid implementation difference
+    # between Python and OpenSSH.
+    private_key_file = spec.private_key_file()
+    if private_key_file is not None:
+        private_key_file = os.path.expanduser(private_key_file)
+
+    return {
+        'method': 'ssh',
+        'kwargs': {
+            'check_host_keys': check_host_keys,
+            'hostname': spec.remote_addr(),
+            'username': spec.remote_user(),
+            'compression': convert_bool(
+                default(spec.mitogen_ssh_compression(), True)  # compression on unless explicitly disabled
+            ),
+            'password': spec.password(),
+            'port': spec.port(),
+            'python_path': spec.python_path(),
+            'identity_file': private_key_file,
+            'identities_only': False,
+            'ssh_path': spec.ssh_executable(),
+            'connect_timeout': spec.ansible_ssh_timeout(),
+            'ssh_args': spec.ssh_args(),
+            'ssh_debug_level': spec.mitogen_ssh_debug_level(),
+        }
+    }
+
+
+def _connect_docker(spec):
+    """
+    Return ContextService arguments for a Docker connection.
+    """
+    return {
+        'method': 'docker',
+        'kwargs': {
+            'username': spec.remote_user(),
+            'container': spec.remote_addr(),
+            'python_path': spec.python_path(),
+            'connect_timeout': spec.ansible_ssh_timeout() or spec.timeout(),  # fall back to the generic timeout
+        }
+    }
+
+
+def _connect_kubectl(spec):
+    """
+    Return ContextService arguments for a Kubernetes connection.
+    """
+    return {
+        'method': 'kubectl',
+        'kwargs': {
+            'pod': spec.remote_addr(),
+            'python_path': spec.python_path(),
+            'connect_timeout': spec.ansible_ssh_timeout() or spec.timeout(),  # fall back to the generic timeout
+            'kubectl_path': spec.mitogen_kubectl_path(),
+            'kubectl_args': spec.extra_args(),
+        }
+    }
+
+
+def _connect_jail(spec):
+    """
+    Return ContextService arguments for a FreeBSD jail connection.
+    """
+    return {
+        'method': 'jail',
+        'kwargs': {
+            'username': spec.remote_user(),
+            'container': spec.remote_addr(),
+            'python_path': spec.python_path(),
+            'connect_timeout': spec.ansible_ssh_timeout() or spec.timeout(),  # fall back to the generic timeout
+        }
+    }
+
+
+def _connect_lxc(spec):
+    """
+    Return ContextService arguments for an LXC Classic container connection.
+    """
+    return {
+        'method': 'lxc',
+        'kwargs': {
+            'container': spec.remote_addr(),
+            'python_path': spec.python_path(),
+            'lxc_attach_path': spec.mitogen_lxc_attach_path(),  # path to the lxc-attach binary
+            'connect_timeout': spec.ansible_ssh_timeout() or spec.timeout(),
+        }
+    }
+
+
+def _connect_lxd(spec):
+    """
+    Return ContextService arguments for an LXD container connection.
+    """
+    return {
+        'method': 'lxd',
+        'kwargs': {
+            'container': spec.remote_addr(),
+            'python_path': spec.python_path(),
+            'lxc_path': spec.mitogen_lxc_path(),  # path to the lxc client binary
+            'connect_timeout': spec.ansible_ssh_timeout() or spec.timeout(),
+        }
+    }
+
+
+def _connect_machinectl(spec):
+    """
+    Return ContextService arguments for a machinectl connection.
+    """
+    return _connect_setns(spec, kind='machinectl')  # machinectl is setns with a fixed kind
+
+
+def _connect_setns(spec, kind=None):
+    """
+    Return ContextService arguments for a mitogen_setns connection.
+    """
+    return {
+        'method': 'setns',
+        'kwargs': {
+            'container': spec.remote_addr(),
+            'username': spec.remote_user(),
+            'python_path': spec.python_path(),
+            'kind': kind or spec.mitogen_kind(),  # explicit kind wins over the inventory setting
+            'docker_path': spec.mitogen_docker_path(),
+            'lxc_path': spec.mitogen_lxc_path(),
+            'lxc_info_path': spec.mitogen_lxc_info_path(),
+            'machinectl_path': spec.mitogen_machinectl_path(),
+        }
+    }
+
+
+def _connect_su(spec):
+    """
+    Return ContextService arguments for su as a become method.
+    """
+    return {
+        'method': 'su',
+        'enable_lru': True,  # become contexts are cached for reuse
+        'kwargs': {
+            'username': spec.become_user(),
+            'password': spec.become_pass(),
+            'python_path': spec.python_path(),
+            'su_path': spec.become_exe(),
+            'connect_timeout': spec.timeout(),
+        }
+    }
+
+
+def _connect_sudo(spec):
+    """
+    Return ContextService arguments for sudo as a become method.
+    """
+    return {
+        'method': 'sudo',
+        'enable_lru': True,  # become contexts are cached for reuse
+        'kwargs': {
+            'username': spec.become_user(),
+            'password': spec.become_pass(),
+            'python_path': spec.python_path(),
+            'sudo_path': spec.become_exe(),
+            'connect_timeout': spec.timeout(),
+            'sudo_args': spec.sudo_args(),
+        }
+    }
+
+
+def _connect_doas(spec):
+    """
+    Return ContextService arguments for doas as a become method.
+    """
+    return {
+        'method': 'doas',
+        'enable_lru': True,  # become contexts are cached for reuse
+        'kwargs': {
+            'username': spec.become_user(),
+            'password': spec.become_pass(),
+            'python_path': spec.python_path(),
+            'doas_path': spec.become_exe(),
+            'connect_timeout': spec.timeout(),
+        }
+    }
+
+
+def _connect_mitogen_su(spec):
+    """
+    Return ContextService arguments for su as a first class connection.
+    """
+    return {
+        'method': 'su',
+        'kwargs': {
+            'username': spec.remote_user(),  # first-class: user comes from remote_user, not become_user
+            'password': spec.password(),
+            'python_path': spec.python_path(),
+            'su_path': spec.become_exe(),
+            'connect_timeout': spec.timeout(),
+        }
+    }
+
+
+def _connect_mitogen_sudo(spec):
+    """
+    Return ContextService arguments for sudo as a first class connection.
+    """
+    return {
+        'method': 'sudo',
+        'kwargs': {
+            'username': spec.remote_user(),  # first-class: user comes from remote_user, not become_user
+            'password': spec.password(),
+            'python_path': spec.python_path(),
+            'sudo_path': spec.become_exe(),
+            'connect_timeout': spec.timeout(),
+            'sudo_args': spec.sudo_args(),
+        }
+    }
+
+
+def _connect_mitogen_doas(spec):
+    """
+    Return ContextService arguments for doas as a first class connection.
+    """
+    return {
+        'method': 'doas',
+        'kwargs': {
+            'username': spec.remote_user(),  # first-class: user comes from remote_user, not become_user
+            'password': spec.password(),
+            'python_path': spec.python_path(),
+            'doas_path': spec.become_exe(),
+            'connect_timeout': spec.timeout(),
+        }
+    }
+
+
+#: Mapping of connection method names to functions invoked as `func(spec)`
+#: generating ContextService keyword arguments matching a connection
+#: specification.
+CONNECTION_METHOD = {
+    'docker': _connect_docker,
+    'kubectl': _connect_kubectl,
+    'jail': _connect_jail,
+    'local': _connect_local,
+    'lxc': _connect_lxc,
+    'lxd': _connect_lxd,
+    'machinectl': _connect_machinectl,
+    'setns': _connect_setns,
+    'ssh': _connect_ssh,
+    'smart': _connect_ssh,  # issue #548: treat the 'smart' transport as plain SSH.
+    'su': _connect_su,
+    'sudo': _connect_sudo,
+    'doas': _connect_doas,
+    'mitogen_su': _connect_mitogen_su,
+    'mitogen_sudo': _connect_mitogen_sudo,
+    'mitogen_doas': _connect_mitogen_doas,
+}
+
+
+class Broker(mitogen.master.Broker):
+    """
+    WorkerProcess maintains at most 2 file descriptors, therefore does not need
+    the exuberant syscall expense of EpollPoller, so override it and restore
+    the poll() poller.
+    """
+    poller_class = mitogen.core.Poller  # basic poller, sufficient for so few descriptors
+
+
+class CallChain(mitogen.parent.CallChain):
+    """
+    Extend :class:`mitogen.parent.CallChain` to additionally cause the
+    associated :class:`Connection` to be reset if a ChannelError occurs.
+
+    This only catches failures that occur while a call is pending, it is a
+    stop-gap until a more general method is available to notice connection in
+    every situation.
+    """
+    call_aborted_msg = (
+        'Mitogen was disconnected from the remote environment while a call '
+        'was in-progress. If you feel this is in error, please file a bug. '
+        'Original error was: %s'
+    )
+
+    def __init__(self, connection, context, pipelined=False):
+        super(CallChain, self).__init__(context, pipelined)
+        #: The connection to reset on CallError.
+        self._connection = connection
+
+    def _rethrow(self, recv):
+        try:
+            return recv.get().unpickle()
+        except mitogen.core.ChannelError as e:  # connection died while the call was pending
+            self._connection.reset()  # tear down so a later task can reconnect
+            raise ansible.errors.AnsibleConnectionFailure(
+                self.call_aborted_msg % (e,)
+            )
+
+    def call(self, func, *args, **kwargs):
+        """
+        Like :meth:`mitogen.parent.CallChain.call`, but log timings.
+        """
+        t0 = time.time()  # start of call, for the duration log below
+        try:
+            recv = self.call_async(func, *args, **kwargs)
+            return self._rethrow(recv)
+        finally:
+            LOG.debug('Call took %d ms: %r', 1000 * (time.time() - t0),
+                      mitogen.parent.CallSpec(func, args, kwargs))
+
+
class Connection(ansible.plugins.connection.ConnectionBase):
    """
    Ansible connection plug-in base that proxies all operations through the
    Mitogen connection multiplexer process.
    """

    #: mitogen.master.Broker for this worker.
    broker = None

    #: mitogen.master.Router for this worker.
    router = None

    #: mitogen.parent.Context representing the parent Context, which is
    #: presently always the connection multiplexer process.
    parent = None

    #: mitogen.parent.Context for the target account on the target, possibly
    #: reached via become.
    context = None

    #: Context for the login account on the target. This is always the login
    #: account, even when become=True.
    login_context = None

    #: Only sudo, su, and doas are supported for now.
    become_methods = ['sudo', 'su', 'doas']

    #: Dict containing init_child() return value as recorded at startup by
    #: ContextService. Contains:
    #:
    #:  fork_context:   Context connected to the fork parent process in the
    #:                  target account.
    #:  home_dir:       Target context's home directory.
    #:  good_temp_dir:  A writeable directory where new temporary directories
    #:                  can be created.
    init_child_result = None

    #: A :class:`mitogen.parent.CallChain` for calls made to the target
    #: account, to ensure subsequent calls fail with the original exception if
    #: pipelined directory creation or file transfer fails.
    chain = None

    #
    # Note: any of the attributes below may be :data:`None` if the connection
    # plugin was constructed directly by a non-cooperative action, such as in
    # the case of the synchronize module.
    #

    #: Set to the host name as it appears in inventory by on_action_run().
    inventory_hostname = None

    #: Set to task_vars by on_action_run().
    _task_vars = None

    #: Set to 'hostvars' by on_action_run().
    host_vars = None

    #: Set to the delegated-to hostname by on_action_run().
    delegate_to_hostname = None

    #: Set to '_loader.get_basedir()' by on_action_run(). Used by mitogen_local
    #: to change the working directory to that of the current playbook,
    #: matching vanilla Ansible behaviour.
    loader_basedir = None
+    def __init__(self, play_context, new_stdin, **kwargs):
+        assert ansible_mitogen.process.MuxProcess.unix_listener_path, (
+            'Mitogen connection types may only be instantiated '
+            'while the "mitogen" strategy is active.'
+        )
+        super(Connection, self).__init__(play_context, new_stdin)
+
+    def __del__(self):
+        """
+        Ansible cannot be trusted to always call close() e.g. the synchronize
+        action constructs a local connection like this. So provide a destructor
+        in the hopes of catching these cases.
+        """
+        # https://github.com/dw/mitogen/issues/140
+        self.close()
+
+    def on_action_run(self, task_vars, delegate_to_hostname, loader_basedir):
+        """
+        Invoked by ActionModuleMixin to indicate a new task is about to start
+        executing. We use the opportunity to grab relevant bits from the
+        task-specific data.
+
+        :param dict task_vars:
+            Task variable dictionary.
+        :param str delegate_to_hostname:
+            :data:`None`, or the template-expanded inventory hostname this task
+            is being delegated to. A similar variable exists on PlayContext
+            when ``delegate_to:`` is active, however it is unexpanded.
+        :param str loader_basedir:
+            Loader base directory; see :attr:`loader_basedir`.
+        """
+        self.inventory_hostname = task_vars['inventory_hostname']
+        self._task_vars = task_vars
+        self.host_vars = task_vars['hostvars']
+        self.delegate_to_hostname = delegate_to_hostname
+        self.loader_basedir = loader_basedir
+        self._mitogen_reset(mode='put')
+
+    def get_task_var(self, key, default=None):
+        """
+        Fetch the value of a task variable related to connection configuration,
+        or, if delegate_to is active, fetch the same variable via HostVars for
+        the delegated-to machine.
+
+        When running with delegate_to, Ansible tasks have variables associated
+        with the original machine, not the delegated-to machine, therefore it
+        does not make sense to extract connection-related configuration for the
+        delegated-to machine from them.
+        """
+        if self._task_vars:
+            if self.delegate_to_hostname is None:
+                if key in self._task_vars:
+                    return self._task_vars[key]
+            else:
+                delegated_vars = self._task_vars['ansible_delegated_vars']
+                if self.delegate_to_hostname in delegated_vars:
+                    task_vars = delegated_vars[self.delegate_to_hostname]
+                    if key in task_vars:
+                        return task_vars[key]
+
+        return default
+
    @property
    def homedir(self):
        """
        Home directory of the target account, connecting on first access.
        """
        self._connect()
        return self.init_child_result['home_dir']

    @property
    def connected(self):
        """
        :data:`True` when a target context has been established.
        """
        return self.context is not None
+
+    def _spec_from_via(self, proxied_inventory_name, via_spec):
+        """
+        Produce a dict connection specifiction given a string `via_spec`, of
+        the form `[[become_method:]become_user@]inventory_hostname`.
+        """
+        become_user, _, inventory_name = via_spec.rpartition('@')
+        become_method, _, become_user = become_user.rpartition(':')
+
+        # must use __contains__ to avoid a TypeError for a missing host on
+        # Ansible 2.3.
+        if self.host_vars is None or inventory_name not in self.host_vars:
+            raise ansible.errors.AnsibleConnectionFailure(
+                self.unknown_via_msg % (
+                    via_spec,
+                    proxied_inventory_name,
+                )
+            )
+
+        via_vars = self.host_vars[inventory_name]
+        return ansible_mitogen.transport_config.MitogenViaSpec(
+            inventory_name=inventory_name,
+            play_context=self._play_context,
+            host_vars=dict(via_vars),  # TODO: make it lazy
+            become_method=become_method or None,
+            become_user=become_user or None,
+        )
+
+    unknown_via_msg = 'mitogen_via=%s of %s specifies an unknown hostname'
+    via_cycle_msg = 'mitogen_via=%s of %s creates a cycle (%s)'
+
    def _stack_from_spec(self, spec, stack=(), seen_names=()):
        """
        Return a tuple of ContextService parameter dictionaries corresponding
        to the connection described by `spec`, and any connection referenced by
        its `mitogen_via` or `become` fields. Each element is a dict of the
        form::

            {
                # Optional. If present and `True`, this hop is eligible for
                # interpreter recycling.
                "enable_lru": True,
                # mitogen.master.Router method name.
                "method": "ssh",
                # mitogen.master.Router method kwargs.
                "kwargs": {
                    "hostname": "..."
                }
            }

        :param ansible_mitogen.transport_config.Spec spec:
            Connection specification.
        :param tuple stack:
            Stack elements from parent call (used for recursion).
        :param tuple seen_names:
            Inventory hostnames from parent call (cycle detection).
        :returns:
            Tuple of stack-element dicts.
        :raises ansible.errors.AnsibleConnectionFailure:
            If the `mitogen_via` chain contains a cycle.
        """
        # Cycle detection: refuse e.g. A via B via A.
        if spec.inventory_name() in seen_names:
            raise ansible.errors.AnsibleConnectionFailure(
                self.via_cycle_msg % (
                    spec.mitogen_via(),
                    spec.inventory_name(),
                    ' -> '.join(reversed(
                        seen_names + (spec.inventory_name(),)
                    )),
                )
            )

        if spec.mitogen_via():
            # Recursively prepend the via-host's hops ahead of our own.
            stack = self._stack_from_spec(
                self._spec_from_via(spec.inventory_name(), spec.mitogen_via()),
                stack=stack,
                seen_names=seen_names + (spec.inventory_name(),),
            )

        stack += (CONNECTION_METHOD[spec.transport()](spec),)
        # Become is an additional hop stacked on top of the login connection.
        if spec.become() and ((spec.become_user() != spec.remote_user()) or
                              C.BECOME_ALLOW_SAME_USER):
            stack += (CONNECTION_METHOD[spec.become_method()](spec),)

        return stack
+
+    def _connect_broker(self):
+        """
+        Establish a reference to the Broker, Router and parent context used for
+        connections.
+        """
+        if not self.broker:
+            self.broker = mitogen.master.Broker()
+            self.router, self.parent = mitogen.unix.connect(
+                path=ansible_mitogen.process.MuxProcess.unix_listener_path,
+                broker=self.broker,
+            )
+
+    def _build_stack(self):
+        """
+        Construct a list of dictionaries representing the connection
+        configuration between the controller and the target. This is
+        additionally used by the integration tests "mitogen_get_stack" action
+        to fetch the would-be connection configuration.
+        """
+        return self._stack_from_spec(
+            ansible_mitogen.transport_config.PlayContextSpec(
+                connection=self,
+                play_context=self._play_context,
+                transport=self.transport,
+                inventory_name=self.inventory_hostname,
+            )
+        )
+
+    def _connect_stack(self, stack):
+        """
+        Pass `stack` to ContextService, requesting a copy of the context object
+        representing the last tuple element. If no connection exists yet,
+        ContextService will recursively establish it before returning it or
+        throwing an error.
+
+        See :meth:`ansible_mitogen.services.ContextService.get` docstring for
+        description of the returned dictionary.
+        """
+        try:
+            dct = self.parent.call_service(
+                service_name='ansible_mitogen.services.ContextService',
+                method_name='get',
+                stack=mitogen.utils.cast(list(stack)),
+            )
+        except mitogen.core.CallError:
+            LOG.warning('Connection failed; stack configuration was:\n%s',
+                        pprint.pformat(stack))
+            raise
+
+        if dct['msg']:
+            if dct['method_name'] in self.become_methods:
+                raise ansible.errors.AnsibleModuleError(dct['msg'])
+            raise ansible.errors.AnsibleConnectionFailure(dct['msg'])
+
+        self.context = dct['context']
+        self.chain = CallChain(self, self.context, pipelined=True)
+        if self._play_context.become:
+            self.login_context = dct['via']
+        else:
+            self.login_context = self.context
+
+        self.init_child_result = dct['init_child_result']
+
+    def get_good_temp_dir(self):
+        """
+        Return the 'good temporary directory' as discovered by
+        :func:`ansible_mitogen.target.init_child` immediately after
+        ContextService constructed the target context.
+        """
+        self._connect()
+        return self.init_child_result['good_temp_dir']
+
+    def _connect(self):
+        """
+        Establish a connection to the master process's UNIX listener socket,
+        constructing a mitogen.master.Router to communicate with the master,
+        and a mitogen.parent.Context to represent it.
+
+        Depending on the original transport we should emulate, trigger one of
+        the _connect_*() service calls defined above to cause the master
+        process to establish the real connection on our behalf, or return a
+        reference to the existing one.
+        """
+        if self.connected:
+            return
+
+        self._connect_broker()
+        stack = self._build_stack()
+        self._connect_stack(stack)
+
+    def _mitogen_reset(self, mode):
+        """
+        Forget everything we know about the connected context. This function
+        cannot be called _reset() since that name is used as a public API by
+        Ansible 2.4 wait_for_connection plug-in.
+
+        :param str mode:
+            Name of ContextService method to use to discard the context, either
+            'put' or 'reset'.
+        """
+        if not self.context:
+            return
+
+        self.chain.reset()
+        self.parent.call_service(
+            service_name='ansible_mitogen.services.ContextService',
+            method_name=mode,
+            context=self.context
+        )
+
+        self.context = None
+        self.login_context = None
+        self.init_child_result = None
+        self.chain = None
+
    def _shutdown_broker(self):
        """
        Shutdown the broker thread during :meth:`close` or :meth:`reset`,
        waiting for it to exit and dropping the broker/router references.
        """
        if self.broker:
            self.broker.shutdown()
            # Wait for the broker thread to fully exit before forgetting it.
            self.broker.join()
            self.broker = None
            self.router = None

            # #420: Ansible executes "meta" actions in the top-level process,
            # meaning "reset_connection" will cause :class:`mitogen.core.Latch`
            # FDs to be cached and erroneously shared by children on subsequent
            # WorkerProcess forks. To handle that, call on_fork() to ensure any
            # shared state is discarded.
            # #490: only attempt to clean up when it's known that some
            # resources exist to cleanup, otherwise later __del__ double-call
            # to close() due to GC at random moment may obliterate an unrelated
            # Connection's resources.
            mitogen.fork.on_fork()
+
+    def close(self):
+        """
+        Arrange for the mitogen.master.Router running in the worker to
+        gracefully shut down, and wait for shutdown to complete. Safe to call
+        multiple times.
+        """
+        self._mitogen_reset(mode='put')
+        self._shutdown_broker()
+
    def _reset_find_task_vars(self):
        """
        Monstrous hack: since "meta: reset_connection" does not run from an
        action, we cannot capture task variables via :meth:`on_action_run`.
        Instead walk the parent frames searching for the `all_vars` local from
        StrategyBase._execute_meta(). If this fails, just leave task_vars
        unset, likely causing a subtly wrong configuration to be selected.
        """
        frame = sys._getframe()
        # Walk outward until the strategy's frame (or the stack top) is hit.
        while frame and not self._task_vars:
            self._task_vars = frame.f_locals.get('all_vars')
            frame = frame.f_back

    reset_compat_msg = (
        'Mitogen only supports "reset_connection" on Ansible 2.5.6 or later'
    )
+
+    def reset(self):
+        """
+        Explicitly terminate the connection to the remote host. This discards
+        any local state we hold for the connection, returns the Connection to
+        the 'disconnected' state, and informs ContextService the connection is
+        bad somehow, and should be shut down and discarded.
+        """
+        if self._task_vars is None:
+            self._reset_find_task_vars()
+
+        if self._play_context.remote_addr is None:
+            # <2.5.6 incorrectly populate PlayContext for reset_connection
+            # https://github.com/ansible/ansible/issues/27520
+            raise ansible.errors.AnsibleConnectionFailure(
+                self.reset_compat_msg
+            )
+
+        self._connect()
+        self._mitogen_reset(mode='reset')
+        self._shutdown_broker()
+
+    # Compatibility with Ansible 2.4 wait_for_connection plug-in.
+    _reset = reset
+
+    def get_chain(self, use_login=False, use_fork=False):
+        """
+        Return the :class:`mitogen.parent.CallChain` to use for executing
+        function calls.
+
+        :param bool use_login:
+            If :data:`True`, always return the chain for the login account
+            rather than any active become user.
+        :param bool use_fork:
+            If :data:`True`, return the chain for the fork parent.
+        :returns mitogen.parent.CallChain:
+        """
+        self._connect()
+        if use_login:
+            return self.login_context.default_call_chain
+        # See FORK_SUPPORTED comments in target.py.
+        if use_fork and self.init_child_result['fork_context'] is not None:
+            return self.init_child_result['fork_context'].default_call_chain
+        return self.chain
+
+    def spawn_isolated_child(self):
+        """
+        Fork or launch a new child off the target context.
+
+        :returns:
+            mitogen.core.Context of the new child.
+        """
+        return self.get_chain(use_fork=True).call(
+            ansible_mitogen.target.spawn_isolated_child
+        )
+
+    def get_extra_args(self):
+        """
+        Overridden by connections/mitogen_kubectl.py to a list of additional
+        arguments for the command.
+        """
+        # TODO: maybe use this for SSH too.
+        return []
+
+    def get_default_cwd(self):
+        """
+        Overridden by connections/mitogen_local.py to emulate behaviour of CWD
+        being fixed to that of ActionBase._loader.get_basedir().
+        """
+        return None
+
+    def get_default_env(self):
+        """
+        Overridden by connections/mitogen_local.py to emulate behaviour of
+        WorkProcess environment inherited from WorkerProcess.
+        """
+        return None
+
+    def exec_command(self, cmd, in_data='', sudoable=True, mitogen_chdir=None):
+        """
+        Implement exec_command() by calling the corresponding
+        ansible_mitogen.target function in the target.
+
+        :param str cmd:
+            Shell command to execute.
+        :param bytes in_data:
+            Data to supply on ``stdin`` of the process.
+        :returns:
+            (return code, stdout bytes, stderr bytes)
+        """
+        emulate_tty = (not in_data and sudoable)
+        rc, stdout, stderr = self.get_chain().call(
+            ansible_mitogen.target.exec_command,
+            cmd=mitogen.utils.cast(cmd),
+            in_data=mitogen.utils.cast(in_data),
+            chdir=mitogen_chdir or self.get_default_cwd(),
+            emulate_tty=emulate_tty,
+        )
+
+        stderr += b'Shared connection to %s closed.%s' % (
+            self._play_context.remote_addr.encode(),
+            (b'\r\n' if emulate_tty else b'\n'),
+        )
+        return rc, stdout, stderr
+
+    def fetch_file(self, in_path, out_path):
+        """
+        Implement fetch_file() by calling the corresponding
+        ansible_mitogen.target function in the target.
+
+        :param str in_path:
+            Remote filesystem path to read.
+        :param str out_path:
+            Local filesystem path to write.
+        """
+        output = self.get_chain().call(
+            ansible_mitogen.target.read_path,
+            mitogen.utils.cast(in_path),
+        )
+        ansible_mitogen.target.write_path(out_path, output)
+
    def put_data(self, out_path, data, mode=None, utimes=None):
        """
        Implement put_file() by calling the corresponding
        ansible_mitogen.target function in the target, transferring small
        files inline. This is pipelined and will return immediately; failed
        transfers are reported as exceptions in subsequent function calls.

        :param str out_path:
            Remote filesystem path to write.
        :param bytes data:
            File contents to put.
        :param mode:
            Optional mode, forwarded to ansible_mitogen.target.write_path.
        :param utimes:
            Optional access/modification times, forwarded to
            ansible_mitogen.target.write_path.
        """
        self.get_chain().call_no_reply(
            ansible_mitogen.target.write_path,
            mitogen.utils.cast(out_path),
            mitogen.core.Blob(data),
            mode=mode,
            utimes=utimes,
        )
+
    #: Maximum size of a small file before switching to streaming
    #: transfer. This should really be the same as
    #: mitogen.services.FileService.IO_SIZE, however the message format has
    #: slightly more overhead, so just randomly subtract 4KiB.
    SMALL_FILE_LIMIT = mitogen.core.CHUNK_SIZE - 4096

    def _throw_io_error(self, e, path):
        """
        Translate an ENOENT OSError into AnsibleFileNotFound; any other
        error is left for the caller to re-raise.
        """
        if e.args[0] == errno.ENOENT:
            s = 'file or module does not exist: ' + path
            raise ansible.errors.AnsibleFileNotFound(s)
+
+    def put_file(self, in_path, out_path):
+        """
+        Implement put_file() by streamily transferring the file via
+        FileService.
+
+        :param str in_path:
+            Local filesystem path to read.
+        :param str out_path:
+            Remote filesystem path to write.
+        """
+        try:
+            st = os.stat(in_path)
+        except OSError as e:
+            self._throw_io_error(e, in_path)
+            raise
+
+        if not stat.S_ISREG(st.st_mode):
+            raise IOError('%r is not a regular file.' % (in_path,))
+
+        # If the file is sufficiently small, just ship it in the argument list
+        # rather than introducing an extra RTT for the child to request it from
+        # FileService.
+        if st.st_size <= self.SMALL_FILE_LIMIT:
+            try:
+                fp = open(in_path, 'rb')
+                try:
+                    s = fp.read(self.SMALL_FILE_LIMIT + 1)
+                finally:
+                    fp.close()
+            except OSError:
+                self._throw_io_error(e, in_path)
+                raise
+
+            # Ensure did not grow during read.
+            if len(s) == st.st_size:
+                return self.put_data(out_path, s, mode=st.st_mode,
+                                     utimes=(st.st_atime, st.st_mtime))
+
+        self._connect()
+        self.parent.call_service(
+            service_name='mitogen.service.FileService',
+            method_name='register',
+            path=mitogen.utils.cast(in_path)
+        )
+
+        # For now this must remain synchronous, as the action plug-in may have
+        # passed us a temporary file to transfer. A future FileService could
+        # maintain an LRU list of open file descriptors to keep the temporary
+        # file alive, but that requires more work.
+        self.get_chain().call(
+            ansible_mitogen.target.transfer_file,
+            context=self.parent,
+            in_path=in_path,
+            out_path=out_path
+        )
diff --git a/ansible/plugins/mitogen-0.2.6/ansible_mitogen/loaders.py b/ansible/plugins/mitogen-0.2.6/ansible_mitogen/loaders.py
new file mode 100644
index 000000000..ff06c0c5b
--- /dev/null
+++ b/ansible/plugins/mitogen-0.2.6/ansible_mitogen/loaders.py
@@ -0,0 +1,48 @@
+# Copyright 2019, David Wilson
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its contributors
+# may be used to endorse or promote products derived from this software without
+# specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+
"""
Stable names for PluginLoader instances across Ansible versions.
"""

from __future__ import absolute_import

try:
    # Ansible 2.4 and newer expose the loaders via ansible.plugins.loader.
    from ansible.plugins.loader import action_loader
    from ansible.plugins.loader import connection_loader
    from ansible.plugins.loader import module_loader
    from ansible.plugins.loader import module_utils_loader
    from ansible.plugins.loader import shell_loader
    from ansible.plugins.loader import strategy_loader
except ImportError:  # Ansible <2.4
    from ansible.plugins import action_loader
    from ansible.plugins import connection_loader
    from ansible.plugins import module_loader
    from ansible.plugins import module_utils_loader
    from ansible.plugins import shell_loader
    from ansible.plugins import strategy_loader
diff --git a/ansible/plugins/mitogen-0.2.6/ansible_mitogen/logging.py b/ansible/plugins/mitogen-0.2.6/ansible_mitogen/logging.py
new file mode 100644
index 000000000..ce6f16591
--- /dev/null
+++ b/ansible/plugins/mitogen-0.2.6/ansible_mitogen/logging.py
@@ -0,0 +1,127 @@
+# Copyright 2019, David Wilson
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its contributors
+# may be used to endorse or promote products derived from this software without
+# specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+
+from __future__ import absolute_import
+import logging
+import os
+
+import mitogen.core
+import mitogen.utils
+
try:
    # When running under the ansible CLI programs, the active Display
    # instance is published via __main__.
    from __main__ import display
except ImportError:
    # Fallback for API consumers where no CLI bootstrap ran.
    from ansible.utils.display import Display
    display = Display()


#: The process name set via :func:`set_process_name`.
_process_name = None

#: The PID of the process that last called :func:`set_process_name`, so its
#: value can be ignored in unknown fork children.
_process_pid = None
+
+
def set_process_name(name):
    """
    Record a name to adorn log messages with, along with the PID it was
    recorded in so stale values can be detected in fork children.
    """
    global _process_name, _process_pid
    _process_name = name
    _process_pid = os.getpid()
+
+
class Handler(logging.Handler):
    """
    Use Mitogen's log format, but send the result to a Display method.
    """

    #: Set of target loggers that produce warnings and errors that spam the
    #: console needlessly; their records are demoted to DEBUG. A better
    #: strategy may simply be to bury all target logs in DEBUG output, but
    #: not by overriding their log level as done here.
    NOISY_LOGGERS = frozenset([
        'dnf',   # issue #272; warns when a package is already installed.
        'boto',  # issue #541; normal boto retry logic can cause ERROR logs.
    ])

    def __init__(self, normal_method):
        logging.Handler.__init__(self)
        self.formatter = mitogen.utils.log_get_formatter()
        # Display method used for records below WARNING.
        self.normal_method = normal_method

    def emit(self, record):
        mitogen_name = getattr(record, 'mitogen_name', '')
        # Anything a target wrote to stderr is surfaced as an error.
        if mitogen_name == 'stderr':
            record.levelno = logging.ERROR
        if mitogen_name in self.NOISY_LOGGERS and record.levelno >= logging.WARNING:
            record.levelno = logging.DEBUG

        # Only trust the recorded process name when the PID still matches;
        # an unknown fork child inherits a stale value.
        if _process_pid == os.getpid():
            process_name = _process_name
        else:
            process_name = '?'

        msg = '[%-4s %d] %s' % (process_name, os.getpid(), self.format(record))
        if record.levelno >= logging.ERROR:
            display.error(msg, wrap_text=False)
        elif record.levelno >= logging.WARNING:
            display.warning(msg, formatted=True)
        else:
            self.normal_method(msg)
+
+
def setup():
    """
    Install handlers for Mitogen loggers to redirect them into the Ansible
    display framework. Ansible installs its own logging framework handlers
    when C.DEFAULT_LOG_PATH is set, therefore disable propagation for our
    handlers.
    """
    l_mitogen = logging.getLogger('mitogen')
    l_mitogen_io = logging.getLogger('mitogen.io')
    l_ansible_mitogen = logging.getLogger('ansible_mitogen')

    for logger in (l_mitogen, l_mitogen_io, l_ansible_mitogen):
        logger.handlers = [Handler(display.vvv)]
        logger.propagate = False

    if display.verbosity > 2:
        level = logging.DEBUG
    else:
        # Mitogen copies the active log level into new children, allowing them
        # to filter tiny messages before they hit the network, and therefore
        # before they wake the IO loop. Explicitly setting a high level saves
        # ~4% running against just the local machine.
        level = logging.ERROR
    l_mitogen.setLevel(level)
    l_ansible_mitogen.setLevel(level)

    if display.verbosity > 3:
        l_mitogen_io.setLevel(logging.DEBUG)
diff --git a/ansible/plugins/mitogen-0.2.6/ansible_mitogen/mixins.py b/ansible/plugins/mitogen-0.2.6/ansible_mitogen/mixins.py
new file mode 100644
index 000000000..890467fd5
--- /dev/null
+++ b/ansible/plugins/mitogen-0.2.6/ansible_mitogen/mixins.py
@@ -0,0 +1,432 @@
+# Copyright 2019, David Wilson
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its contributors
+# may be used to endorse or promote products derived from this software without
+# specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+
+from __future__ import absolute_import
+import logging
+import os
+import pwd
+import random
+import traceback
+
+try:
+    from shlex import quote as shlex_quote
+except ImportError:
+    from pipes import quote as shlex_quote
+
+from ansible.module_utils._text import to_bytes
+from ansible.parsing.utils.jsonify import jsonify
+
+import ansible
+import ansible.constants
+import ansible.plugins
+import ansible.plugins.action
+
+import mitogen.core
+import mitogen.select
+import mitogen.utils
+
+import ansible_mitogen.connection
+import ansible_mitogen.planner
+import ansible_mitogen.target
+from ansible.module_utils._text import to_text
+
+
+LOG = logging.getLogger(__name__)
+
+
+class ActionModuleMixin(ansible.plugins.action.ActionBase):
+    """
+    The Mitogen-patched PluginLoader dynamically mixes this into every action
+    class that Ansible attempts to load. It exists to override all the
+    assumptions built into the base action class that should really belong in
+    some middle layer, or at least in the connection layer.
+
+    Functionality is defined here for:
+
+    * Capturing the final set of task variables and giving Connection a chance
+      to update its idea of the correct execution environment, before any
+      attempt is made to call a Connection method. While it's not expected for
+      the interpreter to change on a per-task basis, Ansible permits this, and
+      so it must be supported.
+
+    * Overriding lots of methods that try to call out to shell for mundane
+      reasons, such as copying files around, changing file permissions,
+      creating temporary directories and suchlike.
+
+    * Short-circuiting any use of Ansiballz or related code for executing a
+      module remotely using shell commands and SSH.
+
+    * Short-circuiting most of the logic in dealing with the fact that Ansible
+      always runs become: tasks across at least the SSH user account and the
+      destination user account, and handling the security permission issues
+      that crop up due to this. Mitogen always runs a task completely within
+      the target user account, so it's not a problem for us.
+    """
+    def __init__(self, task, connection, *args, **kwargs):
+        """
+        Verify the received connection is really a Mitogen connection. If not,
+        transmute this instance back into the original unadorned base class.
+
+        This allows running the Mitogen strategy in mixed-target playbooks,
+        where some targets use SSH while others use WinRM or some fancier UNIX
+        connection plug-in. That's because when the Mitogen strategy is active,
+        ActionModuleMixin is unconditionally mixed into any action module that
+        is instantiated, and there is no direct way for the monkey-patch to
+        know what kind of connection will be used upfront.
+        """
+        super(ActionModuleMixin, self).__init__(task, connection, *args, **kwargs)
+        if not isinstance(connection, ansible_mitogen.connection.Connection):
+            _, self.__class__ = type(self).__bases__
+
+    def run(self, tmp=None, task_vars=None):
+        """
+        Override run() to notify Connection of task-specific data, so it has a
+        chance to know e.g. the Python interpreter in use.
+        """
+        self._connection.on_action_run(
+            task_vars=task_vars,
+            delegate_to_hostname=self._task.delegate_to,
+            loader_basedir=self._loader.get_basedir(),
+        )
+        return super(ActionModuleMixin, self).run(tmp, task_vars)
+
+    COMMAND_RESULT = {
+        'rc': 0,
+        'stdout': '',
+        'stdout_lines': [],
+        'stderr': ''
+    }
+
+    def fake_shell(self, func, stdout=False):
+        """
+        Execute a function and decorate its return value in the style of
+        _low_level_execute_command(). This produces a return value that looks
+        like some shell command was run, when really func() was implemented
+        entirely in Python.
+
+        If the function raises :py:class:`mitogen.core.CallError`, this will be
+        translated into a failed shell command with a non-zero exit status.
+
+        :param func:
+            Function invoked as `func()`.
+        :returns:
+            See :py:attr:`COMMAND_RESULT`.
+        """
+        dct = self.COMMAND_RESULT.copy()
+        try:
+            rc = func()
+            if stdout:
+                dct['stdout'] = repr(rc)
+        except mitogen.core.CallError:
+            LOG.exception('While emulating a shell command')
+            dct['rc'] = 1
+            dct['stderr'] = traceback.format_exc()
+
+        return dct
+
+    def _remote_file_exists(self, path):
+        """
+        Determine if `path` exists by directly invoking os.path.exists() in the
+        target user account.
+        """
+        LOG.debug('_remote_file_exists(%r)', path)
+        return self._connection.get_chain().call(
+            ansible_mitogen.target.file_exists,
+            mitogen.utils.cast(path)
+        )
+
+    def _configure_module(self, module_name, module_args, task_vars=None):
+        """
+        Mitogen does not use the Ansiballz framework. This call should never
+        happen when ActionMixin is active, so crash if it does.
+        """
+        assert False, "_configure_module() should never be called."
+
+    def _is_pipelining_enabled(self, module_style, wrap_async=False):
+        """
+        Mitogen does not use SSH pipelining. This call should never happen when
+        ActionMixin is active, so crash if it does.
+        """
+        assert False, "_is_pipelining_enabled() should never be called."
+
+    def _generate_tmp_path(self):
+        return os.path.join(
+            self._connection.get_good_temp_dir(),
+            'ansible_mitogen_action_%016x' % (
+                random.getrandbits(8*8),
+            )
+        )
+
+    def _generate_tmp_path(self):
+        return os.path.join(
+            self._connection.get_good_temp_dir(),
+            'ansible_mitogen_action_%016x' % (
+                random.getrandbits(8*8),
+            )
+        )
+
+    def _make_tmp_path(self, remote_user=None):
+        """
+        Create a temporary subdirectory as a child of the temporary directory
+        managed by the remote interpreter.
+        """
+        LOG.debug('_make_tmp_path(remote_user=%r)', remote_user)
+        path = self._generate_tmp_path()
+        LOG.debug('Temporary directory: %r', path)
+        self._connection.get_chain().call_no_reply(os.mkdir, path)
+        self._connection._shell.tmpdir = path
+        return path
+
+    def _remove_tmp_path(self, tmp_path):
+        """
+        Replace the base implementation's invocation of rm -rf, replacing it
+        with a pipelined call to :func:`ansible_mitogen.target.prune_tree`.
+        """
+        LOG.debug('_remove_tmp_path(%r)', tmp_path)
+        if tmp_path is None and ansible.__version__ > '2.6':
+            tmp_path = self._connection._shell.tmpdir  # 06f73ad578d
+        if tmp_path is not None:
+            self._connection.get_chain().call_no_reply(
+                ansible_mitogen.target.prune_tree,
+                tmp_path,
+            )
+        self._connection._shell.tmpdir = None
+
+    def _transfer_data(self, remote_path, data):
+        """
+        Used by the base _execute_module(), and in <2.4 also by the template
+        action module, and probably others.
+        """
+        if isinstance(data, dict):
+            data = jsonify(data)
+        if not isinstance(data, bytes):
+            data = to_bytes(data, errors='surrogate_or_strict')
+
+        LOG.debug('_transfer_data(%r, %s ..%d bytes)',
+                  remote_path, type(data), len(data))
+        self._connection.put_data(remote_path, data)
+        return remote_path
+
+    #: Actions listed here cause :func:`_fixup_perms2` to avoid a needless
+    #: roundtrip, as they modify file modes separately afterwards. This is due
+    #: to the method prototype having a default of `execute=True`.
+    FIXUP_PERMS_RED_HERRING = set(['copy'])
+
+    def _fixup_perms2(self, remote_paths, remote_user=None, execute=True):
+        """
+        Mitogen always executes ActionBase helper methods in the context of the
+        target user account, so it is never necessary to modify permissions
+        except to ensure the execute bit is set if requested.
+        """
+        LOG.debug('_fixup_perms2(%r, remote_user=%r, execute=%r)',
+                  remote_paths, remote_user, execute)
+        if execute and self._task.action not in self.FIXUP_PERMS_RED_HERRING:
+            return self._remote_chmod(remote_paths, mode='u+x')
+        return self.COMMAND_RESULT.copy()
+
+    def _remote_chmod(self, paths, mode, sudoable=False):
+        """
+        Issue an asynchronous set_file_mode() call for every path in `paths`,
+        then format the resulting return value list with fake_shell().
+        """
+        LOG.debug('_remote_chmod(%r, mode=%r, sudoable=%r)',
+                  paths, mode, sudoable)
+        return self.fake_shell(lambda: mitogen.select.Select.all(
+            self._connection.get_chain().call_async(
+                ansible_mitogen.target.set_file_mode, path, mode
+            )
+            for path in paths
+        ))
+
+    def _remote_chown(self, paths, user, sudoable=False):
+        """
+        Issue an asynchronous os.chown() call for every path in `paths`, then
+        format the resulting return value list with fake_shell().
+        """
+        LOG.debug('_remote_chown(%r, user=%r, sudoable=%r)',
+                  paths, user, sudoable)
+        ent = self._connection.get_chain().call(pwd.getpwnam, user)
+        return self.fake_shell(lambda: mitogen.select.Select.all(
+            self._connection.get_chain().call_async(
+                os.chown, path, ent.pw_uid, ent.pw_gid
+            )
+            for path in paths
+        ))
+
+    def _remote_expand_user(self, path, sudoable=True):
+        """
+        Replace the base implementation's attempt to emulate
+        os.path.expanduser() with an actual call to os.path.expanduser().
+
+        :param bool sudoable:
+            If :data:`True`, indicate unqualified tilde ("~" with no username)
+            should be evaluated in the context of the login account, not any
+            become_user.
+        """
+        LOG.debug('_remote_expand_user(%r, sudoable=%r)', path, sudoable)
+        if not path.startswith('~'):
+            # /home/foo -> /home/foo
+            return path
+        if sudoable or not self._play_context.become:
+            if path == '~':
+                # ~ -> /home/dmw
+                return self._connection.homedir
+            if path.startswith('~/'):
+                # ~/.ansible -> /home/dmw/.ansible
+                return os.path.join(self._connection.homedir, path[2:])
+        # ~root/.ansible -> /root/.ansible
+        return self._connection.get_chain(use_login=(not sudoable)).call(
+            os.path.expanduser,
+            mitogen.utils.cast(path),
+        )
+
+    def get_task_timeout_secs(self):
+        """
+        Return the task "async:" value, portable across 2.4-2.5.
+        """
+        try:
+            return self._task.async_val
+        except AttributeError:
+            return getattr(self._task, 'async')
+
+    def _temp_file_gibberish(self, module_args, wrap_async):
+        # Ansible>2.5 module_utils reuses the action's temporary directory if
+        # one exists. Older versions error if this key is present.
+        if ansible.__version__ > '2.5':
+            if wrap_async:
+                # Sharing is not possible with async tasks, as in that case,
+                # the directory must outlive the action plug-in.
+                module_args['_ansible_tmpdir'] = None
+            else:
+                module_args['_ansible_tmpdir'] = self._connection._shell.tmpdir
+
+        # If _ansible_tmpdir is unset, Ansible>2.6 module_utils will use
+        # _ansible_remote_tmp as the location to create the module's temporary
+        # directory. Older versions error if this key is present.
+        if ansible.__version__ > '2.6':
+            module_args['_ansible_remote_tmp'] = (
+                self._connection.get_good_temp_dir()
+            )
+
+    def _execute_module(self, module_name=None, module_args=None, tmp=None,
+                        task_vars=None, persist_files=False,
+                        delete_remote_tmp=True, wrap_async=False):
+        """
+        Collect up a module's execution environment then use it to invoke
+        target.run_module() or helpers.run_module_async() in the target
+        context.
+        """
+        if module_name is None:
+            module_name = self._task.action
+        if module_args is None:
+            module_args = self._task.args
+        if task_vars is None:
+            task_vars = {}
+
+        self._update_module_args(module_name, module_args, task_vars)
+        env = {}
+        self._compute_environment_string(env)
+        self._temp_file_gibberish(module_args, wrap_async)
+
+        self._connection._connect()
+        result = ansible_mitogen.planner.invoke(
+            ansible_mitogen.planner.Invocation(
+                action=self,
+                connection=self._connection,
+                module_name=mitogen.core.to_text(module_name),
+                module_args=mitogen.utils.cast(module_args),
+                task_vars=task_vars,
+                templar=self._templar,
+                env=mitogen.utils.cast(env),
+                wrap_async=wrap_async,
+                timeout_secs=self.get_task_timeout_secs(),
+            )
+        )
+
+        if ansible.__version__ < '2.5' and delete_remote_tmp and \
+                getattr(self._connection._shell, 'tmpdir', None) is not None:
+            # Built-in actions expected tmpdir to be cleaned up automatically
+            # on _execute_module().
+            self._remove_tmp_path(self._connection._shell.tmpdir)
+
+        return result
+
+    def _postprocess_response(self, result):
+        """
+        Apply fixups mimicking ActionBase._execute_module(); this is copied
+        verbatim from action/__init__.py, the guts of _parse_returned_data are
+        garbage and should be removed or reimplemented once tests exist.
+
+        :param dict result:
+            Dictionary with format::
+
+                {
+                    "rc": int,
+                    "stdout": "stdout data",
+                    "stderr": "stderr data"
+                }
+        """
+        data = self._parse_returned_data(result)
+
+        # Cutpasted from the base implementation.
+        if 'stdout' in data and 'stdout_lines' not in data:
+            data['stdout_lines'] = (data['stdout'] or u'').splitlines()
+        if 'stderr' in data and 'stderr_lines' not in data:
+            data['stderr_lines'] = (data['stderr'] or u'').splitlines()
+
+        return data
+
+    def _low_level_execute_command(self, cmd, sudoable=True, in_data=None,
+                                   executable=None,
+                                   encoding_errors='surrogate_then_replace',
+                                   chdir=None):
+        """
+        Override the base implementation by simply calling
+        target.exec_command() in the target context.
+        """
+        LOG.debug('_low_level_execute_command(%r, in_data=%r, exe=%r, dir=%r)',
+                  cmd, type(in_data), executable, chdir)
+        if executable is None:  # executable defaults to False
+            executable = self._play_context.executable
+        if executable:
+            cmd = executable + ' -c ' + shlex_quote(cmd)
+
+        rc, stdout, stderr = self._connection.exec_command(
+            cmd=cmd,
+            in_data=in_data,
+            sudoable=sudoable,
+            mitogen_chdir=chdir,
+        )
+        stdout_text = to_text(stdout, errors=encoding_errors)
+
+        return {
+            'rc': rc,
+            'stdout': stdout_text,
+            'stdout_lines': stdout_text.splitlines(),
+            'stderr': stderr,
+        }
diff --git a/ansible/plugins/mitogen-0.2.6/ansible_mitogen/module_finder.py b/ansible/plugins/mitogen-0.2.6/ansible_mitogen/module_finder.py
new file mode 100644
index 000000000..633e3cade
--- /dev/null
+++ b/ansible/plugins/mitogen-0.2.6/ansible_mitogen/module_finder.py
@@ -0,0 +1,157 @@
+# Copyright 2019, David Wilson
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its contributors
+# may be used to endorse or promote products derived from this software without
+# specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+
+from __future__ import absolute_import
+from __future__ import unicode_literals
+
+import collections
+import imp
+import os
+
+import mitogen.master
+
+
+PREFIX = 'ansible.module_utils.'
+
+
+Module = collections.namedtuple('Module', 'name path kind parent')
+
+
+def get_fullname(module):
+    """
+    Reconstruct a Module's canonical path by recursing through its parents.
+    """
+    bits = [str(module.name)]
+    while module.parent:
+        bits.append(str(module.parent.name))
+        module = module.parent
+    return '.'.join(reversed(bits))
+
+
+def get_code(module):
+    """
+    Compile and return a Module's code object.
+    """
+    fp = open(module.path)
+    try:
+        return compile(fp.read(), str(module.name), 'exec')
+    finally:
+        fp.close()
+
+
+def is_pkg(module):
+    """
+    Return :data:`True` if a Module represents a package.
+    """
+    return module.kind == imp.PKG_DIRECTORY
+
+
+def find(name, path=(), parent=None):
+    """
+    Return a Module instance describing the first matching module found on the
+    search path.
+
+    :param str name:
+        Module name.
+    :param list path:
+        List of directory names to search for the module.
+    :param Module parent:
+        Optional module parent.
+    """
+    assert isinstance(path, tuple)
+    head, _, tail = name.partition('.')
+    try:
+        tup = imp.find_module(head, list(path))
+    except ImportError:
+        return parent
+
+    fp, modpath, (suffix, mode, kind) = tup
+    if fp:
+        fp.close()
+
+    if parent and modpath == parent.path:
+        # 'from timeout import timeout', where 'timeout' is a function but also
+        # the name of the module being imported.
+        return None
+
+    if kind == imp.PKG_DIRECTORY:
+        modpath = os.path.join(modpath, '__init__.py')
+
+    module = Module(head, modpath, kind, parent)
+    # TODO: this code is entirely wrong on Python 3.x, but works well enough
+    # for Ansible. We need a new find_child() that only looks in the package
+    # directory, never falling back to the parent search path.
+    if tail and kind == imp.PKG_DIRECTORY:
+        return find_relative(module, tail, path)
+    return module
+
+
+def find_relative(parent, name, path=()):
+    if parent.kind == imp.PKG_DIRECTORY:
+        path = (os.path.dirname(parent.path),) + path
+    return find(name, path, parent=parent)
+
+
+def scan_fromlist(code):
+    for level, modname_s, fromlist in mitogen.master.scan_code_imports(code):
+        for name in fromlist:
+            yield level, '%s.%s' % (modname_s, name)
+        if not fromlist:
+            yield level, modname_s
+
+
+def scan(module_name, module_path, search_path):
+    module = Module(module_name, module_path, imp.PY_SOURCE, None)
+    stack = [module]
+    seen = set()
+
+    while stack:
+        module = stack.pop(0)
+        for level, fromname in scan_fromlist(get_code(module)):
+            if not fromname.startswith(PREFIX):
+                continue
+
+            imported = find(fromname[len(PREFIX):], search_path)
+            if imported is None or imported in seen:
+                continue
+
+            seen.add(imported)
+            stack.append(imported)
+            parent = imported.parent
+            while parent:
+                fullname = get_fullname(parent)
+                module = Module(fullname, parent.path, parent.kind, None)
+                if module not in seen:
+                    seen.add(module)
+                    stack.append(module)
+                parent = parent.parent
+
+    return sorted(
+        (PREFIX + get_fullname(module), module.path, is_pkg(module))
+        for module in seen
+    )
diff --git a/ansible/plugins/mitogen-0.2.6/ansible_mitogen/parsing.py b/ansible/plugins/mitogen-0.2.6/ansible_mitogen/parsing.py
new file mode 100644
index 000000000..525e60cfe
--- /dev/null
+++ b/ansible/plugins/mitogen-0.2.6/ansible_mitogen/parsing.py
@@ -0,0 +1,84 @@
+# Copyright 2019, David Wilson
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its contributors
+# may be used to endorse or promote products derived from this software without
+# specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+
+"""
+Classes to detect each case from [0] and prepare arguments necessary for the
+corresponding Runner class within the target, including preloading requisite
+files/modules known missing.
+
+[0] "Ansible Module Architecture", developing_program_flow_modules.html
+"""
+
+from __future__ import absolute_import
+from __future__ import unicode_literals
+
+import mitogen.core
+
+
+def parse_script_interpreter(source):
+    """
+    Parse the script interpreter portion of a UNIX hashbang using the rules
+    Linux uses.
+
+    :param str source: String like "/usr/bin/env python".
+
+    :returns:
+        Tuple of `(interpreter, arg)`, where `interpreter` is the script
+        interpreter and `arg` is its sole argument if present, otherwise
+        :py:data:`None`.
+    """
+    # Find terminating newline. Assume last byte of binprm_buf if absent.
+    nl = source.find(b'\n', 0, 128)
+    if nl == -1:
+        nl = min(128, len(source))
+
+    # Split once on the first run of whitespace. If no whitespace exists,
+    # bits just contains the interpreter filename.
+    bits = source[0:nl].strip().split(None, 1)
+    if len(bits) == 1:
+        return mitogen.core.to_text(bits[0]), None
+    return mitogen.core.to_text(bits[0]), mitogen.core.to_text(bits[1])
+
+
+def parse_hashbang(source):
+    """
+    Parse a UNIX "hashbang line" using the syntax supported by Linux.
+
+    :param str source: String like "#!/usr/bin/env python".
+
+    :returns:
+        Tuple of `(interpreter, arg)`, where `interpreter` is the script
+        interpreter and `arg` is its sole argument if present, otherwise
+        :py:data:`None`.
+    """
+    # Linux requires first 2 bytes with no whitespace, pretty sure it's the
+    # same everywhere. See binfmt_script.c.
+    if not source.startswith(b'#!'):
+        return None, None
+
+    return parse_script_interpreter(source[2:])
diff --git a/ansible/plugins/mitogen-0.2.6/ansible_mitogen/planner.py b/ansible/plugins/mitogen-0.2.6/ansible_mitogen/planner.py
new file mode 100644
index 000000000..3c5bd64fe
--- /dev/null
+++ b/ansible/plugins/mitogen-0.2.6/ansible_mitogen/planner.py
@@ -0,0 +1,498 @@
+# Copyright 2019, David Wilson
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its contributors
+# may be used to endorse or promote products derived from this software without
+# specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+
+"""
+Classes to detect each case from [0] and prepare arguments necessary for the
+corresponding Runner class within the target, including preloading requisite
+files/modules known missing.
+
+[0] "Ansible Module Architecture", developing_program_flow_modules.html
+"""
+
+from __future__ import absolute_import
+from __future__ import unicode_literals
+
+import json
+import logging
+import os
+import random
+
+from ansible.executor import module_common
+import ansible.errors
+import ansible.module_utils
+import mitogen.core
+import mitogen.select
+
+import ansible_mitogen.loaders
+import ansible_mitogen.parsing
+import ansible_mitogen.target
+
+
+LOG = logging.getLogger(__name__)
+NO_METHOD_MSG = 'Mitogen: no invocation method found for: '
+NO_INTERPRETER_MSG = 'module (%s) is missing interpreter line'
+NO_MODULE_MSG = 'The module %s was not found in configured module paths.'
+
+
+class Invocation(object):
+    """
+    Collect up a module's execution environment then use it to invoke
+    target.run_module() or helpers.run_module_async() in the target context.
+    """
+    def __init__(self, action, connection, module_name, module_args,
+                 task_vars, templar, env, wrap_async, timeout_secs):
+        #: ActionBase instance invoking the module. Required to access some
+        #: output postprocessing methods that don't belong in ActionBase at
+        #: all.
+        self.action = action
+        #: Ansible connection to use to contact the target. Must be an
+        #: ansible_mitogen connection.
+        self.connection = connection
+        #: Name of the module ('command', 'shell', etc.) to execute.
+        self.module_name = module_name
+        #: Final module arguments.
+        self.module_args = module_args
+        #: Task variables, needed to extract ansible_*_interpreter.
+        self.task_vars = task_vars
+        #: Templar, needed to extract ansible_*_interpreter.
+        self.templar = templar
+        #: Final module environment.
+        self.env = env
+        #: Boolean, if :py:data:`True`, launch the module asynchronously.
+        self.wrap_async = wrap_async
+        #: Integer, if >0, limit the time an asynchronous job may run for.
+        self.timeout_secs = timeout_secs
+        #: Initially ``None``, but set by :func:`invoke`. The path on the
+        #: master to the module's implementation file.
+        self.module_path = None
+        #: Initially ``None``, but set by :func:`invoke`. The raw source or
+        #: binary contents of the module.
+        self.module_source = None
+
+    def __repr__(self):
+        return 'Invocation(module_name=%s)' % (self.module_name,)
+
+
+class Planner(object):
+    """
+    A Planner receives a module name and the contents of its implementation
+    file, indicates whether or not it understands how to run the module, and
+    exports a method to run the module.
+    """
+    def __init__(self, invocation):
+        self._inv = invocation
+
+    def detect(self):
+        """
+        Return true if the supplied `invocation` matches the module type
+        implemented by this planner.
+        """
+        raise NotImplementedError()
+
+    def should_fork(self):
+        """
+        Asynchronous tasks must always be forked.
+        """
+        return self._inv.wrap_async
+
+    def get_push_files(self):
+        """
+        Return a list of files that should be propagated to the target context
+        using PushFileService. The default implementation pushes nothing.
+        """
+        return []
+
+    def get_module_deps(self):
+        """
+        Return a list of the Python module names imported by the module.
+        """
+        return []
+
+    def get_kwargs(self, **kwargs):
+        """
+        If :meth:`detect` returned :data:`True`, plan for the module's
+        execution, including granting access to or delivering any files to it
+        that are known to be absent, and finally return a dict::
+
+            {
+                # Name of the class from runners.py that implements the
+                # target-side execution of this module type.
+                "runner_name": "...",
+
+                # Remaining keys are passed to the constructor of the class
+                # named by `runner_name`.
+            }
+        """
+        new = dict((mitogen.core.UnicodeType(k), kwargs[k])
+                   for k in kwargs)
+        new.setdefault('good_temp_dir',
+            self._inv.connection.get_good_temp_dir())
+        new.setdefault('cwd', self._inv.connection.get_default_cwd())
+        new.setdefault('extra_env', self._inv.connection.get_default_env())
+        new.setdefault('emulate_tty', True)
+        new.setdefault('service_context', self._inv.connection.parent)
+        return new
+
+    def __repr__(self):
+        return '%s()' % (type(self).__name__,)
+
+
+class BinaryPlanner(Planner):
+    """
+    Binary modules take their arguments and will return data to Ansible in the
+    same way as WANT_JSON modules.
+    """
+    runner_name = 'BinaryRunner'
+
+    def detect(self):
+        return module_common._is_binary(self._inv.module_source)
+
+    def get_push_files(self):
+        return [mitogen.core.to_text(self._inv.module_path)]
+
+    def get_kwargs(self, **kwargs):
+        return super(BinaryPlanner, self).get_kwargs(
+            runner_name=self.runner_name,
+            module=self._inv.module_name,
+            path=self._inv.module_path,
+            json_args=json.dumps(self._inv.module_args),
+            env=self._inv.env,
+            **kwargs
+        )
+
+
+class ScriptPlanner(BinaryPlanner):
+    """
+    Common functionality for script module planners -- handle interpreter
+    detection and rewrite.
+    """
+    def _rewrite_interpreter(self, path):
+        """
+        Given the original interpreter binary extracted from the script's
+        interpreter line, look up the associated `ansible_*_interpreter`
+        variable, render it and return it.
+
+        :param str path:
+            Absolute UNIX path to original interpreter.
+
+        :returns:
+            Shell fragment prefix used to execute the script via "/bin/sh -c".
+            While `ansible_*_interpreter` documentation suggests shell isn't
+            involved here, the vanilla implementation uses it and that use is
+            exploited in common playbooks.
+        """
+        key = u'ansible_%s_interpreter' % os.path.basename(path).strip()
+        try:
+            template = self._inv.task_vars[key]
+        except KeyError:
+            return path
+
+        return mitogen.utils.cast(self._inv.templar.template(template))
+
+    def _get_interpreter(self):
+        path, arg = ansible_mitogen.parsing.parse_hashbang(
+            self._inv.module_source
+        )
+        if path is None:
+            raise ansible.errors.AnsibleError(NO_INTERPRETER_MSG % (
+                self._inv.module_name,
+            ))
+
+        fragment = self._rewrite_interpreter(path)
+        if arg:
+            fragment += ' ' + arg
+
+        return fragment, path.startswith('python')
+
+    def get_kwargs(self, **kwargs):
+        interpreter_fragment, is_python = self._get_interpreter()
+        return super(ScriptPlanner, self).get_kwargs(
+            interpreter_fragment=interpreter_fragment,
+            is_python=is_python,
+            **kwargs
+        )
+
+
+class JsonArgsPlanner(ScriptPlanner):
+    """
+    Script that has its interpreter directive and the task arguments
+    substituted into its source as a JSON string.
+    """
+    runner_name = 'JsonArgsRunner'
+
+    def detect(self):
+        return module_common.REPLACER_JSONARGS in self._inv.module_source
+
+
+class WantJsonPlanner(ScriptPlanner):
+    """
+    If a module has the string WANT_JSON in it anywhere, Ansible treats it as a
+    non-native module that accepts a filename as its only command line
+    parameter. The filename is for a temporary file containing a JSON string
+    containing the module's parameters. The module needs to open the file, read
+    and parse the parameters, operate on the data, and print its return data as
+    a JSON encoded dictionary to stdout before exiting.
+
+    These types of modules are self-contained entities. As of Ansible 2.1,
+    Ansible only modifies them to change a shebang line if present.
+    """
+    runner_name = 'WantJsonRunner'
+
+    def detect(self):
+        return b'WANT_JSON' in self._inv.module_source
+
+
+class NewStylePlanner(ScriptPlanner):
+    """
+    The Ansiballz framework differs from module replacer in that it uses real
+    Python imports of things in ansible/module_utils instead of merely
+    preprocessing the module.
+    """
+    runner_name = 'NewStyleRunner'
+    marker = b'from ansible.module_utils.'
+
+    def detect(self):
+        return self.marker in self._inv.module_source
+
+    def _get_interpreter(self):
+        return None, None
+
+    def get_push_files(self):
+        return super(NewStylePlanner, self).get_push_files() + [
+            mitogen.core.to_text(path)
+            for fullname, path, is_pkg in self.get_module_map()['custom']
+        ]
+
+    def get_module_deps(self):
+        return self.get_module_map()['builtin']
+
+    #: Module names appearing in this set always require forking, usually due
+    #: to some terminal leakage that cannot be worked around in any sane
+    #: manner.
+    ALWAYS_FORK_MODULES = frozenset([
+        'dnf',  # issue #280; py-dnf/hawkey need therapy
+    ])
+
+    def should_fork(self):
+        """
+        In addition to asynchronous tasks, new-style modules should be forked
+        if:
+
+        * the user specifies mitogen_task_isolation=fork, or
+        * the new-style module has a custom module search path, or
+        * the module is known to leak like a sieve.
+        """
+        return (
+            super(NewStylePlanner, self).should_fork() or
+            (self._inv.task_vars.get('mitogen_task_isolation') == 'fork') or
+            (self._inv.module_name in self.ALWAYS_FORK_MODULES) or
+            (len(self.get_module_map()['custom']) > 0)
+        )
+
+    def get_search_path(self):
+        return tuple(
+            path
+            for path in ansible_mitogen.loaders.module_utils_loader._get_paths(
+                subdirs=False
+            )
+            if os.path.isdir(path)
+        )
+
+    _module_map = None
+
+    def get_module_map(self):
+        if self._module_map is None:
+            self._module_map = self._inv.connection.parent.call_service(
+                service_name='ansible_mitogen.services.ModuleDepService',
+                method_name='scan',
+
+                module_name='ansible_module_%s' % (self._inv.module_name,),
+                module_path=self._inv.module_path,
+                search_path=self.get_search_path(),
+                builtin_path=module_common._MODULE_UTILS_PATH,
+                context=self._inv.connection.context,
+            )
+        return self._module_map
+
+    def get_kwargs(self):
+        return super(NewStylePlanner, self).get_kwargs(
+            module_map=self.get_module_map(),
+        )
+
+
+class ReplacerPlanner(NewStylePlanner):
+    """
+    The Module Replacer framework is the original framework implementing
+    new-style modules. It is essentially a preprocessor (like the C
+    Preprocessor for those familiar with that programming language). It does
+    straight substitutions of specific substring patterns in the module file.
+    There are two types of substitutions.
+
+    * Replacements that only happen in the module file. These are public
+      replacement strings that modules can utilize to get helpful boilerplate
+      or access to arguments.
+
+      "from ansible.module_utils.MOD_LIB_NAME import *" is replaced with the
+      contents of the ansible/module_utils/MOD_LIB_NAME.py. These should only
+      be used with new-style Python modules.
+
+      "#<<INCLUDE_ANSIBLE_MODULE_COMMON>>" is equivalent to
+      "from ansible.module_utils.basic import *" and should also only apply to
+      new-style Python modules.
+
+      "# POWERSHELL_COMMON" substitutes the contents of
+      "ansible/module_utils/powershell.ps1". It should only be used with
+      new-style Powershell modules.
+    """
+    runner_name = 'ReplacerRunner'
+
+    def detect(self):
+        return module_common.REPLACER in self._inv.module_source
+
+
+class OldStylePlanner(ScriptPlanner):
+    runner_name = 'OldStyleRunner'
+
+    def detect(self):
+        # Everything else.
+        return True
+
+
+_planners = [
+    BinaryPlanner,
+    # ReplacerPlanner,
+    NewStylePlanner,
+    JsonArgsPlanner,
+    WantJsonPlanner,
+    OldStylePlanner,
+]
+
+
+def get_module_data(name):
+    path = ansible_mitogen.loaders.module_loader.find_plugin(name, '')
+    if path is None:
+        raise ansible.errors.AnsibleError(NO_MODULE_MSG % (name,))
+
+    with open(path, 'rb') as fp:
+        source = fp.read()
+    return mitogen.core.to_text(path), source
+
+
+def _propagate_deps(invocation, planner, context):
+    invocation.connection.parent.call_service(
+        service_name='mitogen.service.PushFileService',
+        method_name='propagate_paths_and_modules',
+        context=context,
+        paths=planner.get_push_files(),
+        modules=planner.get_module_deps(),
+    )
+
+
+def _invoke_async_task(invocation, planner):
+    job_id = '%016x' % random.randint(0, 2**64)
+    context = invocation.connection.spawn_isolated_child()
+    _propagate_deps(invocation, planner, context)
+
+    with mitogen.core.Receiver(context.router) as started_recv:
+        call_recv = context.call_async(
+            ansible_mitogen.target.run_module_async,
+            job_id=job_id,
+            timeout_secs=invocation.timeout_secs,
+            started_sender=started_recv.to_sender(),
+            kwargs=planner.get_kwargs(),
+        )
+
+        # Wait for run_module_async() to crash, or for AsyncRunner to indicate
+        # the job file has been written.
+        for msg in mitogen.select.Select([started_recv, call_recv]):
+            if msg.receiver is call_recv:
+                # It can only be an exception.
+                raise msg.unpickle()
+            break
+
+        return {
+            'stdout': json.dumps({
+                # modules/utilities/logic/async_wrapper.py::_run_module().
+                'changed': True,
+                'started': 1,
+                'finished': 0,
+                'ansible_job_id': job_id,
+            })
+        }
+
+
+def _invoke_isolated_task(invocation, planner):
+    context = invocation.connection.spawn_isolated_child()
+    _propagate_deps(invocation, planner, context)
+    try:
+        return context.call(
+            ansible_mitogen.target.run_module,
+            kwargs=planner.get_kwargs(),
+        )
+    finally:
+        context.shutdown()
+
+
+def _get_planner(invocation):
+    for klass in _planners:
+        planner = klass(invocation)
+        if planner.detect():
+            LOG.debug('%r accepted %r (filename %r)', planner,
+                      invocation.module_name, invocation.module_path)
+            return planner
+        LOG.debug('%r rejected %r', planner, invocation.module_name)
+    raise ansible.errors.AnsibleError(NO_METHOD_MSG + repr(invocation))
+
+
+def invoke(invocation):
+    """
+    Find a Planner subclass corresponding to `invocation` and use it to invoke
+    the module.
+
+    :param Invocation invocation:
+    :returns:
+        Module return dict.
+    :raises ansible.errors.AnsibleError:
+        Unrecognized/unsupported module type.
+    """
+    (invocation.module_path,
+     invocation.module_source) = get_module_data(invocation.module_name)
+    planner = _get_planner(invocation)
+
+    if invocation.wrap_async:
+        response = _invoke_async_task(invocation, planner)
+    elif planner.should_fork():
+        response = _invoke_isolated_task(invocation, planner)
+    else:
+        _propagate_deps(invocation, planner, invocation.connection.context)
+        response = invocation.connection.get_chain().call(
+            ansible_mitogen.target.run_module,
+            kwargs=planner.get_kwargs(),
+        )
+
+    return invocation.action._postprocess_response(response)
diff --git a/ansible/plugins/mitogen-0.2.6/ansible_mitogen/plugins/__init__.py b/ansible/plugins/mitogen-0.2.6/ansible_mitogen/plugins/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/ansible/plugins/mitogen-0.2.6/ansible_mitogen/plugins/action/__init__.py b/ansible/plugins/mitogen-0.2.6/ansible_mitogen/plugins/action/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/ansible/plugins/mitogen-0.2.6/ansible_mitogen/plugins/action/mitogen_get_stack.py b/ansible/plugins/mitogen-0.2.6/ansible_mitogen/plugins/action/mitogen_get_stack.py
new file mode 100644
index 000000000..12afbfbaa
--- /dev/null
+++ b/ansible/plugins/mitogen-0.2.6/ansible_mitogen/plugins/action/mitogen_get_stack.py
@@ -0,0 +1,54 @@
+# Copyright 2019, David Wilson
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its contributors
+# may be used to endorse or promote products derived from this software without
+# specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+
+from __future__ import absolute_import
+from __future__ import unicode_literals
+
+"""
+Fetch the connection configuration stack that would be used to connect to a
+target, without actually connecting to it.
+"""
+
+import ansible_mitogen.connection
+
+from ansible.plugins.action import ActionBase
+
+
+class ActionModule(ActionBase):
+    def run(self, tmp=None, task_vars=None):
+        if not isinstance(self._connection,
+                          ansible_mitogen.connection.Connection):
+            return {
+                'skipped': True,
+            }
+
+        return {
+            'changed': True,
+            'result': self._connection._build_stack(),
+            '_ansible_verbose_always': True,
+        }
diff --git a/ansible/plugins/mitogen-0.2.6/ansible_mitogen/plugins/connection/__init__.py b/ansible/plugins/mitogen-0.2.6/ansible_mitogen/plugins/connection/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/ansible/plugins/mitogen-0.2.6/ansible_mitogen/plugins/connection/mitogen_doas.py b/ansible/plugins/mitogen-0.2.6/ansible_mitogen/plugins/connection/mitogen_doas.py
new file mode 100644
index 000000000..1113d7c63
--- /dev/null
+++ b/ansible/plugins/mitogen-0.2.6/ansible_mitogen/plugins/connection/mitogen_doas.py
@@ -0,0 +1,44 @@
+# Copyright 2019, David Wilson
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its contributors
+# may be used to endorse or promote products derived from this software without
+# specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+
+from __future__ import absolute_import
+import os.path
+import sys
+
+try:
+    import ansible_mitogen.connection
+except ImportError:
+    base_dir = os.path.dirname(__file__)
+    sys.path.insert(0, os.path.abspath(os.path.join(base_dir, '../../..')))
+    del base_dir
+
+import ansible_mitogen.connection
+
+
+class Connection(ansible_mitogen.connection.Connection):
+    transport = 'mitogen_doas'
diff --git a/ansible/plugins/mitogen-0.2.6/ansible_mitogen/plugins/connection/mitogen_docker.py b/ansible/plugins/mitogen-0.2.6/ansible_mitogen/plugins/connection/mitogen_docker.py
new file mode 100644
index 000000000..b71ef5f11
--- /dev/null
+++ b/ansible/plugins/mitogen-0.2.6/ansible_mitogen/plugins/connection/mitogen_docker.py
@@ -0,0 +1,51 @@
+# Copyright 2019, David Wilson
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its contributors
+# may be used to endorse or promote products derived from this software without
+# specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+
+from __future__ import absolute_import
+import os.path
+import sys
+
+try:
+    import ansible_mitogen
+except ImportError:
+    base_dir = os.path.dirname(__file__)
+    sys.path.insert(0, os.path.abspath(os.path.join(base_dir, '../../..')))
+    del base_dir
+
+import ansible_mitogen.connection
+
+
+class Connection(ansible_mitogen.connection.Connection):
+    transport = 'docker'
+
+    @property
+    def docker_cmd(self):
+        """
+        Ansible 2.3 synchronize module wants to know how we run Docker.
+        """
+        return 'docker'
diff --git a/ansible/plugins/mitogen-0.2.6/ansible_mitogen/plugins/connection/mitogen_jail.py b/ansible/plugins/mitogen-0.2.6/ansible_mitogen/plugins/connection/mitogen_jail.py
new file mode 100644
index 000000000..c7475fb14
--- /dev/null
+++ b/ansible/plugins/mitogen-0.2.6/ansible_mitogen/plugins/connection/mitogen_jail.py
@@ -0,0 +1,44 @@
+# Copyright 2019, David Wilson
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its contributors
+# may be used to endorse or promote products derived from this software without
+# specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+
+from __future__ import absolute_import
+import os.path
+import sys
+
+try:
+    import ansible_mitogen
+except ImportError:
+    base_dir = os.path.dirname(__file__)
+    sys.path.insert(0, os.path.abspath(os.path.join(base_dir, '../../..')))
+    del base_dir
+
+import ansible_mitogen.connection
+
+
+class Connection(ansible_mitogen.connection.Connection):
+    transport = 'jail'
diff --git a/ansible/plugins/mitogen-0.2.6/ansible_mitogen/plugins/connection/mitogen_kubectl.py b/ansible/plugins/mitogen-0.2.6/ansible_mitogen/plugins/connection/mitogen_kubectl.py
new file mode 100644
index 000000000..2dab131b0
--- /dev/null
+++ b/ansible/plugins/mitogen-0.2.6/ansible_mitogen/plugins/connection/mitogen_kubectl.py
@@ -0,0 +1,71 @@
+# coding: utf-8
+# Copyright 2018, Yannig Perré
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its contributors
+# may be used to endorse or promote products derived from this software without
+# specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+
+from __future__ import absolute_import
+import os.path
+import sys
+
+try:
+    from ansible.plugins.connection import kubectl
+except ImportError:
+    kubectl = None
+
+from ansible.errors import AnsibleConnectionFailure
+from ansible.module_utils.six import iteritems
+
+try:
+    import ansible_mitogen
+except ImportError:
+    base_dir = os.path.dirname(__file__)
+    sys.path.insert(0, os.path.abspath(os.path.join(base_dir, '../../..')))
+    del base_dir
+
+import ansible_mitogen.connection
+
+
+class Connection(ansible_mitogen.connection.Connection):
+    transport = 'kubectl'
+
+    not_supported_msg = (
+        'The "mitogen_kubectl" plug-in requires a version of Ansible '
+        'that ships with the "kubectl" connection plug-in.'
+    )
+
+    def __init__(self, *args, **kwargs):
+        if kubectl is None:
+            raise AnsibleConnectionFailure(self.not_supported_msg)
+        super(Connection, self).__init__(*args, **kwargs)
+
+    def get_extra_args(self):
+        parameters = []
+        for key, option in iteritems(kubectl.CONNECTION_OPTIONS):
+            if self.get_task_var('ansible_' + key) is not None:
+                parameters += [ option, self.get_task_var('ansible_' + key) ]
+
+        return parameters
diff --git a/ansible/plugins/mitogen-0.2.6/ansible_mitogen/plugins/connection/mitogen_local.py b/ansible/plugins/mitogen-0.2.6/ansible_mitogen/plugins/connection/mitogen_local.py
new file mode 100644
index 000000000..24b84a036
--- /dev/null
+++ b/ansible/plugins/mitogen-0.2.6/ansible_mitogen/plugins/connection/mitogen_local.py
@@ -0,0 +1,86 @@
+# Copyright 2019, David Wilson
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its contributors
+# may be used to endorse or promote products derived from this software without
+# specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+
+from __future__ import absolute_import
+import os.path
+import sys
+
+try:
+    import ansible_mitogen.connection
+except ImportError:
+    base_dir = os.path.dirname(__file__)
+    sys.path.insert(0, os.path.abspath(os.path.join(base_dir, '../../..')))
+    del base_dir
+
+import ansible_mitogen.connection
+import ansible_mitogen.process
+
+
+if sys.version_info > (3,):
+    viewkeys = dict.keys  # Python 3: keys() already returns a set-like view
+elif sys.version_info > (2, 7):
+    viewkeys = dict.viewkeys  # Python 2.7: explicit view method
+else:
+    viewkeys = lambda dct: set(dct)  # <= 2.6: no views; fall back to a set copy
+
+
+def dict_diff(old, new):
+    """
+    Return a dict representing the differences between the dicts `old` and
+    `new`. Deleted keys appear as a key with the value :data:`None`, added and
+    changed keys appear as a key with the new value.
+    """
+    old_keys = viewkeys(old)
+    new_keys = viewkeys(dict(new))  # copy first: `new` may be os.environ, not a plain dict
+    out = {}
+    for key in new_keys - old_keys:  # added keys
+        out[key] = new[key]
+    for key in old_keys - new_keys:  # deleted keys -> None marker
+        out[key] = None
+    for key in old_keys & new_keys:  # keys present in both: keep only changed values
+        if old[key] != new[key]:
+            out[key] = new[key]
+    return out
+
+
+class Connection(ansible_mitogen.connection.Connection):
+    transport = 'local'
+
+    def get_default_cwd(self):  # emulate vanilla local's working directory
+        # https://github.com/ansible/ansible/issues/14489
+        return self.loader_basedir
+
+    def get_default_env(self):
+        """
+        Vanilla Ansible local commands execute with an environment inherited
+        from WorkerProcess, we must emulate that.
+        """
+        return dict_diff(  # ship only what changed since the mux process captured its env
+            old=ansible_mitogen.process.MuxProcess.original_env,
+            new=os.environ,
+        )
diff --git a/ansible/plugins/mitogen-0.2.6/ansible_mitogen/plugins/connection/mitogen_lxc.py b/ansible/plugins/mitogen-0.2.6/ansible_mitogen/plugins/connection/mitogen_lxc.py
new file mode 100644
index 000000000..696c9abd0
--- /dev/null
+++ b/ansible/plugins/mitogen-0.2.6/ansible_mitogen/plugins/connection/mitogen_lxc.py
@@ -0,0 +1,44 @@
+# Copyright 2019, David Wilson
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its contributors
+# may be used to endorse or promote products derived from this software without
+# specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+
+from __future__ import absolute_import
+import os.path
+import sys
+
+try:
+    import ansible_mitogen
+except ImportError:
+    base_dir = os.path.dirname(__file__)
+    sys.path.insert(0, os.path.abspath(os.path.join(base_dir, '../../..')))
+    del base_dir
+
+import ansible_mitogen.connection
+
+
+class Connection(ansible_mitogen.connection.Connection):
+    transport = 'lxc'  # transport name dispatched by ansible_mitogen.connection.Connection
diff --git a/ansible/plugins/mitogen-0.2.6/ansible_mitogen/plugins/connection/mitogen_lxd.py b/ansible/plugins/mitogen-0.2.6/ansible_mitogen/plugins/connection/mitogen_lxd.py
new file mode 100644
index 000000000..95e692a01
--- /dev/null
+++ b/ansible/plugins/mitogen-0.2.6/ansible_mitogen/plugins/connection/mitogen_lxd.py
@@ -0,0 +1,44 @@
+# Copyright 2019, David Wilson
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its contributors
+# may be used to endorse or promote products derived from this software without
+# specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+
+from __future__ import absolute_import
+import os.path
+import sys
+
+try:
+    import ansible_mitogen
+except ImportError:
+    base_dir = os.path.dirname(__file__)
+    sys.path.insert(0, os.path.abspath(os.path.join(base_dir, '../../..')))
+    del base_dir
+
+import ansible_mitogen.connection
+
+
+class Connection(ansible_mitogen.connection.Connection):
+    transport = 'lxd'  # transport name dispatched by ansible_mitogen.connection.Connection
diff --git a/ansible/plugins/mitogen-0.2.6/ansible_mitogen/plugins/connection/mitogen_machinectl.py b/ansible/plugins/mitogen-0.2.6/ansible_mitogen/plugins/connection/mitogen_machinectl.py
new file mode 100644
index 000000000..0f5a0d282
--- /dev/null
+++ b/ansible/plugins/mitogen-0.2.6/ansible_mitogen/plugins/connection/mitogen_machinectl.py
@@ -0,0 +1,44 @@
+# Copyright 2019, David Wilson
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its contributors
+# may be used to endorse or promote products derived from this software without
+# specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+
+from __future__ import absolute_import
+import os.path
+import sys
+
+try:
+    import ansible_mitogen.connection
+except ImportError:
+    base_dir = os.path.dirname(__file__)
+    sys.path.insert(0, os.path.abspath(os.path.join(base_dir, '../../..')))
+    del base_dir
+
+import ansible_mitogen.connection
+
+
+class Connection(ansible_mitogen.connection.Connection):
+    transport = 'machinectl'  # transport name dispatched by ansible_mitogen.connection.Connection
diff --git a/ansible/plugins/mitogen-0.2.6/ansible_mitogen/plugins/connection/mitogen_setns.py b/ansible/plugins/mitogen-0.2.6/ansible_mitogen/plugins/connection/mitogen_setns.py
new file mode 100644
index 000000000..20c6f1370
--- /dev/null
+++ b/ansible/plugins/mitogen-0.2.6/ansible_mitogen/plugins/connection/mitogen_setns.py
@@ -0,0 +1,44 @@
+# Copyright 2019, David Wilson
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its contributors
+# may be used to endorse or promote products derived from this software without
+# specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+
+from __future__ import absolute_import
+import os.path
+import sys
+
+try:
+    import ansible_mitogen.connection
+except ImportError:
+    base_dir = os.path.dirname(__file__)
+    sys.path.insert(0, os.path.abspath(os.path.join(base_dir, '../../..')))
+    del base_dir
+
+import ansible_mitogen.connection
+
+
+class Connection(ansible_mitogen.connection.Connection):
+    transport = 'setns'  # transport name dispatched by ansible_mitogen.connection.Connection
diff --git a/ansible/plugins/mitogen-0.2.6/ansible_mitogen/plugins/connection/mitogen_ssh.py b/ansible/plugins/mitogen-0.2.6/ansible_mitogen/plugins/connection/mitogen_ssh.py
new file mode 100644
index 000000000..df0e87cbe
--- /dev/null
+++ b/ansible/plugins/mitogen-0.2.6/ansible_mitogen/plugins/connection/mitogen_ssh.py
@@ -0,0 +1,65 @@
+# Copyright 2019, David Wilson
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its contributors
+# may be used to endorse or promote products derived from this software without
+# specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+
+from __future__ import absolute_import
+import os.path
+import sys
+
+DOCUMENTATION = """
+    author: David Wilson <dw@botanicus.net>
+    connection: mitogen_ssh
+    short_description: Connect over SSH via Mitogen
+    description:
+        - This connects using an OpenSSH client controlled by the Mitogen for
+          Ansible extension. It accepts every option the vanilla ssh plugin
+          accepts.
+    version_added: "2.5"
+    options:
+"""
+
+import ansible.plugins.connection.ssh
+
+try:
+    import ansible_mitogen.connection
+except ImportError:
+    base_dir = os.path.dirname(__file__)
+    sys.path.insert(0, os.path.abspath(os.path.join(base_dir, '../../..')))
+    del base_dir
+
+import ansible_mitogen.connection
+
+
+class Connection(ansible_mitogen.connection.Connection):
+    transport = 'ssh'  # reuse the vanilla "ssh" plug-in's option set (see DOCUMENTATION above)
+    vanilla_class = ansible.plugins.connection.ssh.Connection  # borrowed for ControlPath logic below
+
+    @staticmethod
+    def _create_control_path(*args, **kwargs):
+        """Forward _create_control_path() to the implementation in ssh.py."""
+        # https://github.com/dw/mitogen/issues/342
+        return Connection.vanilla_class._create_control_path(*args, **kwargs)
diff --git a/ansible/plugins/mitogen-0.2.6/ansible_mitogen/plugins/connection/mitogen_su.py b/ansible/plugins/mitogen-0.2.6/ansible_mitogen/plugins/connection/mitogen_su.py
new file mode 100644
index 000000000..4ab2711e5
--- /dev/null
+++ b/ansible/plugins/mitogen-0.2.6/ansible_mitogen/plugins/connection/mitogen_su.py
@@ -0,0 +1,44 @@
+# Copyright 2019, David Wilson
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its contributors
+# may be used to endorse or promote products derived from this software without
+# specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+
+from __future__ import absolute_import
+import os.path
+import sys
+
+try:
+    import ansible_mitogen.connection
+except ImportError:
+    base_dir = os.path.dirname(__file__)
+    sys.path.insert(0, os.path.abspath(os.path.join(base_dir, '../../..')))
+    del base_dir
+
+import ansible_mitogen.connection
+
+
+class Connection(ansible_mitogen.connection.Connection):
+    transport = 'mitogen_su'  # NOTE: registers under its own name, unlike e.g. mitogen_lxc which reuses 'lxc'
diff --git a/ansible/plugins/mitogen-0.2.6/ansible_mitogen/plugins/connection/mitogen_sudo.py b/ansible/plugins/mitogen-0.2.6/ansible_mitogen/plugins/connection/mitogen_sudo.py
new file mode 100644
index 000000000..130f54454
--- /dev/null
+++ b/ansible/plugins/mitogen-0.2.6/ansible_mitogen/plugins/connection/mitogen_sudo.py
@@ -0,0 +1,44 @@
+# Copyright 2019, David Wilson
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its contributors
+# may be used to endorse or promote products derived from this software without
+# specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+
+from __future__ import absolute_import
+import os.path
+import sys
+
+try:
+    import ansible_mitogen.connection
+except ImportError:
+    base_dir = os.path.dirname(__file__)
+    sys.path.insert(0, os.path.abspath(os.path.join(base_dir, '../../..')))
+    del base_dir
+
+import ansible_mitogen.connection
+
+
+class Connection(ansible_mitogen.connection.Connection):
+    transport = 'mitogen_sudo'  # NOTE: registers under its own name, unlike e.g. mitogen_lxc which reuses 'lxc'
diff --git a/ansible/plugins/mitogen-0.2.6/ansible_mitogen/plugins/strategy/__init__.py b/ansible/plugins/mitogen-0.2.6/ansible_mitogen/plugins/strategy/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/ansible/plugins/mitogen-0.2.6/ansible_mitogen/plugins/strategy/mitogen.py b/ansible/plugins/mitogen-0.2.6/ansible_mitogen/plugins/strategy/mitogen.py
new file mode 100644
index 000000000..66872663f
--- /dev/null
+++ b/ansible/plugins/mitogen-0.2.6/ansible_mitogen/plugins/strategy/mitogen.py
@@ -0,0 +1,61 @@
+# Copyright 2019, David Wilson
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its contributors
+# may be used to endorse or promote products derived from this software without
+# specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+
+from __future__ import absolute_import
+import os.path
+import sys
+
+#
+# This is not the real Strategy implementation module, it simply exists as a
+# proxy to the real module, which is loaded using Python's regular import
+# mechanism, to prevent Ansible's PluginLoader from making up a fake name that
+# results in ansible_mitogen plugin modules being loaded twice: once by
+# PluginLoader with a name like "ansible.plugins.strategy.mitogen", which is
+# stuffed into sys.modules even though attempting to import it will trigger an
+# ImportError, and once under its canonical name, "ansible_mitogen.strategy".
+#
+# Therefore we have a proxy module that imports it under the real name, and
+# sets up the duff PluginLoader-imported module to just contain objects from
+# the real module, so duplicate types don't exist in memory, and things like
+# debuggers and isinstance() work predictably.
+#
+
+BASE_DIR = os.path.abspath(
+    os.path.join(os.path.dirname(__file__), '../../..')  # root of the bundled mitogen-0.2.6 tree
+)
+
+if BASE_DIR not in sys.path:
+    sys.path.insert(0, BASE_DIR)  # make ansible_mitogen importable under its canonical name
+
+import ansible_mitogen.strategy
+import ansible.plugins.strategy.linear
+
+
+class StrategyModule(ansible_mitogen.strategy.StrategyMixin,
+                     ansible.plugins.strategy.linear.StrategyModule):
+    pass  # real logic lives in StrategyMixin; this proxy only binds it to "linear"
diff --git a/ansible/plugins/mitogen-0.2.6/ansible_mitogen/plugins/strategy/mitogen_free.py b/ansible/plugins/mitogen-0.2.6/ansible_mitogen/plugins/strategy/mitogen_free.py
new file mode 100644
index 000000000..ffe2fbd94
--- /dev/null
+++ b/ansible/plugins/mitogen-0.2.6/ansible_mitogen/plugins/strategy/mitogen_free.py
@@ -0,0 +1,62 @@
+# Copyright 2019, David Wilson
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its contributors
+# may be used to endorse or promote products derived from this software without
+# specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+
+from __future__ import absolute_import
+import os.path
+import sys
+
+#
+# This is not the real Strategy implementation module, it simply exists as a
+# proxy to the real module, which is loaded using Python's regular import
+# mechanism, to prevent Ansible's PluginLoader from making up a fake name that
+# results in ansible_mitogen plugin modules being loaded twice: once by
+# PluginLoader with a name like "ansible.plugins.strategy.mitogen", which is
+# stuffed into sys.modules even though attempting to import it will trigger an
+# ImportError, and once under its canonical name, "ansible_mitogen.strategy".
+#
+# Therefore we have a proxy module that imports it under the real name, and
+# sets up the duff PluginLoader-imported module to just contain objects from
+# the real module, so duplicate types don't exist in memory, and things like
+# debuggers and isinstance() work predictably.
+#
+
+BASE_DIR = os.path.abspath(
+    os.path.join(os.path.dirname(__file__), '../../..')  # root of the bundled mitogen-0.2.6 tree
+)
+
+if BASE_DIR not in sys.path:
+    sys.path.insert(0, BASE_DIR)  # make ansible_mitogen importable under its canonical name
+
+import ansible_mitogen.loaders
+import ansible_mitogen.strategy
+
+
+Base = ansible_mitogen.loaders.strategy_loader.get('free', class_only=True)  # Ansible's built-in "free" strategy class
+
+class StrategyModule(ansible_mitogen.strategy.StrategyMixin, Base):
+    pass  # real logic lives in StrategyMixin; this proxy only binds it to "free"
diff --git a/ansible/plugins/mitogen-0.2.6/ansible_mitogen/plugins/strategy/mitogen_host_pinned.py b/ansible/plugins/mitogen-0.2.6/ansible_mitogen/plugins/strategy/mitogen_host_pinned.py
new file mode 100644
index 000000000..23eccd369
--- /dev/null
+++ b/ansible/plugins/mitogen-0.2.6/ansible_mitogen/plugins/strategy/mitogen_host_pinned.py
@@ -0,0 +1,67 @@
+# Copyright 2019, David Wilson
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its contributors
+# may be used to endorse or promote products derived from this software without
+# specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+
+from __future__ import absolute_import
+import os.path
+import sys
+
+#
+# This is not the real Strategy implementation module, it simply exists as a
+# proxy to the real module, which is loaded using Python's regular import
+# mechanism, to prevent Ansible's PluginLoader from making up a fake name that
+# results in ansible_mitogen plugin modules being loaded twice: once by
+# PluginLoader with a name like "ansible.plugins.strategy.mitogen", which is
+# stuffed into sys.modules even though attempting to import it will trigger an
+# ImportError, and once under its canonical name, "ansible_mitogen.strategy".
+#
+# Therefore we have a proxy module that imports it under the real name, and
+# sets up the duff PluginLoader-imported module to just contain objects from
+# the real module, so duplicate types don't exist in memory, and things like
+# debuggers and isinstance() work predictably.
+#
+
+BASE_DIR = os.path.abspath(
+    os.path.join(os.path.dirname(__file__), '../../..')  # root of the bundled mitogen-0.2.6 tree
+)
+
+if BASE_DIR not in sys.path:
+    sys.path.insert(0, BASE_DIR)  # make ansible_mitogen importable under its canonical name
+
+import ansible_mitogen.loaders
+import ansible_mitogen.strategy
+
+
+Base = ansible_mitogen.loaders.strategy_loader.get('host_pinned', class_only=True)  # None on Ansible < 2.7
+
+if Base is None:
+    raise ImportError(
+        'The host_pinned strategy is only available in Ansible 2.7 or newer.'
+    )
+
+class StrategyModule(ansible_mitogen.strategy.StrategyMixin, Base):
+    pass  # real logic lives in StrategyMixin; this proxy only binds it to "host_pinned"
diff --git a/ansible/plugins/mitogen-0.2.6/ansible_mitogen/plugins/strategy/mitogen_linear.py b/ansible/plugins/mitogen-0.2.6/ansible_mitogen/plugins/strategy/mitogen_linear.py
new file mode 100644
index 000000000..1b198e61d
--- /dev/null
+++ b/ansible/plugins/mitogen-0.2.6/ansible_mitogen/plugins/strategy/mitogen_linear.py
@@ -0,0 +1,62 @@
+# Copyright 2019, David Wilson
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its contributors
+# may be used to endorse or promote products derived from this software without
+# specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+
+from __future__ import absolute_import
+import os.path
+import sys
+
+#
+# This is not the real Strategy implementation module, it simply exists as a
+# proxy to the real module, which is loaded using Python's regular import
+# mechanism, to prevent Ansible's PluginLoader from making up a fake name that
+# results in ansible_mitogen plugin modules being loaded twice: once by
+# PluginLoader with a name like "ansible.plugins.strategy.mitogen", which is
+# stuffed into sys.modules even though attempting to import it will trigger an
+# ImportError, and once under its canonical name, "ansible_mitogen.strategy".
+#
+# Therefore we have a proxy module that imports it under the real name, and
+# sets up the duff PluginLoader-imported module to just contain objects from
+# the real module, so duplicate types don't exist in memory, and things like
+# debuggers and isinstance() work predictably.
+#
+
+BASE_DIR = os.path.abspath(
+    os.path.join(os.path.dirname(__file__), '../../..')  # root of the bundled mitogen-0.2.6 tree
+)
+
+if BASE_DIR not in sys.path:
+    sys.path.insert(0, BASE_DIR)  # make ansible_mitogen importable under its canonical name
+
+import ansible_mitogen.loaders
+import ansible_mitogen.strategy
+
+
+Base = ansible_mitogen.loaders.strategy_loader.get('linear', class_only=True)  # Ansible's built-in "linear" strategy class
+
+class StrategyModule(ansible_mitogen.strategy.StrategyMixin, Base):
+    pass  # real logic lives in StrategyMixin; this proxy only binds it to "linear"
diff --git a/ansible/plugins/mitogen-0.2.6/ansible_mitogen/process.py b/ansible/plugins/mitogen-0.2.6/ansible_mitogen/process.py
new file mode 100644
index 000000000..e4e61e8bc
--- /dev/null
+++ b/ansible/plugins/mitogen-0.2.6/ansible_mitogen/process.py
@@ -0,0 +1,358 @@
+# Copyright 2019, David Wilson
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its contributors
+# may be used to endorse or promote products derived from this software without
+# specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+
+from __future__ import absolute_import
+import atexit
+import errno
+import logging
+import os
+import signal
+import socket
+import sys
+import time
+
+try:
+    import faulthandler
+except ImportError:
+    faulthandler = None
+
+import mitogen
+import mitogen.core
+import mitogen.debug
+import mitogen.master
+import mitogen.parent
+import mitogen.service
+import mitogen.unix
+import mitogen.utils
+
+import ansible
+import ansible.constants as C
+import ansible_mitogen.logging
+import ansible_mitogen.services
+
+from mitogen.core import b
+import ansible_mitogen.affinity
+
+
+LOG = logging.getLogger(__name__)
+
+# Template for a minimal replacement ansible/__init__.py served to targets;
+# filled with ansible.__version__ and ansible.__author__ in
+# MuxProcess._setup_responder().
+ANSIBLE_PKG_OVERRIDE = (
+    u"__version__ = %r\n"
+    u"__author__ = %r\n"
+)
+
+
+def clean_shutdown(sock):
+    """
+    Shut the write end of `sock`, causing `recv` in the worker process to wake
+    up with a 0-byte read and initiate mux process exit, then wait for a 0-byte
+    read from the read end, which will occur after the child closes the
+    descriptor on exit.
+
+    This is done using :mod:`atexit` since Ansible lacks any more sensible hook
+    to run code during exit, and unless some synchronization exists with
+    MuxProcess, debug logs may appear on the user's terminal *after* the prompt
+    has been printed.
+
+    :param socket.socket sock:
+        The controller's end of the socketpair shared with the mux process.
+    """
+    sock.shutdown(socket.SHUT_WR)
+    # Blocks until the peer closes its end, i.e. the mux process has exited.
+    sock.recv(1)
+
+
+def getenv_int(key, default=0):
+    """
+    Get an integer-valued environment variable `key`, if it exists and parses
+    as an integer, otherwise return `default`.
+
+    :param str key: Environment variable name.
+    :param int default: Returned when `key` is unset or not a valid integer.
+    :rtype: int
+    """
+    try:
+        return int(os.environ.get(key, str(default)))
+    except ValueError:
+        return default
+
+
+def save_pid(name):
+    """
+    When debugging and profiling, it is very annoying to poke through the
+    process list to discover the currently running Ansible and MuxProcess IDs,
+    especially when trying to catch an issue during early startup. So here, if
+    a magic environment variable is set, stash them in hidden files in the CWD::
+
+        alias muxpid="cat .ansible-mux.pid"
+        alias anspid="cat .ansible-controller.pid"
+
+        gdb -p $(muxpid)
+        perf top -p $(anspid)
+
+    :param str name:
+        Label embedded in the PID file name, e.g. "controller" or "mux".
+    """
+    # Opt-in only: no file is written unless MITOGEN_SAVE_PIDS is set.
+    if os.environ.get('MITOGEN_SAVE_PIDS'):
+        with open('.ansible-%s.pid' % (name,), 'w') as fp:
+            fp.write(str(os.getpid()))
+
+
+class MuxProcess(object):
+    """
+    Implement a subprocess forked from the Ansible top-level, as a safe place
+    to contain the Mitogen IO multiplexer thread, keeping its use of the
+    logging package (and the logging package's heavy use of locks) far away
+    from the clutches of os.fork(), which is used continuously by the
+    multiprocessing package in the top-level process.
+
+    The problem with running the multiplexer in that process is that should the
+    multiplexer thread be in the process of emitting a log entry (and holding
+    its lock) at the point of fork, in the child, the first attempt to log any
+    log entry using the same handler will deadlock the child, as in the memory
+    image the child received, the lock will always be marked held.
+
+    See https://bugs.python.org/issue6721 for a thorough description of the
+    class of problems this worker is intended to avoid.
+    """
+
+    #: In the top-level process, this references one end of a socketpair(),
+    #: which the MuxProcess blocks reading from in order to determine when
+    #: the master process dies. Once the read returns, the MuxProcess will
+    #: begin shutting itself down.
+    worker_sock = None
+
+    #: In the worker process, this references the other end of
+    #: :py:attr:`worker_sock`.
+    child_sock = None
+
+    #: In the top-level process, this is the PID of the single MuxProcess
+    #: that was spawned.
+    #: NOTE(review): never assigned in this file -- start() stores the fork
+    #: result in ``cls.child_pid`` instead; confirm against upstream mitogen
+    #: before relying on this attribute.
+    worker_pid = None
+
+    #: A copy of :data:`os.environ` at the time the multiplexer process was
+    #: started. It's used by mitogen_local.py to find changes made to the
+    #: top-level environment (e.g. vars plugins -- issue #297) that must be
+    #: applied to locally executed commands and modules.
+    original_env = None
+
+    #: In both processes, this is the temporary UNIX socket used for
+    #: forked WorkerProcesses to contact the MuxProcess
+    unix_listener_path = None
+
+    #: Singleton.
+    _instance = None
+
+    @classmethod
+    def start(cls, _init_logging=True):
+        """
+        Arrange for the subprocess to be started, if it is not already running.
+
+        The parent process picks a UNIX socket path the child will use prior to
+        fork, creates a socketpair used essentially as a semaphore, then blocks
+        waiting for the child to indicate the UNIX socket is ready for use.
+
+        Also creates the class attributes ``profiling`` (read by the broker
+        shutdown/exit handlers below) and ``child_pid``.
+
+        :param bool _init_logging:
+            For testing, if :data:`False`, don't initialize logging.
+        """
+        # Idempotent: once the socketpair exists, further calls are no-ops.
+        if cls.worker_sock is not None:
+            return
+
+        if faulthandler is not None:
+            faulthandler.enable()
+
+        mitogen.utils.setup_gil()
+        cls.unix_listener_path = mitogen.unix.make_socket_path()
+        cls.worker_sock, cls.child_sock = socket.socketpair()
+        atexit.register(lambda: clean_shutdown(cls.worker_sock))
+        # Prevent either descriptor from leaking into exec'd children.
+        mitogen.core.set_cloexec(cls.worker_sock.fileno())
+        mitogen.core.set_cloexec(cls.child_sock.fileno())
+
+        cls.profiling = os.environ.get('MITOGEN_PROFILING') is not None
+        if cls.profiling:
+            mitogen.core.enable_profiling()
+        if _init_logging:
+            ansible_mitogen.logging.setup()
+
+        cls.original_env = dict(os.environ)
+        cls.child_pid = os.fork()
+        if cls.child_pid:
+            # Parent (Ansible top-level): wait for the child's readiness byte.
+            save_pid('controller')
+            ansible_mitogen.logging.set_process_name('top')
+            ansible_mitogen.affinity.policy.assign_controller()
+            cls.child_sock.close()
+            cls.child_sock = None
+            mitogen.core.io_op(cls.worker_sock.recv, 1)
+        else:
+            # Child (mux process): run the multiplexer until parent exit.
+            save_pid('mux')
+            ansible_mitogen.logging.set_process_name('mux')
+            ansible_mitogen.affinity.policy.assign_muxprocess()
+            cls.worker_sock.close()
+            cls.worker_sock = None
+            self = cls()
+            self.worker_main()
+
+    def worker_main(self):
+        """
+        The main function for the mux process: setup the Mitogen broker
+        thread and ansible_mitogen services, then sleep waiting for the socket
+        connected to the parent to be closed (indicating the parent has died).
+        """
+        self._setup_master()
+        self._setup_services()
+
+        try:
+            # Let the parent know our listening socket is ready.
+            mitogen.core.io_op(self.child_sock.send, b('1'))
+            # Block until the socket is closed, which happens on parent exit.
+            mitogen.core.io_op(self.child_sock.recv, 1)
+        finally:
+            self.broker.shutdown()
+            self.broker.join()
+
+            # Test frameworks living somewhere higher on the stack of the
+            # original parent process may try to catch sys.exit(), so do a C
+            # level exit instead.
+            os._exit(0)
+
+    def _enable_router_debug(self):
+        # Opt-in router diagnostics via environment variable.
+        if 'MITOGEN_ROUTER_DEBUG' in os.environ:
+            self.router.enable_debug()
+
+    def _enable_stack_dumps(self):
+        # Periodically dump all thread stacks to the log when requested.
+        secs = getenv_int('MITOGEN_DUMP_THREAD_STACKS', default=0)
+        if secs:
+            mitogen.debug.dump_to_logger(secs=secs)
+
+    def _setup_simplejson(self, responder):
+        """
+        We support serving simplejson for Python 2.4 targets on Ansible 2.3, at
+        least so the package's own CI Docker scripts can run without external
+        help, however newer versions of simplejson no longer support Python
+        2.4. Therefore override any installed/loaded version with a
+        2.4-compatible version we ship in the compat/ directory.
+
+        :param mitogen.master.ModuleResponder responder:
+            Responder whose source overrides are being configured.
+        """
+        responder.whitelist_prefix('simplejson')
+
+        # issue #536: must be at end of sys.path, in case existing newer
+        # version is already loaded.
+        compat_path = os.path.join(os.path.dirname(__file__), 'compat')
+        sys.path.append(compat_path)
+
+        for fullname, is_pkg, suffix in (
+            (u'simplejson', True, '__init__.py'),
+            (u'simplejson.decoder', False, 'decoder.py'),
+            (u'simplejson.encoder', False, 'encoder.py'),
+            (u'simplejson.scanner', False, 'scanner.py'),
+        ):
+            path = os.path.join(compat_path, 'simplejson', suffix)
+            # try/finally rather than "with": this file may execute on very
+            # old interpreters elsewhere in the tree, matching mitogen style.
+            fp = open(path, 'rb')
+            try:
+                source = fp.read()
+            finally:
+                fp.close()
+
+            responder.add_source_override(
+                fullname=fullname,
+                path=path,
+                source=source,
+                is_pkg=is_pkg,
+            )
+
+    def _setup_responder(self, responder):
+        """
+        Configure :class:`mitogen.master.ModuleResponder` to only permit
+        certain packages, and to generate custom responses for certain modules.
+        """
+        responder.whitelist_prefix('ansible')
+        responder.whitelist_prefix('ansible_mitogen')
+        self._setup_simplejson(responder)
+
+        # Ansible 2.3 is compatible with Python 2.4 targets, however
+        # ansible/__init__.py is not. Instead, executor/module_common.py writes
+        # out a 2.4-compatible namespace package for unknown reasons. So we
+        # copy it here.
+        responder.add_source_override(
+            fullname='ansible',
+            path=ansible.__file__,
+            source=(ANSIBLE_PKG_OVERRIDE % (
+                ansible.__version__,
+                ansible.__author__,
+            )).encode(),
+            is_pkg=True,
+        )
+
+    def _setup_master(self):
+        """
+        Construct a Router, Broker, and mitogen.unix listener
+        """
+        self.broker = mitogen.master.Broker(install_watcher=False)
+        self.router = mitogen.master.Router(
+            broker=self.broker,
+            # 4 GiB cap on a single message.
+            max_message_size=4096 * 1048576,
+        )
+        self._setup_responder(self.router.responder)
+        mitogen.core.listen(self.broker, 'shutdown', self.on_broker_shutdown)
+        mitogen.core.listen(self.broker, 'exit', self.on_broker_exit)
+        self.listener = mitogen.unix.Listener(
+            router=self.router,
+            path=self.unix_listener_path,
+            # Size the accept backlog to Ansible's worker count.
+            backlog=C.DEFAULT_FORKS,
+        )
+        self._enable_router_debug()
+        self._enable_stack_dumps()
+
+    def _setup_services(self):
+        """
+        Construct a ContextService and a thread to service requests for it
+        arriving from worker processes.
+        """
+        self.pool = mitogen.service.Pool(
+            router=self.router,
+            services=[
+                mitogen.service.FileService(router=self.router),
+                mitogen.service.PushFileService(router=self.router),
+                ansible_mitogen.services.ContextService(self.router),
+                ansible_mitogen.services.ModuleDepService(self.router),
+            ],
+            size=getenv_int('MITOGEN_POOL_SIZE', default=32),
+        )
+        LOG.debug('Service pool configured: size=%d', self.pool.size)
+
+    def on_broker_shutdown(self):
+        """
+        Respond to broker shutdown by beginning service pool shutdown. Do not
+        join on the pool yet, since that would block the broker thread which
+        then cannot clean up pending handlers, which is required for the
+        threads to exit gracefully.
+        """
+        # In normal operation we presently kill the process because there is
+        # not yet any way to cancel connect().
+        self.pool.stop(join=self.profiling)
+
+    def on_broker_exit(self):
+        """
+        Respond to the broker thread about to exit by sending SIGTERM to
+        ourself. In future this should gracefully join the pool, but TERM is
+        fine for now.
+        """
+        if not self.profiling:
+            # In normal operation we presently kill the process because there is
+            # not yet any way to cancel connect(). When profiling, threads
+            # including the broker must shut down gracefully, otherwise pstats
+            # won't be written.
+            os.kill(os.getpid(), signal.SIGTERM)
diff --git a/ansible/plugins/mitogen-0.2.6/ansible_mitogen/runner.py b/ansible/plugins/mitogen-0.2.6/ansible_mitogen/runner.py
new file mode 100644
index 000000000..30c36be75
--- /dev/null
+++ b/ansible/plugins/mitogen-0.2.6/ansible_mitogen/runner.py
@@ -0,0 +1,928 @@
+# Copyright 2019, David Wilson
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its contributors
+# may be used to endorse or promote products derived from this software without
+# specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+
+# !mitogen: minify_safe
+
+"""
+These classes implement execution for each style of Ansible module. They are
+instantiated in the target context by way of target.py::run_module().
+
+Each class in here has a corresponding Planner class in planners.py that knows
+how to build arguments for it, preseed related data, etc.
+"""
+
+import atexit
+import codecs
+import imp
+import os
+import re
+import shlex
+import shutil
+import sys
+import tempfile
+import traceback
+import types
+
+import mitogen.core
+import ansible_mitogen.target  # TODO: circular import
+from mitogen.core import b
+from mitogen.core import bytes_partition
+from mitogen.core import str_partition
+from mitogen.core import str_rpartition
+from mitogen.core import to_text
+
+try:
+    import ctypes
+except ImportError:
+    # Python 2.4
+    ctypes = None
+
+try:
+    import json
+except ImportError:
+    # Python 2.4
+    import simplejson as json
+
+try:
+    # Cannot use cStringIO as it does not support Unicode.
+    from StringIO import StringIO
+except ImportError:
+    from io import StringIO
+
+try:
+    from shlex import quote as shlex_quote
+except ImportError:
+    from pipes import quote as shlex_quote
+
+# Absolute imports for <2.5.
+# Absolute imports for <2.5.
+logging = __import__('logging')
+
+
+# Prevent accidental import of an Ansible module from hanging on stdin read.
+import ansible.module_utils.basic
+ansible.module_utils.basic._ANSIBLE_ARGS = '{}'
+
+# For tasks that modify /etc/resolv.conf, non-Debian derivative glibcs cache
+# resolv.conf at startup and never implicitly reload it. Cope with that via an
+# explicit call to res_init() on each task invocation. BSD-alikes export it
+# directly, Linux #defines it as "__res_init".
+libc__res_init = None
+if ctypes:
+    libc = ctypes.CDLL(None)
+    for symbol in 'res_init', '__res_init':
+        try:
+            libc__res_init = getattr(libc, symbol)
+        except AttributeError:
+            pass
+
+# Py2/Py3-portable dict item iteration.
+iteritems = getattr(dict, 'iteritems', dict.items)
+LOG = logging.getLogger(__name__)
+
+
+# On Python 2, shlex.split() cannot handle unicode input; wrap it so tokens
+# are coerced back to text after splitting the str-encoded form.
+if mitogen.core.PY3:
+    shlex_split = shlex.split
+else:
+    def shlex_split(s, comments=False):
+        return [mitogen.core.to_text(token)
+                for token in shlex.split(str(s), comments=comments)]
+
+
+class EnvironmentFileWatcher(object):
+    """
+    Usually Ansible edits to /etc/environment and ~/.pam_environment are
+    reflected in subsequent tasks if become:true or SSH multiplexing is
+    disabled, due to sudo and/or SSH reinvoking pam_env. Rather than emulate
+    existing semantics, do our best to ensure edits are always reflected.
+
+    This can't perfectly replicate the existing behaviour, but it can safely
+    update and remove keys that appear to originate in `path`, and that do not
+    conflict with any existing environment key inherited from elsewhere.
+
+    A more robust future approach may simply be to arrange for the persistent
+    interpreter to restart when a change is detected.
+
+    :param str path:
+        Path of the pam_env-style file to watch; "~" is expanded.
+    """
+    def __init__(self, path):
+        self.path = os.path.expanduser(path)
+        #: Inode data at time of last check.
+        self._st = self._stat()
+        #: List of inherited keys appearing to originate from this file.
+        self._keys = [key for key, value in self._load()
+                      if value == os.environ.get(key)]
+        LOG.debug('%r installed; existing keys: %r', self, self._keys)
+
+    def __repr__(self):
+        return 'EnvironmentFileWatcher(%r)' % (self.path,)
+
+    def _stat(self):
+        # :returns: os.stat result, or None if the file cannot be stat()ed.
+        try:
+            return os.stat(self.path)
+        except OSError:
+            return None
+
+    def _load(self):
+        # :returns: list of (key, value) pairs; empty if the file is unreadable.
+        try:
+            fp = codecs.open(self.path, 'r', encoding='utf-8')
+            try:
+                return list(self._parse(fp))
+            finally:
+                fp.close()
+        except IOError:
+            return []
+
+    def _parse(self, fp):
+        """
+        Yield (key, value) pairs parsed pam_env style; see
+        linux-pam-1.3.1/modules/pam_env/pam_env.c#L207
+        """
+        for line in fp:
+            # '   #export foo=some var  ' -> ['#export', 'foo=some var  ']
+            bits = shlex_split(line, comments=True)
+            if (not bits) or bits[0].startswith('#'):
+                continue
+
+            if bits[0] == u'export':
+                bits.pop(0)
+
+            key, sep, value = str_partition(u' '.join(bits), u'=')
+            if key and sep:
+                yield key, value
+
+    def _on_file_changed(self):
+        # Apply new file contents, never clobbering keys inherited from
+        # elsewhere in the environment.
+        LOG.debug('%r: file changed, reloading', self)
+        for key, value in self._load():
+            if key in os.environ:
+                LOG.debug('%r: existing key %r=%r exists, not setting %r',
+                          self, key, os.environ[key], value)
+            else:
+                LOG.debug('%r: setting key %r to %r', self, key, value)
+                self._keys.append(key)
+                os.environ[key] = value
+
+    def _remove_existing(self):
+        """
+        When a change is detected, remove keys that existed in the old file.
+        """
+        for key in self._keys:
+            if key in os.environ:
+                LOG.debug('%r: removing old key %r', self, key)
+                del os.environ[key]
+        self._keys = []
+
+    def check(self):
+        """
+        Compare the current :func:`os.stat` result for the pam_env style
+        environment file `path` against the result cached by the previous
+        check, which may be :data:`None` if the previous stat attempt failed.
+        If the file has changed, appeared or disappeared since, remove keys
+        previously sourced from it and load any new contents.
+        """
+        st = self._stat()
+        if self._st == st:
+            return
+
+        self._st = st
+        self._remove_existing()
+
+        if st is None:
+            LOG.debug('%r: file has disappeared', self)
+        else:
+            self._on_file_changed()
+
+# Module-level watchers checked once per task by Runner._setup_environ().
+_pam_env_watcher = EnvironmentFileWatcher('~/.pam_environment')
+_etc_env_watcher = EnvironmentFileWatcher('/etc/environment')
+
+
+def utf8(s):
+    """
+    Coerce an object to bytes if it is Unicode.
+
+    :param s: Any object; only Unicode strings are converted.
+    :returns: UTF-8 encoded bytes, or `s` unchanged.
+    """
+    if isinstance(s, mitogen.core.UnicodeType):
+        s = s.encode('utf-8')
+    return s
+
+
+def reopen_readonly(fp):
+    """
+    Replace the file descriptor belonging to the file object `fp` with one
+    open on the same file (`fp.name`), but opened with :py:data:`os.O_RDONLY`.
+    This enables temporary files to be executed on Linux, which usually throws
+    ``ETXTBUSY`` if any writeable handle exists pointing to a file passed to
+    `execve()`.
+
+    :param file fp:
+        Open file object whose descriptor is replaced in place.
+    """
+    fd = os.open(fp.name, os.O_RDONLY)
+    # dup2 atomically swaps the read-only descriptor into fp's slot.
+    os.dup2(fd, fp.fileno())
+    os.close(fd)
+
+
+class Runner(object):
+    """
+    Ansible module runner. After instantiation (with kwargs supplied by the
+    corresponding Planner), `.run()` is invoked, upon which `setup()`,
+    `_run()`, and `revert()` are invoked, with the return value of `_run()`
+    returned by `run()`.
+
+    Subclasses may override `_run()` and extend `setup()` and `revert()`.
+
+    :param str module:
+        Name of the module to execute, e.g. "shell"
+    :param mitogen.core.Context service_context:
+        Context to which we should direct FileService calls. For now, always
+        the connection multiplexer process on the controller.
+    :param str json_args:
+        Ansible module arguments. A mixture of user and internal keys created
+        by :meth:`ansible.plugins.action.ActionBase._execute_module`.
+
+        This is passed as a string rather than a dict in order to mimic the
+        implicit bytes/str conversion behaviour of a 2.x controller running
+        against a 3.x target.
+    :param str good_temp_dir:
+        The writeable temporary directory for this user account reported by
+        :func:`ansible_mitogen.target.init_child` passed via the controller.
+        This is specified explicitly to remain compatible with Ansible<2.5, and
+        for forked tasks where init_child never runs.
+    :param dict extra_env:
+        Base environment overlay applied before `env`; may be :data:`None`.
+    :param dict env:
+        Additional environment variables to set during the run. Keys with
+        :data:`None` are unset if present.
+    :param str cwd:
+        If not :data:`None`, change to this directory before executing.
+    :param mitogen.core.ExternalContext econtext:
+        When `detach` is :data:`True`, a reference to the ExternalContext the
+        runner is executing in.
+    :param bool detach:
+        When :data:`True`, indicate the runner should detach the context from
+        its parent after setup has completed successfully.
+    """
+    def __init__(self, module, service_context, json_args, good_temp_dir,
+                 extra_env=None, cwd=None, env=None, econtext=None,
+                 detach=False):
+        self.module = module
+        self.service_context = service_context
+        self.econtext = econtext
+        self.detach = detach
+        # Decoded eagerly so malformed JSON fails before any side effects.
+        self.args = json.loads(mitogen.core.to_text(json_args))
+        self.good_temp_dir = good_temp_dir
+        self.extra_env = extra_env
+        self.env = env
+        self.cwd = cwd
+        #: If not :data:`None`, :meth:`get_temp_dir` had to create a temporary
+        #: directory for this run, because we're in an asynchronous task, or
+        #: because the originating action did not create a directory.
+        self._temp_dir = None
+
+    def get_temp_dir(self):
+        """
+        Return the task's temporary directory: the controller-supplied
+        `_ansible_tmpdir` if present, otherwise a lazily created private
+        directory beneath `good_temp_dir` (pruned by :meth:`revert_temp_dir`).
+        """
+        path = self.args.get('_ansible_tmpdir')
+        if path is not None:
+            return path
+
+        if self._temp_dir is None:
+            self._temp_dir = tempfile.mkdtemp(
+                prefix='ansible_mitogen_runner_',
+                dir=self.good_temp_dir,
+            )
+
+        return self._temp_dir
+
+    def revert_temp_dir(self):
+        """
+        Delete any temporary directory created by :meth:`get_temp_dir`.
+        """
+        if self._temp_dir is not None:
+            ansible_mitogen.target.prune_tree(self._temp_dir)
+            self._temp_dir = None
+
+    def setup(self):
+        """
+        Prepare for running a module, including fetching necessary dependencies
+        from the parent, as :meth:`run` may detach prior to beginning
+        execution. The base implementation simply prepares the environment.
+        """
+        self._setup_cwd()
+        self._setup_environ()
+
+    def _setup_cwd(self):
+        """
+        For situations like sudo to a non-privileged account, CWD could be
+        $HOME of the old account, which could have mode go=, which means it is
+        impossible to restore the old directory, so don't even try.
+        """
+        if self.cwd:
+            os.chdir(self.cwd)
+
+    def _setup_environ(self):
+        """
+        Apply changes from /etc/environment files before creating a
+        TemporaryEnvironment to snapshot environment state prior to module run.
+        """
+        _pam_env_watcher.check()
+        _etc_env_watcher.check()
+        # env entries take precedence over extra_env on key collision.
+        env = dict(self.extra_env or {})
+        if self.env:
+            env.update(self.env)
+        self._env = TemporaryEnvironment(env)
+
+    def revert(self):
+        """
+        Revert any changes made to the process after running a module. The base
+        implementation simply restores the original environment.
+        """
+        self._env.revert()
+        self.revert_temp_dir()
+
+    def _run(self):
+        """
+        The _run() method is expected to return a dictionary in the form of
+        ActionBase._low_level_execute_command() output, i.e. having::
+
+            {
+                "rc": int,
+                "stdout": "stdout data",
+                "stderr": "stderr data"
+            }
+        """
+        raise NotImplementedError()
+
+    def run(self):
+        """
+        Set up the process environment in preparation for running an Ansible
+        module. This monkey-patches the Ansible libraries in various places to
+        prevent it from trying to kill the process on completion, and to
+        prevent it from reading sys.stdin.
+
+        :returns:
+            Module result dictionary.
+        """
+        self.setup()
+        if self.detach:
+            self.econtext.detach()
+
+        try:
+            return self._run()
+        finally:
+            # Always restore environment/tmpdir, even if the module raised.
+            self.revert()
+
+
+class AtExitWrapper(object):
+    """
+    issue #397, #454: Newer Ansibles use :func:`atexit.register` to trigger
+    tmpdir cleanup when AnsibleModule.tmpdir is responsible for creating its
+    own temporary directory, however with Mitogen processes are preserved
+    across tasks, meaning cleanup must happen earlier.
+
+    Patch :func:`atexit.register`, catching :func:`shutil.rmtree` calls so they
+    can be executed on task completion, rather than on process shutdown.
+    """
+    # Wrapped in a dict to avoid instance method decoration.
+    original = {
+        'register': atexit.register
+    }
+
+    def __init__(self):
+        # Installing twice would capture our own wrapper as "original".
+        assert atexit.register == self.original['register'], \
+            "AtExitWrapper installed twice."
+        atexit.register = self._atexit__register
+        self.deferred = []
+
+    def revert(self):
+        """
+        Restore the original :func:`atexit.register`.
+        """
+        assert atexit.register == self._atexit__register, \
+            "AtExitWrapper not installed."
+        atexit.register = self.original['register']
+
+    def run_callbacks(self):
+        """
+        Execute and drain every deferred rmtree callback, logging (not
+        propagating) any exception so one failure cannot skip the rest.
+        """
+        while self.deferred:
+            func, targs, kwargs = self.deferred.pop()
+            try:
+                func(*targs, **kwargs)
+            except Exception:
+                LOG.exception('While running atexit callbacks')
+
+    def _atexit__register(self, func, *targs, **kwargs):
+        """
+        Intercept :func:`atexit.register` calls, diverting any to
+        :func:`shutil.rmtree` into a private list.
+        """
+        if func == shutil.rmtree:
+            self.deferred.append((func, targs, kwargs))
+            return
+
+        # Anything else passes through to the real atexit.register.
+        self.original['register'](func, *targs, **kwargs)
+
+
+class ModuleUtilsImporter(object):
+    """
+    PEP 302 style finder/loader installed on :data:`sys.meta_path` that serves
+    Ansible module_utils files fetched from the controller via FileService.
+
+    :param mitogen.core.Context context:
+        Context used to fetch file contents (see
+        :func:`ansible_mitogen.target.get_small_file`).
+    :param list module_utils:
+        List of `(fullname, path, is_pkg)` tuples.
+    """
+    def __init__(self, context, module_utils):
+        self._context = context
+        self._by_fullname = dict(
+            (fullname, (path, is_pkg))
+            for fullname, path, is_pkg in module_utils
+        )
+        #: Fullnames of modules loaded by us; popped from sys.modules on revert.
+        self._loaded = set()
+        # Prepend so we win over the normal import machinery for these names.
+        sys.meta_path.insert(0, self)
+
+    def revert(self):
+        """
+        Uninstall the importer and evict every module it loaded.
+        """
+        sys.meta_path.remove(self)
+        for fullname in self._loaded:
+            sys.modules.pop(fullname, None)
+
+    def find_module(self, fullname, path=None):
+        # PEP 302: claim only the names we were explicitly given.
+        if fullname in self._by_fullname:
+            return self
+
+    def load_module(self, fullname):
+        """
+        PEP 302 loader: fetch `fullname`'s source from the controller, compile
+        and execute it into a new module object registered in sys.modules.
+        """
+        path, is_pkg = self._by_fullname[fullname]
+        source = ansible_mitogen.target.get_small_file(self._context, path)
+        # flags=0, dont_inherit=1: do not leak this file's __future__ imports.
+        code = compile(source, path, 'exec', 0, 1)
+        mod = sys.modules.setdefault(fullname, imp.new_module(fullname))
+        mod.__file__ = "master:%s" % (path,)
+        mod.__loader__ = self
+        if is_pkg:
+            mod.__path__ = []
+            mod.__package__ = str(fullname)
+        else:
+            mod.__package__ = str(str_rpartition(to_text(fullname), '.')[0])
+        exec(code, mod.__dict__)
+        self._loaded.add(fullname)
+        return mod
+
+
+class TemporaryEnvironment(object):
+    """
+    Apply environment changes from `env` until :meth:`revert` is called. Values
+    in the dict may be :data:`None` to indicate the relevant key should be
+    deleted.
+    """
+    def __init__(self, env=None):
+        self.original = dict(os.environ)
+        self.env = env or {}
+        for key, value in iteritems(self.env):
+            key = mitogen.core.to_text(key)
+            value = mitogen.core.to_text(value)
+            if value is None:
+                os.environ.pop(key, None)
+            else:
+                os.environ[key] = str(value)
+
+    def revert(self):
+        """
+        Revert changes made by the module to the process environment. This must
+        always run, as some modules (e.g. git.py) set variables like GIT_SSH
+        that must be cleared out between runs.
+        """
+        os.environ.clear()
+        os.environ.update(self.original)
+
+
+class TemporaryArgv(object):
+    """
+    Replace the contents of :data:`sys.argv` with `argv` until :meth:`revert`
+    is called.
+
+    :param list argv:
+        Replacement argument vector; elements are coerced to str.
+    """
+    def __init__(self, argv):
+        # Copy, not alias: sys.argv is mutated in place below.
+        self.original = sys.argv[:]
+        # Slice-assign so existing references to the sys.argv list observe the
+        # new contents.
+        sys.argv[:] = map(str, argv)
+
+    def revert(self):
+        """Restore the original :data:`sys.argv` contents in place."""
+        sys.argv[:] = self.original
+
+
+class NewStyleStdio(object):
+    """
+    Patch ansible.module_utils.basic argument globals.
+    """
+    def __init__(self, args, temp_dir):
+        self.temp_dir = temp_dir
+        self.original_stdout = sys.stdout
+        self.original_stderr = sys.stderr
+        self.original_stdin = sys.stdin
+        # Capture everything the module prints; collected by the runner.
+        sys.stdout = StringIO()
+        sys.stderr = StringIO()
+        encoded = json.dumps({'ANSIBLE_MODULE_ARGS': args})
+        # AnsibleModule reads its arguments from this global rather than
+        # from the real stdin; both are patched for safety.
+        ansible.module_utils.basic._ANSIBLE_ARGS = utf8(encoded)
+        sys.stdin = StringIO(mitogen.core.to_text(encoded))
+
+        # get_module_path() does not exist in every Ansible release, hence
+        # the getattr() default.
+        self.original_get_path = getattr(ansible.module_utils.basic,
+                                        'get_module_path', None)
+        ansible.module_utils.basic.get_module_path = self._get_path
+
+    def _get_path(self):
+        # Replacement for get_module_path(): report our temp directory.
+        return self.temp_dir
+
+    def revert(self):
+        """
+        Restore the real stdio objects and the argument globals.
+        """
+        ansible.module_utils.basic.get_module_path = self.original_get_path
+        sys.stdout = self.original_stdout
+        sys.stderr = self.original_stderr
+        sys.stdin = self.original_stdin
+        # Reset to the default sentinel so stale arguments cannot leak into
+        # a later run.
+        ansible.module_utils.basic._ANSIBLE_ARGS = '{}'
+
+
+class ProgramRunner(Runner):
+    """
+    Base class for runners that run external programs.
+
+    :param str path:
+        Absolute path to the program file on the master, as it can be retrieved
+        via :class:`mitogen.service.FileService`.
+    :param bool emulate_tty:
+        If :data:`True`, execute the program with `stdout` and `stderr` merged
+        into a single pipe, emulating Ansible behaviour when an SSH TTY is in
+        use.
+    """
+    def __init__(self, path, emulate_tty=None, **kwargs):
+        super(ProgramRunner, self).__init__(**kwargs)
+        self.emulate_tty = emulate_tty
+        self.path = path
+
+    def setup(self):
+        super(ProgramRunner, self).setup()
+        self._setup_program()
+
+    def _get_program_filename(self):
+        """
+        Return the filename used for program on disk. Ansible uses the original
+        filename for non-Ansiballz runs, and "ansible_module_" + filename for
+        Ansiballz runs.
+        """
+        return os.path.basename(self.path)
+
+    # File object for the program written to the target's temp directory;
+    # None until _setup_program() runs.
+    program_fp = None
+
+    def _setup_program(self):
+        """
+        Create a temporary file containing the program code. The code is
+        fetched via :meth:`_get_program`.
+        """
+        filename = self._get_program_filename()
+        path = os.path.join(self.get_temp_dir(), filename)
+        self.program_fp = open(path, 'wb')
+        self.program_fp.write(self._get_program())
+        self.program_fp.flush()
+        os.chmod(self.program_fp.name, int('0700', 8))
+        # Swap the writable descriptor for a read-only one — presumably to
+        # avoid ETXTBSY when the file is executed; see reopen_readonly().
+        reopen_readonly(self.program_fp)
+
+    def _get_program(self):
+        """
+        Fetch the module binary from the master if necessary.
+        """
+        return ansible_mitogen.target.get_small_file(
+            context=self.service_context,
+            path=self.path,
+        )
+
+    def _get_program_args(self):
+        """
+        Return any arguments to pass to the program.
+        """
+        return []
+
+    def revert(self):
+        """
+        Delete the temporary program file.
+        """
+        if self.program_fp:
+            self.program_fp.close()
+        super(ProgramRunner, self).revert()
+
+    def _get_argv(self):
+        """
+        Return the final argument vector used to execute the program.
+        """
+        return [
+            self.args.get('_ansible_shell_executable', '/bin/sh'),
+            '-c',
+            self._get_shell_fragment(),
+        ]
+
+    def _get_shell_fragment(self):
+        # Quote the program path and each argument individually for the shell.
+        return "%s %s" % (
+            shlex_quote(self.program_fp.name),
+            ' '.join(map(shlex_quote, self._get_program_args())),
+        )
+
+    def _run(self):
+        try:
+            rc, stdout, stderr = ansible_mitogen.target.exec_args(
+                args=self._get_argv(),
+                emulate_tty=self.emulate_tty,
+            )
+        except Exception:
+            LOG.exception('While running %s', self._get_argv())
+            e = sys.exc_info()[1]
+            # Failure to execute at all is reported as the module's result
+            # dict rather than propagated to the caller.
+            return {
+                u'rc': 1,
+                u'stdout': u'',
+                u'stderr': u'%s: %s' % (type(e), e),
+            }
+
+        return {
+            u'rc': rc,
+            u'stdout': mitogen.core.to_text(stdout),
+            u'stderr': mitogen.core.to_text(stderr),
+        }
+
+
+class ArgsFileRunner(Runner):
+    def setup(self):
+        super(ArgsFileRunner, self).setup()
+        self._setup_args()
+
+    def _setup_args(self):
+        """
+        Create a temporary file containing the module's arguments. The
+        arguments are formatted via :meth:`_get_args`.
+        """
+        self.args_fp = tempfile.NamedTemporaryFile(
+            prefix='ansible_mitogen',
+            suffix='-args',
+            dir=self.get_temp_dir(),
+        )
+        self.args_fp.write(utf8(self._get_args_contents()))
+        self.args_fp.flush()
+        reopen_readonly(self.program_fp)
+
+    def _get_args_contents(self):
+        """
+        Return the module arguments formatted as JSON.
+        """
+        return json.dumps(self.args)
+
+    def _get_program_args(self):
+        return [self.args_fp.name]
+
+    def revert(self):
+        """
+        Delete the temporary argument file.
+        """
+        self.args_fp.close()
+        super(ArgsFileRunner, self).revert()
+
+
+class BinaryRunner(ArgsFileRunner, ProgramRunner):
+    # Executes a compiled/binary module: the program is copied to the target
+    # (ProgramRunner) and receives a JSON args file path (ArgsFileRunner).
+    pass
+
+
+class ScriptRunner(ProgramRunner):
+    """
+    Run a module written in an arbitrary scripting language, rewriting its
+    hashbang line to the interpreter fragment configured via
+    ansible_*_interpreter before execution.
+    """
+    def __init__(self, interpreter_fragment, is_python, **kwargs):
+        super(ScriptRunner, self).__init__(**kwargs)
+        self.interpreter_fragment = interpreter_fragment
+        self.is_python = is_python
+
+    # Coding cookie inserted after the hashbang for Python scripts.
+    b_ENCODING_STRING = b('# -*- coding: utf-8 -*-')
+
+    def _get_program(self):
+        # Rewrite the fetched source before it is written to disk.
+        return self._rewrite_source(
+            super(ScriptRunner, self)._get_program()
+        )
+
+    def _get_argv(self):
+        return [
+            self.args.get('_ansible_shell_executable', '/bin/sh'),
+            '-c',
+            self._get_shell_fragment(),
+        ]
+
+    def _get_shell_fragment(self):
+        """
+        Scripts are eligible for having their hashbang line rewritten, and to
+        be executed via /bin/sh using the ansible_*_interpreter value used as a
+        shell fragment prefixing to the invocation.
+        """
+        return "%s %s %s" % (
+            self.interpreter_fragment,
+            shlex_quote(self.program_fp.name),
+            ' '.join(map(shlex_quote, self._get_program_args())),
+        )
+
+    def _rewrite_source(self, s):
+        """
+        Mutate the source according to the per-task parameters.
+        """
+        # While Ansible rewrites the #! using ansible_*_interpreter, it is
+        # never actually used to execute the script, instead it is a shell
+        # fragment consumed by shell/__init__.py::build_module_command().
+        new = [b('#!') + utf8(self.interpreter_fragment)]
+        if self.is_python:
+            new.append(self.b_ENCODING_STRING)
+
+        # Drop the original first line (the old hashbang), keep the rest.
+        _, _, rest = bytes_partition(s, b('\n'))
+        new.append(rest)
+        return b('\n').join(new)
+
+
+class NewStyleRunner(ScriptRunner):
+    """
+    Execute a new-style Ansible module, where Module Replacer-related tricks
+    aren't required.
+    """
+    #: path => new-style module bytecode. Class-level: compiled modules are
+    #: shared by every runner instance in this process.
+    _code_by_path = {}
+
+    def __init__(self, module_map, **kwargs):
+        super(NewStyleRunner, self).__init__(**kwargs)
+        self.module_map = module_map
+
+    def _setup_imports(self):
+        """
+        Ensure the local importer and PushFileService has everything for the
+        Ansible module before setup() completes, but before detach() is called
+        in an asynchronous task.
+
+        The master automatically streams modules towards us concurrent to the
+        runner invocation, however there is no public API to synchronize on the
+        completion of those preloads. Instead simply reuse the importer's
+        synchronization mechanism by importing everything the module will need
+        prior to detaching.
+        """
+        for fullname, _, _ in self.module_map['custom']:
+            mitogen.core.import_module(fullname)
+        for fullname in self.module_map['builtin']:
+            mitogen.core.import_module(fullname)
+
+    def _setup_excepthook(self):
+        """
+        Starting with Ansible 2.6, some modules (file.py) install a
+        sys.excepthook and never clean it up. So we must preserve the original
+        excepthook and restore it after the run completes.
+        """
+        self.original_excepthook = sys.excepthook
+
+    def setup(self):
+        super(NewStyleRunner, self).setup()
+
+        self._stdio = NewStyleStdio(self.args, self.get_temp_dir())
+        # It is possible that not supplying the script filename will break some
+        # module, but this has never been a bug report. Instead act like an
+        # interpreter that had its script piped on stdin.
+        self._argv = TemporaryArgv([''])
+        self._importer = ModuleUtilsImporter(
+            context=self.service_context,
+            module_utils=self.module_map['custom'],
+        )
+        self._setup_imports()
+        self._setup_excepthook()
+        self.atexit_wrapper = AtExitWrapper()
+        # Presumably to pick up /etc/resolv.conf changes between runs; the
+        # symbol is falsy where unavailable — see res_init(3).
+        if libc__res_init:
+            libc__res_init()
+
+    def _revert_excepthook(self):
+        sys.excepthook = self.original_excepthook
+
+    def revert(self):
+        # Unwind in reverse order of setup().
+        # NOTE(review): self._importer is never reverted here, leaving its
+        # sys.meta_path hook installed — confirm whether this is intentional
+        # (e.g. to keep module_utils cached between runs).
+        self.atexit_wrapper.revert()
+        self._argv.revert()
+        self._stdio.revert()
+        self._revert_excepthook()
+        super(NewStyleRunner, self).revert()
+
+    def _get_program_filename(self):
+        """
+        See ProgramRunner._get_program_filename().
+        """
+        return 'ansible_module_' + os.path.basename(self.path)
+
+    def _setup_args(self):
+        # No args file is needed: arguments reach the module in-process via
+        # NewStyleStdio's patched _ANSIBLE_ARGS/stdin.
+        pass
+
+    # issue #555: in old times it was considered good form to reload sys and
+    # change the default encoding. This hack was removed from Ansible long ago,
+    # but not before permeating into many third party modules.
+    PREHISTORIC_HACK_RE = re.compile(
+        b(r'reload\s*\(\s*sys\s*\)\s*'
+          r'sys\s*\.\s*setdefaultencoding\([^)]+\)')
+    )
+
+    def _setup_program(self):
+        # Unlike ProgramRunner, the module source is kept in memory and
+        # exec'd directly; nothing is written to disk.
+        source = ansible_mitogen.target.get_small_file(
+            context=self.service_context,
+            path=self.path,
+        )
+        self.source = self.PREHISTORIC_HACK_RE.sub(b(''), source)
+
+    def _get_code(self):
+        """
+        Compile self.source, memoizing the code object per path in the
+        class-level cache.
+        """
+        try:
+            return self._code_by_path[self.path]
+        except KeyError:
+            return self._code_by_path.setdefault(self.path, compile(
+                # Py2.4 doesn't support kwargs.
+                self.source,            # source
+                "master:" + self.path,  # filename
+                'exec',                 # mode
+                0,                      # flags
+                True,                   # dont_inherit
+            ))
+
+    # Module name type differs by major version: text on 3.x, bytes on 2.x.
+    if mitogen.core.PY3:
+        main_module_name = '__main__'
+    else:
+        main_module_name = b('__main__')
+
+    def _handle_magic_exception(self, mod, exc):
+        """
+        Beginning with Ansible >2.6, some modules (file.py) install a
+        sys.excepthook which is a closure over AnsibleModule, redirecting the
+        magical exception to AnsibleModule.fail_json().
+
+        For extra special needs bonus points, the class is not defined in
+        module_utils, but is defined in the module itself, meaning there is no
+        type for isinstance() that outlasts the invocation.
+        """
+        klass = getattr(mod, 'AnsibleModuleError', None)
+        if klass and isinstance(exc, klass):
+            mod.module.fail_json(**exc.results)
+
+    def _run_code(self, code, mod):
+        try:
+            if mitogen.core.PY3:
+                exec(code, vars(mod))
+            else:
+                # Statement form of exec, hidden in a string so this file
+                # still parses on Python 3.
+                exec('exec code in vars(mod)')
+        except Exception:
+            self._handle_magic_exception(mod, sys.exc_info()[1])
+            raise
+
+    def _run(self):
+        mod = types.ModuleType(self.main_module_name)
+        mod.__package__ = None
+        # Some Ansible modules use __file__ to find the Ansiballz temporary
+        # directory. We must provide some temporary path in __file__, but we
+        # don't want to pointlessly write the module to disk when it never
+        # actually needs to exist. So just pass the filename as it would exist.
+        mod.__file__ = os.path.join(
+            self.get_temp_dir(),
+            'ansible_module_' + os.path.basename(self.path),
+        )
+
+        code = self._get_code()
+        rc = 2
+        try:
+            try:
+                self._run_code(code, mod)
+            except SystemExit:
+                exc = sys.exc_info()[1]
+                # NOTE(review): sys.exit() with no argument leaves args empty
+                # (IndexError here), and sys.exit('msg') yields a string rc —
+                # confirm modules always exit with an integer status.
+                rc = exc.args[0]
+            except Exception:
+                # This writes to stderr by default.
+                traceback.print_exc()
+                rc = 1
+
+        finally:
+            self.atexit_wrapper.run_callbacks()
+
+        return {
+            u'rc': rc,
+            u'stdout': mitogen.core.to_text(sys.stdout.getvalue()),
+            u'stderr': mitogen.core.to_text(sys.stderr.getvalue()),
+        }
+
+
+class JsonArgsRunner(ScriptRunner):
+    """
+    Run a script that embeds the JSON_ARGS placeholder, substituting the
+    JSON-encoded task arguments directly into the source.
+    """
+    JSON_ARGS = b('<<INCLUDE_ANSIBLE_MODULE_JSON_ARGS>>')
+
+    def _get_args_contents(self):
+        return json.dumps(self.args).encode()
+
+    def _rewrite_source(self, s):
+        # Hashbang rewrite (superclass) plus placeholder substitution.
+        return (
+            super(JsonArgsRunner, self)._rewrite_source(s)
+            .replace(self.JSON_ARGS, self._get_args_contents())
+        )
+
+
+class WantJsonRunner(ArgsFileRunner, ScriptRunner):
+    # Script advertising WANT_JSON: arguments are supplied as the path of a
+    # JSON file appended to argv (ArgsFileRunner's default JSON formatting).
+    pass
+
+
+class OldStyleRunner(ArgsFileRunner, ScriptRunner):
+    def _get_args_contents(self):
+        """
+        Mimic the argument formatting behaviour of
+        ActionBase._execute_module().
+        """
+        # KEY=VALUE pairs, each value shell-quoted, with a trailing space to
+        # match Ansible's own output exactly.
+        return ' '.join(
+            '%s=%s' % (key, shlex_quote(str(self.args[key])))
+            for key in self.args
+        ) + ' '  # Bug-for-bug :(
diff --git a/ansible/plugins/mitogen-0.2.6/ansible_mitogen/services.py b/ansible/plugins/mitogen-0.2.6/ansible_mitogen/services.py
new file mode 100644
index 000000000..a7c0e46f3
--- /dev/null
+++ b/ansible/plugins/mitogen-0.2.6/ansible_mitogen/services.py
@@ -0,0 +1,537 @@
+# Copyright 2019, David Wilson
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its contributors
+# may be used to endorse or promote products derived from this software without
+# specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+
+# !mitogen: minify_safe
+
+"""
+Classes in this file define Mitogen 'services' that run (initially) within the
+connection multiplexer process that is forked off the top-level controller
+process.
+
+Once a worker process connects to a multiplexer process
+(Connection._connect()), it communicates with these services to establish new
+connections, grant access to files by children, and register for notification
+when a child has completed a job.
+"""
+
+from __future__ import absolute_import
+from __future__ import unicode_literals
+
+import logging
+import os
+import os.path
+import sys
+import threading
+
+import ansible.constants
+
+import mitogen
+import mitogen.service
+import mitogen.utils
+import ansible_mitogen.loaders
+import ansible_mitogen.module_finder
+import ansible_mitogen.target
+
+
+LOG = logging.getLogger(__name__)
+
+# Force load of plugin to ensure ConfigManager has definitions loaded. Done
+# during module import to ensure a single-threaded environment; PluginLoader
+# is not thread-safe.
+ansible_mitogen.loaders.shell_loader.get('sh')
+
+
+# Python 2/3 compatible reraise(): re-raise an exc_info() triple preserving
+# the original traceback. The Python 2 form uses three-argument raise, which
+# is a syntax error on 3.x, so it must be hidden inside exec().
+if sys.version_info[0] == 3:
+    def reraise(tp, value, tb):
+        if value is None:
+            value = tp()
+        if value.__traceback__ is not tb:
+            raise value.with_traceback(tb)
+        raise value
+else:
+    exec(
+        "def reraise(tp, value, tb=None):\n"
+        "    raise tp, value, tb\n"
+     )
+
+
+def _get_candidate_temp_dirs():
+    """
+    Return remote_tmp plus the system fallback temp directories, cast to
+    plain serializable types via mitogen.utils.cast().
+    """
+    try:
+        # >=2.5
+        options = ansible.constants.config.get_plugin_options('shell', 'sh')
+        remote_tmp = options.get('remote_tmp') or ansible.constants.DEFAULT_REMOTE_TMP
+        system_tmpdirs = options.get('system_tmpdirs', ('/var/tmp', '/tmp'))
+    except AttributeError:
+        # 2.3
+        remote_tmp = ansible.constants.DEFAULT_REMOTE_TMP
+        system_tmpdirs = ('/var/tmp', '/tmp')
+
+    return mitogen.utils.cast([remote_tmp] + list(system_tmpdirs))
+
+
+def key_from_dict(**kwargs):
+    """
+    Return a unique string representation of a dict as quickly as possible.
+    Used to generate deduplication keys from a request.
+    """
+    out = []
+    stack = [kwargs]
+    while stack:
+        obj = stack.pop()
+        if isinstance(obj, dict):
+            # sorted() keeps the traversal order (and thus the key)
+            # deterministic regardless of dict iteration order.
+            stack.extend(sorted(obj.items()))
+        elif isinstance(obj, (list, tuple)):
+            stack.extend(obj)
+        else:
+            out.append(str(obj))
+    return ''.join(out)
+
+
+class Error(Exception):
+    """
+    Base exception raised by this module.
+    """
+    pass
+
+
+class ContextService(mitogen.service.Service):
+    """
+    Used by workers to fetch the single Context instance corresponding to a
+    connection configuration, creating the matching connection if it does not
+    exist.
+
+    For connection methods and their parameters, see:
+        https://mitogen.readthedocs.io/en/latest/api.html#context-factories
+
+    This concentrates connections in the top-level process, which may become a
+    bottleneck. The bottleneck can be removed using per-CPU connection
+    processes and arranging for the worker to select one according to a hash of
+    the connection parameters (sharding).
+    """
+    #: Cap on simultaneous interpreters per `via` parent; beyond this the
+    #: LRU logic in :meth:`_update_lru_unlocked` recycles an unreferenced one.
+    max_interpreters = int(os.getenv('MITOGEN_MAX_INTERPRETERS', '20'))
+
+    def __init__(self, *args, **kwargs):
+        super(ContextService, self).__init__(*args, **kwargs)
+        self._lock = threading.Lock()
+        #: Records the :meth:`get` result dict for successful calls, returned
+        #: for identical subsequent calls. Keyed by :meth:`key_from_dict`.
+        self._response_by_key = {}
+        #: List of :class:`mitogen.core.Latch` awaiting the result for a
+        #: particular key.
+        self._latches_by_key = {}
+        #: Mapping of :class:`mitogen.core.Context` -> reference count. Each
+        #: call to :meth:`get` increases this by one. Calls to :meth:`put`
+        #: decrease it by one.
+        self._refs_by_context = {}
+        #: List of contexts in creation order by via= parameter. When
+        #: :attr:`max_interpreters` is reached, the most recently used context
+        #: is destroyed to make room for any additional context.
+        self._lru_by_via = {}
+        #: :func:`key_from_dict` result by Context.
+        self._key_by_context = {}
+        #: Mapping of Context -> parent Context
+        self._via_by_context = {}
+
+    @mitogen.service.expose(mitogen.service.AllowParents())
+    @mitogen.service.arg_spec({
+        'context': mitogen.core.Context
+    })
+    def reset(self, context):
+        """
+        Return a reference, forcing close and discard of the underlying
+        connection. Used for 'meta: reset_connection' or when some other error
+        is detected.
+        """
+        LOG.debug('%r.reset(%r)', self, context)
+        self._lock.acquire()
+        try:
+            self._shutdown_unlocked(context)
+        finally:
+            self._lock.release()
+
+    @mitogen.service.expose(mitogen.service.AllowParents())
+    @mitogen.service.arg_spec({
+        'context': mitogen.core.Context
+    })
+    def put(self, context):
+        """
+        Return a reference, making it eligible for recycling once its reference
+        count reaches zero.
+        """
+        LOG.debug('%r.put(%r)', self, context)
+        self._lock.acquire()
+        try:
+            if self._refs_by_context.get(context, 0) == 0:
+                LOG.warning('%r.put(%r): refcount was 0. shutdown_all called?',
+                            self, context)
+                return
+            self._refs_by_context[context] -= 1
+        finally:
+            self._lock.release()
+
+    def _produce_response(self, key, response):
+        """
+        Reply to every waiting request matching a configuration key with a
+        response dictionary, deleting the list of waiters when done.
+
+        :param str key:
+            Result of :meth:`key_from_dict`
+        :param dict response:
+            Response dictionary
+        :returns:
+            Number of waiters that were replied to.
+        """
+        self._lock.acquire()
+        try:
+            latches = self._latches_by_key.pop(key)
+            count = len(latches)
+            for latch in latches:
+                latch.put(response)
+        finally:
+            self._lock.release()
+        return count
+
+    def _forget_context_unlocked(self, context):
+        # Drop every internal record of `context`. Caller must hold _lock.
+        key = self._key_by_context.get(context)
+        if key is None:
+            LOG.debug('%r: attempt to forget unknown %r', self, context)
+            return
+
+        self._response_by_key.pop(key, None)
+        self._latches_by_key.pop(key, None)
+        self._key_by_context.pop(context, None)
+        self._refs_by_context.pop(context, None)
+        self._via_by_context.pop(context, None)
+        # If the departing context was itself acting as a `via` parent, drop
+        # the LRU list of its children too.
+        self._lru_by_via.pop(context, None)
+
+    def _shutdown_unlocked(self, context, lru=None, new_context=None):
+        """
+        Arrange for `context` to be shut down, and optionally add `new_context`
+        to the LRU list while holding the lock.
+        """
+        LOG.info('%r._shutdown_unlocked(): shutting down %r', self, context)
+        context.shutdown()
+        # NOTE(review): the `lru` parameter is shadowed by the assignment
+        # below, so the value callers pass is never used.
+        via = self._via_by_context.get(context)
+        if via:
+            lru = self._lru_by_via.get(via)
+            if lru:
+                if context in lru:
+                    lru.remove(context)
+                if new_context:
+                    lru.append(new_context)
+        self._forget_context_unlocked(context)
+
+    def _update_lru_unlocked(self, new_context, spec, via):
+        """
+        Update the LRU ("MRU"?) list associated with the connection described
+        by `kwargs`, destroying the most recently created context if the list
+        is full. Finally add `new_context` to the list.
+        """
+        self._via_by_context[new_context] = via
+
+        lru = self._lru_by_via.setdefault(via, [])
+        if len(lru) < self.max_interpreters:
+            lru.append(new_context)
+            return
+
+        # Scan newest-first for a context no worker currently references.
+        for context in reversed(lru):
+            if self._refs_by_context[context] == 0:
+                break
+        else:
+            # for/else: every context is still referenced; nothing to evict.
+            LOG.warning('via=%r reached maximum number of interpreters, '
+                        'but they are all marked as in-use.', via)
+            return
+
+        self._shutdown_unlocked(context, lru=lru, new_context=new_context)
+
+    def _update_lru(self, new_context, spec, via):
+        # Locked wrapper around _update_lru_unlocked().
+        self._lock.acquire()
+        try:
+            self._update_lru_unlocked(new_context, spec, via)
+        finally:
+            self._lock.release()
+
+    @mitogen.service.expose(mitogen.service.AllowParents())
+    def dump(self):
+        """
+        For testing, return a list of dicts describing every currently
+        connected context.
+        """
+        return [
+            {
+                'context_name': context.name,
+                'via': getattr(self._via_by_context.get(context),
+                               'name', None),
+                'refs': self._refs_by_context.get(context),
+            }
+            for context, key in sorted(self._key_by_context.items(),
+                                       key=lambda c_k: c_k[0].context_id)
+        ]
+
+    @mitogen.service.expose(mitogen.service.AllowParents())
+    def shutdown_all(self):
+        """
+        For testing use, arrange for all connections to be shut down.
+        """
+        self._lock.acquire()
+        try:
+            for context in list(self._key_by_context):
+                self._shutdown_unlocked(context)
+        finally:
+            self._lock.release()
+
+    def _on_context_disconnect(self, context):
+        """
+        Respond to Context disconnect event by deleting any record of the no
+        longer reachable context.  This method runs in the Broker thread and
+        must not block.
+        """
+        self._lock.acquire()
+        try:
+            LOG.info('%r: Forgetting %r due to stream disconnect', self, context)
+            self._forget_context_unlocked(context)
+        finally:
+            self._lock.release()
+
+    #: Modules forwarded to every new child before init_child() runs there.
+    ALWAYS_PRELOAD = (
+        'ansible.module_utils.basic',
+        'ansible.module_utils.json_utils',
+        'ansible.release',
+        'ansible_mitogen.runner',
+        'ansible_mitogen.target',
+        'mitogen.fork',
+        'mitogen.service',
+    )
+
+    def _send_module_forwards(self, context):
+        self.router.responder.forward_modules(context, self.ALWAYS_PRELOAD)
+
+    # Cached result of the module-level _get_candidate_temp_dirs().
+    _candidate_temp_dirs = None
+
+    def _get_candidate_temp_dirs(self):
+        """
+        Return a list of locations to try to create the single temporary
+        directory used by the run. This simply caches the (expensive) plugin
+        load of :func:`_get_candidate_temp_dirs`.
+        """
+        if self._candidate_temp_dirs is None:
+            self._candidate_temp_dirs = _get_candidate_temp_dirs()
+        return self._candidate_temp_dirs
+
+    def _connect(self, key, spec, via=None):
+        """
+        Actual connect implementation. Arranges for the Mitogen connection to
+        be created and enqueues an asynchronous call to start the forked task
+        parent in the remote context.
+
+        :param key:
+            Deduplication key representing the connection configuration.
+        :param spec:
+            Connection specification.
+        :returns:
+            Dict like::
+
+                {
+                    'context': mitogen.core.Context or None,
+                    'via': mitogen.core.Context or None,
+                    'init_child_result': {
+                        'fork_context': mitogen.core.Context,
+                        'home_dir': str or None,
+                    },
+                    'msg': str or None
+                }
+
+            Where `context` is a reference to the newly constructed context,
+            `init_child_result` is the result of executing
+            :func:`ansible_mitogen.target.init_child` in that context, `msg` is
+            an error message and the remaining fields are :data:`None`, or
+            `msg` is :data:`None` and the remaining fields are set.
+        """
+        try:
+            method = getattr(self.router, spec['method'])
+        except AttributeError:
+            # NOTE(review): the format string expects a 'transport' key while
+            # the lookup above used 'method'; if spec lacks 'transport' this
+            # raises KeyError instead of Error — confirm spec always has it.
+            raise Error('unsupported method: %(transport)s' % spec)
+
+        context = method(via=via, unidirectional=True, **spec['kwargs'])
+        if via and spec.get('enable_lru'):
+            self._update_lru(context, spec, via)
+
+        # Forget the context when its disconnect event fires.
+        mitogen.core.listen(context, 'disconnect',
+            lambda: self._on_context_disconnect(context))
+
+        self._send_module_forwards(context)
+        init_child_result = context.call(
+            ansible_mitogen.target.init_child,
+            log_level=LOG.getEffectiveLevel(),
+            candidate_temp_dirs=self._get_candidate_temp_dirs(),
+        )
+
+        if os.environ.get('MITOGEN_DUMP_THREAD_STACKS'):
+            from mitogen import debug
+            context.call(debug.dump_to_logger)
+
+        self._key_by_context[context] = key
+        self._refs_by_context[context] = 0
+        return {
+            'context': context,
+            'via': via,
+            'init_child_result': init_child_result,
+            'msg': None,
+        }
+
+    def _wait_or_start(self, spec, via=None):
+        """
+        Return a Latch that will receive the response for `spec`, starting a
+        new connection attempt if this is the first request for its key.
+        """
+        latch = mitogen.core.Latch()
+        key = key_from_dict(via=via, **spec)
+        self._lock.acquire()
+        try:
+            response = self._response_by_key.get(key)
+            if response is not None:
+                self._refs_by_context[response['context']] += 1
+                latch.put(response)
+                return latch
+
+            latches = self._latches_by_key.setdefault(key, [])
+            first = len(latches) == 0
+            latches.append(latch)
+        finally:
+            self._lock.release()
+
+        if first:
+            # I'm the first requestee, so I will create the connection.
+            try:
+                response = self._connect(key, spec, via=via)
+                count = self._produce_response(key, response)
+                # Only record the response for non-error results.
+                self._response_by_key[key] = response
+                # Set the reference count to the number of waiters.
+                self._refs_by_context[response['context']] += count
+            except Exception:
+                # Waiters receive the exc_info() triple; get() re-raises it
+                # via reraise(). No _response_by_key entry is recorded for
+                # failures, so a later request retries the connection.
+                self._produce_response(key, sys.exc_info())
+
+        return latch
+
+    disconnect_msg = (
+        'Channel was disconnected while connection attempt was in progress; '
+        'this may be caused by an abnormal Ansible exit, or due to an '
+        'unreliable target.'
+    )
+
+    @mitogen.service.expose(mitogen.service.AllowParents())
+    @mitogen.service.arg_spec({
+        'stack': list
+    })
+    def get(self, msg, stack):
+        """
+        Return a Context referring to an established connection with the given
+        configuration, establishing new connections as necessary.
+
+        :param list stack:
+            Connection descriptions. Each element is a dict containing 'method'
+            and 'kwargs' keys describing the Router method and arguments.
+            Subsequent elements are proxied via the previous.
+
+        :returns dict:
+            * context: mitogen.parent.Context or None.
+            * init_child_result: Result of :func:`init_child`.
+            * msg: StreamError exception text or None.
+            * method_name: string failing method name.
+        """
+        via = None
+        for spec in stack:
+            try:
+                result = self._wait_or_start(spec, via=via).get()
+                if isinstance(result, tuple):  # exc_info()
+                    reraise(*result)
+                via = result['context']
+            except mitogen.core.ChannelError:
+                return {
+                    'context': None,
+                    'init_child_result': None,
+                    'method_name': spec['method'],
+                    'msg': self.disconnect_msg,
+                }
+            except mitogen.core.StreamError as e:
+                return {
+                    'context': None,
+                    'init_child_result': None,
+                    'method_name': spec['method'],
+                    'msg': str(e),
+                }
+
+        # NOTE(review): an empty `stack` leaves `result` unbound, raising
+        # NameError here — confirm callers always supply at least one spec.
+        return result
+
+
+class ModuleDepService(mitogen.service.Service):
+    """
+    Scan a new-style module and produce a cached mapping of module_utils names
+    to their resolved filesystem paths.
+    """
+    #: Invocations are serialized — presumably because the scan and its cache
+    #: are not safe for concurrent use; confirm against SerializedInvoker docs.
+    invoker_class = mitogen.service.SerializedInvoker
+
+    def __init__(self, *args, **kwargs):
+        super(ModuleDepService, self).__init__(*args, **kwargs)
+        # (module_name, search_path) -> {'builtin': [...], 'custom': [...]}
+        self._cache = {}
+
+    def _get_builtin_names(self, builtin_path, resolved):
+        # Names whose resolved file lives under Ansible's own module_utils.
+        return [
+            mitogen.core.to_text(fullname)
+            for fullname, path, is_pkg in resolved
+            if os.path.abspath(path).startswith(builtin_path)
+        ]
+
+    def _get_custom_tups(self, builtin_path, resolved):
+        # (name, path, is_pkg) tuples for user-supplied module_utils that
+        # must be streamed from the master.
+        return [
+            (mitogen.core.to_text(fullname),
+             mitogen.core.to_text(path),
+             is_pkg)
+            for fullname, path, is_pkg in resolved
+            if not os.path.abspath(path).startswith(builtin_path)
+        ]
+
+    @mitogen.service.expose(policy=mitogen.service.AllowParents())
+    @mitogen.service.arg_spec({
+        'module_name': mitogen.core.UnicodeType,
+        'module_path': mitogen.core.FsPathTypes,
+        'search_path': tuple,
+        'builtin_path': mitogen.core.FsPathTypes,
+        'context': mitogen.core.Context,
+    })
+    def scan(self, module_name, module_path, search_path, builtin_path, context):
+        """
+        Return the module_utils dependencies of `module_name`, split into
+        'builtin' names and 'custom' (name, path, is_pkg) tuples.
+        """
+        # NOTE(review): the cache key omits module_path; two modules sharing
+        # a name but resolved from different paths would collide — confirm
+        # module names are unique per run.
+        key = (module_name, search_path)
+        if key not in self._cache:
+            resolved = ansible_mitogen.module_finder.scan(
+                module_name=module_name,
+                module_path=module_path,
+                search_path=tuple(search_path) + (builtin_path,),
+            )
+            builtin_path = os.path.abspath(builtin_path)
+            builtin = self._get_builtin_names(builtin_path, resolved)
+            custom = self._get_custom_tups(builtin_path, resolved)
+            self._cache[key] = {
+                'builtin': builtin,
+                'custom': custom,
+            }
+        return self._cache[key]
diff --git a/ansible/plugins/mitogen-0.2.6/ansible_mitogen/strategy.py b/ansible/plugins/mitogen-0.2.6/ansible_mitogen/strategy.py
new file mode 100644
index 000000000..ba0ff5251
--- /dev/null
+++ b/ansible/plugins/mitogen-0.2.6/ansible_mitogen/strategy.py
@@ -0,0 +1,257 @@
+# Copyright 2019, David Wilson
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its contributors
+# may be used to endorse or promote products derived from this software without
+# specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+
+from __future__ import absolute_import
+import os
+import signal
+import threading
+
+import mitogen.core
+import ansible_mitogen.affinity
+import ansible_mitogen.loaders
+import ansible_mitogen.mixins
+import ansible_mitogen.process
+
+import ansible.executor.process.worker
+
+
+def _patch_awx_callback():
+    """
+    issue #400: AWX loads a display callback that suffers from thread-safety
+    issues. Detect the presence of older AWX versions and patch the bug.
+    """
+    # AWX uses sitecustomize.py to force-load this package. If it exists, we're
+    # running under AWX.
+    try:
+        from awx_display_callback.events import EventContext
+        from awx_display_callback.events import event_context
+    except ImportError:
+        # Not running under AWX; nothing to patch.
+        return
+
+    if hasattr(EventContext(), '_local'):
+        # Patched version.
+        return
+
+    def patch_add_local(self, **kwargs):
+        # Accumulate context in thread-local storage rather than shared state.
+        tls = vars(self._local)
+        ctx = tls.setdefault('_ctx', {})
+        ctx.update(kwargs)
+
+    EventContext._local = threading.local()
+    EventContext.add_local = patch_add_local
+
+# Applied once at import time.
+_patch_awx_callback()
+
+
+def wrap_action_loader__get(name, *args, **kwargs):
+    """
+    While the mitogen strategy is active, trap action_loader.get() calls,
+    augmenting any fetched class with ActionModuleMixin, which replaces various
+    helper methods inherited from ActionBase with implementations that avoid
+    the use of shell fragments wherever possible.
+
+    This is used instead of static subclassing as it generalizes to third party
+    action modules outside the Ansible tree.
+
+    Returns the adorned class (when class_only is requested), an instance of
+    it, or implicitly None when the loader found no plug-in named `name`.
+    """
+    klass = action_loader__get(name, class_only=True)
+    if klass:
+        # Dynamically mix ActionModuleMixin in front of the real class so its
+        # overrides win in the MRO.
+        bases = (ansible_mitogen.mixins.ActionModuleMixin, klass)
+        adorned_klass = type(str(name), bases, {})
+        if kwargs.get('class_only'):
+            return adorned_klass
+        return adorned_klass(*args, **kwargs)
+
+
+def wrap_connection_loader__get(name, *args, **kwargs):
+    """
+    While the strategy is active, rewrite connection_loader.get() calls for
+    some transports into requests for a compatible Mitogen transport.
+    """
+    # Only transports with a Mitogen equivalent are rewritten; anything else
+    # falls through to the original loader untouched.
+    if name in ('docker', 'kubectl', 'jail', 'local', 'lxc',
+                'lxd', 'machinectl', 'setns', 'ssh'):
+        name = 'mitogen_' + name
+    return connection_loader__get(name, *args, **kwargs)
+
+
+def wrap_worker__run(*args, **kwargs):
+    """
+    While the strategy is active, wrap WorkerProcess.run() so each worker
+    sets its process name, applies the Mitogen affinity policy, and runs the
+    original run() under mitogen.core._profile_hook.
+    """
+    # Ignore parent's attempts to murder us when we still need to write
+    # profiling output.
+    if mitogen.core._profile_hook.__name__ != '_profile_hook':
+        # A non-default hook name implies profiling is enabled.
+        signal.signal(signal.SIGTERM, signal.SIG_IGN)
+
+    ansible_mitogen.logging.set_process_name('task')
+    ansible_mitogen.affinity.policy.assign_worker()
+    return mitogen.core._profile_hook('WorkerProcess',
+        lambda: worker__run(*args, **kwargs)
+    )
+
+
+class StrategyMixin(object):
+    """
+    This mix-in enhances any built-in strategy by arranging for various Mitogen
+    services to be initialized in the Ansible top-level process, and for worker
+    processes to grow support for using those top-level services to communicate
+    with and execute modules on remote hosts.
+
+    Mitogen:
+
+        A private Broker IO multiplexer thread is created to dispatch IO
+        between the local Router and any connected streams, including streams
+        connected to Ansible WorkerProcesses, and SSH commands implementing
+        connections to remote machines.
+
+        A Router is created that implements message dispatch to any locally
+        registered handlers, and message routing for remote streams. Router is
+        the junction point through which WorkerProceses and remote SSH contexts
+        can communicate.
+
+        Router additionally adds message handlers for a variety of base
+        services, review the Standard Handles section of the How It Works guide
+        in the documentation.
+
+        A ContextService is installed as a message handler in the master
+        process and run on a private thread. It is responsible for accepting
+        requests to establish new SSH connections from worker processes, and
+        ensuring precisely one connection exists and is reused for subsequent
+        playbook steps. The service presently runs in a single thread, so to
+        begin with, new SSH connections are serialized.
+
+        Finally a mitogen.unix listener is created through which WorkerProcess
+        can establish a connection back into the master process, in order to
+        avail of ContextService. A UNIX listener socket is necessary as there
+        is no more sane mechanism to arrange for IPC between the Router in the
+        master process, and the corresponding Router in the worker process.
+
+    Ansible:
+
+        PluginLoader monkey patches are installed to catch attempts to create
+        connection and action plug-ins.
+
+        For connection plug-ins, if the desired method is "local" or "ssh", it
+        is redirected to the "mitogen" connection plug-in. That plug-in
+        implements communication via a UNIX socket connection to the top-level
+        Ansible process, and uses ContextService running in the top-level
+        process to actually establish and manage the connection.
+
+        For action plug-ins, the original class is looked up as usual, but a
+        new subclass is created dynamically in order to mix-in
+        ansible_mitogen.target.ActionModuleMixin, which overrides many of the
+        methods usually inherited from ActionBase in order to replace them with
+        pure-Python equivalents that avoid the use of shell.
+
+        In particular, _execute_module() is overridden with an implementation
+        that uses ansible_mitogen.target.run_module() executed in the target
+        Context. run_module() implements module execution by importing the
+        module as if it were a normal Python module, and capturing its output
+        in the remote process. Since the Mitogen module loader is active in the
+        remote process, all the heavy lifting of transferring the action module
+        and its dependencies are automatically handled by Mitogen.
+    """
+    def _install_wrappers(self):
+        """
+        Install our PluginLoader monkey patches and update global variables
+        with references to the real functions.
+        """
+        # The originals are stashed in module globals so the wrap_* functions
+        # above (and _remove_wrappers) can reach them.
+        global action_loader__get
+        action_loader__get = ansible_mitogen.loaders.action_loader.get
+        ansible_mitogen.loaders.action_loader.get = wrap_action_loader__get
+
+        global connection_loader__get
+        connection_loader__get = ansible_mitogen.loaders.connection_loader.get
+        ansible_mitogen.loaders.connection_loader.get = wrap_connection_loader__get
+
+        global worker__run
+        worker__run = ansible.executor.process.worker.WorkerProcess.run
+        ansible.executor.process.worker.WorkerProcess.run = wrap_worker__run
+
+    def _remove_wrappers(self):
+        """
+        Uninstall the PluginLoader monkey patches.
+        """
+        ansible_mitogen.loaders.action_loader.get = action_loader__get
+        ansible_mitogen.loaders.connection_loader.get = connection_loader__get
+        ansible.executor.process.worker.WorkerProcess.run = worker__run
+
+    def _add_plugin_paths(self):
+        """
+        Add the Mitogen plug-in directories to the ModuleLoader path, avoiding
+        the need for manual configuration.
+        """
+        base_dir = os.path.join(os.path.dirname(__file__), 'plugins')
+        ansible_mitogen.loaders.connection_loader.add_directory(
+            os.path.join(base_dir, 'connection')
+        )
+        ansible_mitogen.loaders.action_loader.add_directory(
+            os.path.join(base_dir, 'action')
+        )
+
+    def _queue_task(self, host, task, task_vars, play_context):
+        """
+        Many PluginLoader caches are defective as they are only populated in
+        the ephemeral WorkerProcess. Touch each plug-in path before forking to
+        ensure all workers receive a hot cache.
+        """
+        ansible_mitogen.loaders.module_loader.find_plugin(
+            name=task.action,
+            mod_type='',
+        )
+        ansible_mitogen.loaders.connection_loader.get(
+            name=play_context.connection,
+            class_only=True,
+        )
+        ansible_mitogen.loaders.action_loader.get(
+            name=task.action,
+            class_only=True,
+        )
+
+        return super(StrategyMixin, self)._queue_task(
+            host=host,
+            task=task,
+            task_vars=task_vars,
+            play_context=play_context,
+        )
+
+    def run(self, iterator, play_context, result=0):
+        """
+        Arrange for a mitogen.master.Router to be available for the duration of
+        the strategy's real run() method.
+
+        NOTE(review): `result=0` mirrors the superclass run() signature --
+        it is accepted but unused here.
+        """
+        # Start the connection multiplexer before any wrappers are active.
+        ansible_mitogen.process.MuxProcess.start()
+        run = super(StrategyMixin, self).run
+        self._add_plugin_paths()
+        self._install_wrappers()
+        try:
+            return mitogen.core._profile_hook('Strategy',
+                lambda: run(iterator, play_context)
+            )
+        finally:
+            # Always restore the patched loaders, even if run() raised.
+            self._remove_wrappers()
diff --git a/ansible/plugins/mitogen-0.2.6/ansible_mitogen/target.py b/ansible/plugins/mitogen-0.2.6/ansible_mitogen/target.py
new file mode 100644
index 000000000..40e5c57b0
--- /dev/null
+++ b/ansible/plugins/mitogen-0.2.6/ansible_mitogen/target.py
@@ -0,0 +1,777 @@
+# Copyright 2019, David Wilson
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its contributors
+# may be used to endorse or promote products derived from this software without
+# specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+
+# !mitogen: minify_safe
+
+"""
+Helper functions intended to be executed on the target. These are entrypoints
+for file transfer, module execution and sundry bits like changing file modes.
+"""
+
+import errno
+import grp
+import operator
+import os
+import pwd
+import re
+import signal
+import stat
+import subprocess
+import sys
+import tempfile
+import traceback
+import types
+
+# Absolute imports for <2.5.
+logging = __import__('logging')
+
+import mitogen.core
+import mitogen.fork
+import mitogen.parent
+import mitogen.service
+from mitogen.core import b
+
+try:
+    import json
+except ImportError:
+    import simplejson as json
+
+try:
+    reduce
+except NameError:
+    # Python 3.x.
+    from functools import reduce
+
+try:
+    BaseException
+except NameError:
+    # Python 2.4
+    BaseException = Exception
+
+
+# Ansible since PR #41749 inserts "import __main__" into
+# ansible.module_utils.basic. Mitogen's importer will refuse such an import, so
+# we must setup a fake "__main__" before that module is ever imported. The
+# str() is to cast Unicode to bytes on Python 2.6.
+if not sys.modules.get(str('__main__')):
+    sys.modules[str('__main__')] = types.ModuleType(str('__main__'))
+
+import ansible.module_utils.json_utils
+import ansible_mitogen.runner
+
+
+LOG = logging.getLogger(__name__)
+
+MAKE_TEMP_FAILED_MSG = (
+    u"Unable to find a useable temporary directory. This likely means no\n"
+    u"system-supplied TMP directory can be written to, or all directories\n"
+    u"were mounted on 'noexec' filesystems.\n"
+    u"\n"
+    u"The following paths were tried:\n"
+    u"    %(namelist)s\n"
+    u"\n"
+    u"Please check '-vvv' output for a log of individual path errors."
+)
+
+# Python 2.4/2.5 cannot support fork+threads whatsoever, it doesn't even fix up
+# interpreter state. So 2.4/2.5 interpreters start .local() contexts for
+# isolation instead. Since we don't have any crazy memory sharing problems to
+# avoid, there is no virginal fork parent either. The child is started directly
+# from the login/become process. In future this will be default everywhere,
+# fork is brainwrong from the stone age.
+FORK_SUPPORTED = sys.version_info >= (2, 6)
+
+#: Initialized to an econtext.parent.Context pointing at a pristine fork of
+#: the target Python interpreter before it executes any code or imports.
+_fork_parent = None
+
+#: Set by :func:`init_child` to the name of a writeable and executable
+#: temporary directory accessible by the active user account.
+good_temp_dir = None
+
+
+def subprocess__Popen__close_fds(self, but):
+    """
+    issue #362, #435: subprocess.Popen(close_fds=True) aka.
+    AnsibleModule.run_command() loops the entire FD space on Python<3.2.
+    CentOS>5 ships with 1,048,576 FDs by default, resulting in huge (>500ms)
+    latency starting children. Therefore replace Popen._close_fds on Linux with
+    a version that is O(fds) rather than O(_SC_OPEN_MAX).
+
+    :param int but:
+        Single descriptor to leave open, matching the original signature.
+    """
+    try:
+        # /proc/self/fd lists only the descriptors actually open.
+        names = os.listdir(u'/proc/self/fd')
+    except OSError:
+        # May fail if acting on a container that does not have /proc mounted.
+        self._original_close_fds(but)
+        return
+
+    for name in names:
+        if not name.isdigit():
+            continue
+
+        fd = int(name, 10)
+        # Preserve stdin/stdout/stderr and the exempted descriptor.
+        if fd > 2 and fd != but:
+            try:
+                os.close(fd)
+            except OSError:
+                # Racy: the listdir() snapshot may include already-closed fds.
+                pass
+
+
+# Install the replacement only where the slow path exists: Linux Python 2.x
+# interpreters exposing the private Popen._close_fds hook, and never in the
+# master process.
+if (
+    sys.platform.startswith(u'linux') and
+    sys.version < u'3.0' and
+    hasattr(subprocess.Popen, u'_close_fds') and
+    not mitogen.is_master
+):
+    subprocess.Popen._original_close_fds = subprocess.Popen._close_fds
+    subprocess.Popen._close_fds = subprocess__Popen__close_fds
+
+
+def get_small_file(context, path):
+    """
+    Basic in-memory caching module fetcher. This generates one roundtrip for
+    every previously unseen file, so it is only a temporary solution.
+
+    :param context:
+        Context we should direct FileService requests to. For now (and probably
+        forever) this is just the top-level Mitogen connection manager process.
+    :param path:
+        Path to fetch, must previously have been registered by a privileged
+        context. NOTE(review): this routes through PushFileService (below),
+        not FileService -- confirm the registration side matches.
+    :returns:
+        Bytestring file data.
+    """
+    pool = mitogen.service.get_or_create_pool(router=context.router)
+    service = pool.get_service(u'mitogen.service.PushFileService')
+    return service.get(path)
+
+
+def transfer_file(context, in_path, out_path, sync=False, set_owner=False):
+    """
+    Streamily download a file from the connection multiplexer process in the
+    controller.
+
+    :param mitogen.core.Context context:
+        Reference to the context hosting the FileService that will transmit the
+        file.
+    :param bytes in_path:
+        FileService registered name of the input file.
+    :param bytes out_path:
+        Name of the output path on the local disk.
+    :param bool sync:
+        If :data:`True`, ensure the file content and metadat are fully on disk
+        before renaming the temporary file over the existing file. This should
+        ensure in the case of system crash, either the entire old or new file
+        are visible post-reboot.
+    :param bool set_owner:
+        If :data:`True`, look up the metadata username and group on the local
+        system and file the file owner using :func:`os.fchmod`.
+    """
+    out_path = os.path.abspath(out_path)
+    fd, tmp_path = tempfile.mkstemp(suffix='.tmp',
+                                    prefix='.ansible_mitogen_transfer-',
+                                    dir=os.path.dirname(out_path))
+    fp = os.fdopen(fd, 'wb', mitogen.core.CHUNK_SIZE)
+    LOG.debug('transfer_file(%r) temporary file: %s', out_path, tmp_path)
+
+    try:
+        try:
+            ok, metadata = mitogen.service.FileService.get(
+                context=context,
+                path=in_path,
+                out_fp=fp,
+            )
+            if not ok:
+                raise IOError('transfer of %r was interrupted.' % (in_path,))
+
+            set_file_mode(tmp_path, metadata['mode'], fd=fp.fileno())
+            if set_owner:
+                set_file_owner(tmp_path, metadata['owner'], metadata['group'],
+                               fd=fp.fileno())
+        finally:
+            fp.close()
+
+        if sync:
+            os.fsync(fp.fileno())
+        os.rename(tmp_path, out_path)
+    except BaseException:
+        os.unlink(tmp_path)
+        raise
+
+    os.utime(out_path, (metadata['atime'], metadata['mtime']))
+
+
+def prune_tree(path):
+    """
+    Like shutil.rmtree(), but log errors rather than discard them, and do not
+    waste multiple os.stat() calls discovering whether the object can be
+    deleted, just try deleting it instead.
+    """
+    try:
+        # Fast path: `path` is a plain file or symlink.
+        os.unlink(path)
+        return
+    except OSError:
+        # sys.exc_info() used instead of `except ... as e` for Python 2.4/2.5.
+        e = sys.exc_info()[1]
+        if not (os.path.isdir(path) and
+                e.args[0] in (errno.EPERM, errno.EISDIR)):
+            LOG.error('prune_tree(%r): %s', path, e)
+            return
+
+    try:
+        # Ensure write access for readonly directories. Ignore error in case
+        # path is on a weird filesystem (e.g. vfat).
+        os.chmod(path, int('0700', 8))
+    except OSError:
+        e = sys.exc_info()[1]
+        LOG.warning('prune_tree(%r): %s', path, e)
+
+    try:
+        # Recurse into the directory, then remove the now-empty directory.
+        for name in os.listdir(path):
+            if name not in ('.', '..'):
+                prune_tree(os.path.join(path, name))
+        os.rmdir(path)
+    except OSError:
+        e = sys.exc_info()[1]
+        LOG.error('prune_tree(%r): %s', path, e)
+
+
+def is_good_temp_dir(path):
+    """
+    Return :data:`True` if `path` can be used as a temporary directory, logging
+    any failures that may cause it to be unsuitable. If the directory doesn't
+    exist, we attempt to create it using :func:`os.makedirs`.
+
+    "Usable" means: creatable, writeable, chmod-able, and not mounted noexec.
+    """
+    if not os.path.exists(path):
+        try:
+            os.makedirs(path, mode=int('0700', 8))
+        except OSError:
+            e = sys.exc_info()[1]
+            LOG.debug('temp dir %r unusable: did not exist and attempting '
+                      'to create it failed: %s', path, e)
+            return False
+
+    try:
+        # Probe writability by creating a real file in the directory.
+        tmp = tempfile.NamedTemporaryFile(
+            prefix='ansible_mitogen_is_good_temp_dir',
+            dir=path,
+        )
+    except (OSError, IOError):
+        e = sys.exc_info()[1]
+        LOG.debug('temp dir %r unusable: %s', path, e)
+        return False
+
+    try:
+        try:
+            os.chmod(tmp.name, int('0700', 8))
+        except OSError:
+            e = sys.exc_info()[1]
+            LOG.debug('temp dir %r unusable: chmod failed: %s', path, e)
+            return False
+
+        try:
+            # access(.., X_OK) is sufficient to detect noexec.
+            if not os.access(tmp.name, os.X_OK):
+                raise OSError('filesystem appears to be mounted noexec')
+        except OSError:
+            e = sys.exc_info()[1]
+            LOG.debug('temp dir %r unusable: %s', path, e)
+            return False
+    finally:
+        # NamedTemporaryFile deletes the probe file on close.
+        tmp.close()
+
+    return True
+
+
+def find_good_temp_dir(candidate_temp_dirs):
+    """
+    Given a list of candidate temp directories extracted from ``ansible.cfg``,
+    combine it with the Python-builtin list of candidate directories used by
+    :mod:`tempfile`, then iteratively try each until one is found that is both
+    writeable and executable.
+
+    :param list candidate_temp_dirs:
+        List of candidate $variable-expanded and tilde-expanded directory paths
+        that may be usable as a temporary directory.
+    """
+    paths = [os.path.expandvars(os.path.expanduser(p))
+             for p in candidate_temp_dirs]
+    paths.extend(tempfile._candidate_tempdir_list())
+
+    for path in paths:
+        if is_good_temp_dir(path):
+            LOG.debug('Selected temp directory: %r (from %r)', path, paths)
+            return path
+
+    raise IOError(MAKE_TEMP_FAILED_MSG % {
+        'paths': '\n    '.join(paths),
+    })
+
+
+@mitogen.core.takes_econtext
+def init_child(econtext, log_level, candidate_temp_dirs):
+    """
+    Called by ContextService immediately after connection; arranges for the
+    (presently) spotless Python interpreter to be forked, where the newly
+    forked interpreter becomes the parent of any newly forked future
+    interpreters.
+
+    This is necessary to prevent modules that are executed in-process from
+    polluting the global interpreter state in a way that effects explicitly
+    isolated modules.
+
+    :param int log_level:
+        Logging package level active in the master.
+    :param list[str] candidate_temp_dirs:
+        List of $variable-expanded and tilde-expanded directory names to add to
+        candidate list of temporary directories.
+
+    :returns:
+        Dict like::
+
+            {
+                'fork_context': mitogen.core.Context or None,
+                'good_temp_dir': ...
+                'home_dir': str
+            }
+
+        Where `fork_context` refers to the newly forked 'fork parent' context
+        the controller will use to start forked jobs, and `home_dir` is the
+        home directory for the active user account. `fork_context` is None
+        when forking is unsupported (FORK_SUPPORTED is False).
+    """
+    # Copying the master's log level causes log messages to be filtered before
+    # they reach LogForwarder, thus reducing an influx of tiny messages waking
+    # the connection multiplexer process in the master.
+    LOG.setLevel(log_level)
+    logging.getLogger('ansible_mitogen').setLevel(log_level)
+
+    # issue #536: if the json module is available, remove simplejson from the
+    # importer whitelist to avoid confusing certain Ansible modules.
+    if json.__name__ == 'json':
+        econtext.importer.whitelist.remove('simplejson')
+
+    global _fork_parent
+    if FORK_SUPPORTED:
+        mitogen.parent.upgrade_router(econtext)
+        _fork_parent = econtext.router.fork()
+
+    global good_temp_dir
+    good_temp_dir = find_good_temp_dir(candidate_temp_dirs)
+
+    return {
+        u'fork_context': _fork_parent,
+        u'home_dir': mitogen.core.to_text(os.path.expanduser('~')),
+        u'good_temp_dir': good_temp_dir,
+    }
+
+
+@mitogen.core.takes_econtext
+def spawn_isolated_child(econtext):
+    """
+    For helper functions executed in the fork parent context, arrange for
+    the context's router to be upgraded as necessary and for a new child to be
+    prepared.
+
+    The actual fork occurs from the 'virginal fork parent', which does not have
+    any Ansible modules loaded prior to fork, to avoid conflicts resulting from
+    custom module_utils paths.
+
+    :returns:
+        mitogen.core.Context for the new child; a fork child where supported,
+        otherwise a fresh local() interpreter.
+    """
+    mitogen.parent.upgrade_router(econtext)
+    if FORK_SUPPORTED:
+        context = econtext.router.fork()
+    else:
+        # Python 2.4/2.5: fork+threads is unsafe, start a new interpreter.
+        context = econtext.router.local()
+    LOG.debug('create_fork_child() -> %r', context)
+    return context
+
+
+def run_module(kwargs):
+    """
+    Set up the process environment in preparation for running an Ansible
+    module. This monkey-patches the Ansible libraries in various places to
+    prevent it from trying to kill the process on completion, and to prevent it
+    from reading sys.stdin.
+
+    :param dict kwargs:
+        Runner constructor arguments; must contain 'runner_name', the
+        ansible_mitogen.runner class to instantiate.
+    :returns:
+        Whatever the selected runner's run() returns.
+    """
+    runner_name = kwargs.pop('runner_name')
+    # Look the runner class up by name on the runner module.
+    klass = getattr(ansible_mitogen.runner, runner_name)
+    impl = klass(**mitogen.core.Kwargs(kwargs))
+    return impl.run()
+
+
+def _get_async_dir():
+    """
+    Return the tilde-expanded directory used for async job status files,
+    honouring the ANSIBLE_ASYNC_DIR environment variable.
+    """
+    return os.path.expanduser(
+        os.environ.get('ANSIBLE_ASYNC_DIR', '~/.ansible_async')
+    )
+
+
+class AsyncRunner(object):
+    """
+    Execute a single asynchronous Ansible module, maintaining its job status
+    file under the async directory, enforcing an optional time limit via
+    SIGALRM, and shutting the broker down on completion.
+    """
+    def __init__(self, job_id, timeout_secs, started_sender, econtext, kwargs):
+        # String job ID; also the status file's basename.
+        self.job_id = job_id
+        # If >0, maximum run time in seconds before the job is failed.
+        self.timeout_secs = timeout_secs
+        # Sender notified once the initial status file exists.
+        self.started_sender = started_sender
+        self.econtext = econtext
+        # Runner keyword arguments forwarded to run_module().
+        self.kwargs = kwargs
+        self._timed_out = False
+        self._init_path()
+
+    def _init_path(self):
+        # Compute self.path, creating the async directory if needed.
+        async_dir = _get_async_dir()
+        if not os.path.exists(async_dir):
+            os.makedirs(async_dir)
+        self.path = os.path.join(async_dir, self.job_id)
+
+    def _update(self, dct):
+        """
+        Update an async job status file, writing to a temporary file and
+        renaming it over the real path so readers never see a partial write.
+        """
+        LOG.info('%r._update(%r, %r)', self, self.job_id, dct)
+        dct.setdefault('ansible_job_id', self.job_id)
+        dct.setdefault('data', '')
+
+        fp = open(self.path + '.tmp', 'w')
+        try:
+            fp.write(json.dumps(dct))
+        finally:
+            fp.close()
+        os.rename(self.path + '.tmp', self.path)
+
+    def _on_sigalrm(self, signum, frame):
+        """
+        Respond to SIGALRM (job timeout) by updating the job file and killing
+        the process.
+        """
+        msg = "Job reached maximum time limit of %d seconds." % (
+            self.timeout_secs,
+        )
+        self._update({
+            "failed": 1,
+            "finished": 1,
+            "msg": msg,
+        })
+        # Flag checked by _run() so the timed-out result is not overwritten.
+        self._timed_out = True
+        self.econtext.broker.shutdown()
+
+    def _install_alarm(self):
+        # Arm a one-shot SIGALRM for the configured time limit.
+        signal.signal(signal.SIGALRM, self._on_sigalrm)
+        signal.alarm(self.timeout_secs)
+
+    def _run_module(self):
+        # Run detached, without a TTY, in this child's econtext.
+        kwargs = dict(self.kwargs, **{
+            'detach': True,
+            'econtext': self.econtext,
+            'emulate_tty': False,
+        })
+        return run_module(kwargs)
+
+    def _parse_result(self, dct):
+        # Strip any non-JSON noise from stdout, then record the parsed result.
+        filtered, warnings = (
+            ansible.module_utils.json_utils.
+            _filter_non_json_lines(dct['stdout'])
+        )
+        result = json.loads(filtered)
+        result.setdefault('warnings', []).extend(warnings)
+        result['stderr'] = dct['stderr'] or result.get('stderr', '')
+        self._update(result)
+
+    def _run(self):
+        """
+        1. Immediately updates the status file to mark the job as started.
+        2. Installs a timer/signal handler to implement the time limit.
+        3. Runs as with run_module(), writing the result to the status file.
+        """
+        self._update({
+            'started': 1,
+            'finished': 0,
+            'pid': os.getpid()
+        })
+        self.started_sender.send(True)
+
+        if self.timeout_secs > 0:
+            self._install_alarm()
+
+        dct = self._run_module()
+        if not self._timed_out:
+            # After SIGALRM fires, there is a window between broker responding
+            # to shutdown() by killing the process, and work continuing on the
+            # main thread. If main thread was asleep in at least
+            # basic.py/select.select(), an EINTR will be raised. We want to
+            # discard that exception.
+            try:
+                self._parse_result(dct)
+            except Exception:
+                self._update({
+                    "failed": 1,
+                    "msg": traceback.format_exc(),
+                    "data": dct['stdout'],  # temporary notice only
+                    "stderr": dct['stderr']
+                })
+
+    def run(self):
+        # Top-level entry point: record any failure, always stop the broker.
+        try:
+            try:
+                self._run()
+            except Exception:
+                self._update({
+                    "failed": 1,
+                    "msg": traceback.format_exc(),
+                })
+        finally:
+            self.econtext.broker.shutdown()
+
+
+@mitogen.core.takes_econtext
+def run_module_async(kwargs, job_id, timeout_secs, started_sender, econtext):
+    """
+    Execute a module with its run status and result written to a file,
+    terminating on the process on completion. This function must run in a
+    child created via :func:`spawn_isolated_child`.
+
+    @param mitogen.core.Sender started_sender:
+        A sender that will receive :data:`True` once the job has reached a
+        point where its initial job file has been written. This is required to
+        avoid a race where an overly eager controller can check for a task
+        before it has reached that point in execution, which is possible at
+        least on Python 2.4, where forking is not available for async tasks.
+    """
+    arunner = AsyncRunner(
+        job_id,
+        timeout_secs,
+        started_sender,
+        econtext,
+        kwargs
+    )
+    arunner.run()
+
+
+def get_user_shell():
+    """
+    For commands executed directly via an SSH command-line, SSH looks up the
+    user's shell via getpwuid() and only defaults to /bin/sh if that field is
+    missing or empty. Mimic that behaviour for the current effective UID.
+
+    :returns:
+        Shell path string, '/bin/sh' when the passwd entry is missing or its
+        shell field is empty.
+    """
+    try:
+        pw_shell = pwd.getpwuid(os.geteuid()).pw_shell
+    except KeyError:
+        pw_shell = None
+
+    return pw_shell or '/bin/sh'
+
+
+def exec_args(args, in_data='', chdir=None, shell=None, emulate_tty=False):
+    """
+    Run a command in a subprocess, emulating the argument handling behaviour of
+    SSH.
+
+    :param list[str] args:
+        Argument vector.
+    :param bytes in_data:
+        Optional standard input for the command.
+    :param chdir:
+        Optional working directory for the child process.
+    :param shell:
+        Accepted for signature compatibility but unused here.
+    :param bool emulate_tty:
+        If :data:`True`, arrange for stdout and stderr to be merged into the
+        stdout pipe and for LF to be translated into CRLF, emulating the
+        behaviour of a TTY.
+    :return:
+        (return code, stdout bytes, stderr bytes)
+    """
+    LOG.debug('exec_args(%r, ..., chdir=%r)', args, chdir)
+    assert isinstance(args, list)
+
+    if emulate_tty:
+        stderr = subprocess.STDOUT
+    else:
+        stderr = subprocess.PIPE
+
+    proc = subprocess.Popen(
+        args=args,
+        stdout=subprocess.PIPE,
+        stderr=stderr,
+        stdin=subprocess.PIPE,
+        cwd=chdir,
+    )
+    stdout, stderr = proc.communicate(in_data)
+
+    if emulate_tty:
+        stdout = stdout.replace(b('\n'), b('\r\n'))
+    # With merged streams communicate() yields stderr=None; normalize to b''.
+    return proc.returncode, stdout, stderr or b('')
+
+
+def exec_command(cmd, in_data='', chdir=None, shell=None, emulate_tty=False):
+    """
+    Run a command in a subprocess, emulating the argument handling behaviour of
+    SSH.
+
+    :param bytes cmd:
+        String command line, passed to user's shell.
+    :param bytes in_data:
+        Optional standard input for the command.
+    :return:
+        (return code, stdout bytes, stderr bytes)
+    """
+    assert isinstance(cmd, mitogen.core.UnicodeType)
+    return exec_args(
+        args=[get_user_shell(), '-c', cmd],
+        in_data=in_data,
+        chdir=chdir,
+        shell=shell,
+        emulate_tty=emulate_tty,
+    )
+
+
+def read_path(path):
+    """
+    Fetch the contents of a filesystem `path` as bytes.
+    """
+    return open(path, 'rb').read()
+
+
+def set_file_owner(path, owner, group=None, fd=None):
+    if owner:
+        uid = pwd.getpwnam(owner).pw_uid
+    else:
+        uid = os.geteuid()
+
+    if group:
+        gid = grp.getgrnam(group).gr_gid
+    else:
+        gid = os.getegid()
+
+    if fd is not None and hasattr(os, 'fchown'):
+        os.fchown(fd, uid, gid)  # fchown takes uid and gid separately, not a tuple
+    else:
+        # Python<2.6
+        os.chown(path, uid, gid)
+
+
+def write_path(path, s, owner=None, group=None, mode=None,
+               utimes=None, sync=False):
+    """
+    Writes bytes `s` to a filesystem `path`.
+    """
+    path = os.path.abspath(path)
+    fd, tmp_path = tempfile.mkstemp(suffix='.tmp',
+                                    prefix='.ansible_mitogen_transfer-',
+                                    dir=os.path.dirname(path))
+    fp = os.fdopen(fd, 'wb', mitogen.core.CHUNK_SIZE)
+    LOG.debug('write_path(path=%r) temporary file: %s', path, tmp_path)
+
+    try:
+        try:
+            if mode:
+                set_file_mode(tmp_path, mode, fd=fp.fileno())
+            if owner or group:
+                set_file_owner(tmp_path, owner, group, fd=fp.fileno())
+            fp.write(s)
+            if sync:  # must fsync before close; fileno() is invalid afterwards
+                os.fsync(fp.fileno())
+        finally:
+            fp.close()
+
+        os.rename(tmp_path, path)
+    except BaseException:
+        os.unlink(tmp_path)
+        raise
+
+    if utimes:
+        os.utime(path, utimes)
+
+
+CHMOD_CLAUSE_PAT = re.compile(r'([uoga]*)([+\-=])([ugo]|[rwx]*)')
+CHMOD_MASKS = {
+    'u': stat.S_IRWXU,
+    'g': stat.S_IRWXG,
+    'o': stat.S_IRWXO,
+    'a': (stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO),
+}
+CHMOD_BITS = {
+    'u': {'r': stat.S_IRUSR, 'w': stat.S_IWUSR, 'x': stat.S_IXUSR},
+    'g': {'r': stat.S_IRGRP, 'w': stat.S_IWGRP, 'x': stat.S_IXGRP},
+    'o': {'r': stat.S_IROTH, 'w': stat.S_IWOTH, 'x': stat.S_IXOTH},
+    'a': {
+        'r': (stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH),
+        'w': (stat.S_IWUSR | stat.S_IWGRP | stat.S_IWOTH),
+        'x': (stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)
+    }
+}
+
+
+def apply_mode_spec(spec, mode):
+    """
+    Given a symbolic file mode change specification in the style of chmod(1)
+    `spec`, apply changes in the specification to the numeric file mode `mode`.
+    """
+    for clause in mitogen.core.to_text(spec).split(','):
+        match = CHMOD_CLAUSE_PAT.match(clause)
+        who, op, perms = match.groups()
+        for ch in who or 'a':
+            mask = CHMOD_MASKS[ch]
+            bits = CHMOD_BITS[ch]
+            cur_perm_bits = mode & mask
+            new_perm_bits = reduce(operator.or_, (bits[p] for p in perms), 0)
+            mode &= ~mask
+            if op == '=':
+                mode |= new_perm_bits
+            elif op == '+':
+                mode |= new_perm_bits | cur_perm_bits
+            else:
+                mode |= cur_perm_bits & ~new_perm_bits
+    return mode
+
+
+def set_file_mode(path, spec, fd=None):
+    """
+    Update the permissions of a file using the same syntax as chmod(1).
+    """
+    if isinstance(spec, int):
+        new_mode = spec
+    elif not mitogen.core.PY3 and isinstance(spec, long):
+        new_mode = spec
+    elif spec.isdigit():
+        new_mode = int(spec, 8)
+    else:
+        mode = os.stat(path).st_mode
+        new_mode = apply_mode_spec(spec, mode)
+
+    if fd is not None and hasattr(os, 'fchmod'):
+        os.fchmod(fd, new_mode)
+    else:
+        os.chmod(path, new_mode)
+
+
+def file_exists(path):
+    """
+    Return :data:`True` if `path` exists. This is a wrapper function over
+    :func:`os.path.exists`, since its implementation module varies across
+    Python versions.
+    """
+    return os.path.exists(path)
diff --git a/ansible/plugins/mitogen-0.2.6/ansible_mitogen/transport_config.py b/ansible/plugins/mitogen-0.2.6/ansible_mitogen/transport_config.py
new file mode 100644
index 000000000..8ef121658
--- /dev/null
+++ b/ansible/plugins/mitogen-0.2.6/ansible_mitogen/transport_config.py
@@ -0,0 +1,621 @@
+# Copyright 2019, David Wilson
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its contributors
+# may be used to endorse or promote products derived from this software without
+# specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+
+from __future__ import absolute_import
+from __future__ import unicode_literals
+
+"""
+Mitogen extends Ansible's target configuration mechanism in several ways that
+require some care:
+
+* Per-task configurables in Ansible like ansible_python_interpreter are
+  connection-layer configurables in Mitogen. They must be extracted during each
+  task execution to form the complete connection-layer configuration.
+
+* Mitogen has extra configurables not supported by Ansible at all, such as
+  mitogen_ssh_debug_level. These are extracted the same way as
+  ansible_python_interpreter.
+
+* Mitogen allows connections to be delegated to other machines. Ansible has no
+  internal framework for this, and so Mitogen must figure out a delegated
+  connection configuration all on its own. It cannot reuse much of the Ansible
+  machinery for building a connection configuration, as that machinery is
+  deeply spread out and hard-wired to expect Ansible's usual mode of operation.
+
+For normal and delegate_to connections, Ansible's PlayContext is reused where
+possible to maximize compatibility, but for proxy hops, configurations are
+built up using the HostVars magic class to call VariableManager.get_vars()
+behind the scenes on our behalf. Where Ansible has multiple sources of a
+configuration item, for example, ansible_ssh_extra_args, Mitogen must (ideally
+perfectly) reproduce how Ansible arrives at its value, without using mechanisms
+that are hard-wired or change across Ansible versions.
+
+That is what this file is for. It exports two spec classes, one that takes all
+information from PlayContext, and another that takes (almost) all information
+from HostVars.
+"""
+
+import abc
+import os
+import ansible.utils.shlex
+import ansible.constants as C
+
+from ansible.module_utils.six import with_metaclass
+
+
+import mitogen.core
+
+
+def parse_python_path(s):
+    """
+    Given the string set for ansible_python_interpreter, parse it using shell
+    syntax and return an appropriate argument vector.
+    """
+    if s:
+        return ansible.utils.shlex.shlex_split(s)
+
+
+def optional_secret(value):
+    """
+    Wrap `value` in :class:`mitogen.core.Secret` if it is not :data:`None`,
+    otherwise return :data:`None`.
+    """
+    if value is not None:
+        return mitogen.core.Secret(value)
+
+
+def first_true(it, default=None):
+    """
+    Return the first truthy element from `it`.
+    """
+    for elem in it:
+        if elem:
+            return elem
+    return default
+
+
+class Spec(with_metaclass(abc.ABCMeta, object)):
+    """
+    A source for variables that comprise a connection configuration.
+    """
+
+    @abc.abstractmethod
+    def transport(self):
+        """
+        The name of the Ansible plug-in implementing the connection.
+        """
+
+    @abc.abstractmethod
+    def inventory_name(self):
+        """
+        The name of the target being connected to as it appears in Ansible's
+        inventory.
+        """
+
+    @abc.abstractmethod
+    def remote_addr(self):
+        """
+        The network address of the target, or for container and other special
+        targets, some other unique identifier.
+        """
+
+    @abc.abstractmethod
+    def remote_user(self):
+        """
+        The username of the login account on the target.
+        """
+
+    @abc.abstractmethod
+    def password(self):
+        """
+        The password of the login account on the target.
+        """
+
+    @abc.abstractmethod
+    def become(self):
+        """
+        :data:`True` if privilege escalation should be active.
+        """
+
+    @abc.abstractmethod
+    def become_method(self):
+        """
+        The name of the Ansible become method to use.
+        """
+
+    @abc.abstractmethod
+    def become_user(self):
+        """
+        The username of the target account for become.
+        """
+
+    @abc.abstractmethod
+    def become_pass(self):
+        """
+        The password of the target account for become.
+        """
+
+    @abc.abstractmethod
+    def port(self):
+        """
+        The port of the login service on the target machine.
+        """
+
+    @abc.abstractmethod
+    def python_path(self):
+        """
+        Path to the Python interpreter on the target machine.
+        """
+
+    @abc.abstractmethod
+    def private_key_file(self):
+        """
+        Path to the SSH private key file to use to login.
+        """
+
+    @abc.abstractmethod
+    def ssh_executable(self):
+        """
+        Path to the SSH executable.
+        """
+
+    @abc.abstractmethod
+    def timeout(self):
+        """
+        The generic timeout for all connections.
+        """
+
+    @abc.abstractmethod
+    def ansible_ssh_timeout(self):
+        """
+        The SSH-specific timeout for a connection.
+        """
+
+    @abc.abstractmethod
+    def ssh_args(self):
+        """
+        The list of additional arguments that should be included in an SSH
+        invocation.
+        """
+
+    @abc.abstractmethod
+    def become_exe(self):
+        """
+        The path to the executable implementing the become method on the remote
+        machine.
+        """
+
+    @abc.abstractmethod
+    def sudo_args(self):
+        """
+        The list of additional arguments that should be included in a become
+        invocation.
+        """
+        # TODO: split out into sudo_args/become_args.
+
+    @abc.abstractmethod
+    def mitogen_via(self):
+        """
+        The value of the mitogen_via= variable for this connection. Indicates
+        the connection should be established via an intermediary.
+        """
+
+    @abc.abstractmethod
+    def mitogen_kind(self):
+        """
+        The type of container to use with the "setns" transport.
+        """
+
+    @abc.abstractmethod
+    def mitogen_docker_path(self):
+        """
+        The path to the "docker" program for the 'docker' transport.
+        """
+
+    @abc.abstractmethod
+    def mitogen_kubectl_path(self):
+        """
+        The path to the "kubectl" program for the 'docker' transport.
+        """
+
+    @abc.abstractmethod
+    def mitogen_lxc_path(self):
+        """
+        The path to the "lxc" program for the 'lxd' transport.
+        """
+
+    @abc.abstractmethod
+    def mitogen_lxc_attach_path(self):
+        """
+        The path to the "lxc-attach" program for the 'lxc' transport.
+        """
+
+    @abc.abstractmethod
+    def mitogen_lxc_info_path(self):
+        """
+        The path to the "lxc-info" program for the 'lxc' transport.
+        """
+
+    @abc.abstractmethod
+    def mitogen_machinectl_path(self):
+        """
+        The path to the "machinectl" program for the 'setns' transport.
+        """
+
+    @abc.abstractmethod
+    def mitogen_ssh_debug_level(self):
+        """
+        The SSH debug level.
+        """
+
+    @abc.abstractmethod
+    def mitogen_ssh_compression(self):
+        """
+        Whether SSH compression is enabled.
+        """
+
+    @abc.abstractmethod
+    def extra_args(self):
+        """
+        Connection-specific arguments.
+        """
+
+
+class PlayContextSpec(Spec):
+    """
+    PlayContextSpec takes almost all its information as-is from Ansible's
+    PlayContext. It is used for normal connections and delegate_to connections,
+    and should always be accurate.
+    """
+    def __init__(self, connection, play_context, transport, inventory_name):
+        self._connection = connection
+        self._play_context = play_context
+        self._transport = transport
+        self._inventory_name = inventory_name
+
+    def transport(self):
+        return self._transport
+
+    def inventory_name(self):
+        return self._inventory_name
+
+    def remote_addr(self):
+        return self._play_context.remote_addr
+
+    def remote_user(self):
+        return self._play_context.remote_user
+
+    def become(self):
+        return self._play_context.become
+
+    def become_method(self):
+        return self._play_context.become_method
+
+    def become_user(self):
+        return self._play_context.become_user
+
+    def become_pass(self):
+        return optional_secret(self._play_context.become_pass)
+
+    def password(self):
+        return optional_secret(self._play_context.password)
+
+    def port(self):
+        return self._play_context.port
+
+    def python_path(self):
+        s = self._connection.get_task_var('ansible_python_interpreter')
+        # #511, #536: executor/module_common.py::_get_shebang() hard-wires
+        # "/usr/bin/python" as the default interpreter path if no other
+        # interpreter is specified.
+        return parse_python_path(s or '/usr/bin/python')
+
+    def private_key_file(self):
+        return self._play_context.private_key_file
+
+    def ssh_executable(self):
+        return self._play_context.ssh_executable
+
+    def timeout(self):
+        return self._play_context.timeout
+
+    def ansible_ssh_timeout(self):
+        return (
+            self._connection.get_task_var('ansible_timeout') or
+            self._connection.get_task_var('ansible_ssh_timeout') or
+            self.timeout()
+        )
+
+    def ssh_args(self):
+        return [
+            mitogen.core.to_text(term)
+            for s in (
+                getattr(self._play_context, 'ssh_args', ''),
+                getattr(self._play_context, 'ssh_common_args', ''),
+                getattr(self._play_context, 'ssh_extra_args', '')
+            )
+            for term in ansible.utils.shlex.shlex_split(s or '')
+        ]
+
+    def become_exe(self):
+        return self._play_context.become_exe
+
+    def sudo_args(self):
+        return [
+            mitogen.core.to_text(term)
+            for term in ansible.utils.shlex.shlex_split(
+                first_true((
+                    self._play_context.become_flags,
+                    self._play_context.sudo_flags,
+                    # Ansible 2.3.
+                    getattr(C, 'DEFAULT_BECOME_FLAGS', ''),
+                    getattr(C, 'DEFAULT_SUDO_FLAGS', '')
+                ), default='')
+            )
+        ]
+
+    def mitogen_via(self):
+        return self._connection.get_task_var('mitogen_via')
+
+    def mitogen_kind(self):
+        return self._connection.get_task_var('mitogen_kind')
+
+    def mitogen_docker_path(self):
+        return self._connection.get_task_var('mitogen_docker_path')
+
+    def mitogen_kubectl_path(self):
+        return self._connection.get_task_var('mitogen_kubectl_path')
+
+    def mitogen_lxc_path(self):
+        return self._connection.get_task_var('mitogen_lxc_path')
+
+    def mitogen_lxc_attach_path(self):
+        return self._connection.get_task_var('mitogen_lxc_attach_path')
+
+    def mitogen_lxc_info_path(self):
+        return self._connection.get_task_var('mitogen_lxc_info_path')
+
+    def mitogen_machinectl_path(self):
+        return self._connection.get_task_var('mitogen_machinectl_path')
+
+    def mitogen_ssh_debug_level(self):
+        return self._connection.get_task_var('mitogen_ssh_debug_level')
+
+    def mitogen_ssh_compression(self):
+        return self._connection.get_task_var('mitogen_ssh_compression')
+
+    def extra_args(self):
+        return self._connection.get_extra_args()
+
+
+class MitogenViaSpec(Spec):
+    """
+    MitogenViaSpec takes most of its information from the HostVars of the
+    running task. HostVars is a lightweight wrapper around VariableManager, so
+    it is better to say that VariableManager.get_vars() is the ultimate source
+    of MitogenViaSpec's information.
+
+    Due to this, mitogen_via= hosts must have all their configuration
+    information represented as host and group variables. We cannot use any
+    per-task configuration, as all that data belongs to the real target host.
+
+    Ansible uses all kinds of strange historical logic for calculating
+    variables, including making their precedence configurable. MitogenViaSpec
+    must ultimately reimplement all of that logic. It is likely that if you are
+    having a configuration problem with connection delegation, the answer to
+    your problem lies in the method implementations below!
+    """
+    def __init__(self, inventory_name, host_vars, become_method, become_user,
+                 play_context):
+        """
+        :param str inventory_name:
+            The inventory name of the intermediary machine, i.e. not the target
+            machine.
+        :param dict host_vars:
+            The HostVars magic dictionary provided by Ansible in task_vars.
+        :param str become_method:
+            If the mitogen_via= spec included a become method, the method it
+            specifies.
+        :param str become_user:
+            If the mitogen_via= spec included a become user, the user it
+            specifies.
+        :param PlayContext play_context:
+            For some global values **only**, the PlayContext used to describe
+            the real target machine. Values from this object are **strictly
+            restricted** to values that are Ansible-global, e.g. the passwords
+            specified interactively.
+        """
+        self._inventory_name = inventory_name
+        self._host_vars = host_vars
+        self._become_method = become_method
+        self._become_user = become_user
+        # Dangerous! You may find a variable you want in this object, but it's
+        # almost certainly for the wrong machine!
+        self._dangerous_play_context = play_context
+
+    def transport(self):
+        return (
+            self._host_vars.get('ansible_connection') or
+            C.DEFAULT_TRANSPORT
+        )
+
+    def inventory_name(self):
+        return self._inventory_name
+
+    def remote_addr(self):
+        # play_context.py::MAGIC_VARIABLE_MAPPING
+        return (
+            self._host_vars.get('ansible_ssh_host') or
+            self._host_vars.get('ansible_host') or
+            self._inventory_name
+        )
+
+    def remote_user(self):
+        return (
+            self._host_vars.get('ansible_ssh_user') or
+            self._host_vars.get('ansible_user') or
+            C.DEFAULT_REMOTE_USER
+        )
+
+    def become(self):
+        return bool(self._become_user)
+
+    def become_method(self):
+        return (
+            self._become_method or
+            self._host_vars.get('ansible_become_method') or
+            C.DEFAULT_BECOME_METHOD
+        )
+
+    def become_user(self):
+        return self._become_user
+
+    def become_pass(self):
+        return optional_secret(
+            self._host_vars.get('ansible_become_password') or
+            self._host_vars.get('ansible_become_pass')
+        )
+
+    def password(self):
+        return optional_secret(
+            self._host_vars.get('ansible_ssh_pass') or
+            self._host_vars.get('ansible_password')
+        )
+
+    def port(self):
+        return (
+            self._host_vars.get('ansible_ssh_port') or
+            self._host_vars.get('ansible_port') or
+            C.DEFAULT_REMOTE_PORT
+        )
+
+    def python_path(self):
+        s = self._host_vars.get('ansible_python_interpreter')
+        # #511, #536: executor/module_common.py::_get_shebang() hard-wires
+        # "/usr/bin/python" as the default interpreter path if no other
+        # interpreter is specified.
+        return parse_python_path(s or '/usr/bin/python')
+
+    def private_key_file(self):
+        # TODO: must come from PlayContext too.
+        return (
+            self._host_vars.get('ansible_ssh_private_key_file') or
+            self._host_vars.get('ansible_private_key_file') or
+            C.DEFAULT_PRIVATE_KEY_FILE
+        )
+
+    def ssh_executable(self):
+        return (
+            self._host_vars.get('ansible_ssh_executable') or
+            C.ANSIBLE_SSH_EXECUTABLE
+        )
+
+    def timeout(self):
+        # TODO: must come from PlayContext too.
+        return C.DEFAULT_TIMEOUT
+
+    def ansible_ssh_timeout(self):
+        return (
+            self._host_vars.get('ansible_timeout') or
+            self._host_vars.get('ansible_ssh_timeout') or
+            self.timeout()
+        )
+
+    def ssh_args(self):
+        return [
+            mitogen.core.to_text(term)
+            for s in (
+                (
+                    self._host_vars.get('ansible_ssh_args') or
+                    getattr(C, 'ANSIBLE_SSH_ARGS', None) or
+                    os.environ.get('ANSIBLE_SSH_ARGS')
+                    # TODO: ini entry. older versions.
+                ),
+                (
+                    self._host_vars.get('ansible_ssh_common_args') or
+                    os.environ.get('ANSIBLE_SSH_COMMON_ARGS')
+                    # TODO: ini entry.
+                ),
+                (
+                    self._host_vars.get('ansible_ssh_extra_args') or
+                    os.environ.get('ANSIBLE_SSH_EXTRA_ARGS')
+                    # TODO: ini entry.
+                ),
+            )
+            for term in ansible.utils.shlex.shlex_split(s)
+            if s
+        ]
+
+    def become_exe(self):
+        return (
+            self._host_vars.get('ansible_become_exe') or
+            C.DEFAULT_BECOME_EXE
+        )
+
+    def sudo_args(self):
+        return [
+            mitogen.core.to_text(term)
+            for s in (
+                self._host_vars.get('ansible_sudo_flags') or '',
+                self._host_vars.get('ansible_become_flags') or '',
+            )
+            for term in ansible.utils.shlex.shlex_split(s)
+        ]
+
+    def mitogen_via(self):
+        return self._host_vars.get('mitogen_via')
+
+    def mitogen_kind(self):
+        return self._host_vars.get('mitogen_kind')
+
+    def mitogen_docker_path(self):
+        return self._host_vars.get('mitogen_docker_path')
+
+    def mitogen_kubectl_path(self):
+        return self._host_vars.get('mitogen_kubectl_path')
+
+    def mitogen_lxc_path(self):
+        return self._host_vars.get('mitogen_lxc_path')
+
+    def mitogen_lxc_attach_path(self):
+        return self._host_vars.get('mitogen_lxc_attach_path')
+
+    def mitogen_lxc_info_path(self):
+        return self._host_vars.get('mitogen_lxc_info_path')
+
+    def mitogen_machinectl_path(self):
+        return self._host_vars.get('mitogen_machinectl_path')
+
+    def mitogen_ssh_debug_level(self):
+        return self._host_vars.get('mitogen_ssh_debug_level')
+
+    def mitogen_ssh_compression(self):
+        return self._host_vars.get('mitogen_ssh_compression')
+
+    def extra_args(self):
+        return []  # TODO
diff --git a/ansible/plugins/mitogen-0.2.6/mitogen.egg-info/PKG-INFO b/ansible/plugins/mitogen-0.2.6/mitogen.egg-info/PKG-INFO
new file mode 100644
index 000000000..d5ec33a7b
--- /dev/null
+++ b/ansible/plugins/mitogen-0.2.6/mitogen.egg-info/PKG-INFO
@@ -0,0 +1,23 @@
+Metadata-Version: 1.1
+Name: mitogen
+Version: 0.2.6
+Summary: Library for writing distributed self-replicating programs.
+Home-page: https://github.com/dw/mitogen/
+Author: David Wilson
+Author-email: UNKNOWN
+License: New BSD
+Description: UNKNOWN
+Platform: UNKNOWN
+Classifier: Environment :: Console
+Classifier: Intended Audience :: System Administrators
+Classifier: License :: OSI Approved :: BSD License
+Classifier: Operating System :: POSIX
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 2.4
+Classifier: Programming Language :: Python :: 2.5
+Classifier: Programming Language :: Python :: 2.6
+Classifier: Programming Language :: Python :: 2.7
+Classifier: Programming Language :: Python :: 3.6
+Classifier: Programming Language :: Python :: Implementation :: CPython
+Classifier: Topic :: System :: Distributed Computing
+Classifier: Topic :: System :: Systems Administration
diff --git a/ansible/plugins/mitogen-0.2.6/mitogen.egg-info/SOURCES.txt b/ansible/plugins/mitogen-0.2.6/mitogen.egg-info/SOURCES.txt
new file mode 100644
index 000000000..c2abd3383
--- /dev/null
+++ b/ansible/plugins/mitogen-0.2.6/mitogen.egg-info/SOURCES.txt
@@ -0,0 +1,78 @@
+LICENSE
+MANIFEST.in
+README.md
+setup.cfg
+setup.py
+ansible_mitogen/__init__.py
+ansible_mitogen/affinity.py
+ansible_mitogen/connection.py
+ansible_mitogen/loaders.py
+ansible_mitogen/logging.py
+ansible_mitogen/mixins.py
+ansible_mitogen/module_finder.py
+ansible_mitogen/parsing.py
+ansible_mitogen/planner.py
+ansible_mitogen/process.py
+ansible_mitogen/runner.py
+ansible_mitogen/services.py
+ansible_mitogen/strategy.py
+ansible_mitogen/target.py
+ansible_mitogen/transport_config.py
+ansible_mitogen/compat/__init__.py
+ansible_mitogen/compat/simplejson/__init__.py
+ansible_mitogen/compat/simplejson/decoder.py
+ansible_mitogen/compat/simplejson/encoder.py
+ansible_mitogen/compat/simplejson/scanner.py
+ansible_mitogen/plugins/__init__.py
+ansible_mitogen/plugins/action/__init__.py
+ansible_mitogen/plugins/action/mitogen_get_stack.py
+ansible_mitogen/plugins/connection/__init__.py
+ansible_mitogen/plugins/connection/mitogen_doas.py
+ansible_mitogen/plugins/connection/mitogen_docker.py
+ansible_mitogen/plugins/connection/mitogen_jail.py
+ansible_mitogen/plugins/connection/mitogen_kubectl.py
+ansible_mitogen/plugins/connection/mitogen_local.py
+ansible_mitogen/plugins/connection/mitogen_lxc.py
+ansible_mitogen/plugins/connection/mitogen_lxd.py
+ansible_mitogen/plugins/connection/mitogen_machinectl.py
+ansible_mitogen/plugins/connection/mitogen_setns.py
+ansible_mitogen/plugins/connection/mitogen_ssh.py
+ansible_mitogen/plugins/connection/mitogen_su.py
+ansible_mitogen/plugins/connection/mitogen_sudo.py
+ansible_mitogen/plugins/strategy/__init__.py
+ansible_mitogen/plugins/strategy/mitogen.py
+ansible_mitogen/plugins/strategy/mitogen_free.py
+ansible_mitogen/plugins/strategy/mitogen_host_pinned.py
+ansible_mitogen/plugins/strategy/mitogen_linear.py
+mitogen/__init__.py
+mitogen/core.py
+mitogen/debug.py
+mitogen/doas.py
+mitogen/docker.py
+mitogen/fakessh.py
+mitogen/fork.py
+mitogen/jail.py
+mitogen/kubectl.py
+mitogen/lxc.py
+mitogen/lxd.py
+mitogen/master.py
+mitogen/minify.py
+mitogen/os_fork.py
+mitogen/parent.py
+mitogen/profiler.py
+mitogen/select.py
+mitogen/service.py
+mitogen/setns.py
+mitogen/ssh.py
+mitogen/su.py
+mitogen/sudo.py
+mitogen/unix.py
+mitogen/utils.py
+mitogen.egg-info/PKG-INFO
+mitogen.egg-info/SOURCES.txt
+mitogen.egg-info/dependency_links.txt
+mitogen.egg-info/not-zip-safe
+mitogen.egg-info/top_level.txt
+mitogen/compat/__init__.py
+mitogen/compat/pkgutil.py
+mitogen/compat/tokenize.py
\ No newline at end of file
diff --git a/ansible/plugins/mitogen-0.2.6/mitogen.egg-info/dependency_links.txt b/ansible/plugins/mitogen-0.2.6/mitogen.egg-info/dependency_links.txt
new file mode 100644
index 000000000..8b1378917
--- /dev/null
+++ b/ansible/plugins/mitogen-0.2.6/mitogen.egg-info/dependency_links.txt
@@ -0,0 +1 @@
+
diff --git a/ansible/plugins/mitogen-0.2.6/mitogen.egg-info/not-zip-safe b/ansible/plugins/mitogen-0.2.6/mitogen.egg-info/not-zip-safe
new file mode 100644
index 000000000..8b1378917
--- /dev/null
+++ b/ansible/plugins/mitogen-0.2.6/mitogen.egg-info/not-zip-safe
@@ -0,0 +1 @@
+
diff --git a/ansible/plugins/mitogen-0.2.6/mitogen.egg-info/top_level.txt b/ansible/plugins/mitogen-0.2.6/mitogen.egg-info/top_level.txt
new file mode 100644
index 000000000..2360b3f0c
--- /dev/null
+++ b/ansible/plugins/mitogen-0.2.6/mitogen.egg-info/top_level.txt
@@ -0,0 +1,2 @@
+ansible_mitogen
+mitogen
diff --git a/ansible/plugins/mitogen-0.2.6/mitogen/__init__.py b/ansible/plugins/mitogen-0.2.6/mitogen/__init__.py
new file mode 100644
index 000000000..048798d96
--- /dev/null
+++ b/ansible/plugins/mitogen-0.2.6/mitogen/__init__.py
@@ -0,0 +1,120 @@
+# Copyright 2019, David Wilson
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its contributors
+# may be used to endorse or promote products derived from this software without
+# specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+
+# !mitogen: minify_safe
+
+"""
+On the Mitogen master, this is imported from ``mitogen/__init__.py`` as would
+be expected. On the slave, it is built dynamically during startup.
+"""
+
+
+#: Library version as a tuple.
+__version__ = (0, 2, 6)
+
+
+#: This is :data:`False` in slave contexts. Previously it was used to prevent
+#: re-execution of :mod:`__main__` in single file programs, however that now
+#: happens automatically.
+is_master = True
+
+
+#: This is `0` in a master, otherwise it is the master-assigned ID unique to
+#: the slave context used for message routing.
+context_id = 0
+
+
+#: This is :data:`None` in a master, otherwise it is the master-assigned ID
+#: unique to the slave's parent context.
+parent_id = None
+
+
+#: This is an empty list in a master, otherwise it is a list of parent context
+#: IDs ordered from most direct to least direct.
+parent_ids = []
+
+
+import os
+_default_profiling = os.environ.get('MITOGEN_PROFILING') is not None
+del os
+
+
+def main(log_level='INFO', profiling=_default_profiling):
+    """
+    Convenience decorator primarily useful for writing discardable test
+    scripts.
+
+    In the master process, when `func` is defined in the :mod:`__main__`
+    module, arranges for `func(router)` to be invoked immediately, with
+    :py:class:`mitogen.master.Router` construction and destruction handled just
+    as in :py:func:`mitogen.utils.run_with_router`. In slaves, this function
+    does nothing.
+
+    :param str log_level:
+        Logging package level to configure via
+        :py:func:`mitogen.utils.log_to_file`.
+
+    :param bool profiling:
+        If :py:data:`True`, equivalent to setting
+        :py:attr:`mitogen.master.Router.profiling` prior to router
+        construction. This causes ``/tmp`` files to be created everywhere at
+        the end of a successful run with :py:mod:`cProfile` output for every
+        thread.
+
+    Example:
+
+    ::
+
+        import mitogen
+        import requests
+
+        def get_url(url):
+            return requests.get(url).text
+
+        @mitogen.main()
+        def main(router):
+            z = router.ssh(hostname='k3')
+            print(z.call(get_url, 'https://example.org/'))
+
+    """
+
+    def wrapper(func):
+        if func.__module__ != '__main__':
+            return func
+        import mitogen.parent
+        from mitogen import utils
+        if profiling:
+            mitogen.core.enable_profiling()
+            mitogen.master.Router.profiling = profiling
+        utils.log_to_file(level=log_level)
+        return mitogen.core._profile_hook(
+            'app.main',
+            utils.run_with_router,
+            func,
+        )
+    return wrapper
diff --git a/ansible/plugins/mitogen-0.2.6/mitogen/compat/__init__.py b/ansible/plugins/mitogen-0.2.6/mitogen/compat/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/ansible/plugins/mitogen-0.2.6/mitogen/compat/pkgutil.py b/ansible/plugins/mitogen-0.2.6/mitogen/compat/pkgutil.py
new file mode 100644
index 000000000..28e2aeade
--- /dev/null
+++ b/ansible/plugins/mitogen-0.2.6/mitogen/compat/pkgutil.py
@@ -0,0 +1,593 @@
+"""Utilities to support packages."""
+
+# !mitogen: minify_safe
+
+# NOTE: This module must remain compatible with Python 2.3, as it is shared
+# by setuptools for distribution with Python 2.3 and up.
+
+import os
+import sys
+import imp
+import os.path
+from types import ModuleType
+
+__all__ = [
+    'get_importer', 'iter_importers', 'get_loader', 'find_loader',
+    'walk_packages', 'iter_modules', 'get_data',
+    'ImpImporter', 'ImpLoader', 'read_code', 'extend_path',
+]
+
+def read_code(stream):
+    # This helper is needed in order for the PEP 302 emulation to
+    # correctly handle compiled files
+    import marshal
+
+    magic = stream.read(4)  # a .pyc begins with a 4-byte magic number
+    if magic != imp.get_magic():
+        return None  # bytecode produced by a different interpreter version
+
+    stream.read(4) # Skip timestamp
+    return marshal.load(stream)  # rest of the stream is the marshalled code object
+
+
+def simplegeneric(func):
+    """Make a trivial single-dispatch generic function"""
+    registry = {}  # maps type -> registered implementation
+    def wrapper(*args, **kw):
+        ob = args[0]
+        try:
+            cls = ob.__class__
+        except AttributeError:
+            cls = type(ob)  # fall back when ob exposes no __class__
+        try:
+            mro = cls.__mro__
+        except AttributeError:
+            try:  # old-style class: synthesize an MRO via a temporary new-style subclass
+                class cls(cls, object):
+                    pass
+                mro = cls.__mro__[1:]
+            except TypeError:
+                mro = object,   # must be an ExtensionClass or some such  :(
+        for t in mro:
+            if t in registry:
+                return registry[t](*args, **kw)
+        else:  # no registered type matched; use the default implementation
+            return func(*args, **kw)
+    try:
+        wrapper.__name__ = func.__name__
+    except (TypeError, AttributeError):
+        pass    # Python 2.3 doesn't allow functions to be renamed
+
+    def register(typ, func=None):  # callable directly, or as a decorator when func is omitted
+        if func is None:
+            return lambda f: register(typ, f)
+        registry[typ] = func
+        return func
+
+    wrapper.__dict__ = func.__dict__
+    wrapper.__doc__ = func.__doc__
+    wrapper.register = register
+    return wrapper
+
+
+def walk_packages(path=None, prefix='', onerror=None):
+    """Yields (module_loader, name, ispkg) for all modules recursively
+    on path, or, if path is None, all accessible modules.
+
+    'path' should be either None or a list of paths to look for
+    modules in.
+
+    'prefix' is a string to output on the front of every module name
+    on output.
+
+    Note that this function must import all *packages* (NOT all
+    modules!) on the given path, in order to access the __path__
+    attribute to find submodules.
+
+    'onerror' is a function which gets called with one argument (the
+    name of the package which was being imported) if any exception
+    occurs while trying to import a package.  If no onerror function is
+    supplied, ImportErrors are caught and ignored, while all other
+    exceptions are propagated, terminating the search.
+
+    Examples:
+
+    # list all modules python can access
+    walk_packages()
+
+    # list all submodules of ctypes
+    walk_packages(ctypes.__path__, ctypes.__name__+'.')
+    """
+
+    def seen(p, m={}):  # mutable default is deliberate: memo shared by all seen() calls in this walk
+        if p in m:
+            return True
+        m[p] = True
+
+    for importer, name, ispkg in iter_modules(path, prefix):
+        yield importer, name, ispkg
+
+        if ispkg:
+            try:
+                __import__(name)  # packages must be imported to expose __path__
+            except ImportError:
+                if onerror is not None:
+                    onerror(name)
+            except Exception:
+                if onerror is not None:
+                    onerror(name)
+                else:
+                    raise
+            else:
+                path = getattr(sys.modules[name], '__path__', None) or []
+
+                # don't traverse path items we've seen before
+                path = [p for p in path if not seen(p)]
+
+                for item in walk_packages(path, name+'.', onerror):
+                    yield item
+
+
+def iter_modules(path=None, prefix=''):
+    """Yields (module_loader, name, ispkg) for all submodules on path,
+    or, if path is None, all top-level modules on sys.path.
+
+    'path' should be either None or a list of paths to look for
+    modules in.
+
+    'prefix' is a string to output on the front of every module name
+    on output.
+    """
+
+    if path is None:
+        importers = iter_importers()
+    else:
+        importers = map(get_importer, path)
+
+    yielded = {}  # first importer to supply a name wins; later duplicates are dropped
+    for i in importers:
+        for name, ispkg in iter_importer_modules(i, prefix):
+            if name not in yielded:
+                yielded[name] = 1
+                yield i, name, ispkg
+
+
+#@simplegeneric
+def iter_importer_modules(importer, prefix=''):
+    if not hasattr(importer, 'iter_modules'):  # importer cannot enumerate its modules
+        return []
+    return importer.iter_modules(prefix)
+
+iter_importer_modules = simplegeneric(iter_importer_modules)  # applied manually: no @ syntax in Python 2.3
+
+
+class ImpImporter:
+    """PEP 302 Importer that wraps Python's "classic" import algorithm
+
+    ImpImporter(dirname) produces a PEP 302 importer that searches that
+    directory.  ImpImporter(None) produces a PEP 302 importer that searches
+    the current sys.path, plus any modules that are frozen or built-in.
+
+    Note that ImpImporter does not currently support being used by placement
+    on sys.meta_path.
+    """
+
+    def __init__(self, path=None):
+        self.path = path
+
+    def find_module(self, fullname, path=None):
+        # Note: we ignore 'path' argument since it is only used via meta_path
+        subname = fullname.split(".")[-1]
+        if subname != fullname and self.path is None:  # dotted name but no package directory to search
+            return None
+        if self.path is None:
+            path = None
+        else:
+            path = [os.path.realpath(self.path)]
+        try:
+            file, filename, etc = imp.find_module(subname, path)
+        except ImportError:
+            return None
+        return ImpLoader(fullname, file, filename, etc)
+
+    def iter_modules(self, prefix=''):
+        if self.path is None or not os.path.isdir(self.path):
+            return
+
+        yielded = {}
+        import inspect
+        try:
+            filenames = os.listdir(self.path)
+        except OSError:
+            # ignore unreadable directories like import does
+            filenames = []
+        filenames.sort()  # handle packages before same-named modules
+
+        for fn in filenames:
+            modname = inspect.getmodulename(fn)
+            if modname=='__init__' or modname in yielded:
+                continue
+
+            path = os.path.join(self.path, fn)
+            ispkg = False
+
+            if not modname and os.path.isdir(path) and '.' not in fn:  # candidate package directory
+                modname = fn
+                try:
+                    dircontents = os.listdir(path)
+                except OSError:
+                    # ignore unreadable directories like import does
+                    dircontents = []
+                for fn in dircontents:
+                    subname = inspect.getmodulename(fn)
+                    if subname=='__init__':
+                        ispkg = True
+                        break
+                else:
+                    continue    # not a package
+
+            if modname and '.' not in modname:
+                yielded[modname] = 1
+                yield prefix + modname, ispkg
+
+
+class ImpLoader:
+    """PEP 302 Loader that wraps Python's "classic" import algorithm
+    """
+    code = source = None  # lazy caches populated by get_code()/get_source()
+
+    def __init__(self, fullname, file, filename, etc):
+        self.file = file
+        self.filename = filename
+        self.fullname = fullname
+        self.etc = etc
+
+    def load_module(self, fullname):
+        self._reopen()
+        try:
+            mod = imp.load_module(fullname, self.file, self.filename, self.etc)
+        finally:
+            if self.file:
+                self.file.close()
+        # Note: we don't set __loader__ because we want the module to look
+        # normal; i.e. this is just a wrapper for standard import machinery
+        return mod
+
+    def get_data(self, pathname):
+        return open(pathname, "rb").read()
+
+    def _reopen(self):  # reopen self.file if a previous operation closed it
+        if self.file and self.file.closed:
+            mod_type = self.etc[2]
+            if mod_type==imp.PY_SOURCE:
+                self.file = open(self.filename, 'rU')
+            elif mod_type in (imp.PY_COMPILED, imp.C_EXTENSION):
+                self.file = open(self.filename, 'rb')
+
+    def _fix_name(self, fullname):  # this loader only serves the module it was built for
+        if fullname is None:
+            fullname = self.fullname
+        elif fullname != self.fullname:
+            raise ImportError("Loader for module %s cannot handle "
+                              "module %s" % (self.fullname, fullname))
+        return fullname
+
+    def is_package(self, fullname):
+        fullname = self._fix_name(fullname)
+        return self.etc[2]==imp.PKG_DIRECTORY
+
+    def get_code(self, fullname=None):
+        fullname = self._fix_name(fullname)
+        if self.code is None:
+            mod_type = self.etc[2]
+            if mod_type==imp.PY_SOURCE:
+                source = self.get_source(fullname)
+                self.code = compile(source, self.filename, 'exec')
+            elif mod_type==imp.PY_COMPILED:
+                self._reopen()
+                try:
+                    self.code = read_code(self.file)
+                finally:
+                    self.file.close()
+            elif mod_type==imp.PKG_DIRECTORY:
+                self.code = self._get_delegate().get_code()
+        return self.code
+
+    def get_source(self, fullname=None):
+        fullname = self._fix_name(fullname)
+        if self.source is None:
+            mod_type = self.etc[2]
+            if mod_type==imp.PY_SOURCE:
+                self._reopen()
+                try:
+                    self.source = self.file.read()
+                finally:
+                    self.file.close()
+            elif mod_type==imp.PY_COMPILED:
+                if os.path.exists(self.filename[:-1]):  # drop trailing 'c'/'o' to locate the .py source
+                    f = open(self.filename[:-1], 'rU')
+                    self.source = f.read()
+                    f.close()
+            elif mod_type==imp.PKG_DIRECTORY:
+                self.source = self._get_delegate().get_source()
+        return self.source
+
+
+    def _get_delegate(self):  # loader for the package's __init__ module
+        return ImpImporter(self.filename).find_module('__init__')
+
+    def get_filename(self, fullname=None):
+        fullname = self._fix_name(fullname)
+        mod_type = self.etc[2]
+        if self.etc[2]==imp.PKG_DIRECTORY:
+            return self._get_delegate().get_filename()
+        elif self.etc[2] in (imp.PY_SOURCE, imp.PY_COMPILED, imp.C_EXTENSION):
+            return self.filename
+        return None
+
+
+try:
+    import zipimport
+    from zipimport import zipimporter
+
+    def iter_zipimport_modules(importer, prefix=''):
+        dirlist = zipimport._zip_directory_cache[importer.archive].keys()  # all entry names in the archive
+        dirlist.sort()
+        _prefix = importer.prefix
+        plen = len(_prefix)
+        yielded = {}
+        import inspect
+        for fn in dirlist:
+            if not fn.startswith(_prefix):
+                continue
+
+            fn = fn[plen:].split(os.sep)
+
+            if len(fn)==2 and fn[1].startswith('__init__.py'):  # direct subdirectory with __init__: a package
+                if fn[0] not in yielded:
+                    yielded[fn[0]] = 1
+                    yield fn[0], True
+
+            if len(fn)!=1:
+                continue
+
+            modname = inspect.getmodulename(fn[0])
+            if modname=='__init__':
+                continue
+
+            if modname and '.' not in modname and modname not in yielded:
+                yielded[modname] = 1
+                yield prefix + modname, False
+
+    iter_importer_modules.register(zipimporter, iter_zipimport_modules)
+
+except ImportError:
+    pass
+
+
+def get_importer(path_item):
+    """Retrieve a PEP 302 importer for the given path item
+
+    The returned importer is cached in sys.path_importer_cache
+    if it was newly created by a path hook.
+
+    If there is no importer, a wrapper around the basic import
+    machinery is returned. This wrapper is never inserted into
+    the importer cache (None is inserted instead).
+
+    The cache (or part of it) can be cleared manually if a
+    rescan of sys.path_hooks is necessary.
+    """
+    try:
+        importer = sys.path_importer_cache[path_item]
+    except KeyError:
+        for path_hook in sys.path_hooks:
+            try:
+                importer = path_hook(path_item)
+                break
+            except ImportError:
+                pass  # hook declined this path item; try the next one
+        else:
+            importer = None  # no hook claimed the path item
+        sys.path_importer_cache.setdefault(path_item, importer)  # setdefault: never clobber an existing entry
+
+    if importer is None:
+        try:
+            importer = ImpImporter(path_item)  # classic-machinery fallback; deliberately not cached
+        except ImportError:
+            importer = None
+    return importer
+
+
+def iter_importers(fullname=""):
+    """Yield PEP 302 importers for the given module name
+
+    If fullname contains a '.', the importers will be for the package
+    containing fullname, otherwise they will be importers for sys.meta_path,
+    sys.path, and Python's "classic" import machinery, in that order.  If
+    the named module is in a package, that package is imported as a side
+    effect of invoking this function.
+
+    Non PEP 302 mechanisms (e.g. the Windows registry) used by the
+    standard import machinery to find files in alternative locations
+    are partially supported, but are searched AFTER sys.path. Normally,
+    these locations are searched BEFORE sys.path, preventing sys.path
+    entries from shadowing them.
+
+    For this to cause a visible difference in behaviour, there must
+    be a module or package name that is accessible via both sys.path
+    and one of the non PEP 302 file system mechanisms. In this case,
+    the emulation will find the former version, while the builtin
+    import mechanism will find the latter.
+
+    Items of the following types can be affected by this discrepancy:
+        imp.C_EXTENSION, imp.PY_SOURCE, imp.PY_COMPILED, imp.PKG_DIRECTORY
+    """
+    if fullname.startswith('.'):
+        raise ImportError("Relative module names not supported")
+    if '.' in fullname:
+        # Get the containing package's __path__
+        pkg = '.'.join(fullname.split('.')[:-1])
+        if pkg not in sys.modules:
+            __import__(pkg)  # importing the parent package is a documented side effect
+        path = getattr(sys.modules[pkg], '__path__', None) or []
+    else:
+        for importer in sys.meta_path:  # meta_path is only consulted for top-level names here
+            yield importer
+        path = sys.path
+    for item in path:
+        yield get_importer(item)
+    if '.' not in fullname:
+        yield ImpImporter()  # classic import machinery, searched last
+
+def get_loader(module_or_name):
+    """Get a PEP 302 "loader" object for module_or_name
+
+    If the module or package is accessible via the normal import
+    mechanism, a wrapper around the relevant part of that machinery
+    is returned.  Returns None if the module cannot be found or imported.
+    If the named module is not already imported, its containing package
+    (if any) is imported, in order to establish the package __path__.
+
+    This function uses iter_importers(), and is thus subject to the same
+    limitations regarding platform-specific special import locations such
+    as the Windows registry.
+    """
+    if module_or_name in sys.modules:
+        module_or_name = sys.modules[module_or_name]
+    if isinstance(module_or_name, ModuleType):
+        module = module_or_name
+        loader = getattr(module, '__loader__', None)  # an imported module may already carry its loader
+        if loader is not None:
+            return loader
+        fullname = module.__name__
+    else:
+        fullname = module_or_name
+    return find_loader(fullname)
+
+def find_loader(fullname):
+    """Find a PEP 302 "loader" object for fullname
+
+    If fullname contains dots, path must be the containing package's __path__.
+    Returns None if the module cannot be found or imported. This function uses
+    iter_importers(), and is thus subject to the same limitations regarding
+    platform-specific special import locations such as the Windows registry.
+    """
+    for importer in iter_importers(fullname):
+        loader = importer.find_module(fullname)  # first importer that claims the name wins
+        if loader is not None:
+            return loader
+
+    return None
+
+
+def extend_path(path, name):
+    """Extend a package's path.
+
+    Intended use is to place the following code in a package's __init__.py:
+
+        from pkgutil import extend_path
+        __path__ = extend_path(__path__, __name__)
+
+    This will add to the package's __path__ all subdirectories of
+    directories on sys.path named after the package.  This is useful
+    if one wants to distribute different parts of a single logical
+    package as multiple directories.
+
+    It also looks for *.pkg files beginning where * matches the name
+    argument.  This feature is similar to *.pth files (see site.py),
+    except that it doesn't special-case lines starting with 'import'.
+    A *.pkg file is trusted at face value: apart from checking for
+    duplicates, all entries found in a *.pkg file are added to the
+    path, regardless of whether they exist on the filesystem.  (This
+    is a feature.)
+
+    If the input path is not a list (as is the case for frozen
+    packages) it is returned unchanged.  The input path is not
+    modified; an extended copy is returned.  Items are only appended
+    to the copy at the end.
+
+    It is assumed that sys.path is a sequence.  Items of sys.path that
+    are not (unicode or 8-bit) strings referring to existing
+    directories are ignored.  Unicode items of sys.path that cause
+    errors when used as filenames may cause this function to raise an
+    exception (in line with os.path.isdir() behavior).
+    """
+
+    if not isinstance(path, list):
+        # This could happen e.g. when this is called from inside a
+        # frozen package.  Return the path unchanged in that case.
+        return path
+
+    pname = os.path.join(*name.split('.')) # Reconstitute as relative path
+    # Just in case os.extsep != '.'
+    sname = os.extsep.join(name.split('.'))
+    sname_pkg = sname + os.extsep + "pkg"
+    init_py = "__init__" + os.extsep + "py"
+
+    path = path[:] # Start with a copy of the existing path
+
+    for dir in sys.path:
+        if not isinstance(dir, basestring) or not os.path.isdir(dir):  # Python 2 'basestring': skip non-string/missing entries
+            continue
+        subdir = os.path.join(dir, pname)
+        # XXX This may still add duplicate entries to path on
+        # case-insensitive filesystems
+        initfile = os.path.join(subdir, init_py)
+        if subdir not in path and os.path.isfile(initfile):
+            path.append(subdir)
+        # XXX Is this the right thing for subpackages like zope.app?
+        # It looks for a file named "zope.app.pkg"
+        pkgfile = os.path.join(dir, sname_pkg)
+        if os.path.isfile(pkgfile):
+            try:
+                f = open(pkgfile)
+            except IOError, msg:
+                sys.stderr.write("Can't open %s: %s\n" %
+                                 (pkgfile, msg))
+            else:
+                for line in f:
+                    line = line.rstrip('\n')
+                    if not line or line.startswith('#'):
+                        continue
+                    path.append(line) # Don't check for existence!
+                f.close()
+
+    return path
+
+def get_data(package, resource):
+    """Get a resource from a package.
+
+    This is a wrapper round the PEP 302 loader get_data API. The package
+    argument should be the name of a package, in standard module format
+    (foo.bar). The resource argument should be in the form of a relative
+    filename, using '/' as the path separator. The parent directory name '..'
+    is not allowed, and nor is a rooted name (starting with a '/').
+
+    The function returns a binary string, which is the contents of the
+    specified resource.
+
+    For packages located in the filesystem, which have already been imported,
+    this is the rough equivalent of
+
+        d = os.path.dirname(sys.modules[package].__file__)
+        data = open(os.path.join(d, resource), 'rb').read()
+
+    If the package cannot be located or loaded, or it uses a PEP 302 loader
+    which does not support get_data(), then None is returned.
+    """
+
+    loader = get_loader(package)
+    if loader is None or not hasattr(loader, 'get_data'):
+        return None
+    mod = sys.modules.get(package) or loader.load_module(package)  # import the package if not already loaded
+    if mod is None or not hasattr(mod, '__file__'):
+        return None
+
+    # Modify the resource name to be compatible with the loader.get_data
+    # signature - an os.path format "filename" starting with the dirname of
+    # the package's __file__
+    parts = resource.split('/')
+    parts.insert(0, os.path.dirname(mod.__file__))
+    resource_name = os.path.join(*parts)
+    return loader.get_data(resource_name)
diff --git a/ansible/plugins/mitogen-0.2.6/mitogen/compat/tokenize.py b/ansible/plugins/mitogen-0.2.6/mitogen/compat/tokenize.py
new file mode 100644
index 000000000..0473c6a5c
--- /dev/null
+++ b/ansible/plugins/mitogen-0.2.6/mitogen/compat/tokenize.py
@@ -0,0 +1,453 @@
+"""Tokenization help for Python programs.
+
+generate_tokens(readline) is a generator that breaks a stream of
+text into Python tokens.  It accepts a readline-like method which is called
+repeatedly to get the next line of input (or "" for EOF).  It generates
+5-tuples with these members:
+
+    the token type (see token.py)
+    the token (a string)
+    the starting (row, column) indices of the token (a 2-tuple of ints)
+    the ending (row, column) indices of the token (a 2-tuple of ints)
+    the original line (string)
+
+It is designed to match the working of the Python tokenizer exactly, except
+that it produces COMMENT tokens for comments and gives type OP for all
+operators
+
+Older entry points
+    tokenize_loop(readline, tokeneater)
+    tokenize(readline, tokeneater=printtoken)
+are the same, except instead of generating tokens, tokeneater is a callback
+function to which the 5 fields described above are passed as 5 arguments,
+each time a new token is found."""
+
+# !mitogen: minify_safe
+
+__author__ = 'Ka-Ping Yee <ping@lfw.org>'
+__credits__ = ('GvR, ESR, Tim Peters, Thomas Wouters, Fred Drake, '
+               'Skip Montanaro, Raymond Hettinger')
+
+from itertools import chain
+import string, re
+from token import *
+
+import token
+__all__ = [x for x in dir(token) if not x.startswith("_")]
+__all__ += ["COMMENT", "tokenize", "generate_tokens", "NL", "untokenize"]
+del token
+
+COMMENT = N_TOKENS
+tok_name[COMMENT] = 'COMMENT'
+NL = N_TOKENS + 1
+tok_name[NL] = 'NL'
+N_TOKENS += 2
+
def group(*choices):
    """Join regex alternatives into one capturing group: ``(a|b|c)``."""
    return '(%s)' % '|'.join(choices)

def any(*choices):
    """Zero-or-more repetitions of a group. NOTE: shadows the builtin any()
    within this module; intentional in this vendored py2 tokenize copy."""
    return group(*choices) + '*'

def maybe(*choices):
    """Optional (zero-or-one) occurrence of a group."""
    return group(*choices) + '?'
+
# Master regex fragments for the Python 2 grammar (plus later additions such
# as 0o/0b integer literals and b'' string prefixes), assembled with the
# group/any/maybe helpers above. NOTE: `any` here is the regex helper defined
# in this module, not the builtin.
Whitespace = r'[ \f\t]*'
Comment = r'#[^\r\n]*'
Ignore = Whitespace + any(r'\\\r?\n' + Whitespace) + maybe(Comment)
Name = r'[a-zA-Z_]\w*'

Hexnumber = r'0[xX][\da-fA-F]+[lL]?'
Octnumber = r'(0[oO][0-7]+)|(0[0-7]*)[lL]?'
Binnumber = r'0[bB][01]+[lL]?'
Decnumber = r'[1-9]\d*[lL]?'
Intnumber = group(Hexnumber, Binnumber, Octnumber, Decnumber)
Exponent = r'[eE][-+]?\d+'
Pointfloat = group(r'\d+\.\d*', r'\.\d+') + maybe(Exponent)
Expfloat = r'\d+' + Exponent
Floatnumber = group(Pointfloat, Expfloat)
Imagnumber = group(r'\d+[jJ]', Floatnumber + r'[jJ]')
Number = group(Imagnumber, Floatnumber, Intnumber)

# Tail end of ' string.
Single = r"[^'\\]*(?:\\.[^'\\]*)*'"
# Tail end of " string.
Double = r'[^"\\]*(?:\\.[^"\\]*)*"'
# Tail end of ''' string.
Single3 = r"[^'\\]*(?:(?:\\.|'(?!''))[^'\\]*)*'''"
# Tail end of """ string.
Double3 = r'[^"\\]*(?:(?:\\.|"(?!""))[^"\\]*)*"""'
Triple = group("[uUbB]?[rR]?'''", '[uUbB]?[rR]?"""')
# Single-line ' or " string.
String = group(r"[uUbB]?[rR]?'[^\n'\\]*(?:\\.[^\n'\\]*)*'",
               r'[uUbB]?[rR]?"[^\n"\\]*(?:\\.[^\n"\\]*)*"')

# Because of leftmost-then-longest match semantics, be sure to put the
# longest operators first (e.g., if = came before ==, == would get
# recognized as two instances of =).
Operator = group(r"\*\*=?", r">>=?", r"<<=?", r"<>", r"!=",
                 r"//=?",
                 r"[+\-*/%&|^=<>]=?",
                 r"~")

Bracket = '[][(){}]'
Special = group(r'\r?\n', r'[:;.,`@]')
Funny = group(Operator, Bracket, Special)

PlainToken = group(Number, Funny, String, Name)
Token = Ignore + PlainToken

# First (or only) line of ' or " string.
ContStr = group(r"[uUbB]?[rR]?'[^\n'\\]*(?:\\.[^\n'\\]*)*" +
                group("'", r'\\\r?\n'),
                r'[uUbB]?[rR]?"[^\n"\\]*(?:\\.[^\n"\\]*)*' +
                group('"', r'\\\r?\n'))
PseudoExtras = group(r'\\\r?\n|\Z', Comment, Triple)
PseudoToken = Whitespace + group(PseudoExtras, Number, Funny, ContStr, Name)

# Pre-compiled scanners; endprogs maps a string-opening token to the pattern
# matching the remainder of that string (None entries are bare prefixes).
tokenprog, pseudoprog, single3prog, double3prog = map(
    re.compile, (Token, PseudoToken, Single3, Double3))
endprogs = {"'": re.compile(Single), '"': re.compile(Double),
            "'''": single3prog, '"""': double3prog,
            "r'''": single3prog, 'r"""': double3prog,
            "u'''": single3prog, 'u"""': double3prog,
            "ur'''": single3prog, 'ur"""': double3prog,
            "R'''": single3prog, 'R"""': double3prog,
            "U'''": single3prog, 'U"""': double3prog,
            "uR'''": single3prog, 'uR"""': double3prog,
            "Ur'''": single3prog, 'Ur"""': double3prog,
            "UR'''": single3prog, 'UR"""': double3prog,
            "b'''": single3prog, 'b"""': double3prog,
            "br'''": single3prog, 'br"""': double3prog,
            "B'''": single3prog, 'B"""': double3prog,
            "bR'''": single3prog, 'bR"""': double3prog,
            "Br'''": single3prog, 'Br"""': double3prog,
            "BR'''": single3prog, 'BR"""': double3prog,
            'r': None, 'R': None, 'u': None, 'U': None,
            'b': None, 'B': None}

# Lookup tables of every legal prefix+quote combination, used by
# generate_tokens() to classify string-opening tokens.
triple_quoted = {}
for t in ("'''", '"""',
          "r'''", 'r"""', "R'''", 'R"""',
          "u'''", 'u"""', "U'''", 'U"""',
          "ur'''", 'ur"""', "Ur'''", 'Ur"""',
          "uR'''", 'uR"""', "UR'''", 'UR"""',
          "b'''", 'b"""', "B'''", 'B"""',
          "br'''", 'br"""', "Br'''", 'Br"""',
          "bR'''", 'bR"""', "BR'''", 'BR"""'):
    triple_quoted[t] = t
single_quoted = {}
for t in ("'", '"',
          "r'", 'r"', "R'", 'R"',
          "u'", 'u"', "U'", 'U"',
          "ur'", 'ur"', "Ur'", 'Ur"',
          "uR'", 'uR"', "UR'", 'UR"',
          "b'", 'b"', "B'", 'B"',
          "br'", 'br"', "Br'", 'Br"',
          "bR'", 'bR"', "BR'", 'BR"' ):
    single_quoted[t] = t

# Tab stops used when measuring indentation columns.
tabsize = 8
+
# Raised on EOF inside an unterminated multi-line string or statement; args
# carry a message plus the (row, col) where the construct began.
class TokenError(Exception): pass

# Raised by a tokeneater callback to make tokenize() stop early.
class StopTokenizing(Exception): pass
+
def printtoken(type, token, srow_scol, erow_ecol, line):
    """Default tokeneater: print one token as '<srow>,<scol>-<erow>,<ecol>:
    TYPE repr(token)' (used when tokenize() is run for testing)."""
    (srow, scol), (erow, ecol) = srow_scol, erow_ecol
    print("%d,%d-%d,%d:\t%s\t%s" % (
        srow, scol, erow, ecol, tok_name[type], repr(token)))
+
def tokenize(readline, tokeneater=printtoken):
    """Tokenize a stream, feeding each 5-tuple to `tokeneater`.

    `readline` must behave like file.readline(): each call returns the next
    line of input as a string, or '' at EOF. `tokeneater` is invoked once per
    token with the five values described in generate_tokens(); it may abort
    tokenization early by raising StopTokenizing.
    """
    try:
        tokenize_loop(readline, tokeneater)
    except StopTokenizing:
        # Raised by tokeneater to request early termination; not an error.
        pass
+
+# backwards compatible interface
def tokenize_loop(readline, tokeneater):
    """Drive generate_tokens(), applying `tokeneater` to every token tuple."""
    for tok in generate_tokens(readline):
        tokeneater(*tok)
+
class Untokenizer:
    # Reconstructs source text from a token stream; see untokenize().

    def __init__(self):
        # Accumulated output fragments, joined at the end of untokenize().
        self.tokens = []
        # (row, col) of the end of the most recently emitted token.
        self.prev_row = 1
        self.prev_col = 0

    def add_whitespace(self, start):
        # Emit filler so the next token lands exactly at position `start`.
        row, col = start
        if row < self.prev_row or row == self.prev_row and col < self.prev_col:
            raise ValueError("start ({},{}) precedes previous end ({},{})"
                             .format(row, col, self.prev_row, self.prev_col))
        row_offset = row - self.prev_row
        if row_offset:
            # Backslash continuations bridge row gaps without introducing
            # NEWLINE tokens when the output is re-tokenized.
            self.tokens.append("\\\n" * row_offset)
            self.prev_col = 0
        col_offset = col - self.prev_col
        if col_offset:
            self.tokens.append(" " * col_offset)

    def untokenize(self, iterable):
        # Render full 5-tuples exactly; fall back to compat() for 2-tuples.
        it = iter(iterable)
        indents = []
        startline = False
        for t in it:
            if len(t) == 2:
                # Limited input: only (type, string) available from here on.
                self.compat(t, it)
                break
            tok_type, token, start, end, line = t
            if tok_type == ENDMARKER:
                break
            if tok_type == INDENT:
                indents.append(token)
                continue
            elif tok_type == DEDENT:
                indents.pop()
                self.prev_row, self.prev_col = end
                continue
            elif tok_type in (NEWLINE, NL):
                startline = True
            elif startline and indents:
                # Re-emit the current indentation at the start of a line.
                indent = indents[-1]
                if start[1] >= len(indent):
                    self.tokens.append(indent)
                    self.prev_col = len(indent)
                startline = False
            self.add_whitespace(start)
            self.tokens.append(token)
            self.prev_row, self.prev_col = end
            if tok_type in (NEWLINE, NL):
                self.prev_row += 1
                self.prev_col = 0
        return "".join(self.tokens)

    def compat(self, token, iterable):
        # Best-effort rendering for 2-tuple input (no position info).
        indents = []
        toks_append = self.tokens.append
        startline = token[0] in (NEWLINE, NL)
        prevstring = False

        for tok in chain([token], iterable):
            toknum, tokval = tok[:2]

            if toknum in (NAME, NUMBER):
                tokval += ' '

            # Insert a space between two consecutive strings
            if toknum == STRING:
                if prevstring:
                    tokval = ' ' + tokval
                prevstring = True
            else:
                prevstring = False

            if toknum == INDENT:
                indents.append(tokval)
                continue
            elif toknum == DEDENT:
                indents.pop()
                continue
            elif toknum in (NEWLINE, NL):
                startline = True
            elif startline and indents:
                toks_append(indents[-1])
                startline = False
            toks_append(tokval)
+
def untokenize(iterable):
    """Transform tokens back into Python source code.

    Each element of `iterable` must be a token sequence with at least two
    members, a token number and a token value. Full 5-tuples round-trip
    exactly (output matches the original source); bare 2-tuples produce
    poorer output that merely re-tokenizes to the same stream:

        t1 = [tok[:2] for tok in generate_tokens(f.readline)]
        newcode = untokenize(t1)
        readline = iter(newcode.splitlines(1)).next
        t2 = [tok[:2] for tok in generate_tokens(readline)]
        assert t1 == t2
    """
    return Untokenizer().untokenize(iterable)
+
def generate_tokens(readline):
    """
    The generate_tokens() generator requires one argument, readline, which
    must be a callable object which provides the same interface as the
    readline() method of built-in file objects. Each call to the function
    should return one line of input as a string.  Alternately, readline
    can be a callable function terminating with StopIteration:
        readline = open(myfile).next    # Example of alternate readline

    The generator produces 5-tuples with these members: the token type; the
    token string; a 2-tuple (srow, scol) of ints specifying the row and
    column where the token begins in the source; a 2-tuple (erow, ecol) of
    ints specifying the row and column where the token ends in the source;
    and the line on which the token was found. The line passed is the
    logical line; continuation lines are included.
    """
    lnum = parenlev = continued = 0
    namechars, numchars = string.ascii_letters + '_', '0123456789'
    # contstr/contline accumulate a string/logical line spanning multiple
    # physical lines; needcont marks a single-quoted string that may only
    # continue via a trailing backslash.
    contstr, needcont = '', 0
    contline = None
    # Stack of indentation columns; indents[0] is always 0.
    indents = [0]

    while 1:                                   # loop over lines in stream
        try:
            line = readline()
        except StopIteration:
            line = ''
        lnum += 1
        pos, max = 0, len(line)

        if contstr:                            # continued string
            if not line:
                raise TokenError("EOF in multi-line string", strstart)
            endmatch = endprog.match(line)
            if endmatch:
                pos = end = endmatch.end(0)
                yield (STRING, contstr + line[:end],
                       strstart, (lnum, end), contline + line)
                contstr, needcont = '', 0
                contline = None
            elif needcont and line[-2:] != '\\\n' and line[-3:] != '\\\r\n':
                # Unescaped newline inside a single-quoted string: error out.
                yield (ERRORTOKEN, contstr + line,
                           strstart, (lnum, len(line)), contline)
                contstr = ''
                contline = None
                continue
            else:
                contstr = contstr + line
                contline = contline + line
                continue

        elif parenlev == 0 and not continued:  # new statement
            if not line: break
            column = 0
            while pos < max:                   # measure leading whitespace
                if line[pos] == ' ':
                    column += 1
                elif line[pos] == '\t':
                    column = (column//tabsize + 1)*tabsize
                elif line[pos] == '\f':
                    column = 0
                else:
                    break
                pos += 1
            if pos == max:
                break

            if line[pos] in '#\r\n':           # skip comments or blank lines
                if line[pos] == '#':
                    comment_token = line[pos:].rstrip('\r\n')
                    nl_pos = pos + len(comment_token)
                    yield (COMMENT, comment_token,
                           (lnum, pos), (lnum, pos + len(comment_token)), line)
                    yield (NL, line[nl_pos:],
                           (lnum, nl_pos), (lnum, len(line)), line)
                else:
                    yield ((NL, COMMENT)[line[pos] == '#'], line[pos:],
                           (lnum, pos), (lnum, len(line)), line)
                continue

            if column > indents[-1]:           # count indents or dedents
                indents.append(column)
                yield (INDENT, line[:pos], (lnum, 0), (lnum, pos), line)
            while column < indents[-1]:
                if column not in indents:
                    raise IndentationError(
                        "unindent does not match any outer indentation level",
                        ("<tokenize>", lnum, pos, line))
                indents = indents[:-1]
                yield (DEDENT, '', (lnum, pos), (lnum, pos), line)

        else:                                  # continued statement
            if not line:
                raise TokenError("EOF in multi-line statement", (lnum, 0))
            continued = 0

        while pos < max:
            pseudomatch = pseudoprog.match(line, pos)
            if pseudomatch:                                # scan for tokens
                start, end = pseudomatch.span(1)
                spos, epos, pos = (lnum, start), (lnum, end), end
                if start == end:
                    continue
                token, initial = line[start:end], line[start]

                if initial in numchars or \
                   (initial == '.' and token != '.'):      # ordinary number
                    yield (NUMBER, token, spos, epos, line)
                elif initial in '\r\n':
                    # Newlines inside brackets are non-logical (NL).
                    if parenlev > 0:
                        n = NL
                    else:
                        n = NEWLINE
                    yield (n, token, spos, epos, line)
                elif initial == '#':
                    assert not token.endswith("\n")
                    yield (COMMENT, token, spos, epos, line)
                elif token in triple_quoted:
                    endprog = endprogs[token]
                    endmatch = endprog.match(line, pos)
                    if endmatch:                           # all on one line
                        pos = endmatch.end(0)
                        token = line[start:pos]
                        yield (STRING, token, spos, (lnum, pos), line)
                    else:
                        strstart = (lnum, start)           # multiple lines
                        contstr = line[start:]
                        contline = line
                        break
                elif initial in single_quoted or \
                    token[:2] in single_quoted or \
                    token[:3] in single_quoted:
                    if token[-1] == '\n':                  # continued string
                        strstart = (lnum, start)
                        endprog = (endprogs[initial] or endprogs[token[1]] or
                                   endprogs[token[2]])
                        contstr, needcont = line[start:], 1
                        contline = line
                        break
                    else:                                  # ordinary string
                        yield (STRING, token, spos, epos, line)
                elif initial in namechars:                 # ordinary name
                    yield (NAME, token, spos, epos, line)
                elif initial == '\\':                      # continued stmt
                    continued = 1
                else:
                    if initial in '([{':
                        parenlev += 1
                    elif initial in ')]}':
                        parenlev -= 1
                    yield (OP, token, spos, epos, line)
            else:
                yield (ERRORTOKEN, line[pos],
                           (lnum, pos), (lnum, pos+1), line)
                pos += 1

    for indent in indents[1:]:                 # pop remaining indent levels
        yield (DEDENT, '', (lnum, 0), (lnum, 0), '')
    yield (ENDMARKER, '', (lnum, 0), (lnum, 0), '')
+
if __name__ == '__main__':                     # testing
    import sys
    # CLI: tokenize the named file, or stdin when no argument is given,
    # printing each token via the default printtoken tokeneater.
    if len(sys.argv) > 1:
        tokenize(open(sys.argv[1]).readline)
    else:
        tokenize(sys.stdin.readline)
diff --git a/ansible/plugins/mitogen-0.2.6/mitogen/core.py b/ansible/plugins/mitogen-0.2.6/mitogen/core.py
new file mode 100644
index 000000000..578337f7c
--- /dev/null
+++ b/ansible/plugins/mitogen-0.2.6/mitogen/core.py
@@ -0,0 +1,3409 @@
+# Copyright 2019, David Wilson
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its contributors
+# may be used to endorse or promote products derived from this software without
+# specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+
+# !mitogen: minify_safe
+
+"""
+This module implements most package functionality, but remains separate from
+non-essential code in order to reduce its size, since it is also serves as the
+bootstrap implementation sent to every new slave context.
+"""
+
import binascii
import collections
import encodings.latin_1
import encodings.utf_8
import errno
import fcntl
import itertools
import linecache
import logging
import os
import pickle as py_pickle
import pstats
import signal
import socket
import struct
import sys
import threading
import time
import traceback
import warnings
import weakref
import zlib
+
# Python >3.7 deprecated the imp module.
warnings.filterwarnings('ignore', message='the imp module is deprecated')
import imp

# Absolute imports for <2.5.
select = __import__('select')

# Optional / renamed stdlib modules: prefer the Python 2 names, fall back to
# their Python 3 equivalents.
try:
    import cProfile
except ImportError:
    cProfile = None

try:
    import thread
except ImportError:
    import threading as thread

try:
    import cPickle as pickle
except ImportError:
    import pickle

try:
    from cStringIO import StringIO as BytesIO
except ImportError:
    from io import BytesIO

# BaseException appeared in Python 2.5.
try:
    BaseException
except NameError:
    BaseException = Exception

# ModuleNotFoundError appeared in Python 3.6.
try:
    ModuleNotFoundError
except NameError:
    ModuleNotFoundError = ImportError
+
# TODO: usage of 'import' after setting __name__, but before fixing up
# sys.modules generates a warning. This happens when profiling = True.
warnings.filterwarnings('ignore',
    "Parent module 'mitogen' not found while handling absolute import")

LOG = logging.getLogger('mitogen')
IOLOG = logging.getLogger('mitogen.io')
# IO-level logging is extremely noisy; keep it at INFO unless explicitly
# raised by the embedding application.
IOLOG.setLevel(logging.INFO)
+
+LATIN1_CODEC = encodings.latin_1.Codec()
+# str.encode() may take import lock. Deadlock possible if broker calls
+# .encode() on behalf of thread currently waiting for module.
+UTF8_CODEC = encodings.latin_1.Codec()
+
# Verbosity fast-path flags; presumably set True when DEBUG/IO logging is
# enabled so hot paths can skip log formatting - confirm where assigned.
_v = False
_vv = False

# Well-known message handle numbers: every Message carries one of these in
# its `handle` field to select the receiving service.
GET_MODULE = 100
CALL_FUNCTION = 101
FORWARD_LOG = 102
ADD_ROUTE = 103
DEL_ROUTE = 104
ALLOCATE_ID = 105
SHUTDOWN = 106
LOAD_MODULE = 107
FORWARD_MODULE = 108
DETACHING = 109
CALL_SERVICE = 110

#: Special value used to signal disconnection or the inability to route a
#: message, when it appears in the `reply_to` field. Usually causes
#: :class:`mitogen.core.ChannelError` to be raised when it is received.
#:
#: It indicates the sender did not know how to process the message, or wishes
#: no further messages to be delivered to it. It is used when:
#:
#:  * a remote receiver is disconnected or explicitly closed.
#:  * a related message could not be delivered due to no route existing for it.
#:  * a router is being torn down, as a sentinel value to notify
#:    :meth:`mitogen.core.Router.add_handler` callbacks to clean up.
IS_DEAD = 999
+
# NOTE(review): duplicate of the BaseException shim earlier in the module;
# harmless, but could be removed upstream.
try:
    BaseException
except NameError:
    BaseException = Exception

# Interpreter version flags used throughout the module.
PY24 = sys.version_info < (2, 5)
PY3 = sys.version_info > (3,)
if PY3:
    b = str.encode                   # text -> bytes
    BytesType = bytes
    UnicodeType = str
    FsPathTypes = (str,)
    BufferType = lambda buf, start: memoryview(buf)[start:]
    long = int
else:
    b = str
    BytesType = str
    FsPathTypes = (str, unicode)
    BufferType = buffer
    UnicodeType = unicode

AnyTextType = (BytesType, UnicodeType)

# Python 2.4/2.5 lacked the next() builtin.
try:
    next
except NameError:
    next = lambda it: it.next()
+
# #550: prehistoric WSL did not advertise itself in uname output, so sniff
# the kernel release string instead. Missing /proc entry (non-Linux) means
# not WSL.
try:
    fp = open('/proc/sys/kernel/osrelease')
    try:
        IS_WSL = 'Microsoft' in fp.read()
    finally:
        # Close even when read() raises (e.g. EIO); previously the
        # descriptor leaked in that case.
        fp.close()
except IOError:
    IS_WSL = False
+
+
#: Default size for calls to :meth:`Side.read` or :meth:`Side.write`, and the
#: size of buffers configured by :func:`mitogen.parent.create_socketpair`. This
#: value has many performance implications, 128KiB seems to be a sweet spot.
#:
#: * When set low, large messages cause many :class:`Broker` IO loop
#:   iterations, burning CPU and reducing throughput.
#: * When set high, excessive RAM is reserved by the OS for socket buffers (2x
#:   per child), and an identically sized temporary userspace buffer is
#:   allocated on each read that requires zeroing, and over a particular size
#:   may require two system calls to allocate/deallocate.
#:
#: Care must be taken to ensure the underlying kernel object and receiving
#: program support the desired size. For example,
#:
#: * Most UNIXes have TTYs with fixed 2KiB-4KiB buffers, making them unsuitable
#:   for efficient IO.
#: * Different UNIXes have varying presets for pipes, which may not be
#:   configurable. On recent Linux the default pipe buffer size is 64KiB, but
#:   under memory pressure may be as low as 4KiB for unprivileged processes.
#: * When communication is via an intermediary process, its internal buffers
#:   effect the speed OS buffers will drain. For example OpenSSH uses 64KiB
#:   reads.
#:
#: An ideal :class:`Message` has a size that is a multiple of
#: :data:`CHUNK_SIZE` inclusive of headers, to avoid wasting IO loop iterations
#: writing small trailer chunks.
CHUNK_SIZE = 131072

# Thread-local storage; consumers are not visible in this chunk of the
# module - confirm usage before relying on its contents.
_tls = threading.local()
+
+
# Make the module self-consistent whether imported normally or executed as
# the bootstrap payload (__main__) in a new child context.
if __name__ == 'mitogen.core':
    # When loaded using import mechanism, ExternalContext.main() will not have
    # a chance to set the synthetic mitogen global, so just import it here.
    import mitogen
else:
    # When loaded as __main__, ensure classes and functions gain a __module__
    # attribute consistent with the host process, so that pickling succeeds.
    __name__ = 'mitogen.core'
+
+
class Error(Exception):
    """Base for all exceptions raised by Mitogen.

    :param str fmt:
        Exception text, or format string if `args` is non-empty.
    :param tuple args:
        Format string arguments.
    """
    def __init__(self, fmt=None, *args):
        if args:
            fmt %= args
        # Non-Unicode fmt is assumed to be UTF-8 encoded bytes, so messages
        # render consistently on both Python 2 and 3.
        if fmt and not isinstance(fmt, UnicodeType):
            fmt = fmt.decode('utf-8')
        Exception.__init__(self, fmt)
+
+
# Subclasses Error so callers can catch mitogen.core.Error generically.
class LatchError(Error):
    """Raised when an attempt is made to use a :class:`mitogen.core.Latch`
    that has been marked closed."""
    pass
+
+
class Blob(BytesType):
    """
    A serializable bytes subclass whose content is summarized in repr()
    output, making it suitable for logging binary data.
    """
    def __repr__(self):
        # Show only the length, never the payload.
        return '[blob: %d bytes]' % (len(self),)

    def __reduce__(self):
        # Pickle as plain bytes wrapped back into Blob on unpickle.
        return (Blob, (BytesType(self),))
+
+
class Secret(UnicodeType):
    """A serializable unicode subclass whose content is masked in repr()
    output, making it suitable for logging passwords."""
    def __repr__(self):
        # Never reveal the underlying text.
        return '[secret]'

    if not PY3:
        # TODO: what is this needed for in 2.x?
        def __str__(self):
            return UnicodeType(self)

    def __reduce__(self):
        # Pickle the underlying text; reconstructed as a Secret on unpickle.
        return (Secret, (UnicodeType(self),))
+
+
class Kwargs(dict):
    """
    A serializable dict subclass that indicates its keys should be coerced to
    Unicode on Python 3 and bytes on Python<2.6.

    Python 2 produces keyword argument dicts whose keys are bytes, requiring a
    helper to ensure compatibility with Python 3 where Unicode is required,
    whereas Python 3 produces keyword argument dicts whose keys are Unicode,
    requiring a helper for Python 2.4/2.5, where bytes are required.
    """
    if PY3:
        def __init__(self, dct):
            # Decode any byte keys originating from a Python 2 sender.
            for k, v in dct.items():
                if type(k) is bytes:
                    self[k.decode()] = v
                else:
                    self[k] = v
    elif sys.version_info < (2, 6):
        def __init__(self, dct):
            # Encode Unicode keys: 2.4/2.5 require native str keyword names.
            for k, v in dct.iteritems():
                if type(k) is unicode:
                    k, _ = UTF8_CODEC.encode(k)
                self[k] = v

    def __repr__(self):
        return 'Kwargs(%s)' % (dict.__repr__(self),)

    def __reduce__(self):
        # Pickle as a plain dict, reconstructed through this class so key
        # coercion re-runs on the receiving interpreter.
        return (Kwargs, (dict(self),))
+
+
class CallError(Error):
    """
    Serializable :class:`Error` subclass raised when :meth:`Context.call()
    <mitogen.parent.Context.call>` fails. A copy of the traceback from the
    external context is appended to the exception message.
    """
    def __init__(self, fmt=None, *args):
        if not isinstance(fmt, BaseException):
            Error.__init__(self, fmt, *args)
        else:
            # Summarize an exception instance: qualified type name, message,
            # and - when constructed inside an except block - its traceback.
            e = fmt
            cls = e.__class__
            fmt = '%s.%s: %s' % (cls.__module__, cls.__name__, e)
            tb = sys.exc_info()[2]
            if tb:
                fmt += '\n'
                fmt += ''.join(traceback.format_tb(tb))
            Error.__init__(self, fmt)

    def __reduce__(self):
        # Pickle only the message string; rebuilt via _unpickle_call_error,
        # which validates the input before reconstructing.
        return (_unpickle_call_error, (self.args[0],))
+
+
def _unpickle_call_error(s):
    """Reconstruct a CallError from its pickled message, rejecting anything
    that is not a reasonably sized text string."""
    valid = type(s) is UnicodeType and len(s) < 10000
    if not valid:
        raise TypeError('cannot unpickle CallError: bad input')
    return CallError(s)
+
+
class ChannelError(Error):
    """Raised when a channel dies or has been closed."""
    # Message text chosen by whichever end noticed the closure.
    remote_msg = 'Channel closed by remote end.'
    local_msg = 'Channel closed by local end.'


class StreamError(Error):
    """Raised when a stream cannot be established."""
    pass


class TimeoutError(Error):
    """Raised when a timeout occurs on a stream."""
    pass
+
+
def to_text(o):
    """Coerce `o` to text.

    Bytes are decoded as UTF-8; anything else goes through the text
    constructor. The result is always a plain text string - any subclass
    (e.g. Secret) is stripped.
    """
    if isinstance(o, BytesType):
        o = o.decode('utf-8')
    return UnicodeType(o)
+
+
# Python 2.4
try:
    any
except NameError:
    def any(it):
        # NOTE: returns None (not False) when no element is truthy; callers
        # in this module rely on truthiness only, so this is adequate.
        for elem in it:
            if elem:
                return True
+
+
+def _partition(s, sep, find):
+    """
+    (str|unicode).(partition|rpartition) for Python 2.4/2.5.
+    """
+    idx = find(sep)
+    if idx != -1:
+        left = s[0:idx]
+        return left, sep, s[len(left)+len(sep):]
+
+
# Python 2.5+ provides native (r)partition; otherwise emulate it with
# _partition, substituting the documented fallback triple when the separator
# is absent.
if hasattr(UnicodeType, 'rpartition'):
    str_partition = UnicodeType.partition
    str_rpartition = UnicodeType.rpartition
    bytes_partition = BytesType.partition
else:
    # sep missing: everything in head, like native partition.
    def str_partition(s, sep):
        return _partition(s, sep, s.find) or (s, u'', u'')
    # sep missing: everything in tail, like native rpartition.
    def str_rpartition(s, sep):
        return _partition(s, sep, s.rfind) or (u'', u'', s)
    def bytes_partition(s, sep):
        return _partition(s, sep, s.find) or (s, '', '')
+
+
def has_parent_authority(msg, _stream=None):
    """Policy function for use with :class:`Receiver` and
    :meth:`Router.add_handler` that requires incoming messages to originate
    from a parent context, or on a :class:`Stream` whose :attr:`auth_id
    <Stream.auth_id>` has been set to that of a parent context or the current
    context."""
    # `mitogen` is the synthetic module global (see the __name__ handling
    # near the top of this module); `_stream` is accepted for signature
    # compatibility with other policy functions and ignored here.
    return (msg.auth_id == mitogen.context_id or
            msg.auth_id in mitogen.parent_ids)
+
+
def listen(obj, name, func):
    """
    Arrange for `func(*args, **kwargs)` to be invoked when the named signal
    is fired by `obj`. The registry lives in an `_signals` dict created
    lazily on the object.
    """
    registry = vars(obj).setdefault('_signals', {})
    handlers = registry.setdefault(name, [])
    handlers.append(func)
+
+
def fire(obj, name, *args, **kwargs):
    """
    Invoke every function registered via listen() for the named signal on
    `obj`, passing `*args` and `**kwargs` through. Unknown signals and
    objects with no registrations are silently ignored.
    """
    for handler in vars(obj).get('_signals', {}).get(name, ()):
        handler(*args, **kwargs)
+
+
def takes_econtext(func):
    """Decorator: tag `func` with the `mitogen_takes_econtext` attribute and
    return it unchanged. The attribute is read elsewhere by the call
    machinery - presumably to inject the external context; confirm at the
    dispatch site."""
    func.mitogen_takes_econtext = True
    return func


def takes_router(func):
    """Decorator: tag `func` with the `mitogen_takes_router` attribute and
    return it unchanged; consumed by the same dispatch machinery as
    takes_econtext()."""
    func.mitogen_takes_router = True
    return func
+
+
def is_blacklisted_import(importer, fullname):
    """
    Return :data:`True` if `fullname` is part of a blacklisted package, or if
    any packages have been whitelisted and `fullname` is not part of one.

    NB:
      - If a package is on both lists, then it is treated as blacklisted.
      - If any package is whitelisted, then all non-whitelisted packages are
        treated as blacklisted.
    """
    whitelisted = any(fullname.startswith(s) for s in importer.whitelist)
    blacklisted = any(fullname.startswith(s) for s in importer.blacklist)
    return (not whitelisted) or blacklisted
+
+
+def set_cloexec(fd):
+    """Set the file descriptor `fd` to automatically close on
+    :func:`os.execve`. This has no effect on file descriptors inherited across
+    :func:`os.fork`, they must be explicitly closed through some other means,
+    such as :func:`mitogen.fork.on_fork`."""
+    flags = fcntl.fcntl(fd, fcntl.F_GETFD)
+    # NOTE(review): guard against clobbering stdio, but it runs after the
+    # first fcntl() and is stripped under -O — confirm intent upstream.
+    assert fd > 2
+    fcntl.fcntl(fd, fcntl.F_SETFD, flags | fcntl.FD_CLOEXEC)
+
+
+def set_nonblock(fd):
+    """Set the file descriptor `fd` to non-blocking mode. For most underlying
+    file types, this causes :func:`os.read` or :func:`os.write` to raise
+    :class:`OSError` with :data:`errno.EAGAIN` rather than block the thread
+    when the underlying kernel buffer is exhausted."""
+    # Read-modify-write so flags other than O_NONBLOCK are preserved.
+    flags = fcntl.fcntl(fd, fcntl.F_GETFL)
+    fcntl.fcntl(fd, fcntl.F_SETFL, flags | os.O_NONBLOCK)
+
+
+def set_block(fd):
+    """Inverse of :func:`set_nonblock`, i.e. cause `fd` to block the thread
+    when the underlying kernel buffer is exhausted."""
+    # Clear only the O_NONBLOCK bit, preserving all other status flags.
+    flags = fcntl.fcntl(fd, fcntl.F_GETFL)
+    fcntl.fcntl(fd, fcntl.F_SETFL, flags & ~os.O_NONBLOCK)
+
+
+def io_op(func, *args):
+    """Wrap `func(*args)` that may raise :class:`select.error`,
+    :class:`IOError`, or :class:`OSError`, trapping UNIX error codes relating
+    to disconnection and retry events in various subsystems:
+
+    * When a signal is delivered to the process on Python 2, system call retry
+      is signalled through :data:`errno.EINTR`. The invocation is automatically
+      restarted.
+    * When performing IO against a TTY, disconnection of the remote end is
+      signalled by :data:`errno.EIO`.
+    * When performing IO against a socket, disconnection of the remote end is
+      signalled by :data:`errno.ECONNRESET`.
+    * When performing IO against a pipe, disconnection of the remote end is
+      signalled by :data:`errno.EPIPE`.
+
+    :returns:
+        Tuple of `(return_value, disconnect_reason)`, where `return_value` is
+        the return value of `func(*args)`, and `disconnected` is an exception
+        instance when disconnection was detected, otherwise :data:`None`.
+    """
+    while True:
+        try:
+            return func(*args), None
+        except (select.error, OSError, IOError):
+            # sys.exc_info() form keeps the code valid on 2.x and 3.x alike.
+            e = sys.exc_info()[1]
+            _vv and IOLOG.debug('io_op(%r) -> OSError: %s', func, e)
+            # args[0] rather than .errno: select.error lacks .errno on 2.x.
+            if e.args[0] == errno.EINTR:
+                continue
+            if e.args[0] in (errno.EIO, errno.ECONNRESET, errno.EPIPE):
+                return None, e
+            raise
+
+
+class PidfulStreamHandler(logging.StreamHandler):
+    """A :class:`logging.StreamHandler` subclass used when
+    :meth:`Router.enable_debug() <mitogen.master.Router.enable_debug>` has been
+    called, or the `debug` parameter was specified during context construction.
+    Verifies the process ID has not changed on each call to :meth:`emit`,
+    reopening the associated log file when a change is detected.
+
+    This ensures logging to the per-process output files happens correctly even
+    when uncooperative third party components call :func:`os.fork`.
+    """
+    #: PID that last opened the log file.
+    open_pid = None
+
+    #: Output path template.
+    template = '/tmp/mitogen.%s.%s.log'
+
+    def _reopen(self):
+        # acquire()/release() is the logging.Handler per-handler lock,
+        # serialising against concurrent emit() calls.
+        self.acquire()
+        try:
+            if self.open_pid == os.getpid():
+                return
+            ts = time.strftime('%Y%m%d_%H%M%S')
+            path = self.template % (os.getpid(), ts)
+            # buffering=1: line-buffered, so partial log lines survive a crash.
+            self.stream = open(path, 'w', 1)
+            set_cloexec(self.stream.fileno())
+            self.stream.write('Parent PID: %s\n' % (os.getppid(),))
+            self.stream.write('Created by:\n\n%s\n' % (
+                ''.join(traceback.format_stack()),
+            ))
+            self.open_pid = os.getpid()
+        finally:
+            self.release()
+
+    def emit(self, record):
+        # Unlocked fast-path check; _reopen() re-verifies under the lock.
+        if self.open_pid != os.getpid():
+            self._reopen()
+        logging.StreamHandler.emit(self, record)
+
+
+def enable_debug_logging():
+    """Switch on verbose logging: set the module-level _v/_vv fast-path
+    flags, raise the root and IO loggers to DEBUG, and install a
+    fork-safe :class:`PidfulStreamHandler` ahead of existing handlers."""
+    global _v, _vv
+    _v = True
+    _vv = True
+    root = logging.getLogger()
+    root.setLevel(logging.DEBUG)
+    IOLOG.setLevel(logging.DEBUG)
+    handler = PidfulStreamHandler()
+    handler.formatter = logging.Formatter(
+        '%(asctime)s %(levelname).1s %(name)s: %(message)s',
+        '%H:%M:%S'
+    )
+    # Insert at the front so debug records hit the per-PID file first.
+    root.handlers.insert(0, handler)
+
+
+_profile_hook = lambda name, func, *args: func(*args)
+_profile_fmt = os.environ.get(
+    'MITOGEN_PROFILE_FMT',
+    '/tmp/mitogen.stats.%(pid)s.%(identity)s.%(now)s.%(ext)s',
+)
+
+
+def _profile_hook(name, func, *args):
+    """
+    Call `func(*args)` and return its result. This function is replaced by
+    :func:`_real_profile_hook` when :func:`enable_profiling` is called. This
+    interface is obsolete and will be replaced by a signals-based integration
+    later on.
+
+    `name` is unused here; :func:`_real_profile_hook` uses it to label the
+    profiler output files.
+    """
+    return func(*args)
+
+
+def _real_profile_hook(name, func, *args):
+    """Profiling replacement for :func:`_profile_hook`: run `func(*args)`
+    under :class:`cProfile.Profile`, then dump raw pstats data and a
+    human-readable cumulative-time report to paths built from
+    :data:`_profile_fmt`."""
+    profiler = cProfile.Profile()
+    profiler.enable()
+    try:
+        return func(*args)
+    finally:
+        # 'ext' stays '%s' so the same template yields both output paths.
+        path = _profile_fmt % {
+            'now': int(1e6 * time.time()),
+            'identity': name,
+            'pid': os.getpid(),
+            'ext': '%s'
+        }
+        profiler.dump_stats(path % ('pstats',))
+        profiler.create_stats()
+        fp = open(path % ('log',), 'w')
+        try:
+            stats = pstats.Stats(profiler, stream=fp)
+            stats.sort_stats('cumulative')
+            stats.print_stats()
+        finally:
+            fp.close()
+
+
+def enable_profiling(econtext=None):
+    """Swap :data:`_profile_hook` for :func:`_real_profile_hook`, enabling
+    profiler output for subsequently hooked calls. `econtext` is accepted
+    but unused, matching the dispatcher's call signature."""
+    global _profile_hook
+    _profile_hook = _real_profile_hook
+
+
+def import_module(modname):
+    """
+    Import `modname` and return the module object itself, rather than the
+    top-level package :func:`__import__` would return; the non-empty
+    fromlist forces this behaviour.
+    """
+    return __import__(modname, None, None, [''])
+
+
+class Py24Pickler(py_pickle.Pickler):
+    """
+    Exceptions were classic classes until Python 2.5. Sadly for 2.4, cPickle
+    offers little control over how a classic instance is pickled. Therefore 2.4
+    uses a pure-Python pickler, so CallError can be made to look as it does on
+    newer Pythons.
+
+    This mess will go away once proper serialization exists.
+    """
+    @classmethod
+    def dumps(cls, obj, protocol):
+        # Mirror pickle.dumps(): serialize into an in-memory buffer.
+        bio = BytesIO()
+        self = cls(bio, protocol=protocol)
+        self.dump(obj)
+        return bio.getvalue()
+
+    def save_exc_inst(self, obj):
+        # Pickle CallError via its __reduce__ protocol so it round-trips the
+        # same way as on Pythons where exceptions are new-style classes.
+        if isinstance(obj, CallError):
+            func, args = obj.__reduce__()
+            self.save(func)
+            self.save(args)
+            self.write(py_pickle.REDUCE)
+        else:
+            py_pickle.Pickler.save_inst(self, obj)
+
+    if PY24:
+        # Only override classic-instance pickling on 2.4 itself.
+        dispatch = py_pickle.Pickler.dispatch.copy()
+        dispatch[py_pickle.InstanceType] = save_exc_inst
+
+
+# Select a pickle dumps() and an Unpickler whose find_global can be replaced
+# per-message (Message.unpickle assigns find_global after construction).
+if PY3:
+    # In 3.x Unpickler is a class exposing find_class as an overridable, but it
+    # cannot be overridden without subclassing.
+    class _Unpickler(pickle.Unpickler):
+        def find_class(self, module, func):
+            return self.find_global(module, func)
+    pickle__dumps = pickle.dumps
+elif PY24:
+    # On Python 2.4, we must use a pure-Python pickler.
+    pickle__dumps = Py24Pickler.dumps
+    _Unpickler = pickle.Unpickler
+else:
+    pickle__dumps = pickle.dumps
+    # In 2.x Unpickler is a function exposing a writeable find_global
+    # attribute.
+    _Unpickler = pickle.Unpickler
+
+
+class Message(object):
+    """
+    Messages are the fundamental unit of communication, comprising fields from
+    the :ref:`stream-protocol` header, an optional reference to the receiving
+    :class:`mitogen.core.Router` for ingress messages, and helper methods for
+    deserialization and generating replies.
+    """
+    #: Integer target context ID. :class:`Router` delivers messages locally
+    #: when their :attr:`dst_id` matches :data:`mitogen.context_id`, otherwise
+    #: they are routed up or downstream.
+    dst_id = None
+
+    #: Integer source context ID. Used as the target of replies if any are
+    #: generated.
+    src_id = None
+
+    #: Context ID under whose authority the message is acting. See
+    #: :ref:`source-verification`.
+    auth_id = None
+
+    #: Integer target handle in the destination context. This is one of the
+    #: :ref:`standard-handles`, or a dynamically generated handle used to
+    #: receive a one-time reply, such as the return value of a function call.
+    handle = None
+
+    #: Integer target handle to direct any reply to this message. Used to
+    #: receive a one-time reply, such as the return value of a function call.
+    #: :data:`IS_DEAD` has a special meaning when it appears in this field.
+    reply_to = None
+
+    #: Raw message data bytes.
+    data = b('')
+
+    # Sentinel meaning "not yet deserialized"; unpickle() caches the result
+    # over this marker.
+    _unpickled = object()
+
+    #: The :class:`Router` responsible for routing the message. This is
+    #: :data:`None` for locally originated messages.
+    router = None
+
+    #: The :class:`Receiver` over which the message was last received. Part of
+    #: the :class:`mitogen.select.Select` interface. Defaults to :data:`None`.
+    receiver = None
+
+    def __init__(self, **kwargs):
+        """
+        Construct a message from from the supplied `kwargs`. :attr:`src_id` and
+        :attr:`auth_id` are always set to :data:`mitogen.context_id`.
+        """
+        self.src_id = mitogen.context_id
+        self.auth_id = mitogen.context_id
+        vars(self).update(kwargs)
+        assert isinstance(self.data, BytesType)
+
+    def _unpickle_context(self, context_id, name):
+        # Bind this message's router so the restored Context can route.
+        return _unpickle_context(context_id, name, router=self.router)
+
+    def _unpickle_sender(self, context_id, dst_handle):
+        # Bind this message's router so the restored Sender can send.
+        return _unpickle_sender(self.router, context_id, dst_handle)
+
+    def _unpickle_bytes(self, s, encoding):
+        # Recover a byte string from the _codecs.encode reduce emitted by
+        # some picklers; the supplied encoding is intentionally ignored.
+        s, n = LATIN1_CODEC.encode(s)
+        return s
+
+    def _find_global(self, module, func):
+        """Return the unpickling helper for the named `module`/`func` pair,
+        or raise `StreamError` if the pair is not whitelisted below."""
+        if module == __name__:
+            if func == '_unpickle_call_error' or func == 'CallError':
+                return _unpickle_call_error
+            elif func == '_unpickle_sender':
+                return self._unpickle_sender
+            elif func == '_unpickle_context':
+                return self._unpickle_context
+            elif func == 'Blob':
+                return Blob
+            elif func == 'Secret':
+                return Secret
+            elif func == 'Kwargs':
+                return Kwargs
+        elif module == '_codecs' and func == 'encode':
+            return self._unpickle_bytes
+        elif module == '__builtin__' and func == 'bytes':
+            return BytesType
+        raise StreamError('cannot unpickle %r/%r', module, func)
+
+    @property
+    def is_dead(self):
+        """
+        :data:`True` if :attr:`reply_to` is set to the magic value
+        :data:`IS_DEAD`, indicating the sender considers the channel dead. Dead
+        messages can be raised in a variety of circumstances, see
+        :data:`IS_DEAD` for more information.
+        """
+        return self.reply_to == IS_DEAD
+
+    @classmethod
+    def dead(cls, reason=None, **kwargs):
+        """
+        Syntax helper to construct a dead message, with `reason` (if any)
+        UTF-8 encoded into :attr:`data`.
+        """
+        kwargs['data'], _ = UTF8_CODEC.encode(reason or u'')
+        return cls(reply_to=IS_DEAD, **kwargs)
+
+    @classmethod
+    def pickled(cls, obj, **kwargs):
+        """
+        Construct a pickled message, setting :attr:`data` to the serialization
+        of `obj`, and setting remaining fields using `kwargs`.
+
+        :returns:
+            The new message.
+        """
+        self = cls(**kwargs)
+        try:
+            # Protocol 2 is the newest understood by every supported Python.
+            self.data = pickle__dumps(obj, protocol=2)
+        except pickle.PicklingError:
+            # Unpicklable value: ship a CallError describing the failure
+            # instead, so the remote end still receives something.
+            e = sys.exc_info()[1]
+            self.data = pickle__dumps(CallError(e), protocol=2)
+        return self
+
+    def reply(self, msg, router=None, **kwargs):
+        """
+        Compose a reply to this message and send it using :attr:`router`, or
+        `router` if :attr:`router` is :data:`None`.
+
+        :param msg:
+            Either a :class:`Message`, or an object to be serialized in order
+            to construct a new message.
+        :param router:
+            Optional router to use if :attr:`router` is :data:`None`.
+        :param kwargs:
+            Optional keyword parameters overriding message fields in the reply.
+        """
+        if not isinstance(msg, Message):
+            msg = Message.pickled(msg)
+        msg.dst_id = self.src_id
+        msg.handle = self.reply_to
+        vars(msg).update(kwargs)
+        if msg.handle:
+            (self.router or router).route(msg)
+        else:
+            LOG.debug('Message.reply(): discarding due to zero handle: %r', msg)
+
+    if PY3:
+        # On 3.x, decode 2.x-era pickles' str fields as bytes rather than text.
+        UNPICKLER_KWARGS = {'encoding': 'bytes'}
+    else:
+        UNPICKLER_KWARGS = {}
+
+    def _throw_dead(self):
+        # Prefer the sender-supplied reason; otherwise distinguish a locally
+        # generated dead message from a remote one.
+        if len(self.data):
+            raise ChannelError(self.data.decode('utf-8', 'replace'))
+        elif self.src_id == mitogen.context_id:
+            raise ChannelError(ChannelError.local_msg)
+        else:
+            raise ChannelError(ChannelError.remote_msg)
+
+    def unpickle(self, throw=True, throw_dead=True):
+        """
+        Unpickle :attr:`data`, optionally raising any exceptions present.
+
+        :param bool throw:
+            If :data:`True`, raise a deserialized :class:`CallError` rather
+            than returning it.
+
+        :param bool throw_dead:
+            If :data:`True`, raise exceptions, otherwise it is the caller's
+            responsibility.
+
+        :raises CallError:
+            The serialized data contained CallError exception.
+        :raises ChannelError:
+            The `is_dead` field was set.
+        """
+        _vv and IOLOG.debug('%r.unpickle()', self)
+        if throw_dead and self.is_dead:
+            self._throw_dead()
+
+        obj = self._unpickled
+        if obj is Message._unpickled:
+            fp = BytesIO(self.data)
+            unpickler = _Unpickler(fp, **self.UNPICKLER_KWARGS)
+            # Restrict unpickling to the whitelist in _find_global().
+            unpickler.find_global = self._find_global
+            try:
+                # Must occur off the broker thread.
+                obj = unpickler.load()
+                self._unpickled = obj
+            except (TypeError, ValueError):
+                e = sys.exc_info()[1]
+                raise StreamError('invalid message: %s', e)
+
+        if throw:
+            if isinstance(obj, CallError):
+                raise obj
+
+        return obj
+
+    def __repr__(self):
+        # Data is truncated to 50 bytes; the trailing number is full length.
+        return 'Message(%r, %r, %r, %r, %r, %r..%d)' % (
+            self.dst_id, self.src_id, self.auth_id, self.handle,
+            self.reply_to, (self.data or '')[:50], len(self.data)
+        )
+
+
+class Sender(object):
+    """
+    Senders are used to send pickled messages to a handle in another context,
+    it is the inverse of :class:`mitogen.core.Receiver`.
+
+    Senders may be serialized, making them convenient to wire up data flows.
+    See :meth:`mitogen.core.Receiver.to_sender` for more information.
+
+    :param Context context:
+        Context to send messages to.
+    :param int dst_handle:
+        Destination handle to send messages to.
+    """
+    def __init__(self, context, dst_handle):
+        self.context = context
+        self.dst_handle = dst_handle
+
+    def send(self, data):
+        """
+        Send `data` to the remote end, pickled into a :class:`Message`
+        addressed at :attr:`dst_handle`.
+        """
+        _vv and IOLOG.debug('%r.send(%r..)', self, repr(data)[:100])
+        self.context.send(Message.pickled(data, handle=self.dst_handle))
+
+    explicit_close_msg = 'Sender was explicitly closed'
+
+    def close(self):
+        """
+        Send a dead message to the remote, causing :meth:`ChannelError` to be
+        raised in any waiting thread.
+        """
+        _vv and IOLOG.debug('%r.close()', self)
+        self.context.send(
+            Message.dead(
+                reason=self.explicit_close_msg,
+                handle=self.dst_handle
+            )
+        )
+
+    def __repr__(self):
+        return 'Sender(%r, %r)' % (self.context, self.dst_handle)
+
+    def __reduce__(self):
+        # Serialize as (context_id, dst_handle); _unpickle_sender() rebuilds
+        # the Sender against the receiving side's router.
+        return _unpickle_sender, (self.context.context_id, self.dst_handle)
+
+
+def _unpickle_sender(router, context_id, dst_handle):
+    """Reconstruct a :class:`Sender` pickled via :meth:`Sender.__reduce__`,
+    strictly validating the untrusted field types and ranges first.
+
+    :raises TypeError: any field failed validation."""
+    if not (isinstance(router, Router) and
+            isinstance(context_id, (int, long)) and context_id >= 0 and
+            isinstance(dst_handle, (int, long)) and dst_handle > 0):
+        raise TypeError('cannot unpickle Sender: bad input')
+    return Sender(Context(router, context_id), dst_handle)
+
+
+class Receiver(object):
+    """
+    Receivers maintain a thread-safe queue of messages sent to a handle of this
+    context from another context.
+
+    :param mitogen.core.Router router:
+        Router to register the handler on.
+
+    :param int handle:
+        If not :data:`None`, an explicit handle to register, otherwise an
+        unused handle is chosen.
+
+    :param bool persist:
+        If :data:`False`, unregister the handler after one message is received.
+        Single-message receivers are intended for RPC-like transactions, such
+        as in the case of :meth:`mitogen.parent.Context.call_async`.
+
+    :param mitogen.core.Context respondent:
+        Context this receiver is receiving from. If not :data:`None`, arranges
+        for the receiver to receive a dead message if messages can no longer be
+        routed to the context due to disconnection, and ignores messages that
+        did not originate from the respondent context.
+    """
+    #: If not :data:`None`, a reference to a function invoked as
+    #: `notify(receiver)` when a new message is delivered to this receiver. The
+    #: function is invoked on the broker thread, therefore it must not block.
+    #: Used by :class:`mitogen.select.Select` to implement waiting on multiple
+    #: receivers.
+    notify = None
+
+    # NOTE(review): not consulted anywhere in this file chunk; presumably
+    # read by collaborating code elsewhere — confirm before relying on it.
+    raise_channelerror = True
+
+    def __init__(self, router, handle=None, persist=True,
+                 respondent=None, policy=None, overwrite=False):
+        self.router = router
+        #: The handle.
+        self.handle = handle  # Avoid __repr__ crash in add_handler()
+        self._latch = Latch()  # Must exist prior to .add_handler()
+        self.handle = router.add_handler(
+            fn=self._on_receive,
+            handle=handle,
+            policy=policy,
+            persist=persist,
+            respondent=respondent,
+            overwrite=overwrite,
+        )
+
+    def __repr__(self):
+        return 'Receiver(%r, %r)' % (self.router, self.handle)
+
+    def __enter__(self):
+        # Context-manager support: closes the receiver on exit.
+        return self
+
+    def __exit__(self, _1, _2, _3):
+        self.close()
+
+    def to_sender(self):
+        """
+        Return a :class:`Sender` configured to deliver messages to this
+        receiver. As senders are serializable, this makes it convenient to pass
+        `(context_id, handle)` pairs around::
+
+            def deliver_monthly_report(sender):
+                for line in open('monthly_report.txt'):
+                    sender.send(line)
+                sender.close()
+
+            @mitogen.main()
+            def main(router):
+                remote = router.ssh(hostname='mainframe')
+                recv = mitogen.core.Receiver(router)
+                remote.call(deliver_monthly_report, recv.to_sender())
+                for msg in recv:
+                    print(msg)
+        """
+        return Sender(self.router.myself(), self.handle)
+
+    def _on_receive(self, msg):
+        """
+        Callback registered for the handle with :class:`Router`; appends data
+        to the internal queue. Runs on the broker thread.
+        """
+        _vv and IOLOG.debug('%r._on_receive(%r)', self, msg)
+        self._latch.put(msg)
+        if self.notify:
+            self.notify(self)
+
+    closed_msg = 'the Receiver has been closed'
+
+    def close(self):
+        """
+        Unregister the receiver's handle from its associated router, and cause
+        :class:`ChannelError` to be raised in any thread waiting in :meth:`get`
+        on this receiver.
+        """
+        if self.handle:
+            self.router.del_handler(self.handle)
+            self.handle = None
+        self._latch.close()
+
+    def empty(self):
+        """
+        Return :data:`True` if calling :meth:`get` would block.
+
+        As with :class:`Queue.Queue`, :data:`True` may be returned even though
+        a subsequent call to :meth:`get` will succeed, since a message may be
+        posted at any moment between :meth:`empty` and :meth:`get`.
+        """
+        return self._latch.empty()
+
+    def get(self, timeout=None, block=True, throw_dead=True):
+        """
+        Sleep waiting for a message to arrive on this receiver.
+
+        :param float timeout:
+            If not :data:`None`, specifies a timeout in seconds.
+
+        :param bool block:
+            If :data:`False`, do not sleep for a message to arrive.
+
+        :param bool throw_dead:
+            If :data:`True`, raise rather than return dead messages.
+
+        :raises mitogen.core.ChannelError:
+            The remote end indicated the channel should be closed,
+            communication with it was lost, or :meth:`close` was called in the
+            local process.
+
+        :raises mitogen.core.TimeoutError:
+            Timeout was reached.
+
+        :returns:
+            :class:`Message` that was received.
+        """
+        _vv and IOLOG.debug('%r.get(timeout=%r, block=%r)', self, timeout, block)
+        try:
+            msg = self._latch.get(timeout=timeout, block=block)
+        except LatchError:
+            # Latch was closed by close(); surface as a channel error.
+            raise ChannelError(self.closed_msg)
+        if msg.is_dead and throw_dead:
+            msg._throw_dead()
+        return msg
+
+    def __iter__(self):
+        """
+        Yield consecutive :class:`Message` instances delivered to this receiver
+        until :class:`ChannelError` is raised.
+        """
+        while True:
+            try:
+                msg = self.get()
+            except ChannelError:
+                return
+            yield msg
+
+
+class Channel(Sender, Receiver):
+    """
+    A channel inherits from :class:`mitogen.core.Sender` and
+    `mitogen.core.Receiver` to provide bidirectional functionality.
+
+    This class is incomplete and obsolete, it will be removed in Mitogen 0.3.
+    Channels were an early attempt at syntax sugar. It is always easier to pass
+    around unidirectional pairs of senders/receivers, even though the syntax is
+    baroque:
+
+    .. literalinclude:: ../examples/ping_pong.py
+
+    Since all handles aren't known until after both ends are constructed, for
+    both ends to communicate through a channel, it is necessary for one end to
+    retrieve the handle allocated to the other and reconfigure its own channel
+    to match. Currently this is a manual task.
+    """
+    def __init__(self, router, context, dst_handle, handle=None):
+        Sender.__init__(self, context, dst_handle)
+        Receiver.__init__(self, router, handle)
+
+    def close(self):
+        # Tear down the receiving side first, then notify the remote end.
+        Receiver.close(self)
+        Sender.close(self)
+
+    def __repr__(self):
+        return 'Channel(%s, %s)' % (
+            Sender.__repr__(self),
+            Receiver.__repr__(self)
+        )
+
+
+class Importer(object):
+    """
+    Import protocol implementation that fetches modules from the parent
+    process.
+
+    :param context: Context to communicate via.
+    """
+    # The Mitogen package is handled specially, since the child context must
+    # construct it manually during startup.
+    # These names seed self._present as the known submodules of 'mitogen'.
+    MITOGEN_PKG_CONTENT = [
+        'compat',
+        'debug',
+        'doas',
+        'docker',
+        'kubectl',
+        'fakessh',
+        'fork',
+        'jail',
+        'lxc',
+        'lxd',
+        'master',
+        'minify',
+        'os_fork',
+        'parent',
+        'select',
+        'service',
+        'setns',
+        'ssh',
+        'su',
+        'sudo',
+        'utils',
+    ]
+
+    # Always appended to any user-supplied blacklist in __init__().
+    ALWAYS_BLACKLIST = [
+        # 2.x generates needless imports for 'builtins', while 3.x does the
+        # same for '__builtin__'. The correct one is built-in, the other always
+        # a negative round-trip.
+        'builtins',
+        '__builtin__',
+        'thread',
+
+        # org.python.core imported by copy, pickle, xml.sax; breaks Jython, but
+        # very unlikely to trigger a bug report.
+        'org',
+    ]
+
+    if PY3:
+        # cStringIO does not exist on 3.x; never ask the master for it.
+        ALWAYS_BLACKLIST += ['cStringIO']
+
+    def __init__(self, router, context, core_src, whitelist=(), blacklist=()):
+        self._context = context
+        self._present = {'mitogen': self.MITOGEN_PKG_CONTENT}
+        self._lock = threading.Lock()
+        # [''] is the "match everything" whitelist: every name startswith ''.
+        self.whitelist = list(whitelist) or ['']
+        self.blacklist = list(blacklist) + self.ALWAYS_BLACKLIST
+
+        # Preserve copies of the original server-supplied whitelist/blacklist
+        # for later use by children.
+        self.master_whitelist = self.whitelist[:]
+        self.master_blacklist = self.blacklist[:]
+
+        # Presence of an entry in this map indicates in-flight GET_MODULE.
+        self._callbacks = {}
+        self._cache = {}
+        if core_src:
+            # Pre-seed mitogen.core itself: it can never be fetched remotely,
+            # since it implements the fetching machinery.
+            self._update_linecache('x/mitogen/core.py', core_src)
+            self._cache['mitogen.core'] = (
+                'mitogen.core',
+                None,
+                'x/mitogen/core.py',
+                zlib.compress(core_src, 9),
+                [],
+            )
+        self._install_handler(router)
+
+    def _update_linecache(self, path, data):
+        """
+        The Python 2.4 linecache module, used to fetch source code for
+        tracebacks and :func:`inspect.getsource`, does not support PEP-302,
+        meaning it needs extra help to for Mitogen-loaded modules. Directly
+        populate its cache if a loaded module belongs to the Mitogen package.
+        """
+        if PY24 and 'mitogen' in path:
+            # Entry shape matches linecache.cache: (size, mtime, lines, path).
+            linecache.cache[path] = (
+                len(data),
+                0.0,
+                [line+'\n' for line in data.splitlines()],
+                path,
+            )
+
+    def _install_handler(self, router):
+        # Accept LOAD_MODULE responses only from parent-authorized senders.
+        router.add_handler(
+            fn=self._on_load_module,
+            handle=LOAD_MODULE,
+            policy=has_parent_authority,
+        )
+
+    def __repr__(self):
+        # Deliberately terse: instance state is noisy and rarely useful here.
+        return 'Importer()'
+
+    def builtin_find_module(self, fullname):
+        """Probe whether `fullname` can be satisfied by the regular import
+        machinery, raising ImportError (via :func:`imp.find_module`) when it
+        cannot; the return value is unused."""
+        # imp.find_module() will always succeed for __main__, because it is a
+        # built-in module. That means it exists on a special linked list deep
+        # within the bowels of the interpreter. We must special case it.
+        if fullname == '__main__':
+            raise ModuleNotFoundError()
+
+        parent, _, modname = str_rpartition(fullname, '.')
+        if parent:
+            path = sys.modules[parent].__path__
+        else:
+            path = None
+
+        fp, pathname, description = imp.find_module(modname, path)
+        # Only the existence check matters; close the file immediately.
+        if fp:
+            fp.close()
+
+    def find_module(self, fullname, path=None):
+        """PEP-302 finder: return self when the master should serve
+        `fullname`, otherwise :data:`None` to defer to the next finder."""
+        # Reentrancy guard: builtin_find_module() below can itself trigger
+        # imports, which would recurse back into this finder.
+        if hasattr(_tls, 'running'):
+            return None
+
+        _tls.running = True
+        try:
+            _v and LOG.debug('%r.find_module(%r)', self, fullname)
+            fullname = to_text(fullname)
+            pkgname, dot, _ = str_rpartition(fullname, '.')
+            pkg = sys.modules.get(pkgname)
+            if pkgname and getattr(pkg, '__loader__', None) is not self:
+                LOG.debug('%r: %r is submodule of a package we did not load',
+                          self, fullname)
+                return None
+
+            suffix = fullname[len(pkgname+dot):]
+            if pkgname and suffix not in self._present.get(pkgname, ()):
+                LOG.debug('%r: master doesn\'t know %r', self, fullname)
+                return None
+
+            # #114: explicitly whitelisted prefixes override any
+            # system-installed package.
+            if self.whitelist != ['']:
+                if any(fullname.startswith(s) for s in self.whitelist):
+                    return self
+
+            try:
+                self.builtin_find_module(fullname)
+                _vv and IOLOG.debug('%r: %r is available locally',
+                                    self, fullname)
+            except ImportError:
+                _vv and IOLOG.debug('find_module(%r) returning self', fullname)
+                return self
+        finally:
+            del _tls.running
+
+    # Human-readable refusal texts; used by _refuse_imports() and when the
+    # master answers that a module is absent (load_module()/get_filename()).
+    blacklisted_msg = (
+        '%r is present in the Mitogen importer blacklist, therefore this '
+        'context will not attempt to request it from the master, as the '
+        'request will always be refused.'
+    )
+    pkg_resources_msg = (
+        'pkg_resources is prohibited from importing __main__, as it causes '
+        'problems in applications whose main module is not designed to be '
+        're-imported by children.'
+    )
+    absent_msg = (
+        'The Mitogen master process was unable to serve %r. It may be a '
+        'native Python extension, or it may be missing entirely. Check the '
+        'importer debug logs on the master for more information.'
+    )
+
+    def _refuse_imports(self, fullname):
+        """Raise ModuleNotFoundError for imports that must never reach the
+        master, and neuter pbr's version probing as a side effect."""
+        if is_blacklisted_import(self, fullname):
+            raise ModuleNotFoundError(self.blacklisted_msg % (fullname,))
+
+        # Inspect the importing module two frames up the stack.
+        f = sys._getframe(2)
+        requestee = f.f_globals['__name__']
+
+        if fullname == '__main__' and requestee == 'pkg_resources':
+            # Anything that imports pkg_resources will eventually cause
+            # pkg_resources to try and scan __main__ for its __requires__
+            # attribute (pkg_resources/__init__.py::_build_master()). This
+            # breaks any app that is not expecting its __main__ to suddenly be
+            # sucked over a network and injected into a remote process, like
+            # py.test.
+            raise ModuleNotFoundError(self.pkg_resources_msg)
+
+        if fullname == 'pbr':
+            # It claims to use pkg_resources to read version information, which
+            # would result in PEP-302 being used, but it actually does direct
+            # filesystem access. So instead smodge the environment to override
+            # any version that was defined. This will probably break something
+            # later.
+            os.environ['PBR_VERSION'] = '0.0.0'
+
+    def _on_load_module(self, msg):
+        """LOAD_MODULE handler: cache the received module tuple and fire any
+        callbacks queued by _request_module() waiting on it."""
+        if msg.is_dead:
+            return
+
+        tup = msg.unpickle()
+        fullname = tup[0]
+        _v and LOG.debug('Importer._on_load_module(%r)', fullname)
+
+        self._lock.acquire()
+        try:
+            self._cache[fullname] = tup
+            # tup[2] is the path, tup[3] the zlib-compressed source.
+            if tup[2] is not None and PY24:
+                self._update_linecache(
+                    path='master:' + tup[2],
+                    data=zlib.decompress(tup[3])
+                )
+            callbacks = self._callbacks.pop(fullname, [])
+        finally:
+            self._lock.release()
+
+        # Invoke outside the lock to avoid deadlock with _request_module().
+        for callback in callbacks:
+            callback()
+
+    def _request_module(self, fullname, callback):
+        """Arrange for `callback()` to run once `fullname` is cached,
+        sending at most one GET_MODULE request per module regardless of how
+        many callers are waiting."""
+        self._lock.acquire()
+        try:
+            present = fullname in self._cache
+            if not present:
+                funcs = self._callbacks.get(fullname)
+                if funcs is not None:
+                    _v and LOG.debug('_request_module(%r): in flight', fullname)
+                    funcs.append(callback)
+                else:
+                    _v and LOG.debug('_request_module(%r): new request', fullname)
+                    self._callbacks[fullname] = [callback]
+                    self._context.send(
+                        Message(data=b(fullname), handle=GET_MODULE)
+                    )
+        finally:
+            self._lock.release()
+
+        # Already cached: invoke immediately, outside the lock.
+        if present:
+            callback()
+
+    def load_module(self, fullname):
+        """PEP-302 loader: block until the master has served `fullname`,
+        then construct, compile and execute the module.
+
+        :raises ModuleNotFoundError: import refused or master reported
+            the module absent."""
+        fullname = to_text(fullname)
+        _v and LOG.debug('Importer.load_module(%r)', fullname)
+        self._refuse_imports(fullname)
+
+        # Block the importing thread until _on_load_module() fires the event.
+        event = threading.Event()
+        self._request_module(fullname, event.set)
+        event.wait()
+
+        ret = self._cache[fullname]
+        if ret[2] is None:
+            raise ModuleNotFoundError(self.absent_msg % (fullname,))
+
+        pkg_present = ret[1]
+        mod = sys.modules.setdefault(fullname, imp.new_module(fullname))
+        mod.__file__ = self.get_filename(fullname)
+        mod.__loader__ = self
+        if pkg_present is not None:  # it's a package.
+            mod.__path__ = []
+            mod.__package__ = fullname
+            self._present[fullname] = pkg_present
+        else:
+            mod.__package__ = str_rpartition(fullname, '.')[0] or None
+
+        if mod.__package__ and not PY3:
+            # 2.x requires __package__ to be exactly a string.
+            mod.__package__, _ = UTF8_CODEC.encode(mod.__package__)
+
+        source = self.get_source(fullname)
+        try:
+            code = compile(source, mod.__file__, 'exec', 0, 1)
+        except SyntaxError:
+            LOG.exception('while importing %r', fullname)
+            raise
+
+        if PY3:
+            exec(code, vars(mod))
+        else:
+            # String form keeps the 2.x exec statement out of the 3.x parser.
+            exec('exec code in vars(mod)')
+        return mod
+
+    def get_filename(self, fullname):
+        """
+        PEP-302: return the synthetic ``master:``-prefixed filename for a
+        cached module, or implicitly :data:`None` if the module is unknown
+        to this importer.
+        """
+        if fullname in self._cache:
+            path = self._cache[fullname][2]
+            if path is None:
+                # If find_loader() returns self but a subsequent master RPC
+                # reveals the module can't be loaded, and so load_module()
+                # throws ImportError, on Python 3.x it is still possible for
+                # the loader to be called to fetch metadata.
+                raise ModuleNotFoundError(self.absent_msg % (fullname,))
+            return u'master:' + self._cache[fullname][2]
+
+    def get_source(self, fullname):
+        """
+        PEP-302: return the module's decompressed source -- text on 3.x,
+        bytes on 2.x -- or implicitly :data:`None` if the module is unknown
+        to this importer.
+        """
+        if fullname in self._cache:
+            compressed = self._cache[fullname][3]
+            if compressed is None:
+                # Cached negative result: master could not find the module.
+                raise ModuleNotFoundError(self.absent_msg % (fullname,))
+
+            source = zlib.decompress(self._cache[fullname][3])
+            if PY3:
+                return to_text(source)
+            return source
+
+
+class LogHandler(logging.Handler):
+    """
+    A :class:`logging.Handler` that forwards log records to `context` as
+    FORWARD_LOG messages, buffering them until :meth:`uncork` is called.
+    """
+    def __init__(self, context):
+        logging.Handler.__init__(self)
+        #: Context that receives forwarded records.
+        self.context = context
+        #: Per-thread re-entrancy guard used by emit().
+        self.local = threading.local()
+        #: Messages buffered prior to uncork(); None afterwards.
+        self._buffer = []
+
+    def uncork(self):
+        """
+        #305: during startup :class:`LogHandler` may be installed before it is
+        possible to route messages, therefore messages are buffered until
+        :meth:`uncork` is called by :class:`ExternalContext`.
+        """
+        # Replace the buffering _send() with direct sending (instance
+        # attribute shadows the class method), then flush the backlog.
+        self._send = self.context.send
+        for msg in self._buffer:
+            self._send(msg)
+        self._buffer = None
+
+    def _send(self, msg):
+        # Pre-uncork default: buffer the message. Shadowed per-instance by
+        # uncork().
+        self._buffer.append(msg)
+
+    def emit(self, rec):
+        # Drop records from the IO layer and any record generated while this
+        # thread is already inside emit(), to avoid feedback loops.
+        if rec.name == 'mitogen.io' or \
+           getattr(self.local, 'in_emit', False):
+            return
+
+        self.local.in_emit = True
+        try:
+            # Wire format: NUL-separated "name\x00levelno\x00message".
+            msg = self.format(rec)
+            encoded = '%s\x00%s\x00%s' % (rec.name, rec.levelno, msg)
+            if isinstance(encoded, UnicodeType):
+                # Logging package emits both :(
+                encoded = encoded.encode('utf-8')
+            self._send(Message(data=encoded, handle=FORWARD_LOG))
+        finally:
+            self.local.in_emit = False
+
+
+class Side(object):
+    """
+    Represent a single side of a :class:`BasicStream`. This exists to allow
+    streams implemented using unidirectional (e.g. UNIX pipe) and bidirectional
+    (e.g. UNIX socket) file descriptors to operate identically.
+
+    :param mitogen.core.Stream stream:
+        The stream this side is associated with.
+
+    :param int fd:
+        Underlying file descriptor.
+
+    :param bool keep_alive:
+        Value for :attr:`keep_alive`
+
+    During construction, the file descriptor has its :data:`os.O_NONBLOCK` flag
+    enabled using :func:`fcntl.fcntl`.
+    """
+    #: Weak map of every live Side, so _on_fork() can close inherited
+    #: descriptors in the child without keeping Sides alive.
+    _fork_refs = weakref.WeakValueDictionary()
+
+    def __init__(self, stream, fd, cloexec=True, keep_alive=True, blocking=False):
+        #: The :class:`Stream` for which this is a read or write side.
+        self.stream = stream
+        #: Integer file descriptor to perform IO on. Remains set even after
+        #: :meth:`close`; check :attr:`closed` before use.
+        self.fd = fd
+        #: True once close() has been called.
+        self.closed = False
+        #: If :data:`True`, causes presence of this side in
+        #: :class:`Broker`'s active reader set to defer shutdown until the
+        #: side is disconnected.
+        self.keep_alive = keep_alive
+        self._fork_refs[id(self)] = self
+        if cloexec:
+            set_cloexec(fd)
+        if not blocking:
+            set_nonblock(fd)
+
+    def __repr__(self):
+        return '<Side of %r fd %s>' % (self.stream, self.fd)
+
+    @classmethod
+    def _on_fork(cls):
+        # Close every descriptor inherited from the parent process; the
+        # duplicated handles reference kernel objects still in use there.
+        while cls._fork_refs:
+            _, side = cls._fork_refs.popitem()
+            _vv and IOLOG.debug('Side._on_fork() closing %r', side)
+            side.close()
+
+    def close(self):
+        """
+        Call :func:`os.close` on :attr:`fd` if the side is not already
+        closed, and mark the side as :attr:`closed`. Idempotent.
+        """
+        if not self.closed:
+            _vv and IOLOG.debug('%r.close()', self)
+            # Set the flag before closing so concurrent read()/write() refuse
+            # to touch a possibly-reused descriptor.
+            self.closed = True
+            os.close(self.fd)
+
+    def read(self, n=CHUNK_SIZE):
+        """
+        Read up to `n` bytes from the file descriptor, wrapping the underlying
+        :func:`os.read` call with :func:`io_op` to trap common disconnection
+        conditions.
+
+        :meth:`read` always behaves as if it is reading from a regular UNIX
+        file; socket, pipe, and TTY disconnection errors are masked and result
+        in a 0-sized read like a regular file.
+
+        :returns:
+            Bytes read, or the empty to string to indicate disconnection was
+            detected.
+        """
+        if self.closed:
+            # Refuse to touch the handle after closed, it may have been reused
+            # by another thread. TODO: synchronize read()/write()/close().
+            return b('')
+        s, disconnected = io_op(os.read, self.fd, n)
+        if disconnected:
+            LOG.debug('%r.read(): disconnected: %s', self, disconnected)
+            return b('')
+        return s
+
+    def write(self, s):
+        """
+        Write as much of the bytes from `s` as possible to the file descriptor,
+        wrapping the underlying :func:`os.write` call with :func:`io_op` to
+        trap common disconnection conditions.
+
+        :returns:
+            Number of bytes written, or :data:`None` if disconnection was
+            detected.
+        """
+        if self.closed or self.fd is None:
+            # Refuse to touch the handle after closed, it may have been reused
+            # by another thread.
+            return None
+
+        written, disconnected = io_op(os.write, self.fd, s)
+        if disconnected:
+            LOG.debug('%r.write(): disconnected: %s', self, disconnected)
+            return None
+        return written
+
+
+class BasicStream(object):
+    """
+    Base for objects managed by a :class:`Broker`: defines the receive,
+    transmit, shutdown and disconnect callbacks the broker invokes.
+    """
+    #: A :class:`Side` representing the stream's receive file descriptor.
+    receive_side = None
+
+    #: A :class:`Side` representing the stream's transmit file descriptor.
+    transmit_side = None
+
+    def on_receive(self, broker):
+        """
+        Called by :class:`Broker` when the stream's :attr:`receive_side` has
+        been marked readable using :meth:`Broker.start_receive` and the broker
+        has detected the associated file descriptor is ready for reading.
+
+        Subclasses must implement this if :meth:`Broker.start_receive` is ever
+        called on them, and the method must call :meth:`on_disconnect` if
+        reading produces an empty string.
+        """
+        pass
+
+    def on_transmit(self, broker):
+        """
+        Called by :class:`Broker` when the stream's :attr:`transmit_side`
+        has been marked writeable using :meth:`Broker._start_transmit` and
+        the broker has detected the associated file descriptor is ready for
+        writing.
+
+        Subclasses must implement this if :meth:`Broker._start_transmit` is
+        ever called on them.
+        """
+        pass
+
+    def on_shutdown(self, broker):
+        """
+        Called by :meth:`Broker.shutdown` to allow the stream time to
+        gracefully shutdown. The base implementation fires a ``shutdown``
+        signal and simply calls :meth:`on_disconnect`.
+        """
+        _v and LOG.debug('%r.on_shutdown()', self)
+        fire(self, 'shutdown')
+        self.on_disconnect(broker)
+
+    def on_disconnect(self, broker):
+        """
+        Called by :class:`Broker` to force disconnect the stream. The base
+        implementation simply closes :attr:`receive_side` and
+        :attr:`transmit_side` and unregisters the stream from the broker.
+        """
+        LOG.debug('%r.on_disconnect()', self)
+        if self.receive_side:
+            broker.stop_receive(self)
+            self.receive_side.close()
+        if self.transmit_side:
+            broker._stop_transmit(self)
+            self.transmit_side.close()
+        fire(self, 'disconnect')
+
+
+class Stream(BasicStream):
+    """
+    :class:`BasicStream` subclass implementing mitogen's :ref:`stream
+    protocol <stream-protocol>`.
+    """
+    #: If not :data:`None`, :class:`Router` stamps this into
+    #: :attr:`Message.auth_id` of every message received on this stream.
+    auth_id = None
+
+    #: If not :data:`False`, indicates the stream has :attr:`auth_id` set and
+    #: its value is the same as :data:`mitogen.context_id` or appears in
+    #: :data:`mitogen.parent_ids`.
+    is_privileged = False
+
+    def __init__(self, router, remote_id, **kwargs):
+        self._router = router
+        #: Context ID of the peer on the far end of this stream.
+        self.remote_id = remote_id
+        self.name = u'default'
+        #: Module names already forwarded to the child; these two are always
+        #: bootstrapped out-of-band.
+        self.sent_modules = set(['mitogen', 'mitogen.core'])
+        self.construct(**kwargs)
+        #: Deques of byte chunks awaiting parse/transmit, with cached total
+        #: lengths maintained alongside to avoid re-summing.
+        self._input_buf = collections.deque()
+        self._output_buf = collections.deque()
+        self._input_buf_len = 0
+        self._output_buf_len = 0
+        #: Routing records the dst_id of every message arriving from this
+        #: stream. Any arriving DEL_ROUTE is rebroadcast for any such ID.
+        self.egress_ids = set()
+
+    def construct(self):
+        # Subclass hook: receives the keyword arguments passed to __init__().
+        pass
+
+    def _internal_receive(self, broker, buf):
+        # While little data is buffered, extend the first chunk in place so
+        # the fixed-size header stays contiguous for _receive_one()'s slice;
+        # otherwise just queue the new chunk.
+        if self._input_buf and self._input_buf_len < 128:
+            self._input_buf[0] += buf
+        else:
+            self._input_buf.append(buf)
+
+        self._input_buf_len += len(buf)
+        # Drain every complete message currently buffered.
+        while self._receive_one(broker):
+            pass
+
+    def on_receive(self, broker):
+        """Handle the next complete message on the stream. Raise
+        :class:`StreamError` on failure."""
+        _vv and IOLOG.debug('%r.on_receive()', self)
+
+        buf = self.receive_side.read()
+        if not buf:
+            # Empty read signals disconnection (see Side.read()).
+            return self.on_disconnect(broker)
+
+        self._internal_receive(broker, buf)
+
+    #: Wire header: magic, dst_id, src_id, auth_id, handle, reply_to, length.
+    HEADER_FMT = '>hLLLLLL'
+    HEADER_LEN = struct.calcsize(HEADER_FMT)
+    HEADER_MAGIC = 0x4d49  # 'MI'
+
+    corrupt_msg = (
+        'Corruption detected: frame signature incorrect. This likely means '
+        'some external process is interfering with the connection. Received:'
+        '\n\n'
+        '%r'
+    )
+
+    def _receive_one(self, broker):
+        # Returns True if a complete message was consumed, False otherwise.
+        if self._input_buf_len < self.HEADER_LEN:
+            return False
+
+        msg = Message()
+        msg.router = self._router
+        (magic, msg.dst_id, msg.src_id, msg.auth_id,
+         msg.handle, msg.reply_to, msg_len) = struct.unpack(
+            self.HEADER_FMT,
+            self._input_buf[0][:self.HEADER_LEN],
+        )
+
+        if magic != self.HEADER_MAGIC:
+            LOG.error(self.corrupt_msg, self._input_buf[0][:2048])
+            self.on_disconnect(broker)
+            return False
+
+        if msg_len > self._router.max_message_size:
+            LOG.error('Maximum message size exceeded (got %d, max %d)',
+                      msg_len, self._router.max_message_size)
+            self.on_disconnect(broker)
+            return False
+
+        total_len = msg_len + self.HEADER_LEN
+        if self._input_buf_len < total_len:
+            _vv and IOLOG.debug(
+                '%r: Input too short (want %d, got %d)',
+                self, msg_len, self._input_buf_len - self.HEADER_LEN
+            )
+            return False
+
+        # Stitch the message body together from however many buffered chunks
+        # it spans. `start` skips the header within the first chunk only;
+        # `remain` counts down the bytes of this frame left to consume.
+        start = self.HEADER_LEN
+        prev_start = start
+        remain = total_len
+        bits = []
+        while remain:
+            buf = self._input_buf.popleft()
+            bit = buf[start:remain]
+            bits.append(bit)
+            remain -= len(bit) + start
+            prev_start = start
+            start = 0
+
+        msg.data = b('').join(bits)
+        # Push back the unconsumed tail of the final chunk for the next frame.
+        self._input_buf.appendleft(buf[prev_start+len(bit):])
+        self._input_buf_len -= total_len
+        self._router._async_route(msg, self)
+        return True
+
+    def pending_bytes(self):
+        """
+        Return the number of bytes queued for transmission on this stream. This
+        can be used to limit the amount of data buffered in RAM by an otherwise
+        unlimited consumer.
+
+        For an accurate result, this method should be called from the Broker
+        thread, for example by using :meth:`Broker.defer_sync`.
+        """
+        return self._output_buf_len
+
+    def on_transmit(self, broker):
+        """Transmit buffered messages."""
+        _vv and IOLOG.debug('%r.on_transmit()', self)
+
+        if self._output_buf:
+            buf = self._output_buf.popleft()
+            written = self.transmit_side.write(buf)
+            if not written:
+                _v and LOG.debug('%r.on_transmit(): disconnection detected', self)
+                self.on_disconnect(broker)
+                return
+            elif written != len(buf):
+                # Partial write: requeue the remainder at the head.
+                self._output_buf.appendleft(BufferType(buf, written))
+
+            _vv and IOLOG.debug('%r.on_transmit() -> len %d', self, written)
+            self._output_buf_len -= written
+
+        if not self._output_buf:
+            # Nothing left to send; stop watching for writeability.
+            broker._stop_transmit(self)
+
+    def _send(self, msg):
+        # Runs on the broker thread (see send()).
+        _vv and IOLOG.debug('%r._send(%r)', self, msg)
+        pkt = struct.pack(self.HEADER_FMT, self.HEADER_MAGIC, msg.dst_id,
+                          msg.src_id, msg.auth_id, msg.handle,
+                          msg.reply_to or 0, len(msg.data)) + msg.data
+
+        if not self._output_buf_len:
+            # Modifying epoll/Kqueue state is expensive, as are needless broker
+            # loops. Rather than wait for writeability, just write immediately,
+            # and fall back to the broker loop on error or full buffer.
+            try:
+                n = self.transmit_side.write(pkt)
+                if n:
+                    if n == len(pkt):
+                        return
+                    pkt = pkt[n:]
+            except OSError:
+                pass
+
+            self._router.broker._start_transmit(self)
+        self._output_buf.append(pkt)
+        self._output_buf_len += len(pkt)
+
+    def send(self, msg):
+        """Send `data` to `handle`, and tell the broker we have output. May
+        be called from any thread."""
+        self._router.broker.defer(self._send, msg)
+
+    def on_shutdown(self, broker):
+        """Override BasicStream behaviour of immediately disconnecting."""
+        _v and LOG.debug('%r.on_shutdown(%r)', self, broker)
+
+    def accept(self, rfd, wfd):
+        # TODO: what is this os.dup for?
+        self.receive_side = Side(self, os.dup(rfd))
+        self.transmit_side = Side(self, os.dup(wfd))
+
+    def __repr__(self):
+        cls = type(self)
+        return "%s.%s('%s')" % (cls.__module__, cls.__name__, self.name)
+
+
+class Context(object):
+    """
+    Represent a remote context regardless of the underlying connection method.
+    Context objects are simple facades that emit messages through an
+    associated router, and have :ref:`signals` raised against them in response
+    to various events relating to the context.
+
+    **Note:** This is the somewhat limited core version, used by child
+    contexts. The master subclass is documented below this one.
+
+    Contexts maintain no internal state and are thread-safe.
+
+    Prefer :meth:`Router.context_by_id` over constructing context objects
+    explicitly, as that method is deduplicating, and returns the only context
+    instance :ref:`signals` will be raised on.
+
+    :param Router router:
+        Router to emit messages through.
+    :param int context_id:
+        Context ID.
+    :param str name:
+        Context name.
+    """
+    remote_name = None
+
+    def __init__(self, router, context_id, name=None):
+        self.router = router
+        self.context_id = context_id
+        self.name = name
+
+    def __reduce__(self):
+        # Pickle as (context_id, unicode name); reconstructed via
+        # _unpickle_context(), which revalidates both fields.
+        name = self.name
+        if name and not isinstance(name, UnicodeType):
+            name = UnicodeType(name, 'utf-8')
+        return _unpickle_context, (self.context_id, name)
+
+    def on_disconnect(self):
+        # Raise the 'disconnect' signal against this context.
+        _v and LOG.debug('%r.on_disconnect()', self)
+        fire(self, 'disconnect')
+
+    def send_async(self, msg, persist=False):
+        """
+        Arrange for `msg` to be delivered to this context, with replies
+        directed to a newly constructed receiver. :attr:`dst_id
+        <Message.dst_id>` is set to the target context ID, and :attr:`reply_to
+        <Message.reply_to>` is set to the newly constructed receiver's handle.
+
+        :param bool persist:
+            If :data:`False`, the handler will be unregistered after a single
+            message has been received.
+
+        :param mitogen.core.Message msg:
+            The message.
+
+        :returns:
+            :class:`Receiver` configured to receive any replies sent to the
+            message's `reply_to` handle.
+        """
+        # Blocking on the broker thread would deadlock the reply path.
+        # NOTE(review): the message text below has a typo ("making blocking");
+        # left unchanged as it is runtime behaviour.
+        if self.router.broker._thread == threading.currentThread():  # TODO
+            raise SystemError('Cannot making blocking call on broker thread')
+
+        receiver = Receiver(self.router, persist=persist, respondent=self)
+        msg.dst_id = self.context_id
+        msg.reply_to = receiver.handle
+
+        _v and LOG.debug('%r.send_async(%r)', self, msg)
+        self.send(msg)
+        return receiver
+
+    def call_service_async(self, service_name, method_name, **kwargs):
+        """
+        Send a CALL_SERVICE request to this context and return the
+        :class:`Receiver` for its reply. `service_name` may be a string or a
+        class exposing a ``name()`` method.
+        """
+        _v and LOG.debug('%r.call_service_async(%r, %r, %r)',
+                         self, service_name, method_name, kwargs)
+        if isinstance(service_name, BytesType):
+            service_name = service_name.encode('utf-8')
+        elif not isinstance(service_name, UnicodeType):
+            service_name = service_name.name()  # Service.name()
+        tup = (service_name, to_text(method_name), Kwargs(kwargs))
+        msg = Message.pickled(tup, handle=CALL_SERVICE)
+        return self.send_async(msg)
+
+    def send(self, msg):
+        """
+        Arrange for `msg` to be delivered to this context. :attr:`dst_id
+        <Message.dst_id>` is set to the target context ID.
+
+        :param Message msg:
+            Message.
+        """
+        msg.dst_id = self.context_id
+        self.router.route(msg)
+
+    def call_service(self, service_name, method_name, **kwargs):
+        """
+        Blocking wrapper for :meth:`call_service_async`: wait for the reply
+        and return its unpickled payload.
+        """
+        recv = self.call_service_async(service_name, method_name, **kwargs)
+        return recv.get().unpickle()
+
+    def send_await(self, msg, deadline=None):
+        """
+        Like :meth:`send_async`, but expect a single reply (`persist=False`)
+        delivered within `deadline` seconds.
+
+        :param mitogen.core.Message msg:
+            The message.
+        :param float deadline:
+            If not :data:`None`, seconds before timing out waiting for a reply.
+        :returns:
+            Deserialized reply.
+        :raises TimeoutError:
+            No message was received and `deadline` passed.
+        """
+        receiver = self.send_async(msg)
+        response = receiver.get(deadline)
+        data = response.unpickle()
+        _vv and IOLOG.debug('%r._send_await() -> %r', self, data)
+        return data
+
+    def __repr__(self):
+        return 'Context(%s, %r)' % (self.context_id, self.name)
+
+
+def _unpickle_context(context_id, name, router=None):
+    """
+    Reconstruct a :class:`Context` from its pickled ``(context_id, name)``
+    form (see :meth:`Context.__reduce__`), validating both fields since the
+    input may originate from an untrusted peer.
+    """
+    if not (isinstance(context_id, (int, long)) and context_id >= 0 and (
+        (name is None) or
+        (isinstance(name, UnicodeType) and len(name) < 100))
+    ):
+        raise TypeError('cannot unpickle Context: bad input')
+
+    # With a router available, reuse the deduplicated instance signals are
+    # raised on; otherwise build a detached facade.
+    if isinstance(router, Router):
+        return router.context_by_id(context_id, name=name)
+    return Context(None, context_id, name)  # For plain Jane pickle.
+
+
+class Poller(object):
+    """
+    A poller manages OS file descriptors the user is waiting to become
+    available for IO. The :meth:`poll` method blocks the calling thread
+    until one or more become ready. The default implementation is based on
+    :func:`select.poll`.
+
+    Each descriptor has an associated `data` element, which is unique for each
+    readiness type, and defaults to being the same as the file descriptor. The
+    :meth:`poll` method yields the data associated with a descriptor, rather
+    than the descriptor itself, allowing concise loops like::
+
+        p = Poller()
+        p.start_receive(conn.fd, data=conn.on_read)
+        p.start_transmit(conn.fd, data=conn.on_write)
+
+        for callback in p.poll():
+            callback()  # invoke appropriate bound instance method
+
+    Pollers may be modified while :meth:`poll` is yielding results. Removals
+    are processed immediately, causing pending events for the descriptor to be
+    discarded.
+
+    The :meth:`close` method must be called when a poller is discarded to avoid
+    a resource leak.
+
+    Pollers may only be used by one thread at a time.
+    """
+    SUPPORTED = True
+
+    # This changed from select() to poll() in Mitogen 0.2.4. Since poll() has
+    # no upper FD limit, it is suitable for use with Latch, which must handle
+    # FDs larger than select's limit during many-host runs. We want this
+    # because poll() requires no setup and teardown: just a single system call,
+    # which is important because Latch.get() creates a Poller on each
+    # invocation. In a microbenchmark, poll() vs. epoll_ctl() is 30% faster in
+    # this scenario. If select() must return in future, it is important
+    # Latch.poller_class is set from parent.py to point to the industrial
+    # strength poller for the OS, otherwise Latch will fail randomly.
+
+    #: Increments on every poll(). Used to version _rfds and _wfds.
+    _generation = 1
+
+    def __init__(self):
+        # Maps fd -> (data, generation) for each readiness type.
+        self._rfds = {}
+        self._wfds = {}
+
+    def __repr__(self):
+        return '%s(%#x)' % (type(self).__name__, id(self))
+
+    def _update(self, fd):
+        """
+        Required by PollPoller subclass.
+        """
+        pass
+
+    @property
+    def readers(self):
+        """
+        Return a list of `(fd, data)` tuples for every FD registered for
+        receive readiness.
+        """
+        return list((fd, data) for fd, (data, gen) in self._rfds.items())
+
+    @property
+    def writers(self):
+        """
+        Return a list of `(fd, data)` tuples for every FD registered for
+        transmit readiness.
+        """
+        return list((fd, data) for fd, (data, gen) in self._wfds.items())
+
+    def close(self):
+        """
+        Close any underlying OS resource used by the poller.
+        """
+        pass
+
+    def start_receive(self, fd, data=None):
+        """
+        Cause :meth:`poll` to yield `data` when `fd` is readable.
+        """
+        # Stamp with the current generation so in-progress poll() calls do
+        # not yield a registration made after they started.
+        self._rfds[fd] = (data or fd, self._generation)
+        self._update(fd)
+
+    def stop_receive(self, fd):
+        """
+        Stop yielding readability events for `fd`.
+
+        Redundant calls to :meth:`stop_receive` are silently ignored, this may
+        change in future.
+        """
+        self._rfds.pop(fd, None)
+        self._update(fd)
+
+    def start_transmit(self, fd, data=None):
+        """
+        Cause :meth:`poll` to yield `data` when `fd` is writeable.
+        """
+        self._wfds[fd] = (data or fd, self._generation)
+        self._update(fd)
+
+    def stop_transmit(self, fd):
+        """
+        Stop yielding writeability events for `fd`.
+
+        Redundant calls to :meth:`stop_transmit` are silently ignored, this may
+        change in future.
+        """
+        self._wfds.pop(fd, None)
+        self._update(fd)
+
+    def _poll(self, timeout):
+        # Generator: yield the data element for each ready descriptor that is
+        # still registered and was registered before this poll() began.
+        (rfds, wfds, _), _ = io_op(select.select,
+            self._rfds,
+            self._wfds,
+            (), timeout
+        )
+
+        for fd in rfds:
+            _vv and IOLOG.debug('%r: POLLIN for %r', self, fd)
+            data, gen = self._rfds.get(fd, (None, None))
+            if gen and gen < self._generation:
+                yield data
+
+        for fd in wfds:
+            _vv and IOLOG.debug('%r: POLLOUT for %r', self, fd)
+            data, gen = self._wfds.get(fd, (None, None))
+            if gen and gen < self._generation:
+                yield data
+
+        # NOTE(review): no observable effect here -- this rebinds a local
+        # after all yields have completed; verify against upstream, where a
+        # millisecond conversion belongs to the poll()-based subclass.
+        if timeout:
+            timeout *= 1000
+
+    def poll(self, timeout=None):
+        """
+        Block the calling thread until one or more FDs are ready for IO.
+
+        :param float timeout:
+            If not :data:`None`, seconds to wait without an event before
+            returning an empty iterable.
+        :returns:
+            Iterable of `data` elements associated with ready FDs.
+        """
+        _vv and IOLOG.debug('%r.poll(%r)', self, timeout)
+        self._generation += 1
+        return self._poll(timeout)
+
+
+class Latch(object):
+    """
+    A latch is a :class:`Queue.Queue`-like object that supports mutation and
+    waiting from multiple threads, however unlike :class:`Queue.Queue`,
+    waiting threads always remain interruptible, so CTRL+C always succeeds, and
+    waits where a timeout is set experience no wake up latency. These
+    properties are not possible in combination using the built-in threading
+    primitives available in Python 2.x.
+
+    Latches implement queues using the UNIX self-pipe trick, and a per-thread
+    :func:`socket.socketpair` that is lazily created the first time any
+    latch attempts to sleep on a thread, and dynamically associated with the
+    waiting Latch only for duration of the wait.
+
+    See :ref:`waking-sleeping-threads` for further discussion.
+    """
+    poller_class = Poller
+
+    notify = None
+
+    # The _cls_ prefixes here are to make it crystal clear in the code which
+    # state mutation isn't covered by :attr:`_lock`.
+
+    #: List of reusable :func:`socket.socketpair` tuples. The list is mutated
+    #: from multiple threads, the only safe operations are `append()` and
+    #: `pop()`.
+    _cls_idle_socketpairs = []
+
+    #: List of every socket object that must be closed by :meth:`_on_fork`.
+    #: Inherited descriptors cannot be reused, as the duplicated handles
+    #: reference the same underlying kernel object in use by the parent.
+    _cls_all_sockets = []
+
+    def __init__(self):
+        """
+        Construct an empty, open latch. All mutable state below is guarded by
+        :attr:`_lock` except where noted on the class-level ``_cls_`` lists.
+        """
+        self.closed = False
+        self._lock = threading.Lock()
+        #: List of unconsumed enqueued items.
+        self._queue = []
+        #: List of `(wsock, cookie)` awaiting an element, where `wsock` is the
+        #: socketpair's write side, and `cookie` is the string to write.
+        self._sleeping = []
+        #: Number of elements of :attr:`_sleeping` that have already been
+        #: woken, and have a corresponding element index from :attr:`_queue`
+        #: assigned to them.
+        self._waking = 0
+
+    @classmethod
+    def _on_fork(cls):
+        """
+        Clean up any files belonging to the parent process after a fork.
+        """
+        # Drop the reuse pool entirely and close every inherited socket; the
+        # duplicated handles share kernel objects with the parent.
+        cls._cls_idle_socketpairs = []
+        while cls._cls_all_sockets:
+            cls._cls_all_sockets.pop().close()
+
+    def close(self):
+        """
+        Mark the latch as closed, and cause every sleeping thread to be woken,
+        with :class:`mitogen.core.LatchError` raised in each thread.
+        """
+        self._lock.acquire()
+        try:
+            self.closed = True
+            # Wake every sleeper that has not already been woken; each sees
+            # self.closed on wakeup and raises LatchError.
+            while self._waking < len(self._sleeping):
+                wsock, cookie = self._sleeping[self._waking]
+                self._wake(wsock, cookie)
+                self._waking += 1
+        finally:
+            self._lock.release()
+
+    def empty(self):
+        """
+        Return :data:`True` if calling :meth:`get` would block.
+
+        As with :class:`Queue.Queue`, :data:`True` may be returned even
+        though a subsequent call to :meth:`get` will succeed, since a
+        message may be posted at any moment between :meth:`empty` and
+        :meth:`get`.
+
+        As with :class:`Queue.Queue`, :data:`False` may be returned even
+        though a subsequent call to :meth:`get` will block, since another
+        waiting thread may be woken at any moment between :meth:`empty` and
+        :meth:`get`.
+
+        :raises LatchError:
+            The latch has already been marked closed.
+        """
+        self._lock.acquire()
+        try:
+            if self.closed:
+                raise LatchError()
+            return len(self._queue) == 0
+        finally:
+            self._lock.release()
+
+    def _get_socketpair(self):
+        """
+        Return an unused socketpair, creating one if none exist.
+        """
+        try:
+            return self._cls_idle_socketpairs.pop()  # pop() must be atomic
+        except IndexError:
+            rsock, wsock = socket.socketpair()
+            set_cloexec(rsock.fileno())
+            set_cloexec(wsock.fileno())
+            # Track both ends so _on_fork() can close them in a child.
+            self._cls_all_sockets.extend((rsock, wsock))
+            return rsock, wsock
+
+    #: Magic prefix: 'LTCH' repeated to fill one native long.
+    COOKIE_MAGIC, = struct.unpack('L', b('LTCH') * (struct.calcsize('L')//4))
+    COOKIE_FMT = '>Qqqq'  # #545: id() and get_ident() may exceed long on armhfp.
+    COOKIE_SIZE = struct.calcsize(COOKIE_FMT)
+
+    def _make_cookie(self):
+        """
+        Return a string encoding the ID of the process, instance and thread.
+        This disambiguates legitimate wake-ups, accidental writes to the FD,
+        and buggy internal FD sharing.
+        """
+        return struct.pack(self.COOKIE_FMT, self.COOKIE_MAGIC,
+                           os.getpid(), id(self), thread.get_ident())
+
+    def get(self, timeout=None, block=True):
+        """
+        Return the next enqueued object, or sleep waiting for one.
+
+        :param float timeout:
+            If not :data:`None`, specifies a timeout in seconds.
+
+        :param bool block:
+            If :data:`False`, immediately raise
+            :class:`mitogen.core.TimeoutError` if the latch is empty.
+
+        :raises mitogen.core.LatchError:
+            :meth:`close` has been called, and the object is no longer valid.
+
+        :raises mitogen.core.TimeoutError:
+            Timeout was reached.
+
+        :returns:
+            The de-queued object.
+        """
+        _vv and IOLOG.debug('%r.get(timeout=%r, block=%r)',
+                            self, timeout, block)
+        self._lock.acquire()
+        try:
+            if self.closed:
+                raise LatchError()
+            # Queue elements 0..i-1 are assigned to threads already sleeping;
+            # element i, if present, is free for us to take without sleeping.
+            i = len(self._sleeping)
+            if len(self._queue) > i:
+                _vv and IOLOG.debug('%r.get() -> %r', self, self._queue[i])
+                return self._queue.pop(i)
+            if not block:
+                raise TimeoutError()
+            # Register as a sleeper before releasing the lock so put() can
+            # wake us via the socketpair.
+            rsock, wsock = self._get_socketpair()
+            cookie = self._make_cookie()
+            self._sleeping.append((wsock, cookie))
+        finally:
+            self._lock.release()
+
+        # A fresh Poller per call: cheap for the poll()-based implementations
+        # (see comment on Poller above).
+        poller = self.poller_class()
+        poller.start_receive(rsock.fileno())
+        try:
+            return self._get_sleep(poller, timeout, block, rsock, wsock, cookie)
+        finally:
+            poller.close()
+
+    def _get_sleep(self, poller, timeout, block, rsock, wsock, cookie):
+        """
+        When a result is not immediately available, sleep waiting for
+        :meth:`put` to write a byte to our socket pair.
+        """
+        _vv and IOLOG.debug(
+            '%r._get_sleep(timeout=%r, block=%r, rfd=%d, wfd=%d)',
+            self, timeout, block, rsock.fileno(), wsock.fileno()
+        )
+
+        e = None
+        woken = None
+        try:
+            woken = list(poller.poll(timeout))
+        except Exception:
+            e = sys.exc_info()[1]
+
+        self._lock.acquire()
+        try:
+            # Our position among the sleepers maps directly onto the queue
+            # index reserved for us by put().
+            i = self._sleeping.index((wsock, cookie))
+            del self._sleeping[i]
+            if not woken:
+                # Either the poll raised, or it timed out with no event.
+                raise e or TimeoutError()
+
+            got_cookie = rsock.recv(self.COOKIE_SIZE)
+            # Wake-up consumed; return the socketpair to the shared pool.
+            self._cls_idle_socketpairs.append((rsock, wsock))
+
+            # Verify the byte stream really came from our put() and not from
+            # a stray write to the FD (e.g. buggy FD sharing across fork).
+            assert cookie == got_cookie, (
+                "Cookie incorrect; got %r, expected %r" \
+                % (binascii.hexlify(got_cookie),
+                   binascii.hexlify(cookie))
+            )
+            assert i < self._waking, (
+                "Cookie correct, but no queue element assigned."
+            )
+            self._waking -= 1
+            if self.closed:
+                raise LatchError()
+            _vv and IOLOG.debug('%r.get() wake -> %r', self, self._queue[i])
+            return self._queue.pop(i)
+        finally:
+            self._lock.release()
+
+    def put(self, obj=None):
+        """
+        Enqueue an object, waking the first thread waiting for a result, if one
+        exists.
+
+        :param obj:
+            Object to enqueue. Defaults to :data:`None` as a convenience when
+            using :class:`Latch` only for synchronization.
+        :raises mitogen.core.LatchError:
+            :meth:`close` has been called, and the object is no longer valid.
+        """
+        _vv and IOLOG.debug('%r.put(%r)', self, obj)
+        self._lock.acquire()
+        try:
+            if self.closed:
+                raise LatchError()
+            self._queue.append(obj)
+
+            # _waking counts sleepers already signalled but not yet dequeued;
+            # wake the next unsignalled sleeper in FIFO order, if any.
+            if self._waking < len(self._sleeping):
+                wsock, cookie = self._sleeping[self._waking]
+                self._waking += 1
+                _vv and IOLOG.debug('%r.put() -> waking wfd=%r',
+                                    self, wsock.fileno())
+                self._wake(wsock, cookie)
+            elif self.notify:
+                self.notify(self)
+        finally:
+            self._lock.release()
+
+    def _wake(self, wsock, cookie):
+        """
+        Wake one sleeping thread by writing its cookie to the write side of
+        its socketpair. The sleeper validates the cookie in :meth:`_get_sleep`.
+        """
+        written, disconnected = io_op(os.write, wsock.fileno(), cookie)
+        assert written == len(cookie) and not disconnected
+
+    def __repr__(self):
+        """
+        Debug representation: object identity, queue depth and calling thread.
+        """
+        # currentThread()/getName() retained for Python 2.x compatibility;
+        # they are deprecated aliases on modern Python 3.
+        return 'Latch(%#x, size=%d, t=%r)' % (
+            id(self),
+            len(self._queue),
+            threading.currentThread().getName(),
+        )
+
+
+class Waker(BasicStream):
+    """
+    :class:`BasicStream` subclass implementing the `UNIX self-pipe trick`_.
+    Used to wake the multiplexer when another thread needs to modify its state
+    (via a cross-thread function call).
+
+    .. _UNIX self-pipe trick: https://cr.yp.to/docs/selfpipe.html
+    """
+    #: Thread ident of the broker thread; assigned externally so defer() can
+    #: detect re-entrant calls made from the broker thread itself.
+    broker_ident = None
+
+    def __init__(self, broker):
+        """
+        :param broker:
+            The broker whose multiplexer this waker interrupts.
+        """
+        self._broker = broker
+        self._lock = threading.Lock()
+        # Pending (func, args, kwargs) tuples awaiting execution on the
+        # broker thread; guarded by _lock.
+        self._deferred = []
+
+        rfd, wfd = os.pipe()
+        self.receive_side = Side(self, rfd)
+        self.transmit_side = Side(self, wfd)
+
+    def __repr__(self):
+        return 'Waker(%r rfd=%r, wfd=%r)' % (
+            self._broker,
+            self.receive_side and self.receive_side.fd,
+            self.transmit_side and self.transmit_side.fd,
+        )
+
+    @property
+    def keep_alive(self):
+        """
+        Prevent immediate Broker shutdown while deferred functions remain.
+        """
+        self._lock.acquire()
+        try:
+            return len(self._deferred)
+        finally:
+            self._lock.release()
+
+    def on_receive(self, broker):
+        """
+        Drain the pipe and fire callbacks. Since :attr:`_deferred` is
+        synchronized, :meth:`defer` and :meth:`on_receive` can conspire to
+        ensure only one byte needs to be pending regardless of queue length.
+        """
+        _vv and IOLOG.debug('%r.on_receive()', self)
+        self._lock.acquire()
+        try:
+            # Consume the single pending wake byte and take ownership of the
+            # whole pending list while holding the lock.
+            self.receive_side.read(1)
+            deferred = self._deferred
+            self._deferred = []
+        finally:
+            self._lock.release()
+
+        # Run callbacks outside the lock; a crash tears down the broker.
+        for func, args, kwargs in deferred:
+            try:
+                func(*args, **kwargs)
+            except Exception:
+                LOG.exception('defer() crashed: %r(*%r, **%r)',
+                              func, args, kwargs)
+                self._broker.shutdown()
+
+    def _wake(self):
+        """
+        Wake the multiplexer by writing a byte. If Broker is midway through
+        teardown, the FD may already be closed, so ignore EBADF.
+        """
+        try:
+            self.transmit_side.write(b(' '))
+        except OSError:
+            e = sys.exc_info()[1]
+            if e.args[0] != errno.EBADF:
+                raise
+
+    broker_shutdown_msg = (
+        "An attempt was made to enqueue a message with a Broker that has "
+        "already exitted. It is likely your program called Broker.shutdown() "
+        "too early."
+    )
+
+    def defer(self, func, *args, **kwargs):
+        """
+        Arrange for `func()` to execute on the broker thread. This function
+        returns immediately without waiting the result of `func()`. Use
+        :meth:`defer_sync` to block until a result is available.
+
+        :raises mitogen.core.Error:
+            :meth:`defer` was called after :class:`Broker` has begun shutdown.
+        """
+        # Fast path: already on the broker thread, run synchronously.
+        if thread.get_ident() == self.broker_ident:
+            _vv and IOLOG.debug('%r.defer() [immediate]', self)
+            return func(*args, **kwargs)
+        if self._broker._exitted:
+            raise Error(self.broker_shutdown_msg)
+
+        _vv and IOLOG.debug('%r.defer() [fd=%r]', self, self.transmit_side.fd)
+        self._lock.acquire()
+        try:
+            # Only write a wake byte on the empty->non-empty transition; one
+            # byte suffices for any number of queued callbacks.
+            if not self._deferred:
+                self._wake()
+            self._deferred.append((func, args, kwargs))
+        finally:
+            self._lock.release()
+
+
+class IoLogger(BasicStream):
+    """
+    :class:`BasicStream` subclass that sets up redirection of a standard
+    UNIX file descriptor back into the Python :mod:`logging` package.
+    """
+    # Accumulates partial output until a newline arrives; flushed one line
+    # at a time by _log_lines().
+    _buf = ''
+
+    def __init__(self, broker, name, dest_fd):
+        """
+        :param broker:
+            Broker to register the receive side with.
+        :param str name:
+            Logger name used for the redirected output.
+        :param int dest_fd:
+            File descriptor to capture (e.g. stdout/stderr); it is replaced
+            via dup2() with the write end of an internal socketpair.
+        """
+        self._broker = broker
+        self._name = name
+        self._rsock, self._wsock = socket.socketpair()
+        os.dup2(self._wsock.fileno(), dest_fd)
+        set_cloexec(self._wsock.fileno())
+
+        self._log = logging.getLogger(name)
+        # #453: prevent accidental log initialization in a child creating a
+        # feedback loop.
+        self._log.propagate = False
+        self._log.handlers = logging.getLogger().handlers[:]
+
+        self.receive_side = Side(self, self._rsock.fileno())
+        self.transmit_side = Side(self, dest_fd, cloexec=False, blocking=True)
+        self._broker.start_receive(self)
+
+    def __repr__(self):
+        return '<IoLogger %s>' % (self._name,)
+
+    def _log_lines(self):
+        # Emit every complete line currently buffered; any trailing partial
+        # line stays in _buf for the next read.
+        while self._buf.find('\n') != -1:
+            line, _, self._buf = str_partition(self._buf, '\n')
+            self._log.info('%s', line.rstrip('\n'))
+
+    def on_shutdown(self, broker):
+        """Shut down the write end of the logging socket."""
+        _v and LOG.debug('%r.on_shutdown()', self)
+        if not IS_WSL:
+            # #333: WSL generates invalid readiness indication on shutdown()
+            self._wsock.shutdown(socket.SHUT_WR)
+        self._wsock.close()
+        self.transmit_side.close()
+
+    def on_receive(self, broker):
+        _vv and IOLOG.debug('%r.on_receive()', self)
+        buf = self.receive_side.read()
+        if not buf:
+            # EOF: the write side closed; disconnect this stream.
+            return self.on_disconnect(broker)
+
+        # latin1 decodes any byte sequence without error, preserving raw
+        # output even if it is not valid UTF-8.
+        self._buf += buf.decode('latin1')
+        self._log_lines()
+
+
+class Router(object):
+    """
+    Route messages between contexts, and invoke local handlers for messages
+    addressed to this context. :meth:`Router.route() <route>` straddles the
+    :class:`Broker` thread and user threads, it is safe to call anywhere.
+
+    **Note:** This is the somewhat limited core version of the Router class
+    used by child contexts. The master subclass is documented below this one.
+    """
+    context_class = Context
+    max_message_size = 128 * 1048576
+
+    #: When :data:`True`, permit children to only communicate with the current
+    #: context or a parent of the current context. Routing between siblings or
+    #: children of parents is prohibited, ensuring no communication is possible
+    #: between intentionally partitioned networks, such as when a program
+    #: simultaneously manipulates hosts spread across a corporate and a
+    #: production network, or production networks that are otherwise
+    #: air-gapped.
+    #:
+    #: Sending a prohibited message causes an error to be logged and a dead
+    #: message to be sent in reply to the errant message, if that message has
+    #: ``reply_to`` set.
+    #:
+    #: The value of :data:`unidirectional` becomes the default for the
+    #: :meth:`local() <mitogen.master.Router.local>` `unidirectional`
+    #: parameter.
+    unidirectional = False
+
+    def __init__(self, broker):
+        self.broker = broker
+        listen(broker, 'exit', self._on_broker_exit)
+        self._setup_logging()
+
+        self._write_lock = threading.Lock()
+        #: context ID -> Stream; must hold _write_lock to edit or iterate
+        self._stream_by_id = {}
+        #: List of contexts to notify of shutdown; must hold _write_lock
+        self._context_by_id = {}
+        self._last_handle = itertools.count(1000)
+        #: handle -> (persistent?, func(msg))
+        self._handle_map = {}
+        #: Context -> set { handle, .. }
+        self._handles_by_respondent = {}
+        self.add_handler(self._on_del_route, DEL_ROUTE)
+
+    def __repr__(self):
+        return 'Router(%r)' % (self.broker,)
+
+    def _setup_logging(self):
+        """
+        This is done in the :class:`Router` constructor for historical reasons.
+        It must be called before ExternalContext logs its first messages, but
+        after logging has been setup. It must also be called when any router is
+        constructed for a consumer app.
+        """
+        # Here seems as good a place as any.
+        global _v, _vv
+        _v = logging.getLogger().level <= logging.DEBUG
+        _vv = IOLOG.level <= logging.DEBUG
+
+    def _on_del_route(self, msg):
+        """
+        Stub :data:`DEL_ROUTE` handler; fires 'disconnect' events on the
+        corresponding :attr:`_context_by_id` member. This is replaced by
+        :class:`mitogen.parent.RouteMonitor` in an upgraded context.
+        """
+        LOG.error('%r._on_del_route() %r', self, msg)
+        if msg.is_dead:
+            return
+
+        target_id_s, _, name = bytes_partition(msg.data, b(':'))
+        target_id = int(target_id_s, 10)
+        context = self._context_by_id.get(target_id)
+        if context:
+            fire(context, 'disconnect')
+        else:
+            LOG.debug('DEL_ROUTE for unknown ID %r: %r', target_id, msg)
+
+    def _on_stream_disconnect(self, stream):
+        notify = []
+        self._write_lock.acquire()
+        try:
+            for context in list(self._context_by_id.values()):
+                stream_ = self._stream_by_id.get(context.context_id)
+                if stream_ is stream:
+                    del self._stream_by_id[context.context_id]
+                    notify.append(context)
+        finally:
+            self._write_lock.release()
+
+        # Happens outside lock as e.g. RouteMonitor wants the same lock.
+        for context in notify:
+            context.on_disconnect()
+
+    broker_exit_msg = 'Broker has exitted'
+
+    def _on_broker_exit(self):
+        while self._handle_map:
+            _, (_, func, _, _) = self._handle_map.popitem()
+            func(Message.dead(self.broker_exit_msg))
+
+    def myself(self):
+        """
+        Return a :class:`Context` referring to the current process.
+        """
+        return self.context_class(
+            router=self,
+            context_id=mitogen.context_id,
+            name='self',
+        )
+
+    def context_by_id(self, context_id, via_id=None, create=True, name=None):
+        """
+        Messy factory/lookup function to find a context by its ID, or construct
+        it. This will eventually be replaced by a more sensible interface.
+        """
+        context = self._context_by_id.get(context_id)
+        if context:
+            return context
+
+        if create and via_id is not None:
+            via = self.context_by_id(via_id)
+        else:
+            via = None
+
+        self._write_lock.acquire()
+        try:
+            context = self._context_by_id.get(context_id)
+            if create and not context:
+                context = self.context_class(self, context_id, name=name)
+                context.via = via
+                self._context_by_id[context_id] = context
+        finally:
+            self._write_lock.release()
+
+        return context
+
+    def register(self, context, stream):
+        """
+        Register a newly constructed context and its associated stream, and add
+        the stream's receive side to the I/O multiplexer. This method remains
+        public while the design has not yet settled.
+        """
+        _v and LOG.debug('register(%r, %r)', context, stream)
+        self._write_lock.acquire()
+        try:
+            self._stream_by_id[context.context_id] = stream
+            self._context_by_id[context.context_id] = context
+        finally:
+            self._write_lock.release()
+
+        self.broker.start_receive(stream)
+        listen(stream, 'disconnect', lambda: self._on_stream_disconnect(stream))
+
+    def stream_by_id(self, dst_id):
+        """
+        Return the :class:`Stream` that should be used to communicate with
+        `dst_id`. If a specific route for `dst_id` is not known, a reference to
+        the parent context's stream is returned.
+        """
+        return (
+            self._stream_by_id.get(dst_id) or
+            self._stream_by_id.get(mitogen.parent_id)
+        )
+
+    def del_handler(self, handle):
+        """
+        Remove the handle registered for `handle`
+
+        :raises KeyError:
+            The handle wasn't registered.
+        """
+        _, _, _, respondent = self._handle_map.pop(handle)
+        if respondent:
+            self._handles_by_respondent[respondent].discard(handle)
+
+    def add_handler(self, fn, handle=None, persist=True,
+                    policy=None, respondent=None,
+                    overwrite=False):
+        """
+        Invoke `fn(msg)` on the :class:`Broker` thread for each Message sent to
+        `handle` from this context. Unregister after one invocation if
+        `persist` is :data:`False`. If `handle` is :data:`None`, a new handle
+        is allocated and returned.
+
+        :param int handle:
+            If not :data:`None`, an explicit handle to register, usually one of
+            the ``mitogen.core.*`` constants. If unspecified, a new unused
+            handle will be allocated.
+
+        :param bool persist:
+            If :data:`False`, the handler will be unregistered after a single
+            message has been received.
+
+        :param Context respondent:
+            Context that messages to this handle are expected to be sent from.
+            If specified, arranges for a dead message to be delivered to `fn`
+            when disconnection of the context is detected.
+
+            In future `respondent` will likely also be used to prevent other
+            contexts from sending messages to the handle.
+
+        :param function policy:
+            Function invoked as `policy(msg, stream)` where `msg` is a
+            :class:`mitogen.core.Message` about to be delivered, and `stream`
+            is the :class:`mitogen.core.Stream` on which it was received. The
+            function must return :data:`True`, otherwise an error is logged and
+            delivery is refused.
+
+            Two built-in policy functions exist:
+
+            * :func:`has_parent_authority`: requires the message arrived from a
+              parent context, or a context acting with a parent context's
+              authority (``auth_id``).
+
+            * :func:`mitogen.parent.is_immediate_child`: requires the
+              message arrived from an immediately connected child, for use in
+              messaging patterns where either something becomes buggy or
+              insecure by permitting indirect upstream communication.
+
+            In case of refusal, and the message's ``reply_to`` field is
+            nonzero, a :class:`mitogen.core.CallError` is delivered to the
+            sender indicating refusal occurred.
+
+        :param bool overwrite:
+            If :data:`True`, allow existing handles to be silently overwritten.
+
+        :return:
+            `handle`, or if `handle` was :data:`None`, the newly allocated
+            handle.
+        :raises Error:
+            Attemp to register handle that was already registered.
+        """
+        handle = handle or next(self._last_handle)
+        _vv and IOLOG.debug('%r.add_handler(%r, %r, %r)', self, fn, handle, persist)
+        if handle in self._handle_map and not overwrite:
+            raise Error(self.duplicate_handle_msg)
+
+        self._handle_map[handle] = persist, fn, policy, respondent
+        if respondent:
+            if respondent not in self._handles_by_respondent:
+                self._handles_by_respondent[respondent] = set()
+                listen(respondent, 'disconnect',
+                       lambda: self._on_respondent_disconnect(respondent))
+            self._handles_by_respondent[respondent].add(handle)
+
+        return handle
+
+    duplicate_handle_msg = 'cannot register a handle that is already exists'
+    refused_msg = 'refused by policy'
+    invalid_handle_msg = 'invalid handle'
+    too_large_msg = 'message too large (max %d bytes)'
+    respondent_disconnect_msg = 'the respondent Context has disconnected'
+    broker_shutdown_msg = 'Broker is shutting down'
+    no_route_msg = 'no route to %r, my ID is %r'
+    unidirectional_msg = (
+        'routing mode prevents forward of message from context %d via '
+        'context %d'
+    )
+
+    def _on_respondent_disconnect(self, context):
+        for handle in self._handles_by_respondent.pop(context, ()):
+            _, fn, _, _  = self._handle_map[handle]
+            fn(Message.dead(self.respondent_disconnect_msg))
+            del self._handle_map[handle]
+
+    def on_shutdown(self, broker):
+        """Called during :meth:`Broker.shutdown`, informs callbacks registered
+        with :meth:`add_handle_cb` the connection is dead."""
+        _v and LOG.debug('%r.on_shutdown(%r)', self, broker)
+        fire(self, 'shutdown')
+        for handle, (persist, fn) in self._handle_map.iteritems():
+            _v and LOG.debug('%r.on_shutdown(): killing %r: %r', self, handle, fn)
+            fn(Message.dead(self.broker_shutdown_msg))
+
+    def _maybe_send_dead(self, msg, reason, *args):
+        if args:
+            reason %= args
+        LOG.debug('%r: %r is dead: %r', self, msg, reason)
+        if msg.reply_to and not msg.is_dead:
+            msg.reply(Message.dead(reason=reason), router=self)
+
+    def _invoke(self, msg, stream):
+        # IOLOG.debug('%r._invoke(%r)', self, msg)
+        try:
+            persist, fn, policy, respondent = self._handle_map[msg.handle]
+        except KeyError:
+            self._maybe_send_dead(msg, reason=self.invalid_handle_msg)
+            return
+
+        if respondent and not (msg.is_dead or
+                               msg.src_id == respondent.context_id):
+            self._maybe_send_dead(msg, 'reply from unexpected context')
+            return
+
+        if policy and not policy(msg, stream):
+            self._maybe_send_dead(msg, self.refused_msg)
+            return
+
+        if not persist:
+            self.del_handler(msg.handle)
+
+        try:
+            fn(msg)
+        except Exception:
+            LOG.exception('%r._invoke(%r): %r crashed', self, msg, fn)
+
+    def _async_route(self, msg, in_stream=None):
+        """
+        Arrange for `msg` to be forwarded towards its destination. If its
+        destination is the local context, then arrange for it to be dispatched
+        using the local handlers.
+
+        This is a lower overhead version of :meth:`route` that may only be
+        called from the :class:`Broker` thread.
+
+        :param Stream in_stream:
+            If not :data:`None`, the stream the message arrived on. Used for
+            performing source route verification, to ensure sensitive messages
+            such as ``CALL_FUNCTION`` arrive only from trusted contexts.
+        """
+        _vv and IOLOG.debug('%r._async_route(%r, %r)', self, msg, in_stream)
+
+        if len(msg.data) > self.max_message_size:
+            self._maybe_send_dead(msg, self.too_large_msg % (
+                self.max_message_size,
+            ))
+            return
+
+        # Perform source verification.
+        if in_stream:
+            parent = self._stream_by_id.get(mitogen.parent_id)
+            expect = self._stream_by_id.get(msg.auth_id, parent)
+            if in_stream != expect:
+                LOG.error('%r: bad auth_id: got %r via %r, not %r: %r',
+                          self, msg.auth_id, in_stream, expect, msg)
+                return
+
+            if msg.src_id != msg.auth_id:
+                expect = self._stream_by_id.get(msg.src_id, parent)
+                if in_stream != expect:
+                    LOG.error('%r: bad src_id: got %r via %r, not %r: %r',
+                              self, msg.src_id, in_stream, expect, msg)
+                    return
+
+            if in_stream.auth_id is not None:
+                msg.auth_id = in_stream.auth_id
+
+            # Maintain a set of IDs the source ever communicated with.
+            in_stream.egress_ids.add(msg.dst_id)
+
+        if msg.dst_id == mitogen.context_id:
+            return self._invoke(msg, in_stream)
+
+        out_stream = self._stream_by_id.get(msg.dst_id)
+        if out_stream is None:
+            out_stream = self._stream_by_id.get(mitogen.parent_id)
+
+        if out_stream is None:
+            self._maybe_send_dead(msg, self.no_route_msg,
+                                  msg.dst_id, mitogen.context_id)
+            return
+
+        if in_stream and self.unidirectional and not \
+                (in_stream.is_privileged or out_stream.is_privileged):
+            self._maybe_send_dead(msg, self.unidirectional_msg,
+                in_stream.remote_id, out_stream.remote_id)
+            return
+
+        out_stream._send(msg)
+
+    def route(self, msg):
+        """
+        Arrange for the :class:`Message` `msg` to be delivered to its
+        destination using any relevant downstream context, or if none is found,
+        by forwarding the message upstream towards the master context. If `msg`
+        is destined for the local context, it is dispatched using the handles
+        registered with :meth:`add_handler`.
+
+        This may be called from any thread.
+        """
+        self.broker.defer(self._async_route, msg)
+
+
+class Broker(object):
+    """
+    Responsible for handling I/O multiplexing in a private thread.
+
+    **Note:** This is the somewhat limited core version of the Broker class
+    used by child contexts. The master subclass is documented below.
+    """
+    poller_class = Poller
+    _waker = None
+    _thread = None
+
+    #: Seconds grace to allow :class:`streams <Stream>` to shutdown gracefully
+    #: before force-disconnecting them during :meth:`shutdown`.
+    shutdown_timeout = 3.0
+
+    def __init__(self, poller_class=None, activate_compat=True):
+        self._alive = True
+        self._exitted = False
+        self._waker = Waker(self)
+        #: Arrange for `func(\*args, \**kwargs)` to be executed on the broker
+        #: thread, or immediately if the current thread is the broker thread.
+        #: Safe to call from any thread.
+        self.defer = self._waker.defer
+        self.poller = self.poller_class()
+        self.poller.start_receive(
+            self._waker.receive_side.fd,
+            (self._waker.receive_side, self._waker.on_receive)
+        )
+        self._thread = threading.Thread(
+            target=self._broker_main,
+            name='mitogen.broker'
+        )
+        self._thread.start()
+        if activate_compat:
+            self._py24_25_compat()
+
+    def _py24_25_compat(self):
+        """
+        Python 2.4/2.5 have grave difficulties with threads/fork. We
+        mandatorily quiesce all running threads during fork using a
+        monkey-patch there.
+        """
+        if sys.version_info < (2, 6):
+            # import_module() is used to avoid dep scanner.
+            os_fork = import_module('mitogen.os_fork')
+            mitogen.os_fork._notice_broker_or_pool(self)
+
+    def start_receive(self, stream):
+        """
+        Mark the :attr:`receive_side <Stream.receive_side>` on `stream` as
+        ready for reading. Safe to call from any thread. When the associated
+        file descriptor becomes ready for reading,
+        :meth:`BasicStream.on_receive` will be called.
+        """
+        _vv and IOLOG.debug('%r.start_receive(%r)', self, stream)
+        side = stream.receive_side
+        assert side and side.fd is not None
+        self.defer(self.poller.start_receive,
+                   side.fd, (side, stream.on_receive))
+
+    def stop_receive(self, stream):
+        """
+        Mark the :attr:`receive_side <Stream.receive_side>` on `stream` as not
+        ready for reading. Safe to call from any thread.
+        """
+        _vv and IOLOG.debug('%r.stop_receive(%r)', self, stream)
+        self.defer(self.poller.stop_receive, stream.receive_side.fd)
+
+    def _start_transmit(self, stream):
+        """
+        Mark the :attr:`transmit_side <Stream.transmit_side>` on `stream` as
+        ready for writing. Must only be called from the Broker thread. When the
+        associated file descriptor becomes ready for writing,
+        :meth:`BasicStream.on_transmit` will be called.
+        """
+        _vv and IOLOG.debug('%r._start_transmit(%r)', self, stream)
+        side = stream.transmit_side
+        assert side and side.fd is not None
+        self.poller.start_transmit(side.fd, (side, stream.on_transmit))
+
+    def _stop_transmit(self, stream):
+        """
+        Mark the :attr:`transmit_side <Stream.receive_side>` on `stream` as not
+        ready for writing.
+        """
+        _vv and IOLOG.debug('%r._stop_transmit(%r)', self, stream)
+        self.poller.stop_transmit(stream.transmit_side.fd)
+
+    def keep_alive(self):
+        """
+        Return :data:`True` if any reader's :attr:`Side.keep_alive` attribute
+        is :data:`True`, or any :class:`Context` is still registered that is
+        not the master. Used to delay shutdown while some important work is in
+        progress (e.g. log draining).
+        """
+        it = (side.keep_alive for (_, (side, _)) in self.poller.readers)
+        return sum(it, 0)
+
+    def defer_sync(self, func):
+        """
+        Arrange for `func()` to execute on :class:`Broker` thread, blocking the
+        current thread until a result or exception is available.
+
+        :returns:
+            Return value of `func()`.
+        """
+        latch = Latch()
+        def wrapper():
+            try:
+                latch.put(func())
+            except Exception:
+                latch.put(sys.exc_info()[1])
+        self.defer(wrapper)
+        res = latch.get()
+        if isinstance(res, Exception):
+            raise res
+        return res
+
+    def _call(self, stream, func):
+        """
+        Call `func(self)`, catching any exception that might occur, logging it,
+        and force-disconnecting the related `stream`.
+        """
+        try:
+            func(self)
+        except Exception:
+            LOG.exception('%r crashed', stream)
+            stream.on_disconnect(self)
+
+    def _loop_once(self, timeout=None):
+        """
+        Execute a single :class:`Poller` wait, dispatching any IO events that
+        caused the wait to complete.
+
+        :param float timeout:
+            If not :data:`None`, maximum time in seconds to wait for events.
+        """
+        _vv and IOLOG.debug('%r._loop_once(%r, %r)',
+                            self, timeout, self.poller)
+        #IOLOG.debug('readers =\n%s', pformat(self.poller.readers))
+        #IOLOG.debug('writers =\n%s', pformat(self.poller.writers))
+        for side, func in self.poller.poll(timeout):
+            self._call(side.stream, func)
+
+    def _broker_exit(self):
+        """
+        Forcefully call :meth:`Stream.on_disconnect` on any streams that failed
+        to shut down gracefully, then discard the :class:`Poller`.
+        """
+        for _, (side, _) in self.poller.readers + self.poller.writers:
+            LOG.debug('_broker_main() force disconnecting %r', side)
+            side.stream.on_disconnect(self)
+
+        self.poller.close()
+
+    def _broker_shutdown(self):
+        """
+        Invoke :meth:`Stream.on_shutdown` for every active stream, then allow
+        up to :attr:`shutdown_timeout` seconds for the streams to unregister
+        themselves, logging an error if any did not unregister during the grace
+        period.
+        """
+        for _, (side, _) in self.poller.readers + self.poller.writers:
+            self._call(side.stream, side.stream.on_shutdown)
+
+        deadline = time.time() + self.shutdown_timeout
+        while self.keep_alive() and time.time() < deadline:
+            self._loop_once(max(0, deadline - time.time()))
+
+        if self.keep_alive():
+            LOG.error('%r: some streams did not close gracefully. '
+                      'The most likely cause for this is one or '
+                      'more child processes still connected to '
+                      'our stdout/stderr pipes.', self)
+
+    def _do_broker_main(self):
+        """
+        Broker thread main function. Dispatches IO events until
+        :meth:`shutdown` is called.
+        """
+        # For Python 2.4, no way to retrieve ident except on thread.
+        self._waker.broker_ident = thread.get_ident()
+        try:
+            while self._alive:
+                self._loop_once()
+
+            fire(self, 'shutdown')
+            self._broker_shutdown()
+        except Exception:
+            LOG.exception('_broker_main() crashed')
+
+        self._alive = False  # Ensure _alive is consistent on crash.
+        self._exitted = True
+        self._broker_exit()
+
+    def _broker_main(self):
+        _profile_hook('mitogen.broker', self._do_broker_main)
+        fire(self, 'exit')
+
+    def shutdown(self):
+        """
+        Request broker gracefully disconnect streams and stop. Safe to call
+        from any thread.
+        """
+        _v and LOG.debug('%r.shutdown()', self)
+        def _shutdown():
+            self._alive = False
+        if self._alive and not self._exitted:
+            # Flip the flag from the broker thread itself (via defer) rather
+            # than here, avoiding a race with _do_broker_main()'s loop test.
+            self.defer(_shutdown)
+
+    def join(self):
+        """
+        Wait for the broker to stop, expected to be called after
+        :meth:`shutdown`.
+        """
+        # Blocks until _broker_main() returns on the broker thread.
+        self._thread.join()
+
+    def __repr__(self):
+        # e.g. "Broker(0x7f12345678)"; the address disambiguates instances.
+        return 'Broker(%#x)' % (id(self),)
+
+
+class Dispatcher(object):
+    """
+    Implementation of the :data:`CALL_FUNCTION` handle for a child context.
+    Listens on the child's main thread for messages sent by
+    :class:`mitogen.parent.CallChain` and dispatches the function calls they
+    describe.
+
+    If a :class:`mitogen.parent.CallChain` sending a message is in pipelined
+    mode, any exception that occurs is recorded, and causes all subsequent
+    calls with the same `chain_id` to fail with the same exception.
+    """
+    def __init__(self, econtext):
+        self.econtext = econtext
+        #: Chain ID -> CallError if prior call failed.
+        self._error_by_chain_id = {}
+        # Only accept CALL_FUNCTION messages that originate from a parent.
+        self.recv = Receiver(router=econtext.router,
+                             handle=CALL_FUNCTION,
+                             policy=has_parent_authority)
+        # Stop accepting new calls once the broker begins shutting down.
+        listen(econtext.broker, 'shutdown', self.recv.close)
+
+    @classmethod
+    @takes_econtext
+    def forget_chain(cls, chain_id, econtext):
+        # Discard any recorded failure so later calls on this chain can
+        # succeed again.
+        econtext.dispatcher._error_by_chain_id.pop(chain_id, None)
+
+    def _parse_request(self, msg):
+        """
+        Unpickle `msg` and resolve the target callable, returning a
+        ``(chain_id, fn, args, kwargs)`` tuple.
+        """
+        data = msg.unpickle(throw=False)
+        _v and LOG.debug('_dispatch_one(%r)', data)
+
+        chain_id, modname, klass, func, args, kwargs = data
+        obj = import_module(modname)
+        if klass:
+            obj = getattr(obj, klass)
+        fn = getattr(obj, func)
+        # Inject runtime context objects when the target requested them via
+        # the takes_econtext / takes_router decorators.
+        if getattr(fn, 'mitogen_takes_econtext', None):
+            kwargs.setdefault('econtext', self.econtext)
+        if getattr(fn, 'mitogen_takes_router', None):
+            kwargs.setdefault('router', self.econtext.router)
+
+        return chain_id, fn, args, kwargs
+
+    def _dispatch_one(self, msg):
+        """
+        Execute a single call request, returning ``(chain_id, result)``
+        where `result` is a :class:`CallError` on failure.
+        """
+        try:
+            chain_id, fn, args, kwargs = self._parse_request(msg)
+        except Exception:
+            # Request could not even be decoded; there is no chain to poison.
+            return None, CallError(sys.exc_info()[1])
+
+        if chain_id in self._error_by_chain_id:
+            # An earlier pipelined call on this chain failed: replay it.
+            return chain_id, self._error_by_chain_id[chain_id]
+
+        try:
+            return chain_id, fn(*args, **kwargs)
+        except Exception:
+            e = CallError(sys.exc_info()[1])
+            if chain_id is not None:
+                # Poison the chain so subsequent calls fail identically.
+                self._error_by_chain_id[chain_id] = e
+            return chain_id, e
+
+    def _dispatch_calls(self):
+        # Receive loop; terminates when self.recv is closed on shutdown.
+        for msg in self.recv:
+            chain_id, ret = self._dispatch_one(msg)
+            _v and LOG.debug('_dispatch_calls: %r -> %r', msg, ret)
+            if msg.reply_to:
+                msg.reply(ret)
+            elif isinstance(ret, CallError) and chain_id is None:
+                # No-reply call failed and nobody is waiting: log it here.
+                LOG.error('No-reply function call failed: %s', ret)
+
+    def run(self):
+        """Run the optional on_start hook, then the dispatch loop."""
+        if self.econtext.config.get('on_start'):
+            self.econtext.config['on_start'](self.econtext)
+
+        _profile_hook('mitogen.child_main', self._dispatch_calls)
+
+
+class ExternalContext(object):
+    """
+    External context implementation.
+
+    .. attribute:: broker
+        The :class:`mitogen.core.Broker` instance.
+
+    .. attribute:: context
+        The :class:`mitogen.core.Context` instance.
+
+    .. attribute:: channel
+        The :class:`mitogen.core.Channel` over which :data:`CALL_FUNCTION`
+        requests are received.
+
+    .. attribute:: importer
+        The :class:`mitogen.core.Importer` instance.
+
+    .. attribute:: stdout_log
+        The :class:`IoLogger` connected to ``stdout``.
+
+    .. attribute:: stderr_log
+        The :class:`IoLogger` connected to ``stderr``.
+
+    .. method:: _dispatch_calls
+        Implementation for the main thread in every child context.
+    """
+    #: Set True by detach(); alters how parent disconnection is handled.
+    detached = False
+
+    def __init__(self, config):
+        #: Bootstrap configuration dict supplied by the parent context.
+        self.config = config
+
+    def _on_broker_exit(self):
+        # The broker is gone, so nothing more can be sent or received;
+        # terminate the process. Skipped under profiling -- presumably so the
+        # interpreter can exit normally and flush profile data; TODO confirm.
+        if not self.config['profiling']:
+            os.kill(os.getpid(), signal.SIGTERM)
+
+    #: On Python >3.4, the global importer lock has been sharded into a
+    #: per-module lock, meaning there is no guarantee the import statement in
+    #: service_stub_main will be truly complete before a second thread
+    #: attempting the same import will see a partially initialized module.
+    #: Sigh. Therefore serialize execution of the stub itself.
+    service_stub_lock = threading.Lock()
+
+    def _service_stub_main(self, msg):
+        # Thread target started by _on_call_service_msg(): import the real
+        # service implementation and deliver `msg` to its pool's receiver.
+        self.service_stub_lock.acquire()
+        try:
+            import mitogen.service
+            pool = mitogen.service.get_or_create_pool(router=self.router)
+            pool._receiver._on_receive(msg)
+        finally:
+            self.service_stub_lock.release()
+
+    def _on_call_service_msg(self, msg):
+        """
+        Stub service handler. Start a thread to import the mitogen.service
+        implementation from, and deliver the message to the newly constructed
+        pool. This must be done as CALL_SERVICE for e.g. PushFileService may
+        race with a CALL_FUNCTION blocking the main thread waiting for a result
+        from that service.
+        """
+        if not msg.is_dead:
+            th = threading.Thread(target=self._service_stub_main, args=(msg,))
+            th.start()
+
+    def _on_shutdown_msg(self, msg):
+        # SHUTDOWN request from a parent: begin graceful broker shutdown.
+        _v and LOG.debug('_on_shutdown_msg(%r)', msg)
+        if not msg.is_dead:
+            self.broker.shutdown()
+
+    def _on_parent_disconnect(self):
+        if self.detached:
+            # Expected: losing the parent is the final step of detach().
+            mitogen.parent_ids = []
+            mitogen.parent_id = None
+            LOG.info('Detachment complete')
+        else:
+            _v and LOG.debug('%r: parent stream is gone, dying.', self)
+            self.broker.shutdown()
+
+    def detach(self):
+        """
+        Detach from the parent context: notify it with a DETACHING message,
+        wait up to 2000ms (20 x 50ms) for buffered output to drain, then
+        disconnect the parent stream.
+        """
+        self.detached = True
+        stream = self.router.stream_by_id(mitogen.parent_id)
+        if stream:  # not double-detach()'d
+            os.setsid()
+            self.parent.send_await(Message(handle=DETACHING))
+            LOG.info('Detaching from %r; parent is %s', stream, self.parent)
+            for x in range(20):
+                pending = self.broker.defer_sync(lambda: stream.pending_bytes())
+                if not pending:
+                    break
+                time.sleep(0.05)
+            if pending:
+                LOG.error('Stream had %d bytes after 2000ms', pending)
+            self.broker.defer(stream.on_disconnect, self.broker)
+
+    def _setup_master(self):
+        """
+        Construct the broker, router, parent stream and the SHUTDOWN /
+        CALL_SERVICE message handlers from the bootstrap config.
+        """
+        Router.max_message_size = self.config['max_message_size']
+        if self.config['profiling']:
+            enable_profiling()
+        self.broker = Broker(activate_compat=False)
+        self.router = Router(self.broker)
+        self.router.debug = self.config.get('debug', False)
+        # NOTE(review): 'undirectional' looks like a typo for 'unidirectional'
+        # -- verify against the Router attribute name before changing.
+        self.router.undirectional = self.config['unidirectional']
+        self.router.add_handler(
+            fn=self._on_shutdown_msg,
+            handle=SHUTDOWN,
+            policy=has_parent_authority,
+        )
+        self.router.add_handler(
+            fn=self._on_call_service_msg,
+            handle=CALL_SERVICE,
+            policy=has_parent_authority,
+        )
+        self.master = Context(self.router, 0, 'master')
+        parent_id = self.config['parent_ids'][0]
+        if parent_id == 0:
+            # Our immediate parent is the master itself.
+            self.parent = self.master
+        else:
+            self.parent = Context(self.router, parent_id, 'parent')
+
+        in_fd = self.config.get('in_fd', 100)
+        out_fd = self.config.get('out_fd', 1)
+        self.stream = Stream(self.router, parent_id)
+        self.stream.name = 'parent'
+        self.stream.accept(in_fd, out_fd)
+        self.stream.receive_side.keep_alive = False
+
+        listen(self.stream, 'disconnect', self._on_parent_disconnect)
+        listen(self.broker, 'exit', self._on_broker_exit)
+
+        # Presumably accept() duplicated the descriptor, leaving the original
+        # in_fd redundant -- TODO confirm against Stream.accept().
+        os.close(in_fd)
+
+    def _reap_first_stage(self):
+        # Collect the exit status of the first-stage bootstrap process so it
+        # does not linger as a zombie.
+        try:
+            os.wait()  # Reap first stage.
+        except OSError:
+            pass  # No first stage exists (e.g. fakessh)
+
+    def _setup_logging(self):
+        """Route the root logger to the master context via LogHandler."""
+        self.log_handler = LogHandler(self.master)
+        root = logging.getLogger()
+        root.setLevel(self.config['log_level'])
+        root.handlers = [self.log_handler]
+        if self.config['debug']:
+            enable_debug_logging()
+
+    def _setup_importer(self):
+        """
+        Install the Importer on sys.meta_path, either reusing one supplied in
+        config or constructing a new one, optionally reading this module's own
+        source from the descriptor named by `core_src_fd`.
+        """
+        importer = self.config.get('importer')
+        if importer:
+            importer._install_handler(self.router)
+            importer._context = self.parent
+        else:
+            core_src_fd = self.config.get('core_src_fd', 101)
+            if core_src_fd:
+                fp = os.fdopen(core_src_fd, 'rb', 1)
+                try:
+                    core_src = fp.read()
+                    # Strip "ExternalContext.main()" call from last line.
+                    core_src = b('\n').join(core_src.splitlines()[:-1])
+                finally:
+                    fp.close()
+            else:
+                core_src = None
+
+            importer = Importer(
+                self.router,
+                self.parent,
+                core_src,
+                self.config.get('whitelist', ()),
+                self.config.get('blacklist', ()),
+            )
+
+        self.importer = importer
+        self.router.importer = importer
+        sys.meta_path.insert(0, self.importer)
+
+    def _setup_package(self):
+        """
+        Synthesize the 'mitogen' package and alias this module (running as
+        __main__) to 'mitogen.core' so absolute imports resolve in the child.
+        """
+        global mitogen
+        mitogen = imp.new_module('mitogen')
+        mitogen.__package__ = 'mitogen'
+        mitogen.__path__ = []
+        mitogen.__loader__ = self.importer
+        mitogen.main = lambda *args, **kwargs: (lambda func: None)
+        mitogen.core = sys.modules['__main__']
+        mitogen.core.__file__ = 'x/mitogen/core.py'  # For inspect.getsource()
+        mitogen.core.__loader__ = self.importer
+        sys.modules['mitogen'] = mitogen
+        sys.modules['mitogen.core'] = mitogen.core
+        del sys.modules['__main__']
+
+    def _setup_globals(self):
+        # Publish this context's identity on the synthetic mitogen package.
+        mitogen.is_master = False
+        mitogen.__version__ = self.config['version']
+        mitogen.context_id = self.config['context_id']
+        mitogen.parent_ids = self.config['parent_ids'][:]
+        mitogen.parent_id = mitogen.parent_ids[0]
+
+    def _nullify_stdio(self):
+        """
+        Open /dev/null to replace stdin, and stdout/stderr temporarily. In case
+        of odd startup, assume we may be allocated a standard handle.
+        """
+        fd = os.open('/dev/null', os.O_RDWR)
+        try:
+            for stdfd in (0, 1, 2):
+                if fd != stdfd:
+                    os.dup2(fd, stdfd)
+        finally:
+            if fd not in (0, 1, 2):
+                os.close(fd)
+
+    def _setup_stdio(self):
+        """
+        Replace the standard IO handles, attaching IoLoggers to stdout and
+        stderr so child output is forwarded over the broker.
+        """
+        # #481: when stderr is a TTY due to being started via
+        # tty_create_child()/hybrid_tty_create_child(), and some privilege
+        # escalation tool like prehistoric versions of sudo exec this process
+        # over the top of itself, there is nothing left to keep the slave PTY
+        # open after we replace our stdio. Therefore if stderr is a TTY, keep
+        # around a permanent dup() to avoid receiving SIGHUP.
+        try:
+            if os.isatty(2):
+                self.reserve_tty_fd = os.dup(2)
+                set_cloexec(self.reserve_tty_fd)
+        except OSError:
+            pass
+        # When sys.stdout was opened by the runtime, overwriting it will not
+        # close FD 1. However when forking from a child that previously used
+        # fdopen(), overwriting it /will/ close FD 1. So we must swallow the
+        # close before IoLogger overwrites FD 1, otherwise its new FD 1 will be
+        # clobbered. Additionally, stdout must be replaced with /dev/null prior
+        # to stdout.close(), since if block buffering was active in the parent,
+        # any pre-fork buffered data will be flushed on close(), corrupting the
+        # connection to the parent.
+        self._nullify_stdio()
+        sys.stdout.close()
+        self._nullify_stdio()
+
+        self.stdout_log = IoLogger(self.broker, 'stdout', 1)
+        self.stderr_log = IoLogger(self.broker, 'stderr', 2)
+        # Reopen with line buffering.
+        sys.stdout = os.fdopen(1, 'w', 1)
+
+    def main(self):
+        """
+        Bootstrap entry point: build the runtime piece by piece, then run the
+        dispatcher until shutdown, always stopping the broker on the way out.
+        """
+        self._setup_master()
+        try:
+            try:
+                self._setup_logging()
+                self._setup_importer()
+                self._reap_first_stage()
+                if self.config.get('setup_package', True):
+                    self._setup_package()
+                self._setup_globals()
+                if self.config.get('setup_stdio', True):
+                    self._setup_stdio()
+
+                self.dispatcher = Dispatcher(self)
+                self.router.register(self.parent, self.stream)
+                self.router._setup_logging()
+                self.log_handler.uncork()
+
+                sys.executable = os.environ.pop('ARGV0', sys.executable)
+                _v and LOG.debug('Connected to context %s; my ID is %r',
+                                 self.parent, mitogen.context_id)
+                _v and LOG.debug('pid:%r ppid:%r uid:%r/%r, gid:%r/%r host:%r',
+                                 os.getpid(), os.getppid(), os.geteuid(),
+                                 os.getuid(), os.getegid(), os.getgid(),
+                                 socket.gethostname())
+                _v and LOG.debug('Recovered sys.executable: %r', sys.executable)
+
+                self.broker._py24_25_compat()
+                self.dispatcher.run()
+                _v and LOG.debug('ExternalContext.main() normal exit')
+            except KeyboardInterrupt:
+                LOG.debug('KeyboardInterrupt received, exiting gracefully.')
+            except BaseException:
+                LOG.exception('ExternalContext.main() crashed')
+                raise
+        finally:
+            self.broker.shutdown()
+            self.broker.join()
diff --git a/ansible/plugins/mitogen-0.2.6/mitogen/debug.py b/ansible/plugins/mitogen-0.2.6/mitogen/debug.py
new file mode 100644
index 000000000..3d13347f0
--- /dev/null
+++ b/ansible/plugins/mitogen-0.2.6/mitogen/debug.py
@@ -0,0 +1,236 @@
+# Copyright 2019, David Wilson
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its contributors
+# may be used to endorse or promote products derived from this software without
+# specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+
+# !mitogen: minify_safe
+
+"""
+Basic signal handler for dumping thread stacks.
+"""
+
+import difflib
+import logging
+import os
+import gc
+import signal
+import sys
+import threading
+import time
+import traceback
+
+import mitogen.core
+import mitogen.parent
+
+
+LOG = logging.getLogger(__name__)
+_last = None
+
+
+def enable_evil_interrupts():
+    """
+    Install a no-op SIGALRM handler and arrange for the signal to fire every
+    10ms, so blocking calls are periodically interrupted and hung threads can
+    be inspected.  Debugging aid only; hence "evil".
+    """
+    signal.signal(signal.SIGALRM, (lambda a, b: None))
+    signal.setitimer(signal.ITIMER_REAL, 0.01, 0.01)
+
+
+def disable_evil_interrupts():
+    """Cancel the periodic timer set by :func:`enable_evil_interrupts`."""
+    signal.setitimer(signal.ITIMER_REAL, 0, 0)
+
+
+def _hex(n):
+    """Format `n` as a zero-padded 8-digit hexadecimal string."""
+    return '%08x' % n
+
+
+def get_subclasses(klass):
+    """
+    Rather than statically import every interesting subclass, forcing it all to
+    be transferred and potentially disrupting the debugged environment,
+    enumerate only those loaded in memory. Also returns the original class.
+    """
+    # Iterative depth-first walk of the __subclasses__() graph.
+    stack = [klass]
+    seen = set()
+    while stack:
+        klass = stack.pop()
+        seen.add(klass)
+        stack.extend(klass.__subclasses__())
+    return seen
+
+
+def get_routers():
+    """
+    Return a dict mapping hex object ID -> router for every
+    :class:`mitogen.core.Router` (or subclass) instance currently referenced
+    in memory, discovered via the garbage collector.
+    """
+    return dict(
+        (_hex(id(router)), router)
+        for klass in get_subclasses(mitogen.core.Router)
+        for router in gc.get_referrers(klass)
+        if isinstance(router, mitogen.core.Router)
+    )
+
+
+def get_router_info():
+    """
+    Return a summary dict for every live router: stream, context and handler
+    counts keyed by the router's hex ID.
+    """
+    return {
+        'routers': dict(
+            (id_, {
+                'id': id_,
+                # set() deduplicates streams/contexts registered under
+                # multiple IDs before counting.
+                'streams': len(set(router._stream_by_id.values())),
+                'contexts': len(set(router._context_by_id.values())),
+                'handles': len(router._handle_map),
+            })
+            for id_, router in get_routers().items()
+        )
+    }
+
+
+def get_stream_info(router_id):
+    router = get_routers().get(router_id)
+    return {
+        'streams': dict(
+            (_hex(id(stream)), ({
+                'name': stream.name,
+                'remote_id': stream.remote_id,
+                'sent_module_count': len(getattr(stream, 'sent_modules', [])),
+                'routes': sorted(getattr(stream, 'routes', [])),
+                'type': type(stream).__module__,
+            }))
+            for via_id, stream in router._stream_by_id.items()
+        )
+    }
+
+
+def format_stacks():
+    """
+    Return a human-readable dump of every thread's current Python stack,
+    labelled with PID, thread name and thread ID, built from
+    :func:`sys._current_frames`.
+    """
+    # Map thread ident -> human-readable name for labelling each stack.
+    name_by_id = dict(
+        (t.ident, t.name)
+        for t in threading.enumerate()
+    )
+
+    l = ['', '']
+    for threadId, stack in sys._current_frames().items():
+        l += ["# PID %d ThreadID: (%s) %s; %r" % (
+            os.getpid(),
+            name_by_id.get(threadId, '<no name>'),
+            threadId,
+            stack,
+        )]
+        #stack = stack.f_back.f_back
+
+        for filename, lineno, name, line in traceback.extract_stack(stack):
+            l += [
+                'File: "%s", line %d, in %s' % (
+                    filename,
+                    lineno,
+                    name
+                )
+            ]
+            if line:
+                l += ['    ' + line.strip()]
+        l += ['']
+
+    l += ['', '']
+    return '\n'.join(l)
+
+
+def get_snapshot():
+    """
+    Return :func:`format_stacks` output, with a unified diff against the
+    snapshot captured by the previous call (module global ``_last``)
+    appended, making stuck threads easy to spot across successive dumps.
+    """
+    global _last
+
+    s = format_stacks()
+    snap = s
+    if _last:
+        snap += '\n'
+        diff = list(difflib.unified_diff(
+            a=_last.splitlines(),
+            b=s.splitlines(),
+            fromfile='then',
+            tofile='now'
+        ))
+
+        if diff:
+            snap += '\n'.join(diff) + '\n'
+        else:
+            snap += '(no change since last time)\n'
+    _last = s
+    return snap
+
+
+def _handler(*_):
+    """
+    Signal handler: write a thread-stack snapshot directly to the controlling
+    terminal, bypassing stdio which may be redirected.
+    """
+    # Buffer size 1 == line buffered, so output appears immediately.
+    fp = open('/dev/tty', 'w', 1)
+    fp.write(get_snapshot())
+    fp.close()
+
+
+def install_handler():
+    """Install :func:`_handler` as this process's SIGUSR2 handler."""
+    signal.signal(signal.SIGUSR2, _handler)
+
+
+def _logging_main(secs):
+    """Thread target: log a stack snapshot every `secs` seconds, forever."""
+    while True:
+        time.sleep(secs)
+        LOG.info('PERIODIC THREAD DUMP\n\n%s', get_snapshot())
+
+
+def dump_to_logger(secs=5):
+    """
+    Start a daemon thread that logs a thread-stack snapshot every `secs`
+    seconds via :func:`_logging_main`.
+    """
+    th = threading.Thread(
+        target=_logging_main,
+        kwargs={'secs': secs},
+        name='mitogen.debug.dump_to_logger',
+    )
+    # Daemonize so the dump thread never prevents interpreter exit.
+    th.setDaemon(True)
+    th.start()
+
+
+class ContextDebugger(object):
+    @classmethod
+    @mitogen.core.takes_econtext
+    def _configure_context(cls, econtext):
+        mitogen.parent.upgrade_router(econtext)
+        econtext.debugger = cls(econtext.router)
+
+    def __init__(self, router):
+        self.router = router
+        self.router.add_handler(
+            func=self._on_debug_msg,
+            handle=mitogen.core.DEBUG,
+            persist=True,
+            policy=mitogen.core.has_parent_authority,
+        )
+        mitogen.core.listen(router, 'register', self._on_stream_register)
+        LOG.debug('Context debugging configured.')
+
+    def _on_stream_register(self, context, stream):
+        LOG.debug('_on_stream_register: sending configure() to %r', stream)
+        context.call_async(ContextDebugger._configure_context)
+
+    def _on_debug_msg(self, msg):
+        if msg != mitogen.core._DEAD:
+            threading.Thread(
+                target=self._handle_debug_msg,
+                name='ContextDebuggerHandler',
+                args=(msg,)
+            ).start()
+
+    def _handle_debug_msg(self, msg):
+        try:
+            method, args, kwargs = msg.unpickle()
+            msg.reply(getattr(cls, method)(*args, **kwargs))
+        except Exception:
+            e = sys.exc_info()[1]
+            msg.reply(mitogen.core.CallError(e))
diff --git a/ansible/plugins/mitogen-0.2.6/mitogen/doas.py b/ansible/plugins/mitogen-0.2.6/mitogen/doas.py
new file mode 100644
index 000000000..1b687fb20
--- /dev/null
+++ b/ansible/plugins/mitogen-0.2.6/mitogen/doas.py
@@ -0,0 +1,113 @@
+# Copyright 2019, David Wilson
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its contributors
+# may be used to endorse or promote products derived from this software without
+# specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+
+# !mitogen: minify_safe
+
+import logging
+
+import mitogen.core
+import mitogen.parent
+from mitogen.core import b
+
+
+LOG = logging.getLogger(__name__)
+
+
+class PasswordError(mitogen.core.StreamError):
+    """Raised when a password is required but missing, or was rejected."""
+    pass
+
+
+class Stream(mitogen.parent.Stream):
+    create_child = staticmethod(mitogen.parent.hybrid_tty_create_child)
+    child_is_immediate_subprocess = False
+
+    username = 'root'
+    password = None
+    doas_path = 'doas'
+    password_prompt = b('Password:')
+    incorrect_prompts = (
+        b('doas: authentication failed'),
+    )
+
+    def construct(self, username=None, password=None, doas_path=None,
+                  password_prompt=None, incorrect_prompts=None, **kwargs):
+        super(Stream, self).construct(**kwargs)
+        if username is not None:
+            self.username = username
+        if password is not None:
+            self.password = password
+        if doas_path is not None:
+            self.doas_path = doas_path
+        if password_prompt is not None:
+            self.password_prompt = password_prompt.lower()
+        if incorrect_prompts is not None:
+            self.incorrect_prompts = map(str.lower, incorrect_prompts)
+
+    def _get_name(self):
+        return u'doas.' + mitogen.core.to_text(self.username)
+
+    def get_boot_command(self):
+        bits = [self.doas_path, '-u', self.username, '--']
+        bits = bits + super(Stream, self).get_boot_command()
+        LOG.debug('doas command line: %r', bits)
+        return bits
+
+    password_incorrect_msg = 'doas password is incorrect'
+    password_required_msg = 'doas password is required'
+
+    def _connect_input_loop(self, it):
+        password_sent = False
+        for buf in it:
+            LOG.debug('%r: received %r', self, buf)
+            if buf.endswith(self.EC0_MARKER):
+                self._ec0_received()
+                return
+            if any(s in buf.lower() for s in self.incorrect_prompts):
+                if password_sent:
+                    raise PasswordError(self.password_incorrect_msg)
+            elif self.password_prompt in buf.lower():
+                if self.password is None:
+                    raise PasswordError(self.password_required_msg)
+                if password_sent:
+                    raise PasswordError(self.password_incorrect_msg)
+                LOG.debug('sending password')
+                self.diag_stream.transmit_side.write(
+                    mitogen.core.to_text(self.password + '\n').encode('utf-8')
+                )
+                password_sent = True
+        raise mitogen.core.StreamError('bootstrap failed')
+
+    def _connect_bootstrap(self):
+        it = mitogen.parent.iter_read(
+            fds=[self.receive_side.fd, self.diag_stream.receive_side.fd],
+            deadline=self.connect_deadline,
+        )
+        try:
+            self._connect_input_loop(it)
+        finally:
+            it.close()
diff --git a/ansible/plugins/mitogen-0.2.6/mitogen/docker.py b/ansible/plugins/mitogen-0.2.6/mitogen/docker.py
new file mode 100644
index 000000000..0c0d40e77
--- /dev/null
+++ b/ansible/plugins/mitogen-0.2.6/mitogen/docker.py
@@ -0,0 +1,81 @@
+# Copyright 2019, David Wilson
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its contributors
+# may be used to endorse or promote products derived from this software without
+# specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+
+# !mitogen: minify_safe
+
+import logging
+
+import mitogen.core
+import mitogen.parent
+
+
+LOG = logging.getLogger(__name__)
+
+
+class Stream(mitogen.parent.Stream):
+    """
+    Stream that connects into an existing Docker container (``docker exec``)
+    or a throwaway container started from an image (``docker run --rm``).
+    """
+    child_is_immediate_subprocess = False
+
+    #: Name of an existing container to exec into; either this or `image`
+    #: must be supplied.
+    container = None
+    #: Image to run a temporary container from when no container is given.
+    image = None
+    #: Optional account passed to docker as --user=.
+    username = None
+    #: Path to the docker binary.
+    docker_path = 'docker'
+
+    # TODO: better way of capturing errors such as "No such container."
+    create_child_args = {
+        'merge_stdio': True
+    }
+
+    def construct(self, container=None, image=None,
+                  docker_path=None, username=None,
+                  **kwargs):
+        # NOTE(review): assert is stripped when Python runs with -O; an
+        # explicit exception may be preferable, but changing it would alter
+        # the exception type existing callers might catch.
+        assert container or image
+        super(Stream, self).construct(**kwargs)
+        if container:
+            self.container = container
+        if image:
+            self.image = image
+        if docker_path:
+            self.docker_path = docker_path
+        if username:
+            self.username = username
+
+    def _get_name(self):
+        # Stream name shown in logs, e.g. "docker.mycontainer".
+        return u'docker.' + (self.container or self.image)
+
+    def get_boot_command(self):
+        """
+        Return the argv used to bootstrap the child: ``docker exec`` for an
+        existing container, otherwise ``docker run --rm`` on the image.
+        """
+        args = ['--interactive']
+        if self.username:
+            args += ['--user=' + self.username]
+
+        bits = [self.docker_path]
+        if self.container:
+            bits += ['exec'] + args + [self.container]
+        elif self.image:
+            bits += ['run'] + args + ['--rm', self.image]
+
+        return bits + super(Stream, self).get_boot_command()
diff --git a/ansible/plugins/mitogen-0.2.6/mitogen/fakessh.py b/ansible/plugins/mitogen-0.2.6/mitogen/fakessh.py
new file mode 100644
index 000000000..d39a710d0
--- /dev/null
+++ b/ansible/plugins/mitogen-0.2.6/mitogen/fakessh.py
@@ -0,0 +1,461 @@
+# Copyright 2019, David Wilson
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its contributors
+# may be used to endorse or promote products derived from this software without
+# specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+
+# !mitogen: minify_safe
+
+"""
+:mod:`mitogen.fakessh` is a stream implementation that starts a subprocess with
+its environment modified such that ``PATH`` searches for `ssh` return a Mitogen
+implementation of SSH. When invoked, this implementation arranges for the
+command line supplied by the caller to be executed in a remote context, reusing
+the parent context's (possibly proxied) connection to that remote context.
+
+This allows tools like `rsync` and `scp` to transparently reuse the connections
+and tunnels already established by the host program to connect to a target
+machine, without wasteful redundant SSH connection setup, 3-way handshakes, or
+firewall hopping configurations, and enables these tools to be used in
+impossible scenarios, such as over `sudo` with ``requiretty`` enabled.
+
+The fake `ssh` command source is written to a temporary file on disk, and
+consists of a copy of the :py:mod:`mitogen.core` source code (just like any
+other child context), with a line appended to cause it to connect back to the
+host process over an FD it inherits. As there is no reliance on an existing
+filesystem file, it is possible for child contexts to use fakessh.
+
+As a consequence of connecting back through an inherited FD, only one SSH
+invocation is possible, which is fine for tools like `rsync`, however in future
+this restriction will be lifted.
+
+Sequence:
+
+    1. ``fakessh`` Context and Stream created by parent context. The stream's
+       buffer has a :py:func:`_fakessh_main` :py:data:`CALL_FUNCTION
+       <mitogen.core.CALL_FUNCTION>` enqueued.
+    2. Target program (`rsync/scp/sftp`) invoked, which internally executes
+       `ssh` from ``PATH``.
+    3. :py:mod:`mitogen.core` bootstrap begins, recovers the stream FD
+       inherited via the target program, establishes itself as the fakessh
+       context.
+    4. :py:func:`_fakessh_main` :py:data:`CALL_FUNCTION
+       <mitogen.core.CALL_FUNCTION>` is read by fakessh context,
+
+        a. sets up :py:class:`IoPump` for stdio, registers
+           stdin_handle for local context.
+        b. Enqueues :py:data:`CALL_FUNCTION <mitogen.core.CALL_FUNCTION>` for
+           :py:func:`_start_slave` invoked in target context,
+
+            i. the program from the `ssh` command line is started
+            ii. sets up :py:class:`IoPump` for `ssh` command line process's
+                stdio pipes
+            iii. returns `(control_handle, stdin_handle)` to
+                 :py:func:`_fakessh_main`
+
+    5. :py:func:`_fakessh_main` receives control/stdin handles from
+       :py:func:`_start_slave`,
+
+        a. registers remote's stdin_handle with local :py:class:`IoPump`.
+        b. sends `("start", local_stdin_handle)` to remote's control_handle
+        c. registers local :py:class:`IoPump` with
+           :py:class:`mitogen.core.Broker`.
+        d. loops waiting for `local stdout closed && remote stdout closed`
+
+    6. :py:func:`_start_slave` control channel receives `("start", stdin_handle)`,
+
+        a. registers remote's stdin_handle with local :py:class:`IoPump`
+        b. registers local :py:class:`IoPump` with
+           :py:class:`mitogen.core.Broker`.
+        c. loops waiting for `local stdout closed && remote stdout closed`
+"""
+
+import getopt
+import inspect
+import os
+import shutil
+import socket
+import subprocess
+import sys
+import tempfile
+import threading
+
+import mitogen.core
+import mitogen.master
+import mitogen.parent
+
+from mitogen.core import LOG, IOLOG
+
+
+# getopt specification mirroring the option letters accepted by the OpenSSH
+# client, so the fake `ssh` can parse command lines built by rsync/scp/etc.
+SSH_GETOPTS = (
+    "1246ab:c:e:fgi:kl:m:no:p:qstvx"
+    "ACD:E:F:I:KL:MNO:PQ:R:S:TVw:W:XYy"
+)
+
+# Handle used by exit() to reach the broker. NOTE(review): no assignment
+# beyond this None default is visible in this file -- presumably installed
+# during fakessh bootstrap; confirm against mitogen.core.
+_mitogen = None
+
+
+class IoPump(mitogen.core.BasicStream):
+    """
+    Couple a pair of raw file descriptors to the broker: bytes read from
+    `stdout_fd` are published as 'receive' events, while bytes passed to
+    write() are buffered and flushed to `stdin_fd` as the broker reports the
+    descriptor writable.
+    """
+    # Bytes accepted by write() but not yet flushed to transmit_side.
+    _output_buf = ''
+    # Set by close(); triggers disconnect once the output buffer drains.
+    _closed = False
+
+    def __init__(self, broker, stdin_fd, stdout_fd):
+        self._broker = broker
+        self.receive_side = mitogen.core.Side(self, stdout_fd)
+        self.transmit_side = mitogen.core.Side(self, stdin_fd)
+
+    def write(self, s):
+        # Buffer and ask the broker to call on_transmit() when writable.
+        self._output_buf += s
+        self._broker._start_transmit(self)
+
+    def close(self):
+        self._closed = True
+        # If local process hasn't exited yet, ensure its write buffer is
+        # drained before lazily triggering disconnect in on_transmit.
+        if self.transmit_side.fd is not None:
+            self._broker._start_transmit(self)
+
+    def on_shutdown(self, broker):
+        # Broker shutdown: behave as if the user closed the pump.
+        self.close()
+
+    def on_transmit(self, broker):
+        # Flush as much buffered output as the descriptor accepts; a None
+        # write result indicates the peer went away.
+        written = self.transmit_side.write(self._output_buf)
+        IOLOG.debug('%r.on_transmit() -> len %r', self, written)
+        if written is None:
+            self.on_disconnect(broker)
+        else:
+            self._output_buf = self._output_buf[written:]
+
+        if not self._output_buf:
+            broker._stop_transmit(self)
+            if self._closed:
+                self.on_disconnect(broker)
+
+    def on_receive(self, broker):
+        # Empty read means EOF on the receive descriptor.
+        s = self.receive_side.read()
+        IOLOG.debug('%r.on_receive() -> len %r', self, len(s))
+        if s:
+            mitogen.core.fire(self, 'receive', s)
+        else:
+            self.on_disconnect(broker)
+
+    def __repr__(self):
+        return 'IoPump(%r, %r)' % (
+            self.receive_side.fd,
+            self.transmit_side.fd,
+        )
+
+
+class Process(object):
+    """
+    Manages the lifetime and pipe connections of the SSH command running in the
+    slave. The same class serves both ends: the slave side wraps a real
+    subprocess (`proc` set), the master side pumps the local stdio (`proc`
+    None).
+    """
+    def __init__(self, router, stdin_fd, stdout_fd, proc=None):
+        self.router = router
+        self.stdin_fd = stdin_fd
+        self.stdout_fd = stdout_fd
+        # subprocess.Popen on the slave side; None on the master side.
+        self.proc = proc
+        # Handles the peer uses to send us control messages / stdin data.
+        self.control_handle = router.add_handler(self._on_control)
+        self.stdin_handle = router.add_handler(self._on_stdin)
+        self.pump = IoPump(router.broker, stdin_fd, stdout_fd)
+        # Senders towards the peer; set by start_master() or _on_start().
+        self.stdin = None
+        self.control = None
+        self.wake_event = threading.Event()
+
+        mitogen.core.listen(self.pump, 'disconnect', self._on_pump_disconnect)
+        mitogen.core.listen(self.pump, 'receive', self._on_pump_receive)
+
+        if proc:
+            pmon = mitogen.parent.ProcessMonitor.instance()
+            pmon.add(proc.pid, self._on_proc_exit)
+
+    def __repr__(self):
+        return 'Process(%r, %r)' % (self.stdin_fd, self.stdout_fd)
+
+    def _on_proc_exit(self, status):
+        # Child reaped: forward its exit status to the peer.
+        LOG.debug('%r._on_proc_exit(%r)', self, status)
+        self.control.put(('exit', status))
+
+    def _on_stdin(self, msg):
+        """
+        Receive stdin data from the peer and feed it to the pump; a dead
+        message means the peer closed its side, so close ours too.
+        """
+        if msg.is_dead:
+            # BUG FIX: previously logged the unbound local `data` here,
+            # raising NameError on the dead-message path; log the message.
+            IOLOG.debug('%r._on_stdin() -> %r', self, msg)
+            self.pump.close()
+            return
+
+        data = msg.unpickle()
+        IOLOG.debug('%r._on_stdin() -> len %d', self, len(data))
+        self.pump.write(data)
+
+    def _on_control(self, msg):
+        """
+        Dispatch a `(command, arg)` control message to the matching
+        `_on_<command>` method, warning on unknown commands.
+        """
+        if not msg.is_dead:
+            command, arg = msg.unpickle(throw=False)
+            LOG.debug('%r._on_control(%r, %s)', self, command, arg)
+
+            func = getattr(self, '_on_%s' % (command,), None)
+            if func:
+                return func(msg, arg)
+
+            LOG.warning('%r: unknown command %r', self, command)
+
+    def _on_start(self, msg, arg):
+        # Peer announced its (control_handle, stdin_handle); begin pumping.
+        dest = mitogen.core.Context(self.router, msg.src_id)
+        self.control = mitogen.core.Sender(dest, arg[0])
+        self.stdin = mitogen.core.Sender(dest, arg[1])
+        self.router.broker.start_receive(self.pump)
+
+    def _on_exit(self, msg, arg):
+        LOG.debug('on_exit: proc = %r', self.proc)
+        if self.proc:
+            # Slave side: terminate the wrapped subprocess.
+            self.proc.terminate()
+        else:
+            # Master side: nothing to kill, stop the whole broker instead.
+            self.router.broker.shutdown()
+
+    def _on_pump_receive(self, s):
+        # Bytes read locally: forward to the peer's stdin handle.
+        IOLOG.info('%r._on_pump_receive(len %d)', self, len(s))
+        self.stdin.put(s)
+
+    def _on_pump_disconnect(self):
+        LOG.debug('%r._on_pump_disconnect()', self)
+        mitogen.core.fire(self, 'disconnect')
+        self.stdin.close()
+        self.wake_event.set()
+
+    def start_master(self, stdin, control):
+        """
+        Master-side wiring: record senders towards the slave, announce our
+        handles via a 'start' command, then begin receiving on the pump.
+        """
+        self.stdin = stdin
+        self.control = control
+        control.put(('start', (self.control_handle, self.stdin_handle)))
+        self.router.broker.start_receive(self.pump)
+
+    def wait(self):
+        """
+        Block the calling thread until the pump disconnects.
+        """
+        while not self.wake_event.isSet():
+            # Timeout is used so that sleep is interruptible, as blocking
+            # variants of libc thread operations cannot be interrupted e.g. via
+            # KeyboardInterrupt. isSet() test and wait() are separate since in
+            # <2.7 wait() always returns None.
+            self.wake_event.wait(0.1)
+
+
+@mitogen.core.takes_router
+def _start_slave(src_id, cmdline, router):
+    """
+    This runs in the target context; it is invoked by _fakessh_main running in
+    the fakessh context immediately after startup. It starts the slave process
+    (to the point where it has a stdin_handle for the target but not yet a
+    stdout channel to write to) and returns the handles _fakessh_main needs
+    to complete the wiring.
+
+    :param src_id:
+        Context ID of the caller. NOTE(review): unused in the visible body --
+        confirm whether it is still needed.
+    :param str cmdline:
+        Shell command line to execute, as a single string.
+    :returns:
+        `(control_handle, stdin_handle)` of the new Process.
+    """
+    LOG.debug('_start_slave(%r, %r)', router, cmdline)
+
+    proc = subprocess.Popen(
+        cmdline,
+        # SSH server always uses user's shell.
+        shell=True,
+        # SSH server always executes new commands in the user's HOME.
+        cwd=os.path.expanduser('~'),
+
+        stdin=subprocess.PIPE,
+        stdout=subprocess.PIPE,
+    )
+
+    process = Process(
+        router,
+        proc.stdin.fileno(),
+        proc.stdout.fileno(),
+        proc,
+    )
+
+    return process.control_handle, process.stdin_handle
+
+
+#
+# SSH client interface.
+#
+
+
+def exit():
+    """
+    Request shutdown of the local broker, allowing the process to exit.
+    """
+    _mitogen.broker.shutdown()
+
+
+def die(msg, *args):
+    """
+    Write a printf-style formatted message to stderr and request shutdown.
+
+    NOTE(review): this neither raises nor calls sys.exit(), so control
+    returns to the caller after the shutdown request.
+    """
+    if args:
+        msg %= args
+    sys.stderr.write('%s\n' % (msg,))
+    exit()
+
+
+def parse_args():
+    """
+    Parse sys.argv as an `ssh` command line using OpenSSH's option letters.
+    getopt stops at the first non-option word (the hostname), so parsing is
+    restarted once after it to also collect options appearing later.
+
+    :returns:
+        `(hostname, allopts, args)` where `args` is the remote command words.
+    """
+    hostname = None
+    remain = sys.argv[1:]
+    allopts = []
+    restarted = 0
+
+    while remain and restarted < 2:
+        opts, args = getopt.getopt(remain, SSH_GETOPTS)
+        remain = remain[:]  # getopt bug!
+        allopts += opts
+        if not args:
+            break
+
+        if not hostname:
+            hostname = args.pop(0)
+            # Resume parsing from the word just after the hostname token.
+            remain = remain[remain.index(hostname) + 1:]
+
+        restarted += 1
+
+    return hostname, allopts, args
+
+
+@mitogen.core.takes_econtext
+def _fakessh_main(dest_context_id, econtext):
+    """
+    Entry point of the fakessh context: parse the `ssh` command line, start
+    the requested command in the destination context via _start_slave(), then
+    pump this process's stdio to/from the remote command.
+    """
+    hostname, opts, args = parse_args()
+    if not hostname:
+        die('Missing hostname')
+
+    subsystem = False
+    for opt, optarg in opts:
+        if opt == '-s':
+            subsystem = True
+        else:
+            LOG.debug('Warning option %s %s is ignored.', opt, optarg)
+
+    LOG.debug('hostname: %r', hostname)
+    LOG.debug('opts: %r', opts)
+    LOG.debug('args: %r', args)
+
+    if subsystem:
+        die('-s <subsystem> is not yet supported')
+
+    if not args:
+        die('fakessh: login mode not supported and no command specified')
+
+    dest = mitogen.parent.Context(econtext.router, dest_context_id)
+
+    # Even though SSH receives an argument vector, it still cats the vector
+    # together before sending to the server, the server just uses /bin/sh -c to
+    # run the command. We must remain puke-for-puke compatible.
+    control_handle, stdin_handle = dest.call(_start_slave,
+        mitogen.context_id, ' '.join(args))
+
+    LOG.debug('_fakessh_main: received control_handle=%r, stdin_handle=%r',
+              control_handle, stdin_handle)
+
+    # Pump our own stdio: IoPump will write remote output to FD 1 and read
+    # local input from FD 0.
+    process = Process(econtext.router, 1, 0)
+    process.start_master(
+        stdin=mitogen.core.Sender(dest, stdin_handle),
+        control=mitogen.core.Sender(dest, control_handle),
+    )
+    process.wait()
+    process.control.put(('exit', None))
+
+
+def _get_econtext_config(context, sock2):
+    """
+    Build the ExternalContext configuration dict embedded in the generated
+    fake `ssh` script. The child communicates over the inherited `sock2`;
+    note in_fd and out_fd are both that socket's descriptor.
+    """
+    # The child's parent chain is ours, with this context prepended.
+    parent_ids = mitogen.parent_ids[:]
+    parent_ids.insert(0, mitogen.context_id)
+    return {
+        'context_id': context.context_id,
+        'core_src_fd': None,
+        'debug': getattr(context.router, 'debug', False),
+        'in_fd': sock2.fileno(),
+        'log_level': mitogen.parent.get_log_level(),
+        'max_message_size': context.router.max_message_size,
+        'out_fd': sock2.fileno(),
+        'parent_ids': parent_ids,
+        'profiling': getattr(context.router, 'profiling', False),
+        'unidirectional': getattr(context.router, 'unidirectional', False),
+        'setup_stdio': False,
+        'version': mitogen.__version__,
+    }
+
+
+#
+# Public API.
+#
+
+@mitogen.core.takes_econtext
+@mitogen.core.takes_router
+def run(dest, router, args, deadline=None, econtext=None):
+    """
+    Run the command specified by `args` such that ``PATH`` searches for `ssh`
+    made by that command are redirected to a fake `ssh` script, causing any
+    remote program it tries to start to be executed in the context `dest`
+    using Mitogen instead.
+
+    :param mitogen.core.Context dest:
+        The destination context to execute the SSH command line in.
+    :param mitogen.core.Router router:
+        Router the temporary fakessh context is registered with.
+    :param list[str] args:
+        Command line arguments for local program, e.g.
+        ``['rsync', '/tmp', 'remote:/tmp']``
+    :param deadline:
+        Accepted for API compatibility; unused in this implementation.
+    :returns:
+        Exit status of the child process.
+    """
+    if econtext is not None:
+        mitogen.parent.upgrade_router(econtext)
+
+    context_id = router.allocate_id()
+    fakessh = mitogen.parent.Context(router, context_id)
+    fakessh.name = u'fakessh.%d' % (context_id,)
+
+    sock1, sock2 = socket.socketpair()
+
+    stream = mitogen.core.Stream(router, context_id)
+    stream.name = u'fakessh'
+    stream.accept(sock1.fileno(), sock1.fileno())
+    router.register(fakessh, stream)
+
+    # Held in socket buffer until process is booted.
+    fakessh.call_async(_fakessh_main, dest.context_id)
+
+    tmp_path = tempfile.mkdtemp(prefix='mitogen_fakessh')
+    try:
+        ssh_path = os.path.join(tmp_path, 'ssh')
+        fp = open(ssh_path, 'w')
+        try:
+            fp.write('#!%s\n' % (mitogen.parent.get_sys_executable(),))
+            fp.write(inspect.getsource(mitogen.core))
+            fp.write('\n')
+            # BUG FIX: previously passed the undefined name `context` here,
+            # raising NameError; the config must describe the fakessh
+            # context created above.
+            fp.write('ExternalContext(%r).main()\n' % (
+                _get_econtext_config(fakessh, sock2),
+            ))
+        finally:
+            fp.close()
+
+        os.chmod(ssh_path, int('0755', 8))
+        env = os.environ.copy()
+        env.update({
+            # Prepend our directory so `ssh` resolves to the fake script.
+            'PATH': '%s:%s' % (tmp_path, env.get('PATH', '')),
+            'ARGV0': mitogen.parent.get_sys_executable(),
+            'SSH_PATH': ssh_path,
+        })
+
+        proc = subprocess.Popen(args, env=env)
+        return proc.wait()
+    finally:
+        shutil.rmtree(tmp_path)
diff --git a/ansible/plugins/mitogen-0.2.6/mitogen/fork.py b/ansible/plugins/mitogen-0.2.6/mitogen/fork.py
new file mode 100644
index 000000000..d6685d70b
--- /dev/null
+++ b/ansible/plugins/mitogen-0.2.6/mitogen/fork.py
@@ -0,0 +1,223 @@
+# Copyright 2019, David Wilson
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its contributors
+# may be used to endorse or promote products derived from this software without
+# specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+
+# !mitogen: minify_safe
+
+import logging
+import os
+import random
+import sys
+import threading
+import traceback
+
+import mitogen.core
+import mitogen.parent
+
+
+# Module logger; note it logs under the shared top-level 'mitogen' name
+# rather than __name__.
+LOG = logging.getLogger('mitogen')
+
+# Python 2.4/2.5 cannot support fork+threads whatsoever, it doesn't even fix up
+# interpreter state. So 2.4/2.5 interpreters start .local() contexts for
+# isolation instead. Since we don't have any crazy memory sharing problems to
+# avoid, there is no virginal fork parent either. The child is started directly
+# from the login/become process. In future this will be default everywhere,
+# fork is brainwrong from the stone age.
+FORK_SUPPORTED = sys.version_info >= (2, 6)
+
+
+class Error(mitogen.core.StreamError):
+    """
+    Raised when the fork connection method cannot be used.
+    """
+    pass
+
+
+def fixup_prngs():
+    """
+    Add 256 bits of /dev/urandom to OpenSSL's PRNG in the child, and re-seed
+    the random package with the same data.
+    """
+    s = os.urandom(256 // 8)
+    random.seed(s)
+    # Only touch OpenSSL's PRNG if the ssl module is already loaded; avoid
+    # importing it solely for this.
+    if 'ssl' in sys.modules:
+        sys.modules['ssl'].RAND_add(s, 75.0)
+
+
+def reset_logging_framework():
+    """
+    After fork, ensure any logging.Handler locks are recreated, as a variety of
+    threads in the parent may have been using the logging package at the moment
+    of fork.
+
+    It is not possible to solve this problem in general; see
+    https://github.com/dw/mitogen/issues/150 for a full discussion.
+    """
+    # Replace the module-level lock, which may have been held by another
+    # thread at the instant of fork. NOTE: relies on the private
+    # logging._lock attribute.
+    logging._lock = threading.RLock()
+
+    # The root logger does not appear in the loggerDict.
+    for name in [None] + list(logging.Logger.manager.loggerDict):
+        for handler in logging.getLogger(name).handlers:
+            handler.createLock()
+
+    # Drop mitogen.core.LogHandler instances inherited from the parent.
+    root = logging.getLogger()
+    root.handlers = [
+        handler
+        for handler in root.handlers
+        if not isinstance(handler, mitogen.core.LogHandler)
+    ]
+
+
+def on_fork():
+    """
+    Should be called by any program integrating Mitogen each time the process
+    is forked, in the context of the new child.
+    """
+    reset_logging_framework()  # Must be first!
+    fixup_prngs()
+    # Recreate locks/state that were copied from the parent, possibly in a
+    # held state.
+    mitogen.core.Latch._on_fork()
+    mitogen.core.Side._on_fork()
+    mitogen.core.ExternalContext.service_stub_lock = threading.Lock()
+
+    # mitogen.service is optional; reset its pool lock only if loaded.
+    mitogen__service = sys.modules.get('mitogen.service')
+    if mitogen__service:
+        mitogen__service._pool_lock = threading.Lock()
+
+
+def handle_child_crash():
+    """
+    Respond to _child_main() crashing by ensuring the relevant exception is
+    logged to /dev/tty, then hard-exit the child.
+    """
+    # BUG FIX: the tty was previously opened in binary mode ('wb'), making
+    # the str write below raise TypeError on Python 3 and masking the crash
+    # being reported. Text mode behaves identically on Python 2.
+    tty = open('/dev/tty', 'w')
+    tty.write('\n\nFORKED CHILD PID %d CRASHED\n%s\n\n' % (
+        os.getpid(),
+        traceback.format_exc(),
+    ))
+    tty.close()
+    # _exit() so the crashed child never runs the parent's atexit handlers.
+    os._exit(1)
+
+
+class Stream(mitogen.parent.Stream):
+    """
+    Stream implementing the 'fork' connection method: the child context is
+    created via os.fork() rather than by executing a new interpreter, and
+    communicates with the parent over an inherited socketpair.
+    """
+    child_is_immediate_subprocess = True
+
+    #: Reference to the importer, if any, recovered from the parent.
+    importer = None
+
+    #: User-supplied function for cleaning up child process state.
+    on_fork = None
+
+    # Error text raised by construct() on pre-2.6 interpreters.
+    python_version_msg = (
+        "The mitogen.fork method is not supported on Python versions "
+        "prior to 2.6, since those versions made no attempt to repair "
+        "critical interpreter state following a fork. Please use the "
+        "local() method instead."
+    )
+
+    def construct(self, old_router, max_message_size, on_fork=None,
+                  debug=False, profiling=False, unidirectional=False,
+                  on_start=None):
+        """
+        Refuse interpreters that cannot fork safely, then configure the
+        stream. The importer is recovered from `old_router` so the child can
+        reuse the parent's module forwarding.
+        """
+        if not FORK_SUPPORTED:
+            raise Error(self.python_version_msg)
+
+        # fork method only supports a tiny subset of options.
+        # NOTE(review): a literal False is forwarded for `unidirectional`
+        # regardless of the argument's value -- confirm this is intentional.
+        super(Stream, self).construct(max_message_size=max_message_size,
+                                      debug=debug, profiling=profiling,
+                                      unidirectional=False)
+        self.on_fork = on_fork
+        self.on_start = on_start
+
+        responder = getattr(old_router, 'responder', None)
+        if isinstance(responder, mitogen.parent.ModuleForwarder):
+            self.importer = responder.importer
+
+    name_prefix = u'fork'
+
+    def start_child(self):
+        """
+        Fork the child. In the parent, return `(pid, fd, None)` where `fd` is
+        our end of the socketpair; in the child, run _wrap_child_main() and
+        never return.
+        """
+        parentfp, childfp = mitogen.parent.create_socketpair()
+        self.pid = os.fork()
+        if self.pid:
+            childfp.close()
+            # Decouple the socket from the lifetime of the Python socket object.
+            fd = os.dup(parentfp.fileno())
+            parentfp.close()
+            return self.pid, fd, None
+        else:
+            parentfp.close()
+            self._wrap_child_main(childfp)
+
+    def _wrap_child_main(self, childfp):
+        # Ensure any crash in the child is reported via /dev/tty rather than
+        # corrupting the stream shared with the parent.
+        try:
+            self._child_main(childfp)
+        except BaseException:
+            handle_child_crash()
+
+    def _child_main(self, childfp):
+        """
+        Post-fork setup in the child: repair interpreter state, arrange the
+        file descriptors ExternalContext.main() expects, run it, then _exit()
+        without ever returning to the (copied) parent stack.
+        """
+        on_fork()
+        if self.on_fork:
+            self.on_fork()
+        mitogen.core.set_block(childfp.fileno())
+
+        # Expected by the ExternalContext.main().
+        os.dup2(childfp.fileno(), 1)
+        os.dup2(childfp.fileno(), 100)
+
+        # Overwritten by ExternalContext.main(); we must replace the
+        # parent-inherited descriptors that were closed by Side._on_fork() to
+        # avoid ExternalContext.main() accidentally allocating new files over
+        # the standard handles.
+        os.dup2(childfp.fileno(), 0)
+
+        # Avoid corrupting the stream on fork crash by dupping /dev/null over
+        # stderr. Instead, handle_child_crash() uses /dev/tty to log errors.
+        devnull = os.open('/dev/null', os.O_WRONLY)
+        if devnull != 2:
+            os.dup2(devnull, 2)
+            os.close(devnull)
+
+        # If we're unlucky, childfp.fileno() may coincidentally be one of our
+        # desired FDs. In that case closing it breaks ExternalContext.main().
+        if childfp.fileno() not in (0, 1, 100):
+            childfp.close()
+
+        config = self.get_econtext_config()
+        config['core_src_fd'] = None
+        config['importer'] = self.importer
+        config['setup_package'] = False
+        if self.on_start:
+            config['on_start'] = self.on_start
+
+        try:
+            try:
+                mitogen.core.ExternalContext(config).main()
+            except Exception:
+                # TODO: report exception somehow.
+                os._exit(72)
+        finally:
+            # Don't trigger atexit handlers, they were copied from the parent.
+            os._exit(0)
+
+    def _connect_bootstrap(self):
+        # None required.
+        pass
diff --git a/ansible/plugins/mitogen-0.2.6/mitogen/jail.py b/ansible/plugins/mitogen-0.2.6/mitogen/jail.py
new file mode 100644
index 000000000..6e0ac68be
--- /dev/null
+++ b/ansible/plugins/mitogen-0.2.6/mitogen/jail.py
@@ -0,0 +1,65 @@
+# Copyright 2019, David Wilson
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its contributors
+# may be used to endorse or promote products derived from this software without
+# specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+
+# !mitogen: minify_safe
+
+import logging
+
+import mitogen.core
+import mitogen.parent
+
+
+LOG = logging.getLogger(__name__)
+
+
+class Stream(mitogen.parent.Stream):
+    """
+    Stream that boots a child inside a FreeBSD jail using jexec(8).
+    """
+    # jexec re-parents the booted process, so it is not our direct child.
+    child_is_immediate_subprocess = False
+    create_child_args = {
+        'merge_stdio': True
+    }
+
+    #: Name/ID of the target jail.
+    container = None
+    #: Account passed to jexec -U, when set.
+    username = None
+    #: Path to the jexec binary.
+    jexec_path = '/usr/sbin/jexec'
+
+    def construct(self, container, jexec_path=None, username=None, **kwargs):
+        """
+        Accept jail-specific options; `container` (the jail name) is required.
+        """
+        super(Stream, self).construct(**kwargs)
+        self.container = container
+        self.username = username
+        if jexec_path:
+            self.jexec_path = jexec_path
+
+    def _get_name(self):
+        # Context name shown in logs, e.g. u'jail.myjail'.
+        return u'jail.' + self.container
+
+    def get_boot_command(self):
+        """
+        Return argv that runs the superclass's bootstrap inside the jail.
+        """
+        bits = [self.jexec_path]
+        if self.username:
+            bits += ['-U', self.username]
+        bits += [self.container]
+        return bits + super(Stream, self).get_boot_command()
diff --git a/ansible/plugins/mitogen-0.2.6/mitogen/kubectl.py b/ansible/plugins/mitogen-0.2.6/mitogen/kubectl.py
new file mode 100644
index 000000000..ef626e1bc
--- /dev/null
+++ b/ansible/plugins/mitogen-0.2.6/mitogen/kubectl.py
@@ -0,0 +1,65 @@
+# Copyright 2018, Yannig Perre
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its contributors
+# may be used to endorse or promote products derived from this software without
+# specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+
+# !mitogen: minify_safe
+
+import logging
+
+import mitogen.core
+import mitogen.parent
+
+
+LOG = logging.getLogger(__name__)
+
+
+class Stream(mitogen.parent.Stream):
+    """
+    Stream that boots a child inside a Kubernetes pod via `kubectl exec`.
+    """
+    child_is_immediate_subprocess = True
+
+    #: Name of the target pod.
+    pod = None
+    #: Path to the kubectl binary.
+    kubectl_path = 'kubectl'
+    #: Extra kubectl arguments inserted before the `exec` subcommand.
+    kubectl_args = None
+
+    # TODO: better way of capturing errors such as "No such container."
+    create_child_args = {
+        'merge_stdio': True
+    }
+
+    def construct(self, pod, kubectl_path=None, kubectl_args=None, **kwargs):
+        """
+        Accept kubectl-specific options; `pod` is required (asserted).
+        """
+        super(Stream, self).construct(**kwargs)
+        assert pod
+        self.pod = pod
+        if kubectl_path:
+            self.kubectl_path = kubectl_path
+        self.kubectl_args = kubectl_args or []
+
+    def _get_name(self):
+        # Context name shown in logs; includes the extra kubectl args.
+        return u'kubectl.%s%s' % (self.pod, self.kubectl_args)
+
+    def get_boot_command(self):
+        """
+        Return argv running the superclass bootstrap inside the pod via
+        `kubectl ... exec -it <pod> -- ...`.
+        """
+        bits = [self.kubectl_path] + self.kubectl_args + ['exec', '-it', self.pod]
+        return bits + ["--"] + super(Stream, self).get_boot_command()
diff --git a/ansible/plugins/mitogen-0.2.6/mitogen/lxc.py b/ansible/plugins/mitogen-0.2.6/mitogen/lxc.py
new file mode 100644
index 000000000..879d19a16
--- /dev/null
+++ b/ansible/plugins/mitogen-0.2.6/mitogen/lxc.py
@@ -0,0 +1,75 @@
+# Copyright 2019, David Wilson
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its contributors
+# may be used to endorse or promote products derived from this software without
+# specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+
+# !mitogen: minify_safe
+
+import logging
+
+import mitogen.core
+import mitogen.parent
+
+
+LOG = logging.getLogger(__name__)
+
+
+class Stream(mitogen.parent.Stream):
+    """
+    Stream that boots a child inside an LXC container using lxc-attach(1).
+    """
+    # lxc-attach re-parents the booted process, so it is not our direct child.
+    child_is_immediate_subprocess = False
+    create_child_args = {
+        # If lxc-attach finds any of stdin, stdout, stderr connected to a TTY,
+        # to prevent input injection it creates a proxy pty, forcing all IO to
+        # be buffered in <4KiB chunks. So ensure stderr is also routed to the
+        # socketpair.
+        'merge_stdio': True
+    }
+
+    #: Name of the target container.
+    container = None
+    #: Path to the lxc-attach binary.
+    lxc_attach_path = 'lxc-attach'
+
+    # Extra text appended to EOF errors to aid diagnosis.
+    eof_error_hint = (
+        'Note: many versions of LXC do not report program execution failure '
+        'meaningfully. Please check the host logs (/var/log) for more '
+        'information.'
+    )
+
+    def construct(self, container, lxc_attach_path=None, **kwargs):
+        """
+        Accept LXC-specific options; `container` is required.
+        """
+        super(Stream, self).construct(**kwargs)
+        self.container = container
+        if lxc_attach_path:
+            self.lxc_attach_path = lxc_attach_path
+
+    def _get_name(self):
+        # Context name shown in logs, e.g. u'lxc.mycontainer'.
+        return u'lxc.' + self.container
+
+    def get_boot_command(self):
+        """
+        Return argv that runs the superclass bootstrap inside the container
+        with a cleared environment.
+        """
+        bits = [
+            self.lxc_attach_path,
+            '--clear-env',
+            '--name', self.container,
+            '--',
+        ]
+        return bits + super(Stream, self).get_boot_command()
diff --git a/ansible/plugins/mitogen-0.2.6/mitogen/lxd.py b/ansible/plugins/mitogen-0.2.6/mitogen/lxd.py
new file mode 100644
index 000000000..faea2561f
--- /dev/null
+++ b/ansible/plugins/mitogen-0.2.6/mitogen/lxd.py
@@ -0,0 +1,77 @@
+# Copyright 2019, David Wilson
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its contributors
+# may be used to endorse or promote products derived from this software without
+# specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+
+# !mitogen: minify_safe
+
+import logging
+
+import mitogen.core
+import mitogen.parent
+
+
+LOG = logging.getLogger(__name__)
+
+
class Stream(mitogen.parent.Stream):
    """
    Bootstrap a child context inside an LXD-managed container using the
    ``lxc`` command line client.
    """
    child_is_immediate_subprocess = False

    # If lxc finds any of stdin, stdout, stderr connected to a TTY, it creates
    # a proxy pty to prevent input injection, forcing all IO to be buffered in
    # <4KiB chunks. Routing stderr onto the socketpair too avoids that.
    create_child_args = {
        'merge_stdio': True
    }

    #: Name of the target container; set by construct().
    container = None
    #: Path to the lxc client binary, overridable via construct().
    lxc_path = 'lxc'
    #: Python interpreter to start inside the container.
    python_path = 'python'

    eof_error_hint = (
        'Note: many versions of LXC do not report program execution failure '
        'meaningfully. Please check the host logs (/var/log) for more '
        'information.'
    )

    def construct(self, container, lxc_path=None, **kwargs):
        """Record the container name and optional lxc client override."""
        super(Stream, self).construct(**kwargs)
        self.container = container
        if lxc_path:
            self.lxc_path = lxc_path

    def _get_name(self):
        # Streams are named after the container for log readability.
        return u'lxd.' + self.container

    def get_boot_command(self):
        # Wrap the parent's bootstrap command in an `lxc exec` invocation
        # targeting the container.
        prefix = [
            self.lxc_path,
            'exec',
            '--mode=noninteractive',
            self.container,
            '--',
        ]
        return prefix + super(Stream, self).get_boot_command()
diff --git a/ansible/plugins/mitogen-0.2.6/mitogen/master.py b/ansible/plugins/mitogen-0.2.6/mitogen/master.py
new file mode 100644
index 000000000..1396f4e10
--- /dev/null
+++ b/ansible/plugins/mitogen-0.2.6/mitogen/master.py
@@ -0,0 +1,1173 @@
+# Copyright 2019, David Wilson
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its contributors
+# may be used to endorse or promote products derived from this software without
+# specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+
+# !mitogen: minify_safe
+
+"""
+This module implements functionality required by master processes, such as
+starting new contexts via SSH. Its size is also restricted, since it must
+be sent to any context that will be used to establish additional child
+contexts.
+"""
+
+import dis
+import imp
+import inspect
+import itertools
+import logging
+import os
+import pkgutil
+import re
+import string
+import sys
+import time
+import threading
+import types
+import zlib
+
+if not hasattr(pkgutil, 'find_loader'):
+    # find_loader() was new in >=2.5, but the modern pkgutil.py syntax has
+    # been kept intentionally 2.3 compatible so we can reuse it.
+    from mitogen.compat import pkgutil
+
+import mitogen
+import mitogen.core
+import mitogen.minify
+import mitogen.parent
+
+from mitogen.core import b
+from mitogen.core import IOLOG
+from mitogen.core import LOG
+from mitogen.core import str_partition
+from mitogen.core import str_rpartition
+from mitogen.core import to_text
+
+imap = getattr(itertools, 'imap', map)
+izip = getattr(itertools, 'izip', zip)
+
+try:
+    any
+except NameError:
+    from mitogen.core import any
+
+try:
+    next
+except NameError:
+    from mitogen.core import next
+
+
+RLOG = logging.getLogger('mitogen.ctx')
+
+
def _stdlib_paths():
    """
    Return the set of directory prefixes from which the active interpreter
    imports its standard library.
    """
    # real_prefix is set only inside a virtualenv; base_prefix is set by venv
    # and equals prefix when running outside a virtual environment.
    candidates = ('prefix', 'real_prefix', 'base_prefix')
    prefixes = [getattr(sys, name) for name in candidates if hasattr(sys, name)]
    libdir = 'python%s.%s' % sys.version_info[0:2]
    return set(
        os.path.abspath(os.path.join(prefix, 'lib', libdir))
        for prefix in prefixes
    )
+
+
def is_stdlib_name(modname):
    """Return :data:`True` if `modname` appears to come from the standard
    library.
    """
    # Built-in (compiled-in) modules have no file path at all.
    if imp.is_builtin(modname) != 0:
        return True

    mod = sys.modules.get(modname)
    if mod is None:
        return False

    # Some packages (six, notably) register objects lacking __file__.
    filename = getattr(mod, '__file__', '')
    return is_stdlib_path(os.path.abspath(filename))
+
+
+_STDLIB_PATHS = _stdlib_paths()
+
+
def is_stdlib_path(path):
    """
    Return :data:`True` when `path` lies beneath one of the interpreter's
    standard library directories, excluding anything installed under
    site-packages or dist-packages.
    """
    if 'site-packages' in path or 'dist-packages' in path:
        return False
    for libpath in _STDLIB_PATHS:
        if os.path.commonprefix((libpath, path)) == libpath:
            return True
    return False
+
+
def get_child_modules(path):
    """Return the suffixes of submodules directly nested beneath the package
    directory at `path`.

    :param str path:
        Path to the module's source code on disk, or some PEP-302-recognized
        equivalent. Usually this is the module's ``__file__`` attribute, but
        is specified explicitly to avoid loading the module.

    :return:
        List of submodule name suffixes.
    """
    # iter_modules() yields (importer, name, is_pkg); only the names matter.
    it = pkgutil.iter_modules([os.path.dirname(path)])
    return [to_text(name) for _, name, _ in it]
+
+
def _get_core_source():
    """
    Master-side implementation of parent.get_core_source(): fetch the source
    of :mod:`mitogen.core` and minify it before it is shipped to children.
    """
    return mitogen.minify.minimize_source(inspect.getsource(mitogen.core))
+
+
if mitogen.is_master:
    # TODO: find a less surprising way of installing this.
    # Only the master has source files on disk, so only it can override the
    # parent module's stub with the inspect.getsource()-based implementation.
    mitogen.parent._get_core_source = _get_core_source
+
+
#: Numeric opcodes used by scan_code_imports() to recognize import statements
#: in compiled bytecode without executing any of it.
LOAD_CONST = dis.opname.index('LOAD_CONST')
IMPORT_NAME = dis.opname.index('IMPORT_NAME')
+
+
def _getarg(nextb, c):
    """
    Return the little-endian 16-bit argument for opcode `c`, consuming two
    bytes from the `nextb` callable, or :data:`None` for opcodes that take no
    argument (pre-3.6 bytecode encoding).
    """
    if c < dis.HAVE_ARGUMENT:
        return None
    low = nextb()
    high = nextb()
    return low | (high << 8)
+
+
if sys.version_info < (3, 0):
    def iter_opcodes(co):
        # Yield `(op, oparg)` tuples from the code object `co`.
        # Python 2: co_code is a str, so each byte must be ord()ed first.
        ordit = imap(ord, co.co_code)
        nextb = ordit.next
        return ((c, _getarg(nextb, c)) for c in ordit)
elif sys.version_info < (3, 6):
    def iter_opcodes(co):
        # Yield `(op, oparg)` tuples from the code object `co`.
        # Python 3.0-3.5: co_code is bytes, so iteration yields ints directly.
        ordit = iter(co.co_code)
        nextb = ordit.__next__
        return ((c, _getarg(nextb, c)) for c in ordit)
else:
    def iter_opcodes(co):
        # Yield `(op, oparg)` tuples from the code object `co`.
        # Python 3.6+ "wordcode": every opcode is exactly two bytes, the
        # second being its argument, so no HAVE_ARGUMENT test is needed.
        ordit = iter(co.co_code)
        nextb = ordit.__next__
        # https://github.com/abarnert/cpython/blob/c095a32f/Python/wordcode.md
        return ((c, nextb()) for c in ordit)
+
+
def scan_code_imports(co):
    """
    Given a code object `co`, scan its bytecode yielding any ``IMPORT_NAME``
    and associated prior ``LOAD_CONST`` instructions representing an `Import`
    statement or `ImportFrom` statement.

    :return:
        Generator producing `(level, modname, namelist)` tuples, where:

        * `level`: -1 for normal import, 0, for absolute import, and >0 for
          relative import.
        * `modname`: Name of module to import, or from where `namelist` names
          are imported.
        * `namelist`: for `ImportFrom`, the list of names to be imported from
          `modname`.
    """
    opit = iter_opcodes(co)
    # Three staggered views of the opcode stream let a single zip() match the
    # LOAD_CONST / LOAD_CONST / IMPORT_NAME triple an import compiles to.
    opit, opit2, opit3 = itertools.tee(opit, 3)

    try:
        next(opit2)
        next(opit3)
        next(opit3)
    except StopIteration:
        # Fewer than three opcodes: cannot contain an import.
        return

    if sys.version_info >= (2, 5):
        for oparg1, oparg2, (op3, arg3) in izip(opit, opit2, opit3):
            if op3 == IMPORT_NAME:
                op2, arg2 = oparg2
                op1, arg1 = oparg1
                if op1 == op2 == LOAD_CONST:
                    # Stack order: LOAD_CONST level, LOAD_CONST fromlist,
                    # IMPORT_NAME modname.
                    yield (co.co_consts[arg1],
                           co.co_names[arg3],
                           co.co_consts[arg2] or ())
    else:
        # Python 2.4 did not yet have 'level', so stack format differs.
        for oparg1, (op2, arg2) in izip(opit, opit2):
            if op2 == IMPORT_NAME:
                op1, arg1 = oparg1
                if op1 == LOAD_CONST:
                    yield (-1, co.co_names[arg2], co.co_consts[arg1] or ())
+
+
class ThreadWatcher(object):
    """
    Manage threads that wait for another thread to shut down, before invoking
    `on_join()` for each associated ThreadWatcher.

    In CPython it seems possible to use this method to ensure a non-main
    thread is signalled when the main thread has exited, using a third thread
    as a proxy.
    """
    #: Protects remaining _cls_* members.
    _cls_lock = threading.Lock()

    #: PID of the process that last modified the class data. If the PID
    #: changes, it means the thread watch dict refers to threads that no
    #: longer exist in the current process (since it forked), and so must be
    #: reset.
    _cls_pid = None

    #: Map watched Thread -> list of ThreadWatcher instances.
    _cls_instances_by_target = {}

    #: Map watched Thread -> watcher Thread for each watched thread.
    _cls_thread_by_target = {}

    def __init__(self, target, on_join):
        self.target = target
        self.on_join = on_join

    @classmethod
    def _reset(cls):
        """Discard all watch state if the process has forked since the state
        was recorded, as it then describes threads of a defunct process."""
        pid = os.getpid()
        if pid != cls._cls_pid:
            cls._cls_pid = pid
            cls._cls_instances_by_target.clear()
            cls._cls_thread_by_target.clear()

    @classmethod
    def _watch(cls, target):
        # Watcher-thread main: block until `target` exits, then fire every
        # callback registered against it.
        target.join()
        for watcher in cls._cls_instances_by_target[target]:
            watcher.on_join()

    def install(self):
        """Register this watcher, spawning a single watcher thread for
        `target` if one is not already running."""
        with self._cls_lock:
            self._reset()
            watchers = self._cls_instances_by_target.setdefault(self.target, [])
            watchers.append(self)
            if self.target not in self._cls_thread_by_target:
                thread = threading.Thread(
                    name='mitogen.master.join_thread_async',
                    target=self._watch,
                    args=(self.target,)
                )
                self._cls_thread_by_target[self.target] = thread
                thread.start()

    def remove(self):
        """Unregister this watcher; a no-op if it was never installed."""
        with self._cls_lock:
            self._reset()
            watchers = self._cls_instances_by_target.get(self.target, [])
            if self in watchers:
                watchers.remove(self)

    @classmethod
    def watch(cls, target, on_join):
        """Create and install a watcher in one step, returning it."""
        watcher = cls(target, on_join)
        watcher.install()
        return watcher
+
+
class LogForwarder(object):
    """
    Install a :data:`mitogen.core.FORWARD_LOG` handler that delivers forwarded
    log events into the local logging framework. This is used by the master's
    :class:`Router`.

    The forwarded :class:`logging.LogRecord` objects are delivered to loggers
    under ``mitogen.ctx.*`` corresponding to their
    :attr:`mitogen.core.Context.name`, with the message prefixed with the
    logger name used in the child. The records include some extra attributes:

    * ``mitogen_message``: Unicode original message without the logger name
      prepended.
    * ``mitogen_context``: :class:`mitogen.parent.Context` reference to the
      source context.
    * ``mitogen_name``: Original logger name.

    :param mitogen.master.Router router:
        Router to install the handler on.
    """
    def __init__(self, router):
        self._router = router
        #: Map of source context ID -> local Logger.
        self._cache = {}
        router.add_handler(
            fn=self._on_forward_log,
            handle=mitogen.core.FORWARD_LOG,
        )

    def _logger_for(self, src_id):
        # Return the cached local logger mirroring context `src_id`, creating
        # it on first use, or None when the context is unknown.
        try:
            return self._cache[src_id]
        except KeyError:
            context = self._router.context_by_id(src_id)
            if context is None:
                return None
            logger = logging.getLogger('%s.%s' % (RLOG.name, context.name))
            self._cache[src_id] = logger
            return logger

    def _on_forward_log(self, msg):
        if msg.is_dead:
            return

        logger = self._logger_for(msg.src_id)
        if logger is None:
            LOG.error('%s: dropping log from unknown context ID %d',
                      self, msg.src_id)
            return

        # Payload wire format: logger name, level, and message separated by
        # NUL bytes.
        name, level_s, s = msg.data.decode('latin1').split('\x00', 2)

        # See logging.Handler.makeRecord()
        record = logging.LogRecord(
            name=logger.name,
            level=int(level_s),
            pathname='(unknown file)',
            lineno=0,
            msg=('%s: %s' % (name, s)),
            args=(),
            exc_info=None,
        )
        record.mitogen_message = s
        record.mitogen_context = self._router.context_by_id(msg.src_id)
        record.mitogen_name = name
        logger.handle(record)

    def __repr__(self):
        return 'LogForwarder(%r)' % (self._router,)
+
+
class ModuleFinder(object):
    """
    Given the name of a loaded module, make a best-effort attempt at finding
    related modules likely needed by a child context requesting the original
    module.
    """
    def __init__(self):
        #: Import machinery is expensive, keep :py:meth:`get_module_source`
        #: results around.
        self._found_cache = {}

        #: Avoid repeated dependency scanning, which is expensive.
        self._related_cache = {}

    def __repr__(self):
        return 'ModuleFinder()'

    def _looks_like_script(self, path):
        """
        Return :data:`True` if the (possibly extensionless) file at `path`
        resembles a Python script. For now we simply verify the file contains
        ASCII text.
        """
        fp = open(path, 'rb')
        try:
            # Sample only the first 512 bytes to keep this cheap.
            sample = fp.read(512).decode('latin-1')
            return not set(sample).difference(string.printable)
        finally:
            fp.close()

    def _py_filename(self, path):
        """
        Map `path` to the corresponding ``.py`` source file, or return it
        unchanged if it already looks like a script; return :data:`None` when
        no plausible source exists.
        """
        if not path:
            return None

        if path[-4:] in ('.pyc', '.pyo'):
            # NOTE(review): rstrip('co') strips any trailing run of 'c'/'o'
            # characters; safe here only because the remainder always ends in
            # '.py' (the 'y' stops the strip).
            path = path.rstrip('co')

        if path.endswith('.py'):
            return path

        if os.path.exists(path) and self._looks_like_script(path):
            return path

    def _get_main_module_defective_python_3x(self, fullname):
        """
        Recent versions of Python 3.x introduced an incomplete notion of
        importer specs, and in doing so created permanent asymmetry in the
        :mod:`pkgutil` interface handling for the `__main__` module. Therefore
        we must handle `__main__` specially.

        :returns:
            `(path, source bytes, False)` tuple, or :data:`None` when
            `fullname` is not ``__main__`` or no script file can be found.
        """
        if fullname != '__main__':
            return None

        mod = sys.modules.get(fullname)
        if not mod:
            return None

        # NOTE(review): if __main__ lacks __file__ (e.g. interactive session),
        # `path` is None and os.path.exists(None) raises TypeError — confirm
        # callers never hit this case.
        path = getattr(mod, '__file__', None)
        if not (os.path.exists(path) and self._looks_like_script(path)):
            return None

        fp = open(path, 'rb')
        try:
            source = fp.read()
        finally:
            fp.close()

        return path, source, False

    def _get_module_via_pkgutil(self, fullname):
        """
        Attempt to fetch source code via pkgutil. In an ideal world, this would
        be the only required implementation of get_module().
        """
        try:
            # Pre-'import spec' this returned None, in Python3.6 it raises
            # ImportError.
            loader = pkgutil.find_loader(fullname)
        except ImportError:
            e = sys.exc_info()[1]
            LOG.debug('%r._get_module_via_pkgutil(%r): %s',
                      self, fullname, e)
            return None

        IOLOG.debug('%r._get_module_via_pkgutil(%r) -> %r',
                    self, fullname, loader)
        if not loader:
            return

        try:
            path = self._py_filename(loader.get_filename(fullname))
            source = loader.get_source(fullname)
            is_pkg = loader.is_package(fullname)
        except (AttributeError, ImportError):
            # - Per PEP-302, get_source() and is_package() are optional,
            #   calling them may throw AttributeError.
            # - get_filename() may throw ImportError if pkgutil.find_loader()
            #   picks a "parent" package's loader for some crap that's been
            #   stuffed in sys.modules, for example in the case of urllib3:
            #       "loader for urllib3.contrib.pyopenssl cannot handle
            #        requests.packages.urllib3.contrib.pyopenssl"
            e = sys.exc_info()[1]
            LOG.debug('%r: loading %r using %r failed: %s',
                      self, fullname, loader, e)
            return

        if path is None or source is None:
            return

        if isinstance(source, mitogen.core.UnicodeType):
            # get_source() returns "string" according to PEP-302, which was
            # reinterpreted for Python 3 to mean a Unicode string.
            source = source.encode('utf-8')

        return path, source, is_pkg

    def _get_module_via_sys_modules(self, fullname):
        """
        Attempt to fetch source code via sys.modules. This is specifically to
        support __main__, but it may catch a few more cases.
        """
        module = sys.modules.get(fullname)
        LOG.debug('_get_module_via_sys_modules(%r) -> %r', fullname, module)
        if not isinstance(module, types.ModuleType):
            LOG.debug('sys.modules[%r] absent or not a regular module',
                      fullname)
            return

        path = self._py_filename(getattr(module, '__file__', ''))
        if not path:
            return

        is_pkg = hasattr(module, '__path__')
        try:
            source = inspect.getsource(module)
        except IOError:
            # Work around inspect.getsourcelines() bug for 0-byte __init__.py
            # files.
            if not is_pkg:
                raise
            source = '\n'

        if isinstance(source, mitogen.core.UnicodeType):
            # get_source() returns "string" according to PEP-302, which was
            # reinterpreted for Python 3 to mean a Unicode string.
            source = source.encode('utf-8')

        return path, source, is_pkg

    def _get_module_via_parent_enumeration(self, fullname):
        """
        Attempt to fetch source code by examining the module's (hopefully less
        insane) parent package. Required for older versions of
        ansible.compat.six and plumbum.colors.
        """
        if fullname not in sys.modules:
            # Don't attempt this unless a module really exists in sys.modules,
            # else we could return junk.
            return

        pkgname, _, modname = str_rpartition(to_text(fullname), u'.')
        pkg = sys.modules.get(pkgname)
        if pkg is None or not hasattr(pkg, '__file__'):
            return

        pkg_path = os.path.dirname(pkg.__file__)
        try:
            fp, path, ext = imp.find_module(modname, [pkg_path])
            try:
                path = self._py_filename(path)
                if not path:
                    fp.close()
                    return

                source = fp.read()
            finally:
                # fp is None when find_module() located a package directory.
                if fp:
                    fp.close()

            if isinstance(source, mitogen.core.UnicodeType):
                # get_source() returns "string" according to PEP-302, which was
                # reinterpreted for Python 3 to mean a Unicode string.
                source = source.encode('utf-8')
            return path, source, False
        except ImportError:
            e = sys.exc_info()[1]
            LOG.debug('imp.find_module(%r, %r) -> %s', modname, [pkg_path], e)

    def add_source_override(self, fullname, path, source, is_pkg):
        """
        Explicitly install a source cache entry, preventing usual lookup
        methods from being used.

        Beware the value of `path` is critical when `is_pkg` is specified,
        since it directs where submodules are searched for.

        :param str fullname:
            Name of the module to override.
        :param str path:
            Module's path as it will appear in the cache.
        :param bytes source:
            Module source code as a bytestring.
        :param bool is_pkg:
            :data:`True` if the module is a package.
        """
        self._found_cache[fullname] = (path, source, is_pkg)

    # Lookup strategies tried in order by get_module_source(); first match
    # wins.
    get_module_methods = [
        _get_main_module_defective_python_3x,
        _get_module_via_pkgutil,
        _get_module_via_sys_modules,
        _get_module_via_parent_enumeration,
    ]

    def get_module_source(self, fullname):
        """Given the name of a loaded module `fullname`, attempt to find its
        source code.

        :returns:
            Tuple of `(module path, source text, is package?)`, or :data:`None`
            if the source cannot be found.
        """
        tup = self._found_cache.get(fullname)
        if tup:
            return tup

        for method in self.get_module_methods:
            tup = method(self, fullname)
            if tup:
                #LOG.debug('%r returned %r', method, tup)
                break
        else:
            # Negative results are cached too, so repeated misses stay cheap.
            tup = None, None, None
            LOG.debug('get_module_source(%r): cannot find source', fullname)

        self._found_cache[fullname] = tup
        return tup

    def resolve_relpath(self, fullname, level):
        """Given an ImportFrom AST node, guess the prefix that should be tacked
        on to an alias name to produce a canonical name. `fullname` is the name
        of the module in which the ImportFrom appears.
        """
        mod = sys.modules.get(fullname, None)
        if hasattr(mod, '__path__'):
            # Relative imports in a package resolve against the package
            # itself, so pretend we are its __init__.
            fullname += '.__init__'

        if level == 0 or not fullname:
            return ''

        bits = fullname.split('.')
        if len(bits) <= level:
            # This would be an ImportError in real code.
            return ''

        return '.'.join(bits[:-level]) + '.'

    def generate_parent_names(self, fullname):
        """Yield each ancestor package name of `fullname`, innermost first."""
        while '.' in fullname:
            fullname, _, _ = str_rpartition(to_text(fullname), u'.')
            yield fullname

    def find_related_imports(self, fullname):
        """
        Return a list of non-stdlib modules that are directly imported by
        `fullname`, plus their parents.

        The list is determined by retrieving the source code of
        `fullname`, compiling it, and examining all IMPORT_NAME ops.

        :param fullname: Fully qualified name of an _already imported_ module
            for which source code can be retrieved
        :type fullname: str
        """
        related = self._related_cache.get(fullname)
        if related is not None:
            return related

        modpath, src, _ = self.get_module_source(fullname)
        if src is None:
            return []

        maybe_names = list(self.generate_parent_names(fullname))

        co = compile(src, modpath, 'exec')
        for level, modname, namelist in scan_code_imports(co):
            if level == -1:
                # Implicit-relative (Py2) import: could be a sibling module or
                # a top-level one; record both candidates.
                modnames = [modname, '%s.%s' % (fullname, modname)]
            else:
                modnames = [
                    '%s%s' % (self.resolve_relpath(fullname, level), modname)
                ]

            maybe_names.extend(modnames)
            maybe_names.extend(
                '%s.%s' % (mname, name)
                for mname in modnames
                for name in namelist
            )

        # Keep only names that correspond to actually-loaded, non-stdlib
        # modules.
        return self._related_cache.setdefault(fullname, sorted(
            set(
                mitogen.core.to_text(name)
                for name in maybe_names
                if sys.modules.get(name) is not None
                and not is_stdlib_name(name)
                and u'six.moves' not in name  # TODO: crap
            )
        ))

    def find_related(self, fullname):
        """
        Return a list of non-stdlib modules that are imported directly or
        indirectly by `fullname`, plus their parents.

        This method is like :py:meth:`find_related_imports`, but also
        recursively searches any modules which are imported by `fullname`.

        :param fullname: Fully qualified name of an _already imported_ module
            for which source code can be retrieved
        :type fullname: str
        """
        # Breadth-first traversal of the import graph rooted at `fullname`.
        stack = [fullname]
        found = set()

        while stack:
            name = stack.pop(0)
            names = self.find_related_imports(name)
            stack.extend(set(names).difference(set(found).union(stack)))
            found.update(names)

        found.discard(fullname)
        return sorted(found)
+
+
+class ModuleResponder(object):
    def __init__(self, router):
        """
        Serve module source code to children in response to GET_MODULE
        requests, replying with LOAD_MODULE messages.

        :param mitogen.master.Router router:
            Router to install the GET_MODULE handler on.
        """
        self._router = router
        self._finder = ModuleFinder()
        self._cache = {}  # fullname -> pickled
        self.blacklist = []
        # [''] is a permit-everything sentinel; see whitelist_prefix().
        self.whitelist = ['']

        #: Context -> set([fullname, ..])
        self._forwarded_by_context = {}

        #: Number of GET_MODULE messages received.
        self.get_module_count = 0
        #: Total time spent in uncached GET_MODULE.
        self.get_module_secs = 0.0
        #: Total time spent minifying modules.
        self.minify_secs = 0.0
        #: Number of successful LOAD_MODULE messages sent.
        self.good_load_module_count = 0
        #: Total bytes in successful LOAD_MODULE payloads.
        self.good_load_module_size = 0
        #: Number of negative LOAD_MODULE messages sent.
        self.bad_load_module_count = 0

        router.add_handler(
            fn=self._on_get_module,
            handle=mitogen.core.GET_MODULE,
        )
+
    def __repr__(self):
        # Identify the responder by its router for log readability.
        return 'ModuleResponder(%r)' % (self._router,)
+
    def add_source_override(self, fullname, path, source, is_pkg):
        """
        See :meth:`ModuleFinder.add_source_override`.
        """
        self._finder.add_source_override(fullname, path, source, is_pkg)
+
    # Matches an execution guard line like "if __name__ == '__main__':"; used
    # by neutralize_main() to truncate __main__ source at that point.
    MAIN_RE = re.compile(b(r'^if\s+__name__\s*==\s*.__main__.\s*:'), re.M)
    # Error logged (with the script path substituted for %r) when __main__
    # lacks any execution guard.
    main_guard_msg = (
        "A child context attempted to import __main__, however the main "
        "module present in the master process lacks an execution guard. "
        "Update %r to prevent unintended execution, using a guard like:\n"
        "\n"
        "    if __name__ == '__main__':\n"
        "        # your code here.\n"
    )
+
    def whitelist_prefix(self, fullname):
        """Restrict served modules to `fullname` (plus previously whitelisted
        prefixes and mitogen itself)."""
        # The default [''] whitelist permits everything; replace it with
        # ['mitogen'] on first use so the list becomes restrictive.
        if self.whitelist == ['']:
            self.whitelist = ['mitogen']
        self.whitelist.append(fullname)
+
    def blacklist_prefix(self, fullname):
        """Refuse to serve any module whose name starts with `fullname`."""
        self.blacklist.append(fullname)
+
    def neutralize_main(self, path, src):
        """Given the source for the __main__ module, try to find where it
        begins conditional execution based on a "if __name__ == '__main__'"
        guard, and remove any code after that point.

        :raises ImportError:
            If no guard is found and the script does not appear to use
            mitogen.main().
        """
        match = self.MAIN_RE.search(src)
        if match:
            return src[:match.start()]

        # Scripts calling mitogen.main() are treated as adequately guarded.
        if b('mitogen.main(') in src:
            return src

        LOG.error(self.main_guard_msg, path)
        raise ImportError('refused')
+
    def _make_negative_response(self, fullname):
        # LOAD_MODULE tuple meaning "this module cannot be served":
        # 0:fullname 1:pkg_present 2:path 3:compressed 4:related
        return (fullname, None, None, None, ())
+
+    minify_safe_re = re.compile(b(r'\s+#\s*!mitogen:\s*minify_safe'))
+
    def _build_tuple(self, fullname):
        """
        Return the (cached) LOAD_MODULE payload tuple for `fullname`:
        `(fullname, pkg_present, path, compressed source, related names)`.

        :raises ImportError:
            If the module is blacklisted, or is __main__ without an execution
            guard.
        """
        if fullname in self._cache:
            return self._cache[fullname]

        if mitogen.core.is_blacklisted_import(self, fullname):
            raise ImportError('blacklisted')

        path, source, is_pkg = self._finder.get_module_source(fullname)
        if path and is_stdlib_path(path):
            # Prevent loading of 2.x<->3.x stdlib modules! This costs one
            # RTT per hit, so a client-side solution is also required.
            LOG.debug('%r: refusing to serve stdlib module %r',
                      self, fullname)
            tup = self._make_negative_response(fullname)
            self._cache[fullname] = tup
            return tup

        if source is None:
            # TODO: make this .warning() or similar again once importer has its
            # own logging category.
            LOG.debug('_build_tuple(%r): could not locate source', fullname)
            tup = self._make_negative_response(fullname)
            self._cache[fullname] = tup
            return tup

        if self.minify_safe_re.search(source):
            # If the module contains a magic marker, it's safe to minify.
            t0 = time.time()
            source = mitogen.minify.minimize_source(source).encode('utf-8')
            self.minify_secs += time.time() - t0

        if is_pkg:
            # Children need the submodule names to answer imports locally.
            pkg_present = get_child_modules(path)
            LOG.debug('_build_tuple(%r, %r) -> %r',
                      path, fullname, pkg_present)
        else:
            pkg_present = None

        if fullname == '__main__':
            source = self.neutralize_main(path, source)
        compressed = mitogen.core.Blob(zlib.compress(source, 9))
        related = [
            to_text(name)
            for name in self._finder.find_related(fullname)
            if not mitogen.core.is_blacklisted_import(self, name)
        ]
        # 0:fullname 1:pkg_present 2:path 3:compressed 4:related
        tup = (
            to_text(fullname),
            pkg_present,
            to_text(path),
            compressed,
            related
        )
        self._cache[fullname] = tup
        return tup
+
+    def _send_load_module(self, stream, fullname):
+        if fullname not in stream.sent_modules:
+            tup = self._build_tuple(fullname)
+            msg = mitogen.core.Message.pickled(
+                tup,
+                dst_id=stream.remote_id,
+                handle=mitogen.core.LOAD_MODULE,
+            )
+            LOG.debug('%s: sending module %s (%.2f KiB)',
+                      stream.name, fullname, len(msg.data) / 1024.0)
+            self._router._async_route(msg)
+            stream.sent_modules.add(fullname)
+            if tup[2] is not None:
+                self.good_load_module_count += 1
+                self.good_load_module_size += len(msg.data)
+            else:
+                self.bad_load_module_count += 1
+
+    def _send_module_load_failed(self, stream, fullname):
+        self.bad_load_module_count += 1
+        stream.send(
+            mitogen.core.Message.pickled(
+                self._make_negative_response(fullname),
+                dst_id=stream.remote_id,
+                handle=mitogen.core.LOAD_MODULE,
+            )
+        )
+
+    def _send_module_and_related(self, stream, fullname):
+        if fullname in stream.sent_modules:
+            return
+
+        try:
+            tup = self._build_tuple(fullname)
+            for name in tup[4]:  # related
+                parent, _, _ = str_partition(name, '.')
+                if parent != fullname and parent not in stream.sent_modules:
+                    # Parent hasn't been sent, so don't load submodule yet.
+                    continue
+
+                self._send_load_module(stream, name)
+            self._send_load_module(stream, fullname)
+        except Exception:
+            LOG.debug('While importing %r', fullname, exc_info=True)
+            self._send_module_load_failed(stream, fullname)
+
+    def _on_get_module(self, msg):
+        if msg.is_dead:
+            return
+
+        stream = self._router.stream_by_id(msg.src_id)
+        if stream is None:
+            return
+
+        fullname = msg.data.decode()
+        LOG.debug('%s requested module %s', stream.name, fullname)
+        self.get_module_count += 1
+        if fullname in stream.sent_modules:
+            LOG.warning('_on_get_module(): dup request for %r from %r',
+                        fullname, stream)
+
+        t0 = time.time()
+        try:
+            self._send_module_and_related(stream, fullname)
+        finally:
+            self.get_module_secs += time.time() - t0
+
+    def _send_forward_module(self, stream, context, fullname):
+        if stream.remote_id != context.context_id:
+            stream.send(
+                mitogen.core.Message(
+                    data=b('%s\x00%s' % (context.context_id, fullname)),
+                    handle=mitogen.core.FORWARD_MODULE,
+                    dst_id=stream.remote_id,
+                )
+            )
+
+    def _forward_one_module(self, context, fullname):
+        forwarded = self._forwarded_by_context.get(context)
+        if forwarded is None:
+            forwarded = set()
+            self._forwarded_by_context[context] = forwarded
+
+        if fullname in forwarded:
+            return
+
+        path = []
+        while fullname:
+            path.append(fullname)
+            fullname, _, _ = str_rpartition(fullname, u'.')
+
+        stream = self._router.stream_by_id(context.context_id)
+        if stream is None:
+            LOG.debug('%r: dropping forward of %s to no longer existent '
+                      '%r', self, path[0], context)
+            return
+
+        for fullname in reversed(path):
+            self._send_module_and_related(stream, fullname)
+            self._send_forward_module(stream, context, fullname)
+
+    def _forward_modules(self, context, fullnames):
+        IOLOG.debug('%r._forward_modules(%r, %r)', self, context, fullnames)
+        for fullname in fullnames:
+            self._forward_one_module(context, mitogen.core.to_text(fullname))
+
+    def forward_modules(self, context, fullnames):
+        self._router.broker.defer(self._forward_modules, context, fullnames)
+
+
+class Broker(mitogen.core.Broker):
+    """
+    .. note::
+
+        You may construct as many brokers as desired, and use the same broker
+        for multiple routers, however usually only one broker need exist.
+        Multiple brokers may be useful when dealing with sets of children with
+        differing lifetimes. For example, a subscription service where
+        non-payment results in termination for one customer.
+
+    :param bool install_watcher:
+        If :data:`True`, an additional thread is started to monitor the
+        lifetime of the main thread, triggering :meth:`shutdown`
+        automatically in case the user forgets to call it, or their code
+        crashed.
+
+        You should not rely on this functionality in your program, it is only
+        intended as a fail-safe and to simplify the API for new users. In
+        particular, alternative Python implementations may not be able to
+        support watching the main thread.
+    """
+    shutdown_timeout = 5.0
+    _watcher = None
+    poller_class = mitogen.parent.PREFERRED_POLLER
+
+    def __init__(self, install_watcher=True):
+        if install_watcher:
+            self._watcher = ThreadWatcher.watch(
+                target=threading.currentThread(),
+                on_join=self.shutdown,
+            )
+        super(Broker, self).__init__()
+
+    def shutdown(self):
+        super(Broker, self).shutdown()
+        if self._watcher:
+            self._watcher.remove()
+
+
+class Router(mitogen.parent.Router):
+    """
+    Extend :class:`mitogen.core.Router` with functionality useful to masters,
+    and child contexts who later become masters. Currently when this class is
+    required, the target context's router is upgraded at runtime.
+
+    .. note::
+
+        You may construct as many routers as desired, and use the same broker
+        for multiple routers, however usually only one broker and router need
+        exist. Multiple routers may be useful when dealing with separate trust
+        domains, for example, manipulating infrastructure belonging to separate
+        customers or projects.
+
+    :param mitogen.master.Broker broker:
+        Broker to use. If not specified, a private :class:`Broker` is created.
+
+    :param int max_message_size:
+        Override the maximum message size this router is willing to receive or
+        transmit. Any value set here is automatically inherited by any children
+        created by the router.
+
+        This has a liberal default of 128 MiB, but may be set much lower.
+        Beware that setting it below 64KiB may encourage unexpected failures as
+        parents and children can no longer route large Python modules that may
+        be required by your application.
+    """
+
+    broker_class = Broker
+
+    #: When :data:`True`, cause the broker thread and any subsequent broker and
+    #: main threads existing in any child to write
+    #: ``/tmp/mitogen.stats.<pid>.<thread_name>.log`` containing a
+    #: :mod:`cProfile` dump on graceful exit. Must be set prior to construction
+    #: of any :class:`Broker`, e.g. via::
+    #:
+    #:      mitogen.master.Router.profiling = True
+    profiling = os.environ.get('MITOGEN_PROFILING') is not None
+
+    def __init__(self, broker=None, max_message_size=None):
+        if broker is None:
+            broker = self.broker_class()
+        if max_message_size:
+            self.max_message_size = max_message_size
+        super(Router, self).__init__(broker)
+        self.upgrade()
+
+    def upgrade(self):
+        self.id_allocator = IdAllocator(self)
+        self.responder = ModuleResponder(self)
+        self.log_forwarder = LogForwarder(self)
+        self.route_monitor = mitogen.parent.RouteMonitor(router=self)
+        self.add_handler(  # TODO: cutpaste.
+            fn=self._on_detaching,
+            handle=mitogen.core.DETACHING,
+            persist=True,
+        )
+
+    def _on_broker_exit(self):
+        super(Router, self)._on_broker_exit()
+        dct = self.get_stats()
+        dct['self'] = self
+        dct['minify_ms'] = 1000 * dct['minify_secs']
+        dct['get_module_ms'] = 1000 * dct['get_module_secs']
+        dct['good_load_module_size_kb'] = dct['good_load_module_size'] / 1024.0
+        dct['good_load_module_size_avg'] = (
+            (
+                dct['good_load_module_size'] /
+                (float(dct['good_load_module_count']) or 1.0)
+            ) / 1024.0
+        )
+
+        LOG.debug(
+            '%(self)r: stats: '
+                '%(get_module_count)d module requests in '
+                '%(get_module_ms)d ms, '
+                '%(good_load_module_count)d sent '
+                '(%(minify_ms)d ms minify time), '
+                '%(bad_load_module_count)d negative responses. '
+                'Sent %(good_load_module_size_kb).01f kb total, '
+                '%(good_load_module_size_avg).01f kb avg.'
+            % dct
+        )
+
+    def get_stats(self):
+        """
+        Return performance data for the module responder.
+
+        :returns:
+
+            Dict containing keys:
+
+            * `get_module_count`: Integer count of
+              :data:`mitogen.core.GET_MODULE` messages received.
+            * `get_module_secs`: Floating point total seconds spent servicing
+              :data:`mitogen.core.GET_MODULE` requests.
+            * `good_load_module_count`: Integer count of successful
+              :data:`mitogen.core.LOAD_MODULE` messages sent.
+            * `good_load_module_size`: Integer total bytes sent in
+              :data:`mitogen.core.LOAD_MODULE` message payloads.
+            * `bad_load_module_count`: Integer count of negative
+              :data:`mitogen.core.LOAD_MODULE` messages sent.
+            * `minify_secs`: CPU seconds spent minifying modules marked
+               minify-safe.
+        """
+        return {
+            'get_module_count': self.responder.get_module_count,
+            'get_module_secs': self.responder.get_module_secs,
+            'good_load_module_count': self.responder.good_load_module_count,
+            'good_load_module_size': self.responder.good_load_module_size,
+            'bad_load_module_count': self.responder.bad_load_module_count,
+            'minify_secs': self.responder.minify_secs,
+        }
+
+    def enable_debug(self):
+        """
+        Cause this context and any descendant child contexts to write debug
+        logs to ``/tmp/mitogen.<pid>.log``.
+        """
+        mitogen.core.enable_debug_logging()
+        self.debug = True
+
+    def __enter__(self):
+        return self
+
+    def __exit__(self, e_type, e_val, tb):
+        self.broker.shutdown()
+        self.broker.join()
+
+    def disconnect_stream(self, stream):
+        self.broker.defer(stream.on_disconnect, self.broker)
+
+    def disconnect_all(self):
+        for stream in self._stream_by_id.values():
+            self.disconnect_stream(stream)
+
+
+class IdAllocator(object):
+    def __init__(self, router):
+        self.router = router
+        self.next_id = 1
+        self.lock = threading.Lock()
+        router.add_handler(
+            fn=self.on_allocate_id,
+            handle=mitogen.core.ALLOCATE_ID,
+        )
+
+    def __repr__(self):
+        return 'IdAllocator(%r)' % (self.router,)
+
+    BLOCK_SIZE = 1000
+
+    def allocate(self):
+        """
+        Arrange for a unique context ID to be allocated and associated with a
+        route leading to the active context. In masters, the ID is generated
+        directly, in children it is forwarded to the master via a
+        :data:`mitogen.core.ALLOCATE_ID` message.
+        """
+        self.lock.acquire()
+        try:
+            id_ = self.next_id
+            self.next_id += 1
+            return id_
+        finally:
+            self.lock.release()
+
+    def allocate_block(self):
+        self.lock.acquire()
+        try:
+            id_ = self.next_id
+            self.next_id += self.BLOCK_SIZE
+            end_id = id_ + self.BLOCK_SIZE
+            LOG.debug('%r: allocating [%d..%d)', self, id_, end_id)
+            return id_, end_id
+        finally:
+            self.lock.release()
+
+    def on_allocate_id(self, msg):
+        if msg.is_dead:
+            return
+
+        id_, last_id = self.allocate_block()
+        requestee = self.router.context_by_id(msg.src_id)
+        LOG.debug('%r: allocating [%r..%r) to %r',
+                  self, id_, last_id, requestee)
+        msg.reply((id_, last_id))
diff --git a/ansible/plugins/mitogen-0.2.6/mitogen/minify.py b/ansible/plugins/mitogen-0.2.6/mitogen/minify.py
new file mode 100644
index 000000000..dc9f517c5
--- /dev/null
+++ b/ansible/plugins/mitogen-0.2.6/mitogen/minify.py
@@ -0,0 +1,139 @@
+# Copyright 2017, Alex Willmer
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its contributors
+# may be used to endorse or promote products derived from this software without
+# specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+
+# !mitogen: minify_safe
+
+import sys
+
+try:
+    from io import StringIO
+except ImportError:
+    from StringIO import StringIO
+
+import mitogen.core
+
+if sys.version_info < (2, 7, 11):
+    from mitogen.compat import tokenize
+else:
+    import tokenize
+
+
+def minimize_source(source):
+    """Remove comments and docstrings from Python `source`, preserving line
+    numbers and syntax of empty blocks.
+
+    :param str source:
+        The source to minimize.
+
+    :returns str:
+        The minimized source.
+    """
+    source = mitogen.core.to_text(source)
+    tokens = tokenize.generate_tokens(StringIO(source).readline)
+    tokens = strip_comments(tokens)
+    tokens = strip_docstrings(tokens)
+    tokens = reindent(tokens)
+    return tokenize.untokenize(tokens)
+
+
+def strip_comments(tokens):
+    """Drop comment tokens from a `tokenize` stream.
+
+    Comments on lines 1-2 are kept, to preserve hashbang and encoding.
+    Trailing whitespace is removed from all lines.
+    """
+    prev_typ = None
+    prev_end_col = 0
+    for typ, tok, (start_row, start_col), (end_row, end_col), line in tokens:
+        if typ in (tokenize.NL, tokenize.NEWLINE):
+            if prev_typ in (tokenize.NL, tokenize.NEWLINE):
+                start_col = 0
+            else:
+                start_col = prev_end_col
+            end_col = start_col + 1
+        elif typ == tokenize.COMMENT and start_row > 2:
+            continue
+        prev_typ = typ
+        prev_end_col = end_col
+        yield typ, tok, (start_row, start_col), (end_row, end_col), line
+
+
+def strip_docstrings(tokens):
+    """Replace docstring tokens with NL tokens in a `tokenize` stream.
+
+    Any STRING token not part of an expression is deemed a docstring.
+    Indented docstrings are not yet recognised.
+    """
+    stack = []
+    state = 'wait_string'
+    for t in tokens:
+        typ = t[0]
+        if state == 'wait_string':
+            if typ in (tokenize.NL, tokenize.COMMENT):
+                yield t
+            elif typ in (tokenize.DEDENT, tokenize.INDENT, tokenize.STRING):
+                stack.append(t)
+            elif typ == tokenize.NEWLINE:
+                stack.append(t)
+                start_line, end_line = stack[0][2][0], stack[-1][3][0]+1
+                for i in range(start_line, end_line):
+                    yield tokenize.NL, '\n', (i, 0), (i,1), '\n'
+                for t in stack:
+                    if t[0] in (tokenize.DEDENT, tokenize.INDENT):
+                        yield t[0], t[1], (i+1, t[2][1]), (i+1, t[3][1]), t[4]
+                del stack[:]
+            else:
+                stack.append(t)
+                for t in stack: yield t
+                del stack[:]
+                state = 'wait_newline'
+        elif state == 'wait_newline':
+            if typ == tokenize.NEWLINE:
+                state = 'wait_string'
+            yield t
+
+
+def reindent(tokens, indent=' '):
+    """Replace existing indentation in a token stream with `indent`.
+    """
+    old_levels = []
+    old_level = 0
+    new_level = 0
+    for typ, tok, (start_row, start_col), (end_row, end_col), line in tokens:
+        if typ == tokenize.INDENT:
+            old_levels.append(old_level)
+            old_level = len(tok)
+            new_level += 1
+            tok = indent * new_level
+        elif typ == tokenize.DEDENT:
+            old_level = old_levels.pop()
+            new_level -= 1
+        start_col = max(0, start_col - old_level + new_level)
+        if start_row == end_row:
+            end_col = start_col + len(tok)
+        yield typ, tok, (start_row, start_col), (end_row, end_col), line
diff --git a/ansible/plugins/mitogen-0.2.6/mitogen/os_fork.py b/ansible/plugins/mitogen-0.2.6/mitogen/os_fork.py
new file mode 100644
index 000000000..b27cfd5c3
--- /dev/null
+++ b/ansible/plugins/mitogen-0.2.6/mitogen/os_fork.py
@@ -0,0 +1,183 @@
+# Copyright 2019, David Wilson
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its contributors
+# may be used to endorse or promote products derived from this software without
+# specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+
+# !mitogen: minify_safe
+
+"""
+Support for operating in a mixed threading/forking environment.
+"""
+
+import os
+import socket
+import sys
+import weakref
+
+import mitogen.core
+
+
+# List of weakrefs. On Python 2.4, mitogen.core registers its Broker on this
+# list and mitogen.service registers its Pool too.
+_brokers = weakref.WeakKeyDictionary()
+_pools = weakref.WeakKeyDictionary()
+
+
+def _notice_broker_or_pool(obj):
+    """
+    Used by :mod:`mitogen.core` and :mod:`mitogen.service` to automatically
+    register every broker and pool on Python 2.4/2.5.
+    """
+    if isinstance(obj, mitogen.core.Broker):
+        _brokers[obj] = True
+    else:
+        _pools[obj] = True
+
+
+def wrap_os__fork():
+    corker = Corker(
+        brokers=list(_brokers),
+        pools=list(_pools),
+    )
+    try:
+        corker.cork()
+        return os__fork()
+    finally:
+        corker.uncork()
+
+
+# On Python 2.4/2.5, where threading state is not fixed up, subprocess.Popen()
+# may still deadlock due to the broker thread. In this case, pause os.fork() so
+# that all active threads are paused during fork.
+if sys.version_info < (2, 6):
+    os__fork = os.fork
+    os.fork = wrap_os__fork
+
+
+class Corker(object):
+    """
+    Arrange for :class:`mitogen.core.Broker` and optionally
+    :class:`mitogen.service.Pool` to be temporarily "corked" while fork
+    operations may occur.
+
+    In a mixed threading/forking environment, it is critical no threads are
+    active at the moment of fork, as they could hold mutexes whose state is
+    unrecoverably snapshotted in the locked state in the fork child, causing
+    deadlocks at random future moments.
+
+    To ensure a target thread has all locks dropped, it is made to write a
+    large string to a socket with a small buffer that has :data:`os.O_NONBLOCK`
+    disabled. CPython will drop the GIL and enter the ``write()`` system call,
+    where it will block until the socket buffer is drained, or the write side
+    is closed.
+
+    :class:`mitogen.core.Poller` is used to ensure the thread really has
+    blocked outside any Python locks, by checking if the socket buffer has
+    started to fill.
+
+    Since this necessarily involves posting a message to every existent thread
+    and verifying acknowledgement, it will never be a fast operation.
+
+    This does not yet handle the case of corking being initiated from within a
+    thread that is also a cork target.
+
+    :param brokers:
+        Sequence of :class:`mitogen.core.Broker` instances to cork.
+    :param pools:
+        Sequence of :class:`mitogen.service.Pool` instances to cork.
+    """
+    def __init__(self, brokers=(), pools=()):
+        self.brokers = brokers
+        self.pools = pools
+
+    def _do_cork(self, s, wsock):
+        try:
+            try:
+                while True:
+                    # at least EINTR is possible. Do our best to keep handling
+                    # outside the GIL in this case using sendall().
+                    wsock.sendall(s)
+            except socket.error:
+                pass
+        finally:
+            wsock.close()
+
+    def _cork_one(self, s, obj):
+        """
+        Construct a socketpair, saving one side of it, and passing the other to
+        `obj` to be written to by one of its threads.
+        """
+        rsock, wsock = mitogen.parent.create_socketpair(size=4096)
+        mitogen.core.set_cloexec(rsock.fileno())
+        mitogen.core.set_cloexec(wsock.fileno())
+        mitogen.core.set_block(wsock)  # gevent
+        self._rsocks.append(rsock)
+        obj.defer(self._do_cork, s, wsock)
+
+    def _verify_one(self, rsock):
+        """
+        Pause until the socket `rsock` indicates readability, due to
+        :meth:`_do_cork` triggering a blocking write on another thread.
+        """
+        poller = mitogen.core.Poller()
+        poller.start_receive(rsock.fileno())
+        try:
+            while True:
+                for fd in poller.poll():
+                    return
+        finally:
+            poller.close()
+
+    def cork(self):
+        """
+        Arrange for any associated brokers and pools to be paused with no locks
+        held. This will not return until each thread acknowledges it has ceased
+        execution.
+        """
+        s = mitogen.core.b('CORK') * ((128 // 4) * 1024)
+        self._rsocks = []
+
+        # Pools must be paused first, as existing work may require the
+        # participation of a broker in order to complete.
+        for pool in self.pools:
+            if not pool.closed:
+                for x in range(pool.size):
+                    self._cork_one(s, pool)
+
+        for broker in self.brokers:
+            if broker._alive:
+                self._cork_one(s, broker)
+
+        # Pause until we can detect every thread has entered write().
+        for rsock in self._rsocks:
+            self._verify_one(rsock)
+
+    def uncork(self):
+        """
+        Arrange for paused threads to resume operation.
+        """
+        for rsock in self._rsocks:
+            rsock.close()
diff --git a/ansible/plugins/mitogen-0.2.6/mitogen/parent.py b/ansible/plugins/mitogen-0.2.6/mitogen/parent.py
new file mode 100644
index 000000000..3d02bc43e
--- /dev/null
+++ b/ansible/plugins/mitogen-0.2.6/mitogen/parent.py
@@ -0,0 +1,2330 @@
+# Copyright 2019, David Wilson
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its contributors
+# may be used to endorse or promote products derived from this software without
+# specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+
+# !mitogen: minify_safe
+
+"""
+This module defines functionality common to master and parent processes. It is
+sent to any child context that is due to become a parent, due to recursive
+connection.
+"""
+
+import codecs
+import errno
+import fcntl
+import getpass
+import inspect
+import logging
+import os
+import signal
+import socket
+import struct
+import subprocess
+import sys
+import termios
+import textwrap
+import threading
+import time
+import zlib
+
+# Absolute imports for <2.5.
+select = __import__('select')
+
+try:
+    import thread
+except ImportError:
+    import threading as thread
+
+import mitogen.core
+from mitogen.core import b
+from mitogen.core import bytes_partition
+from mitogen.core import LOG
+from mitogen.core import IOLOG
+
+try:
+    next
+except NameError:
+    # Python 2.4/2.5
+    from mitogen.core import next
+
+
+itervalues = getattr(dict, 'itervalues', dict.values)
+
+if mitogen.core.PY3:
+    xrange = range
+    closure_attr = '__closure__'
+    IM_SELF_ATTR = '__self__'
+else:
+    closure_attr = 'func_closure'
+    IM_SELF_ATTR = 'im_self'
+
+
+try:
+    SC_OPEN_MAX = os.sysconf('SC_OPEN_MAX')
+except ValueError:
+    SC_OPEN_MAX = 1024
+
+OPENPTY_MSG = (
+    "Failed to create a PTY: %s. It is likely the maximum number of PTYs has "
+    "been reached. Consider increasing the 'kern.tty.ptmx_max' sysctl on OS "
+    "X, the 'kernel.pty.max' sysctl on Linux, or modifying your configuration "
+    "to avoid PTY use."
+)
+
+SYS_EXECUTABLE_MSG = (
+    "The Python sys.executable variable is unset, indicating Python was "
+    "unable to determine its original program name. Unless explicitly "
+    "configured otherwise, child contexts will be started using "
+    "'/usr/bin/python'"
+)
+_sys_executable_warning_logged = False
+
+
+def _ioctl_cast(n):
+    """
+    Linux ioctl() request parameter is unsigned, whereas on BSD/Darwin it is
+    signed. Until 2.5 Python exclusively implemented the BSD behaviour,
+    preventing use of large unsigned int requests like the TTY layer uses
+    below. So on 2.4, we cast our unsigned to look like signed for Python.
+    """
+    if sys.version_info < (2, 5):
+        n, = struct.unpack('i', struct.pack('I', n))
+    return n
+
+
+# If not :data:`None`, called prior to exec() of any new child process. Used by
+# :func:`mitogen.utils.reset_affinity` to allow the child to be freely
+# scheduled.
+_preexec_hook = None
+
+# Get PTY number; asm-generic/ioctls.h
+LINUX_TIOCGPTN = _ioctl_cast(2147767344)
+
+# Lock/unlock PTY; asm-generic/ioctls.h
+LINUX_TIOCSPTLCK = _ioctl_cast(1074025521)
+
+IS_LINUX = os.uname()[0] == 'Linux'
+
+SIGNAL_BY_NUM = dict(
+    (getattr(signal, name), name)
+    for name in sorted(vars(signal), reverse=True)
+    if name.startswith('SIG') and not name.startswith('SIG_')
+)
+
+
+def get_log_level():
+    return (LOG.level or logging.getLogger().level or logging.INFO)
+
+
+def get_sys_executable():
+    """
+    Return :data:`sys.executable` if it is set, otherwise return
+    ``"/usr/bin/python"`` and log a warning.
+    """
+    if sys.executable:
+        return sys.executable
+
+    global _sys_executable_warning_logged
+    if not _sys_executable_warning_logged:
+        LOG.warn(SYS_EXECUTABLE_MSG)
+        _sys_executable_warning_logged = True
+
+    return '/usr/bin/python'
+
+
+_core_source_lock = threading.Lock()
+_core_source_partial = None
+
+
+def _get_core_source():
+    """
+    In non-masters, simply fetch the cached mitogen.core source code via the
+    import mechanism. In masters, this function is replaced with a version that
+    performs minification directly.
+    """
+    return inspect.getsource(mitogen.core)
+
+
+def get_core_source_partial():
+    """
+    _get_core_source() is expensive, even with @lru_cache in minify.py, threads
+    can enter it simultaneously causing severe slowdowns.
+    """
+    global _core_source_partial
+
+    if _core_source_partial is None:
+        _core_source_lock.acquire()
+        try:
+            if _core_source_partial is None:
+                _core_source_partial = PartialZlib(
+                    _get_core_source().encode('utf-8')
+                )
+        finally:
+            _core_source_lock.release()
+
+    return _core_source_partial
+
+
+def get_default_remote_name():
+    """
+    Return the default name appearing in argv[0] of remote machines.
+    """
+    s = u'%s@%s:%d'
+    s %= (getpass.getuser(), socket.gethostname(), os.getpid())
+    # In mixed UNIX/Windows environments, the username may contain slashes.
+    return s.translate({
+        ord(u'\\'): ord(u'_'),
+        ord(u'/'): ord(u'_')
+    })
+
+
+def is_immediate_child(msg, stream):
+    """
+    Handler policy that requires messages to arrive only from immediately
+    connected children.
+    """
+    return msg.src_id == stream.remote_id
+
+
def flags(names):
    """
    Return the sum (equivalently, for distinct flag bits, the bitwise OR) of
    the named space-separated :py:mod:`termios` module constants.
    """
    value = 0
    for name in names.split():
        # Constants missing on this platform contribute 0.
        value += getattr(termios, name, 0)
    return value
+
+
def cfmakeraw(tflags):
    """
    Given a list returned by :py:func:`termios.tcgetattr`, return a list
    modified in a manner similar to the `cfmakeraw()` C library function, but
    additionally disabling local echo.
    """
    # Masks mirror cfmakeraw() in BSD libc and glibc:
    # BSD: https://github.com/freebsd/freebsd/blob/master/lib/libc/gen/termios.c#L162
    # Linux: https://github.com/lattera/glibc/blob/master/termios/cfmakeraw.c#L20
    iflag, oflag, cflag, lflag, ispeed, ospeed, cc = tflags

    iflag_clear = flags('IMAXBEL IXOFF INPCK BRKINT PARMRK ISTRIP INLCR ICRNL IXON IGNPAR')
    iflag_clear |= flags('IGNBRK BRKINT PARMRK')
    oflag_clear = flags('OPOST')
    lflag_clear = flags('ECHO ECHOE ECHOK ECHONL ICANON ISIG IEXTEN NOFLSH TOSTOP PENDIN')
    cflag_clear = flags('CSIZE PARENB')
    cflag_set = flags('CS8 CREAD')

    return [
        iflag & ~iflag_clear,
        oflag & ~oflag_clear,
        cflag & ~cflag_clear | cflag_set,
        lflag & ~lflag_clear,
        ispeed,
        ospeed,
        cc,
    ]
+
+
def disable_echo(fd):
    """
    Put the TTY open on `fd` into raw mode with local echo disabled.
    """
    attrs = termios.tcgetattr(fd)
    raw_attrs = cfmakeraw(attrs)
    # 'when' selects how tcsetattr applies the change; TCSASOFT only exists
    # on BSD-derived platforms.
    when = getattr(termios, 'TCSASOFT', 0)
    if not mitogen.core.IS_WSL:
        # issue #319: Windows Subsystem for Linux as of July 2018 throws EINVAL
        # if TCSAFLUSH is specified.
        when |= termios.TCSAFLUSH
    termios.tcsetattr(fd, when, raw_attrs)
+
+
def close_nonstandard_fds():
    """
    Close every file descriptor above stderr (2), up to the per-process
    maximum reported at import time. Descriptors that are not open are
    silently skipped.
    """
    for fd in xrange(3, SC_OPEN_MAX):
        try:
            os.close(fd)
        except OSError:
            # fd was not open (EBADF) or could not be closed; keep going.
            pass
+
+
def create_socketpair(size=None):
    """
    Create a :func:`socket.socketpair` for use as a child process's UNIX
    stdio channels. Socket pairs are bidirectional, so a single descriptor
    serves as both ``stdin`` and ``stdout``, and their buffer sizes are
    tunable, improving throughput for file transfer and reducing
    :class:`mitogen.core.Broker` IO loop iterations.

    :param int size:
        Desired send/receive buffer size; defaults to
        :data:`mitogen.core.CHUNK_SIZE`.
    """
    bufsize = size or mitogen.core.CHUNK_SIZE
    first, second = socket.socketpair()
    first.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, bufsize)
    second.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, bufsize)
    return first, second
+
+
def detach_popen(**kwargs):
    """
    Use :class:`subprocess.Popen` to construct a child process, then hack the
    Popen so that it forgets the child it created, allowing it to survive a
    call to Popen.__del__.

    If the child process is not detached, there is a race between it exiting
    and __del__ being called. If it exits before __del__ runs, then __del__'s
    call to :func:`os.waitpid` will capture the one and only exit event
    delivered to this process, causing later 'legitimate' calls to fail with
    ECHILD.

    :param kwargs:
        Keyword arguments passed through to :class:`subprocess.Popen`. Any
        `preexec_fn` supplied is wrapped so the module-level `_preexec_hook`
        runs in the child first.
    :returns:
        Process ID of the new child.
    """
    # This allows Popen() to be used for e.g. graceful post-fork error
    # handling, without tying the surrounding code into managing a Popen
    # object, which isn't possible for at least :mod:`mitogen.fork`. This
    # should be replaced by a swappable helper class in a future version.
    real_preexec_fn = kwargs.pop('preexec_fn', None)
    def preexec_fn():
        if _preexec_hook:
            _preexec_hook()
        if real_preexec_fn:
            real_preexec_fn()
    proc = subprocess.Popen(preexec_fn=preexec_fn, **kwargs)
    # Prevent Popen.__del__ from reaping the child: the caller owns it now.
    proc._child_created = False
    return proc.pid
+
+
def create_child(args, merge_stdio=False, stderr_pipe=False, preexec_fn=None):
    """
    Create a child process whose stdin/stdout is connected to a socket.

    :param args:
        Argument vector for execv() call.
    :param bool merge_stdio:
        If :data:`True`, arrange for `stderr` to be connected to the `stdout`
        socketpair, rather than inherited from the parent process. This may be
        necessary to ensure that no TTY is connected to any stdio handle, for
        instance when using LXC.
    :param bool stderr_pipe:
        If :data:`True` and `merge_stdio` is :data:`False`, arrange for
        `stderr` to be connected to a separate pipe, to allow any ongoing debug
        logs generated by e.g. SSH to be output as the session progresses,
        without interfering with `stdout`.
    :param preexec_fn:
        Optional callable run in the child prior to exec.
    :returns:
        `(pid, stdio_fd, stderr_fd)` where `stdio_fd` is a duplicated
        descriptor for the parent end of the socketpair and `stderr_fd` is
        the read end of the stderr pipe, or :data:`None` when no pipe was
        requested.
    """
    parentfp, childfp = create_socketpair()
    # When running under a monkey patches-enabled gevent, the socket module
    # yields file descriptors who already have O_NONBLOCK, which is
    # persisted across fork, totally breaking Python. Therefore, drop
    # O_NONBLOCK from Python's future stdin fd.
    mitogen.core.set_block(childfp.fileno())

    stderr_r = None
    extra = {}
    if merge_stdio:
        extra = {'stderr': childfp}
    elif stderr_pipe:
        # Both pipe ends are close-on-exec in this process; the write end is
        # passed to the child via the Popen 'stderr' argument.
        stderr_r, stderr_w = os.pipe()
        mitogen.core.set_cloexec(stderr_r)
        mitogen.core.set_cloexec(stderr_w)
        extra = {'stderr': stderr_w}

    try:
        pid = detach_popen(
            args=args,
            stdin=childfp,
            stdout=childfp,
            close_fds=True,
            preexec_fn=preexec_fn,
            **extra
        )
    except Exception:
        # Spawn failed: release every descriptor created above before
        # re-raising.
        childfp.close()
        parentfp.close()
        if stderr_pipe:
            os.close(stderr_r)
            os.close(stderr_w)
        raise

    if stderr_pipe:
        # The child holds its own copy of the write end.
        os.close(stderr_w)
    childfp.close()
    # Decouple the socket from the lifetime of the Python socket object.
    fd = os.dup(parentfp.fileno())
    parentfp.close()

    LOG.debug('create_child() child %d fd %d, parent %d, cmd: %s',
              pid, fd, os.getpid(), Argv(args))
    return pid, fd, stderr_r
+
+
def _acquire_controlling_tty():
    """
    Runs in the child process prior to exec: start a new session and make the
    TTY open on stderr (fd 2) the process's controlling TTY.
    """
    os.setsid()
    if sys.platform in ('linux', 'linux2'):
        # On Linux, the controlling tty becomes the first tty opened by a
        # process lacking any prior tty.
        os.close(os.open(os.ttyname(2), os.O_RDWR))
    if hasattr(termios, 'TIOCSCTTY') and not mitogen.core.IS_WSL:
        # #550: prehistoric WSL does not like TIOCSCTTY.
        # On BSD an explicit ioctl is required. For some inexplicable reason,
        # Python 2.6 on Travis also requires it.
        fcntl.ioctl(2, termios.TIOCSCTTY)
+
+
def _linux_broken_devpts_openpty():
    """
    #462: On broken Linux hosts with mismatched configuration (e.g. old
    /etc/fstab template installed), /dev/pts may be mounted without the gid=
    mount option, causing new slave devices to be created with the group ID of
    the calling process. This upsets glibc, whose openpty() is required by
    specification to produce a slave owned by a special group ID (which is
    always the 'tty' group).

    Glibc attempts to use "pt_chown" to fix ownership. If that fails, it
    chown()s the PTY directly, which fails due to non-root, causing openpty()
    to fail with EPERM ("Operation not permitted"). Since we don't need the
    magical TTY group to run sudo and su, open the PTY ourselves in this case.

    :returns:
        `(master_fd, slave_fd)`
    :raises mitogen.core.StreamError:
        Any step of the manual allocation failed.
    """
    master_fd = None
    try:
        # Opening /dev/ptmx causes a PTY pair to be allocated, and the
        # corresponding slave /dev/pts/* device to be created, owned by UID/GID
        # matching this process.
        master_fd = os.open('/dev/ptmx', os.O_RDWR)
        # Clear the lock bit from the PTY. This a prehistoric feature from a
        # time when slave device files were persistent.
        fcntl.ioctl(master_fd, LINUX_TIOCSPTLCK, struct.pack('i', 0))
        # Since v4.13 TIOCGPTPEER exists to open the slave in one step, but we
        # must support older kernels. Ask for the PTY number.
        pty_num_s = fcntl.ioctl(master_fd, LINUX_TIOCGPTN,
                                struct.pack('i', 0))
        pty_num, = struct.unpack('i', pty_num_s)
        pty_name = '/dev/pts/%d' % (pty_num,)
        # Now open it with O_NOCTTY to ensure it doesn't change our controlling
        # TTY. Otherwise when we close the FD we get killed by the kernel, and
        # the child we spawn that should really attach to it will get EPERM
        # during _acquire_controlling_tty().
        slave_fd = os.open(pty_name, os.O_RDWR|os.O_NOCTTY)
        return master_fd, slave_fd
    except OSError:
        # Release the master if the slave could not be opened.
        if master_fd is not None:
            os.close(master_fd)
        e = sys.exc_info()[1]
        raise mitogen.core.StreamError(OPENPTY_MSG, e)
+
+
def openpty():
    """
    Call :func:`os.openpty`, raising a descriptive error if the call fails.

    :raises mitogen.core.StreamError:
        Creating a PTY failed.
    :returns:
        `(master_fd, slave_fd)`, as for :func:`os.openpty`.
    """
    try:
        return os.openpty()
    except OSError:
        exc = sys.exc_info()[1]
        # #462: glibc openpty() fails with EPERM on hosts with a
        # misconfigured /dev/pts; fall back to manual allocation there.
        if not (IS_LINUX and exc.args[0] == errno.EPERM):
            raise mitogen.core.StreamError(OPENPTY_MSG, exc)
        return _linux_broken_devpts_openpty()
+
+
def tty_create_child(args):
    """
    Return a file descriptor connected to the master end of a pseudo-terminal,
    whose slave end is connected to stdin/stdout/stderr of a new child process.
    The child is created such that the pseudo-terminal becomes its controlling
    TTY, ensuring access to /dev/tty returns a new file descriptor open on the
    slave end.

    :param list args:
        :py:func:`os.execl` argument list.

    :returns:
        `(pid, tty_fd, None)`
    """
    master_fd, slave_fd = openpty()
    try:
        mitogen.core.set_block(slave_fd)
        # Disable echo on both ends so the child does not see its own input
        # reflected back.
        disable_echo(master_fd)
        disable_echo(slave_fd)

        pid = detach_popen(
            args=args,
            stdin=slave_fd,
            stdout=slave_fd,
            stderr=slave_fd,
            preexec_fn=_acquire_controlling_tty,
            close_fds=True,
        )
    except Exception:
        # Spawn failed: release both PTY descriptors before re-raising.
        os.close(master_fd)
        os.close(slave_fd)
        raise

    # The child inherited its own copies of the slave; the parent only needs
    # the master end.
    os.close(slave_fd)
    LOG.debug('tty_create_child() child %d fd %d, parent %d, cmd: %s',
              pid, master_fd, os.getpid(), Argv(args))
    return pid, master_fd, None
+
+
def hybrid_tty_create_child(args):
    """
    Like :func:`tty_create_child`, except attach stdin/stdout to a socketpair
    like :func:`create_child`, but leave stderr and the controlling TTY
    attached to a TTY.

    :param list args:
        :py:func:`os.execl` argument list.

    :returns:
        `(pid, socketpair_fd, tty_fd)`
    """
    master_fd, slave_fd = openpty()

    try:
        disable_echo(master_fd)
        disable_echo(slave_fd)
        mitogen.core.set_block(slave_fd)

        parentfp, childfp = create_socketpair()
        try:
            # NOTE(review): set_block() receives the socket object here but a
            # fileno() elsewhere in this file -- confirm it accepts both.
            mitogen.core.set_block(childfp)
            pid = detach_popen(
                args=args,
                stdin=childfp,
                stdout=childfp,
                stderr=slave_fd,
                preexec_fn=_acquire_controlling_tty,
                close_fds=True,
            )
        except Exception:
            # Inner failure: release the socketpair; the outer handler
            # releases the PTY pair.
            parentfp.close()
            childfp.close()
            raise
    except Exception:
        os.close(master_fd)
        os.close(slave_fd)
        raise

    # The child owns its own copies of the slave end and socket.
    os.close(slave_fd)
    childfp.close()
    # Decouple the socket from the lifetime of the Python socket object.
    stdio_fd = os.dup(parentfp.fileno())
    parentfp.close()

    LOG.debug('hybrid_tty_create_child() pid=%d stdio=%d, tty=%d, cmd: %s',
              pid, stdio_fd, master_fd, Argv(args))
    return pid, stdio_fd, master_fd
+
+
def write_all(fd, s, deadline=None):
    """Arrange for all of bytestring `s` to be written to the file descriptor
    `fd`.

    :param int fd:
        File descriptor to write to.
    :param bytes s:
        Bytestring to write to file descriptor.
    :param float deadline:
        If not :data:`None`, absolute UNIX timestamp after which timeout should
        occur.

    :raises mitogen.core.TimeoutError:
        Bytestring could not be written entirely before deadline was exceeded.
    :raises mitogen.parent.EofError:
        Stream indicated EOF, suggesting the child process has exitted.
    :raises mitogen.core.StreamError:
        File descriptor was disconnected before write could complete.
    """
    timeout = None
    written = 0
    poller = PREFERRED_POLLER()
    poller.start_transmit(fd)

    try:
        while written < len(s):
            if deadline is not None:
                timeout = max(0, deadline - time.time())
            if timeout == 0:
                raise mitogen.core.TimeoutError('write timed out')

            # Zero-copy view of the unwritten suffix; buffer() on Python 2,
            # memoryview() on Python 3.
            if mitogen.core.PY3:
                window = memoryview(s)[written:]
            else:
                window = buffer(s, written)

            # Poller data defaults to the registered fd, so this rebinds the
            # `fd` parameter to the (same) writable descriptor.
            for fd in poller.poll(timeout):
                n, disconnected = mitogen.core.io_op(os.write, fd, window)
                if disconnected:
                    raise EofError('EOF on stream during write')

                written += n
    finally:
        poller.close()
+
+
class PartialZlib(object):
    """
    Because the mitogen.core source has a line appended to it during bootstrap,
    it must be recompressed for each connection. This is not a problem for a
    small number of connections, but it amounts to 30 seconds CPU time by the
    time 500 targets are in use.

    For that reason, build a compressor containing mitogen.core and flush as
    much of it as possible into an initial buffer. Then to append the custom
    line, clone the compressor and compress just that line.

    A full compression costs ~6ms on a modern machine, this method costs ~35
    usec.
    """
    def __init__(self, s):
        self.s = s
        if sys.version_info > (2, 5):
            # Pre-compress the common prefix once; Z_SYNC_FLUSH emits the
            # pending output without terminating the stream, so the
            # compressor can be cloned and continued later.
            compressor = zlib.compressobj(9)
            prefix = compressor.compress(s)
            prefix += compressor.flush(zlib.Z_SYNC_FLUSH)
            self._compressor = compressor
            self._out = prefix
        else:
            # compressobj.copy() is unavailable; fall back to one-shot
            # compression in append().
            self._compressor = None

    def append(self, s):
        """
        Append the bytestring `s` to the compressor state and return the
        final compressed output.
        """
        if self._compressor is None:
            return zlib.compress(self.s + s, 9)
        clone = self._compressor.copy()
        return self._out + clone.compress(s) + clone.flush()
+
+
class IteratingRead(object):
    """
    Iterator yielding chunks of up to 4096 bytes read from a set of file
    descriptors, until every descriptor signals EOF/disconnect or an optional
    deadline expires.

    :param fds:
        Iterable of file descriptors to read from.
    :param float deadline:
        If not :data:`None`, absolute UNIX timestamp after which
        :class:`mitogen.core.TimeoutError` is raised.
    """
    def __init__(self, fds, deadline=None):
        self.deadline = deadline
        self.poller = PREFERRED_POLLER()
        for fd in fds:
            self.poller.start_receive(fd)

        # All chunks received so far, kept for the EofError message.
        self.bits = []
        # Remaining seconds before `deadline`; recomputed on each iteration.
        # (Was redundantly assigned twice in the original __init__.)
        self.timeout = None

    def close(self):
        """Release the underlying poller."""
        self.poller.close()

    def __iter__(self):
        return self

    def next(self):
        while self.poller.readers:
            if self.deadline is not None:
                self.timeout = max(0, self.deadline - time.time())
                if self.timeout == 0:
                    break

            for fd in self.poller.poll(self.timeout):
                s, disconnected = mitogen.core.io_op(os.read, fd, 4096)
                if disconnected or not s:
                    LOG.debug('iter_read(%r) -> disconnected: %s',
                              fd, disconnected)
                    self.poller.stop_receive(fd)
                else:
                    IOLOG.debug('iter_read(%r) -> %r', fd, s)
                    self.bits.append(s)
                    return s

        if not self.poller.readers:
            raise EofError(u'EOF on stream; last 300 bytes received: %r' %
                           (b('').join(self.bits)[-300:].decode('latin1'),))

        raise mitogen.core.TimeoutError('read timed out')

    # Python 3 iterator protocol alias.
    __next__ = next
+
+
def iter_read(fds, deadline=None):
    """Return a generator that arranges for up to 4096-byte chunks to be read
    at a time from the file descriptors `fds` until the generator is destroyed.

    :param list fds:
        File descriptors to read from.
    :param float deadline:
        If not :data:`None`, an absolute UNIX timestamp after which timeout
        should occur.

    :raises mitogen.core.TimeoutError:
        Attempt to read beyond deadline.
    :raises mitogen.parent.EofError:
        All streams indicated EOF, suggesting the child process has exitted.
    :raises mitogen.core.StreamError:
        Attempt to read past end of file.
    """
    return IteratingRead(fds=fds, deadline=deadline)
+
+
def discard_until(fd, s, deadline):
    """Read and discard chunks from `fd` until one ends with the marker `s`.
    Used to skip output produced by ``/etc/profile``, ``/etc/motd`` and
    mandatory SSH banners while waiting for :attr:`Stream.EC0_MARKER`,
    indicating the first stage is ready to receive the compressed
    :mod:`mitogen.core` source.

    :param int fd:
        File descriptor to read from.
    :param bytes s:
        Marker string to discard until encountered.
    :param float deadline:
        Absolute UNIX timestamp after which timeout should occur.

    :raises mitogen.core.TimeoutError:
        Attempt to read beyond deadline.
    :raises mitogen.parent.EofError:
        All streams indicated EOF, suggesting the child process has exitted.
    :raises mitogen.core.StreamError:
        Attempt to read past end of file.
    """
    reader = iter_read([fd], deadline)
    try:
        for chunk in reader:
            if IOLOG.level == logging.DEBUG:
                for line in chunk.splitlines():
                    IOLOG.debug('discard_until: discarding %r', line)
            if chunk.endswith(s):
                return
    finally:
        # Ensure Poller.close() runs even on timeout/EOF.
        reader.close()
+
+
def _upgrade_broker(broker):
    """
    Extract the poller state from Broker and replace it with the industrial
    strength poller for this OS. Must run on the Broker thread.
    """
    # This function is deadly! The act of calling start_receive() generates log
    # messages which must be silenced as the upgrade progresses, otherwise the
    # poller state will change as it is copied, resulting in write fds that are
    # lost. (Due to LogHandler->Router->Stream->Broker->Poller, where Stream
    # only calls start_transmit() when transitioning from empty to non-empty
    # buffer. If the start_transmit() is lost, writes from the child hang
    # permanently).
    root = logging.getLogger()
    old_level = root.level
    root.setLevel(logging.CRITICAL)

    # Copy every registered reader/writer from the old poller into the
    # preferred implementation, then swap it in.
    old = broker.poller
    new = PREFERRED_POLLER()
    for fd, data in old.readers:
        new.start_receive(fd, data)
    for fd, data in old.writers:
        new.start_transmit(fd, data)

    old.close()
    broker.poller = new
    # Logging is safe again once the swap is complete.
    root.setLevel(old_level)
    LOG.debug('replaced %r with %r (new: %d readers, %d writers; '
              'old: %d readers, %d writers)', old, new,
              len(new.readers), len(new.writers),
              len(old.readers), len(old.writers))
+
+
@mitogen.core.takes_econtext
def upgrade_router(econtext):
    """
    Upgrade the current context into a parent-capable one: swap the broker's
    poller for the preferred implementation and convert the externally
    created Router in place (via ``__class__`` reassignment). A no-op if the
    router was already upgraded.
    """
    if not isinstance(econtext.router, Router):  # TODO
        econtext.broker.defer(_upgrade_broker, econtext.broker)
        econtext.router.__class__ = Router  # TODO
        econtext.router.upgrade(
            importer=econtext.importer,
            parent=econtext.parent,
        )
+
+
def stream_by_method_name(name):
    """
    Given the name of a Mitogen connection method, import its implementation
    module and return the Stream subclass it defines.
    """
    # The 'local' method is implemented by mitogen.parent itself.
    mod_name = u'parent' if name == u'local' else name
    module = mitogen.core.import_module(u'mitogen.' + mod_name)
    return module.Stream
+
+
@mitogen.core.takes_econtext
def _proxy_connect(name, method_name, kwargs, econtext):
    """
    Implements the target portion of Router._proxy_connect() by upgrading the
    local context to a parent if it was not already, then calling back into
    Router._connect() using the arguments passed to the parent's
    Router.connect().

    :returns:
        Dict containing:
        * ``id``: :data:`None`, or integer new context ID.
        * ``name``: :data:`None`, or string name attribute of new Context.
        * ``msg``: :data:`None`, or StreamError exception text.
    """
    upgrade_router(econtext)

    try:
        context = econtext.router._connect(
            klass=stream_by_method_name(method_name),
            name=name,
            **kwargs
        )
    except mitogen.core.StreamError:
        # Connection failed: report the error text back to the parent rather
        # than propagating the exception.
        return {
            u'id': None,
            u'name': None,
            u'msg': 'error occurred on host %s: %s' % (
                socket.gethostname(),
                sys.exc_info()[1],
            ),
        }

    return {
        u'id': context.context_id,
        u'name': context.name,
        u'msg': None,
    }
+
+
def wstatus_to_str(status):
    """
    Parse a :func:`os.waitpid` exit status and format it as a human-readable
    description.
    """
    if os.WIFEXITED(status):
        return 'exited with return code %d' % (os.WEXITSTATUS(status),)
    elif os.WIFSIGNALED(status):
        signum = os.WTERMSIG(status)
        return 'exited due to signal %d (%s)' % (signum, SIGNAL_BY_NUM.get(signum))
    elif os.WIFSTOPPED(status):
        signum = os.WSTOPSIG(status)
        return 'stopped due to signal %d (%s)' % (signum, SIGNAL_BY_NUM.get(signum))
    return 'unknown wait status (%d)' % (status,)
+
+
class EofError(mitogen.core.StreamError):
    """
    Raised by :func:`iter_read` and :func:`write_all` when EOF is detected by
    the child process.
    """
    # inherits from StreamError to maintain compatibility.
    pass
+
+
class Argv(object):
    """
    Lazily render an argument vector as a shell-quoted string, deferring the
    formatting work until debug logging actually stringifies it.
    """
    # Characters that require a backslash inside double quotes.
    must_escape = frozenset('\\$"`!')
    # Characters that force the word to be double-quoted at all.
    must_escape_or_space = must_escape | frozenset(' ')

    def __init__(self, argv):
        self.argv = argv

    def escape(self, x):
        if not self.must_escape_or_space.intersection(x):
            return x

        out = ['"']
        for ch in x:
            if ch in self.must_escape:
                out.append('\\')
            out.append(ch)
        out.append('"')
        return ''.join(out)

    def __str__(self):
        return ' '.join(self.escape(word) for word in self.argv)
+
+
class CallSpec(object):
    """
    Lazily render a function call (name, positional and keyword arguments) as
    a string, deferring the work until debug logging stringifies it.
    """
    def __init__(self, func, args, kwargs):
        self.func = func
        self.args = args
        self.kwargs = kwargs

    def _get_name(self):
        # Qualify as module[.class].function; bound methods include the name
        # of the instance (or its type, as a fallback).
        parts = [self.func.__module__]
        if inspect.ismethod(self.func):
            im_self = getattr(self.func, IM_SELF_ATTR)
            owner = (getattr(im_self, '__name__', None) or
                     getattr(type(im_self), '__name__', None))
            parts.append(owner)
        parts.append(self.func.__name__)
        return u'.'.join(parts)

    def _get_args(self):
        return u', '.join(map(repr, self.args))

    def _get_kwargs(self):
        if not self.kwargs:
            return u''
        rendered = u', '.join('%s=%r' % (k, v) for k, v in self.kwargs.items())
        if self.args:
            rendered = u', ' + rendered
        return rendered

    def __repr__(self):
        return '%s(%s%s)' % (
            self._get_name(),
            self._get_args(),
            self._get_kwargs(),
        )
+
+
class PollPoller(mitogen.core.Poller):
    """
    Poller based on the POSIX poll(2) interface. Not available on some versions
    of OS X, otherwise it is the preferred poller for small FD counts.
    """
    SUPPORTED = hasattr(select, 'poll')
    _repr = 'PollPoller()'

    def __init__(self):
        super(PollPoller, self).__init__()
        self._pollobj = select.poll()

    # TODO: no proof we dont need writemask too
    # POLLHUP is included so hangups wake readers; constants are fetched
    # defensively since not every platform defines both.
    _readmask = (
        getattr(select, 'POLLIN', 0) |
        getattr(select, 'POLLHUP', 0)
    )

    def _update(self, fd):
        # Recompute the event mask for `fd` from the base-class fd maps and
        # (re-)register it, or unregister it when no events remain.
        mask = (((fd in self._rfds) and self._readmask) |
                ((fd in self._wfds) and select.POLLOUT))
        if mask:
            self._pollobj.register(fd, mask)
        else:
            try:
                self._pollobj.unregister(fd)
            except KeyError:
                # fd was never registered; nothing to remove.
                pass

    def _poll(self, timeout):
        # poll(2) expects milliseconds; the caller supplies seconds.
        if timeout:
            timeout *= 1000

        events, _ = mitogen.core.io_op(self._pollobj.poll, timeout)
        for fd, event in events:
            if event & self._readmask:
                IOLOG.debug('%r: POLLIN|POLLHUP for %r', self, fd)
                data, gen = self._rfds.get(fd, (None, None))
                # Only yield events registered in an earlier generation,
                # skipping fds (re-)registered while this poll was running.
                if gen and gen < self._generation:
                    yield data
            if event & select.POLLOUT:
                IOLOG.debug('%r: POLLOUT for %r', self, fd)
                data, gen = self._wfds.get(fd, (None, None))
                if gen and gen < self._generation:
                    yield data
+
+
class KqueuePoller(mitogen.core.Poller):
    """
    Poller based on the FreeBSD/Darwin kqueue(2) interface.
    """
    SUPPORTED = hasattr(select, 'kqueue')
    _repr = 'KqueuePoller()'

    def __init__(self):
        super(KqueuePoller, self).__init__()
        self._kqueue = select.kqueue()
        self._changelist = []

    def close(self):
        super(KqueuePoller, self).close()
        self._kqueue.close()

    def _control(self, fd, filters, flags):
        """
        Apply a single kevent change (add/delete a read or write filter for
        `fd`) immediately.
        """
        mitogen.core._vv and IOLOG.debug(
            '%r._control(%r, %r, %r)', self, fd, filters, flags)
        # TODO: at shutdown it is currently possible for KQ_EV_ADD/KQ_EV_DEL
        # pairs to be pending after the associated file descriptor has already
        # been closed. Fixing this requires maintaining extra state, or perhaps
        # making fd closure the poller's responsibility. In the meantime,
        # simply apply changes immediately.
        # self._changelist.append(select.kevent(fd, filters, flags))
        changelist = [select.kevent(fd, filters, flags)]
        events, _ = mitogen.core.io_op(self._kqueue.control, changelist, 0, 0)
        assert not events

    def start_receive(self, fd, data=None):
        mitogen.core._vv and IOLOG.debug('%r.start_receive(%r, %r)',
            self, fd, data)
        if fd not in self._rfds:
            self._control(fd, select.KQ_FILTER_READ, select.KQ_EV_ADD)
        self._rfds[fd] = (data or fd, self._generation)

    def stop_receive(self, fd):
        mitogen.core._vv and IOLOG.debug('%r.stop_receive(%r)', self, fd)
        if fd in self._rfds:
            self._control(fd, select.KQ_FILTER_READ, select.KQ_EV_DELETE)
            del self._rfds[fd]

    def start_transmit(self, fd, data=None):
        mitogen.core._vv and IOLOG.debug('%r.start_transmit(%r, %r)',
            self, fd, data)
        if fd not in self._wfds:
            self._control(fd, select.KQ_FILTER_WRITE, select.KQ_EV_ADD)
        self._wfds[fd] = (data or fd, self._generation)

    def stop_transmit(self, fd):
        mitogen.core._vv and IOLOG.debug('%r.stop_transmit(%r)', self, fd)
        if fd in self._wfds:
            self._control(fd, select.KQ_FILTER_WRITE, select.KQ_EV_DELETE)
            del self._wfds[fd]

    def _poll(self, timeout):
        # _changelist is always empty while _control() applies changes
        # eagerly; see the TODO above.
        changelist = self._changelist
        self._changelist = []
        events, _ = mitogen.core.io_op(self._kqueue.control,
            changelist, 32, timeout)
        for event in events:
            fd = event.ident
            if event.flags & select.KQ_EV_ERROR:
                LOG.debug('ignoring stale event for fd %r: errno=%d: %s',
                          fd, event.data, errno.errorcode.get(event.data))
            elif event.filter == select.KQ_FILTER_READ:
                data, gen = self._rfds.get(fd, (None, None))
                # Events can still be read for an already-discarded fd.
                if gen and gen < self._generation:
                    mitogen.core._vv and IOLOG.debug('%r: POLLIN: %r', self, fd)
                    yield data
            elif event.filter == select.KQ_FILTER_WRITE and fd in self._wfds:
                data, gen = self._wfds.get(fd, (None, None))
                if gen and gen < self._generation:
                    mitogen.core._vv and IOLOG.debug('%r: POLLOUT: %r', self, fd)
                    yield data
+
+
class EpollPoller(mitogen.core.Poller):
    """
    Poller based on the Linux epoll(2) interface.
    """
    SUPPORTED = hasattr(select, 'epoll')
    _repr = 'EpollPoller()'

    def __init__(self):
        super(EpollPoller, self).__init__()
        self._epoll = select.epoll(32)
        # fds currently registered with the epoll object; epoll requires
        # register/modify/unregister to be called in the correct state.
        self._registered_fds = set()

    def close(self):
        super(EpollPoller, self).close()
        self._epoll.close()

    def _control(self, fd):
        """
        Recompute the event mask for `fd` from the base-class fd maps, then
        register, modify or unregister it with epoll as appropriate.
        """
        mitogen.core._vv and IOLOG.debug('%r._control(%r)', self, fd)
        mask = (((fd in self._rfds) and select.EPOLLIN) |
                ((fd in self._wfds) and select.EPOLLOUT))
        if mask:
            if fd in self._registered_fds:
                self._epoll.modify(fd, mask)
            else:
                self._epoll.register(fd, mask)
                self._registered_fds.add(fd)
        elif fd in self._registered_fds:
            self._epoll.unregister(fd)
            self._registered_fds.remove(fd)

    def start_receive(self, fd, data=None):
        mitogen.core._vv and IOLOG.debug('%r.start_receive(%r, %r)',
            self, fd, data)
        self._rfds[fd] = (data or fd, self._generation)
        self._control(fd)

    def stop_receive(self, fd):
        mitogen.core._vv and IOLOG.debug('%r.stop_receive(%r)', self, fd)
        self._rfds.pop(fd, None)
        self._control(fd)

    def start_transmit(self, fd, data=None):
        mitogen.core._vv and IOLOG.debug('%r.start_transmit(%r, %r)',
            self, fd, data)
        self._wfds[fd] = (data or fd, self._generation)
        self._control(fd)

    def stop_transmit(self, fd):
        mitogen.core._vv and IOLOG.debug('%r.stop_transmit(%r)', self, fd)
        self._wfds.pop(fd, None)
        self._control(fd)

    # EPOLLHUP is included so hangups wake readers; constants fetched
    # defensively in case a platform lacks one.
    _inmask = (getattr(select, 'EPOLLIN', 0) |
               getattr(select, 'EPOLLHUP', 0))

    def _poll(self, timeout):
        # epoll.poll() blocks forever on -1; the caller passes None for that.
        the_timeout = -1
        if timeout is not None:
            the_timeout = timeout

        events, _ = mitogen.core.io_op(self._epoll.poll, the_timeout, 32)
        for fd, event in events:
            if event & self._inmask:
                data, gen = self._rfds.get(fd, (None, None))
                if gen and gen < self._generation:
                    # Events can still be read for an already-discarded fd.
                    mitogen.core._vv and IOLOG.debug('%r: POLLIN: %r', self, fd)
                    yield data
            if event & select.EPOLLOUT:
                data, gen = self._wfds.get(fd, (None, None))
                if gen and gen < self._generation:
                    mitogen.core._vv and IOLOG.debug('%r: POLLOUT: %r', self, fd)
                    yield data
+
+
# 2.4 and 2.5 only had select.select() and select.poll().
# Pick the best available poller: later entries in the tuple win, so the
# most capable SUPPORTED implementation ends up as PREFERRED_POLLER.
for _klass in mitogen.core.Poller, PollPoller, KqueuePoller, EpollPoller:
    if _klass.SUPPORTED:
        PREFERRED_POLLER = _klass

# For apps that start threads dynamically, it's possible Latch will also get
# very high-numbered wait fds when there are many connections, and so select()
# becomes useless there too. So swap in our favourite poller.
if PollPoller.SUPPORTED:
    mitogen.core.Latch.poller_class = PollPoller
else:
    mitogen.core.Latch.poller_class = PREFERRED_POLLER
+
+
+class DiagLogStream(mitogen.core.BasicStream):
+    """
+    For "hybrid TTY/socketpair" mode, after a connection has been setup, a
+    spare TTY file descriptor will exist that cannot be closed, and to which
+    SSH or sudo may continue writing log messages.
+
+    The descriptor cannot be closed since the UNIX TTY layer will send a
+    termination signal to any processes whose controlling TTY is the TTY that
+    has been closed.
+
+    DiagLogStream takes over this descriptor and creates corresponding log
+    messages for anything written to it.
+    """
+
+    def __init__(self, fd, stream):
+        self.receive_side = mitogen.core.Side(self, fd)
+        self.transmit_side = self.receive_side
+        self.stream = stream
+        self.buf = ''
+
+    def __repr__(self):
+        return "mitogen.parent.DiagLogStream(fd=%r, '%s')" % (
+            self.receive_side.fd,
+            self.stream.name,
+        )
+
+    def on_receive(self, broker):
+        """
+        This handler is only called after the stream is registered with the IO
+        loop, the descriptor is manually read/written by _connect_bootstrap()
+        prior to that.
+        """
+        buf = self.receive_side.read()
+        if not buf:
+            return self.on_disconnect(broker)
+
+        self.buf += buf.decode('utf-8', 'replace')
+        while u'\n' in self.buf:
+            lines = self.buf.split('\n')
+            self.buf = lines[-1]
+            for line in lines[:-1]:
+                LOG.debug('%s: %s', self.stream.name, line.rstrip())
+
+
+class Stream(mitogen.core.Stream):
+    """
+    Base for streams capable of starting new slaves.
+    """
+    #: The path to the remote Python interpreter.
+    python_path = get_sys_executable()
+
+    #: Maximum time to wait for a connection attempt.
+    connect_timeout = 30.0
+
+    #: Derived from :py:attr:`connect_timeout`; absolute floating point
+    #: UNIX timestamp after which the connection attempt should be abandoned.
+    connect_deadline = None
+
+    #: True to cause context to write verbose /tmp/mitogen.<pid>.log.
+    debug = False
+
+    #: True to cause context to write /tmp/mitogen.stats.<pid>.<thread>.log.
+    profiling = False
+
+    #: Set to the child's PID by connect().
+    pid = None
+
+    #: Passed via Router wrapper methods, must eventually be passed to
+    #: ExternalContext.main().
+    max_message_size = None
+
+    #: If :attr:`create_child` supplied a diag_fd, references the corresponding
+    #: :class:`DiagLogStream`, allowing it to be disconnected when this stream
+    #: is disconnected. Set to :data:`None` if no `diag_fd` was present.
+    diag_stream = None
+
+    #: Function with the semantics of :func:`create_child` used to create the
+    #: child process.
+    create_child = staticmethod(create_child)
+
+    #: Dictionary of extra kwargs passed to :attr:`create_child`.
+    create_child_args = {}
+
+    #: :data:`True` if the remote has indicated that it intends to detach, and
+    #: should not be killed on disconnect.
+    detached = False
+
+    #: If :data:`True`, indicates the child should not be killed during
+    #: graceful detachment, as it the actual process implementing the child
+    #: context. In all other cases, the subprocess is SSH, sudo, or a similar
+    #: tool that should be reminded to quit during disconnection.
+    child_is_immediate_subprocess = True
+
+    #: Prefix given to default names generated by :meth:`connect`.
+    name_prefix = u'local'
+
+    _reaped = False
+
+    def __init__(self, *args, **kwargs):
+        super(Stream, self).__init__(*args, **kwargs)
+        self.sent_modules = set(['mitogen', 'mitogen.core'])
+
+    def construct(self, max_message_size, remote_name=None, python_path=None,
+                  debug=False, connect_timeout=None, profiling=False,
+                  unidirectional=False, old_router=None, **kwargs):
+        """Get the named context running on the local machine, creating it if
+        it does not exist."""
+        super(Stream, self).construct(**kwargs)
+        self.max_message_size = max_message_size
+        if python_path:
+            self.python_path = python_path
+        if connect_timeout:
+            self.connect_timeout = connect_timeout
+        if remote_name is None:
+            remote_name = get_default_remote_name()
+        if '/' in remote_name or '\\' in remote_name:
+            raise ValueError('remote_name= cannot contain slashes')
+        self.remote_name = remote_name
+        self.debug = debug
+        self.profiling = profiling
+        self.unidirectional = unidirectional
+        self.max_message_size = max_message_size
+        self.connect_deadline = time.time() + self.connect_timeout
+
+    def on_shutdown(self, broker):
+        """Request the slave gracefully shut itself down."""
+        LOG.debug('%r closing CALL_FUNCTION channel', self)
+        self._send(
+            mitogen.core.Message(
+                src_id=mitogen.context_id,
+                dst_id=self.remote_id,
+                handle=mitogen.core.SHUTDOWN,
+            )
+        )
+
+    def _reap_child(self):
+        """
+        Reap the child process during disconnection.
+        """
+        if self.detached and self.child_is_immediate_subprocess:
+            LOG.debug('%r: immediate child is detached, won\'t reap it', self)
+            return
+
+        if self.profiling:
+            LOG.info('%r: wont kill child because profiling=True', self)
+            return
+
+        if self._reaped:
+            # on_disconnect() may be invoked more than once, for example, if
+            # there is still a pending message to be sent after the first
+            # on_disconnect() call.
+            return
+
+        try:
+            pid, status = os.waitpid(self.pid, os.WNOHANG)
+        except OSError:
+            e = sys.exc_info()[1]
+            if e.args[0] == errno.ECHILD:
+                LOG.warn('%r: waitpid(%r) produced ECHILD', self, self.pid)
+                return
+            raise
+
+        self._reaped = True
+        if pid:
+            LOG.debug('%r: PID %d %s', self, pid, wstatus_to_str(status))
+            return
+
+        if not self._router.profiling:
+            # For processes like sudo we cannot actually send sudo a signal,
+            # because it is setuid, so this is best-effort only.
+            LOG.debug('%r: child process still alive, sending SIGTERM', self)
+            try:
+                os.kill(self.pid, signal.SIGTERM)
+            except OSError:
+                e = sys.exc_info()[1]
+                if e.args[0] != errno.EPERM:
+                    raise
+
+    def on_disconnect(self, broker):
+        super(Stream, self).on_disconnect(broker)
+        if self.diag_stream is not None:
+            self.diag_stream.on_disconnect(broker)
+        self._reap_child()
+
+    # Minimised, gzipped, base64'd and passed to 'python -c'. It forks, dups
+    # file descriptor 0 as 100, creates a pipe, then execs a new interpreter
+    # with a custom argv.
+    #   * Optimized for minimum byte count after minification & compression.
+    #   * 'CONTEXT_NAME' and 'PREAMBLE_COMPRESSED_LEN' are substituted with
+    #     their respective values.
+    #   * CONTEXT_NAME must be prefixed with the name of the Python binary in
+    #     order to allow virtualenvs to detect their install prefix.
+    #   * For Darwin, OS X installs a craptacular argv0-introspecting Python
+    #     version switcher as /usr/bin/python. Override attempts to call it
+    #     with an explicit call to python2.7
+    #
+    # Locals:
+    #   R: read side of interpreter stdin.
+    #   W: write side of interpreter stdin.
+    #   r: read side of core_src FD.
+    #   w: write side of core_src FD.
+    #   C: the decompressed core source.
+
+    # Final os.close(2) to avoid --py-debug build from corrupting stream with
+    # "[1234 refs]" during exit.
+    @staticmethod
+    def _first_stage():
+        R,W=os.pipe()
+        r,w=os.pipe()
+        if os.fork():
+            os.dup2(0,100)
+            os.dup2(R,0)
+            os.dup2(r,101)
+            os.close(R)
+            os.close(r)
+            os.close(W)
+            os.close(w)
+            if sys.platform == 'darwin' and sys.executable == '/usr/bin/python':
+                sys.executable += sys.version[:3]
+            os.environ['ARGV0']=sys.executable
+            os.execl(sys.executable,sys.executable+'(mitogen:CONTEXT_NAME)')
+        os.write(1,'MITO000\n'.encode())
+        C=_(os.fdopen(0,'rb').read(PREAMBLE_COMPRESSED_LEN),'zip')
+        fp=os.fdopen(W,'wb',0)
+        fp.write(C)
+        fp.close()
+        fp=os.fdopen(w,'wb',0)
+        fp.write(C)
+        fp.close()
+        os.write(1,'MITO001\n'.encode())
+        os.close(2)
+
+    def get_python_argv(self):
+        """
+        Return the initial argument vector elements necessary to invoke Python,
+        by returning a 1-element list containing :attr:`python_path` if it is a
+        string, or simply returning it if it is already a list.
+
+        This allows emulation of existing tools where the Python invocation may
+        be set to e.g. `['/usr/bin/env', 'python']`.
+        """
+        if isinstance(self.python_path, list):
+            return self.python_path
+        return [self.python_path]
+
+    def get_boot_command(self):
+        source = inspect.getsource(self._first_stage)
+        source = textwrap.dedent('\n'.join(source.strip().split('\n')[2:]))
+        source = source.replace('    ', '\t')
+        source = source.replace('CONTEXT_NAME', self.remote_name)
+        preamble_compressed = self.get_preamble()
+        source = source.replace('PREAMBLE_COMPRESSED_LEN',
+                                str(len(preamble_compressed)))
+        compressed = zlib.compress(source.encode(), 9)
+        encoded = codecs.encode(compressed, 'base64').replace(b('\n'), b(''))
+        # We can't use bytes.decode() in 3.x since it was restricted to always
+        # return unicode, so codecs.decode() is used instead. In 3.x
+        # codecs.decode() requires a bytes object. Since we must be compatible
+        # with 2.4 (no bytes literal), an extra .encode() either returns the
+        # same str (2.x) or an equivalent bytes (3.x).
+        return self.get_python_argv() + [
+            '-c',
+            'import codecs,os,sys;_=codecs.decode;'
+            'exec(_(_("%s".encode(),"base64"),"zip"))' % (encoded.decode(),)
+        ]
+
+    def get_econtext_config(self):
+        assert self.max_message_size is not None
+        parent_ids = mitogen.parent_ids[:]
+        parent_ids.insert(0, mitogen.context_id)
+        return {
+            'parent_ids': parent_ids,
+            'context_id': self.remote_id,
+            'debug': self.debug,
+            'profiling': self.profiling,
+            'unidirectional': self.unidirectional,
+            'log_level': get_log_level(),
+            'whitelist': self._router.get_module_whitelist(),
+            'blacklist': self._router.get_module_blacklist(),
+            'max_message_size': self.max_message_size,
+            'version': mitogen.__version__,
+        }
+
+    def get_preamble(self):
+        suffix = (
+            '\nExternalContext(%r).main()\n' %\
+            (self.get_econtext_config(),)
+        )
+        partial = get_core_source_partial()
+        return partial.append(suffix.encode('utf-8'))
+
+    def start_child(self):
+        args = self.get_boot_command()
+        try:
+            return self.create_child(args, **self.create_child_args)
+        except OSError:
+            e = sys.exc_info()[1]
+            msg = 'Child start failed: %s. Command was: %s' % (e, Argv(args))
+            raise mitogen.core.StreamError(msg)
+
+    eof_error_hint = None
+
+    def _adorn_eof_error(self, e):
+        """
+        Used by subclasses to provide additional information in the case of a
+        failed connection.
+        """
+        if self.eof_error_hint:
+            e.args = ('%s\n\n%s' % (e.args[0], self.eof_error_hint),)
+
+    def _get_name(self):
+        """
+        Called by :meth:`connect` after :attr:`pid` is known. Subclasses can
+        override it to specify a default stream name, or set
+        :attr:`name_prefix` to generate a default format.
+        """
+        return u'%s.%s' % (self.name_prefix, self.pid)
+
+    def connect(self):
+        LOG.debug('%r.connect()', self)
+        self.pid, fd, diag_fd = self.start_child()
+        self.name = self._get_name()
+        self.receive_side = mitogen.core.Side(self, fd)
+        self.transmit_side = mitogen.core.Side(self, os.dup(fd))
+        if diag_fd is not None:
+            self.diag_stream = DiagLogStream(diag_fd, self)
+        else:
+            self.diag_stream = None
+
+        LOG.debug('%r.connect(): pid:%r stdin:%r, stdout:%r, diag:%r',
+                  self, self.pid, self.receive_side.fd, self.transmit_side.fd,
+                  self.diag_stream and self.diag_stream.receive_side.fd)
+
+        try:
+            self._connect_bootstrap()
+        except EofError:
+            self.on_disconnect(self._router.broker)
+            e = sys.exc_info()[1]
+            self._adorn_eof_error(e)
+            raise
+        except Exception:
+            self.on_disconnect(self._router.broker)
+            self._reap_child()
+            raise
+
+    #: Sentinel value emitted by the first stage to indicate it is ready to
+    #: receive the compressed bootstrap. For :mod:`mitogen.ssh` this must have
+    #: length of at least `max(len('password'), len('debug1:'))`
+    EC0_MARKER = mitogen.core.b('MITO000\n')
+    EC1_MARKER = mitogen.core.b('MITO001\n')
+
+    def _ec0_received(self):
+        LOG.debug('%r._ec0_received()', self)
+        write_all(self.transmit_side.fd, self.get_preamble())
+        discard_until(self.receive_side.fd, self.EC1_MARKER,
+                      self.connect_deadline)
+        if self.diag_stream:
+            self._router.broker.start_receive(self.diag_stream)
+
+    def _connect_bootstrap(self):
+        discard_until(self.receive_side.fd, self.EC0_MARKER,
+                      self.connect_deadline)
+        self._ec0_received()
+
+
+class ChildIdAllocator(object):
+    """
+    Allocate context IDs for children by requesting ranges of IDs from the
+    master (context 0) on demand via ALLOCATE_ID.
+    """
+    def __init__(self, router):
+        self.router = router
+        self.lock = threading.Lock()
+        # Iterator over the current ID range; starts exhausted so the first
+        # allocate() immediately fetches a fresh range from the master.
+        self.it = iter(xrange(0))
+
+    def allocate(self):
+        """
+        Return the next child context ID, fetching a new range from the
+        master when the current one is exhausted.
+        """
+        self.lock.acquire()
+        try:
+            # Fast path: take the next ID from the current range, if any.
+            for id_ in self.it:
+                return id_
+
+            # Range exhausted: ask context 0 for a new [start, end) range,
+            # then retry via recursion after releasing the lock.
+            master = mitogen.core.Context(self.router, 0)
+            start, end = master.send_await(
+                mitogen.core.Message(dst_id=0, handle=mitogen.core.ALLOCATE_ID)
+            )
+            self.it = iter(xrange(start, end))
+        finally:
+            self.lock.release()
+
+        return self.allocate()
+
+
+class CallChain(object):
+    """
+    Deliver :data:`mitogen.core.CALL_FUNCTION` messages to a target context,
+    optionally threading related calls so an exception in an earlier call
+    cancels subsequent calls.
+
+    :param mitogen.core.Context context:
+        Target context.
+    :param bool pipelined:
+        Enable pipelining.
+
+    :meth:`call`, :meth:`call_no_reply` and :meth:`call_async`
+    normally issue calls and produce responses with no memory of prior
+    exceptions. If a call made with :meth:`call_no_reply` fails, the exception
+    is logged to the target context's logging framework.
+
+    **Pipelining**
+
+    When pipelining is enabled, if an exception occurs during a call,
+    subsequent calls made by the same :class:`CallChain` fail with the same
+    exception, including those already in-flight on the network, and no further
+    calls execute until :meth:`reset` is invoked.
+
+    No exception is logged for calls made with :meth:`call_no_reply`, instead
+    the exception is saved and reported as the result of subsequent
+    :meth:`call` or :meth:`call_async` calls.
+
+    Sequences of asynchronous calls can be made without wasting network
+    round-trips to discover if prior calls succeed, and chains originating from
+    multiple unrelated source contexts may overlap concurrently at a target
+    context without interference.
+
+    In this example, 4 calls complete in one round-trip::
+
+        chain = mitogen.parent.CallChain(context, pipelined=True)
+        chain.call_no_reply(os.mkdir, '/tmp/foo')
+
+        # If previous mkdir() failed, this never runs:
+        chain.call_no_reply(os.mkdir, '/tmp/foo/bar')
+
+        # If either mkdir() failed, this never runs, and the exception is
+        # asynchronously delivered to the receiver.
+        recv = chain.call_async(subprocess.check_output, '/tmp/foo')
+
+        # If anything so far failed, this never runs, and raises the exception.
+        chain.call(do_something)
+
+        # If this code was executed, the exception would also be raised.
+        if recv.get().unpickle() == 'baz':
+            pass
+
+    When pipelining is enabled, :meth:`reset` must be invoked to ensure any
+    exception is discarded, otherwise unbounded memory usage is possible in
+    long-running programs. The context manager protocol is supported to ensure
+    :meth:`reset` is always invoked::
+
+        with mitogen.parent.CallChain(context, pipelined=True) as chain:
+            chain.call_no_reply(...)
+            chain.call_no_reply(...)
+            chain.call_no_reply(...)
+            chain.call(...)
+
+        # chain.reset() automatically invoked.
+    """
+    def __init__(self, context, pipelined=False):
+        self.context = context
+        # A chain ID is only allocated when pipelining; its presence in
+        # outgoing messages is what links calls into a chain.
+        if pipelined:
+            self.chain_id = self.make_chain_id()
+        else:
+            self.chain_id = None
+
+    @classmethod
+    def make_chain_id(cls):
+        # Unique across hosts, processes and threads: hostname, PID, thread
+        # ID and a microsecond timestamp.
+        return '%s-%s-%x-%x' % (
+            socket.gethostname(),
+            os.getpid(),
+            thread.get_ident(),
+            int(1e6 * time.time()),
+        )
+
+    def __repr__(self):
+        return '%s(%s)' % (self.__class__.__name__, self.context)
+
+    def __enter__(self):
+        return self
+
+    def __exit__(self, _1, _2, _3):
+        # Always discard any saved chain exception on scope exit.
+        self.reset()
+
+    def reset(self):
+        """
+        Instruct the target to forget any related exception.
+        """
+        if not self.chain_id:
+            # Not pipelined: there is nothing to forget.
+            return
+
+        # Temporarily clear chain_id so the forget_chain call itself is not
+        # tagged with the chain being reset, then restore it so the chain
+        # remains usable afterwards.
+        saved, self.chain_id = self.chain_id, None
+        try:
+            self.call_no_reply(mitogen.core.Dispatcher.forget_chain, saved)
+        finally:
+            self.chain_id = saved
+
+    closures_msg = (
+        'Mitogen cannot invoke closures, as doing so would require '
+        'serializing arbitrary program state, and no universal '
+        'method exists to recover a reference to them.'
+    )
+
+    lambda_msg = (
+        'Mitogen cannot invoke anonymous functions, as no universal method '
+        'exists to recover a reference to an anonymous function.'
+    )
+
+    method_msg = (
+        'Mitogen cannot invoke instance methods, as doing so would require '
+        'serializing arbitrary program state.'
+    )
+
+    def make_msg(self, fn, *args, **kwargs):
+        """
+        Build the pickled CALL_FUNCTION message describing an invocation of
+        `fn`, rejecting callables that cannot be referenced by name from
+        module scope (closures, lambdas, bound instance methods).
+        """
+        if getattr(fn, closure_attr, None) is not None:
+            raise TypeError(self.closures_msg)
+        if fn.__name__ == '<lambda>':
+            raise TypeError(self.lambda_msg)
+
+        if inspect.ismethod(fn):
+            # Only class methods are supported; the class name is included
+            # so the target can rebind the method.
+            im_self = getattr(fn, IM_SELF_ATTR)
+            if not inspect.isclass(im_self):
+                raise TypeError(self.method_msg)
+            klass = mitogen.core.to_text(im_self.__name__)
+        else:
+            klass = None
+
+        tup = (
+            self.chain_id,
+            mitogen.core.to_text(fn.__module__),
+            klass,
+            mitogen.core.to_text(fn.__name__),
+            args,
+            mitogen.core.Kwargs(kwargs)
+        )
+        return mitogen.core.Message.pickled(tup,
+            handle=mitogen.core.CALL_FUNCTION)
+
+    def call_no_reply(self, fn, *args, **kwargs):
+        """
+        Like :meth:`call_async`, but do not wait for a return value, and inform
+        the target context no reply is expected. If the call fails and
+        pipelining is disabled, the exception will be logged to the target
+        context's logging framework.
+        """
+        LOG.debug('%r.call_no_reply(): %r', self, CallSpec(fn, args, kwargs))
+        self.context.send(self.make_msg(fn, *args, **kwargs))
+
+    def call_async(self, fn, *args, **kwargs):
+        """
+        Arrange for `fn(*args, **kwargs)` to be invoked on the context's main
+        thread.
+
+        :param fn:
+            A free function in module scope or a class method of a class
+            directly reachable from module scope:
+
+            .. code-block:: python
+
+                # mymodule.py
+
+                def my_func():
+                    '''A free function reachable as mymodule.my_func'''
+
+                class MyClass:
+                    @classmethod
+                    def my_classmethod(cls):
+                        '''Reachable as mymodule.MyClass.my_classmethod'''
+
+                    def my_instancemethod(self):
+                        '''Unreachable: requires a class instance!'''
+
+                    class MyEmbeddedClass:
+                        @classmethod
+                        def my_classmethod(cls):
+                            '''Not directly reachable from module scope!'''
+
+        :param tuple args:
+            Function arguments, if any. See :ref:`serialization-rules` for
+            permitted types.
+        :param dict kwargs:
+            Function keyword arguments, if any. See :ref:`serialization-rules`
+            for permitted types.
+        :returns:
+            :class:`mitogen.core.Receiver` configured to receive the result of
+            the invocation:
+
+            .. code-block:: python
+
+                recv = context.call_async(os.check_output, 'ls /tmp/')
+                try:
+                    # Prints output once it is received.
+                    msg = recv.get()
+                    print(msg.unpickle())
+                except mitogen.core.CallError, e:
+                    print('Call failed:', str(e))
+
+            Asynchronous calls may be dispatched in parallel to multiple
+            contexts and consumed as they complete using
+            :class:`mitogen.select.Select`.
+        """
+        LOG.debug('%r.call_async(): %r', self, CallSpec(fn, args, kwargs))
+        return self.context.send_async(self.make_msg(fn, *args, **kwargs))
+
+    def call(self, fn, *args, **kwargs):
+        """
+        Like :meth:`call_async`, but block until the return value is available.
+        Equivalent to::
+
+            call_async(fn, *args, **kwargs).get().unpickle()
+
+        :returns:
+            The function's return value.
+        :raises mitogen.core.CallError:
+            An exception was raised in the remote context during execution.
+        """
+        receiver = self.call_async(fn, *args, **kwargs)
+        return receiver.get().unpickle(throw_dead=False)
+
+
+class Context(mitogen.core.Context):
+    """
+    Extend :class:`mitogen.core.Context` with functionality useful to masters,
+    and child contexts who later become parents. Currently when this class is
+    required, the target context's router is upgraded at runtime.
+    """
+    #: A :class:`CallChain` instance constructed by default, with pipelining
+    #: disabled. :meth:`call`, :meth:`call_async` and :meth:`call_no_reply` use
+    #: this instance.
+    call_chain_class = CallChain
+
+    # NOTE(review): unused within this class; presumably the Context this
+    # one is reached through -- confirm at call sites.
+    via = None
+
+    def __init__(self, *args, **kwargs):
+        super(Context, self).__init__(*args, **kwargs)
+        # Default chain has pipelining disabled, so each call is independent.
+        self.default_call_chain = self.call_chain_class(self)
+
+    def __ne__(self, other):
+        # Python 2 does not derive __ne__ from __eq__, so define it
+        # explicitly in terms of __eq__.
+        return not (self == other)
+
+    def __eq__(self, other):
+        # Contexts compare equal when they share both context ID and router,
+        # regardless of concrete subclass.
+        return (isinstance(other, mitogen.core.Context) and
+                (other.context_id == self.context_id) and
+                (other.router == self.router))
+
+    def __hash__(self):
+        # Must agree with __eq__: hash on the same (router, context_id) pair.
+        return hash((self.router, self.context_id))
+
+    def call_async(self, fn, *args, **kwargs):
+        """
+        See :meth:`CallChain.call_async`.
+        """
+        return self.default_call_chain.call_async(fn, *args, **kwargs)
+
+    def call(self, fn, *args, **kwargs):
+        """
+        See :meth:`CallChain.call`.
+        """
+        return self.default_call_chain.call(fn, *args, **kwargs)
+
+    def call_no_reply(self, fn, *args, **kwargs):
+        """
+        See :meth:`CallChain.call_no_reply`.
+        """
+        self.default_call_chain.call_no_reply(fn, *args, **kwargs)
+
+    def shutdown(self, wait=False):
+        """
+        Arrange for the context to receive a ``SHUTDOWN`` message, triggering
+        graceful shutdown.
+
+        Due to a lack of support for timers, no attempt is made yet to force
+        terminate a hung context using this method. This will be fixed shortly.
+
+        :param bool wait:
+            If :data:`True`, block the calling thread until the context has
+            completely terminated.
+
+        :returns:
+            If `wait` is :data:`False`, returns a :class:`mitogen.core.Latch`
+            whose :meth:`get() <mitogen.core.Latch.get>` method returns
+            :data:`None` when shutdown completes. The `timeout` parameter may
+            be used to implement graceful timeouts.
+        """
+        LOG.debug('%r.shutdown() sending SHUTDOWN', self)
+        latch = mitogen.core.Latch()
+        # The latch fires when this context's 'disconnect' signal does, i.e.
+        # once shutdown has actually completed.
+        mitogen.core.listen(self, 'disconnect', lambda: latch.put(None))
+        self.send(
+            mitogen.core.Message(
+                handle=mitogen.core.SHUTDOWN,
+            )
+        )
+
+        if wait:
+            latch.get()
+        else:
+            return latch
+
+
+class RouteMonitor(object):
+    """
+    Generate and respond to :data:`mitogen.core.ADD_ROUTE` and
+    :data:`mitogen.core.DEL_ROUTE` messages sent to the local context by
+    maintaining a table of available routes, and propagating messages towards
+    parents and siblings as appropriate.
+
+    :class:`RouteMonitor` is responsible for generating routing messages for
+    directly attached children. It learns of new children via
+    :meth:`notice_stream` called by :class:`Router`, and subscribes to their
+    ``disconnect`` event to learn when they disappear.
+
+    In children, constructing this class overwrites the stub
+    :data:`mitogen.core.DEL_ROUTE` handler installed by
+    :class:`mitogen.core.ExternalContext`, which is expected behaviour when a
+    child is being upgraded in preparation to become a parent of children of
+    its own.
+
+    By virtue of only being active while responding to messages from a handler,
+    RouteMonitor lives entirely on the broker thread, so its data requires no
+    locking.
+
+    :param Router router:
+        Router to install handlers on.
+    :param Context parent:
+        :data:`None` in the master process, or reference to the parent context
+        we should propagate route updates towards.
+    """
+    def __init__(self, router, parent=None):
+        self.router = router
+        self.parent = parent
+        #: Mapping of Stream instance to integer context IDs reachable via the
+        #: stream; used to cleanup routes during disconnection.
+        self._routes_by_stream = {}
+        # Subscribe to routing updates from directly attached children.
+        # policy=is_immediate_child restricts senders to direct children,
+        # and overwrite=True replaces the stub DEL_ROUTE handler a child
+        # installed before being upgraded to a parent (see class docstring).
+        self.router.add_handler(
+            fn=self._on_add_route,
+            handle=mitogen.core.ADD_ROUTE,
+            persist=True,
+            policy=is_immediate_child,
+            overwrite=True,
+        )
+        self.router.add_handler(
+            fn=self._on_del_route,
+            handle=mitogen.core.DEL_ROUTE,
+            persist=True,
+            policy=is_immediate_child,
+            overwrite=True,
+        )
+
+    def __repr__(self):
+        # Deliberately terse fixed representation.
+        return 'RouteMonitor()'
+
+    def _send_one(self, stream, handle, target_id, name):
+        """
+        Compose and send an update message on a stream.
+
+        :param mitogen.core.Stream stream:
+            Stream to send it on.
+        :param int handle:
+            :data:`mitogen.core.ADD_ROUTE` or :data:`mitogen.core.DEL_ROUTE`
+        :param int target_id:
+            ID of the connecting or disconnecting context.
+        :param str name:
+            Context name or :data:`None`.
+        """
+        if not stream:
+            # We may not have a stream during shutdown.
+            return
+
+        data = str(target_id)
+        if name:
+            data = '%s:%s' % (target_id, name)
+        stream.send(
+            mitogen.core.Message(
+                handle=handle,
+                data=data.encode('utf-8'),
+                dst_id=stream.remote_id,
+            )
+        )
+
+    def _propagate_up(self, handle, target_id, name=None):
+        """
+        In a non-master context, propagate an update towards the master.
+
+        :param int handle:
+            :data:`mitogen.core.ADD_ROUTE` or :data:`mitogen.core.DEL_ROUTE`
+        :param int target_id:
+            ID of the connecting or disconnecting context.
+        :param str name:
+            For :data:`mitogen.core.ADD_ROUTE`, the name of the new context
+            assigned by its parent. This is used by parents to assign the
+            :attr:`mitogen.core.Context.name` attribute.
+        """
+        if self.parent:
+            stream = self.router.stream_by_id(self.parent.context_id)
+            self._send_one(stream, handle, target_id, name)
+
+    def _propagate_down(self, handle, target_id):
+        """
+        For DEL_ROUTE, we additionally want to broadcast the message to any
+        stream that has ever communicated with the disconnecting ID, so
+        core.py's :meth:`mitogen.core.Router._on_del_route` can turn the
+        message into a disconnect event.
+
+        :param int handle:
+            :data:`mitogen.core.ADD_ROUTE` or :data:`mitogen.core.DEL_ROUTE`
+        :param int target_id:
+            ID of the connecting or disconnecting context.
+        """
+        for stream in self.router.get_streams():
+            if target_id in stream.egress_ids and (
+                    (self.parent is None) or
+                    (self.parent.context_id != stream.remote_id)
+                ):
+                self._send_one(stream, mitogen.core.DEL_ROUTE, target_id, None)
+
+    def notice_stream(self, stream):
+        """
+        When this parent is responsible for a new directly connected child
+        stream, we're also responsible for broadcasting DEL_ROUTE upstream
+        if/when that child disconnects.
+        """
+        self._routes_by_stream[stream] = set([stream.remote_id])
+        self._propagate_up(mitogen.core.ADD_ROUTE, stream.remote_id,
+                        stream.name)
+        mitogen.core.listen(
+            obj=stream,
+            name='disconnect',
+            func=lambda: self._on_stream_disconnect(stream),
+        )
+
+    def get_routes(self, stream):
+        """
+        Return the set of context IDs reachable on a stream.
+
+        :param mitogen.core.Stream stream:
+        :returns: set([int])
+        """
+        return self._routes_by_stream.get(stream) or set()
+
+    def _on_stream_disconnect(self, stream):
+        """
+        Respond to disconnection of a local stream by propagating DEL_ROUTE for
+        any contexts we know were attached to it.
+        """
+        # During a stream crash it is possible for disconnect signal to fire
+        # twice, in which case ignore the second instance.
+        routes = self._routes_by_stream.pop(stream, None)
+        if routes is None:
+            return
+
+        LOG.debug('%r: %r is gone; propagating DEL_ROUTE for %r',
+                  self, stream, routes)
+        for target_id in routes:
+            # Drop the local route first, then announce the departure both
+            # towards the master and to sibling streams that communicated
+            # with the departed context.
+            self.router.del_route(target_id)
+            self._propagate_up(mitogen.core.DEL_ROUTE, target_id)
+            self._propagate_down(mitogen.core.DEL_ROUTE, target_id)
+
+            # Fire 'disconnect' on any Context object already constructed
+            # for the departed ID; create=False avoids creating one anew.
+            context = self.router.context_by_id(target_id, create=False)
+            if context:
+                mitogen.core.fire(context, 'disconnect')
+
    def _on_add_route(self, msg):
        """
        Respond to :data:`mitogen.core.ADD_ROUTE` by validating the source of
        the message, updating the local table, and propagating the message
        upwards.
        """
        if msg.is_dead:
            return

        # Payload is b'<target_id>:<target_name>'.
        target_id_s, _, target_name = bytes_partition(msg.data, b(':'))
        target_name = target_name.decode()
        target_id = int(target_id_s)
        self.router.context_by_id(target_id).name = target_name
        # auth_id identifies the directly connected child that advertised
        # this route.
        stream = self.router.stream_by_id(msg.auth_id)
        current = self.router.stream_by_id(target_id)
        # Refuse to overwrite an existing route that does not point back at
        # our parent; otherwise two children could fight over one ID.
        if current and current.remote_id != mitogen.parent_id:
            LOG.error('Cannot add duplicate route to %r via %r, '
                      'already have existing route via %r',
                      target_id, stream, current)
            return

        LOG.debug('Adding route to %d via %r', target_id, stream)
        self._routes_by_stream[stream].add(target_id)
        self.router.add_route(target_id, stream)
        self._propagate_up(mitogen.core.ADD_ROUTE, target_id, target_name)
+
    def _on_del_route(self, msg):
        """
        Respond to :data:`mitogen.core.DEL_ROUTE` by validating the source of
        the message, updating the local table, propagating the message
        upwards, and downwards towards any stream that ever had a message
        forwarded from it towards the disconnecting context.
        """
        if msg.is_dead:
            return

        target_id = int(msg.data)
        registered_stream = self.router.stream_by_id(target_id)
        if registered_stream is None:
            return

        # DEL_ROUTE must arrive on the same stream the route is registered
        # against, otherwise any context could tear down routes it does not
        # own.
        stream = self.router.stream_by_id(msg.auth_id)
        if registered_stream != stream:
            LOG.error('%r: received DEL_ROUTE for %d from %r, expected %r',
                      self, target_id, stream, registered_stream)
            return

        # create=False: only fire 'disconnect' on contexts that were actually
        # instantiated locally.
        context = self.router.context_by_id(target_id, create=False)
        if context:
            LOG.debug('%r: firing local disconnect for %r', self, context)
            mitogen.core.fire(context, 'disconnect')

        LOG.debug('%r: deleting route to %d via %r', self, target_id, stream)
        routes = self._routes_by_stream.get(stream)
        if routes:
            routes.discard(target_id)

        self.router.del_route(target_id)
        # Don't bounce the notification straight back at the parent it
        # arrived from.
        if stream.remote_id != mitogen.parent_id:
            self._propagate_up(mitogen.core.DEL_ROUTE, target_id)
        self._propagate_down(mitogen.core.DEL_ROUTE, target_id)
+
+
class Router(mitogen.core.Router):
    """
    Extend :class:`mitogen.core.Router` with functionality useful to masters,
    and child contexts that later become parents: context ID allocation,
    route management, and convenience methods for starting each supported
    child connection type.
    """
    context_class = Context
    debug = False
    profiling = False

    #: Populated by upgrade(); remain None in a plain (non-parent) child.
    id_allocator = None
    responder = None
    log_forwarder = None
    route_monitor = None

    def upgrade(self, importer, parent):
        """
        Enable parent functionality in a child router: delegate context ID
        allocation towards the master, forward module requests through
        `importer`, monitor routes, and accept DETACHING from our own
        children.
        """
        LOG.debug('%r.upgrade()', self)
        self.id_allocator = ChildIdAllocator(router=self)
        self.responder = ModuleForwarder(
            router=self,
            parent_context=parent,
            importer=importer,
        )
        self.route_monitor = RouteMonitor(self, parent)
        self.add_handler(
            fn=self._on_detaching,
            handle=mitogen.core.DETACHING,
            persist=True,
        )

    def _on_detaching(self, msg):
        """
        Mark the sending child's stream as detached in response to a
        DETACHING message, so its later disappearance is expected rather than
        treated as a failure.
        """
        if msg.is_dead:
            return
        stream = self.stream_by_id(msg.src_id)
        # Only a directly connected child may detach itself, and only once.
        if stream.remote_id != msg.src_id or stream.detached:
            LOG.warning('bad DETACHING received on %r: %r', stream, msg)
            return
        LOG.debug('%r: marking as detached', stream)
        stream.detached = True
        msg.reply(None)

    def get_streams(self):
        """
        Return a snapshot of all streams in existence at time of call.
        """
        self._write_lock.acquire()
        try:
            # Materialize the list while the lock is held: returning the lazy
            # itervalues() view would let another thread mutate _stream_by_id
            # mid-iteration after the lock was released, despite this method
            # promising a snapshot.
            return list(itervalues(self._stream_by_id))
        finally:
            self._write_lock.release()

    def add_route(self, target_id, stream):
        """
        Arrange for messages whose `dst_id` is `target_id` to be forwarded on
        the directly connected `stream`. This method is called automatically
        in response to :data:`mitogen.core.ADD_ROUTE` messages, but remains
        public while the design has not yet settled, and situations may arise
        where routing is not fully automatic.
        """
        LOG.debug('%r.add_route(%r, %r)', self, target_id, stream)
        assert isinstance(target_id, int)
        assert isinstance(stream, Stream)

        self._write_lock.acquire()
        try:
            self._stream_by_id[target_id] = stream
        finally:
            self._write_lock.release()

    def del_route(self, target_id):
        """
        Forget the route to `target_id`, if any exists.
        """
        LOG.debug('%r.del_route(%r)', self, target_id)
        # DEL_ROUTE may be sent by a parent if it knows this context sent
        # messages to a peer that has now disconnected, to let us raise a
        # 'disconnect' event on the appropriate Context instance. In that
        # case, we won't find a matching _stream_by_id entry for the
        # disappearing route, so don't raise an error for a missing key here.
        self._write_lock.acquire()
        try:
            self._stream_by_id.pop(target_id, None)
        finally:
            self._write_lock.release()

    def get_module_blacklist(self):
        # The master (context 0) owns the authoritative lists; children use
        # the copies their Importer received from the master.
        if mitogen.context_id == 0:
            return self.responder.blacklist
        return self.importer.master_blacklist

    def get_module_whitelist(self):
        if mitogen.context_id == 0:
            return self.responder.whitelist
        return self.importer.master_whitelist

    def allocate_id(self):
        """
        Allocate a new context ID, delegating towards the master when
        necessary.
        """
        return self.id_allocator.allocate()

    connection_timeout_msg = u"Connection timed out."

    def _connect(self, klass, name=None, **kwargs):
        """
        Create, connect and register a new directly connected child stream of
        type `klass`, returning its freshly allocated :class:`Context`.

        :raises mitogen.core.StreamError:
            Connection failed or timed out.
        """
        context_id = self.allocate_id()
        context = self.context_class(self, context_id)
        kwargs['old_router'] = self
        kwargs['max_message_size'] = self.max_message_size
        stream = klass(self, context_id, **kwargs)
        if name is not None:
            stream.name = name
        try:
            stream.connect()
        except mitogen.core.TimeoutError:
            raise mitogen.core.StreamError(self.connection_timeout_msg)
        context.name = stream.name
        self.route_monitor.notice_stream(stream)
        self.register(context, stream)
        return context

    def connect(self, method_name, name=None, **kwargs):
        """
        Create a new child context using connection method `method_name`,
        proxying through the `via` context when one is supplied.
        """
        klass = stream_by_method_name(method_name)
        kwargs.setdefault(u'debug', self.debug)
        kwargs.setdefault(u'profiling', self.profiling)
        kwargs.setdefault(u'unidirectional', self.unidirectional)

        via = kwargs.pop(u'via', None)
        if via is not None:
            return self.proxy_connect(via, method_name, name=name,
                                      **mitogen.core.Kwargs(kwargs))
        return self._connect(klass, name=name,
                             **mitogen.core.Kwargs(kwargs))

    def proxy_connect(self, via_context, method_name, name=None, **kwargs):
        """
        Ask `via_context` to start a child on our behalf, then register a
        local :class:`Context` that routes to it through `via_context`.

        :raises mitogen.core.StreamError:
            The remote connection attempt failed.
        """
        resp = via_context.call(_proxy_connect,
            name=name,
            method_name=method_name,
            kwargs=mitogen.core.Kwargs(kwargs),
        )
        if resp['msg'] is not None:
            raise mitogen.core.StreamError(resp['msg'])

        name = u'%s.%s' % (via_context.name, resp['name'])
        context = self.context_class(self, resp['id'], name=name)
        context.via = via_context
        self._write_lock.acquire()
        try:
            self._context_by_id[context.context_id] = context
        finally:
            self._write_lock.release()
        return context

    # Convenience wrappers, one per supported connection method.

    def doas(self, **kwargs):
        return self.connect(u'doas', **kwargs)

    def docker(self, **kwargs):
        return self.connect(u'docker', **kwargs)

    def kubectl(self, **kwargs):
        return self.connect(u'kubectl', **kwargs)

    def fork(self, **kwargs):
        return self.connect(u'fork', **kwargs)

    def jail(self, **kwargs):
        return self.connect(u'jail', **kwargs)

    def local(self, **kwargs):
        return self.connect(u'local', **kwargs)

    def lxc(self, **kwargs):
        return self.connect(u'lxc', **kwargs)

    def lxd(self, **kwargs):
        return self.connect(u'lxd', **kwargs)

    def setns(self, **kwargs):
        return self.connect(u'setns', **kwargs)

    def su(self, **kwargs):
        return self.connect(u'su', **kwargs)

    def sudo(self, **kwargs):
        return self.connect(u'sudo', **kwargs)

    def ssh(self, **kwargs):
        return self.connect(u'ssh', **kwargs)
+
+
class ProcessMonitor(object):
    """
    Install a :data:`signal.SIGCHLD` handler that generates callbacks when a
    specific child process has exited. This class is obsolete, do not use.
    """
    def __init__(self):
        # Map of pid -> callback().
        self.callback_by_pid = {}
        signal.signal(signal.SIGCHLD, self._on_sigchld)

    def _on_sigchld(self, _signum, _frame):
        # Iterate over a copy: reaping a child deletes its entry from
        # callback_by_pid, and mutating a dict during iteration raises
        # RuntimeError on Python 3.
        for pid, callback in list(self.callback_by_pid.items()):
            pid, status = os.waitpid(pid, os.WNOHANG)
            if pid:
                callback(status)
                del self.callback_by_pid[pid]

    def add(self, pid, callback):
        """
        Add a callback function to be notified of the exit status of a process.

        :param int pid:
            Process ID to be notified of.

        :param callback:
            Function invoked as `callback(status)`, where `status` is the raw
            exit status of the child process.
        """
        self.callback_by_pid[pid] = callback

    _instance = None

    @classmethod
    def instance(cls):
        """
        Return a lazily constructed process-wide singleton.
        """
        if cls._instance is None:
            cls._instance = cls()
        return cls._instance
+
+
class ModuleForwarder(object):
    """
    Respond to GET_MODULE requests in a slave by forwarding the request to our
    parent context, or satisfying the request from our local Importer cache.
    """
    def __init__(self, router, parent_context, importer):
        self.router = router
        self.parent_context = parent_context
        self.importer = importer
        # FORWARD_MODULE: a parent instructs us to preload a module towards a
        # particular descendant context.
        router.add_handler(
            fn=self._on_forward_module,
            handle=mitogen.core.FORWARD_MODULE,
            persist=True,
            policy=mitogen.core.has_parent_authority,
        )
        # GET_MODULE: a direct child asks us for a module.
        router.add_handler(
            fn=self._on_get_module,
            handle=mitogen.core.GET_MODULE,
            persist=True,
            policy=is_immediate_child,
        )

    def __repr__(self):
        return 'ModuleForwarder(%r)' % (self.router,)

    def _on_forward_module(self, msg):
        """
        Handle FORWARD_MODULE by sending the named module (and related
        modules) down the stream routing to `context_id`, then forwarding the
        request onwards when the target is not directly connected to us.
        """
        if msg.is_dead:
            return

        # Payload is b'<context_id>\x00<fullname>'.
        context_id_s, _, fullname = bytes_partition(msg.data, b('\x00'))
        fullname = mitogen.core.to_text(fullname)
        context_id = int(context_id_s)
        stream = self.router.stream_by_id(context_id)
        # A route pointing back at our own parent means we have no route
        # towards the child at all.
        if stream.remote_id == mitogen.parent_id:
            LOG.error('%r: dropping FORWARD_MODULE(%d, %r): no route to child',
                      self, context_id, fullname)
            return

        if fullname in stream.sent_modules:
            return

        LOG.debug('%r._on_forward_module() sending %r to %r via %r',
                  self, fullname, context_id, stream.remote_id)
        self._send_module_and_related(stream, fullname)
        # Target is further downstream: pass the request along.
        if stream.remote_id != context_id:
            stream._send(
                mitogen.core.Message(
                    data=msg.data,
                    handle=mitogen.core.FORWARD_MODULE,
                    dst_id=stream.remote_id,
                )
            )

    def _on_get_module(self, msg):
        """
        Handle GET_MODULE from a direct child by asking our own Importer for
        the module, replying from its cache once the module is available.
        """
        LOG.debug('%r._on_get_module(%r)', self, msg)
        if msg.is_dead:
            return

        fullname = msg.data.decode('utf-8')
        callback = lambda: self._on_cache_callback(msg, fullname)
        self.importer._request_module(fullname, callback)

    def _on_cache_callback(self, msg, fullname):
        """
        The Importer cache now contains `fullname`; send it towards the
        original requester.
        """
        LOG.debug('%r._on_get_module(): sending %r', self, fullname)
        stream = self.router.stream_by_id(msg.src_id)
        self._send_module_and_related(stream, fullname)

    def _send_module_and_related(self, stream, fullname):
        """
        Send `fullname` preceded by any related modules named in its cache
        entry (tup[4]), skipping related entries absent from the cache.
        """
        tup = self.importer._cache[fullname]
        for related in tup[4]:
            rtup = self.importer._cache.get(related)
            if rtup:
                self._send_one_module(stream, rtup)
            else:
                LOG.debug('%r._send_module_and_related(%r): absent: %r',
                           self, fullname, related)

        self._send_one_module(stream, tup)

    def _send_one_module(self, stream, tup):
        """
        Send a single LOAD_MODULE message for cache entry `tup` (tup[0] is
        the module's fullname), unless it was already sent on this stream.
        """
        if tup[0] not in stream.sent_modules:
            stream.sent_modules.add(tup[0])
            self.router._async_route(
                mitogen.core.Message.pickled(
                    tup,
                    dst_id=stream.remote_id,
                    handle=mitogen.core.LOAD_MODULE,
                )
            )
diff --git a/ansible/plugins/mitogen-0.2.6/mitogen/profiler.py b/ansible/plugins/mitogen-0.2.6/mitogen/profiler.py
new file mode 100644
index 000000000..74bbdb235
--- /dev/null
+++ b/ansible/plugins/mitogen-0.2.6/mitogen/profiler.py
@@ -0,0 +1,166 @@
+# Copyright 2019, David Wilson
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its contributors
+# may be used to endorse or promote products derived from this software without
+# specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+
+# !mitogen: minify_safe
+
+"""mitogen.profiler
+    Record and report cProfile statistics from a run. Creates one aggregated
+    output file, one aggregate containing only workers, and one for the
+    top-level process.
+
+Usage:
+    mitogen.profiler record <dest_path> <tool> [args ..]
+    mitogen.profiler report <dest_path> [sort_mode]
+    mitogen.profiler stat <sort_mode> <tool> [args ..]
+
+Mode:
+    record: Record a trace.
+    report: Report on a previously recorded trace.
+    stat: Record and report in a single step.
+
+Where:
+    dest_path: Filesystem prefix to write .pstats files to.
+    sort_mode: Sorting mode; defaults to "cumulative". See:
+        https://docs.python.org/2/library/profile.html#pstats.Stats.sort_stats
+
+Example:
+    mitogen.profiler record /tmp/mypatch ansible-playbook foo.yml
+    mitogen.profiler report /tmp/mypatch-worker.pstats
+"""
+
+from __future__ import print_function
+import os
+import pstats
+import cProfile
+import shutil
+import subprocess
+import sys
+import tempfile
+import time
+
+import mitogen.core
+
+
def try_merge(stats, path):
    """
    Attempt to merge the pstats file at `path` into `stats`, returning
    :data:`True` on success. Failures are reported and swallowed, as a
    worker may still be writing the file when we read it.
    """
    try:
        stats.add(path)
        return True
    except Exception as e:
        print('Failed. Race? Will retry. %s' % (e,))
        return False


def merge_stats(outpath, inpaths):
    """
    Merge every pstats file in `inpaths` into a single stats dump written to
    `outpath`. Each load/merge is retried a few times, since workers may
    still be flushing their dumps.

    :raises IOError:
        The first input could not be loaded after several attempts.
    """
    first, rest = inpaths[0], inpaths[1:]
    stats = None
    for _ in range(5):
        try:
            stats = pstats.Stats(first)
        except EOFError:
            time.sleep(0.2)
            continue
        # Loaded successfully: stop retrying. Without this break the merge
        # below would be repeated on every remaining iteration, counting
        # each input multiple times.
        break

    if stats is None:
        raise IOError('could not load stats file %r' % (first,))

    print("Writing %r..." % (outpath,))
    for path in rest:
        for _ in range(5):
            if try_merge(stats, path):
                break
            time.sleep(0.2)

    stats.dump_stats(outpath)
+
+
def generate_stats(outpath, tmpdir):
    """
    Aggregate the per-process pstats dumps found in `tmpdir` into one
    '-all' file covering everything, plus one file per dump identity.
    """
    print('Generating stats..')
    all_paths = []
    paths_by_ident = {}

    for name in os.listdir(tmpdir):
        if not name.endswith('-dump.pstats'):
            continue
        ident = name.partition('-')[0]
        path = os.path.join(tmpdir, name)
        all_paths.append(path)
        paths_by_ident.setdefault(ident, []).append(path)

    merge_stats('%s-all.pstat' % (outpath,), all_paths)
    for ident, paths in paths_by_ident.items():
        merge_stats('%s-%s.pstat' % (outpath, ident), paths)
+
+
def do_record(tmpdir, path, *args):
    """
    Run the command `args` with Mitogen profiling enabled, dumping per-process
    stats into `tmpdir`, then aggregate them under the `path` prefix.
    Returns the command's exit status.
    """
    fmt = '%(identity)s-%(pid)s.%(now)s-dump.%(ext)s'
    env = dict(os.environ,
               MITOGEN_PROFILING='1',
               MITOGEN_PROFILE_FMT=os.path.join(tmpdir, fmt))
    rc = subprocess.call(args, env=env)
    generate_stats(path, tmpdir)
    return rc
+
+
def do_report(tmpdir, path, sort='cumulative'):
    """
    Print the top 100 entries of the pstats file at `path`, ordered by
    `sort`. `tmpdir` is unused but kept for a uniform do_* signature.
    """
    pstats.Stats(path).sort_stats(sort).print_stats(100)
+
+
def do_stat(tmpdir, sort, *args):
    """
    Record a trace of the command `args`, then immediately print a report
    for each aggregate file that was produced, ordered by `sort`.
    """
    valid_sorts = pstats.Stats.sort_arg_dict_default
    if sort not in valid_sorts:
        sys.stderr.write('Invalid sort %r, must be one of %s\n' %
                         (sort, ', '.join(sorted(valid_sorts))))
        sys.exit(1)

    outfile = os.path.join(tmpdir, 'combined')
    do_record(tmpdir, outfile, *args)
    aggs = ('app.main', 'mitogen.broker', 'mitogen.child_main',
            'mitogen.service.pool', 'Strategy', 'WorkerProcess',
            'all')
    for agg in aggs:
        path = '%s-%s.pstat' % (outfile, agg)
        if not os.path.exists(path):
            continue
        print()
        print()
        print('------ Aggregation %r ------' % (agg,))
        print()
        do_report(tmpdir, path, sort)
        print()
+
+
def main():
    """
    Command-line entry point: dispatch to do_record / do_report / do_stat
    based on the first argument, using a temporary work directory that is
    always removed afterwards.
    """
    argv = sys.argv
    if len(argv) < 2 or argv[1] not in ('record', 'report', 'stat'):
        sys.stderr.write(__doc__)
        sys.exit(1)

    handler = globals()['do_' + argv[1]]
    tmpdir = tempfile.mkdtemp(prefix='mitogen.profiler')
    try:
        sys.exit(handler(tmpdir, *argv[2:]) or 0)
    finally:
        shutil.rmtree(tmpdir)

if __name__ == '__main__':
    main()
diff --git a/ansible/plugins/mitogen-0.2.6/mitogen/select.py b/ansible/plugins/mitogen-0.2.6/mitogen/select.py
new file mode 100644
index 000000000..51aebc227
--- /dev/null
+++ b/ansible/plugins/mitogen-0.2.6/mitogen/select.py
@@ -0,0 +1,333 @@
+# Copyright 2019, David Wilson
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its contributors
+# may be used to endorse or promote products derived from this software without
+# specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+
+# !mitogen: minify_safe
+
+import mitogen.core
+
+
class Error(mitogen.core.Error):
    """
    Raised when an invalid operation is attempted on a :class:`Select`.
    """
    pass
+
+
class Event(object):
    """
    Represents one selected event: the object that became ready, and the
    payload retrieved from it.
    """
    #: The first Receiver or Latch the event traversed.
    source = None

    #: The :class:`mitogen.core.Message` delivered to a receiver, or the object
    #: posted to a latch.
    data = None
+
+
class Select(object):
    """
    Support scatter/gather asynchronous calls and waiting on multiple
    :class:`receivers <mitogen.core.Receiver>`,
    :class:`channels <mitogen.core.Channel>`,
    :class:`latches <mitogen.core.Latch>`, and
    :class:`sub-selects <Select>`.

    If `oneshot` is :data:`True`, then remove each receiver as it yields a
    result; since :meth:`__iter__` terminates once the final receiver is
    removed, this makes it convenient to respond to calls made in parallel:

    .. code-block:: python

        total = 0
        recvs = [c.call_async(long_running_operation) for c in contexts]

        for msg in mitogen.select.Select(recvs):
            print('Got %s from %s' % (msg, msg.receiver))
            total += msg.unpickle()

        # Iteration ends when last Receiver yields a result.
        print('Received total %s from %s receivers' % (total, len(recvs)))

    :class:`Select` may drive a long-running scheduler:

    .. code-block:: python

        with mitogen.select.Select(oneshot=False) as select:
            while running():
                for msg in select:
                    process_result(msg.receiver.context, msg.unpickle())
                for context, workfunc in get_new_work():
                    select.add(context.call_async(workfunc))

    :class:`Select` may be nested:

    .. code-block:: python

        subselects = [
            mitogen.select.Select(get_some_work()),
            mitogen.select.Select(get_some_work()),
            mitogen.select.Select([
                mitogen.select.Select(get_some_work()),
                mitogen.select.Select(get_some_work())
            ])
        ]

        for msg in mitogen.select.Select(selects):
            print(msg.unpickle())

    :class:`Select` may be used to mix inter-thread and inter-process IO:

        latch = mitogen.core.Latch()
        start_thread(latch)
        recv = remote_host.call_async(os.getuid)

        sel = Select([latch, recv])
        event = sel.get_event()
        if event.source is latch:
            # woken by a local thread
        else:
            # woken by function call result
    """

    #: Set to the owning Select's _put() while a member of one; None when
    #: unowned.
    notify = None

    def __init__(self, receivers=(), oneshot=True):
        self._receivers = []
        self._oneshot = oneshot
        self._latch = mitogen.core.Latch()
        for recv in receivers:
            self.add(recv)

    @classmethod
    def all(cls, receivers):
        """
        Take an iterable of receivers and retrieve a :class:`Message` from
        each, returning the result of calling `msg.unpickle()` on each in turn.
        Results are returned in the order they arrived.

        This is sugar for handling batch :meth:`Context.call_async
        <mitogen.parent.Context.call_async>` invocations:

        .. code-block:: python

            print('Total disk usage: %.02fMiB' % (sum(
                mitogen.select.Select.all(
                    context.call_async(get_disk_usage)
                    for context in contexts
                ) / 1048576.0
            ),))

        However, unlike in a naive comprehension such as:

        .. code-block:: python

            recvs = [c.call_async(get_disk_usage) for c in contexts]
            sum(recv.get().unpickle() for recv in recvs)

        Result processing happens in the order results arrive, rather than the
        order requests were issued, so :meth:`all` should always be faster.
        """
        return list(msg.unpickle() for msg in cls(receivers))

    def _put(self, value):
        # Installed as the notify function of each member; wakes our latch
        # and propagates to any Select that owns us in turn.
        self._latch.put(value)
        if self.notify:
            self.notify(self)

    def __bool__(self):
        """
        Return :data:`True` if any receivers are registered with this select.
        """
        return bool(self._receivers)

    __nonzero__ = __bool__

    def __enter__(self):
        return self

    def __exit__(self, e_type, e_val, e_tb):
        self.close()

    def iter_data(self):
        """
        Yield :attr:`Event.data` until no receivers remain in the select,
        either because `oneshot` is :data:`True`, or each receiver was
        explicitly removed via :meth:`remove`.

        :meth:`__iter__` is an alias for :meth:`iter_data`, allowing loops
        like::

            for msg in Select([recv1, recv2]):
                print msg.unpickle()
        """
        while self._receivers:
            yield self.get_event().data

    __iter__ = iter_data

    def iter_events(self):
        """
        Yield :class:`Event` instances until no receivers remain in the select.
        """
        while self._receivers:
            yield self.get_event()

    loop_msg = 'Adding this Select instance would create a Select cycle'

    def _check_no_loop(self, recv):
        # Refuse membership that would make this select (directly or via a
        # nested select) a member of itself.
        if recv is self:
            raise Error(self.loop_msg)

        for recv_ in self._receivers:
            if recv_ == recv:
                raise Error(self.loop_msg)
            if isinstance(recv_, Select):
                recv_._check_no_loop(recv)

    owned_msg = 'Cannot add: Receiver is already owned by another Select'

    def add(self, recv):
        """
        Add a :class:`mitogen.core.Receiver`, :class:`Select` or
        :class:`mitogen.core.Latch` to the select.

        :raises mitogen.select.Error:
            An attempt was made to add a :class:`Select` to which this select
            is indirectly a member of, or to add an instance already owned by
            another :class:`Select`.
        """
        if isinstance(recv, Select):
            recv._check_no_loop(self)

        # Check ownership before appending: raising after the append would
        # leave the foreign receiver stuck in our _receivers list.
        if recv.notify is not None:
            raise Error(self.owned_msg)

        self._receivers.append(recv)
        recv.notify = self._put
        # Avoid race by polling once after installation.
        if not recv.empty():
            self._put(recv)

    not_present_msg = 'Instance is not a member of this Select'

    def remove(self, recv):
        """
        Remove an object from from the select. Note that if the receiver has
        notified prior to :meth:`remove`, it will still be returned by a
        subsequent :meth:`get`. This may change in a future version.
        """
        try:
            if recv.notify != self._put:
                raise ValueError
            self._receivers.remove(recv)
            recv.notify = None
        except (IndexError, ValueError):
            raise Error(self.not_present_msg)

    def close(self):
        """
        Remove the select's notifier function from each registered receiver,
        mark the associated latch as closed, and cause any thread currently
        sleeping in :meth:`get` to be woken with
        :class:`mitogen.core.LatchError`.

        This is necessary to prevent memory leaks in long-running receivers. It
        is called automatically when the Python :keyword:`with` statement is
        used.
        """
        for recv in self._receivers[:]:
            self.remove(recv)
        self._latch.close()

    def empty(self):
        """
        Return :data:`True` if calling :meth:`get` would block.

        As with :class:`Queue.Queue`, :data:`True` may be returned even though
        a subsequent call to :meth:`get` will succeed, since a message may be
        posted at any moment between :meth:`empty` and :meth:`get`.

        :meth:`empty` may return :data:`False` even when :meth:`get` would
        block if another thread has drained a receiver added to this select.
        This can be avoided by only consuming each receiver from a single
        thread.
        """
        return self._latch.empty()

    empty_msg = 'Cannot get(), Select instance is empty'

    def get(self, timeout=None, block=True):
        """
        Call `get_event(timeout, block)` returning :attr:`Event.data` of the
        first available event.
        """
        return self.get_event(timeout, block).data

    def get_event(self, timeout=None, block=True):
        """
        Fetch the next available :class:`Event` from any source, or raise
        :class:`mitogen.core.TimeoutError` if no value is available within
        `timeout` seconds.

        On success, the message's :attr:`receiver
        <mitogen.core.Message.receiver>` attribute is set to the receiver.

        :param float timeout:
            Timeout in seconds.
        :param bool block:
            If :data:`False`, immediately raise
            :class:`mitogen.core.TimeoutError` if the select is empty.
        :return:
            :class:`Event`.
        :raises mitogen.core.TimeoutError:
            Timeout was reached.
        :raises mitogen.core.LatchError:
            :meth:`close` has been called, and the underlying latch is no
            longer valid.
        """
        if not self._receivers:
            raise Error(self.empty_msg)

        event = Event()
        while True:
            recv = self._latch.get(timeout=timeout, block=block)
            try:
                if isinstance(recv, Select):
                    event = recv.get_event(block=False)
                else:
                    event.source = recv
                    event.data = recv.get(block=False)
                if self._oneshot:
                    self.remove(recv)
                if isinstance(recv, mitogen.core.Receiver):
                    # Remove in 0.3.x.
                    event.data.receiver = recv
                return event
            except mitogen.core.TimeoutError:
                # A receiver may have been queued with no result if another
                # thread drained it before we woke up, or because another
                # thread drained it between add() calling recv.empty() and
                # self._put(). In this case just sleep again.
                continue
diff --git a/ansible/plugins/mitogen-0.2.6/mitogen/service.py b/ansible/plugins/mitogen-0.2.6/mitogen/service.py
new file mode 100644
index 000000000..302e81ab8
--- /dev/null
+++ b/ansible/plugins/mitogen-0.2.6/mitogen/service.py
@@ -0,0 +1,1085 @@
+# Copyright 2019, David Wilson
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its contributors
+# may be used to endorse or promote products derived from this software without
+# specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+
+# !mitogen: minify_safe
+
+import grp
+import os
+import os.path
+import pprint
+import pwd
+import stat
+import sys
+import threading
+import time
+
+import mitogen.core
+import mitogen.select
+from mitogen.core import b
+from mitogen.core import LOG
+from mitogen.core import str_rpartition
+
+# Python 2.4 lacks the all() builtin; install a pure-Python fallback so the
+# policy checks in Invoker._validate() work on ancient interpreters.
+try:
+    all
+except NameError:
+    def all(it):
+        for elem in it:
+            if not elem:
+                return False
+        return True
+
+
+#: Default worker thread count for a Pool created by get_or_create_pool().
+DEFAULT_POOL_SIZE = 16
+#: Process-global Pool singleton managed by get_or_create_pool().
+_pool = None
+#: PID that constructed _pool; a mismatch means we forked and need a new pool.
+_pool_pid = None
+#: Serialize pool construction.
+_pool_lock = threading.Lock()
+
+
+# Python 2/3 compatibility shims for introspecting a function's code object
+# and name; used by Invoker to detect a 'msg' parameter and for logging.
+if mitogen.core.PY3:
+    def func_code(func):
+        return func.__code__
+    def func_name(func):
+        return func.__name__
+else:
+    def func_code(func):
+        return func.func_code
+    def func_name(func):
+        return func.func_name
+
+
+@mitogen.core.takes_router
+def get_or_create_pool(size=None, router=None):
+    """
+    Return the process-global service :class:`Pool`, constructing it on first
+    use or after a fork() (detected by a change of PID).
+
+    :param int size:
+        Worker thread count for a newly constructed pool; defaults to
+        :data:`DEFAULT_POOL_SIZE` when :data:`None`.
+    :param mitogen.core.Router router:
+        Router, supplied automatically by the takes_router decorator.
+    """
+    global _pool
+    global _pool_pid
+    _pool_lock.acquire()
+    try:
+        if _pool_pid != os.getpid():
+            _pool = Pool(router, [], size=size or DEFAULT_POOL_SIZE,
+                         overwrite=True)
+            # In case of Broker shutdown crash, Pool can cause 'zombie'
+            # processes.
+            mitogen.core.listen(router.broker, 'shutdown',
+                                lambda: _pool.stop(join=False))
+            _pool_pid = os.getpid()
+        return _pool
+    finally:
+        _pool_lock.release()
+
+
+def validate_arg_spec(spec, args):
+    """
+    Verify every argument named in `spec` is present in `args` with the
+    expected type.
+
+    :param dict spec:
+        Mapping of argument name to expected type.
+    :param dict args:
+        Mapping of argument name to supplied value.
+    :raises mitogen.core.CallError:
+        A required argument was missing or had the wrong type.
+    """
+    for name in spec:
+        try:
+            obj = args[name]
+        except KeyError:
+            raise mitogen.core.CallError(
+                'Required argument %r missing.' % (name,)
+            )
+
+        if not isinstance(obj, spec[name]):
+            raise mitogen.core.CallError(
+                'Argument %r type incorrect, got %r, expected %r' % (
+                    name,
+                    type(obj),
+                    spec[name]
+                )
+            )
+
+
+def arg_spec(spec):
+    """
+    Annotate a method as requiring arguments with a specific type. This only
+    validates required arguments. For optional arguments, write a manual check
+    within the function.
+
+    ::
+
+        @mitogen.service.arg_spec({
+            'path': str
+        })
+        def fetch_path(self, path, optional=None):
+            ...
+
+    :param dict spec:
+        Mapping from argument name to expected type.
+    """
+    def wrapper(func):
+        # Stored on the function; consumed by Invoker._validate() at call time.
+        func.mitogen_service__arg_spec = spec
+        return func
+    return wrapper
+
+
+def expose(policy):
+    """
+    Annotate a method to permit access to contexts matching an authorization
+    policy. The annotation may be specified multiple times. Methods lacking any
+    authorization policy are not accessible.
+
+    ::
+
+        @mitogen.service.expose(policy=mitogen.service.AllowParents())
+        def unsafe_operation(self):
+            ...
+
+    :param mitogen.service.Policy policy:
+        The policy to require.
+    """
+    def wrapper(func):
+        # Prepend so repeated decoration accumulates every applied policy;
+        # Invoker._validate() requires all of them to authorize the caller.
+        func.mitogen_service__policies = (
+            [policy] +
+            getattr(func, 'mitogen_service__policies', [])
+        )
+        return func
+    return wrapper
+
+
+def no_reply():
+    """
+    Annotate a method as one that does not generate a response. Messages sent
+    by the method are done so explicitly. This can be used for fire-and-forget
+    endpoints where the requestee never receives a reply.
+    """
+    def wrapper(func):
+        # Checked by Invoker._invoke(); a truthy flag suppresses the reply.
+        func.mitogen_service__no_reply = True
+        return func
+    return wrapper
+
+
+class Error(Exception):
+    """
+    Raised when an error occurs configuring a service or pool. Not raised for
+    request-time failures, which use :class:`mitogen.core.CallError`.
+    """
+    pass  # cope with minify_source() bug.
+
+
+class Policy(object):
+    """
+    Base security policy.
+    """
+    def is_authorized(self, service, msg):
+        """
+        Return :data:`True` if `msg` may invoke methods guarded by this
+        policy on `service`. Subclasses must override.
+        """
+        raise NotImplementedError()
+
+
+class AllowAny(Policy):
+    """
+    Policy that authorizes every caller.
+    """
+    def is_authorized(self, service, msg):
+        return True
+
+
+class AllowParents(Policy):
+    """
+    Policy that authorizes only parent contexts, or the current context
+    itself.
+    """
+    def is_authorized(self, service, msg):
+        return (msg.auth_id in mitogen.parent_ids or
+                msg.auth_id == mitogen.context_id)
+
+
+class Activator(object):
+    """
+    Import, instantiate and register a service on demand when a request
+    names one not yet present in the :class:`Pool`.
+    """
+    def is_permitted(self, mod_name, class_name, msg):
+        # Only contexts with parent authority may trigger auto-activation.
+        return mitogen.core.has_parent_authority(msg)
+
+    not_active_msg = (
+        'Service %r is not yet activated in this context, and the '
+        'caller is not privileged, therefore autoactivation is disabled.'
+    )
+
+    def activate(self, pool, service_name, msg):
+        """
+        Import the module named by the dotted `service_name`, instantiate
+        the service class and add it to `pool`.
+
+        :raises mitogen.core.CallError:
+            `msg` lacks permission to activate services.
+        """
+        mod_name, _, class_name = str_rpartition(service_name, '.')
+        if msg and not self.is_permitted(mod_name, class_name, msg):
+            raise mitogen.core.CallError(self.not_active_msg, service_name)
+
+        module = mitogen.core.import_module(mod_name)
+        klass = getattr(module, class_name)
+        service = klass(router=pool.router)
+        pool.add(service)
+        return service
+
+
+class Invoker(object):
+    """
+    Deliver requests to a :class:`Service`, enforcing each method's
+    authorization policies and argument spec before dispatch.
+    """
+    def __init__(self, service):
+        # The Service instance requests are dispatched to.
+        self.service = service
+
+    def __repr__(self):
+        return '%s(%s)' % (type(self).__name__, self.service)
+
+    unauthorized_msg = (
+        'Caller is not authorized to invoke %r of service %r'
+    )
+
+    def _validate(self, method_name, kwargs, msg):
+        """
+        Raise CallError unless `method_name` exists on the service, has at
+        least one policy that authorizes `msg`, and `kwargs` satisfies the
+        method's argument spec.
+        """
+        method = getattr(self.service, method_name, None)
+        if method is None:
+            raise mitogen.core.CallError('No such method: %r', method_name)
+
+        policies = getattr(method, 'mitogen_service__policies', None)
+        if not policies:
+            raise mitogen.core.CallError('Method has no policies set.')
+
+        # Every policy attached via @expose() must authorize the caller.
+        if not all(p.is_authorized(self.service, msg) for p in policies):
+            raise mitogen.core.CallError(
+                self.unauthorized_msg,
+                method_name,
+                self.service.name()
+            )
+
+        required = getattr(method, 'mitogen_service__arg_spec', {})
+        validate_arg_spec(required, kwargs)
+
+    def _invoke(self, method_name, kwargs, msg):
+        method = getattr(self.service, method_name)
+        # Methods declaring a 'msg' parameter receive the raw message.
+        if 'msg' in func_code(method).co_varnames:
+            kwargs['msg'] = msg  # TODO: hack
+
+        no_reply = getattr(method, 'mitogen_service__no_reply', False)
+        ret = None
+        try:
+            ret = method(**kwargs)
+            if no_reply:
+                return Service.NO_REPLY
+            return ret
+        except Exception:
+            # A no-reply method has nowhere to deliver the error, so log it;
+            # otherwise propagate so invoke()'s caller can reply with it.
+            if no_reply:
+                LOG.exception('While calling no-reply method %s.%s',
+                              type(self.service).__name__,
+                              func_name(method))
+            else:
+                raise
+
+    def invoke(self, method_name, kwargs, msg):
+        """
+        Validate then execute `method_name`, replying to `msg` with the
+        result unless the method suppressed the reply via NO_REPLY.
+        """
+        self._validate(method_name, kwargs, msg)
+        response = self._invoke(method_name, kwargs, msg)
+        if response is not Service.NO_REPLY:
+            msg.reply(response)
+
+
+class SerializedInvoker(Invoker):
+    """
+    Invoker that executes requests one at a time in FIFO order. The first
+    thread to enqueue work drains the queue inline; concurrent callers
+    merely append to it and return immediately.
+    """
+    def __init__(self, **kwargs):
+        super(SerializedInvoker, self).__init__(**kwargs)
+        self._lock = threading.Lock()
+        # Pending (method_name, kwargs, msg) tuples.
+        self._queue = []
+        # True while some thread is draining the queue.
+        self._running = False
+
+    def _pop(self):
+        """
+        Dequeue the next pending request, or clear the running flag and
+        return :data:`None` when the queue is empty.
+        """
+        self._lock.acquire()
+        try:
+            try:
+                return self._queue.pop(0)
+            except IndexError:
+                self._running = False
+        finally:
+            self._lock.release()
+
+    def _run(self):
+        # Drain the queue, converting failures into replies so callers are
+        # never left waiting.
+        while True:
+            tup = self._pop()
+            if tup is None:
+                return
+            method_name, kwargs, msg = tup
+            try:
+                super(SerializedInvoker, self).invoke(method_name, kwargs, msg)
+            except mitogen.core.CallError:
+                e = sys.exc_info()[1]
+                LOG.warning('%r: call error: %s: %s', self, msg, e)
+                msg.reply(e)
+            except Exception:
+                LOG.exception('%r: while invoking %s()', self, method_name)
+                msg.reply(mitogen.core.Message.dead())
+
+    def invoke(self, method_name, kwargs, msg):
+        """
+        Enqueue a request; the enqueuing thread drains the queue unless
+        another thread already is. Replies are sent by Invoker.invoke().
+        """
+        self._lock.acquire()
+        try:
+            self._queue.append((method_name, kwargs, msg))
+            first = not self._running
+            self._running = True
+        finally:
+            self._lock.release()
+
+        if first:
+            self._run()
+        return Service.NO_REPLY
+
+
+class DeduplicatingInvoker(Invoker):
+    """
+    A service that deduplicates and caches expensive responses. Requests are
+    deduplicated according to a customizable key, and the single expensive
+    response is broadcast to all requestors.
+
+    A side effect of this class is that processing of the single response is
+    always serialized according to the result of :py:meth:`key_from_request`.
+
+    Only one pool thread is blocked during generation of the response,
+    regardless of the number of requestors.
+    """
+    def __init__(self, service):
+        super(DeduplicatingInvoker, self).__init__(service)
+        # Mapping of dedup key -> completed response.
+        self._responses = {}
+        # Mapping of dedup key -> list of messages awaiting the response.
+        self._waiters = {}
+        self._lock = threading.Lock()
+
+    def key_from_request(self, method_name, kwargs):
+        """
+        Generate a deduplication key from the request. The default
+        implementation returns a string based on a stable representation of the
+        input dictionary generated by :py:func:`pprint.pformat`.
+        """
+        return pprint.pformat((method_name, kwargs))
+
+    def get_response(self, args):
+        raise NotImplementedError()
+
+    def _produce_response(self, key, response):
+        """
+        Record `response` for `key` and reply to every message that was
+        waiting on it.
+        """
+        self._lock.acquire()
+        try:
+            assert key not in self._responses
+            assert key in self._waiters
+            self._responses[key] = response
+            for msg in self._waiters.pop(key):
+                msg.reply(response)
+        finally:
+            self._lock.release()
+
+    def _invoke(self, method_name, kwargs, msg):
+        key = self.key_from_request(method_name, kwargs)
+        self._lock.acquire()
+        try:
+            # Cached: return immediately; in flight: join the waiter list.
+            if key in self._responses:
+                return self._responses[key]
+
+            if key in self._waiters:
+                self._waiters[key].append(msg)
+                return Service.NO_REPLY
+
+            self._waiters[key] = [msg]
+        finally:
+            self._lock.release()
+
+        # I'm the unlucky thread that must generate the response.
+        try:
+            response = getattr(self, method_name)(**kwargs)
+            self._produce_response(key, response)
+        except mitogen.core.CallError:
+            e = sys.exc_info()[1]
+            self._produce_response(key, e)
+        except Exception:
+            e = sys.exc_info()[1]
+            self._produce_response(key, mitogen.core.CallError(e))
+
+        return Service.NO_REPLY
+
+
+class Service(object):
+    """
+    Base class for services: named collections of methods exposed to other
+    contexts via a :class:`Pool`.
+    """
+    #: Sentinel object to suppress reply generation, since returning
+    #: :data:`None` will trigger a response message containing the pickled
+    #: :data:`None`.
+    NO_REPLY = object()
+
+    #: Invoker subclass used to dispatch requests to this service.
+    invoker_class = Invoker
+
+    @classmethod
+    def name(cls):
+        """
+        Return the fully qualified dotted name used to address this service.
+        """
+        return u'%s.%s' % (cls.__module__, cls.__name__)
+
+    def __init__(self, router):
+        self.router = router
+        self.select = mitogen.select.Select()
+
+    def __repr__(self):
+        return '%s()' % (self.__class__.__name__,)
+
+    def on_message(self, event):
+        """
+        Called when a message arrives on any of :attr:`select`'s registered
+        receivers.
+
+        :param mitogen.select.Event event:
+        """
+        pass
+
+    def on_shutdown(self):
+        """
+        Called by Pool.shutdown() once the last worker thread has exited.
+        """
+        pass
+
+
+class Pool(object):
+    """
+    Manage a pool of at least one thread that will be used to process messages
+    for a collection of services.
+
+    Internally this is implemented by subscribing every :py:class:`Service`'s
+    :py:class:`mitogen.core.Receiver` using a single
+    :py:class:`mitogen.select.Select`, then arranging for every thread to
+    consume messages delivered to that select.
+
+    In this way the threads are fairly shared by all available services, and no
+    resources are dedicated to a single idle service.
+
+    There is no penalty for exposing large numbers of services; the list of
+    exposed services could even be generated dynamically in response to your
+    program's configuration or its input data.
+
+    :param mitogen.core.Router router:
+        Router to listen for ``CALL_SERVICE`` messages on.
+    :param list services:
+        Initial list of services to register.
+    """
+    activator_class = Activator
+
+    def __init__(self, router, services=(), size=1, overwrite=False):
+        self.router = router
+        self._activator = self.activator_class()
+        # Latch carrying callables enqueued by defer().
+        self._ipc_latch = mitogen.core.Latch()
+        self._receiver = mitogen.core.Receiver(
+            router=router,
+            handle=mitogen.core.CALL_SERVICE,
+            overwrite=overwrite,
+        )
+
+        # Every worker pulls from this one select; oneshot=False keeps the
+        # sources registered across get_event() calls.
+        self._select = mitogen.select.Select(oneshot=False)
+        self._select.add(self._receiver)
+        self._select.add(self._ipc_latch)
+        #: Serialize service construction.
+        self._lock = threading.Lock()
+        # Dispatch table mapping event source -> handler.
+        self._func_by_source = {
+            self._receiver: self._on_service_call,
+            self._ipc_latch: self._on_ipc_latch,
+        }
+        self._invoker_by_name = {}
+
+        for service in services:
+            self.add(service)
+        self._py_24_25_compat()
+        self._threads = []
+        for x in range(size):
+            name = 'mitogen.service.Pool.%x.worker-%d' % (id(self), x,)
+            thread = threading.Thread(
+                name=name,
+                target=mitogen.core._profile_hook,
+                args=('mitogen.service.pool', self._worker_main),
+            )
+            thread.start()
+            self._threads.append(thread)
+
+        LOG.debug('%r: initialized', self)
+
+    def _py_24_25_compat(self):
+        if sys.version_info < (2, 6):
+            # import_module() is used to avoid dep scanner sending mitogen.fork
+            # to all mitogen.service importers.
+            os_fork = mitogen.core.import_module('mitogen.os_fork')
+            os_fork._notice_broker_or_pool(self)
+
+    @property
+    def size(self):
+        """
+        Number of worker threads in the pool.
+        """
+        return len(self._threads)
+
+    def add(self, service):
+        """
+        Register `service`, raising :class:`Error` if a service of the same
+        name is already registered.
+        """
+        name = service.name()
+        if name in self._invoker_by_name:
+            raise Error('service named %r already registered' % (name,))
+        assert service.select not in self._func_by_source
+        invoker = service.invoker_class(service=service)
+        self._invoker_by_name[name] = invoker
+        self._func_by_source[service.select] = service.on_message
+
+    #: Set by stop(); causes worker threads to exit their loop.
+    closed = False
+
+    def stop(self, join=True):
+        """
+        Close the receiver and select, causing worker threads to exit,
+        optionally joining them.
+        """
+        self.closed = True
+        self._receiver.close()
+        self._select.close()
+        if join:
+            self.join()
+
+    def join(self):
+        """
+        Wait for every worker thread to exit, then notify each service of
+        shutdown.
+        """
+        for th in self._threads:
+            th.join()
+        for invoker in self._invoker_by_name.values():
+            invoker.service.on_shutdown()
+
+    def get_invoker(self, name, msg):
+        """
+        Return the Invoker for service `name`, auto-activating the service
+        via the activator if it is not yet registered.
+        """
+        self._lock.acquire()
+        try:
+            invoker = self._invoker_by_name.get(name)
+            if not invoker:
+                service = self._activator.activate(self, name, msg)
+                invoker = service.invoker_class(service=service)
+                self._invoker_by_name[name] = invoker
+        finally:
+            self._lock.release()
+
+        return invoker
+
+    def get_service(self, name):
+        """
+        Return the (possibly auto-activated) service instance named `name`.
+        """
+        invoker = self.get_invoker(name, None)
+        return invoker.service
+
+    def _validate(self, msg):
+        """
+        Raise CallError unless `msg` unpickles to a (service_name,
+        method_name, kwargs) tuple of the expected types.
+        """
+        tup = msg.unpickle(throw=False)
+        if not (isinstance(tup, tuple) and
+                len(tup) == 3 and
+                isinstance(tup[0], mitogen.core.AnyTextType) and
+                isinstance(tup[1], mitogen.core.AnyTextType) and
+                isinstance(tup[2], dict)):
+            raise mitogen.core.CallError('Invalid message format.')
+
+    def defer(self, func, *args, **kwargs):
+        """
+        Arrange for `func(*args, **kwargs)` to be invoked in the context of a
+        service pool thread.
+        """
+        self._ipc_latch.put(lambda: func(*args, **kwargs))
+
+    def _on_ipc_latch(self, event):
+        # Execute a callable previously enqueued via defer().
+        event.data()
+
+    def _on_service_call(self, event):
+        """
+        Dispatch one CALL_SERVICE message, converting any failure into a
+        CallError reply so the caller is not left waiting.
+        """
+        msg = event.data
+        service_name = None
+        method_name = None
+        try:
+            self._validate(msg)
+            service_name, method_name, kwargs = msg.unpickle()
+            invoker = self.get_invoker(service_name, msg)
+            return invoker.invoke(method_name, kwargs, msg)
+        except mitogen.core.CallError:
+            e = sys.exc_info()[1]
+            LOG.warning('%r: call error: %s: %s', self, msg, e)
+            msg.reply(e)
+        except Exception:
+            LOG.exception('%r: while invoking %r of %r',
+                          self, method_name, service_name)
+            e = sys.exc_info()[1]
+            msg.reply(mitogen.core.CallError(e))
+
+    def _worker_run(self):
+        """
+        Worker loop: pull events from the shared select and dispatch each to
+        its registered handler until the pool is closed.
+        """
+        while not self.closed:
+            try:
+                event = self._select.get_event()
+            except (mitogen.core.ChannelError, mitogen.core.LatchError):
+                e = sys.exc_info()[1]
+                LOG.debug('%r: channel or latch closed, exitting: %s', self, e)
+                return
+
+            func = self._func_by_source[event.source]
+            try:
+                func(event)
+            except Exception:
+                LOG.exception('While handling %r using %r', event.data, func)
+
+    def _worker_main(self):
+        # Thread entry point; log and re-raise so a crash is never silent.
+        try:
+            self._worker_run()
+        except Exception:
+            th = threading.currentThread()
+            LOG.exception('%r: worker %r crashed', self, th.getName())
+            raise
+
+    def __repr__(self):
+        th = threading.currentThread()
+        return 'mitogen.service.Pool(%#x, size=%d, th=%r)' % (
+            id(self),
+            len(self._threads),
+            th.getName(),
+        )
+
+
+class FileStreamState(object):
+    """
+    Per-stream transfer bookkeeping used by :class:`FileService`.
+    """
+    def __init__(self):
+        #: List of [(Sender, file object)]
+        self.jobs = []
+        self.completing = {}
+        #: In-flight byte count.
+        self.unacked = 0
+        #: Lock.
+        self.lock = threading.Lock()
+
+
+class PushFileService(Service):
+    """
+    Push-based file service. Files are delivered and cached in RAM, sent
+    recursively from parent to child. A child that requests a file via
+    :meth:`get` will block until it has been delivered by a parent.
+
+    This service will eventually be merged into FileService.
+    """
+    def __init__(self, **kwargs):
+        super(PushFileService, self).__init__(**kwargs)
+        self._lock = threading.Lock()
+        # Mapping of path -> cached Blob.
+        self._cache = {}
+        # Mapping of path -> callbacks to run once the path arrives.
+        self._waiters = {}
+        # Mapping of stream -> set of paths already sent down it.
+        self._sent_by_stream = {}
+
+    def get(self, path):
+        """
+        Fetch a file from the cache, blocking until a parent has pushed it.
+        """
+        assert isinstance(path, mitogen.core.UnicodeType)
+        self._lock.acquire()
+        try:
+            if path in self._cache:
+                return self._cache[path]
+            latch = mitogen.core.Latch()
+            waiters = self._waiters.setdefault(path, [])
+            waiters.append(lambda: latch.put(None))
+        finally:
+            self._lock.release()
+
+        LOG.debug('%r.get(%r) waiting for uncached file to arrive', self, path)
+        latch.get()
+        LOG.debug('%r.get(%r) -> %r', self, path, self._cache[path])
+        return self._cache[path]
+
+    def _forward(self, context, path):
+        """
+        Send `path` towards `context` via the first hop on its route: a
+        store_and_forward call carrying the data the first time down a
+        stream, a cheaper forward call thereafter.
+        """
+        stream = self.router.stream_by_id(context.context_id)
+        child = mitogen.core.Context(self.router, stream.remote_id)
+        sent = self._sent_by_stream.setdefault(stream, set())
+        if path in sent:
+            if child.context_id != context.context_id:
+                child.call_service_async(
+                    service_name=self.name(),
+                    method_name='forward',
+                    path=path,
+                    context=context
+                ).close()
+        else:
+            child.call_service_async(
+                service_name=self.name(),
+                method_name='store_and_forward',
+                path=path,
+                data=self._cache[path],
+                context=context
+            ).close()
+            sent.add(path)
+
+    @expose(policy=AllowParents())
+    @arg_spec({
+        'context': mitogen.core.Context,
+        'paths': list,
+        'modules': list,
+    })
+    def propagate_paths_and_modules(self, context, paths, modules):
+        """
+        One size fits all method to ensure a target context has been preloaded
+        with a set of small files and Python modules.
+        """
+        for path in paths:
+            self.propagate_to(context, mitogen.core.to_text(path))
+        self.router.responder.forward_modules(context, modules)
+
+    @expose(policy=AllowParents())
+    @arg_spec({
+        'context': mitogen.core.Context,
+        'path': mitogen.core.FsPathTypes,
+    })
+    def propagate_to(self, context, path):
+        """
+        Cache the file at `path`, reading it from disk if necessary, then
+        forward it towards `context`.
+        """
+        LOG.debug('%r.propagate_to(%r, %r)', self, context, path)
+        if path not in self._cache:
+            fp = open(path, 'rb')
+            try:
+                self._cache[path] = mitogen.core.Blob(fp.read())
+            finally:
+                fp.close()
+        self._forward(context, path)
+
+    @expose(policy=AllowParents())
+    @no_reply()
+    @arg_spec({
+        'path': mitogen.core.UnicodeType,
+        'data': mitogen.core.Blob,
+        'context': mitogen.core.Context,
+    })
+    def store_and_forward(self, path, data, context):
+        """
+        Cache `data` for `path`, wake any local waiters, and continue
+        forwarding towards `context` unless this context is the destination.
+        """
+        LOG.debug('%r.store_and_forward(%r, %r, %r) %r',
+                  self, path, data, context,
+                  threading.currentThread().getName())
+        self._lock.acquire()
+        try:
+            self._cache[path] = data
+            waiters = self._waiters.pop(path, [])
+        finally:
+            self._lock.release()
+
+        if context.context_id != mitogen.context_id:
+            self._forward(context, path)
+        # Run waiter callbacks outside the lock.
+        for callback in waiters:
+            callback()
+
+    @expose(policy=AllowParents())
+    @no_reply()
+    @arg_spec({
+        'path': mitogen.core.FsPathTypes,
+        'context': mitogen.core.Context,
+    })
+    def forward(self, path, context):
+        """
+        Forward an already-cached `path` towards `context`, or queue the
+        forward until the file arrives via store_and_forward().
+        """
+        LOG.debug('%r.forward(%r, %r)', self, path, context)
+        func = lambda: self._forward(context, path)
+
+        self._lock.acquire()
+        try:
+            if path in self._cache:
+                func()
+            else:
+                LOG.debug('%r: %r not cached yet, queueing', self, path)
+                self._waiters.setdefault(path, []).append(func)
+        finally:
+            self._lock.release()
+
+
+class FileService(Service):
+    """
+    Streaming file server, used to serve small and huge files alike. Paths must
+    be registered by a trusted context before they will be served to a child.
+
+    Transfers are divided among the physical streams that connect external
+    contexts, ensuring each stream never has excessive data buffered in RAM,
+    while still maintaining enough to fully utilize available bandwidth. This
+    is achieved by making an initial bandwidth assumption, enqueueing enough
+    chunks to fill that assumed pipe, then responding to delivery
+    acknowledgements from the receiver by scheduling new chunks.
+
+    Transfers proceed one-at-a-time per stream. When multiple contexts exist on
+    a stream (e.g. one is the SSH account, another is a sudo account, and a
+    third is a proxied SSH connection), each request is satisfied in turn
+    before subsequent requests start flowing. This ensures when a stream is
+    contended, priority is given to completing individual transfers rather than
+    potentially aborting many partial transfers, causing the bandwidth to be
+    wasted.
+
+    Theory of operation:
+        1. Trusted context (i.e. WorkerProcess) calls register(), making a
+           file available to any untrusted context.
+        2. Requestee context creates a mitogen.core.Receiver() to receive
+           chunks, then calls fetch(path, recv.to_sender()), to set up the
+           transfer.
+        3. fetch() replies to the call with the file's metadata, then
+           schedules an initial burst up to the window size limit (1MiB).
+        4. Chunks begin to arrive in the requestee, which calls acknowledge()
+           for each 128KiB received.
+        5. The acknowledge() call arrives at FileService, which schedules a new
+           chunk to refill the drained window back to the size limit.
+        6. When the last chunk has been pumped for a single transfer,
+           Sender.close() is called causing the receive loop in
+           target.py::_get_file() to exit, allowing that code to compare the
+           transferred size with the total file size from the metadata.
+        7. If the sizes mismatch, _get_file()'s caller is informed which will
+           discard the result and log/raise an error.
+
+    Shutdown:
+        1. process.py calls service.Pool.shutdown(), which arranges for the
+           service pool threads to exit and be joined, guaranteeing no new
+           requests can arrive, before calling Service.on_shutdown() for each
+           registered service.
+        2. FileService.on_shutdown() walks every in-progress transfer and calls
+           Sender.close(), causing Receiver loops in the requestees to exit
+           early. The size check fails and any partially downloaded file is
+           discarded.
+        3. Control exits _get_file() in every target, and graceful shutdown can
+           proceed normally, without the associated thread needing to be
+           forcefully killed.
+    """
+    #: Format string for the CallError raised when fetch() names a path that
+    #: was never register()ed or covered by register_prefix().
+    unregistered_msg = 'Path %r is not registered with FileService.'
+    #: Error raised when the Sender's context does not match msg.src_id.
+    context_mismatch_msg = 'sender= kwarg context must match requestee context'
+
+    #: Burst size. With 1MiB and 10ms RTT max throughput is 100MiB/sec, which
+    #: is 5x what SSH can handle on a 2011 era 2.4Ghz Core i5.
+    window_size_bytes = 1048576
+
+    def __init__(self, router):
+        """
+        Initialize the path registries; see the class docstring for the
+        transfer protocol.
+        """
+        super(FileService, self).__init__(router)
+        #: Set of registered paths.
+        self._paths = set()
+        #: Set of registered directory prefixes.
+        self._prefixes = set()
+        #: Mapping of Stream->FileStreamState.
+        self._state_by_stream = {}
+
+    def _name_or_none(self, func, n, attr):
+        """
+        Return ``getattr(func(n), attr)``, or :data:`None` if the lookup
+        raises KeyError (e.g. a uid/gid with no passwd/group entry).
+        """
+        try:
+            return getattr(func(n), attr)
+        except KeyError:
+            return None
+
+    @expose(policy=AllowParents())
+    @arg_spec({
+        'path': mitogen.core.FsPathTypes,
+    })
+    def register(self, path):
+        """
+        Authorize a path for access by children. Repeat calls with the same
+        path have no effect.
+
+        :param str path:
+            File path.
+        """
+        if path not in self._paths:
+            LOG.debug('%r: registering %r', self, path)
+            self._paths.add(path)
+
+    @expose(policy=AllowParents())
+    @arg_spec({
+        'path': mitogen.core.FsPathTypes,
+    })
+    def register_prefix(self, path):
+        """
+        Authorize a path and any subpaths for access by children. Repeat calls
+        with the same path have no effect.
+
+        :param str path:
+            File path.
+        """
+        if path not in self._prefixes:
+            LOG.debug('%r: registering prefix %r', self, path)
+            self._prefixes.add(path)
+
+    def _generate_stat(self, path):
+        """
+        Return a metadata dict for the regular file at `path`, raising
+        IOError for any other file type.
+        """
+        st = os.stat(path)
+        if not stat.S_ISREG(st.st_mode):
+            raise IOError('%r is not a regular file.' % (path,))
+
+        # NOTE(review): uid/gid 0 is passed rather than st.st_uid/st.st_gid,
+        # so owner/group always name uid/gid 0. This matches the bundled
+        # upstream 0.2.6 code -- confirm against upstream before changing.
+        return {
+            u'size': st.st_size,
+            u'mode': st.st_mode,
+            u'owner': self._name_or_none(pwd.getpwuid, 0, 'pw_name'),
+            u'group': self._name_or_none(grp.getgrgid, 0, 'gr_name'),
+            u'mtime': float(st.st_mtime),  # Python 2.4 uses int.
+            u'atime': float(st.st_atime),  # Python 2.4 uses int.
+        }
+
+    def on_shutdown(self):
+        """
+        Respond to shutdown by sending close() to every target, allowing their
+        receive loop to exit and clean up gracefully.
+        """
+        LOG.debug('%r.on_shutdown()', self)
+        for stream, state in self._state_by_stream.items():
+            state.lock.acquire()
+            try:
+                # Iterate in reverse so each pop() removes the job just closed.
+                for sender, fp in reversed(state.jobs):
+                    sender.close()
+                    fp.close()
+                    state.jobs.pop()
+            finally:
+                state.lock.release()
+
+    # The IO loop pumps 128KiB chunks. An ideal message is a multiple of this,
+    # odd-sized messages waste one tiny write() per message on the trailer.
+    # Therefore subtract 10 bytes pickle overhead + 24 bytes header. The
+    # overhead is measured empirically by pickling a CHUNK_SIZE-byte Blob.
+    IO_SIZE = mitogen.core.CHUNK_SIZE - (mitogen.core.Stream.HEADER_LEN + (
+        len(
+            mitogen.core.Message.pickled(
+                mitogen.core.Blob(b(' ') * mitogen.core.CHUNK_SIZE)
+            ).data
+        ) - mitogen.core.CHUNK_SIZE
+    ))
+
+    def _schedule_pending_unlocked(self, state):
+        """
+        Consider the pending transfers for a stream, pumping new chunks while
+        the unacknowledged byte count is below :attr:`window_size_bytes`. Must
+        be called with the FileStreamState lock held.
+
+        :param FileStreamState state:
+            Stream to schedule chunks for.
+        """
+        while state.jobs and state.unacked < self.window_size_bytes:
+            # Only the front job is pumped: transfers proceed one at a time.
+            sender, fp = state.jobs[0]
+            s = fp.read(self.IO_SIZE)
+            if s:
+                state.unacked += len(s)
+                sender.send(mitogen.core.Blob(s))
+            else:
+                # File is done. Cause the target's receive loop to exit by
+                # closing the sender, close the file, and remove the job entry.
+                sender.close()
+                fp.close()
+                state.jobs.pop(0)
+
+    def _prefix_is_authorized(self, path):
+        """
+        Return :data:`True` if any parent directory of `path` was registered
+        via :meth:`register_prefix`. :func:`os.path.abspath` is used to
+        ensure the path is absolute.
+
+        :param str path:
+            The path.
+        :returns: Boolean.
+        """
+        path = os.path.abspath(path)
+        while True:
+            if path in self._prefixes:
+                return True
+            if path == '/':
+                break
+            path = os.path.dirname(path)
+        return False
+
+    @expose(policy=AllowAny())
+    @no_reply()
+    @arg_spec({
+        'path': mitogen.core.FsPathTypes,
+        'sender': mitogen.core.Sender,
+    })
+    def fetch(self, path, sender, msg):
+        """
+        Start a transfer for a registered path. Because of the @no_reply
+        decoration, the reply (the metadata dict below, or a CallError) is
+        delivered explicitly via ``msg.reply()`` rather than from the return
+        value.
+
+        :param str path:
+            File path.
+        :param mitogen.core.Sender sender:
+            Sender to receive file data.
+        :returns:
+            Dict containing the file metadata:
+
+            * ``size``: File size in bytes.
+            * ``mode``: Integer file mode.
+            * ``owner``: Owner account name on host machine.
+            * ``group``: Owner group name on host machine.
+            * ``mtime``: Floating point modification time.
+            * ``ctime``: Floating point change time.
+        :raises Error:
+            Unregistered path, or Sender did not match requestee context.
+        """
+        if path not in self._paths and not self._prefix_is_authorized(path):
+            msg.reply(mitogen.core.CallError(
+                Error(self.unregistered_msg % (path,))
+            ))
+            return
+
+        # Refuse to stream to a sender owned by a context other than the
+        # caller, otherwise one context could redirect a transfer elsewhere.
+        if msg.src_id != sender.context.context_id:
+            msg.reply(mitogen.core.CallError(
+                Error(self.context_mismatch_msg)
+            ))
+            return
+
+        LOG.debug('Serving %r', path)
+
+        # Response must arrive first so requestee can begin receive loop,
+        # otherwise first ack won't arrive until all pending chunks were
+        # delivered. In that case max BDP would always be 128KiB, aka. max
+        # ~10Mbit/sec over a 100ms link.
+        try:
+            fp = open(path, 'rb', self.IO_SIZE)
+            msg.reply(self._generate_stat(path))
+        except IOError:
+            msg.reply(mitogen.core.CallError(
+                sys.exc_info()[1]
+            ))
+            return
+
+        # Queue the transfer on per-stream state, pumping the first window's
+        # worth of chunks while the state lock is held.
+        stream = self.router.stream_by_id(sender.context.context_id)
+        state = self._state_by_stream.setdefault(stream, FileStreamState())
+        state.lock.acquire()
+        try:
+            state.jobs.append((sender, fp))
+            self._schedule_pending_unlocked(state)
+        finally:
+            state.lock.release()
+
+    @expose(policy=AllowAny())
+    @no_reply()
+    @arg_spec({
+        'size': int,
+    })
+    @no_reply()
+    def acknowledge(self, size, msg):
+        """
+        Acknowledge bytes received by a transfer target, scheduling new chunks
+        to keep the window full. This should be called for every chunk received
+        by the target.
+        """
+        stream = self.router.stream_by_id(msg.src_id)
+        state = self._state_by_stream[stream]
+        state.lock.acquire()
+        try:
+            if state.unacked < size:
+                LOG.error('%r.acknowledge(src_id %d): unacked=%d < size %d',
+                          self, msg.src_id, state.unacked, size)
+            state.unacked -= min(state.unacked, size)
+            self._schedule_pending_unlocked(state)
+        finally:
+            state.lock.release()
+
+    @classmethod
+    def get(cls, context, path, out_fp):
+        """
+        Streamily download a file from the connection multiplexer process in
+        the controller.
+
+        :param mitogen.core.Context context:
+            Reference to the context hosting the FileService that will be used
+            to fetch the file.
+        :param bytes path:
+            FileService registered name of the input file.
+        :param out_fp:
+            File-like object opened for binary writing that receives the
+            downloaded chunks.
+        :returns:
+            Tuple of (`ok`, `metadata`), where `ok` is :data:`True` on success,
+            or :data:`False` if the transfer was interrupted and the output
+            should be discarded.
+
+            `metadata` is a dictionary of file metadata as documented in
+            :meth:`fetch`.
+        """
+        LOG.debug('get_file(): fetching %r from %r', path, context)
+        t0 = time.time()
+        recv = mitogen.core.Receiver(router=context.router)
+        metadata = context.call_service(
+            service_name=cls.name(),
+            method_name='fetch',
+            path=path,
+            sender=recv.to_sender(),
+        )
+
+        received_bytes = 0
+        for chunk in recv:
+            s = chunk.unpickle()
+            LOG.debug('get_file(%r): received %d bytes', path, len(s))
+            # acknowledge() is @no_reply, so discard the reply receiver
+            # immediately instead of waiting on it.
+            context.call_service_async(
+                service_name=cls.name(),
+                method_name='acknowledge',
+                size=len(s),
+            ).close()
+            out_fp.write(s)
+            received_bytes += len(s)
+
+        ok = received_bytes == metadata['size']
+        if received_bytes < metadata['size']:
+            LOG.error('get_file(%r): receiver was closed early, controller '
+                      'may be shutting down, or the file was truncated '
+                      'during transfer. Expected %d bytes, received %d.',
+                      path, metadata['size'], received_bytes)
+        elif received_bytes > metadata['size']:
+            LOG.error('get_file(%r): the file appears to have grown '
+                      'while transfer was in progress. Expected %d '
+                      'bytes, received %d.',
+                      path, metadata['size'], received_bytes)
+
+        LOG.debug('target.get_file(): fetched %d bytes of %r from %r in %dms',
+                  metadata['size'], path, context, 1000 * (time.time() - t0))
+        return ok, metadata
diff --git a/ansible/plugins/mitogen-0.2.6/mitogen/setns.py b/ansible/plugins/mitogen-0.2.6/mitogen/setns.py
new file mode 100644
index 000000000..b1d697838
--- /dev/null
+++ b/ansible/plugins/mitogen-0.2.6/mitogen/setns.py
@@ -0,0 +1,238 @@
+# Copyright 2019, David Wilson
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its contributors
+# may be used to endorse or promote products derived from this software without
+# specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+
+# !mitogen: minify_safe
+
+import ctypes
+import grp
+import logging
+import os
+import pwd
+import subprocess
+import sys
+
+import mitogen.core
+import mitogen.parent
+
+
+LOG = logging.getLogger(__name__)
+# Handle on the process's own libc; use_errno=True so ctypes.get_errno()
+# reflects failures from the raw setns() wrapper below.
+LIBC = ctypes.CDLL(None, use_errno=True)
+LIBC__strerror = LIBC.strerror
+# strerror() returns char*; without restype ctypes would interpret it as int.
+LIBC__strerror.restype = ctypes.c_char_p
+
+
+class Error(mitogen.core.StreamError):
+    """Raised on any failure to set up a setns connection."""
+    pass
+
+
+def setns(kind, fd):
+    """
+    Join the namespace referred to by `fd` via the libc setns() wrapper.
+
+    :param str kind:
+        Namespace name (e.g. 'net'); used only in the error message.
+    :param int fd:
+        Descriptor open on the leader's /proc/<pid>/ns/<kind> entry.
+    :raises OSError:
+        The call failed; carries the libc errno and a descriptive message.
+    """
+    if LIBC.setns(int(fd), 0) == -1:
+        errno = ctypes.get_errno()
+        # (fd, kind) order mirrors the C prototype setns(int fd, int nstype),
+        # not this function's (kind, fd) signature.
+        msg = 'setns(%s, %s): %s' % (fd, kind, LIBC__strerror(errno))
+        raise OSError(errno, msg)
+
+
+def _run_command(args):
+    argv = mitogen.parent.Argv(args)
+    try:
+        proc = subprocess.Popen(
+            args=args,
+            stdout=subprocess.PIPE,
+            stderr=subprocess.STDOUT
+        )
+    except OSError:
+        e = sys.exc_info()[1]
+        raise Error('could not execute %s: %s', argv, e)
+
+    output, _ = proc.communicate()
+    if not proc.returncode:
+        return output.decode('utf-8', 'replace')
+
+    raise Error("%s exitted with status %d: %s",
+                mitogen.parent.Argv(args), proc.returncode, output)
+
+
+def get_docker_pid(path, name):
+    args = [path, 'inspect', '--format={{.State.Pid}}', name]
+    output = _run_command(args)
+    try:
+        return int(output)
+    except ValueError:
+        raise Error("could not find PID from docker output.\n%s", output)
+
+
+def get_lxc_pid(path, name):
+    output = _run_command([path, '-n', name])
+    for line in output.splitlines():
+        bits = line.split()
+        if bits and bits[0] == 'PID:':
+            return int(bits[1])
+
+    raise Error("could not find PID from lxc-info output.\n%s", output)
+
+
+def get_lxd_pid(path, name):
+    output = _run_command([path, 'info', name])
+    for line in output.splitlines():
+        bits = line.split()
+        if bits and bits[0] == 'Pid:':
+            return int(bits[1])
+
+    raise Error("could not find PID from lxc output.\n%s", output)
+
+
+def get_machinectl_pid(path, name):
+    output = _run_command([path, 'status', name])
+    for line in output.splitlines():
+        bits = line.split()
+        if bits and bits[0] == 'Leader:':
+            return int(bits[1])
+
+    raise Error("could not find PID from machinectl output.\n%s", output)
+
+
+class Stream(mitogen.parent.Stream):
+    """
+    Connection method that attaches to an existing container (Docker, LXC,
+    LXD, or machinectl/nspawn) by chroot()ing into the leader process's root
+    and joining its namespaces via setns(2) in the forked child, before
+    exec'ing the bootstrap.
+    """
+    child_is_immediate_subprocess = False
+
+    # Target container name; set by construct().
+    container = None
+    # Account to switch to after entering the container's namespaces.
+    username = 'root'
+    # Container kind; must be one of the GET_LEADER_BY_KIND keys.
+    kind = None
+    # Interpreter to run inside the container.
+    python_path = 'python'
+    # Paths to the CLI tools used to discover the leader PID.
+    docker_path = 'docker'
+    lxc_path = 'lxc'
+    lxc_info_path = 'lxc-info'
+    machinectl_path = 'machinectl'
+
+    # Maps container kind to (attribute holding the tool path, function that
+    # extracts the leader PID from that tool's output).
+    GET_LEADER_BY_KIND = {
+        'docker': ('docker_path', get_docker_pid),
+        'lxc': ('lxc_info_path', get_lxc_pid),
+        'lxd': ('lxc_path', get_lxd_pid),
+        'machinectl': ('machinectl_path', get_machinectl_pid),
+    }
+
+    def construct(self, container, kind, username=None, docker_path=None,
+                  lxc_path=None, lxc_info_path=None, machinectl_path=None,
+                  **kwargs):
+        """
+        Record connection parameters, validating `kind`.
+
+        :raises Error:
+            `kind` is not one of the GET_LEADER_BY_KIND keys.
+        """
+        super(Stream, self).construct(**kwargs)
+        if kind not in self.GET_LEADER_BY_KIND:
+            raise Error('unsupported container kind: %r', kind)
+
+        self.container = container
+        self.kind = kind
+        if username:
+            self.username = username
+        if docker_path:
+            self.docker_path = docker_path
+        if lxc_path:
+            self.lxc_path = lxc_path
+        if lxc_info_path:
+            self.lxc_info_path = lxc_info_path
+        if machinectl_path:
+            self.machinectl_path = machinectl_path
+
+    # Order matters. https://github.com/karelzak/util-linux/commit/854d0fe/
+    NS_ORDER = ('ipc', 'uts', 'net', 'pid', 'mnt', 'user')
+
+    def preexec_fn(self):
+        """
+        Runs in the forked child before exec: chroot into the leader's root,
+        join each of the leader's namespaces that differs from our own, then
+        drop to `username` with a minimally sane environment.
+        """
+        nspath = '/proc/%d/ns/' % (self.leader_pid,)
+        selfpath = '/proc/self/ns/'
+        try:
+            # Only open namespaces that exist and actually differ from ours,
+            # comparing the /proc symlink targets.
+            ns_fps = [
+                open(nspath + name)
+                for name in self.NS_ORDER
+                if os.path.exists(nspath + name) and (
+                    os.readlink(nspath + name) != os.readlink(selfpath + name)
+                )
+            ]
+        except Exception:
+            e = sys.exc_info()[1]
+            raise Error(str(e))
+
+        # Enter the leader's root filesystem before joining namespaces.
+        os.chdir('/proc/%s/root' % (self.leader_pid,))
+        os.chroot('.')
+        os.chdir('/')
+        for fp in ns_fps:
+            setns(fp.name, fp.fileno())
+            fp.close()
+
+        # Flush cached passwd/group/shadow state inherited from the host so
+        # the lookups below hit the container's databases. Not every libc
+        # exports all of these symbols, hence the AttributeError guard.
+        for sym in 'endpwent', 'endgrent', 'endspent', 'endsgent':
+            try:
+                getattr(LIBC, sym)()
+            except AttributeError:
+                pass
+
+        try:
+            os.setgroups([grent.gr_gid
+                          for grent in grp.getgrall()
+                          if self.username in grent.gr_mem])
+            pwent = pwd.getpwnam(self.username)
+            os.setreuid(pwent.pw_uid, pwent.pw_uid)
+            # shadow-4.4/libmisc/setupenv.c. Not done: MAIL, PATH
+            os.environ.update({
+                'HOME': pwent.pw_dir,
+                'SHELL': pwent.pw_shell or '/bin/sh',
+                'LOGNAME': self.username,
+                'USER': self.username,
+            })
+            if ((os.path.exists(pwent.pw_dir) and
+                 os.access(pwent.pw_dir, os.X_OK))):
+                os.chdir(pwent.pw_dir)
+        except Exception:
+            e = sys.exc_info()[1]
+            raise Error(self.username_msg, self.username, self.container,
+                        type(e).__name__, e)
+
+    username_msg = 'while transitioning to user %r in container %r: %s: %s'
+
+    def get_boot_command(self):
+        # With setns(CLONE_NEWPID), new children of the caller receive a new
+        # PID namespace, however the caller's namespace won't change. That
+        # causes subsequent calls to clone() specifying CLONE_THREAD to fail
+        # with EINVAL, as threads in the same process can't have varying PID
+        # namespaces, meaning starting new threads in the exec'd program will
+        # fail. The solution is forking, so inject a /bin/sh call to achieve
+        # this.
+        argv = super(Stream, self).get_boot_command()
+        # bash will exec() if a single command was specified and the shell has
+        # nothing left to do, so "; exit $?" gives bash a reason to live.
+        return ['/bin/sh', '-c', '%s; exit $?' % (mitogen.parent.Argv(argv),)]
+
+    def create_child(self, args):
+        # Route through preexec_fn() so namespace entry happens in the child
+        # between fork and exec.
+        return mitogen.parent.create_child(args, preexec_fn=self.preexec_fn)
+
+    def _get_name(self):
+        """Return the stream name, e.g. u'setns.mycontainer'."""
+        return u'setns.' + self.container
+
+    def connect(self):
+        """
+        Discover the container leader's PID using the configured CLI tool,
+        then complete the normal parent connection sequence.
+        """
+        self.name = self._get_name()
+        attr, func = self.GET_LEADER_BY_KIND[self.kind]
+        tool_path = getattr(self, attr)
+        self.leader_pid = func(tool_path, self.container)
+        LOG.debug('Leader PID for %s container %r: %d',
+                  self.kind, self.container, self.leader_pid)
+        super(Stream, self).connect()
diff --git a/ansible/plugins/mitogen-0.2.6/mitogen/ssh.py b/ansible/plugins/mitogen-0.2.6/mitogen/ssh.py
new file mode 100644
index 000000000..11b74c1b3
--- /dev/null
+++ b/ansible/plugins/mitogen-0.2.6/mitogen/ssh.py
@@ -0,0 +1,317 @@
+# Copyright 2019, David Wilson
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its contributors
+# may be used to endorse or promote products derived from this software without
+# specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+
+# !mitogen: minify_safe
+
+"""
+Functionality to allow establishing new slave contexts over an SSH connection.
+"""
+
+import logging
+import re
+
+try:
+    from shlex import quote as shlex_quote
+except ImportError:
+    from pipes import quote as shlex_quote
+
+import mitogen.parent
+from mitogen.core import b
+from mitogen.core import bytes_partition
+
+try:
+    any
+except NameError:
+    from mitogen.core import any
+
+
+# NOTE(review): named 'mitogen' rather than __name__ — presumably so SSH
+# messages appear under the parent logger; confirm before changing.
+LOG = logging.getLogger('mitogen')
+
+# Matching is done against buf.lower() in _connect_input_loop(), so this
+# lowercase token also matches 'Password:' and sshpass-style prompts.
+PASSWORD_PROMPT = b('password')
+HOSTKEY_REQ_PROMPT = b('are you sure you want to continue connecting (yes/no)?')
+HOSTKEY_FAIL = b('host key verification failed.')
+
+# [user@host: ] permission denied
+PERMDENIED_RE = re.compile(
+    ('(?:[^@]+@[^:]+: )?'  # Absent in OpenSSH <7.5
+     'Permission denied').encode(),
+    re.I
+)
+
+
+# Lines starting with these markers are `ssh -v` diagnostics; filter_debug()
+# logs them instead of feeding them to the bootstrap parser.
+DEBUG_PREFIXES = (b('debug1:'), b('debug2:'), b('debug3:'))
+
+
+def filter_debug(stream, it):
+    """
+    Read line chunks from it, either yielding them directly, or building up and
+    logging individual lines if they look like SSH debug output.
+
+    This contains the mess of dealing with both line-oriented input, and partial
+    lines such as the password prompt.
+
+    Yields `(line, partial)` tuples, where `line` is the line, `partial` is
+    :data:`True` if no terminating newline character was present and no more
+    data exists in the read buffer. Consuming code can use this to unreliably
+    detect the presence of an interactive prompt.
+    """
+    # The `partial` test is unreliable, but is only problematic when verbosity
+    # is enabled: it's possible for a combination of SSH banner, password
+    # prompt, verbose output, timing and OS buffering specifics to create a
+    # situation where an otherwise newline-terminated line appears to not be
+    # terminated, due to a partial read(). If something is broken when
+    # ssh_debug_level>0, this is the first place to look.
+    #
+    # Three-state machine: 'start_of_line' classifies the upcoming line,
+    # 'in_debug' accumulates-and-logs a debug line, 'in_plain' yields
+    # ordinary output to the caller.
+    state = 'start_of_line'
+    buf = b('')
+    for chunk in it:
+        buf += chunk
+        while buf:
+            if state == 'start_of_line':
+                if len(buf) < 8:
+                    # short read near buffer limit, block awaiting at least 8
+                    # bytes so we can discern a debug line, or the minimum
+                    # interesting token from above or the bootstrap
+                    # ('password', 'MITO000\n').
+                    break
+                elif any(buf.startswith(p) for p in DEBUG_PREFIXES):
+                    state = 'in_debug'
+                else:
+                    state = 'in_plain'
+            elif state == 'in_debug':
+                # Debug lines are logged whole; wait for the newline.
+                if b('\n') not in buf:
+                    break
+                line, _, buf = bytes_partition(buf, b('\n'))
+                LOG.debug('%s: %s', stream.name,
+                          mitogen.core.to_text(line.rstrip()))
+                state = 'start_of_line'
+            elif state == 'in_plain':
+                # Yield even without a trailing newline so prompts are seen;
+                # `partial` is True when no newline arrived and the buffer
+                # drained.
+                line, nl, buf = bytes_partition(buf, b('\n'))
+                yield line + nl, not (nl or buf)
+                if nl:
+                    state = 'start_of_line'
+
+
+class PasswordError(mitogen.core.StreamError):
+    """Raised when SSH authentication fails or a password is required."""
+    pass
+
+
+class HostKeyError(mitogen.core.StreamError):
+    """Raised on host key verification failure or misconfiguration."""
+    pass
+
+
+class Stream(mitogen.parent.Stream):
+    """
+    Construct a child context over the OpenSSH binary, optionally accepting
+    unknown host keys and/or typing a password interactively, and filtering
+    `ssh -v` diagnostics out of the bootstrap stream.
+    """
+    child_is_immediate_subprocess = False
+
+    #: Default to whatever is available as 'python' on the remote machine,
+    #: overriding sys.executable use.
+    python_path = 'python'
+
+    #: Number of -v invocations to pass on command line.
+    ssh_debug_level = 0
+
+    #: The path to the SSH binary.
+    ssh_path = 'ssh'
+
+    hostname = None
+    username = None
+    port = None
+
+    identity_file = None
+    password = None
+    ssh_args = None
+
+    check_host_keys_msg = 'check_host_keys= must be set to accept, enforce or ignore'
+
+    def construct(self, hostname, username=None, ssh_path=None, port=None,
+                  check_host_keys='enforce', password=None, identity_file=None,
+                  compression=True, ssh_args=None, keepalive_enabled=True,
+                  keepalive_count=3, keepalive_interval=15,
+                  identities_only=True, ssh_debug_level=None, **kwargs):
+        """
+        Record connection parameters and select the child creation strategy.
+
+        :raises ValueError:
+            `check_host_keys` is not one of 'accept', 'enforce' or 'ignore'.
+        """
+        super(Stream, self).construct(**kwargs)
+        if check_host_keys not in ('accept', 'enforce', 'ignore'):
+            raise ValueError(self.check_host_keys_msg)
+
+        self.hostname = hostname
+        self.username = username
+        self.port = port
+        self.check_host_keys = check_host_keys
+        self.password = password
+        self.identity_file = identity_file
+        self.identities_only = identities_only
+        self.compression = compression
+        self.keepalive_enabled = keepalive_enabled
+        self.keepalive_count = keepalive_count
+        self.keepalive_interval = keepalive_interval
+        if ssh_path:
+            self.ssh_path = ssh_path
+        if ssh_args:
+            self.ssh_args = ssh_args
+        if ssh_debug_level:
+            self.ssh_debug_level = ssh_debug_level
+
+        self._init_create_child()
+
+    def _requires_pty(self):
+        """
+        Return :data:`True` if the configuration requires a PTY to be
+        allocated. This is only true if we must interactively accept host keys,
+        or type a password.
+        """
+        return (self.check_host_keys == 'accept' or
+                self.password is not None)
+
+    def _init_create_child(self):
+        """
+        Initialize the base class :attr:`create_child` and
+        :attr:`create_child_args` according to whether we need a PTY or not.
+        """
+        if self._requires_pty():
+            self.create_child = mitogen.parent.hybrid_tty_create_child
+        else:
+            self.create_child = mitogen.parent.create_child
+            self.create_child_args = {
+                'stderr_pipe': True,
+            }
+
+    def get_boot_command(self):
+        """
+        Return the full ssh argument vector: options derived from construct()
+        parameters, the hostname, then the shell-quoted parent bootstrap
+        command.
+        """
+        bits = [self.ssh_path]
+        if self.ssh_debug_level:
+            # Pass at most three -v flags; min() caps the configured level.
+            bits += ['-' + ('v' * min(3, self.ssh_debug_level))]
+        else:
+            # issue #307: suppress any login banner, as it may contain the
+            # password prompt, and there is no robust way to tell the
+            # difference.
+            bits += ['-o', 'LogLevel ERROR']
+        if self.username:
+            bits += ['-l', self.username]
+        if self.port is not None:
+            bits += ['-p', str(self.port)]
+        if self.identities_only and (self.identity_file or self.password):
+            bits += ['-o', 'IdentitiesOnly yes']
+        if self.identity_file:
+            bits += ['-i', self.identity_file]
+        if self.compression:
+            bits += ['-o', 'Compression yes']
+        if self.keepalive_enabled:
+            bits += [
+                '-o', 'ServerAliveInterval %s' % (self.keepalive_interval,),
+                '-o', 'ServerAliveCountMax %s' % (self.keepalive_count,),
+            ]
+        if not self._requires_pty():
+            # No TTY means prompts cannot be answered; make ssh fail fast.
+            bits += ['-o', 'BatchMode yes']
+        if self.check_host_keys == 'enforce':
+            bits += ['-o', 'StrictHostKeyChecking yes']
+        if self.check_host_keys == 'accept':
+            bits += ['-o', 'StrictHostKeyChecking ask']
+        elif self.check_host_keys == 'ignore':
+            bits += [
+                '-o', 'StrictHostKeyChecking no',
+                '-o', 'UserKnownHostsFile /dev/null',
+                '-o', 'GlobalKnownHostsFile /dev/null',
+            ]
+        if self.ssh_args:
+            bits += self.ssh_args
+        bits.append(self.hostname)
+        base = super(Stream, self).get_boot_command()
+        return bits + [shlex_quote(s).strip() for s in base]
+
+    def _get_name(self):
+        """Return the stream name, e.g. u'ssh.host' or u'ssh.host:2222'."""
+        s = u'ssh.' + mitogen.core.to_text(self.hostname)
+        if self.port:
+            s += u':%s' % (self.port,)
+        return s
+
+    auth_incorrect_msg = 'SSH authentication is incorrect'
+    password_incorrect_msg = 'SSH password is incorrect'
+    password_required_msg = 'SSH password was requested, but none specified'
+    hostkey_config_msg = (
+        'SSH requested permission to accept unknown host key, but '
+        'check_host_keys=ignore. This is likely due to ssh_args=  '
+        'conflicting with check_host_keys=. Please correct your '
+        'configuration.'
+    )
+    hostkey_failed_msg = (
+        'Host key checking is enabled, and SSH reported an unrecognized or '
+        'mismatching host key.'
+    )
+
+    def _host_key_prompt(self):
+        """
+        Respond to SSH's interactive host key question: type 'yes' when
+        check_host_keys='accept', otherwise raise HostKeyError.
+        """
+        if self.check_host_keys == 'accept':
+            LOG.debug('%s: accepting host key', self.name)
+            self.diag_stream.transmit_side.write(b('yes\n'))
+            return
+
+        # _host_key_prompt() should never be reached with ignore or enforce
+        # mode, SSH should have handled that. User's ssh_args= is conflicting
+        # with ours.
+        raise HostKeyError(self.hostkey_config_msg)
+
+    def _connect_input_loop(self, it):
+        """
+        Scan filtered SSH output until the bootstrap marker appears, handling
+        host key questions, permission-denied errors, and password prompts.
+
+        :raises HostKeyError:
+            Host key verification failed or was misconfigured.
+        :raises PasswordError:
+            Authentication failed, or a password was needed but not supplied.
+        :raises mitogen.core.StreamError:
+            Output ended without the bootstrap marker appearing.
+        """
+        password_sent = False
+        for buf, partial in filter_debug(self, it):
+            LOG.debug('%s: stdout: %s', self.name, buf.rstrip())
+            if buf.endswith(self.EC0_MARKER):
+                # Bootstrap echoed its marker: the handshake may proceed.
+                self._ec0_received()
+                return
+            elif HOSTKEY_REQ_PROMPT in buf.lower():
+                self._host_key_prompt()
+            elif HOSTKEY_FAIL in buf.lower():
+                raise HostKeyError(self.hostkey_failed_msg)
+            elif PERMDENIED_RE.match(buf):
+                # issue #271: work around conflict with user shell reporting
+                # 'permission denied' e.g. during chdir($HOME) by only matching
+                # it at the start of the line.
+                if self.password is not None and password_sent:
+                    raise PasswordError(self.password_incorrect_msg)
+                elif PASSWORD_PROMPT in buf and self.password is None:
+                    # Permission denied (password,pubkey)
+                    raise PasswordError(self.password_required_msg)
+                else:
+                    raise PasswordError(self.auth_incorrect_msg)
+            elif partial and PASSWORD_PROMPT in buf.lower():
+                # Only treat 'password' as a prompt when the line is partial,
+                # i.e. no newline arrived and the read buffer drained.
+                if self.password is None:
+                    raise PasswordError(self.password_required_msg)
+                LOG.debug('%s: sending password', self.name)
+                self.diag_stream.transmit_side.write(
+                    (self.password + '\n').encode()
+                )
+                password_sent = True
+
+        raise mitogen.core.StreamError('bootstrap failed')
+
+    def _connect_bootstrap(self):
+        """
+        Watch stdout and (when a PTY was allocated) the diagnostic stream for
+        bootstrap progress, always closing the read iterator.
+        """
+        fds = [self.receive_side.fd]
+        if self.diag_stream is not None:
+            fds.append(self.diag_stream.receive_side.fd)
+
+        it = mitogen.parent.iter_read(fds=fds, deadline=self.connect_deadline)
+        try:
+            self._connect_input_loop(it)
+        finally:
+            it.close()
diff --git a/ansible/plugins/mitogen-0.2.6/mitogen/su.py b/ansible/plugins/mitogen-0.2.6/mitogen/su.py
new file mode 100644
index 000000000..5ff9e177f
--- /dev/null
+++ b/ansible/plugins/mitogen-0.2.6/mitogen/su.py
@@ -0,0 +1,128 @@
+# Copyright 2019, David Wilson
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its contributors
+# may be used to endorse or promote products derived from this software without
+# specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+
+# !mitogen: minify_safe
+
+import logging
+
+import mitogen.core
+import mitogen.parent
+from mitogen.core import b
+
+try:
+    any
+except NameError:
+    from mitogen.core import any
+
+
+LOG = logging.getLogger(__name__)
+
+
+class PasswordError(mitogen.core.StreamError):
+    """Raised when su requires a password we lack, or rejects ours."""
+    pass
+
+
+class Stream(mitogen.parent.Stream):
+    # TODO: BSD su cannot handle stdin being a socketpair, but it does let the
+    # child inherit fds from the parent. So we can still pass a socketpair in
+    # for hybrid_tty_create_child(), there just needs to be either a shell
+    # snippet or bootstrap support for fixing things up afterwards.
+    create_child = staticmethod(mitogen.parent.tty_create_child)
+    child_is_immediate_subprocess = False
+
+    #: Once connected, points to the corresponding DiagLogStream, allowing it to
+    #: be disconnected at the same time this stream is being torn down.
+
+    username = 'root'
+    password = None
+    su_path = 'su'
+    password_prompt = b('password:')
+    incorrect_prompts = (
+        b('su: sorry'),                    # BSD
+        b('su: authentication failure'),   # Linux
+        b('su: incorrect password'),       # CentOS 6
+        b('authentication is denied'),     # AIX
+    )
+
+    def construct(self, username=None, password=None, su_path=None,
+                  password_prompt=None, incorrect_prompts=None, **kwargs):
+        super(Stream, self).construct(**kwargs)
+        if username is not None:
+            self.username = username
+        if password is not None:
+            self.password = password
+        if su_path is not None:
+            self.su_path = su_path
+        if password_prompt is not None:
+            self.password_prompt = password_prompt.lower()
+        if incorrect_prompts is not None:
+            self.incorrect_prompts = map(str.lower, incorrect_prompts)
+
+    def _get_name(self):
+        return u'su.' + mitogen.core.to_text(self.username)
+
+    def get_boot_command(self):
+        argv = mitogen.parent.Argv(super(Stream, self).get_boot_command())
+        return [self.su_path, self.username, '-c', str(argv)]
+
+    password_incorrect_msg = 'su password is incorrect'
+    password_required_msg = 'su password is required'
+
+    def _connect_input_loop(self, it):
+        password_sent = False
+
+        for buf in it:
+            LOG.debug('%r: received %r', self, buf)
+            if buf.endswith(self.EC0_MARKER):
+                self._ec0_received()
+                return
+            if any(s in buf.lower() for s in self.incorrect_prompts):
+                if password_sent:
+                    raise PasswordError(self.password_incorrect_msg)
+            elif self.password_prompt in buf.lower():
+                if self.password is None:
+                    raise PasswordError(self.password_required_msg)
+                if password_sent:
+                    raise PasswordError(self.password_incorrect_msg)
+                LOG.debug('sending password')
+                self.transmit_side.write(
+                    mitogen.core.to_text(self.password + '\n').encode('utf-8')
+                )
+                password_sent = True
+
+        raise mitogen.core.StreamError('bootstrap failed')
+
+    def _connect_bootstrap(self):
+        it = mitogen.parent.iter_read(
+            fds=[self.receive_side.fd],
+            deadline=self.connect_deadline,
+        )
+        try:
+            self._connect_input_loop(it)
+        finally:
+            it.close()
diff --git a/ansible/plugins/mitogen-0.2.6/mitogen/sudo.py b/ansible/plugins/mitogen-0.2.6/mitogen/sudo.py
new file mode 100644
index 000000000..868d4d76c
--- /dev/null
+++ b/ansible/plugins/mitogen-0.2.6/mitogen/sudo.py
@@ -0,0 +1,277 @@
+# Copyright 2019, David Wilson
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its contributors
+# may be used to endorse or promote products derived from this software without
+# specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+
+# !mitogen: minify_safe
+
+import base64
+import logging
+import optparse
+import re
+
+import mitogen.core
+import mitogen.parent
+from mitogen.core import b
+
+
+LOG = logging.getLogger(__name__)
+
# Localized "password" prompt substrings sudo may emit, one per translation
# catalogue (.po file) named in the trailing comment. These are
# base64-encoded UTF-8 as our existing minifier/module server struggles with
# Unicode Python source in some (forgotten) circumstances.
PASSWORD_PROMPTS = [
    'cGFzc3dvcmQ=',                                              # english
    'bG96aW5rYQ==',                                              # sr@latin.po
    '44OR44K544Ov44O844OJ',                                      # ja.po
    '4Kaq4Ka+4Ka44KaT4Kef4Ka+4Kaw4KeN4Kah',                      # bn.po
    '2YPZhNmF2Kkg2KfZhNiz2LE=',                                  # ar.po
    'cGFzYWhpdHph',                                              # eu.po
    '0L/QsNGA0L7Qu9GM',                                          # uk.po
    'cGFyb29s',                                                  # et.po
    'c2FsYXNhbmE=',                                              # fi.po
    '4Kiq4Ki+4Ki44Ki14Kiw4Kih',                                  # pa.po
    'Y29udHJhc2lnbm8=',                                          # ia.po
    'Zm9jYWwgZmFpcmU=',                                          # ga.po
    '16HXodee15Q=',                                              # he.po
    '4Kqq4Kq+4Kq44Kq14Kqw4KuN4Kqh',                              # gu.po
    '0L/QsNGA0L7Qu9Cw',                                          # bg.po
    '4Kyq4K2N4Kyw4Kys4K2H4Ky2IOCsuOCsmeCtjeCsleCth+CspA==',      # or.po
    '4K6V4K6f4K614K+B4K6a4K+N4K6a4K+K4K6y4K+N',                  # ta.po
    'cGFzc3dvcnQ=',                                              # de.po
    '7JWU7Zi4',                                                  # ko.po
    '0LvQvtC30LjQvdC60LA=',                                      # sr.po
    'beG6rXQga2jhuql1',                                          # vi.po
    'c2VuaGE=',                                                  # pt_BR.po
    'cGFzc3dvcmQ=',                                              # it.po
    'aGVzbG8=',                                                  # cs.po
    '5a+G56K877ya',                                              # zh_TW.po
    'aGVzbG8=',                                                  # sk.po
    '4LC44LCC4LCV4LGH4LCk4LCq4LCm4LCu4LGB',                      # te.po
    '0L/QsNGA0L7Qu9GM',                                          # kk.po
    'aGFzxYJv',                                                  # pl.po
    'Y29udHJhc2VueWE=',                                          # ca.po
    'Y29udHJhc2XDsWE=',                                          # es.po
    '4LSF4LSf4LSv4LS+4LSz4LS14LS+4LSV4LWN4LSV4LWN',              # ml.po
    'c2VuaGE=',                                                  # pt.po
    '5a+G56CB77ya',                                              # zh_CN.po
    '4KSX4KWB4KSq4KWN4KSk4KS24KSs4KWN4KSm',                      # mr.po
    'bMO2c2Vub3Jk',                                              # sv.po
    '4YOe4YOQ4YOg4YOd4YOa4YOY',                                  # ka.po
    '4KS24KSs4KWN4KSm4KSV4KWC4KSf',                              # hi.po
    'YWRnYW5nc2tvZGU=',                                          # da.po
    '4La74LeE4LeD4LeK4La04Lav4La6',                              # si.po
    'cGFzc29yZA==',                                              # nb.po
    'd2FjaHR3b29yZA==',                                          # nl.po
    '4Kaq4Ka+4Ka44KaT4Kef4Ka+4Kaw4KeN4Kah',                      # bn_IN.po
    'cGFyb2xh',                                                  # tr.po
    '4LKX4LOB4LKq4LON4LKk4LKq4LKm',                              # kn.po
    'c2FuZGk=',                                                  # id.po
    '0L/QsNGA0L7Qu9GM',                                          # ru.po
    'amVsc3rDsw==',                                              # hu.po
    'bW90IGRlIHBhc3Nl',                                          # fr.po
    'aXBoYXNpd2VkaQ==',                                          # zu.po
    '4Z6W4Z624Z6A4Z+S4Z6Z4Z6f4Z6Y4Z+S4Z6E4Z624Z6P4Z+LwqDhn5Y=',  # km.po
    '4KaX4KeB4Kaq4KeN4Kak4Ka24Kas4KeN4Kam',                      # as.po
]


# Single alternation matching any of the decoded prompt words; searched
# against lowercased child output during bootstrap.
PASSWORD_PROMPT_RE = re.compile(
    u'|'.join(
        base64.b64decode(s).decode('utf-8')
        for s in PASSWORD_PROMPTS
    )
)
+
+
# NOTE(review): appears unused in this module (matching is done via
# PASSWORD_PROMPT_RE); presumably retained for API compatibility — confirm.
PASSWORD_PROMPT = b('password')
# Table of sudo command-line options understood by the parser below.
# Each entry is (supported, kind, longopt, shortopt): `supported` marks flags
# we honour rather than merely accept; `kind` is 'bool' (store_true) or 'str'
# (takes a value). Commented-out entries document options deliberately left
# unsupported.
SUDO_OPTIONS = [
    #(False, 'bool', '--askpass', '-A')
    #(False, 'str', '--auth-type', '-a')
    #(False, 'bool', '--background', '-b')
    #(False, 'str', '--close-from', '-C')
    #(False, 'str', '--login-class', 'c')
    (True,  'bool', '--preserve-env', '-E'),
    #(False, 'bool', '--edit', '-e')
    #(False, 'str', '--group', '-g')
    (True,  'bool', '--set-home', '-H'),
    #(False, 'str', '--host', '-h')
    (False, 'bool', '--login', '-i'),
    #(False, 'bool', '--remove-timestamp', '-K')
    #(False, 'bool', '--reset-timestamp', '-k')
    #(False, 'bool', '--list', '-l')
    #(False, 'bool', '--preserve-groups', '-P')
    #(False, 'str', '--prompt', '-p')

    # SELinux options. Passed through as-is.
    (False, 'str', '--role', '-r'),
    (False, 'str', '--type', '-t'),

    # These options are supplied by default by Ansible, but are ignored, as
    # sudo always runs under a TTY with Mitogen.
    (True, 'bool', '--stdin', '-S'),
    (True, 'bool', '--non-interactive', '-n'),

    #(False, 'str', '--shell', '-s')
    #(False, 'str', '--other-user', '-U')
    (False, 'str', '--user', '-u'),
    #(False, 'bool', '--version', '-V')
    #(False, 'bool', '--validate', '-v')
]
+
+
class OptionParser(optparse.OptionParser):
    """
    optparse parser that never prints or calls ``sys.exit()``: every terminal
    action is converted into a :class:`mitogen.core.StreamError`, because
    parsing happens deep inside connection setup rather than on a real
    command line.
    """

    def help(self):
        self.exit()

    def error(self, msg):
        self.exit(msg=msg)

    def exit(self, status=0, msg=None):
        raise mitogen.core.StreamError(
            'sudo: ' + (msg or 'unsupported option')
        )
+
+
def make_sudo_parser():
    """Build an :class:`OptionParser` recognizing every option listed in
    :data:`SUDO_OPTIONS`, boolean flags as store_true and the rest as
    value-taking options."""
    parser = OptionParser()
    for _supported, kind, longopt, shortopt in SUDO_OPTIONS:
        extra = {'action': 'store_true'} if kind == 'bool' else {}
        parser.add_option(longopt, shortopt, **extra)
    return parser
+
+
def parse_sudo_flags(args):
    """Parse a list of sudo-style flags, raising
    :class:`mitogen.core.StreamError` for unknown options or positional
    leftovers, and return the optparse values object."""
    opts, leftover = make_sudo_parser().parse_args(args)
    if leftover:
        raise mitogen.core.StreamError(
            'unsupported sudo arguments:' + str(leftover)
        )
    return opts
+
+
class PasswordError(mitogen.core.StreamError):
    """Raised when a password is required but missing, or was rejected."""
    pass
+
+
def option(default, *args):
    """Return the first element of *args* that is not :data:`None`, falling
    back to *default* when all are None (or none are given)."""
    return next((candidate for candidate in args if candidate is not None),
                default)
+
+
class Stream(mitogen.parent.Stream):
    """
    Stream implementing privilege escalation via `sudo`. The child is spawned
    via :func:`mitogen.parent.hybrid_tty_create_child` so sudo's password
    prompt (on the TTY) can be answered separately from bootstrap stdio.
    """
    create_child = staticmethod(mitogen.parent.hybrid_tty_create_child)
    child_is_immediate_subprocess = False

    # Class-level defaults; overridden per-connection by construct().
    sudo_path = 'sudo'
    username = 'root'
    password = None
    preserve_env = False
    set_home = False
    login = False

    # SELinux role/type, passed through to `sudo -r` / `sudo -t`.
    selinux_role = None
    selinux_type = None

    def construct(self, username=None, sudo_path=None, password=None,
                  preserve_env=None, set_home=None, sudo_args=None,
                  login=None, selinux_role=None, selinux_type=None, **kwargs):
        # Precedence per option(): explicit keyword parameters win over flags
        # parsed out of `sudo_args`; class defaults apply last.
        super(Stream, self).construct(**kwargs)
        opts = parse_sudo_flags(sudo_args or [])

        self.username = option(self.username, username, opts.user)
        self.sudo_path = option(self.sudo_path, sudo_path)
        self.password = password or None
        self.preserve_env = option(self.preserve_env,
            preserve_env, opts.preserve_env)
        self.set_home = option(self.set_home, set_home, opts.set_home)
        self.login = option(self.login, login, opts.login)
        self.selinux_role = option(self.selinux_role, selinux_role, opts.role)
        self.selinux_type = option(self.selinux_type, selinux_type, opts.type)

    def _get_name(self):
        """Return this stream's display name, e.g. ``sudo.root``."""
        return u'sudo.' + mitogen.core.to_text(self.username)

    def get_boot_command(self):
        """Return the full argv used to spawn the child under sudo."""
        # Note: sudo did not introduce long-format option processing until July
        # 2013, so even though we parse long-format options, supply short-form
        # to the sudo command.
        bits = [self.sudo_path, '-u', self.username]
        if self.preserve_env:
            bits += ['-E']
        if self.set_home:
            bits += ['-H']
        if self.login:
            bits += ['-i']
        if self.selinux_role:
            bits += ['-r', self.selinux_role]
        if self.selinux_type:
            bits += ['-t', self.selinux_type]

        bits = bits + ['--'] + super(Stream, self).get_boot_command()
        LOG.debug('sudo command line: %r', bits)
        return bits

    # Messages used when raising PasswordError during bootstrap.
    password_incorrect_msg = 'sudo password is incorrect'
    password_required_msg = 'sudo password is required'

    def _connect_input_loop(self, it):
        """
        Consume child output until the bootstrap marker (EC0) appears,
        answering at most one localized password prompt along the way.

        :raises PasswordError:
            Password required but not set, or a prompt reappeared after the
            password was sent (i.e. it was rejected).
        :raises mitogen.core.StreamError:
            Child exited without emitting the bootstrap marker.
        """
        password_sent = False

        for buf in it:
            LOG.debug('%s: received %r', self.name, buf)
            if buf.endswith(self.EC0_MARKER):
                # Bootstrap marker seen: the remote interpreter is up.
                self._ec0_received()
                return

            # NOTE(review): decode() assumes each buffer splits on UTF-8
            # character boundaries; a multibyte prompt straddling two reads
            # could raise UnicodeDecodeError — confirm prompts always arrive
            # within a single small read.
            match = PASSWORD_PROMPT_RE.search(buf.decode('utf-8').lower())
            if match is not None:
                LOG.debug('%s: matched password prompt %r',
                          self.name, match.group(0))
                if self.password is None:
                    raise PasswordError(self.password_required_msg)
                if password_sent:
                    raise PasswordError(self.password_incorrect_msg)
                # The password is written to the TTY (diag stream), where
                # sudo actually prompts — not to the bootstrap stdio.
                self.diag_stream.transmit_side.write(
                    (mitogen.core.to_text(self.password) + '\n').encode('utf-8')
                )
                password_sent = True

        raise mitogen.core.StreamError('bootstrap failed')

    def _connect_bootstrap(self):
        """Watch both the stdio fd and (if present) the TTY diagnostic fd
        during bootstrap, since sudo prompts on the TTY."""
        fds = [self.receive_side.fd]
        if self.diag_stream is not None:
            fds.append(self.diag_stream.receive_side.fd)

        it = mitogen.parent.iter_read(
            fds=fds,
            deadline=self.connect_deadline,
        )

        try:
            self._connect_input_loop(it)
        finally:
            it.close()
diff --git a/ansible/plugins/mitogen-0.2.6/mitogen/unix.py b/ansible/plugins/mitogen-0.2.6/mitogen/unix.py
new file mode 100644
index 000000000..66141eec1
--- /dev/null
+++ b/ansible/plugins/mitogen-0.2.6/mitogen/unix.py
@@ -0,0 +1,168 @@
+# Copyright 2019, David Wilson
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its contributors
+# may be used to endorse or promote products derived from this software without
+# specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+
+# !mitogen: minify_safe
+
+"""
+Permit connection of additional contexts that may act with the authority of
+this context. For now, the UNIX socket is always mode 0600, i.e. can only be
+accessed by root or the same UID. Therefore we can always trust connections to
+have the same privilege (auth_id) as the current process.
+"""
+
+import errno
+import os
+import socket
+import struct
+import sys
+import tempfile
+
+import mitogen.core
+import mitogen.master
+
+from mitogen.core import LOG
+
+
def is_path_dead(path):
    """
    Return :data:`True` if no server is accepting connections on the UNIX
    socket at *path* — i.e. the connect attempt fails with ECONNREFUSED or
    ENOENT. Return :data:`False` if the connect succeeds or fails for any
    other reason.
    """
    probe = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
    try:
        try:
            probe.connect(path)
        except socket.error:
            # sys.exc_info() form keeps Python 2.4 compatibility.
            exc = sys.exc_info()[1]
            return exc.args[0] in (errno.ECONNREFUSED, errno.ENOENT)
    finally:
        probe.close()
    return False
+
+
def make_socket_path():
    """Return a fresh temporary filesystem path suitable for binding a UNIX
    socket.

    NOTE(review): tempfile.mktemp() is race-prone by design; presumably safe
    here because bind() fails if the path springs into existence — confirm.
    """
    return tempfile.mktemp(prefix='mitogen_unix_', suffix='.sock')
+
+
class Listener(mitogen.core.BasicStream):
    """
    Accept connections from additional local processes on a UNIX socket,
    assigning each a fresh context ID and registering a stream for it with
    the router. The socket file is chmod 0600, so only the same UID (or
    root) may connect, and connections inherit this process's authority
    (auth_id).
    """
    # Keep the broker's loop alive while this listener exists.
    keep_alive = True

    def __repr__(self):
        return '%s.%s(%r)' % (
            __name__,
            self.__class__.__name__,
            self.path,
        )

    def __init__(self, router, path=None, backlog=100):
        """
        :param router: Router with which new client streams are registered.
        :param path: Socket path; a fresh temporary path when None.
        :param backlog: listen(2) backlog.
        """
        self._router = router
        self.path = path or make_socket_path()
        self._sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)

        # A socket file left behind by a dead process would make bind() fail;
        # delete it only after verifying nothing is accepting on it.
        if os.path.exists(self.path) and is_path_dead(self.path):
            LOG.debug('%r: deleting stale %r', self, self.path)
            os.unlink(self.path)

        self._sock.bind(self.path)
        # int('0600', 8) rather than 0o600: keeps ancient Python 2 syntax
        # compatibility.
        os.chmod(self.path, int('0600', 8))
        self._sock.listen(backlog)
        self.receive_side = mitogen.core.Side(self, self._sock.fileno())
        router.broker.start_receive(self)

    def _unlink_socket(self):
        # Remove the socket file, tolerating it already being gone.
        try:
            os.unlink(self.path)
        except OSError:
            e = sys.exc_info()[1]
            # Prevent a shutdown race with the parent process.
            if e.args[0] != errno.ENOENT:
                raise

    def on_shutdown(self, broker):
        """Broker shutdown hook: stop accepting, remove the socket file and
        close the listening socket."""
        broker.stop_receive(self)
        self._unlink_socket()
        self._sock.close()
        self.receive_side.closed = True

    def _accept_client(self, sock):
        """
        Complete the handshake with one freshly accepted client: read its
        PID, allocate a context ID, reply with (new context ID, our context
        ID, our PID), then register the new stream with the router.
        """
        sock.setblocking(True)
        try:
            # Client sends its PID as a big-endian 32-bit unsigned int.
            pid, = struct.unpack('>L', sock.recv(4))
        except (struct.error, socket.error):
            LOG.error('%r: failed to read remote identity: %s',
                      self, sys.exc_info()[1])
            return

        context_id = self._router.id_allocator.allocate()
        context = mitogen.parent.Context(self._router, context_id)
        stream = mitogen.core.Stream(self._router, context_id)
        stream.name = u'unix_client.%d' % (pid,)
        # Clients on this 0600 socket act with this process's authority.
        stream.auth_id = mitogen.context_id
        stream.is_privileged = True

        try:
            sock.send(struct.pack('>LLL', context_id, mitogen.context_id,
                                  os.getpid()))
        except socket.error:
            LOG.error('%r: failed to assign identity to PID %d: %s',
                      self, pid, sys.exc_info()[1])
            return

        LOG.debug('%r: accepted %r', self, stream)
        stream.accept(sock.fileno(), sock.fileno())
        self._router.register(context, stream)

    def on_receive(self, broker):
        # The listening socket is readable: accept exactly one client and
        # hand it to _accept_client(), always closing our reference after.
        sock, _ = self._sock.accept()
        try:
            self._accept_client(sock)
        finally:
            sock.close()
+
+
def connect(path, broker=None):
    """
    Connect to a :class:`Listener` socket at *path*, perform the PID/ID
    handshake, and return a ``(router, context)`` pair communicating with
    the listening process.

    Side effect: assigns this process's :data:`mitogen.context_id`,
    :data:`mitogen.parent_id` and :data:`mitogen.parent_ids` from the
    listener's reply.

    :param str path: Filesystem path of the listener's UNIX socket.
    :param broker: Optional existing broker to attach the new Router to.
    """
    LOG.debug('unix.connect(path=%r)', path)
    sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
    sock.connect(path)
    # Handshake: send our PID; listener replies with (our new context ID,
    # its context ID, its PID), all big-endian 32-bit unsigned.
    sock.send(struct.pack('>L', os.getpid()))
    mitogen.context_id, remote_id, pid = struct.unpack('>LLL', sock.recv(12))
    mitogen.parent_id = remote_id
    mitogen.parent_ids = [remote_id]

    LOG.debug('unix.connect(): local ID is %r, remote is %r',
              mitogen.context_id, remote_id)

    router = mitogen.master.Router(broker=broker)
    stream = mitogen.core.Stream(router, remote_id)
    stream.accept(sock.fileno(), sock.fileno())
    stream.name = u'unix_listener.%d' % (pid,)

    context = mitogen.parent.Context(router, remote_id)
    router.register(context, stream)

    # Tear the stream down when the router's broker shuts down.
    mitogen.core.listen(router.broker, 'shutdown',
                        lambda: router.disconnect_stream(stream))

    # NOTE(review): closing sock here presumably relies on accept() having
    # duplicated the descriptor — confirm against Side/accept semantics.
    sock.close()
    return router, context
diff --git a/ansible/plugins/mitogen-0.2.6/mitogen/utils.py b/ansible/plugins/mitogen-0.2.6/mitogen/utils.py
new file mode 100644
index 000000000..94a171fb0
--- /dev/null
+++ b/ansible/plugins/mitogen-0.2.6/mitogen/utils.py
@@ -0,0 +1,227 @@
+# Copyright 2019, David Wilson
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its contributors
+# may be used to endorse or promote products derived from this software without
+# specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+
+# !mitogen: minify_safe
+
+import datetime
+import logging
+import os
+import sys
+
+import mitogen
+import mitogen.core
+import mitogen.master
+import mitogen.parent
+
+
LOG = logging.getLogger('mitogen')

# Portable alias for iterating a dict's (key, value) pairs: dict.iteritems
# disappeared in Python 3. The previous getattr()-based assignment was dead
# code — it was immediately overwritten by this branch — so it was removed.
if mitogen.core.PY3:
    iteritems = dict.items
else:
    iteritems = dict.iteritems
+
+
def setup_gil():
    """
    Set an extremely long GIL release interval, letting threads progress
    through CPU-heavy sequences without forcing wake-ups of other threads
    that would contend for the same code. For the new-style Ansible work this
    drops runtime ~33% and involuntary context switches by >80%, making
    threads effectively cooperatively scheduled.
    """
    # Each setter exists on only one major Python version; getattr() probes
    # for it instead of catching AttributeError.
    for setter, value in (
        (getattr(sys, 'setcheckinterval', None), 100000),  # Python 2
        (getattr(sys, 'setswitchinterval', None), 10),     # Python 3
    ):
        if setter is not None:
            setter(value)
+
+
def disable_site_packages():
    """
    Remove all entries mentioning ``site-packages`` or ``Extras`` from
    :data:`sys.path`. Used primarily for testing on OS X within a
    virtualenv, where OS X bundles some ancient version of the :mod:`six`
    module.
    """
    # In-place slice assignment mutates the existing list object, preserving
    # any external references to sys.path.
    sys.path[:] = [
        entry for entry in sys.path
        if 'site-packages' not in entry and 'Extras' not in entry
    ]
+
+
+def _formatTime(record, datefmt=None):
+    dt = datetime.datetime.fromtimestamp(record.created)
+    return dt.strftime(datefmt)
+
+
def log_get_formatter():
    """Return the :class:`logging.Formatter` used for mitogen logging:
    timestamp (with microseconds where strftime supports %f), one-letter
    severity, logger name, message."""
    datefmt = '%H:%M:%S'
    if sys.version_info > (2, 6):
        datefmt += '.%f'
    formatter = logging.Formatter(
        '%(asctime)s %(levelname).1s %(name)s: %(message)s',
        datefmt,
    )
    # _formatTime routes through datetime.strftime so '%f' works.
    formatter.formatTime = _formatTime
    return formatter
+
+
def log_to_file(path=None, io=False, level='INFO'):
    """
    Install a new :class:`logging.Handler` writing application logs to the
    filesystem. Useful when debugging slave IO problems.

    Parameters to this function may be overridden at runtime using environment
    variables. See :ref:`logging-env-vars`.

    :param str path:
        If not :data:`None`, a filesystem path to write logs to. Otherwise,
        logs are written to :data:`sys.stderr`.

    :param bool io:
        If :data:`True`, include extremely verbose IO logs in the output.
        Useful for debugging hangs, less useful for debugging application code.

    :param str level:
        Name of the :mod:`logging` package constant that is the minimum level
        to log at. Useful levels are ``DEBUG``, ``INFO``, ``WARNING``, and
        ``ERROR``.
    """
    log = logging.getLogger('')
    if path:
        fp = open(path, 'w', 1)
        mitogen.core.set_cloexec(fp.fileno())
    else:
        fp = sys.stderr

    level = os.environ.get('MITOGEN_LOG_LEVEL', level).upper()
    if level == 'IO':
        # The pseudo-level 'IO' means DEBUG plus verbose IO logging.
        level = 'DEBUG'
        io = True
    if io:
        # Fix: previously `io` was unconditionally overwritten with
        # `io = level == 'IO'`, so callers passing io=True without
        # level='IO' silently never received IO logs.
        logging.getLogger('mitogen.io').setLevel(logging.DEBUG)

    level = getattr(logging, level, logging.INFO)
    log.setLevel(level)

    # Prevent accidental duplicate log_to_file() calls from generating
    # duplicate output.
    for handler_ in reversed(log.handlers):
        if getattr(handler_, 'is_mitogen', None):
            log.handlers.remove(handler_)

    handler = logging.StreamHandler(fp)
    handler.is_mitogen = True
    handler.formatter = log_get_formatter()
    log.handlers.insert(0, handler)
+
+
def run_with_router(func, *args, **kwargs):
    """
    Run ``func(router, *args, **kwargs)`` with a temporary
    :class:`mitogen.master.Router`, guaranteeing the Broker is shut down and
    joined on both normal and exceptional return.

    :returns:
        `func`'s return value.
    """
    broker = mitogen.master.Broker()
    try:
        return func(mitogen.master.Router(broker), *args, **kwargs)
    finally:
        broker.shutdown()
        broker.join()
+
+
def with_router(func):
    """
    Decorator version of :func:`run_with_router`. Example:

    .. code-block:: python

        @with_router
        def do_stuff(router, arg):
            pass

        do_stuff(blah, 123)
    """
    def wrapper(*args, **kwargs):
        return run_with_router(func, *args, **kwargs)
    # func.__name__ exists on both Python 2 and 3; func_name is set for
    # callers expecting the old Python 2 attribute name.
    wrapper.func_name = func.__name__
    return wrapper
+
+
# Types cast() returns unchanged: scalar builtins plus mitogen's own wire
# types, which serialize safely without copying.
PASSTHROUGH = (
    int, float, bool,
    type(None),
    mitogen.core.Context,
    mitogen.core.CallError,
    mitogen.core.Blob,
    mitogen.core.Secret,
)
+
+
def cast(obj):
    """
    Recursively copy *obj*, replacing subclasses of built-in types with the
    plain built-in equivalents so cPickle will not try to preserve custom
    sub-types during serialization (which causes CallError on
    :meth:`call <mitogen.parent.Context.call>` in the target).

    Not applied by default since walking a large object graph may be
    expensive. See :ref:`serialization-rules` for supported types.

    :param obj:
        Object to undecorate.
    :returns:
        Undecorated object.
    :raises TypeError:
        An unsupported type was encountered.
    """
    # Check order matters: dict/list/tuple before PASSTHROUGH, and bool is an
    # int subclass handled by PASSTHROUGH.
    if isinstance(obj, dict):
        return dict((cast(key), cast(value))
                    for key, value in iteritems(obj))
    elif isinstance(obj, (list, tuple)):
        return [cast(item) for item in obj]
    elif isinstance(obj, PASSTHROUGH):
        return obj
    elif isinstance(obj, mitogen.core.UnicodeType):
        return mitogen.core.UnicodeType(obj)
    elif isinstance(obj, mitogen.core.BytesType):
        return mitogen.core.BytesType(obj)
    raise TypeError("Cannot serialize: %r: %r" % (type(obj), obj))
diff --git a/ansible/plugins/mitogen-0.2.6/setup.cfg b/ansible/plugins/mitogen-0.2.6/setup.cfg
new file mode 100644
index 000000000..98ceb29d7
--- /dev/null
+++ b/ansible/plugins/mitogen-0.2.6/setup.cfg
@@ -0,0 +1,15 @@
+[coverage:run]
+branch = true
+source = 
+	mitogen
+omit = 
+	mitogen/compat/*
+
+[flake8]
+ignore = E402,E128,W503,E731
+exclude = mitogen/compat
+
+[egg_info]
+tag_build = 
+tag_date = 0
+
diff --git a/ansible/plugins/mitogen-0.2.6/setup.py b/ansible/plugins/mitogen-0.2.6/setup.py
new file mode 100644
index 000000000..c32579968
--- /dev/null
+++ b/ansible/plugins/mitogen-0.2.6/setup.py
@@ -0,0 +1,67 @@
+#!/usr/bin/env python2
+# Copyright 2019, David Wilson
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice, this
+# list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# 3. Neither the name of the copyright holder nor the names of its contributors
+# may be used to endorse or promote products derived from this software without
+# specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+import os
+
+from setuptools import find_packages, setup
+
+
def grep_version():
    """
    Extract the package version from ``mitogen/__init__.py`` by scanning for
    the ``__version__`` assignment, without importing the package.

    :returns:
        Dotted version string (e.g. ``'0.2.6'``), or :data:`None` if no
        ``__version__`` line is found.
    """
    import ast

    path = os.path.join(os.path.dirname(__file__), 'mitogen/__init__.py')
    with open(path) as fp:
        for line in fp:
            if line.startswith('__version__'):
                _, _, s = line.partition('=')
                # ast.literal_eval replaces eval(): only literal syntax (the
                # version tuple) is accepted, never arbitrary expressions.
                return '.'.join(map(str, ast.literal_eval(s.strip())))
+
+
# Package metadata. The version is read from mitogen/__init__.py so it is
# defined in exactly one place.
setup(
    name = 'mitogen',
    version = grep_version(),
    description = 'Library for writing distributed self-replicating programs.',
    author = 'David Wilson',
    license = 'New BSD',
    url = 'https://github.com/dw/mitogen/',
    packages = find_packages(exclude=['tests', 'examples']),
    # Modules are served to targets from the filesystem; they must not be
    # installed inside a zip.
    zip_safe = False,
    classifiers = [
        'Environment :: Console',
        'Intended Audience :: System Administrators',
        'License :: OSI Approved :: BSD License',
        'Operating System :: POSIX',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2.4',
        'Programming Language :: Python :: 2.5',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: Implementation :: CPython',
        'Topic :: System :: Distributed Computing',
        'Topic :: System :: Systems Administration',
    ],
)
diff --git a/test/plugins b/test/plugins
new file mode 120000
index 000000000..dd0160360
--- /dev/null
+++ b/test/plugins
@@ -0,0 +1 @@
+../ansible/plugins
\ No newline at end of file
-- 
GitLab