diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index 2e871216b364b99218c112e33a63d3cd2c992708..3920aee114959f69242b80abff6d5c66b9f6acca 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -1,5 +1,6 @@
 include:
   - .gitlab/ci_templates/kaniko.yml
+  - .gitlab/ci_templates/ssh_setup.yml
 stages:
   - build
   - setup-cluster
@@ -22,11 +23,6 @@ default:
 
 ci_test_image:
   stage: build
-  image:
-    # We need a shell to provide the registry credentials, so we need to use the
-    # kaniko debug image (https://github.com/GoogleContainerTools/kaniko#debug-image)
-    name: gcr.io/kaniko-project/executor:debug
-    entrypoint: [""]
   only:
     changes:
       - .gitlab-ci.yml
@@ -36,14 +32,7 @@ ci_test_image:
 
 bootstrap:
   stage: setup-cluster
-  before_script:
-    - ansible --version
   script:
-    # Ensure test/ is not world-writable otherwise ansible-playbook refuses to run, see
-    # https://docs.ansible.com/ansible/devel/reference_appendices/config.html#cfg-in-world-writable-dir
-    - chmod 755 ansible/
-    - eval $(ssh-agent -s)
-    - echo "$SSH_PRIVATE_KEY" | tr -d '\r' | ssh-add - > /dev/null
     - echo "hostname $HOSTNAME, subdomain $SUBDOMAIN, domain $DOMAIN, address $ADDRESS"
     - python3 -m openappstack $HOSTNAME create --create-droplet $DOMAIN --hostname $HOSTNAME --ssh-key-id $SSH_KEY_ID --create-domain-records --subdomain $SUBDOMAIN
     - python3 -m openappstack $HOSTNAME install --ansible-param='--skip-tags=helmfile'
@@ -59,17 +48,11 @@ bootstrap:
       - helmfiles/**/*
       - test/**/*
       - openappstack/**/*
+  extends: .ssh_setup
 
 install:
   stage: install-apps
-  variables:
-    ANSIBLE_HOST_KEY_CHECKING: 'False'
   script:
-    # Ensure test/ is not world-writable otherwise ansible-playbook refuses to run, see
-    # https://docs.ansible.com/ansible/devel/reference_appendices/config.html#cfg-in-world-writable-dir
-    - chmod 755 ansible/
-    - eval $(ssh-agent -s)
-    - echo "$SSH_PRIVATE_KEY" | tr -d '\r' | ssh-add - > /dev/null
     - python3 -m openappstack $HOSTNAME install --ansible-param='--tags=helmfile'
     # Show versions of installed apps/binaries
     - ansible master -m shell -a 'oas-version-info.sh 2>&1'
@@ -85,14 +68,11 @@ install:
       - helmfiles/**/*
       - test/**/*
       - openappstack/**/*
+  extends: .ssh_setup
 
 testinfra:
   stage: health-test
   script:
-    - mkdir ~/.ssh
-    - eval $(ssh-agent -s)
-    - echo "$SSH_PRIVATE_KEY" | tr -d '\r' | ssh-add - > /dev/null
-    - echo -e 'Host *\n stricthostkeychecking no' > ~/.ssh/config
     - cd ansible/
     - pytest -v -m 'testinfra' --connection=ansible --ansible-inventory=../clusters/${HOSTNAME}/inventory.yml --hosts='ansible://*'
   only:
@@ -102,15 +82,12 @@ testinfra:
       - helmfiles/**/*
       - test/**/*
      - openappstack/**/*
+  extends: .ssh_setup
 
 certs:
   stage: health-test
   allow_failure: true
   script:
-    - mkdir ~/.ssh
-    - eval $(ssh-agent -s)
-    - echo "$SSH_PRIVATE_KEY" | tr -d '\r' | ssh-add - > /dev/null
-    - echo -e 'Host *\n stricthostkeychecking no' > ~/.ssh/config
     - cd ansible/
     - pytest -s -m 'certs' --connection=ansible --ansible-inventory=../clusters/${HOSTNAME}/inventory.yml --hosts='ansible://*'
   only:
@@ -120,6 +97,7 @@ certs:
       - helmfiles/**/*
       - test/**/*
       - openappstack/**/*
+  extends: .ssh_setup
 
 prometheus-alerts:
   stage: health-test
@@ -127,10 +105,6 @@ prometheus-alerts:
     OAS_DOMAIN: 'ci-${CI_PIPELINE_ID}.ci.openappstack.net'
   allow_failure: true
   script:
-    - mkdir ~/.ssh
-    - eval $(ssh-agent -s)
-    - echo "$SSH_PRIVATE_KEY" | tr -d '\r' | ssh-add - > /dev/null
-    - echo -e 'Host *\n stricthostkeychecking no' > ~/.ssh/config
     - cd test/
     - pytest -s -m 'prometheus' --connection=ansible --ansible-inventory=../clusters/${HOSTNAME}/inventory.yml --hosts='ansible://*'
   only:
@@ -139,6 +113,7 @@ prometheus-alerts:
       - ansible/**/*
       - helmfiles/**/*
       - test/**/*
+  extends: .ssh_setup
 
 behave-nextcloud:
   stage: integration-test
@@ -175,9 +150,32 @@ behave-grafana:
       - test/**/*
       - openappstack/**/*
 
-# Remove droplet after merge
-terminate_droplet_after_merge:
+terminate_current_pipeline_droplet:
+  variables:
+    HOSTNAME: "ci-${CI_PIPELINE_ID}"
   stage: cleanup
+  before_script:
+    - echo "Terminate the current pipeline droplet on success, but only for \
+      the master branch, since we want MR droplets to keep running even for \
+      successful deploys so we are able to investigate."
+  script:
+    - python3 -c "import greenhost_cloud; greenhost_cloud.terminate_droplets_by_name(\"^${HOSTNAME}\$\")"
+  only:
+    changes:
+      - .gitlab-ci.yml
+      - ansible/**/*
+      - helmfiles/**/*
+      - test/**/*
+      - openappstack/**/*
+    refs:
+      - master
+
+terminate_mr_droplet_after_merge:
+  stage: cleanup
+  before_script:
+    - echo "We leave MR droplets running even when the pipeline is successful \
+      to be able to investigate an MR. We need to terminate them when the MR \
+      is merged into master."
   script: |
     if [ "$(git show -s --pretty=%p HEAD | wc -w)" -gt 1 ]
     then
@@ -196,7 +194,7 @@ terminate_droplet_after_merge:
 terminate_old_droplets:
   stage: cleanup
   script:
-    # Remove droplet older than 2 days
+    - echo "Terminate stale droplets that are older than 2 days."
    - python3 -c "import greenhost_cloud; greenhost_cloud.terminate_droplets_by_name(\"^ci-\", 2)"
   only:
     changes:
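The SSH bootstrap lines removed from the jobs above are centralised in the new `.ssh_setup` template (added in `.gitlab/ci_templates/ssh_setup.yml` later in this diff), which jobs opt into via `extends: .ssh_setup`. As a minimal sketch of how GitLab resolves `extends` — the template's keys are merged into the job, with the job's own keys taking precedence — the `testinfra` job effectively becomes the following (`only:` rules omitted for brevity):

```yaml
# Effective testinfra job after `extends: .ssh_setup` is resolved:
# the template contributes the before_script; the job keeps its own
# stage and script.
testinfra:
  stage: health-test
  before_script:
    - mkdir ~/.ssh
    - echo -e 'Host *\n stricthostkeychecking no' > ~/.ssh/config
    - eval $(ssh-agent -s)
    - echo "$SSH_PRIVATE_KEY" | tr -d '\r' | ssh-add - > /dev/null
  script:
    - cd ansible/
    - pytest -v -m 'testinfra' --connection=ansible --ansible-inventory=../clusters/${HOSTNAME}/inventory.yml --hosts='ansible://*'
```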
diff --git a/.gitlab/ci_templates/kaniko.yml b/.gitlab/ci_templates/kaniko.yml
index 967b8b3316f47da0832d7f06c6b4d0717cd891e9..9ea7d296769c52ac8180609ba92d28ac8b081d8b 100644
--- a/.gitlab/ci_templates/kaniko.yml
+++ b/.gitlab/ci_templates/kaniko.yml
@@ -1,5 +1,11 @@
 # Optional environment variables:
-# - KANIKO_BUILD_IMAGENAME: Build/target image image
+#
+# - KANIKO_BUILD_IMAGENAME: Build/target image name.
+#   If empty, the image URL will be the root of the GitLab project path, i.e.
+#   `open.greenhost.net:4567/GROUP/PROJECT:TAG`.
+#   If set, images will be named like
+#   `open.greenhost.net:4567/GROUP/PROJECT/KANIKO_BUILD_IMAGENAME:TAG`.
+#
 # - KANIKO_CONTEXT: The subdir which holds the Dockerfile, leave unset if
 #   the Dockerfile is located at root level of the project.
 .kaniko_build:
@@ -11,4 +17,5 @@
     entrypoint: [""]
   script:
     - echo "{\"auths\":{\"$CI_REGISTRY\":{\"username\":\"$CI_REGISTRY_USER\",\"password\":\"$CI_REGISTRY_PASSWORD\"}}}" > /kaniko/.docker/config.json
-    - /kaniko/executor --context ${CI_PROJECT_DIR}/${KANIKO_CONTEXT:-.} --dockerfile ${CI_PROJECT_DIR}/${KANIKO_CONTEXT:-.}/Dockerfile --destination $CI_REGISTRY_IMAGE/${KANIKO_BUILD_IMAGENAME/#//}:${CI_COMMIT_REF_NAME}
+    - if [ -n "${KANIKO_BUILD_IMAGENAME}" ]; then export IMAGENAME="/${KANIKO_BUILD_IMAGENAME}"; fi
+    - /kaniko/executor --context ${CI_PROJECT_DIR}/${KANIKO_CONTEXT:-.} --dockerfile ${CI_PROJECT_DIR}/${KANIKO_CONTEXT:-.}/Dockerfile --destination ${CI_REGISTRY_IMAGE}${IMAGENAME}:${CI_COMMIT_REF_NAME}
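To make the image-naming rule above concrete, a hypothetical job consuming the `.kaniko_build` template might look as follows; the job name `build_docs_image` and the image name `docs` are invented for this sketch:

```yaml
# Hypothetical consumer of the .kaniko_build template. With
# KANIKO_BUILD_IMAGENAME set to "docs", the executor pushes to
#   ${CI_REGISTRY_IMAGE}/docs:${CI_COMMIT_REF_NAME}
# (e.g. open.greenhost.net:4567/GROUP/PROJECT/docs:master); leaving the
# variable unset pushes to ${CI_REGISTRY_IMAGE}:${CI_COMMIT_REF_NAME}.
build_docs_image:
  stage: build
  extends: .kaniko_build
  variables:
    KANIKO_BUILD_IMAGENAME: docs
```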
diff --git a/.gitlab/ci_templates/ssh_setup.yml b/.gitlab/ci_templates/ssh_setup.yml
new file mode 100644
index 0000000000000000000000000000000000000000..e0d0f6104a91c44825422ec60ddbcd78929fa775
--- /dev/null
+++ b/.gitlab/ci_templates/ssh_setup.yml
@@ -0,0 +1,6 @@
+.ssh_setup:
+  before_script:
+    - mkdir ~/.ssh
+    - echo -e 'Host *\n stricthostkeychecking no' > ~/.ssh/config
+    - eval $(ssh-agent -s)
+    - echo "$SSH_PRIVATE_KEY" | tr -d '\r' | ssh-add - > /dev/null
diff --git a/.gitlab/issue_templates/feedback.md b/.gitlab/issue_templates/feedback.md
new file mode 100644
index 0000000000000000000000000000000000000000..9ec51ace6d732861bdae3ece85c66620960a1163
--- /dev/null
+++ b/.gitlab/issue_templates/feedback.md
@@ -0,0 +1,39 @@
+This feedback template can be used to provide feedback based on the [OAS testing instructions](https://docs.openappstack.net/en/latest/testing_instructions.html).
+
+# Installation
+
+> What problems did you encounter during installation? (If these problems prevent you from continuing with the testing process, please provide some details about what went wrong, so we may try to help you get unstuck!)
+
+> We know that the installation process as it is right now is too technical, especially given that we want OpenAppStack to be easy to use for people who are only moderately computer-savvy.
+> We plan to make the installation process easier in the future, probably using some kind of web-based installer.
+> Even so, what do you think could be improved about the installation process or tutorial? Did anything bother you, was something extra hard to do, or unclear?
+
+
+# Nextcloud
+
+> Is there anything that didn't work properly when logging in or creating users, or something that you'd like to remark?
+
+> Did you have any issues using the Nextcloud desktop client?
+
+> Did you have any issues using the Nextcloud mobile client?
+
+> Anything else you tried: How did you do it? What was hard and what did you like?
+
+
+# OnlyOffice
+
+> Creating and editing a document: Is there anything that doesn't work properly, or something that you'd like to remark?
+
+> Collaboration on the same document: Did this work properly? Is the collaboration smooth in a technical sense (i.e., no lag)?
+
+> Something you like: Please think of something else you would do with OnlyOffice and describe it here. Now try to do it. How did you do it? What was hard and what did you like?
+
+> Anything else you tried: How did you do it? What was hard and what did you like?
+
+
+# Closing questions
+
+> What's missing? Now that you have gotten an idea of OpenAppStack's initial offering (Nextcloud and OnlyOffice), think of the next thing your organisation would need to collaborate more efficiently.
+> Or think of a tool that you or your organisation uses a lot that is also "centralised" (e.g., something exclusively provided by Google, Microsoft or a different company).
+
+> Do you have any other questions, comments, remarks, suggestions?
diff --git a/.gitlab/issue_templates/release.md b/.gitlab/issue_templates/release.md
new file mode 100644
index 0000000000000000000000000000000000000000..e830a546bd0955e05530533e8fc8bb6b6e680601
--- /dev/null
+++ b/.gitlab/issue_templates/release.md
@@ -0,0 +1,10 @@
+# Release checklist:
+
+- [ ] update/review documentation and make sure it matches the current state
+  - Update how to check out the latest stable tag
+- [ ] update [CHANGELOG.md](https://keepachangelog.com/en/1.0.0/)
+  - Include `Known issues`
+- [ ] update the version number in the `VERSION` file
+- [ ] commit
+- [ ] create a signed tag (`git tag -s 0.2.0 -m 'Release 0.2.0'`)
+- [ ] push to the MR, including the tag
diff --git a/CHANGELOG.md b/CHANGELOG.md
index c9b0d6b73a18a54e18a5490dc67de064c561fd95..0249575a6a80443eea83cc0077fd317a310c6dbd 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -4,7 +4,20 @@
 
 No unreleased changes yet.
 
-## [0.2.0] - 2091-10-22
+## [0.2.1] - 2019-10-29
+
+This is the release for the second user test.
+
+Fixes:
+* [#354](https://open.greenhost.net/openappstack/openappstack/issues/354) Nextcloud timeout during helmchart install
+* [#305](https://open.greenhost.net/openappstack/openappstack/issues/305) Use prometheus default securitycontext
+* [#351](https://open.greenhost.net/openappstack/openappstack/issues/351) Prometheus installation cannot be repeated
+* [#349](https://open.greenhost.net/openappstack/openappstack/issues/349) Improve test instructions
+* [#350](https://open.greenhost.net/openappstack/openappstack/issues/350) Increase nextcloud livenessprobe's leniency
+* [#346](https://open.greenhost.net/openappstack/openappstack/issues/346) Creating cluster fails if the hostname is oas-dev
+* [#319](https://open.greenhost.net/openappstack/openappstack/issues/319) kube_config_cluster.yml should be copied to local machine earlier
+
+## [0.2.0] - 2019-10-22
 
 * Release candidate for second user test
 * Added features:
diff --git a/VERSION b/VERSION
index 0ea3a944b399d25f7e1b8fe684d754eb8da9fe7f..0c62199f16ac1e2d7f7ae75b420c1231325dff4e 100644
--- a/VERSION
+++ b/VERSION
@@ -1 +1 @@
-0.2.0
+0.2.1
diff --git a/ansible/ansible.cfg b/ansible/ansible.cfg
index 827f17f5576bde9fb2df95819785946c04c872f0..20322a11438bff8435e32757518661afb382133d 100644
--- a/ansible/ansible.cfg
+++ b/ansible/ansible.cfg
@@ -3,5 +3,5 @@ callback_whitelist = profile_tasks, timer
 inventory = inventory.yml
 nocows = 1
 stdout_callback = yaml
-strategy_plugins = plugins/mitogen-0.2.8-pre/ansible_mitogen/plugins/strategy
+strategy_plugins = plugins/mitogen-0.2.9/ansible_mitogen/plugins/strategy
 strategy = mitogen_linear
diff --git a/ansible/group_vars/all/oas.yml b/ansible/group_vars/all/oas.yml
index 95d9797fd922ea4761c52d77619308a15c392302..9c94404c738d25460bd3773c3dc2f99f50145953 100644
--- a/ansible/group_vars/all/oas.yml
+++ b/ansible/group_vars/all/oas.yml
@@ -22,11 +22,7 @@ onlyoffice_rabbitmq_password: "{{ lookup('password', '{{ cluster_dir }}/secrets/
 
 grafana_admin_password: "{{ lookup('password', '{{ cluster_dir }}/secrets/grafana_admin_password chars=ascii_letters') }}"
 
-# Kubernetes version
-kubernetes_version: "v1.14.3-rancher1-1"
-
 # git repo versions
-git_charts_version: 'HEAD'
 git_local_storage_version: 'HEAD'
 
 # version of the https://open.greenhost.net/openappstack/nextcloud repo
git_nextcloud_version: 'd882b6952c32b5cce03e6ad9a534035ce6f01230' @@ -50,11 +46,14 @@ krew: checksum: 'sha256:dc2f2e1ec8a0acb6f3e23580d4a8b38c44823e948c40342e13ff6e8e12edb15a' rke: + # You can change the kubernetes version used by rke in + # `ansible/group_vars/all/settings.yml.example` + # # https://github.com/rancher/rke/releases - version: '0.2.7' + version: '0.3.2' # Also possible: # checksum: 'sha256:https://github.com/rancher/rke/releases/download/v0.2.4/sha256sum.txt' - checksum: 'sha256:7c05727aa3d6f8c4b5f60b057f1fe7883af48d5a778e3b1668f178dda84883ee' + checksum: 'sha256:96b366fe1faaa668b3e47f5b6d4bfd6334224e33c21e55dc79ec96f85e0e48e8' cert_manager: # cert-manager requires custom resource definitions applied before installing diff --git a/ansible/group_vars/all/settings.yml.example b/ansible/group_vars/all/settings.yml.example index 51b2984df49020e3e0f75f7c2f206629247a552c..bf9c3ce75e816c1f866a27a301674c631447e590 100644 --- a/ansible/group_vars/all/settings.yml.example +++ b/ansible/group_vars/all/settings.yml.example @@ -23,6 +23,7 @@ helmfiles: # Optional, custom rke config. # I.e. you can set the desired Kubernetes version but please be aware of # the [every rke release has only a few supported kubernetes versions](https://rancher.com/docs/rke/latest/en/config-options/#kubernetes-version). +# See also https://rancher.com/blog/2019/keeping-k8s-current-with-rancher # # rke_custom_config: # kubernetes_version: "v1.14.3-rancher1-1" diff --git a/ansible/plugins/mitogen-0.2.8-pre/ansible_mitogen/process.py b/ansible/plugins/mitogen-0.2.8-pre/ansible_mitogen/process.py deleted file mode 100644 index e4e61e8bc2781cd095fadb5274eee74eabfab536..0000000000000000000000000000000000000000 --- a/ansible/plugins/mitogen-0.2.8-pre/ansible_mitogen/process.py +++ /dev/null @@ -1,358 +0,0 @@ -# Copyright 2019, David Wilson -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are met: -# -# 1. Redistributions of source code must retain the above copyright notice, -# this list of conditions and the following disclaimer. -# -# 2. Redistributions in binary form must reproduce the above copyright notice, -# this list of conditions and the following disclaimer in the documentation -# and/or other materials provided with the distribution. -# -# 3. Neither the name of the copyright holder nor the names of its contributors -# may be used to endorse or promote products derived from this software without -# specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE -# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -# POSSIBILITY OF SUCH DAMAGE. 
- -from __future__ import absolute_import -import atexit -import errno -import logging -import os -import signal -import socket -import sys -import time - -try: - import faulthandler -except ImportError: - faulthandler = None - -import mitogen -import mitogen.core -import mitogen.debug -import mitogen.master -import mitogen.parent -import mitogen.service -import mitogen.unix -import mitogen.utils - -import ansible -import ansible.constants as C -import ansible_mitogen.logging -import ansible_mitogen.services - -from mitogen.core import b -import ansible_mitogen.affinity - - -LOG = logging.getLogger(__name__) - -ANSIBLE_PKG_OVERRIDE = ( - u"__version__ = %r\n" - u"__author__ = %r\n" -) - - -def clean_shutdown(sock): - """ - Shut the write end of `sock`, causing `recv` in the worker process to wake - up with a 0-byte read and initiate mux process exit, then wait for a 0-byte - read from the read end, which will occur after the the child closes the - descriptor on exit. - - This is done using :mod:`atexit` since Ansible lacks any more sensible hook - to run code during exit, and unless some synchronization exists with - MuxProcess, debug logs may appear on the user's terminal *after* the prompt - has been printed. - """ - sock.shutdown(socket.SHUT_WR) - sock.recv(1) - - -def getenv_int(key, default=0): - """ - Get an integer-valued environment variable `key`, if it exists and parses - as an integer, otherwise return `default`. - """ - try: - return int(os.environ.get(key, str(default))) - except ValueError: - return default - - -def save_pid(name): - """ - When debugging and profiling, it is very annoying to poke through the - process list to discover the currently running Ansible and MuxProcess IDs, - especially when trying to catch an issue during early startup. So here, if - a magic environment variable set, stash them in hidden files in the CWD:: - - alias muxpid="cat .ansible-mux.pid" - alias anspid="cat .ansible-controller.pid" - - gdb -p $(muxpid) - perf top -p $(anspid) - """ - if os.environ.get('MITOGEN_SAVE_PIDS'): - with open('.ansible-%s.pid' % (name,), 'w') as fp: - fp.write(str(os.getpid())) - - -class MuxProcess(object): - """ - Implement a subprocess forked from the Ansible top-level, as a safe place - to contain the Mitogen IO multiplexer thread, keeping its use of the - logging package (and the logging package's heavy use of locks) far away - from the clutches of os.fork(), which is used continuously by the - multiprocessing package in the top-level process. - - The problem with running the multiplexer in that process is that should the - multiplexer thread be in the process of emitting a log entry (and holding - its lock) at the point of fork, in the child, the first attempt to log any - log entry using the same handler will deadlock the child, as in the memory - image the child received, the lock will always be marked held. - - See https://bugs.python.org/issue6721 for a thorough description of the - class of problems this worker is intended to avoid. - """ - - #: In the top-level process, this references one end of a socketpair(), - #: which the MuxProcess blocks reading from in order to determine when - #: the master process dies. Once the read returns, the MuxProcess will - #: begin shutting itself down. - worker_sock = None - - #: In the worker process, this references the other end of - #: :py:attr:`worker_sock`. - child_sock = None - - #: In the top-level process, this is the PID of the single MuxProcess - #: that was spawned. 
- worker_pid = None - - #: A copy of :data:`os.environ` at the time the multiplexer process was - #: started. It's used by mitogen_local.py to find changes made to the - #: top-level environment (e.g. vars plugins -- issue #297) that must be - #: applied to locally executed commands and modules. - original_env = None - - #: In both processes, this is the temporary UNIX socket used for - #: forked WorkerProcesses to contact the MuxProcess - unix_listener_path = None - - #: Singleton. - _instance = None - - @classmethod - def start(cls, _init_logging=True): - """ - Arrange for the subprocess to be started, if it is not already running. - - The parent process picks a UNIX socket path the child will use prior to - fork, creates a socketpair used essentially as a semaphore, then blocks - waiting for the child to indicate the UNIX socket is ready for use. - - :param bool _init_logging: - For testing, if :data:`False`, don't initialize logging. - """ - if cls.worker_sock is not None: - return - - if faulthandler is not None: - faulthandler.enable() - - mitogen.utils.setup_gil() - cls.unix_listener_path = mitogen.unix.make_socket_path() - cls.worker_sock, cls.child_sock = socket.socketpair() - atexit.register(lambda: clean_shutdown(cls.worker_sock)) - mitogen.core.set_cloexec(cls.worker_sock.fileno()) - mitogen.core.set_cloexec(cls.child_sock.fileno()) - - cls.profiling = os.environ.get('MITOGEN_PROFILING') is not None - if cls.profiling: - mitogen.core.enable_profiling() - if _init_logging: - ansible_mitogen.logging.setup() - - cls.original_env = dict(os.environ) - cls.child_pid = os.fork() - if cls.child_pid: - save_pid('controller') - ansible_mitogen.logging.set_process_name('top') - ansible_mitogen.affinity.policy.assign_controller() - cls.child_sock.close() - cls.child_sock = None - mitogen.core.io_op(cls.worker_sock.recv, 1) - else: - save_pid('mux') - ansible_mitogen.logging.set_process_name('mux') - ansible_mitogen.affinity.policy.assign_muxprocess() - cls.worker_sock.close() - cls.worker_sock = None - self = cls() - self.worker_main() - - def worker_main(self): - """ - The main function of for the mux process: setup the Mitogen broker - thread and ansible_mitogen services, then sleep waiting for the socket - connected to the parent to be closed (indicating the parent has died). - """ - self._setup_master() - self._setup_services() - - try: - # Let the parent know our listening socket is ready. - mitogen.core.io_op(self.child_sock.send, b('1')) - # Block until the socket is closed, which happens on parent exit. - mitogen.core.io_op(self.child_sock.recv, 1) - finally: - self.broker.shutdown() - self.broker.join() - - # Test frameworks living somewhere higher on the stack of the - # original parent process may try to catch sys.exit(), so do a C - # level exit instead. - os._exit(0) - - def _enable_router_debug(self): - if 'MITOGEN_ROUTER_DEBUG' in os.environ: - self.router.enable_debug() - - def _enable_stack_dumps(self): - secs = getenv_int('MITOGEN_DUMP_THREAD_STACKS', default=0) - if secs: - mitogen.debug.dump_to_logger(secs=secs) - - def _setup_simplejson(self, responder): - """ - We support serving simplejson for Python 2.4 targets on Ansible 2.3, at - least so the package's own CI Docker scripts can run without external - help, however newer versions of simplejson no longer support Python - 2.4. Therefore override any installed/loaded version with a - 2.4-compatible version we ship in the compat/ directory. 
- """ - responder.whitelist_prefix('simplejson') - - # issue #536: must be at end of sys.path, in case existing newer - # version is already loaded. - compat_path = os.path.join(os.path.dirname(__file__), 'compat') - sys.path.append(compat_path) - - for fullname, is_pkg, suffix in ( - (u'simplejson', True, '__init__.py'), - (u'simplejson.decoder', False, 'decoder.py'), - (u'simplejson.encoder', False, 'encoder.py'), - (u'simplejson.scanner', False, 'scanner.py'), - ): - path = os.path.join(compat_path, 'simplejson', suffix) - fp = open(path, 'rb') - try: - source = fp.read() - finally: - fp.close() - - responder.add_source_override( - fullname=fullname, - path=path, - source=source, - is_pkg=is_pkg, - ) - - def _setup_responder(self, responder): - """ - Configure :class:`mitogen.master.ModuleResponder` to only permit - certain packages, and to generate custom responses for certain modules. - """ - responder.whitelist_prefix('ansible') - responder.whitelist_prefix('ansible_mitogen') - self._setup_simplejson(responder) - - # Ansible 2.3 is compatible with Python 2.4 targets, however - # ansible/__init__.py is not. Instead, executor/module_common.py writes - # out a 2.4-compatible namespace package for unknown reasons. So we - # copy it here. - responder.add_source_override( - fullname='ansible', - path=ansible.__file__, - source=(ANSIBLE_PKG_OVERRIDE % ( - ansible.__version__, - ansible.__author__, - )).encode(), - is_pkg=True, - ) - - def _setup_master(self): - """ - Construct a Router, Broker, and mitogen.unix listener - """ - self.broker = mitogen.master.Broker(install_watcher=False) - self.router = mitogen.master.Router( - broker=self.broker, - max_message_size=4096 * 1048576, - ) - self._setup_responder(self.router.responder) - mitogen.core.listen(self.broker, 'shutdown', self.on_broker_shutdown) - mitogen.core.listen(self.broker, 'exit', self.on_broker_exit) - self.listener = mitogen.unix.Listener( - router=self.router, - path=self.unix_listener_path, - backlog=C.DEFAULT_FORKS, - ) - self._enable_router_debug() - self._enable_stack_dumps() - - def _setup_services(self): - """ - Construct a ContextService and a thread to service requests for it - arriving from worker processes. - """ - self.pool = mitogen.service.Pool( - router=self.router, - services=[ - mitogen.service.FileService(router=self.router), - mitogen.service.PushFileService(router=self.router), - ansible_mitogen.services.ContextService(self.router), - ansible_mitogen.services.ModuleDepService(self.router), - ], - size=getenv_int('MITOGEN_POOL_SIZE', default=32), - ) - LOG.debug('Service pool configured: size=%d', self.pool.size) - - def on_broker_shutdown(self): - """ - Respond to broker shutdown by beginning service pool shutdown. Do not - join on the pool yet, since that would block the broker thread which - then cannot clean up pending handlers, which is required for the - threads to exit gracefully. - """ - # In normal operation we presently kill the process because there is - # not yet any way to cancel connect(). - self.pool.stop(join=self.profiling) - - def on_broker_exit(self): - """ - Respond to the broker thread about to exit by sending SIGTERM to - ourself. In future this should gracefully join the pool, but TERM is - fine for now. - """ - if not self.profiling: - # In normal operation we presently kill the process because there is - # not yet any way to cancel connect(). When profiling, threads - # including the broker must shut down gracefully, otherwise pstats - # won't be written. 
- os.kill(os.getpid(), signal.SIGTERM) diff --git a/ansible/plugins/mitogen-0.2.8-pre/mitogen/doas.py b/ansible/plugins/mitogen-0.2.8-pre/mitogen/doas.py deleted file mode 100644 index 1b687fb20793d326f96c5e575eb7fc0dd33fb346..0000000000000000000000000000000000000000 --- a/ansible/plugins/mitogen-0.2.8-pre/mitogen/doas.py +++ /dev/null @@ -1,113 +0,0 @@ -# Copyright 2019, David Wilson -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are met: -# -# 1. Redistributions of source code must retain the above copyright notice, -# this list of conditions and the following disclaimer. -# -# 2. Redistributions in binary form must reproduce the above copyright notice, -# this list of conditions and the following disclaimer in the documentation -# and/or other materials provided with the distribution. -# -# 3. Neither the name of the copyright holder nor the names of its contributors -# may be used to endorse or promote products derived from this software without -# specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE -# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -# POSSIBILITY OF SUCH DAMAGE. - -# !mitogen: minify_safe - -import logging - -import mitogen.core -import mitogen.parent -from mitogen.core import b - - -LOG = logging.getLogger(__name__) - - -class PasswordError(mitogen.core.StreamError): - pass - - -class Stream(mitogen.parent.Stream): - create_child = staticmethod(mitogen.parent.hybrid_tty_create_child) - child_is_immediate_subprocess = False - - username = 'root' - password = None - doas_path = 'doas' - password_prompt = b('Password:') - incorrect_prompts = ( - b('doas: authentication failed'), - ) - - def construct(self, username=None, password=None, doas_path=None, - password_prompt=None, incorrect_prompts=None, **kwargs): - super(Stream, self).construct(**kwargs) - if username is not None: - self.username = username - if password is not None: - self.password = password - if doas_path is not None: - self.doas_path = doas_path - if password_prompt is not None: - self.password_prompt = password_prompt.lower() - if incorrect_prompts is not None: - self.incorrect_prompts = map(str.lower, incorrect_prompts) - - def _get_name(self): - return u'doas.' 
+ mitogen.core.to_text(self.username) - - def get_boot_command(self): - bits = [self.doas_path, '-u', self.username, '--'] - bits = bits + super(Stream, self).get_boot_command() - LOG.debug('doas command line: %r', bits) - return bits - - password_incorrect_msg = 'doas password is incorrect' - password_required_msg = 'doas password is required' - - def _connect_input_loop(self, it): - password_sent = False - for buf in it: - LOG.debug('%r: received %r', self, buf) - if buf.endswith(self.EC0_MARKER): - self._ec0_received() - return - if any(s in buf.lower() for s in self.incorrect_prompts): - if password_sent: - raise PasswordError(self.password_incorrect_msg) - elif self.password_prompt in buf.lower(): - if self.password is None: - raise PasswordError(self.password_required_msg) - if password_sent: - raise PasswordError(self.password_incorrect_msg) - LOG.debug('sending password') - self.diag_stream.transmit_side.write( - mitogen.core.to_text(self.password + '\n').encode('utf-8') - ) - password_sent = True - raise mitogen.core.StreamError('bootstrap failed') - - def _connect_bootstrap(self): - it = mitogen.parent.iter_read( - fds=[self.receive_side.fd, self.diag_stream.receive_side.fd], - deadline=self.connect_deadline, - ) - try: - self._connect_input_loop(it) - finally: - it.close() diff --git a/ansible/plugins/mitogen-0.2.8-pre/mitogen/ssh.py b/ansible/plugins/mitogen-0.2.8-pre/mitogen/ssh.py deleted file mode 100644 index 11b74c1b33e30200be305a6dec53291ec2f4f321..0000000000000000000000000000000000000000 --- a/ansible/plugins/mitogen-0.2.8-pre/mitogen/ssh.py +++ /dev/null @@ -1,317 +0,0 @@ -# Copyright 2019, David Wilson -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are met: -# -# 1. Redistributions of source code must retain the above copyright notice, -# this list of conditions and the following disclaimer. -# -# 2. Redistributions in binary form must reproduce the above copyright notice, -# this list of conditions and the following disclaimer in the documentation -# and/or other materials provided with the distribution. -# -# 3. Neither the name of the copyright holder nor the names of its contributors -# may be used to endorse or promote products derived from this software without -# specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE -# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -# POSSIBILITY OF SUCH DAMAGE. - -# !mitogen: minify_safe - -""" -Functionality to allow establishing new slave contexts over an SSH connection. 
-""" - -import logging -import re - -try: - from shlex import quote as shlex_quote -except ImportError: - from pipes import quote as shlex_quote - -import mitogen.parent -from mitogen.core import b -from mitogen.core import bytes_partition - -try: - any -except NameError: - from mitogen.core import any - - -LOG = logging.getLogger('mitogen') - -# sshpass uses 'assword' because it doesn't lowercase the input. -PASSWORD_PROMPT = b('password') -HOSTKEY_REQ_PROMPT = b('are you sure you want to continue connecting (yes/no)?') -HOSTKEY_FAIL = b('host key verification failed.') - -# [user@host: ] permission denied -PERMDENIED_RE = re.compile( - ('(?:[^@]+@[^:]+: )?' # Absent in OpenSSH <7.5 - 'Permission denied').encode(), - re.I -) - - -DEBUG_PREFIXES = (b('debug1:'), b('debug2:'), b('debug3:')) - - -def filter_debug(stream, it): - """ - Read line chunks from it, either yielding them directly, or building up and - logging individual lines if they look like SSH debug output. - - This contains the mess of dealing with both line-oriented input, and partial - lines such as the password prompt. - - Yields `(line, partial)` tuples, where `line` is the line, `partial` is - :data:`True` if no terminating newline character was present and no more - data exists in the read buffer. Consuming code can use this to unreliably - detect the presence of an interactive prompt. - """ - # The `partial` test is unreliable, but is only problematic when verbosity - # is enabled: it's possible for a combination of SSH banner, password - # prompt, verbose output, timing and OS buffering specifics to create a - # situation where an otherwise newline-terminated line appears to not be - # terminated, due to a partial read(). If something is broken when - # ssh_debug_level>0, this is the first place to look. - state = 'start_of_line' - buf = b('') - for chunk in it: - buf += chunk - while buf: - if state == 'start_of_line': - if len(buf) < 8: - # short read near buffer limit, block awaiting at least 8 - # bytes so we can discern a debug line, or the minimum - # interesting token from above or the bootstrap - # ('password', 'MITO000\n'). - break - elif any(buf.startswith(p) for p in DEBUG_PREFIXES): - state = 'in_debug' - else: - state = 'in_plain' - elif state == 'in_debug': - if b('\n') not in buf: - break - line, _, buf = bytes_partition(buf, b('\n')) - LOG.debug('%s: %s', stream.name, - mitogen.core.to_text(line.rstrip())) - state = 'start_of_line' - elif state == 'in_plain': - line, nl, buf = bytes_partition(buf, b('\n')) - yield line + nl, not (nl or buf) - if nl: - state = 'start_of_line' - - -class PasswordError(mitogen.core.StreamError): - pass - - -class HostKeyError(mitogen.core.StreamError): - pass - - -class Stream(mitogen.parent.Stream): - child_is_immediate_subprocess = False - - #: Default to whatever is available as 'python' on the remote machine, - #: overriding sys.executable use. - python_path = 'python' - - #: Number of -v invocations to pass on command line. - ssh_debug_level = 0 - - #: The path to the SSH binary. 
- ssh_path = 'ssh' - - hostname = None - username = None - port = None - - identity_file = None - password = None - ssh_args = None - - check_host_keys_msg = 'check_host_keys= must be set to accept, enforce or ignore' - - def construct(self, hostname, username=None, ssh_path=None, port=None, - check_host_keys='enforce', password=None, identity_file=None, - compression=True, ssh_args=None, keepalive_enabled=True, - keepalive_count=3, keepalive_interval=15, - identities_only=True, ssh_debug_level=None, **kwargs): - super(Stream, self).construct(**kwargs) - if check_host_keys not in ('accept', 'enforce', 'ignore'): - raise ValueError(self.check_host_keys_msg) - - self.hostname = hostname - self.username = username - self.port = port - self.check_host_keys = check_host_keys - self.password = password - self.identity_file = identity_file - self.identities_only = identities_only - self.compression = compression - self.keepalive_enabled = keepalive_enabled - self.keepalive_count = keepalive_count - self.keepalive_interval = keepalive_interval - if ssh_path: - self.ssh_path = ssh_path - if ssh_args: - self.ssh_args = ssh_args - if ssh_debug_level: - self.ssh_debug_level = ssh_debug_level - - self._init_create_child() - - def _requires_pty(self): - """ - Return :data:`True` if the configuration requires a PTY to be - allocated. This is only true if we must interactively accept host keys, - or type a password. - """ - return (self.check_host_keys == 'accept' or - self.password is not None) - - def _init_create_child(self): - """ - Initialize the base class :attr:`create_child` and - :attr:`create_child_args` according to whether we need a PTY or not. - """ - if self._requires_pty(): - self.create_child = mitogen.parent.hybrid_tty_create_child - else: - self.create_child = mitogen.parent.create_child - self.create_child_args = { - 'stderr_pipe': True, - } - - def get_boot_command(self): - bits = [self.ssh_path] - if self.ssh_debug_level: - bits += ['-' + ('v' * min(3, self.ssh_debug_level))] - else: - # issue #307: suppress any login banner, as it may contain the - # password prompt, and there is no robust way to tell the - # difference. - bits += ['-o', 'LogLevel ERROR'] - if self.username: - bits += ['-l', self.username] - if self.port is not None: - bits += ['-p', str(self.port)] - if self.identities_only and (self.identity_file or self.password): - bits += ['-o', 'IdentitiesOnly yes'] - if self.identity_file: - bits += ['-i', self.identity_file] - if self.compression: - bits += ['-o', 'Compression yes'] - if self.keepalive_enabled: - bits += [ - '-o', 'ServerAliveInterval %s' % (self.keepalive_interval,), - '-o', 'ServerAliveCountMax %s' % (self.keepalive_count,), - ] - if not self._requires_pty(): - bits += ['-o', 'BatchMode yes'] - if self.check_host_keys == 'enforce': - bits += ['-o', 'StrictHostKeyChecking yes'] - if self.check_host_keys == 'accept': - bits += ['-o', 'StrictHostKeyChecking ask'] - elif self.check_host_keys == 'ignore': - bits += [ - '-o', 'StrictHostKeyChecking no', - '-o', 'UserKnownHostsFile /dev/null', - '-o', 'GlobalKnownHostsFile /dev/null', - ] - if self.ssh_args: - bits += self.ssh_args - bits.append(self.hostname) - base = super(Stream, self).get_boot_command() - return bits + [shlex_quote(s).strip() for s in base] - - def _get_name(self): - s = u'ssh.' 
+ mitogen.core.to_text(self.hostname) - if self.port: - s += u':%s' % (self.port,) - return s - - auth_incorrect_msg = 'SSH authentication is incorrect' - password_incorrect_msg = 'SSH password is incorrect' - password_required_msg = 'SSH password was requested, but none specified' - hostkey_config_msg = ( - 'SSH requested permission to accept unknown host key, but ' - 'check_host_keys=ignore. This is likely due to ssh_args= ' - 'conflicting with check_host_keys=. Please correct your ' - 'configuration.' - ) - hostkey_failed_msg = ( - 'Host key checking is enabled, and SSH reported an unrecognized or ' - 'mismatching host key.' - ) - - def _host_key_prompt(self): - if self.check_host_keys == 'accept': - LOG.debug('%s: accepting host key', self.name) - self.diag_stream.transmit_side.write(b('yes\n')) - return - - # _host_key_prompt() should never be reached with ignore or enforce - # mode, SSH should have handled that. User's ssh_args= is conflicting - # with ours. - raise HostKeyError(self.hostkey_config_msg) - - def _connect_input_loop(self, it): - password_sent = False - for buf, partial in filter_debug(self, it): - LOG.debug('%s: stdout: %s', self.name, buf.rstrip()) - if buf.endswith(self.EC0_MARKER): - self._ec0_received() - return - elif HOSTKEY_REQ_PROMPT in buf.lower(): - self._host_key_prompt() - elif HOSTKEY_FAIL in buf.lower(): - raise HostKeyError(self.hostkey_failed_msg) - elif PERMDENIED_RE.match(buf): - # issue #271: work around conflict with user shell reporting - # 'permission denied' e.g. during chdir($HOME) by only matching - # it at the start of the line. - if self.password is not None and password_sent: - raise PasswordError(self.password_incorrect_msg) - elif PASSWORD_PROMPT in buf and self.password is None: - # Permission denied (password,pubkey) - raise PasswordError(self.password_required_msg) - else: - raise PasswordError(self.auth_incorrect_msg) - elif partial and PASSWORD_PROMPT in buf.lower(): - if self.password is None: - raise PasswordError(self.password_required_msg) - LOG.debug('%s: sending password', self.name) - self.diag_stream.transmit_side.write( - (self.password + '\n').encode() - ) - password_sent = True - - raise mitogen.core.StreamError('bootstrap failed') - - def _connect_bootstrap(self): - fds = [self.receive_side.fd] - if self.diag_stream is not None: - fds.append(self.diag_stream.receive_side.fd) - - it = mitogen.parent.iter_read(fds=fds, deadline=self.connect_deadline) - try: - self._connect_input_loop(it) - finally: - it.close() diff --git a/ansible/plugins/mitogen-0.2.8-pre/mitogen/su.py b/ansible/plugins/mitogen-0.2.8-pre/mitogen/su.py deleted file mode 100644 index 5ff9e177f28c9e28d38d282f4f177f585c893191..0000000000000000000000000000000000000000 --- a/ansible/plugins/mitogen-0.2.8-pre/mitogen/su.py +++ /dev/null @@ -1,128 +0,0 @@ -# Copyright 2019, David Wilson -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are met: -# -# 1. Redistributions of source code must retain the above copyright notice, -# this list of conditions and the following disclaimer. -# -# 2. Redistributions in binary form must reproduce the above copyright notice, -# this list of conditions and the following disclaimer in the documentation -# and/or other materials provided with the distribution. -# -# 3. 
Neither the name of the copyright holder nor the names of its contributors -# may be used to endorse or promote products derived from this software without -# specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE -# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -# POSSIBILITY OF SUCH DAMAGE. - -# !mitogen: minify_safe - -import logging - -import mitogen.core -import mitogen.parent -from mitogen.core import b - -try: - any -except NameError: - from mitogen.core import any - - -LOG = logging.getLogger(__name__) - - -class PasswordError(mitogen.core.StreamError): - pass - - -class Stream(mitogen.parent.Stream): - # TODO: BSD su cannot handle stdin being a socketpair, but it does let the - # child inherit fds from the parent. So we can still pass a socketpair in - # for hybrid_tty_create_child(), there just needs to be either a shell - # snippet or bootstrap support for fixing things up afterwards. - create_child = staticmethod(mitogen.parent.tty_create_child) - child_is_immediate_subprocess = False - - #: Once connected, points to the corresponding DiagLogStream, allowing it to - #: be disconnected at the same time this stream is being torn down. - - username = 'root' - password = None - su_path = 'su' - password_prompt = b('password:') - incorrect_prompts = ( - b('su: sorry'), # BSD - b('su: authentication failure'), # Linux - b('su: incorrect password'), # CentOS 6 - b('authentication is denied'), # AIX - ) - - def construct(self, username=None, password=None, su_path=None, - password_prompt=None, incorrect_prompts=None, **kwargs): - super(Stream, self).construct(**kwargs) - if username is not None: - self.username = username - if password is not None: - self.password = password - if su_path is not None: - self.su_path = su_path - if password_prompt is not None: - self.password_prompt = password_prompt.lower() - if incorrect_prompts is not None: - self.incorrect_prompts = map(str.lower, incorrect_prompts) - - def _get_name(self): - return u'su.' 
+ mitogen.core.to_text(self.username) - - def get_boot_command(self): - argv = mitogen.parent.Argv(super(Stream, self).get_boot_command()) - return [self.su_path, self.username, '-c', str(argv)] - - password_incorrect_msg = 'su password is incorrect' - password_required_msg = 'su password is required' - - def _connect_input_loop(self, it): - password_sent = False - - for buf in it: - LOG.debug('%r: received %r', self, buf) - if buf.endswith(self.EC0_MARKER): - self._ec0_received() - return - if any(s in buf.lower() for s in self.incorrect_prompts): - if password_sent: - raise PasswordError(self.password_incorrect_msg) - elif self.password_prompt in buf.lower(): - if self.password is None: - raise PasswordError(self.password_required_msg) - if password_sent: - raise PasswordError(self.password_incorrect_msg) - LOG.debug('sending password') - self.transmit_side.write( - mitogen.core.to_text(self.password + '\n').encode('utf-8') - ) - password_sent = True - - raise mitogen.core.StreamError('bootstrap failed') - - def _connect_bootstrap(self): - it = mitogen.parent.iter_read( - fds=[self.receive_side.fd], - deadline=self.connect_deadline, - ) - try: - self._connect_input_loop(it) - finally: - it.close() diff --git a/ansible/plugins/mitogen-0.2.9/.lgtm.yml b/ansible/plugins/mitogen-0.2.9/.lgtm.yml new file mode 100644 index 0000000000000000000000000000000000000000..a8e91c02255e5364d82fbea305b052c7c25e610a --- /dev/null +++ b/ansible/plugins/mitogen-0.2.9/.lgtm.yml @@ -0,0 +1,10 @@ +path_classifiers: + library: + - "mitogen/compat" + - "ansible_mitogen/compat" +queries: + # Mitogen 2.4 compatibility trips this query everywhere, so just disable it + - exclude: py/unreachable-statement + - exclude: py/should-use-with + # mitogen.core.b() trips this query everywhere, so just disable it + - exclude: py/import-and-import-from diff --git a/ansible/plugins/mitogen-0.2.8-pre/LICENSE b/ansible/plugins/mitogen-0.2.9/LICENSE similarity index 100% rename from ansible/plugins/mitogen-0.2.8-pre/LICENSE rename to ansible/plugins/mitogen-0.2.9/LICENSE diff --git a/ansible/plugins/mitogen-0.2.9/MANIFEST.in b/ansible/plugins/mitogen-0.2.9/MANIFEST.in new file mode 100644 index 0000000000000000000000000000000000000000..1aba38f67a2211cf5b09466d7b411206cb7223bf --- /dev/null +++ b/ansible/plugins/mitogen-0.2.9/MANIFEST.in @@ -0,0 +1 @@ +include LICENSE diff --git a/ansible/plugins/mitogen-0.2.8-pre/README.md b/ansible/plugins/mitogen-0.2.9/README.md similarity index 100% rename from ansible/plugins/mitogen-0.2.8-pre/README.md rename to ansible/plugins/mitogen-0.2.9/README.md diff --git a/ansible/plugins/mitogen-0.2.8-pre/ansible_mitogen/__init__.py b/ansible/plugins/mitogen-0.2.9/ansible_mitogen/__init__.py similarity index 100% rename from ansible/plugins/mitogen-0.2.8-pre/ansible_mitogen/__init__.py rename to ansible/plugins/mitogen-0.2.9/ansible_mitogen/__init__.py diff --git a/ansible/plugins/mitogen-0.2.8-pre/ansible_mitogen/affinity.py b/ansible/plugins/mitogen-0.2.9/ansible_mitogen/affinity.py similarity index 83% rename from ansible/plugins/mitogen-0.2.8-pre/ansible_mitogen/affinity.py rename to ansible/plugins/mitogen-0.2.9/ansible_mitogen/affinity.py index 09a6aceed5466f86d36e9cb9db76c17186ffa4e9..7f4c8db568348d3a095dbe8f5cd3d8708565efb1 100644 --- a/ansible/plugins/mitogen-0.2.8-pre/ansible_mitogen/affinity.py +++ b/ansible/plugins/mitogen-0.2.9/ansible_mitogen/affinity.py @@ -73,7 +73,9 @@ necessarily involves preventing the scheduler from making load balancing decisions. 
""" +from __future__ import absolute_import import ctypes +import logging import mmap import multiprocessing import os @@ -83,41 +85,44 @@ import mitogen.core import mitogen.parent +LOG = logging.getLogger(__name__) + + try: _libc = ctypes.CDLL(None, use_errno=True) _strerror = _libc.strerror _strerror.restype = ctypes.c_char_p - _pthread_mutex_init = _libc.pthread_mutex_init - _pthread_mutex_lock = _libc.pthread_mutex_lock - _pthread_mutex_unlock = _libc.pthread_mutex_unlock + _sem_init = _libc.sem_init + _sem_wait = _libc.sem_wait + _sem_post = _libc.sem_post _sched_setaffinity = _libc.sched_setaffinity except (OSError, AttributeError): _libc = None _strerror = None - _pthread_mutex_init = None - _pthread_mutex_lock = None - _pthread_mutex_unlock = None + _sem_init = None + _sem_wait = None + _sem_post = None _sched_setaffinity = None -class pthread_mutex_t(ctypes.Structure): +class sem_t(ctypes.Structure): """ - Wrap pthread_mutex_t to allow storing a lock in shared memory. + Wrap sem_t to allow storing a lock in shared memory. """ _fields_ = [ - ('data', ctypes.c_uint8 * 512), + ('data', ctypes.c_uint8 * 128), ] def init(self): - if _pthread_mutex_init(self.data, 0): + if _sem_init(self.data, 1, 1): raise Exception(_strerror(ctypes.get_errno())) def acquire(self): - if _pthread_mutex_lock(self.data): + if _sem_wait(self.data): raise Exception(_strerror(ctypes.get_errno())) def release(self): - if _pthread_mutex_unlock(self.data): + if _sem_post(self.data): raise Exception(_strerror(ctypes.get_errno())) @@ -128,7 +133,7 @@ class State(ctypes.Structure): the context of the new child process. """ _fields_ = [ - ('lock', pthread_mutex_t), + ('lock', sem_t), ('counter', ctypes.c_uint8), ] @@ -142,7 +147,7 @@ class Policy(object): Assign the Ansible top-level policy to this process. """ - def assign_muxprocess(self): + def assign_muxprocess(self, index): """ Assign the MuxProcess policy to this process. """ @@ -177,9 +182,9 @@ class FixedPolicy(Policy): cores, before reusing the second hyperthread of an existing core. A hook is installed that causes :meth:`reset` to run in the child of any - process created with :func:`mitogen.parent.detach_popen`, ensuring - CPU-intensive children like SSH are not forced to share the same core as - the (otherwise potentially very busy) parent. + process created with :func:`mitogen.parent.popen`, ensuring CPU-intensive + children like SSH are not forced to share the same core as the (otherwise + potentially very busy) parent. """ def __init__(self, cpu_count=None): #: For tests. 
@@ -207,11 +212,13 @@ class FixedPolicy(Policy): self._reserve_mask = 3 self._reserve_shift = 2 - def _set_affinity(self, mask): + def _set_affinity(self, descr, mask): + if descr: + LOG.debug('CPU mask for %s: %#08x', descr, mask) mitogen.parent._preexec_hook = self._clear self._set_cpu_mask(mask) - def _balance(self): + def _balance(self, descr): self.state.lock.acquire() try: n = self.state.counter @@ -219,28 +226,28 @@ class FixedPolicy(Policy): finally: self.state.lock.release() - self._set_cpu(self._reserve_shift + ( + self._set_cpu(descr, self._reserve_shift + ( (n % (self.cpu_count - self._reserve_shift)) )) - def _set_cpu(self, cpu): - self._set_affinity(1 << cpu) + def _set_cpu(self, descr, cpu): + self._set_affinity(descr, 1 << (cpu % self.cpu_count)) def _clear(self): all_cpus = (1 << self.cpu_count) - 1 - self._set_affinity(all_cpus & ~self._reserve_mask) + self._set_affinity(None, all_cpus & ~self._reserve_mask) def assign_controller(self): if self._reserve_controller: - self._set_cpu(1) + self._set_cpu('Ansible top-level process', 1) else: - self._balance() + self._balance('Ansible top-level process') - def assign_muxprocess(self): - self._set_cpu(0) + def assign_muxprocess(self, index): + self._set_cpu('MuxProcess %d' % (index,), index) def assign_worker(self): - self._balance() + self._balance('WorkerProcess') def assign_subprocess(self): self._clear() @@ -258,9 +265,19 @@ class LinuxPolicy(FixedPolicy): mask >>= 64 return mitogen.core.b('').join(chunks) + def _get_thread_ids(self): + try: + ents = os.listdir('/proc/self/task') + except OSError: + LOG.debug('cannot fetch thread IDs for current process') + return [os.getpid()] + + return [int(s) for s in ents if s.isdigit()] + def _set_cpu_mask(self, mask): s = self._mask_to_bytes(mask) - _sched_setaffinity(os.getpid(), len(s), s) + for tid in self._get_thread_ids(): + _sched_setaffinity(tid, len(s), s) if _sched_setaffinity is not None: diff --git a/ansible/plugins/mitogen-0.2.8-pre/ansible_mitogen/compat/__init__.py b/ansible/plugins/mitogen-0.2.9/ansible_mitogen/compat/__init__.py similarity index 100% rename from ansible/plugins/mitogen-0.2.8-pre/ansible_mitogen/compat/__init__.py rename to ansible/plugins/mitogen-0.2.9/ansible_mitogen/compat/__init__.py diff --git a/ansible/plugins/mitogen-0.2.8-pre/ansible_mitogen/compat/simplejson/__init__.py b/ansible/plugins/mitogen-0.2.9/ansible_mitogen/compat/simplejson/__init__.py similarity index 100% rename from ansible/plugins/mitogen-0.2.8-pre/ansible_mitogen/compat/simplejson/__init__.py rename to ansible/plugins/mitogen-0.2.9/ansible_mitogen/compat/simplejson/__init__.py diff --git a/ansible/plugins/mitogen-0.2.8-pre/ansible_mitogen/compat/simplejson/decoder.py b/ansible/plugins/mitogen-0.2.9/ansible_mitogen/compat/simplejson/decoder.py similarity index 100% rename from ansible/plugins/mitogen-0.2.8-pre/ansible_mitogen/compat/simplejson/decoder.py rename to ansible/plugins/mitogen-0.2.9/ansible_mitogen/compat/simplejson/decoder.py diff --git a/ansible/plugins/mitogen-0.2.8-pre/ansible_mitogen/compat/simplejson/encoder.py b/ansible/plugins/mitogen-0.2.9/ansible_mitogen/compat/simplejson/encoder.py similarity index 100% rename from ansible/plugins/mitogen-0.2.8-pre/ansible_mitogen/compat/simplejson/encoder.py rename to ansible/plugins/mitogen-0.2.9/ansible_mitogen/compat/simplejson/encoder.py diff --git a/ansible/plugins/mitogen-0.2.8-pre/ansible_mitogen/compat/simplejson/scanner.py b/ansible/plugins/mitogen-0.2.9/ansible_mitogen/compat/simplejson/scanner.py similarity index 
100% rename from ansible/plugins/mitogen-0.2.8-pre/ansible_mitogen/compat/simplejson/scanner.py rename to ansible/plugins/mitogen-0.2.9/ansible_mitogen/compat/simplejson/scanner.py diff --git a/ansible/plugins/mitogen-0.2.8-pre/ansible_mitogen/connection.py b/ansible/plugins/mitogen-0.2.9/ansible_mitogen/connection.py similarity index 84% rename from ansible/plugins/mitogen-0.2.8-pre/ansible_mitogen/connection.py rename to ansible/plugins/mitogen-0.2.9/ansible_mitogen/connection.py index 42fa2ef860b275d0735c48e9aced402f97304be8..5e08eb15b2970fe9eadaeb6a920c9872386b9836 100644 --- a/ansible/plugins/mitogen-0.2.8-pre/ansible_mitogen/connection.py +++ b/ansible/plugins/mitogen-0.2.9/ansible_mitogen/connection.py @@ -37,7 +37,6 @@ import stat import sys import time -import jinja2.runtime import ansible.constants as C import ansible.errors import ansible.plugins.connection @@ -45,9 +44,9 @@ import ansible.utils.shlex import mitogen.core import mitogen.fork -import mitogen.unix import mitogen.utils +import ansible_mitogen.mixins import ansible_mitogen.parsing import ansible_mitogen.process import ansible_mitogen.services @@ -57,6 +56,12 @@ import ansible_mitogen.transport_config LOG = logging.getLogger(__name__) +task_vars_msg = ( + 'could not recover task_vars. This means some connection ' + 'settings may erroneously be reset to their defaults. ' + 'Please report a bug if you encounter this message.' +) + def get_remote_name(spec): """ @@ -407,15 +412,6 @@ CONNECTION_METHOD = { } -class Broker(mitogen.master.Broker): - """ - WorkerProcess maintains at most 2 file descriptors, therefore does not need - the exuberant syscall expense of EpollPoller, so override it and restore - the poll() poller. - """ - poller_class = mitogen.core.Poller - - class CallChain(mitogen.parent.CallChain): """ Extend :class:`mitogen.parent.CallChain` to additionally cause the @@ -459,15 +455,10 @@ class CallChain(mitogen.parent.CallChain): class Connection(ansible.plugins.connection.ConnectionBase): - #: mitogen.master.Broker for this worker. - broker = None - - #: mitogen.master.Router for this worker. - router = None - - #: mitogen.parent.Context representing the parent Context, which is - #: presently always the connection multiplexer process. - parent = None + #: The :class:`ansible_mitogen.process.Binding` representing the connection + #: multiplexer this connection's target is assigned to. :data:`None` when + #: disconnected. + binding = None #: mitogen.parent.Context for the target account on the target, possibly #: reached via become. @@ -501,15 +492,9 @@ class Connection(ansible.plugins.connection.ConnectionBase): # the case of the synchronize module. # - #: Set to the host name as it appears in inventory by on_action_run(). - inventory_hostname = None - #: Set to task_vars by on_action_run(). _task_vars = None - #: Set to 'hostvars' by on_action_run() - host_vars = None - #: Set by on_action_run() delegate_to_hostname = None @@ -518,13 +503,6 @@ class Connection(ansible.plugins.connection.ConnectionBase): #: matching vanilla Ansible behaviour. loader_basedir = None - def __init__(self, play_context, new_stdin, **kwargs): - assert ansible_mitogen.process.MuxProcess.unix_listener_path, ( - 'Mitogen connection types may only be instantiated ' - 'while the "mitogen" strategy is active.' - ) - super(Connection, self).__init__(play_context, new_stdin) - def __del__(self): """ Ansible cannot be trusted to always call close() e.g. 
the synchronize @@ -549,12 +527,67 @@ class Connection(ansible.plugins.connection.ConnectionBase): :param str loader_basedir: Loader base directory; see :attr:`loader_basedir`. """ - self.inventory_hostname = task_vars['inventory_hostname'] self._task_vars = task_vars - self.host_vars = task_vars['hostvars'] self.delegate_to_hostname = delegate_to_hostname self.loader_basedir = loader_basedir - self._mitogen_reset(mode='put') + self._put_connection() + + def _get_task_vars(self): + """ + More information is needed than normally provided to an Ansible + connection. For proxied connections, intermediary configuration must + be inferred, and for any connection the configured Python interpreter + must be known. + + There is no clean way to access this information that would not deviate + from the running Ansible version. The least invasive method known is to + reuse the running task's task_vars dict. + + This method walks the stack to find task_vars of the Action plugin's + run(), or if no Action is present, from Strategy's _execute_meta(), as + in the case of 'meta: reset_connection'. The stack is walked in + addition to subclassing Action.run()/on_action_run(), as it is possible + for new connections to be constructed in addition to the preconstructed + connection passed into any running action. + """ + if self._task_vars is not None: + return self._task_vars + + f = sys._getframe() + while f: + if f.f_code.co_name == 'run': + f_locals = f.f_locals + f_self = f_locals.get('self') + if isinstance(f_self, ansible_mitogen.mixins.ActionModuleMixin): + task_vars = f_locals.get('task_vars') + if task_vars: + LOG.debug('recovered task_vars from Action') + return task_vars + elif f.f_code.co_name == '_execute_meta': + f_all_vars = f.f_locals.get('all_vars') + if isinstance(f_all_vars, dict): + LOG.debug('recovered task_vars from meta:') + return f_all_vars + + f = f.f_back + + raise ansible.errors.AnsibleConnectionFailure(task_vars_msg) + + def get_host_vars(self, inventory_hostname): + """ + Fetch the HostVars for a host. + + :returns: + Variables dictionary or :data:`None`. + :raises ansible.errors.AnsibleConnectionFailure: + Task vars unavailable. + """ + task_vars = self._get_task_vars() + hostvars = task_vars.get('hostvars') + if hostvars: + return hostvars.get(inventory_hostname) + + raise ansible.errors.AnsibleConnectionFailure(task_vars_msg) def get_task_var(self, key, default=None): """ @@ -567,16 +600,16 @@ class Connection(ansible.plugins.connection.ConnectionBase): does not make sense to extract connection-related configuration for the delegated-to machine from them. 
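[Reviewer note] _get_task_vars() above leans on a CPython detail: sys._getframe() exposes caller frames and their locals. A self-contained sketch of the same recovery trick, with illustrative names (find_local, run, helper are not part of mitogen):

```python
import sys

def find_local(name):
    """Walk the call stack, returning the first local variable called `name`."""
    f = sys._getframe().f_back          # start from our caller
    while f:
        if name in f.f_locals:
            return f.f_locals[name]
        f = f.f_back
    return None

def run():                              # stands in for Action.run()
    task_vars = {'inventory_hostname': 'web1'}
    return helper()

def helper():                           # stands in for the connection plug-in
    return find_local('task_vars')      # recovered without being passed down

print(run())                            # {'inventory_hostname': 'web1'}
```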
""" - if self._task_vars: - if self.delegate_to_hostname is None: - if key in self._task_vars: - return self._task_vars[key] - else: - delegated_vars = self._task_vars['ansible_delegated_vars'] - if self.delegate_to_hostname in delegated_vars: - task_vars = delegated_vars[self.delegate_to_hostname] - if key in task_vars: - return task_vars[key] + task_vars = self._get_task_vars() + if self.delegate_to_hostname is None: + if key in task_vars: + return task_vars[key] + else: + delegated_vars = task_vars['ansible_delegated_vars'] + if self.delegate_to_hostname in delegated_vars: + task_vars = delegated_vars[self.delegate_to_hostname] + if key in task_vars: + return task_vars[key] return default @@ -585,6 +618,15 @@ class Connection(ansible.plugins.connection.ConnectionBase): self._connect() return self.init_child_result['home_dir'] + def get_binding(self): + """ + Return the :class:`ansible_mitogen.process.Binding` representing the + process that hosts the physical connection and services (context + establishment, file transfer, ..) for our desired target. + """ + assert self.binding is not None + return self.binding + @property def connected(self): return self.context is not None @@ -599,7 +641,8 @@ class Connection(ansible.plugins.connection.ConnectionBase): # must use __contains__ to avoid a TypeError for a missing host on # Ansible 2.3. - if self.host_vars is None or inventory_name not in self.host_vars: + via_vars = self.get_host_vars(inventory_name) + if via_vars is None: raise ansible.errors.AnsibleConnectionFailure( self.unknown_via_msg % ( via_spec, @@ -607,7 +650,6 @@ class Connection(ansible.plugins.connection.ConnectionBase): ) ) - via_vars = self.host_vars[inventory_name] return ansible_mitogen.transport_config.MitogenViaSpec( inventory_name=inventory_name, play_context=self._play_context, @@ -672,18 +714,6 @@ class Connection(ansible.plugins.connection.ConnectionBase): return stack - def _connect_broker(self): - """ - Establish a reference to the Broker, Router and parent context used for - connections. - """ - if not self.broker: - self.broker = mitogen.master.Broker() - self.router, self.parent = mitogen.unix.connect( - path=ansible_mitogen.process.MuxProcess.unix_listener_path, - broker=self.broker, - ) - def _build_stack(self): """ Construct a list of dictionaries representing the connection @@ -691,14 +721,14 @@ class Connection(ansible.plugins.connection.ConnectionBase): additionally used by the integration tests "mitogen_get_stack" action to fetch the would-be connection configuration. """ - return self._stack_from_spec( - ansible_mitogen.transport_config.PlayContextSpec( - connection=self, - play_context=self._play_context, - transport=self.transport, - inventory_name=self.inventory_hostname, - ) + spec = ansible_mitogen.transport_config.PlayContextSpec( + connection=self, + play_context=self._play_context, + transport=self.transport, + inventory_name=self.get_task_var('inventory_hostname'), ) + stack = self._stack_from_spec(spec) + return spec.inventory_name(), stack def _connect_stack(self, stack): """ @@ -711,7 +741,8 @@ class Connection(ansible.plugins.connection.ConnectionBase): description of the returned dictionary. 
""" try: - dct = self.parent.call_service( + dct = mitogen.service.call( + call_context=self.binding.get_service_context(), service_name='ansible_mitogen.services.ContextService', method_name='get', stack=mitogen.utils.cast(list(stack)), @@ -758,27 +789,27 @@ class Connection(ansible.plugins.connection.ConnectionBase): if self.connected: return - self._connect_broker() - stack = self._build_stack() + inventory_name, stack = self._build_stack() + worker_model = ansible_mitogen.process.get_worker_model() + self.binding = worker_model.get_binding( + mitogen.utils.cast(inventory_name) + ) self._connect_stack(stack) - def _mitogen_reset(self, mode): + def _put_connection(self): """ Forget everything we know about the connected context. This function cannot be called _reset() since that name is used as a public API by Ansible 2.4 wait_for_connection plug-in. - - :param str mode: - Name of ContextService method to use to discard the context, either - 'put' or 'reset'. """ if not self.context: return self.chain.reset() - self.parent.call_service( + mitogen.service.call( + call_context=self.binding.get_service_context(), service_name='ansible_mitogen.services.ContextService', - method_name=mode, + method_name='put', context=self.context ) @@ -787,48 +818,16 @@ class Connection(ansible.plugins.connection.ConnectionBase): self.init_child_result = None self.chain = None - def _shutdown_broker(self): - """ - Shutdown the broker thread during :meth:`close` or :meth:`reset`. - """ - if self.broker: - self.broker.shutdown() - self.broker.join() - self.broker = None - self.router = None - - # #420: Ansible executes "meta" actions in the top-level process, - # meaning "reset_connection" will cause :class:`mitogen.core.Latch` - # FDs to be cached and erroneously shared by children on subsequent - # WorkerProcess forks. To handle that, call on_fork() to ensure any - # shared state is discarded. - # #490: only attempt to clean up when it's known that some - # resources exist to cleanup, otherwise later __del__ double-call - # to close() due to GC at random moment may obliterate an unrelated - # Connection's resources. - mitogen.fork.on_fork() - def close(self): """ Arrange for the mitogen.master.Router running in the worker to gracefully shut down, and wait for shutdown to complete. Safe to call multiple times. """ - self._mitogen_reset(mode='put') - self._shutdown_broker() - - def _reset_find_task_vars(self): - """ - Monsterous hack: since "meta: reset_connection" does not run from an - action, we cannot capture task variables via :meth:`on_action_run`. - Instead walk the parent frames searching for the `all_vars` local from - StrategyBase._execute_meta(). If this fails, just leave task_vars - unset, likely causing a subtly wrong configuration to be selected. - """ - frame = sys._getframe() - while frame and not self._task_vars: - self._task_vars = frame.f_locals.get('all_vars') - frame = frame.f_back + self._put_connection() + if self.binding: + self.binding.close() + self.binding = None reset_compat_msg = ( 'Mitogen only supports "reset_connection" on Ansible 2.5.6 or later' @@ -841,9 +840,6 @@ class Connection(ansible.plugins.connection.ConnectionBase): the 'disconnected' state, and informs ContextService the connection is bad somehow, and should be shut down and discarded. 
""" - if self._task_vars is None: - self._reset_find_task_vars() - if self._play_context.remote_addr is None: # <2.5.6 incorrectly populate PlayContext for reset_connection # https://github.com/ansible/ansible/issues/27520 @@ -851,9 +847,24 @@ class Connection(ansible.plugins.connection.ConnectionBase): self.reset_compat_msg ) - self._connect() - self._mitogen_reset(mode='reset') - self._shutdown_broker() + # Clear out state in case we were ever connected. + self.close() + + inventory_name, stack = self._build_stack() + if self._play_context.become: + stack = stack[:-1] + + worker_model = ansible_mitogen.process.get_worker_model() + binding = worker_model.get_binding(inventory_name) + try: + mitogen.service.call( + call_context=binding.get_service_context(), + service_name='ansible_mitogen.services.ContextService', + method_name='reset', + stack=mitogen.utils.cast(list(stack)), + ) + finally: + binding.close() # Compatibility with Ansible 2.4 wait_for_connection plug-in. _reset = reset @@ -948,11 +959,13 @@ class Connection(ansible.plugins.connection.ConnectionBase): :param str out_path: Local filesystem path to write. """ - output = self.get_chain().call( - ansible_mitogen.target.read_path, - mitogen.utils.cast(in_path), + self._connect() + ansible_mitogen.target.transfer_file( + context=self.context, + # in_path may be AnsibleUnicode + in_path=mitogen.utils.cast(in_path), + out_path=out_path ) - ansible_mitogen.target.write_path(out_path, output) def put_data(self, out_path, data, mode=None, utimes=None): """ @@ -1024,7 +1037,8 @@ class Connection(ansible.plugins.connection.ConnectionBase): utimes=(st.st_atime, st.st_mtime)) self._connect() - self.parent.call_service( + mitogen.service.call( + call_context=self.binding.get_service_context(), service_name='mitogen.service.FileService', method_name='register', path=mitogen.utils.cast(in_path) @@ -1036,7 +1050,7 @@ class Connection(ansible.plugins.connection.ConnectionBase): # file alive, but that requires more work. self.get_chain().call( ansible_mitogen.target.transfer_file, - context=self.parent, + context=self.binding.get_child_service_context(), in_path=in_path, out_path=out_path ) diff --git a/ansible/plugins/mitogen-0.2.8-pre/ansible_mitogen/loaders.py b/ansible/plugins/mitogen-0.2.9/ansible_mitogen/loaders.py similarity index 88% rename from ansible/plugins/mitogen-0.2.8-pre/ansible_mitogen/loaders.py rename to ansible/plugins/mitogen-0.2.9/ansible_mitogen/loaders.py index ff06c0c5bcbba773493e8a75eb7d073a56672ae9..9ce6b1fa96f00e5a091e8004ee14bb79baee028a 100644 --- a/ansible/plugins/mitogen-0.2.8-pre/ansible_mitogen/loaders.py +++ b/ansible/plugins/mitogen-0.2.9/ansible_mitogen/loaders.py @@ -32,6 +32,15 @@ Stable names for PluginLoader instances across Ansible versions. 
from __future__ import absolute_import +__all__ = [ + 'action_loader', + 'connection_loader', + 'module_loader', + 'module_utils_loader', + 'shell_loader', + 'strategy_loader', +] + try: from ansible.plugins.loader import action_loader from ansible.plugins.loader import connection_loader @@ -46,3 +55,8 @@ except ImportError: # Ansible <2.4 from ansible.plugins import module_utils_loader from ansible.plugins import shell_loader from ansible.plugins import strategy_loader + + +# These are original, unwrapped implementations +action_loader__get = action_loader.get +connection_loader__get = connection_loader.get diff --git a/ansible/plugins/mitogen-0.2.8-pre/ansible_mitogen/logging.py b/ansible/plugins/mitogen-0.2.9/ansible_mitogen/logging.py similarity index 97% rename from ansible/plugins/mitogen-0.2.8-pre/ansible_mitogen/logging.py rename to ansible/plugins/mitogen-0.2.9/ansible_mitogen/logging.py index ce6f165916eb4fb534fca37e752defd03726cba0..00a701842740ec847cabde0e6e32854a06e6e9af 100644 --- a/ansible/plugins/mitogen-0.2.8-pre/ansible_mitogen/logging.py +++ b/ansible/plugins/mitogen-0.2.9/ansible_mitogen/logging.py @@ -107,8 +107,9 @@ def setup(): l_mitogen = logging.getLogger('mitogen') l_mitogen_io = logging.getLogger('mitogen.io') l_ansible_mitogen = logging.getLogger('ansible_mitogen') + l_operon = logging.getLogger('operon') - for logger in l_mitogen, l_mitogen_io, l_ansible_mitogen: + for logger in l_mitogen, l_mitogen_io, l_ansible_mitogen, l_operon: logger.handlers = [Handler(display.vvv)] logger.propagate = False diff --git a/ansible/plugins/mitogen-0.2.8-pre/ansible_mitogen/mixins.py b/ansible/plugins/mitogen-0.2.9/ansible_mitogen/mixins.py similarity index 96% rename from ansible/plugins/mitogen-0.2.8-pre/ansible_mitogen/mixins.py rename to ansible/plugins/mitogen-0.2.9/ansible_mitogen/mixins.py index 890467fd5dce0c715c212e3ec9936405cf30920d..cfdf838484be5e753f91af664394d37b53446441 100644 --- a/ansible/plugins/mitogen-0.2.8-pre/ansible_mitogen/mixins.py +++ b/ansible/plugins/mitogen-0.2.9/ansible_mitogen/mixins.py @@ -55,6 +55,11 @@ import ansible_mitogen.planner import ansible_mitogen.target from ansible.module_utils._text import to_text +try: + from ansible.utils.unsafe_proxy import wrap_var +except ImportError: + from ansible.vars.unsafe_proxy import wrap_var + LOG = logging.getLogger(__name__) @@ -182,14 +187,6 @@ class ActionModuleMixin(ansible.plugins.action.ActionBase): ) ) - def _generate_tmp_path(self): - return os.path.join( - self._connection.get_good_temp_dir(), - 'ansible_mitogen_action_%016x' % ( - random.getrandbits(8*8), - ) - ) - def _make_tmp_path(self, remote_user=None): """ Create a temporary subdirectory as a child of the temporary directory @@ -314,7 +311,7 @@ class ActionModuleMixin(ansible.plugins.action.ActionBase): except AttributeError: return getattr(self._task, 'async') - def _temp_file_gibberish(self, module_args, wrap_async): + def _set_temp_file_args(self, module_args, wrap_async): # Ansible>2.5 module_utils reuses the action's temporary directory if # one exists. Older versions error if this key is present. 
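[Reviewer note] The new wrap_var import (used by the wrap_var(result) return seen below) marks module output as untrusted so Ansible never re-templates strings a target produced. wrap_var is real Ansible API; the sample value here is made up to show the effect:

```python
from ansible.utils.unsafe_proxy import wrap_var

# A hostile target could emit Jinja2 in its output; wrapping tags every
# string as AnsibleUnsafe so the templar treats it as literal text.
result = {'stdout': '{{ lookup("pipe", "rm -rf /") }}'}
safe = wrap_var(result)
print(type(safe['stdout']).__name__)   # AnsibleUnsafeText
```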
if ansible.__version__ > '2.5': @@ -351,7 +348,7 @@ class ActionModuleMixin(ansible.plugins.action.ActionBase): self._update_module_args(module_name, module_args, task_vars) env = {} self._compute_environment_string(env) - self._temp_file_gibberish(module_args, wrap_async) + self._set_temp_file_args(module_args, wrap_async) self._connection._connect() result = ansible_mitogen.planner.invoke( @@ -368,13 +365,12 @@ class ActionModuleMixin(ansible.plugins.action.ActionBase): ) ) - if ansible.__version__ < '2.5' and delete_remote_tmp and \ - getattr(self._connection._shell, 'tmpdir', None) is not None: + if tmp and ansible.__version__ < '2.5' and delete_remote_tmp: # Built-in actions expected tmpdir to be cleaned up automatically # on _execute_module(). - self._remove_tmp_path(self._connection._shell.tmpdir) + self._remove_tmp_path(tmp) - return result + return wrap_var(result) def _postprocess_response(self, result): """ diff --git a/ansible/plugins/mitogen-0.2.8-pre/ansible_mitogen/module_finder.py b/ansible/plugins/mitogen-0.2.9/ansible_mitogen/module_finder.py similarity index 99% rename from ansible/plugins/mitogen-0.2.8-pre/ansible_mitogen/module_finder.py rename to ansible/plugins/mitogen-0.2.9/ansible_mitogen/module_finder.py index 633e3cadeeaa7f70cc088a82b3c0859ce78155a9..89aa2beba6667b3c4c8e6457b95b6a35aaca6091 100644 --- a/ansible/plugins/mitogen-0.2.8-pre/ansible_mitogen/module_finder.py +++ b/ansible/plugins/mitogen-0.2.9/ansible_mitogen/module_finder.py @@ -57,7 +57,7 @@ def get_code(module): """ Compile and return a Module's code object. """ - fp = open(module.path) + fp = open(module.path, 'rb') try: return compile(fp.read(), str(module.name), 'exec') finally: diff --git a/ansible/plugins/mitogen-0.2.8-pre/ansible_mitogen/parsing.py b/ansible/plugins/mitogen-0.2.9/ansible_mitogen/parsing.py similarity index 92% rename from ansible/plugins/mitogen-0.2.8-pre/ansible_mitogen/parsing.py rename to ansible/plugins/mitogen-0.2.9/ansible_mitogen/parsing.py index 525e60cfe1bbc5a3709734f22ab6943af990d8a9..27fca7cd6793c81b394916b55b562637585a4db6 100644 --- a/ansible/plugins/mitogen-0.2.8-pre/ansible_mitogen/parsing.py +++ b/ansible/plugins/mitogen-0.2.9/ansible_mitogen/parsing.py @@ -26,14 +26,6 @@ # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. -""" -Classes to detect each case from [0] and prepare arguments necessary for the -corresponding Runner class within the target, including preloading requisite -files/modules known missing. 
- -[0] "Ansible Module Architecture", developing_program_flow_modules.html -""" - from __future__ import absolute_import from __future__ import unicode_literals diff --git a/ansible/plugins/mitogen-0.2.8-pre/ansible_mitogen/planner.py b/ansible/plugins/mitogen-0.2.9/ansible_mitogen/planner.py similarity index 81% rename from ansible/plugins/mitogen-0.2.8-pre/ansible_mitogen/planner.py rename to ansible/plugins/mitogen-0.2.9/ansible_mitogen/planner.py index 2eebd36ddb58ac609214b5e18ca47bd5ab35e7e6..8febbdb32ff38848f8fe847b00da4b00f0cdbf02 100644 --- a/ansible/plugins/mitogen-0.2.8-pre/ansible_mitogen/planner.py +++ b/ansible/plugins/mitogen-0.2.9/ansible_mitogen/planner.py @@ -45,6 +45,7 @@ import random from ansible.executor import module_common import ansible.errors import ansible.module_utils +import ansible.release import mitogen.core import mitogen.select @@ -58,6 +59,8 @@ NO_METHOD_MSG = 'Mitogen: no invocation method found for: ' NO_INTERPRETER_MSG = 'module (%s) is missing interpreter line' NO_MODULE_MSG = 'The module %s was not found in configured module paths.' +_planner_by_path = {} + class Invocation(object): """ @@ -92,7 +95,12 @@ class Invocation(object): self.module_path = None #: Initially ``None``, but set by :func:`invoke`. The raw source or #: binary contents of the module. - self.module_source = None + self._module_source = None + + def get_module_source(self): + if self._module_source is None: + self._module_source = read_file(self.module_path) + return self._module_source def __repr__(self): return 'Invocation(module_name=%s)' % (self.module_name,) @@ -107,7 +115,8 @@ class Planner(object): def __init__(self, invocation): self._inv = invocation - def detect(self): + @classmethod + def detect(cls, path, source): """ Return true if the supplied `invocation` matches the module type implemented by this planner. @@ -148,6 +157,8 @@ class Planner(object): # named by `runner_name`. 
} """ + binding = self._inv.connection.get_binding() + new = dict((mitogen.core.UnicodeType(k), kwargs[k]) for k in kwargs) new.setdefault('good_temp_dir', @@ -155,7 +166,7 @@ class Planner(object): new.setdefault('cwd', self._inv.connection.get_default_cwd()) new.setdefault('extra_env', self._inv.connection.get_default_env()) new.setdefault('emulate_tty', True) - new.setdefault('service_context', self._inv.connection.parent) + new.setdefault('service_context', binding.get_child_service_context()) return new def __repr__(self): @@ -169,8 +180,9 @@ class BinaryPlanner(Planner): """ runner_name = 'BinaryRunner' - def detect(self): - return module_common._is_binary(self._inv.module_source) + @classmethod + def detect(cls, path, source): + return module_common._is_binary(source) def get_push_files(self): return [mitogen.core.to_text(self._inv.module_path)] @@ -216,7 +228,7 @@ class ScriptPlanner(BinaryPlanner): def _get_interpreter(self): path, arg = ansible_mitogen.parsing.parse_hashbang( - self._inv.module_source + self._inv.get_module_source() ) if path is None: raise ansible.errors.AnsibleError(NO_INTERPRETER_MSG % ( @@ -245,8 +257,9 @@ class JsonArgsPlanner(ScriptPlanner): """ runner_name = 'JsonArgsRunner' - def detect(self): - return module_common.REPLACER_JSONARGS in self._inv.module_source + @classmethod + def detect(cls, path, source): + return module_common.REPLACER_JSONARGS in source class WantJsonPlanner(ScriptPlanner): @@ -263,8 +276,9 @@ class WantJsonPlanner(ScriptPlanner): """ runner_name = 'WantJsonRunner' - def detect(self): - return b'WANT_JSON' in self._inv.module_source + @classmethod + def detect(cls, path, source): + return b'WANT_JSON' in source class NewStylePlanner(ScriptPlanner): @@ -276,8 +290,9 @@ class NewStylePlanner(ScriptPlanner): runner_name = 'NewStyleRunner' marker = b'from ansible.module_utils.' - def detect(self): - return self.marker in self._inv.module_source + @classmethod + def detect(cls, path, source): + return cls.marker in source def _get_interpreter(self): return None, None @@ -321,14 +336,15 @@ class NewStylePlanner(ScriptPlanner): for path in ansible_mitogen.loaders.module_utils_loader._get_paths( subdirs=False ) - if os.path.isdir(path) ) _module_map = None def get_module_map(self): if self._module_map is None: - self._module_map = self._inv.connection.parent.call_service( + binding = self._inv.connection.get_binding() + self._module_map = mitogen.service.call( + call_context=binding.get_service_context(), service_name='ansible_mitogen.services.ModuleDepService', method_name='scan', @@ -343,6 +359,10 @@ class NewStylePlanner(ScriptPlanner): def get_kwargs(self): return super(NewStylePlanner, self).get_kwargs( module_map=self.get_module_map(), + py_module_name=py_modname_from_path( + self._inv.module_name, + self._inv.module_path, + ), ) @@ -372,14 +392,16 @@ class ReplacerPlanner(NewStylePlanner): """ runner_name = 'ReplacerRunner' - def detect(self): - return module_common.REPLACER in self._inv.module_source + @classmethod + def detect(cls, path, source): + return module_common.REPLACER in source class OldStylePlanner(ScriptPlanner): runner_name = 'OldStyleRunner' - def detect(self): + @classmethod + def detect(cls, path, source): # Everything else. 
return True @@ -394,20 +416,63 @@ _planners = [ ] -def get_module_data(name): - path = ansible_mitogen.loaders.module_loader.find_plugin(name, '') - if path is None: - raise ansible.errors.AnsibleError(NO_MODULE_MSG % (name,)) +try: + _get_ansible_module_fqn = module_common._get_ansible_module_fqn +except AttributeError: + _get_ansible_module_fqn = None + - with open(path, 'rb') as fp: - source = fp.read() - return mitogen.core.to_text(path), source +def py_modname_from_path(name, path): + """ + Fetch the logical name of a new-style module as it might appear in + :data:`sys.modules` of the target's Python interpreter. + + * For Ansible <2.7, this is an unpackaged module named like + "ansible_module_%s". + + * For Ansible <2.9, this is an unpackaged module named like + "ansible.modules.%s" + + * Since Ansible 2.9, modules appearing within a package have the original + package hierarchy approximated on the target, enabling relative imports + to function correctly. For example, "ansible.modules.system.setup". + """ + # 2.9+ + if _get_ansible_module_fqn: + try: + return _get_ansible_module_fqn(path) + except ValueError: + pass + + if ansible.__version__ < '2.7': + return 'ansible_module_' + name + + return 'ansible.modules.' + name + + +def read_file(path): + fd = os.open(path, os.O_RDONLY) + try: + bits = [] + chunk = True + while True: + chunk = os.read(fd, 65536) + if not chunk: + break + bits.append(chunk) + finally: + os.close(fd) + + return mitogen.core.b('').join(bits) def _propagate_deps(invocation, planner, context): - invocation.connection.parent.call_service( + binding = invocation.connection.get_binding() + mitogen.service.call( + call_context=binding.get_service_context(), service_name='mitogen.service.PushFileService', method_name='propagate_paths_and_modules', + context=context, paths=planner.get_push_files(), modules=planner.get_module_deps(), @@ -459,14 +524,12 @@ def _invoke_isolated_task(invocation, planner): context.shutdown() -def _get_planner(invocation): +def _get_planner(name, path, source): for klass in _planners: - planner = klass(invocation) - if planner.detect(): - LOG.debug('%r accepted %r (filename %r)', planner, - invocation.module_name, invocation.module_path) - return planner - LOG.debug('%r rejected %r', planner, invocation.module_name) + if klass.detect(path, source): + LOG.debug('%r accepted %r (filename %r)', klass, name, path) + return klass + LOG.debug('%r rejected %r', klass, name) raise ansible.errors.AnsibleError(NO_METHOD_MSG + repr(invocation)) @@ -481,10 +544,24 @@ def invoke(invocation): :raises ansible.errors.AnsibleError: Unrecognized/unsupported module type. 
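[Reviewer note] Turning detect() into a classmethod taking (path, source) is what enables the per-path planner cache that invoke() builds just below: detection depends only on the module file, so it runs once per file instead of once per task. The shape of that memoization in isolation (detect_planner and planner_for are illustrative names):

```python
_planner_cls_by_path = {}

def detect_planner(name, path, source, planners):
    """One-off detection: the first planner class whose detect() accepts wins."""
    for klass in planners:
        if klass.detect(path, source):
            return klass
    raise ValueError('no planner for %s' % (name,))

def planner_for(name, path, source, planners):
    # detect() is pure in (path, source), so cache the class per module path;
    # later invocations only pay for constructing a cheap instance.
    if path not in _planner_cls_by_path:
        _planner_cls_by_path[path] = detect_planner(name, path, source, planners)
    return _planner_cls_by_path[path]
```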
""" - (invocation.module_path, - invocation.module_source) = get_module_data(invocation.module_name) - planner = _get_planner(invocation) + path = ansible_mitogen.loaders.module_loader.find_plugin( + invocation.module_name, + '', + ) + if path is None: + raise ansible.errors.AnsibleError(NO_MODULE_MSG % ( + invocation.module_name, + )) + + invocation.module_path = mitogen.core.to_text(path) + if invocation.module_path not in _planner_by_path: + _planner_by_path[invocation.module_path] = _get_planner( + invocation.module_name, + invocation.module_path, + invocation.get_module_source() + ) + planner = _planner_by_path[invocation.module_path](invocation) if invocation.wrap_async: response = _invoke_async_task(invocation, planner) elif planner.should_fork(): diff --git a/ansible/plugins/mitogen-0.2.8-pre/ansible_mitogen/plugins/__init__.py b/ansible/plugins/mitogen-0.2.9/ansible_mitogen/plugins/__init__.py similarity index 100% rename from ansible/plugins/mitogen-0.2.8-pre/ansible_mitogen/plugins/__init__.py rename to ansible/plugins/mitogen-0.2.9/ansible_mitogen/plugins/__init__.py diff --git a/ansible/plugins/mitogen-0.2.8-pre/ansible_mitogen/plugins/action/__init__.py b/ansible/plugins/mitogen-0.2.9/ansible_mitogen/plugins/action/__init__.py similarity index 100% rename from ansible/plugins/mitogen-0.2.8-pre/ansible_mitogen/plugins/action/__init__.py rename to ansible/plugins/mitogen-0.2.9/ansible_mitogen/plugins/action/__init__.py diff --git a/ansible/plugins/mitogen-0.2.9/ansible_mitogen/plugins/action/mitogen_fetch.py b/ansible/plugins/mitogen-0.2.9/ansible_mitogen/plugins/action/mitogen_fetch.py new file mode 100644 index 0000000000000000000000000000000000000000..1844efd8814c553c024a77e9aad5e43145987082 --- /dev/null +++ b/ansible/plugins/mitogen-0.2.9/ansible_mitogen/plugins/action/mitogen_fetch.py @@ -0,0 +1,162 @@ +# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com> +# +# This file is part of Ansible +# +# Ansible is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# Ansible is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with Ansible. If not, see <http://www.gnu.org/licenses/>. +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import os + +from ansible.module_utils._text import to_bytes +from ansible.module_utils.six import string_types +from ansible.module_utils.parsing.convert_bool import boolean +from ansible.plugins.action import ActionBase +from ansible.utils.hashing import checksum, md5, secure_hash +from ansible.utils.path import makedirs_safe + + +REMOTE_CHECKSUM_ERRORS = { + '0': "unable to calculate the checksum of the remote file", + '1': "the remote file does not exist", + '2': "no read permission on remote file", + '3': "remote file is a directory, fetch cannot work on directories", + '4': "python isn't present on the system. Unable to compute checksum", + '5': "stdlib json was not found on the remote machine. 
Only the raw module can work without those installed", +} + + +class ActionModule(ActionBase): + + def run(self, tmp=None, task_vars=None): + ''' handler for fetch operations ''' + if task_vars is None: + task_vars = dict() + + result = super(ActionModule, self).run(tmp, task_vars) + try: + if self._play_context.check_mode: + result['skipped'] = True + result['msg'] = 'check mode not (yet) supported for this module' + return result + + flat = boolean(self._task.args.get('flat'), strict=False) + fail_on_missing = boolean(self._task.args.get('fail_on_missing', True), strict=False) + validate_checksum = boolean(self._task.args.get('validate_checksum', True), strict=False) + + # validate source and dest are strings FIXME: use basic.py and module specs + source = self._task.args.get('src') + if not isinstance(source, string_types): + result['msg'] = "Invalid type supplied for source option, it must be a string" + + dest = self._task.args.get('dest') + if not isinstance(dest, string_types): + result['msg'] = "Invalid type supplied for dest option, it must be a string" + + if result.get('msg'): + result['failed'] = True + return result + + source = self._connection._shell.join_path(source) + source = self._remote_expand_user(source) + + # calculate checksum for the remote file, don't bother if using + # become as slurp will be used Force remote_checksum to follow + # symlinks because fetch always follows symlinks + remote_checksum = self._remote_checksum(source, all_vars=task_vars, follow=True) + + # calculate the destination name + if os.path.sep not in self._connection._shell.join_path('a', ''): + source = self._connection._shell._unquote(source) + source_local = source.replace('\\', '/') + else: + source_local = source + + dest = os.path.expanduser(dest) + if flat: + if os.path.isdir(to_bytes(dest, errors='surrogate_or_strict')) and not dest.endswith(os.sep): + result['msg'] = "dest is an existing directory, use a trailing slash if you want to fetch src into that directory" + result['file'] = dest + result['failed'] = True + return result + if dest.endswith(os.sep): + # if the path ends with "/", we'll use the source filename as the + # destination filename + base = os.path.basename(source_local) + dest = os.path.join(dest, base) + if not dest.startswith("/"): + # if dest does not start with "/", we'll assume a relative path + dest = self._loader.path_dwim(dest) + else: + # files are saved in dest dir, with a subdir for each host, then the filename + if 'inventory_hostname' in task_vars: + target_name = task_vars['inventory_hostname'] + else: + target_name = self._play_context.remote_addr + dest = "%s/%s/%s" % (self._loader.path_dwim(dest), target_name, source_local) + + dest = dest.replace("//", "/") + + if remote_checksum in REMOTE_CHECKSUM_ERRORS: + result['changed'] = False + result['file'] = source + result['msg'] = REMOTE_CHECKSUM_ERRORS[remote_checksum] + # Historically, these don't fail because you may want to transfer + # a log file that possibly MAY exist but keep going to fetch other + # log files. Today, this is better achieved by adding + # ignore_errors or failed_when to the task. 
Control the behaviour + # via fail_when_missing + if fail_on_missing: + result['failed'] = True + del result['changed'] + else: + result['msg'] += ", not transferring, ignored" + return result + + # calculate checksum for the local file + local_checksum = checksum(dest) + + if remote_checksum != local_checksum: + # create the containing directories, if needed + makedirs_safe(os.path.dirname(dest)) + + # fetch the file and check for changes + self._connection.fetch_file(source, dest) + new_checksum = secure_hash(dest) + # For backwards compatibility. We'll return None on FIPS enabled systems + try: + new_md5 = md5(dest) + except ValueError: + new_md5 = None + + if validate_checksum and new_checksum != remote_checksum: + result.update(dict(failed=True, md5sum=new_md5, + msg="checksum mismatch", file=source, dest=dest, remote_md5sum=None, + checksum=new_checksum, remote_checksum=remote_checksum)) + else: + result.update({'changed': True, 'md5sum': new_md5, 'dest': dest, + 'remote_md5sum': None, 'checksum': new_checksum, + 'remote_checksum': remote_checksum}) + else: + # For backwards compatibility. We'll return None on FIPS enabled systems + try: + local_md5 = md5(dest) + except ValueError: + local_md5 = None + result.update(dict(changed=False, md5sum=local_md5, file=source, dest=dest, checksum=local_checksum)) + + finally: + self._remove_tmp_path(self._connection._shell.tmpdir) + + return result diff --git a/ansible/plugins/mitogen-0.2.8-pre/ansible_mitogen/plugins/action/mitogen_get_stack.py b/ansible/plugins/mitogen-0.2.9/ansible_mitogen/plugins/action/mitogen_get_stack.py similarity index 96% rename from ansible/plugins/mitogen-0.2.8-pre/ansible_mitogen/plugins/action/mitogen_get_stack.py rename to ansible/plugins/mitogen-0.2.9/ansible_mitogen/plugins/action/mitogen_get_stack.py index 12afbfbaa6c9ccf5f9acca210b6906530f281a11..171f84ea7cf5e9877bbbca3a2f8cef2a7c66f5f0 100644 --- a/ansible/plugins/mitogen-0.2.8-pre/ansible_mitogen/plugins/action/mitogen_get_stack.py +++ b/ansible/plugins/mitogen-0.2.9/ansible_mitogen/plugins/action/mitogen_get_stack.py @@ -47,8 +47,9 @@ class ActionModule(ActionBase): 'skipped': True, } + _, stack = self._connection._build_stack() return { 'changed': True, - 'result': self._connection._build_stack(), + 'result': stack, '_ansible_verbose_always': True, } diff --git a/ansible/plugins/mitogen-0.2.8-pre/ansible_mitogen/plugins/connection/__init__.py b/ansible/plugins/mitogen-0.2.9/ansible_mitogen/plugins/connection/__init__.py similarity index 100% rename from ansible/plugins/mitogen-0.2.8-pre/ansible_mitogen/plugins/connection/__init__.py rename to ansible/plugins/mitogen-0.2.9/ansible_mitogen/plugins/connection/__init__.py diff --git a/ansible/plugins/mitogen-0.2.8-pre/ansible_mitogen/plugins/connection/mitogen_buildah.py b/ansible/plugins/mitogen-0.2.9/ansible_mitogen/plugins/connection/mitogen_buildah.py similarity index 100% rename from ansible/plugins/mitogen-0.2.8-pre/ansible_mitogen/plugins/connection/mitogen_buildah.py rename to ansible/plugins/mitogen-0.2.9/ansible_mitogen/plugins/connection/mitogen_buildah.py diff --git a/ansible/plugins/mitogen-0.2.8-pre/ansible_mitogen/plugins/connection/mitogen_doas.py b/ansible/plugins/mitogen-0.2.9/ansible_mitogen/plugins/connection/mitogen_doas.py similarity index 100% rename from ansible/plugins/mitogen-0.2.8-pre/ansible_mitogen/plugins/connection/mitogen_doas.py rename to ansible/plugins/mitogen-0.2.9/ansible_mitogen/plugins/connection/mitogen_doas.py diff --git 
a/ansible/plugins/mitogen-0.2.8-pre/ansible_mitogen/plugins/connection/mitogen_docker.py b/ansible/plugins/mitogen-0.2.9/ansible_mitogen/plugins/connection/mitogen_docker.py similarity index 100% rename from ansible/plugins/mitogen-0.2.8-pre/ansible_mitogen/plugins/connection/mitogen_docker.py rename to ansible/plugins/mitogen-0.2.9/ansible_mitogen/plugins/connection/mitogen_docker.py diff --git a/ansible/plugins/mitogen-0.2.8-pre/ansible_mitogen/plugins/connection/mitogen_jail.py b/ansible/plugins/mitogen-0.2.9/ansible_mitogen/plugins/connection/mitogen_jail.py similarity index 100% rename from ansible/plugins/mitogen-0.2.8-pre/ansible_mitogen/plugins/connection/mitogen_jail.py rename to ansible/plugins/mitogen-0.2.9/ansible_mitogen/plugins/connection/mitogen_jail.py diff --git a/ansible/plugins/mitogen-0.2.8-pre/ansible_mitogen/plugins/connection/mitogen_kubectl.py b/ansible/plugins/mitogen-0.2.9/ansible_mitogen/plugins/connection/mitogen_kubectl.py similarity index 92% rename from ansible/plugins/mitogen-0.2.8-pre/ansible_mitogen/plugins/connection/mitogen_kubectl.py rename to ansible/plugins/mitogen-0.2.9/ansible_mitogen/plugins/connection/mitogen_kubectl.py index 2dab131b00bbf8d616cbda1f2ccfcd01a73cc09a..44d3b50a2057b59ecb22f9bed7ac7f474933dc07 100644 --- a/ansible/plugins/mitogen-0.2.8-pre/ansible_mitogen/plugins/connection/mitogen_kubectl.py +++ b/ansible/plugins/mitogen-0.2.9/ansible_mitogen/plugins/connection/mitogen_kubectl.py @@ -31,11 +31,6 @@ from __future__ import absolute_import import os.path import sys -try: - from ansible.plugins.connection import kubectl -except ImportError: - kubectl = None - from ansible.errors import AnsibleConnectionFailure from ansible.module_utils.six import iteritems @@ -47,6 +42,19 @@ except ImportError: del base_dir import ansible_mitogen.connection +import ansible_mitogen.loaders + + +_class = ansible_mitogen.loaders.connection_loader__get( + 'kubectl', + class_only=True, +) + +if _class: + kubectl = sys.modules[_class.__module__] + del _class +else: + kubectl = None class Connection(ansible_mitogen.connection.Connection): diff --git a/ansible/plugins/mitogen-0.2.8-pre/ansible_mitogen/plugins/connection/mitogen_local.py b/ansible/plugins/mitogen-0.2.9/ansible_mitogen/plugins/connection/mitogen_local.py similarity index 97% rename from ansible/plugins/mitogen-0.2.8-pre/ansible_mitogen/plugins/connection/mitogen_local.py rename to ansible/plugins/mitogen-0.2.9/ansible_mitogen/plugins/connection/mitogen_local.py index 24b84a03606dfc6894b5c7d79bc8c36355da9d36..a98c834c59e0cb7e3fc4709e36aa9263b6351237 100644 --- a/ansible/plugins/mitogen-0.2.8-pre/ansible_mitogen/plugins/connection/mitogen_local.py +++ b/ansible/plugins/mitogen-0.2.9/ansible_mitogen/plugins/connection/mitogen_local.py @@ -81,6 +81,6 @@ class Connection(ansible_mitogen.connection.Connection): from WorkerProcess, we must emulate that. 
""" return dict_diff( - old=ansible_mitogen.process.MuxProcess.original_env, + old=ansible_mitogen.process.MuxProcess.cls_original_env, new=os.environ, ) diff --git a/ansible/plugins/mitogen-0.2.8-pre/ansible_mitogen/plugins/connection/mitogen_lxc.py b/ansible/plugins/mitogen-0.2.9/ansible_mitogen/plugins/connection/mitogen_lxc.py similarity index 100% rename from ansible/plugins/mitogen-0.2.8-pre/ansible_mitogen/plugins/connection/mitogen_lxc.py rename to ansible/plugins/mitogen-0.2.9/ansible_mitogen/plugins/connection/mitogen_lxc.py diff --git a/ansible/plugins/mitogen-0.2.8-pre/ansible_mitogen/plugins/connection/mitogen_lxd.py b/ansible/plugins/mitogen-0.2.9/ansible_mitogen/plugins/connection/mitogen_lxd.py similarity index 100% rename from ansible/plugins/mitogen-0.2.8-pre/ansible_mitogen/plugins/connection/mitogen_lxd.py rename to ansible/plugins/mitogen-0.2.9/ansible_mitogen/plugins/connection/mitogen_lxd.py diff --git a/ansible/plugins/mitogen-0.2.8-pre/ansible_mitogen/plugins/connection/mitogen_machinectl.py b/ansible/plugins/mitogen-0.2.9/ansible_mitogen/plugins/connection/mitogen_machinectl.py similarity index 100% rename from ansible/plugins/mitogen-0.2.8-pre/ansible_mitogen/plugins/connection/mitogen_machinectl.py rename to ansible/plugins/mitogen-0.2.9/ansible_mitogen/plugins/connection/mitogen_machinectl.py diff --git a/ansible/plugins/mitogen-0.2.8-pre/ansible_mitogen/plugins/connection/mitogen_setns.py b/ansible/plugins/mitogen-0.2.9/ansible_mitogen/plugins/connection/mitogen_setns.py similarity index 100% rename from ansible/plugins/mitogen-0.2.8-pre/ansible_mitogen/plugins/connection/mitogen_setns.py rename to ansible/plugins/mitogen-0.2.9/ansible_mitogen/plugins/connection/mitogen_setns.py diff --git a/ansible/plugins/mitogen-0.2.8-pre/ansible_mitogen/plugins/connection/mitogen_ssh.py b/ansible/plugins/mitogen-0.2.9/ansible_mitogen/plugins/connection/mitogen_ssh.py similarity index 93% rename from ansible/plugins/mitogen-0.2.8-pre/ansible_mitogen/plugins/connection/mitogen_ssh.py rename to ansible/plugins/mitogen-0.2.9/ansible_mitogen/plugins/connection/mitogen_ssh.py index df0e87cbe62003fc3166e5c4aea2bd890aeba64b..1c81dae52eacc39195d5dc51e400d786a8a3590b 100644 --- a/ansible/plugins/mitogen-0.2.8-pre/ansible_mitogen/plugins/connection/mitogen_ssh.py +++ b/ansible/plugins/mitogen-0.2.9/ansible_mitogen/plugins/connection/mitogen_ssh.py @@ -42,21 +42,23 @@ DOCUMENTATION = """ options: """ -import ansible.plugins.connection.ssh - try: - import ansible_mitogen.connection + import ansible_mitogen except ImportError: base_dir = os.path.dirname(__file__) sys.path.insert(0, os.path.abspath(os.path.join(base_dir, '../../..'))) del base_dir import ansible_mitogen.connection +import ansible_mitogen.loaders class Connection(ansible_mitogen.connection.Connection): transport = 'ssh' - vanilla_class = ansible.plugins.connection.ssh.Connection + vanilla_class = ansible_mitogen.loaders.connection_loader__get( + 'ssh', + class_only=True, + ) @staticmethod def _create_control_path(*args, **kwargs): diff --git a/ansible/plugins/mitogen-0.2.8-pre/ansible_mitogen/plugins/connection/mitogen_su.py b/ansible/plugins/mitogen-0.2.9/ansible_mitogen/plugins/connection/mitogen_su.py similarity index 100% rename from ansible/plugins/mitogen-0.2.8-pre/ansible_mitogen/plugins/connection/mitogen_su.py rename to ansible/plugins/mitogen-0.2.9/ansible_mitogen/plugins/connection/mitogen_su.py diff --git a/ansible/plugins/mitogen-0.2.8-pre/ansible_mitogen/plugins/connection/mitogen_sudo.py 
b/ansible/plugins/mitogen-0.2.9/ansible_mitogen/plugins/connection/mitogen_sudo.py similarity index 100% rename from ansible/plugins/mitogen-0.2.8-pre/ansible_mitogen/plugins/connection/mitogen_sudo.py rename to ansible/plugins/mitogen-0.2.9/ansible_mitogen/plugins/connection/mitogen_sudo.py diff --git a/ansible/plugins/mitogen-0.2.8-pre/ansible_mitogen/plugins/strategy/__init__.py b/ansible/plugins/mitogen-0.2.9/ansible_mitogen/plugins/strategy/__init__.py similarity index 100% rename from ansible/plugins/mitogen-0.2.8-pre/ansible_mitogen/plugins/strategy/__init__.py rename to ansible/plugins/mitogen-0.2.9/ansible_mitogen/plugins/strategy/__init__.py diff --git a/ansible/plugins/mitogen-0.2.8-pre/ansible_mitogen/plugins/strategy/mitogen.py b/ansible/plugins/mitogen-0.2.9/ansible_mitogen/plugins/strategy/mitogen.py similarity index 100% rename from ansible/plugins/mitogen-0.2.8-pre/ansible_mitogen/plugins/strategy/mitogen.py rename to ansible/plugins/mitogen-0.2.9/ansible_mitogen/plugins/strategy/mitogen.py diff --git a/ansible/plugins/mitogen-0.2.8-pre/ansible_mitogen/plugins/strategy/mitogen_free.py b/ansible/plugins/mitogen-0.2.9/ansible_mitogen/plugins/strategy/mitogen_free.py similarity index 100% rename from ansible/plugins/mitogen-0.2.8-pre/ansible_mitogen/plugins/strategy/mitogen_free.py rename to ansible/plugins/mitogen-0.2.9/ansible_mitogen/plugins/strategy/mitogen_free.py diff --git a/ansible/plugins/mitogen-0.2.8-pre/ansible_mitogen/plugins/strategy/mitogen_host_pinned.py b/ansible/plugins/mitogen-0.2.9/ansible_mitogen/plugins/strategy/mitogen_host_pinned.py similarity index 100% rename from ansible/plugins/mitogen-0.2.8-pre/ansible_mitogen/plugins/strategy/mitogen_host_pinned.py rename to ansible/plugins/mitogen-0.2.9/ansible_mitogen/plugins/strategy/mitogen_host_pinned.py diff --git a/ansible/plugins/mitogen-0.2.8-pre/ansible_mitogen/plugins/strategy/mitogen_linear.py b/ansible/plugins/mitogen-0.2.9/ansible_mitogen/plugins/strategy/mitogen_linear.py similarity index 100% rename from ansible/plugins/mitogen-0.2.8-pre/ansible_mitogen/plugins/strategy/mitogen_linear.py rename to ansible/plugins/mitogen-0.2.9/ansible_mitogen/plugins/strategy/mitogen_linear.py diff --git a/ansible/plugins/mitogen-0.2.9/ansible_mitogen/process.py b/ansible/plugins/mitogen-0.2.9/ansible_mitogen/process.py new file mode 100644 index 0000000000000000000000000000000000000000..1fc7bf801a76d488226b638c0366d7577d7d4421 --- /dev/null +++ b/ansible/plugins/mitogen-0.2.9/ansible_mitogen/process.py @@ -0,0 +1,745 @@ +# Copyright 2019, David Wilson +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. Neither the name of the copyright holder nor the names of its contributors +# may be used to endorse or promote products derived from this software without +# specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. + +from __future__ import absolute_import +import atexit +import logging +import multiprocessing +import os +import resource +import socket +import signal +import sys + +try: + import faulthandler +except ImportError: + faulthandler = None + +try: + import setproctitle +except ImportError: + setproctitle = None + +import mitogen +import mitogen.core +import mitogen.debug +import mitogen.fork +import mitogen.master +import mitogen.parent +import mitogen.service +import mitogen.unix +import mitogen.utils + +import ansible +import ansible.constants as C +import ansible.errors +import ansible_mitogen.logging +import ansible_mitogen.services + +from mitogen.core import b +import ansible_mitogen.affinity + + +LOG = logging.getLogger(__name__) + +ANSIBLE_PKG_OVERRIDE = ( + u"__version__ = %r\n" + u"__author__ = %r\n" +) + +MAX_MESSAGE_SIZE = 4096 * 1048576 + +worker_model_msg = ( + 'Mitogen connection types may only be instantiated when one of the ' + '"mitogen_*" or "operon_*" strategies are active.' +) + +shutting_down_msg = ( + 'The task worker cannot connect. Ansible may be shutting down, or ' + 'the maximum open files limit may have been exceeded. If this occurs ' + 'midway through a run, please retry after increasing the open file ' + 'limit (ulimit -n). Original error: %s' +) + + +#: The worker model as configured by the currently running strategy. This is +#: managed via :func:`get_worker_model` / :func:`set_worker_model` functions by +#: :class:`StrategyMixin`. +_worker_model = None + + +#: A copy of the sole :class:`ClassicWorkerModel` that ever exists during a +#: classic run, as returned by :func:`get_classic_worker_model`. +_classic_worker_model = None + + +def set_worker_model(model): + """ + To remove process model-wiring from + :class:`ansible_mitogen.connection.Connection`, it is necessary to track + some idea of the configured execution environment outside the connection + plug-in. + + That is what :func:`set_worker_model` and :func:`get_worker_model` are for. + """ + global _worker_model + assert model is None or _worker_model is None + _worker_model = model + + +def get_worker_model(): + """ + Return the :class:`WorkerModel` currently configured by the running + strategy. + """ + if _worker_model is None: + raise ansible.errors.AnsibleConnectionFailure(worker_model_msg) + return _worker_model + + +def get_classic_worker_model(**kwargs): + """ + Return the single :class:`ClassicWorkerModel` instance, constructing it if + necessary. + """ + global _classic_worker_model + assert _classic_worker_model is None or (not kwargs), \ + "ClassicWorkerModel kwargs supplied but model already constructed" + + if _classic_worker_model is None: + _classic_worker_model = ClassicWorkerModel(**kwargs) + return _classic_worker_model + + +def getenv_int(key, default=0): + """ + Get an integer-valued environment variable `key`, if it exists and parses + as an integer, otherwise return `default`.
+ """ + try: + return int(os.environ.get(key, str(default))) + except ValueError: + return default + + +def save_pid(name): + """ + When debugging and profiling, it is very annoying to poke through the + process list to discover the currently running Ansible and MuxProcess IDs, + especially when trying to catch an issue during early startup. So here, if + a magic environment variable set, stash them in hidden files in the CWD:: + + alias muxpid="cat .ansible-mux.pid" + alias anspid="cat .ansible-controller.pid" + + gdb -p $(muxpid) + perf top -p $(anspid) + """ + if os.environ.get('MITOGEN_SAVE_PIDS'): + with open('.ansible-%s.pid' % (name,), 'w') as fp: + fp.write(str(os.getpid())) + + +def setup_pool(pool): + """ + Configure a connection multiplexer's :class:`mitogen.service.Pool` with + services accessed by clients and WorkerProcesses. + """ + pool.add(mitogen.service.FileService(router=pool.router)) + pool.add(mitogen.service.PushFileService(router=pool.router)) + pool.add(ansible_mitogen.services.ContextService(router=pool.router)) + pool.add(ansible_mitogen.services.ModuleDepService(pool.router)) + LOG.debug('Service pool configured: size=%d', pool.size) + + +def _setup_simplejson(responder): + """ + We support serving simplejson for Python 2.4 targets on Ansible 2.3, at + least so the package's own CI Docker scripts can run without external + help, however newer versions of simplejson no longer support Python + 2.4. Therefore override any installed/loaded version with a + 2.4-compatible version we ship in the compat/ directory. + """ + responder.whitelist_prefix('simplejson') + + # issue #536: must be at end of sys.path, in case existing newer + # version is already loaded. + compat_path = os.path.join(os.path.dirname(__file__), 'compat') + sys.path.append(compat_path) + + for fullname, is_pkg, suffix in ( + (u'simplejson', True, '__init__.py'), + (u'simplejson.decoder', False, 'decoder.py'), + (u'simplejson.encoder', False, 'encoder.py'), + (u'simplejson.scanner', False, 'scanner.py'), + ): + path = os.path.join(compat_path, 'simplejson', suffix) + fp = open(path, 'rb') + try: + source = fp.read() + finally: + fp.close() + + responder.add_source_override( + fullname=fullname, + path=path, + source=source, + is_pkg=is_pkg, + ) + + +def _setup_responder(responder): + """ + Configure :class:`mitogen.master.ModuleResponder` to only permit + certain packages, and to generate custom responses for certain modules. + """ + responder.whitelist_prefix('ansible') + responder.whitelist_prefix('ansible_mitogen') + _setup_simplejson(responder) + + # Ansible 2.3 is compatible with Python 2.4 targets, however + # ansible/__init__.py is not. Instead, executor/module_common.py writes + # out a 2.4-compatible namespace package for unknown reasons. So we + # copy it here. + responder.add_source_override( + fullname='ansible', + path=ansible.__file__, + source=(ANSIBLE_PKG_OVERRIDE % ( + ansible.__version__, + ansible.__author__, + )).encode(), + is_pkg=True, + ) + + +def increase_open_file_limit(): + """ + #549: in order to reduce the possibility of hitting an open files limit, + increase :data:`resource.RLIMIT_NOFILE` from its soft limit to its hard + limit, if they differ. + + It is common that a low soft limit is configured by default, where the hard + limit is much higher. + """ + soft, hard = resource.getrlimit(resource.RLIMIT_NOFILE) + if hard == resource.RLIM_INFINITY: + hard_s = '(infinity)' + # cap in case of O(RLIMIT_NOFILE) algorithm in some subprocess. 
+ hard = 524288 + else: + hard_s = str(hard) + + LOG.debug('inherited open file limits: soft=%d hard=%s', soft, hard_s) + if soft >= hard: + LOG.debug('max open files already set to hard limit: %d', hard) + return + + # OS X is limited by kern.maxfilesperproc sysctl, rather than the + # advertised unlimited hard RLIMIT_NOFILE. Just hard-wire known defaults + # for that sysctl, to avoid the mess of querying it. + for value in (hard, 10240): + try: + resource.setrlimit(resource.RLIMIT_NOFILE, (value, hard)) + LOG.debug('raised soft open file limit from %d to %d', soft, value) + break + except ValueError as e: + LOG.debug('could not raise soft open file limit from %d to %d: %s', + soft, value, e) + + +def common_setup(enable_affinity=True, _init_logging=True): + save_pid('controller') + ansible_mitogen.logging.set_process_name('top') + + if _init_logging: + ansible_mitogen.logging.setup() + + if enable_affinity: + ansible_mitogen.affinity.policy.assign_controller() + + mitogen.utils.setup_gil() + if faulthandler is not None: + faulthandler.enable() + + MuxProcess.profiling = getenv_int('MITOGEN_PROFILING') > 0 + if MuxProcess.profiling: + mitogen.core.enable_profiling() + + MuxProcess.cls_original_env = dict(os.environ) + increase_open_file_limit() + + +def get_cpu_count(default=None): + """ + Get the multiplexer CPU count from the MITOGEN_CPU_COUNT environment + variable, returning `default` if one isn't set, or is out of range. + + :param int default: + Default CPU, or :data:`None` to use all available CPUs. + """ + max_cpus = multiprocessing.cpu_count() + if default is None: + default = max_cpus + + cpu_count = getenv_int('MITOGEN_CPU_COUNT', default=default) + if cpu_count < 1 or cpu_count > max_cpus: + cpu_count = default + + return cpu_count + + +class Broker(mitogen.master.Broker): + """ + WorkerProcess maintains at most 2 file descriptors, therefore does not need + the exuberant syscall expense of EpollPoller, so override it and restore + the poll() poller. + """ + poller_class = mitogen.core.Poller + + +class Binding(object): + """ + Represent a bound connection for a particular inventory hostname. When + operating in sharded mode, the actual MuxProcess implementing a connection + varies according to the target machine. Depending on the particular + implementation, this class represents a binding to the correct MuxProcess. + """ + def get_child_service_context(self): + """ + Return the :class:`mitogen.core.Context` to which children should + direct requests for services such as FileService, or :data:`None` for + the local process. + + This can be different from :meth:`get_service_context` where MuxProcess + and WorkerProcess are combined, and it is discovered a task is + delegated after being assigned to its initial worker for the original + un-delegated hostname. In that case, connection management and + expensive services like file transfer must be implemented by the + MuxProcess connected to the target, rather than routed to the + MuxProcess responsible for executing the task. + """ + raise NotImplementedError() + + def get_service_context(self): + """ + Return the :class:`mitogen.core.Context` to which this process should + direct ContextService requests, or :data:`None` for the local process. + """ + raise NotImplementedError() + + def close(self): + """ + Finalize any associated resources. 
+ """ + raise NotImplementedError() + + +class WorkerModel(object): + """ + Interface used by StrategyMixin to manage various Mitogen services, by + default running in one or more connection multiplexer subprocesses spawned + off the top-level Ansible process. + """ + def on_strategy_start(self): + """ + Called prior to strategy start in the top-level process. Responsible + for preparing any worker/connection multiplexer state. + """ + raise NotImplementedError() + + def on_strategy_complete(self): + """ + Called after strategy completion in the top-level process. Must place + Ansible back in a "compatible" state where any other strategy plug-in + may execute. + """ + raise NotImplementedError() + + def get_binding(self, inventory_name): + """ + Return a :class:`Binding` to access Mitogen services for + `inventory_name`. Usually called from worker processes, but may also be + called from top-level process to handle "meta: reset_connection". + """ + raise NotImplementedError() + + +class ClassicBinding(Binding): + """ + Only one connection may be active at a time in a classic worker, so its + binding just provides forwarders back to :class:`ClassicWorkerModel`. + """ + def __init__(self, model): + self.model = model + + def get_service_context(self): + """ + See Binding.get_service_context(). + """ + return self.model.parent + + def get_child_service_context(self): + """ + See Binding.get_child_service_context(). + """ + return self.model.parent + + def close(self): + """ + See Binding.close(). + """ + self.model.on_binding_close() + + +class ClassicWorkerModel(WorkerModel): + #: In the top-level process, this references one end of a socketpair(), + #: whose other end child MuxProcesses block reading from to determine when + #: the master process dies. When the top-level exits abnormally, or + #: normally but where :func:`_on_process_exit` has been called, this socket + #: will be closed, causing all the children to wake. + parent_sock = None + + #: In the mux process, this is the other end of :attr:`cls_parent_sock`. + #: The main thread blocks on a read from it until :attr:`cls_parent_sock` + #: is closed. + child_sock = None + + #: mitogen.master.Router for this worker. + router = None + + #: mitogen.master.Broker for this worker. + broker = None + + #: Name of multiplexer process socket we are currently connected to. + listener_path = None + + #: mitogen.parent.Context representing the parent Context, which is the + #: connection multiplexer process when running in classic mode, or the + #: top-level process when running a new-style mode. + parent = None + + def __init__(self, _init_logging=True): + """ + Arrange for classic model multiplexers to be started. The parent choses + UNIX socket paths each child will use prior to fork, creates a + socketpair used essentially as a semaphore, then blocks waiting for the + child to indicate the UNIX socket is ready for use. + + :param bool _init_logging: + For testing, if :data:`False`, don't initialize logging. + """ + # #573: The process ID that installed the :mod:`atexit` handler. If + # some unknown Ansible plug-in forks the Ansible top-level process and + # later performs a graceful Python exit, it may try to wait for child + # PIDs it never owned, causing a crash. We want to avoid that. 
+ self._pid = os.getpid() + + common_setup(_init_logging=_init_logging) + + self.parent_sock, self.child_sock = socket.socketpair() + mitogen.core.set_cloexec(self.parent_sock.fileno()) + mitogen.core.set_cloexec(self.child_sock.fileno()) + + self._muxes = [ + MuxProcess(self, index) + for index in range(get_cpu_count(default=1)) + ] + for mux in self._muxes: + mux.start() + + atexit.register(self._on_process_exit) + self.child_sock.close() + self.child_sock = None + + def _listener_for_name(self, name): + """ + Given an inventory hostname, return the UNIX listener that should + communicate with it. This is a simple hash of the inventory name. + """ + mux = self._muxes[abs(hash(name)) % len(self._muxes)] + LOG.debug('will use multiplexer %d (%s) to connect to "%s"', + mux.index, mux.path, name) + return mux.path + + def _reconnect(self, path): + if self.router is not None: + # Router can just be overwritten, but the previous parent + # connection must explicitly be removed from the broker first. + self.router.disconnect(self.parent) + self.parent = None + self.router = None + + try: + self.router, self.parent = mitogen.unix.connect( + path=path, + broker=self.broker, + ) + except mitogen.unix.ConnectError as e: + # This is not AnsibleConnectionFailure since we want to break + # with_items loops. + raise ansible.errors.AnsibleError(shutting_down_msg % (e,)) + + self.router.max_message_size = MAX_MESSAGE_SIZE + self.listener_path = path + + def _on_process_exit(self): + """ + This is an :mod:`atexit` handler installed in the top-level process. + + Shut the write end of `sock`, causing the receive side of the socket in + every :class:`MuxProcess` to return 0-byte reads, and causing their + main threads to wake and initiate shutdown. After shutting the socket + down, wait on each child to finish exiting. + + This is done using :mod:`atexit` since Ansible lacks any better hook to + run code during exit, and unless some synchronization exists with + MuxProcess, debug logs may appear on the user's terminal *after* the + prompt has been printed. + """ + if self._pid != os.getpid(): + return + + try: + self.parent_sock.shutdown(socket.SHUT_WR) + except socket.error: + # Already closed. This is possible when tests are running. + LOG.debug('_on_process_exit: ignoring duplicate call') + return + + mitogen.core.io_op(self.parent_sock.recv, 1) + self.parent_sock.close() + + for mux in self._muxes: + _, status = os.waitpid(mux.pid, 0) + status = mitogen.fork._convert_exit_status(status) + LOG.debug('multiplexer %d PID %d %s', mux.index, mux.pid, + mitogen.parent.returncode_to_str(status)) + + def _test_reset(self): + """ + Used to clean up in unit tests. + """ + self.on_binding_close() + self._on_process_exit() + set_worker_model(None) + + global _classic_worker_model + _classic_worker_model = None + + def on_strategy_start(self): + """ + See WorkerModel.on_strategy_start(). + """ + + def on_strategy_complete(self): + """ + See WorkerModel.on_strategy_complete(). + """ + + def get_binding(self, inventory_name): + """ + See WorkerModel.get_binding(). 
+        """
+        if self.broker is None:
+            self.broker = Broker()
+
+        path = self._listener_for_name(inventory_name)
+        if path != self.listener_path:
+            self._reconnect(path)
+
+        return ClassicBinding(self)
+
+    def on_binding_close(self):
+        if not self.broker:
+            return
+
+        self.broker.shutdown()
+        self.broker.join()
+        self.router = None
+        self.broker = None
+        self.parent = None
+        self.listener_path = None
+
+        # #420: Ansible executes "meta" actions in the top-level process,
+        # meaning "reset_connection" will cause :class:`mitogen.core.Latch`
+        # FDs to be cached and erroneously shared by children on subsequent
+        # WorkerProcess forks. To handle that, call on_fork() to ensure any
+        # shared state is discarded.
+        # #490: only attempt to clean up when it's known that some resources
+        # exist to clean up, otherwise a later __del__ double-call to close()
+        # due to GC at a random moment may obliterate an unrelated
+        # Connection's related resources.
+        mitogen.fork.on_fork()
+
+
+class MuxProcess(object):
+    """
+    Implement a subprocess forked from the Ansible top-level, as a safe place
+    to contain the Mitogen IO multiplexer thread, keeping its use of the
+    logging package (and the logging package's heavy use of locks) far away
+    from os.fork(), which is used continuously by the multiprocessing package
+    in the top-level process.
+
+    The problem with running the multiplexer in that process is that, should
+    the multiplexer thread be emitting a log entry (and holding its lock) at
+    the point of fork, then in the child the first attempt to log any entry
+    using the same handler will deadlock, since in the memory image the child
+    received, the lock will always be marked held.
+
+    See https://bugs.python.org/issue6721 for a thorough description of the
+    class of problems this worker is intended to avoid.
+    """
+    #: A copy of :data:`os.environ` at the time the multiplexer process was
+    #: started. It's used by mitogen_local.py to find changes made to the
+    #: top-level environment (e.g. vars plugins -- issue #297) that must be
+    #: applied to locally executed commands and modules.
+    cls_original_env = None
+
+    def __init__(self, model, index):
+        #: :class:`ClassicWorkerModel` instance we were created by.
+        self.model = model
+        #: MuxProcess CPU index.
+        self.index = index
+        #: Individual path of this process.
+        self.path = mitogen.unix.make_socket_path()
+
+    def start(self):
+        self.pid = os.fork()
+        if self.pid:
+            # Wait for child to boot before continuing.
+            mitogen.core.io_op(self.model.parent_sock.recv, 1)
+            return
+
+        ansible_mitogen.logging.set_process_name('mux:' + str(self.index))
+        if setproctitle:
+            setproctitle.setproctitle('mitogen mux:%s (%s)' % (
+                self.index,
+                os.path.basename(self.path),
+            ))
+
+        self.model.parent_sock.close()
+        self.model.parent_sock = None
+        try:
+            try:
+                self.worker_main()
+            except Exception:
+                LOG.exception('worker_main() crashed')
+        finally:
+            sys.exit()
+
+    def worker_main(self):
+        """
+        The main function of the mux process: set up the Mitogen broker
+        thread and ansible_mitogen services, then sleep waiting for the
+        socket connected to the parent to be closed (indicating the parent
+        has died).
+        """
+        save_pid('mux')
+
+        # #623: MuxProcess ignores SIGINT because it wants to live until every
+        # Ansible worker process has been cleaned up by
+        # TaskQueueManager.cleanup(), otherwise harmless yet scary warnings
+        # about being unable to connect to MuxProcess could be printed.
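+        # Illustration only, assumed POSIX behaviour rather than code from
+        # this change: SIG_IGN dispositions are inherited across fork() and
+        # preserved across exec(), so the single call below also shields any
+        # helper processes the multiplexer goes on to spawn:
+        #
+        #     import signal
+        #     signal.signal(signal.SIGINT, signal.SIG_IGN)  # drop Ctrl-C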
+ signal.signal(signal.SIGINT, signal.SIG_IGN) + ansible_mitogen.logging.set_process_name('mux') + ansible_mitogen.affinity.policy.assign_muxprocess(self.index) + + self._setup_master() + self._setup_services() + + try: + # Let the parent know our listening socket is ready. + mitogen.core.io_op(self.model.child_sock.send, b('1')) + # Block until the socket is closed, which happens on parent exit. + mitogen.core.io_op(self.model.child_sock.recv, 1) + finally: + self.broker.shutdown() + self.broker.join() + + # Test frameworks living somewhere higher on the stack of the + # original parent process may try to catch sys.exit(), so do a C + # level exit instead. + os._exit(0) + + def _enable_router_debug(self): + if 'MITOGEN_ROUTER_DEBUG' in os.environ: + self.router.enable_debug() + + def _enable_stack_dumps(self): + secs = getenv_int('MITOGEN_DUMP_THREAD_STACKS', default=0) + if secs: + mitogen.debug.dump_to_logger(secs=secs) + + def _setup_master(self): + """ + Construct a Router, Broker, and mitogen.unix listener + """ + self.broker = mitogen.master.Broker(install_watcher=False) + self.router = mitogen.master.Router( + broker=self.broker, + max_message_size=MAX_MESSAGE_SIZE, + ) + _setup_responder(self.router.responder) + mitogen.core.listen(self.broker, 'shutdown', self._on_broker_shutdown) + mitogen.core.listen(self.broker, 'exit', self._on_broker_exit) + self.listener = mitogen.unix.Listener.build_stream( + router=self.router, + path=self.path, + backlog=C.DEFAULT_FORKS, + ) + self._enable_router_debug() + self._enable_stack_dumps() + + def _setup_services(self): + """ + Construct a ContextService and a thread to service requests for it + arriving from worker processes. + """ + self.pool = mitogen.service.Pool( + router=self.router, + size=getenv_int('MITOGEN_POOL_SIZE', default=32), + ) + setup_pool(self.pool) + + def _on_broker_shutdown(self): + """ + Respond to broker shutdown by shutting down the pool. Do not join on it + yet, since that would block the broker thread which then cannot clean + up pending handlers and connections, which is required for the threads + to exit gracefully. + """ + self.pool.stop(join=False) + + def _on_broker_exit(self): + """ + Respond to the broker thread about to exit by finally joining on the + pool. This is safe since pools only block in connection attempts, and + connection attempts fail with CancelledError when broker shutdown + begins. + """ + self.pool.join() diff --git a/ansible/plugins/mitogen-0.2.8-pre/ansible_mitogen/runner.py b/ansible/plugins/mitogen-0.2.9/ansible_mitogen/runner.py similarity index 94% rename from ansible/plugins/mitogen-0.2.8-pre/ansible_mitogen/runner.py rename to ansible/plugins/mitogen-0.2.9/ansible_mitogen/runner.py index 843ffe19a3c9a82c4bd851d73a20b45a3c45888d..064023442294607ed2db537e89edbb2308d0bdbd 100644 --- a/ansible/plugins/mitogen-0.2.8-pre/ansible_mitogen/runner.py +++ b/ansible/plugins/mitogen-0.2.9/ansible_mitogen/runner.py @@ -37,7 +37,6 @@ how to build arguments for it, preseed related data, etc. 
""" import atexit -import codecs import imp import os import re @@ -52,7 +51,6 @@ import mitogen.core import ansible_mitogen.target # TODO: circular import from mitogen.core import b from mitogen.core import bytes_partition -from mitogen.core import str_partition from mitogen.core import str_rpartition from mitogen.core import to_text @@ -104,12 +102,20 @@ iteritems = getattr(dict, 'iteritems', dict.items) LOG = logging.getLogger(__name__) -if mitogen.core.PY3: - shlex_split = shlex.split -else: - def shlex_split(s, comments=False): - return [mitogen.core.to_text(token) - for token in shlex.split(str(s), comments=comments)] +def shlex_split_b(s): + """ + Use shlex.split() to split characters in some single-byte encoding, without + knowing what that encoding is. The input is bytes, the output is a list of + bytes. + """ + assert isinstance(s, mitogen.core.BytesType) + if mitogen.core.PY3: + return [ + t.encode('latin1') + for t in shlex.split(s.decode('latin1'), comments=True) + ] + + return [t for t in shlex.split(s, comments=True)] class TempFileWatcher(object): @@ -165,13 +171,19 @@ class EnvironmentFileWatcher(object): A more robust future approach may simply be to arrange for the persistent interpreter to restart when a change is detected. """ + # We know nothing about the character set of /etc/environment or the + # process environment. + environ = getattr(os, 'environb', os.environ) + def __init__(self, path): self.path = os.path.expanduser(path) #: Inode data at time of last check. self._st = self._stat() #: List of inherited keys appearing to originated from this file. - self._keys = [key for key, value in self._load() - if value == os.environ.get(key)] + self._keys = [ + key for key, value in self._load() + if value == self.environ.get(key) + ] LOG.debug('%r installed; existing keys: %r', self, self._keys) def __repr__(self): @@ -185,7 +197,7 @@ class EnvironmentFileWatcher(object): def _load(self): try: - fp = codecs.open(self.path, 'r', encoding='utf-8') + fp = open(self.path, 'rb') try: return list(self._parse(fp)) finally: @@ -199,36 +211,36 @@ class EnvironmentFileWatcher(object): """ for line in fp: # ' #export foo=some var ' -> ['#export', 'foo=some var '] - bits = shlex_split(line, comments=True) - if (not bits) or bits[0].startswith('#'): + bits = shlex_split_b(line) + if (not bits) or bits[0].startswith(b('#')): continue - if bits[0] == u'export': + if bits[0] == b('export'): bits.pop(0) - key, sep, value = str_partition(u' '.join(bits), u'=') + key, sep, value = bytes_partition(b(' ').join(bits), b('=')) if key and sep: yield key, value def _on_file_changed(self): LOG.debug('%r: file changed, reloading', self) for key, value in self._load(): - if key in os.environ: + if key in self.environ: LOG.debug('%r: existing key %r=%r exists, not setting %r', - self, key, os.environ[key], value) + self, key, self.environ[key], value) else: LOG.debug('%r: setting key %r to %r', self, key, value) self._keys.append(key) - os.environ[key] = value + self.environ[key] = value def _remove_existing(self): """ When a change is detected, remove keys that existed in the old file. """ for key in self._keys: - if key in os.environ: + if key in self.environ: LOG.debug('%r: removing old key %r', self, key) - del os.environ[key] + del self.environ[key] self._keys = [] def check(self): @@ -791,9 +803,10 @@ class NewStyleRunner(ScriptRunner): #: path => new-style module bytecode. 
_code_by_path = {} - def __init__(self, module_map, **kwargs): + def __init__(self, module_map, py_module_name, **kwargs): super(NewStyleRunner, self).__init__(**kwargs) self.module_map = module_map + self.py_module_name = py_module_name def _setup_imports(self): """ @@ -930,9 +943,22 @@ class NewStyleRunner(ScriptRunner): self._handle_magic_exception(mod, sys.exc_info()[1]) raise + def _get_module_package(self): + """ + Since Ansible 2.9 __package__ must be set in accordance with an + approximation of the original package hierarchy, so that relative + imports function correctly. + """ + pkg, sep, modname = str_rpartition(self.py_module_name, '.') + if not sep: + return None + if mitogen.core.PY3: + return pkg + return pkg.encode() + def _run(self): mod = types.ModuleType(self.main_module_name) - mod.__package__ = None + mod.__package__ = self._get_module_package() # Some Ansible modules use __file__ to find the Ansiballz temporary # directory. We must provide some temporary path in __file__, but we # don't want to pointlessly write the module to disk when it never diff --git a/ansible/plugins/mitogen-0.2.8-pre/ansible_mitogen/services.py b/ansible/plugins/mitogen-0.2.9/ansible_mitogen/services.py similarity index 93% rename from ansible/plugins/mitogen-0.2.8-pre/ansible_mitogen/services.py rename to ansible/plugins/mitogen-0.2.9/ansible_mitogen/services.py index a7c0e46f3e7d1b64f262e2ec137911f34ea32ef9..52171903da5887f70fa157b9f9a459f0c9fa47db 100644 --- a/ansible/plugins/mitogen-0.2.8-pre/ansible_mitogen/services.py +++ b/ansible/plugins/mitogen-0.2.9/ansible_mitogen/services.py @@ -156,20 +156,41 @@ class ContextService(mitogen.service.Service): @mitogen.service.expose(mitogen.service.AllowParents()) @mitogen.service.arg_spec({ - 'context': mitogen.core.Context + 'stack': list, }) - def reset(self, context): + def reset(self, stack): """ Return a reference, forcing close and discard of the underlying connection. Used for 'meta: reset_connection' or when some other error is detected. + + :returns: + :data:`True` if a connection was found to discard, otherwise + :data:`False`. """ - LOG.debug('%r.reset(%r)', self, context) - self._lock.acquire() - try: + LOG.debug('%r.reset(%r)', self, stack) + + l = mitogen.core.Latch() + context = None + with self._lock: + for i, spec in enumerate(stack): + key = key_from_dict(via=context, **spec) + response = self._response_by_key.get(key) + if response is None: + LOG.debug('%r: could not find connection to shut down; ' + 'failed at hop %d', self, i) + return False + + context = response['context'] + + mitogen.core.listen(context, 'disconnect', l.put) self._shutdown_unlocked(context) - finally: - self._lock.release() + + # The timeout below is to turn a hang into a crash in case there is any + # possible race between 'disconnect' signal subscription, and the child + # abruptly disconnecting. + l.get(timeout=30.0) + return True @mitogen.service.expose(mitogen.service.AllowParents()) @mitogen.service.arg_spec({ @@ -180,7 +201,7 @@ class ContextService(mitogen.service.Service): Return a reference, making it eligable for recycling once its reference count reaches zero. 
""" - LOG.debug('%r.put(%r)', self, context) + LOG.debug('decrementing reference count for %r', context) self._lock.acquire() try: if self._refs_by_context.get(context, 0) == 0: @@ -326,7 +347,8 @@ class ContextService(mitogen.service.Service): ) def _send_module_forwards(self, context): - self.router.responder.forward_modules(context, self.ALWAYS_PRELOAD) + if hasattr(self.router.responder, 'forward_modules'): + self.router.responder.forward_modules(context, self.ALWAYS_PRELOAD) _candidate_temp_dirs = None @@ -372,7 +394,7 @@ class ContextService(mitogen.service.Service): try: method = getattr(self.router, spec['method']) except AttributeError: - raise Error('unsupported method: %(transport)s' % spec) + raise Error('unsupported method: %(method)s' % spec) context = method(via=via, unidirectional=True, **spec['kwargs']) if via and spec.get('enable_lru'): @@ -443,7 +465,7 @@ class ContextService(mitogen.service.Service): @mitogen.service.arg_spec({ 'stack': list }) - def get(self, msg, stack): + def get(self, stack): """ Return a Context referring to an established connection with the given configuration, establishing new connections as necessary. diff --git a/ansible/plugins/mitogen-0.2.8-pre/ansible_mitogen/strategy.py b/ansible/plugins/mitogen-0.2.9/ansible_mitogen/strategy.py similarity index 69% rename from ansible/plugins/mitogen-0.2.8-pre/ansible_mitogen/strategy.py rename to ansible/plugins/mitogen-0.2.9/ansible_mitogen/strategy.py index 01dff285401dcbd07c1daffffc8ffec77d4e8556..d82e61120b583916bf9753d36e97351ab334ed75 100644 --- a/ansible/plugins/mitogen-0.2.8-pre/ansible_mitogen/strategy.py +++ b/ansible/plugins/mitogen-0.2.9/ansible_mitogen/strategy.py @@ -27,10 +27,16 @@ # POSSIBILITY OF SUCH DAMAGE. from __future__ import absolute_import +import distutils.version import os import signal import threading +try: + import setproctitle +except ImportError: + setproctitle = None + import mitogen.core import ansible_mitogen.affinity import ansible_mitogen.loaders @@ -47,8 +53,8 @@ except ImportError: Sentinel = None -ANSIBLE_VERSION_MIN = '2.3' -ANSIBLE_VERSION_MAX = '2.8' +ANSIBLE_VERSION_MIN = (2, 3) +ANSIBLE_VERSION_MAX = (2, 9) NEW_VERSION_MSG = ( "Your Ansible version (%s) is too recent. The most recent version\n" "supported by Mitogen for Ansible is %s.x. Please check the Mitogen\n" @@ -71,13 +77,15 @@ def _assert_supported_release(): an unsupported Ansible release. """ v = ansible.__version__ + if not isinstance(v, tuple): + v = tuple(distutils.version.LooseVersion(v).version) - if v[:len(ANSIBLE_VERSION_MIN)] < ANSIBLE_VERSION_MIN: + if v[:2] < ANSIBLE_VERSION_MIN: raise ansible.errors.AnsibleError( OLD_VERSION_MSG % (v, ANSIBLE_VERSION_MIN) ) - if v[:len(ANSIBLE_VERSION_MAX)] > ANSIBLE_VERSION_MAX: + if v[:2] > ANSIBLE_VERSION_MAX: raise ansible.errors.AnsibleError( NEW_VERSION_MSG % (ansible.__version__, ANSIBLE_VERSION_MAX) ) @@ -119,13 +127,15 @@ def wrap_action_loader__get(name, *args, **kwargs): the use of shell fragments wherever possible. This is used instead of static subclassing as it generalizes to third party - action modules outside the Ansible tree. + action plugins outside the Ansible tree. 
""" get_kwargs = {'class_only': True} + if name in ('fetch',): + name = 'mitogen_' + name if ansible.__version__ >= '2.8': get_kwargs['collection_list'] = kwargs.pop('collection_list', None) - klass = action_loader__get(name, **get_kwargs) + klass = ansible_mitogen.loaders.action_loader__get(name, **get_kwargs) if klass: bases = (ansible_mitogen.mixins.ActionModuleMixin, klass) adorned_klass = type(str(name), bases, {}) @@ -134,22 +144,44 @@ def wrap_action_loader__get(name, *args, **kwargs): return adorned_klass(*args, **kwargs) +REDIRECTED_CONNECTION_PLUGINS = ( + 'buildah', + 'docker', + 'kubectl', + 'jail', + 'local', + 'lxc', + 'lxd', + 'machinectl', + 'setns', + 'ssh', +) + + def wrap_connection_loader__get(name, *args, **kwargs): """ - While the strategy is active, rewrite connection_loader.get() calls for - some transports into requests for a compatible Mitogen transport. + While a Mitogen strategy is active, rewrite connection_loader.get() calls + for some transports into requests for a compatible Mitogen transport. """ - if name in ('buildah', 'docker', 'kubectl', 'jail', 'local', - 'lxc', 'lxd', 'machinectl', 'setns', 'ssh'): + if name in REDIRECTED_CONNECTION_PLUGINS: name = 'mitogen_' + name - return connection_loader__get(name, *args, **kwargs) + + return ansible_mitogen.loaders.connection_loader__get(name, *args, **kwargs) -def wrap_worker__run(*args, **kwargs): +def wrap_worker__run(self): """ - While the strategy is active, rewrite connection_loader.get() calls for - some transports into requests for a compatible Mitogen transport. + While a Mitogen strategy is active, trap WorkerProcess.run() calls and use + the opportunity to set the worker's name in the process list and log + output, activate profiling if requested, and bind the worker to a specific + CPU. """ + if setproctitle: + setproctitle.setproctitle('worker:%s task:%s' % ( + self._host.name, + self._task.action, + )) + # Ignore parent's attempts to murder us when we still need to write # profiling output. if mitogen.core._profile_hook.__name__ != '_profile_hook': @@ -158,16 +190,69 @@ def wrap_worker__run(*args, **kwargs): ansible_mitogen.logging.set_process_name('task') ansible_mitogen.affinity.policy.assign_worker() return mitogen.core._profile_hook('WorkerProcess', - lambda: worker__run(*args, **kwargs) + lambda: worker__run(self) ) +class AnsibleWrappers(object): + """ + Manage add/removal of various Ansible runtime hooks. + """ + def _add_plugin_paths(self): + """ + Add the Mitogen plug-in directories to the ModuleLoader path, avoiding + the need for manual configuration. + """ + base_dir = os.path.join(os.path.dirname(__file__), 'plugins') + ansible_mitogen.loaders.connection_loader.add_directory( + os.path.join(base_dir, 'connection') + ) + ansible_mitogen.loaders.action_loader.add_directory( + os.path.join(base_dir, 'action') + ) + + def _install_wrappers(self): + """ + Install our PluginLoader monkey patches and update global variables + with references to the real functions. + """ + ansible_mitogen.loaders.action_loader.get = wrap_action_loader__get + ansible_mitogen.loaders.connection_loader.get = wrap_connection_loader__get + + global worker__run + worker__run = ansible.executor.process.worker.WorkerProcess.run + ansible.executor.process.worker.WorkerProcess.run = wrap_worker__run + + def _remove_wrappers(self): + """ + Uninstall the PluginLoader monkey patches. 
+        """
+        ansible_mitogen.loaders.action_loader.get = (
+            ansible_mitogen.loaders.action_loader__get
+        )
+        ansible_mitogen.loaders.connection_loader.get = (
+            ansible_mitogen.loaders.connection_loader__get
+        )
+        ansible.executor.process.worker.WorkerProcess.run = worker__run
+
+    def install(self):
+        self._add_plugin_paths()
+        self._install_wrappers()
+
+    def remove(self):
+        self._remove_wrappers()
+
+
 class StrategyMixin(object):
     """
-    This mix-in enhances any built-in strategy by arranging for various Mitogen
-    services to be initialized in the Ansible top-level process, and for worker
-    processes to grow support for using those top-level services to communicate
-    with and execute modules on remote hosts.
+    This mix-in enhances any built-in strategy by arranging for an appropriate
+    WorkerModel instance to be constructed as necessary, or for the existing
+    one to be reused.
+
+    The WorkerModel in turn arranges for a connection multiplexer to be started
+    somewhere (by default in an external process), and for WorkerProcesses to
+    grow support for using those top-level services to communicate with remote
+    hosts.
 
     Mitogen:
 
@@ -185,18 +270,19 @@ class StrategyMixin(object):
         services, review the Standard Handles section of the How It Works
         guide in the documentation.
 
-        A ContextService is installed as a message handler in the master
-        process and run on a private thread. It is responsible for accepting
-        requests to establish new SSH connections from worker processes, and
-        ensuring precisely one connection exists and is reused for subsequent
-        playbook steps. The service presently runs in a single thread, so to
-        begin with, new SSH connections are serialized.
+        A ContextService is installed as a message handler in the connection
+        multiplexer subprocess and run on a private thread. It is responsible
+        for accepting requests to establish new SSH connections from worker
+        processes, and ensuring precisely one connection exists and is reused
+        for subsequent playbook steps. The service presently runs in a single
+        thread, so to begin with, new SSH connections are serialized.
 
         Finally a mitogen.unix listener is created through which WorkerProcess
-        can establish a connection back into the master process, in order to
-        avail of ContextService. A UNIX listener socket is necessary as there
-        is no more sane mechanism to arrange for IPC between the Router in the
-        master process, and the corresponding Router in the worker process.
+        can establish a connection back into the connection multiplexer, in
+        order to avail of ContextService. A UNIX listener socket is necessary
+        as there is no more sane mechanism to arrange for IPC between the
+        Router in the connection multiplexer, and the corresponding Router in
+        the worker process.
 
     Ansible:
 
@@ -204,10 +290,10 @@
         connection and action plug-ins.
 
         For connection plug-ins, if the desired method is "local" or "ssh", it
-        is redirected to the "mitogen" connection plug-in. That plug-in
-        implements communication via a UNIX socket connection to the top-level
-        Ansible process, and uses ContextService running in the top-level
-        process to actually establish and manage the connection.
+        is redirected to one of the "mitogen_*" connection plug-ins. That
+        plug-in implements communication via a UNIX socket connection to the
+        connection multiplexer process, and uses ContextService running there
+        to establish a persistent connection to the target.
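+
+        (A usage sketch, not part of the original docstring: with the
+        redirection active, an unmodified invocation such as
+        ``ansible-playbook -c ssh site.yml`` is actually served by the
+        bundled ``mitogen_ssh`` connection plug-in, so every SSH connection
+        passes through the multiplexer.)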
For action plug-ins, the original class is looked up as usual, but a new subclass is created dynamically in order to mix-in @@ -223,43 +309,6 @@ class StrategyMixin(object): remote process, all the heavy lifting of transferring the action module and its dependencies are automatically handled by Mitogen. """ - def _install_wrappers(self): - """ - Install our PluginLoader monkey patches and update global variables - with references to the real functions. - """ - global action_loader__get - action_loader__get = ansible_mitogen.loaders.action_loader.get - ansible_mitogen.loaders.action_loader.get = wrap_action_loader__get - - global connection_loader__get - connection_loader__get = ansible_mitogen.loaders.connection_loader.get - ansible_mitogen.loaders.connection_loader.get = wrap_connection_loader__get - - global worker__run - worker__run = ansible.executor.process.worker.WorkerProcess.run - ansible.executor.process.worker.WorkerProcess.run = wrap_worker__run - - def _remove_wrappers(self): - """ - Uninstall the PluginLoader monkey patches. - """ - ansible_mitogen.loaders.action_loader.get = action_loader__get - ansible_mitogen.loaders.connection_loader.get = connection_loader__get - ansible.executor.process.worker.WorkerProcess.run = worker__run - - def _add_plugin_paths(self): - """ - Add the Mitogen plug-in directories to the ModuleLoader path, avoiding - the need for manual configuration. - """ - base_dir = os.path.join(os.path.dirname(__file__), 'plugins') - ansible_mitogen.loaders.connection_loader.add_directory( - os.path.join(base_dir, 'connection') - ) - ansible_mitogen.loaders.action_loader.add_directory( - os.path.join(base_dir, 'action') - ) def _queue_task(self, host, task, task_vars, play_context): """ @@ -290,20 +339,35 @@ class StrategyMixin(object): play_context=play_context, ) + def _get_worker_model(self): + """ + In classic mode a single :class:`WorkerModel` exists, which manages + references and configuration of the associated connection multiplexer + process. + """ + return ansible_mitogen.process.get_classic_worker_model() + def run(self, iterator, play_context, result=0): """ - Arrange for a mitogen.master.Router to be available for the duration of - the strategy's real run() method. + Wrap :meth:`run` to ensure requisite infrastructure and modifications + are configured for the duration of the call. 
""" _assert_supported_release() - - ansible_mitogen.process.MuxProcess.start() - run = super(StrategyMixin, self).run - self._add_plugin_paths() - self._install_wrappers() + wrappers = AnsibleWrappers() + self._worker_model = self._get_worker_model() + ansible_mitogen.process.set_worker_model(self._worker_model) try: - return mitogen.core._profile_hook('Strategy', - lambda: run(iterator, play_context) - ) + self._worker_model.on_strategy_start() + try: + wrappers.install() + try: + run = super(StrategyMixin, self).run + return mitogen.core._profile_hook('Strategy', + lambda: run(iterator, play_context) + ) + finally: + wrappers.remove() + finally: + self._worker_model.on_strategy_complete() finally: - self._remove_wrappers() + ansible_mitogen.process.set_worker_model(None) diff --git a/ansible/plugins/mitogen-0.2.8-pre/ansible_mitogen/target.py b/ansible/plugins/mitogen-0.2.9/ansible_mitogen/target.py similarity index 100% rename from ansible/plugins/mitogen-0.2.8-pre/ansible_mitogen/target.py rename to ansible/plugins/mitogen-0.2.9/ansible_mitogen/target.py diff --git a/ansible/plugins/mitogen-0.2.8-pre/ansible_mitogen/transport_config.py b/ansible/plugins/mitogen-0.2.9/ansible_mitogen/transport_config.py similarity index 100% rename from ansible/plugins/mitogen-0.2.8-pre/ansible_mitogen/transport_config.py rename to ansible/plugins/mitogen-0.2.9/ansible_mitogen/transport_config.py diff --git a/ansible/plugins/mitogen-0.2.8-pre/dev_requirements.txt b/ansible/plugins/mitogen-0.2.9/dev_requirements.txt similarity index 100% rename from ansible/plugins/mitogen-0.2.8-pre/dev_requirements.txt rename to ansible/plugins/mitogen-0.2.9/dev_requirements.txt diff --git a/ansible/plugins/mitogen-0.2.8-pre/mitogen/__init__.py b/ansible/plugins/mitogen-0.2.9/mitogen/__init__.py similarity index 97% rename from ansible/plugins/mitogen-0.2.8-pre/mitogen/__init__.py rename to ansible/plugins/mitogen-0.2.9/mitogen/__init__.py index 47fe4d382684e9bf2445612228127c4b3360a598..f18c5a900c63ef7cf7a6a1697f5915cc48972fea 100644 --- a/ansible/plugins/mitogen-0.2.8-pre/mitogen/__init__.py +++ b/ansible/plugins/mitogen-0.2.9/mitogen/__init__.py @@ -35,7 +35,7 @@ be expected. On the slave, it is built dynamically during startup. #: Library version as a tuple. -__version__ = (0, 2, 7) +__version__ = (0, 2, 9) #: This is :data:`False` in slave contexts. 
Previously it was used to prevent @@ -111,10 +111,10 @@ def main(log_level='INFO', profiling=_default_profiling): if profiling: mitogen.core.enable_profiling() mitogen.master.Router.profiling = profiling - utils.log_to_file(level=log_level) + mitogen.utils.log_to_file(level=log_level) return mitogen.core._profile_hook( 'app.main', - utils.run_with_router, + mitogen.utils.run_with_router, func, ) return wrapper diff --git a/ansible/plugins/mitogen-0.2.8-pre/mitogen/buildah.py b/ansible/plugins/mitogen-0.2.9/mitogen/buildah.py similarity index 76% rename from ansible/plugins/mitogen-0.2.8-pre/mitogen/buildah.py rename to ansible/plugins/mitogen-0.2.9/mitogen/buildah.py index eec415f3223dd3eced64dd9d10317522165a6125..f850234d66de99bc7e912805f7ce738d133cabce 100644 --- a/ansible/plugins/mitogen-0.2.8-pre/mitogen/buildah.py +++ b/ansible/plugins/mitogen-0.2.9/mitogen/buildah.py @@ -37,37 +37,37 @@ import mitogen.parent LOG = logging.getLogger(__name__) -class Stream(mitogen.parent.Stream): - child_is_immediate_subprocess = False - +class Options(mitogen.parent.Options): container = None username = None buildah_path = 'buildah' - # TODO: better way of capturing errors such as "No such container." - create_child_args = { - 'merge_stdio': True - } - - def construct(self, container=None, - buildah_path=None, username=None, - **kwargs): - assert container or image - super(Stream, self).construct(**kwargs) - if container: - self.container = container + def __init__(self, container=None, buildah_path=None, username=None, + **kwargs): + super(Options, self).__init__(**kwargs) + assert container is not None + self.container = container if buildah_path: self.buildah_path = buildah_path if username: self.username = username + +class Connection(mitogen.parent.Connection): + options_class = Options + child_is_immediate_subprocess = False + + # TODO: better way of capturing errors such as "No such container." + create_child_args = { + 'merge_stdio': True + } + def _get_name(self): - return u'buildah.' + self.container + return u'buildah.' 
+ self.options.container def get_boot_command(self): - args = [] - if self.username: - args += ['--user=' + self.username] - bits = [self.buildah_path, 'run'] + args + ['--', self.container] - - return bits + super(Stream, self).get_boot_command() + args = [self.options.buildah_path, 'run'] + if self.options.username: + args += ['--user=' + self.options.username] + args += ['--', self.options.container] + return args + super(Connection, self).get_boot_command() diff --git a/ansible/plugins/mitogen-0.2.8-pre/mitogen/compat/__init__.py b/ansible/plugins/mitogen-0.2.9/mitogen/compat/__init__.py similarity index 100% rename from ansible/plugins/mitogen-0.2.8-pre/mitogen/compat/__init__.py rename to ansible/plugins/mitogen-0.2.9/mitogen/compat/__init__.py diff --git a/ansible/plugins/mitogen-0.2.8-pre/mitogen/compat/pkgutil.py b/ansible/plugins/mitogen-0.2.9/mitogen/compat/pkgutil.py similarity index 99% rename from ansible/plugins/mitogen-0.2.8-pre/mitogen/compat/pkgutil.py rename to ansible/plugins/mitogen-0.2.9/mitogen/compat/pkgutil.py index 28e2aeade31bccb3ea64fa203a03639917038392..15eb2afa340045fb1ded64a7bf0d6ecf493cf4f3 100644 --- a/ansible/plugins/mitogen-0.2.8-pre/mitogen/compat/pkgutil.py +++ b/ansible/plugins/mitogen-0.2.9/mitogen/compat/pkgutil.py @@ -542,7 +542,8 @@ def extend_path(path, name): if os.path.isfile(pkgfile): try: f = open(pkgfile) - except IOError, msg: + except IOError: + msg = sys.exc_info()[1] sys.stderr.write("Can't open %s: %s\n" % (pkgfile, msg)) else: diff --git a/ansible/plugins/mitogen-0.2.8-pre/mitogen/compat/tokenize.py b/ansible/plugins/mitogen-0.2.9/mitogen/compat/tokenize.py similarity index 100% rename from ansible/plugins/mitogen-0.2.8-pre/mitogen/compat/tokenize.py rename to ansible/plugins/mitogen-0.2.9/mitogen/compat/tokenize.py diff --git a/ansible/plugins/mitogen-0.2.8-pre/mitogen/core.py b/ansible/plugins/mitogen-0.2.9/mitogen/core.py similarity index 69% rename from ansible/plugins/mitogen-0.2.8-pre/mitogen/core.py rename to ansible/plugins/mitogen-0.2.9/mitogen/core.py index ea83f9618e1a2b411e6ad285f0a9652de0234750..d8c57ba7802441ba9787cd16b5a1c7dc344710cb 100644 --- a/ansible/plugins/mitogen-0.2.8-pre/mitogen/core.py +++ b/ansible/plugins/mitogen-0.2.9/mitogen/core.py @@ -37,6 +37,7 @@ bootstrap implementation sent to every new slave context. import binascii import collections import encodings.latin_1 +import encodings.utf_8 import errno import fcntl import itertools @@ -49,6 +50,7 @@ import signal import socket import struct import sys +import syslog import threading import time import traceback @@ -102,10 +104,9 @@ LOG = logging.getLogger('mitogen') IOLOG = logging.getLogger('mitogen.io') IOLOG.setLevel(logging.INFO) -LATIN1_CODEC = encodings.latin_1.Codec() # str.encode() may take import lock. Deadlock possible if broker calls # .encode() on behalf of thread currently waiting for module. -UTF8_CODEC = encodings.latin_1.Codec() +LATIN1_CODEC = encodings.latin_1.Codec() _v = False _vv = False @@ -121,6 +122,7 @@ LOAD_MODULE = 107 FORWARD_MODULE = 108 DETACHING = 109 CALL_SERVICE = 110 +STUB_CALL_SERVICE = 111 #: Special value used to signal disconnection or the inability to route a #: message, when it appears in the `reply_to` field. Usually causes @@ -214,7 +216,8 @@ else: class Error(Exception): - """Base for all exceptions raised by Mitogen. + """ + Base for all exceptions raised by Mitogen. :param str fmt: Exception text, or format string if `args` is non-empty. 
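A minimal sketch of the portability idiom used in the pkgutil.py hunk above:
capturing the active exception via sys.exc_info() is the spelling of
"except IOError as e" that parses on Python 2.4 through 3.x alike. The
function name here is hypothetical and nothing beyond the standard library is
assumed.

import sys

def read_first_line(path):
    try:
        fp = open(path)
    except IOError:
        msg = sys.exc_info()[1]   # portable on Python 2.4 through 3.x
        sys.stderr.write("Can't open %s: %s\n" % (path, msg))
        return None
    try:
        return fp.readline()
    finally:
        fp.close()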
@@ -230,14 +233,18 @@ class Error(Exception): class LatchError(Error): - """Raised when an attempt is made to use a :class:`mitogen.core.Latch` - that has been marked closed.""" + """ + Raised when an attempt is made to use a :class:`mitogen.core.Latch` that + has been marked closed. + """ pass class Blob(BytesType): - """A serializable bytes subclass whose content is summarized in repr() - output, making it suitable for logging binary data.""" + """ + A serializable bytes subclass whose content is summarized in repr() output, + making it suitable for logging binary data. + """ def __repr__(self): return '[blob: %d bytes]' % len(self) @@ -246,8 +253,10 @@ class Blob(BytesType): class Secret(UnicodeType): - """A serializable unicode subclass whose content is masked in repr() - output, making it suitable for logging passwords.""" + """ + A serializable unicode subclass whose content is masked in repr() output, + making it suitable for logging passwords. + """ def __repr__(self): return '[secret]' @@ -281,7 +290,7 @@ class Kwargs(dict): def __init__(self, dct): for k, v in dct.iteritems(): if type(k) is unicode: - k, _ = UTF8_CODEC.encode(k) + k, _ = encodings.utf_8.encode(k) self[k] = v def __repr__(self): @@ -321,30 +330,42 @@ def _unpickle_call_error(s): class ChannelError(Error): - """Raised when a channel dies or has been closed.""" + """ + Raised when a channel dies or has been closed. + """ remote_msg = 'Channel closed by remote end.' local_msg = 'Channel closed by local end.' class StreamError(Error): - """Raised when a stream cannot be established.""" + """ + Raised when a stream cannot be established. + """ pass class TimeoutError(Error): - """Raised when a timeout occurs on a stream.""" + """ + Raised when a timeout occurs on a stream. + """ pass def to_text(o): - """Coerce `o` to Unicode by decoding it from UTF-8 if it is an instance of + """ + Coerce `o` to Unicode by decoding it from UTF-8 if it is an instance of :class:`bytes`, otherwise pass it to the :class:`str` constructor. The - returned object is always a plain :class:`str`, any subclass is removed.""" + returned object is always a plain :class:`str`, any subclass is removed. + """ if isinstance(o, BytesType): return o.decode('utf-8') return UnicodeType(o) +# Documented in api.rst to work around Sphinx limitation. +now = getattr(time, 'monotonic', time.time) + + # Python 2.4 try: any @@ -378,41 +399,84 @@ else: return _partition(s, sep, s.find) or (s, '', '') +def _has_parent_authority(context_id): + return ( + (context_id == mitogen.context_id) or + (context_id in mitogen.parent_ids) + ) + def has_parent_authority(msg, _stream=None): - """Policy function for use with :class:`Receiver` and + """ + Policy function for use with :class:`Receiver` and :meth:`Router.add_handler` that requires incoming messages to originate from a parent context, or on a :class:`Stream` whose :attr:`auth_id <Stream.auth_id>` has been set to that of a parent context or the current - context.""" - return (msg.auth_id == mitogen.context_id or - msg.auth_id in mitogen.parent_ids) + context. + """ + return _has_parent_authority(msg.auth_id) + + +def _signals(obj, signal): + return ( + obj.__dict__ + .setdefault('_signals', {}) + .setdefault(signal, []) + ) def listen(obj, name, func): """ - Arrange for `func(*args, **kwargs)` to be invoked when the named signal is + Arrange for `func()` to be invoked when signal `name` is fired on `obj`. 
+ """ + _signals(obj, name).append(func) + + +def unlisten(obj, name, func): + """ + Remove `func()` from the list of functions invoked when signal `name` is fired by `obj`. + + :raises ValueError: + `func()` was not on the list. """ - signals = vars(obj).setdefault('_signals', {}) - signals.setdefault(name, []).append(func) + _signals(obj, name).remove(func) def fire(obj, name, *args, **kwargs): """ Arrange for `func(*args, **kwargs)` to be invoked for every function - registered for the named signal on `obj`. + registered for signal `name` on `obj`. """ - signals = vars(obj).get('_signals', {}) - for func in signals.get(name, ()): + for func in _signals(obj, name): func(*args, **kwargs) def takes_econtext(func): + """ + Decorator that marks a function or class method to automatically receive a + kwarg named `econtext`, referencing the + :class:`mitogen.core.ExternalContext` active in the context in which the + function is being invoked in. The decorator is only meaningful when the + function is invoked via :data:`CALL_FUNCTION <mitogen.core.CALL_FUNCTION>`. + + When the function is invoked directly, `econtext` must still be passed to + it explicitly. + """ func.mitogen_takes_econtext = True return func def takes_router(func): + """ + Decorator that marks a function or class method to automatically receive a + kwarg named `router`, referencing the :class:`mitogen.core.Router` active + in the context in which the function is being invoked in. The decorator is + only meaningful when the function is invoked via :data:`CALL_FUNCTION + <mitogen.core.CALL_FUNCTION>`. + + When the function is invoked directly, `router` must still be passed to it + explicitly. + """ func.mitogen_takes_router = True return func @@ -432,35 +496,42 @@ def is_blacklisted_import(importer, fullname): def set_cloexec(fd): - """Set the file descriptor `fd` to automatically close on - :func:`os.execve`. This has no effect on file descriptors inherited across - :func:`os.fork`, they must be explicitly closed through some other means, - such as :func:`mitogen.fork.on_fork`.""" + """ + Set the file descriptor `fd` to automatically close on :func:`os.execve`. + This has no effect on file descriptors inherited across :func:`os.fork`, + they must be explicitly closed through some other means, such as + :func:`mitogen.fork.on_fork`. + """ flags = fcntl.fcntl(fd, fcntl.F_GETFD) - assert fd > 2 + assert fd > 2, 'fd %r <= 2' % (fd,) fcntl.fcntl(fd, fcntl.F_SETFD, flags | fcntl.FD_CLOEXEC) def set_nonblock(fd): - """Set the file descriptor `fd` to non-blocking mode. For most underlying - file types, this causes :func:`os.read` or :func:`os.write` to raise + """ + Set the file descriptor `fd` to non-blocking mode. For most underlying file + types, this causes :func:`os.read` or :func:`os.write` to raise :class:`OSError` with :data:`errno.EAGAIN` rather than block the thread - when the underlying kernel buffer is exhausted.""" + when the underlying kernel buffer is exhausted. + """ flags = fcntl.fcntl(fd, fcntl.F_GETFL) fcntl.fcntl(fd, fcntl.F_SETFL, flags | os.O_NONBLOCK) def set_block(fd): - """Inverse of :func:`set_nonblock`, i.e. cause `fd` to block the thread - when the underlying kernel buffer is exhausted.""" + """ + Inverse of :func:`set_nonblock`, i.e. cause `fd` to block the thread when + the underlying kernel buffer is exhausted. 
+ """ flags = fcntl.fcntl(fd, fcntl.F_GETFL) fcntl.fcntl(fd, fcntl.F_SETFL, flags & ~os.O_NONBLOCK) def io_op(func, *args): - """Wrap `func(*args)` that may raise :class:`select.error`, - :class:`IOError`, or :class:`OSError`, trapping UNIX error codes relating - to disconnection and retry events in various subsystems: + """ + Wrap `func(*args)` that may raise :class:`select.error`, :class:`IOError`, + or :class:`OSError`, trapping UNIX error codes relating to disconnection + and retry events in various subsystems: * When a signal is delivered to the process on Python 2, system call retry is signalled through :data:`errno.EINTR`. The invocation is automatically @@ -491,7 +562,8 @@ def io_op(func, *args): class PidfulStreamHandler(logging.StreamHandler): - """A :class:`logging.StreamHandler` subclass used when + """ + A :class:`logging.StreamHandler` subclass used when :meth:`Router.enable_debug() <mitogen.master.Router.enable_debug>` has been called, or the `debug` parameter was specified during context construction. Verifies the process ID has not changed on each call to :meth:`emit`, @@ -568,7 +640,7 @@ def _real_profile_hook(name, func, *args): return func(*args) finally: path = _profile_fmt % { - 'now': int(1e6 * time.time()), + 'now': int(1e6 * now()), 'identity': name, 'pid': os.getpid(), 'ext': '%s' @@ -596,6 +668,43 @@ def import_module(modname): return __import__(modname, None, None, ['']) +def pipe(): + """ + Create a UNIX pipe pair using :func:`os.pipe`, wrapping the returned + descriptors in Python file objects in order to manage their lifetime and + ensure they are closed when their last reference is discarded and they have + not been closed explicitly. + """ + rfd, wfd = os.pipe() + return ( + os.fdopen(rfd, 'rb', 0), + os.fdopen(wfd, 'wb', 0) + ) + + +def iter_split(buf, delim, func): + """ + Invoke `func(s)` for each `delim`-delimited chunk in the potentially large + `buf`, avoiding intermediate lists and quadratic string operations. Return + the trailing undelimited portion of `buf`, or any unprocessed portion of + `buf` after `func(s)` returned :data:`False`. + + :returns: + `(trailer, cont)`, where `cont` is :data:`False` if the last call to + `func(s)` returned :data:`False`. + """ + dlen = len(delim) + start = 0 + cont = True + while cont: + nl = buf.find(delim, start) + if nl == -1: + break + cont = not func(buf[start:nl]) is False + start = nl + dlen + return buf[start:], cont + + class Py24Pickler(py_pickle.Pickler): """ Exceptions were classic classes until Python 2.5. Sadly for 2.4, cPickle @@ -687,6 +796,10 @@ class Message(object): #: the :class:`mitogen.select.Select` interface. Defaults to :data:`None`. receiver = None + HEADER_FMT = '>hLLLLLL' + HEADER_LEN = struct.calcsize(HEADER_FMT) + HEADER_MAGIC = 0x4d49 # 'MI' + def __init__(self, **kwargs): """ Construct a message from from the supplied `kwargs`. 
:attr:`src_id` and @@ -695,7 +808,15 @@ class Message(object): self.src_id = mitogen.context_id self.auth_id = mitogen.context_id vars(self).update(kwargs) - assert isinstance(self.data, BytesType) + assert isinstance(self.data, BytesType), 'Message data is not Bytes' + + def pack(self): + return ( + struct.pack(self.HEADER_FMT, self.HEADER_MAGIC, self.dst_id, + self.src_id, self.auth_id, self.handle, + self.reply_to or 0, len(self.data)) + + self.data + ) def _unpickle_context(self, context_id, name): return _unpickle_context(context_id, name, router=self.router) @@ -708,8 +829,10 @@ class Message(object): return s def _find_global(self, module, func): - """Return the class implementing `module_name.class_name` or raise - `StreamError` if the module is not whitelisted.""" + """ + Return the class implementing `module_name.class_name` or raise + `StreamError` if the module is not whitelisted. + """ if module == __name__: if func == '_unpickle_call_error' or func == 'CallError': return _unpickle_call_error @@ -744,7 +867,7 @@ class Message(object): """ Syntax helper to construct a dead message. """ - kwargs['data'], _ = UTF8_CODEC.encode(reason or u'') + kwargs['data'], _ = encodings.utf_8.encode(reason or u'') return cls(reply_to=IS_DEAD, **kwargs) @classmethod @@ -785,7 +908,8 @@ class Message(object): if msg.handle: (self.router or router).route(msg) else: - LOG.debug('Message.reply(): discarding due to zero handle: %r', msg) + LOG.debug('dropping reply to message with no return address: %r', + msg) if PY3: UNPICKLER_KWARGS = {'encoding': 'bytes'} @@ -824,7 +948,11 @@ class Message(object): unpickler.find_global = self._find_global try: # Must occur off the broker thread. - obj = unpickler.load() + try: + obj = unpickler.load() + except: + LOG.error('raw pickle was: %r', self.data) + raise self._unpickled = obj except (TypeError, ValueError): e = sys.exc_info()[1] @@ -851,7 +979,7 @@ class Sender(object): Senders may be serialized, making them convenient to wire up data flows. See :meth:`mitogen.core.Receiver.to_sender` for more information. - :param Context context: + :param mitogen.core.Context context: Context to send messages to. :param int dst_handle: Destination handle to send messages to. @@ -893,7 +1021,7 @@ def _unpickle_sender(router, context_id, dst_handle): if not (isinstance(router, Router) and isinstance(context_id, (int, long)) and context_id >= 0 and isinstance(dst_handle, (int, long)) and dst_handle > 0): - raise TypeError('cannot unpickle Sender: bad input') + raise TypeError('cannot unpickle Sender: bad input or missing router') return Sender(Context(router, context_id), dst_handle) @@ -920,11 +1048,11 @@ class Receiver(object): routed to the context due to disconnection, and ignores messages that did not originate from the respondent context. """ - #: If not :data:`None`, a reference to a function invoked as - #: `notify(receiver)` when a new message is delivered to this receiver. The - #: function is invoked on the broker thread, therefore it must not block. - #: Used by :class:`mitogen.select.Select` to implement waiting on multiple - #: receivers. + #: If not :data:`None`, a function invoked as `notify(receiver)` after a + #: message has been received. The function is invoked on :class:`Broker` + #: thread, therefore it must not block. Used by + #: :class:`mitogen.select.Select` to efficiently implement waiting on + #: multiple event sources. 
notify = None raise_channelerror = True @@ -997,13 +1125,32 @@ class Receiver(object): self.handle = None self._latch.close() + def size(self): + """ + Return the number of items currently buffered. + + As with :class:`Queue.Queue`, `0` may be returned even though a + subsequent call to :meth:`get` will succeed, since a message may be + posted at any moment between :meth:`size` and :meth:`get`. + + As with :class:`Queue.Queue`, `>0` may be returned even though a + subsequent call to :meth:`get` will block, since another waiting thread + may be woken at any moment between :meth:`size` and :meth:`get`. + + :raises LatchError: + The underlying latch has already been marked closed. + """ + return self._latch.size() + def empty(self): """ - Return :data:`True` if calling :meth:`get` would block. + Return `size() == 0`. + + .. deprecated:: 0.2.8 + Use :meth:`size` instead. - As with :class:`Queue.Queue`, :data:`True` may be returned even though - a subsequent call to :meth:`get` will succeed, since a message may be - posted at any moment between :meth:`empty` and :meth:`get`. + :raises LatchError: + The latch has already been marked closed. """ return self._latch.empty() @@ -1052,7 +1199,10 @@ class Channel(Sender, Receiver): A channel inherits from :class:`mitogen.core.Sender` and `mitogen.core.Receiver` to provide bidirectional functionality. - This class is incomplete and obsolete, it will be removed in Mitogen 0.3. + .. deprecated:: 0.2.0 + This class is incomplete and obsolete, it will be removed in Mitogen + 0.3. + Channels were an early attempt at syntax sugar. It is always easier to pass around unidirectional pairs of senders/receivers, even though the syntax is baroque: @@ -1130,6 +1280,7 @@ class Importer(object): ALWAYS_BLACKLIST += ['cStringIO'] def __init__(self, router, context, core_src, whitelist=(), blacklist=()): + self._log = logging.getLogger('mitogen.importer') self._context = context self._present = {'mitogen': self.MITOGEN_PKG_CONTENT} self._lock = threading.Lock() @@ -1178,7 +1329,7 @@ class Importer(object): ) def __repr__(self): - return 'Importer()' + return 'Importer' def builtin_find_module(self, fullname): # imp.find_module() will always succeed for __main__, because it is a @@ -1203,18 +1354,18 @@ class Importer(object): _tls.running = True try: - _v and LOG.debug('%r.find_module(%r)', self, fullname) + #_v and self._log.debug('Python requested %r', fullname) fullname = to_text(fullname) pkgname, dot, _ = str_rpartition(fullname, '.') pkg = sys.modules.get(pkgname) if pkgname and getattr(pkg, '__loader__', None) is not self: - LOG.debug('%r: %r is submodule of a package we did not load', - self, fullname) + self._log.debug('%s is submodule of a locally loaded package', + fullname) return None suffix = fullname[len(pkgname+dot):] if pkgname and suffix not in self._present.get(pkgname, ()): - LOG.debug('%r: master doesn\'t know %r', self, fullname) + self._log.debug('%s has no submodule %s', pkgname, suffix) return None # #114: explicitly whitelisted prefixes override any @@ -1225,10 +1376,9 @@ class Importer(object): try: self.builtin_find_module(fullname) - _vv and IOLOG.debug('%r: %r is available locally', - self, fullname) + _vv and self._log.debug('%r is available locally', fullname) except ImportError: - _vv and IOLOG.debug('find_module(%r) returning self', fullname) + _vv and self._log.debug('we will try to load %r', fullname) return self finally: del _tls.running @@ -1279,7 +1429,7 @@ class Importer(object): tup = msg.unpickle() fullname = tup[0] - _v and 
LOG.debug('Importer._on_load_module(%r)', fullname) + _v and self._log.debug('received %s', fullname) self._lock.acquire() try: @@ -1303,10 +1453,12 @@ class Importer(object): if not present: funcs = self._callbacks.get(fullname) if funcs is not None: - _v and LOG.debug('_request_module(%r): in flight', fullname) + _v and self._log.debug('existing request for %s in flight', + fullname) funcs.append(callback) else: - _v and LOG.debug('_request_module(%r): new request', fullname) + _v and self._log.debug('sending new %s request to parent', + fullname) self._callbacks[fullname] = [callback] self._context.send( Message(data=b(fullname), handle=GET_MODULE) @@ -1319,7 +1471,7 @@ class Importer(object): def load_module(self, fullname): fullname = to_text(fullname) - _v and LOG.debug('Importer.load_module(%r)', fullname) + _v and self._log.debug('requesting %s', fullname) self._refuse_imports(fullname) event = threading.Event() @@ -1343,7 +1495,7 @@ class Importer(object): if mod.__package__ and not PY3: # 2.x requires __package__ to be exactly a string. - mod.__package__, _ = UTF8_CODEC.encode(mod.__package__) + mod.__package__, _ = encodings.utf_8.encode(mod.__package__) source = self.get_source(fullname) try: @@ -1385,11 +1537,30 @@ class Importer(object): class LogHandler(logging.Handler): + """ + A :class:`logging.Handler` subclass that arranges for :data:`FORWARD_LOG` + messages to be sent to a parent context in response to logging messages + generated by the current context. This is installed by default in child + contexts during bootstrap, so that :mod:`logging` events can be viewed and + managed centrally in the master process. + + The handler is initially *corked* after construction, such that it buffers + messages until :meth:`uncork` is called. This allows logging to be + installed prior to communication with the target being available, and + avoids any possible race where early log messages might be dropped. + + :param mitogen.core.Context context: + The context to send log messages towards. At present this is always + the master process. + """ def __init__(self, context): logging.Handler.__init__(self) self.context = context self.local = threading.local() self._buffer = [] + # Private synchronization is needed while corked, to ensure no + # concurrent call to _send() exists during uncork(). + self._buffer_lock = threading.Lock() def uncork(self): """ @@ -1397,15 +1568,30 @@ class LogHandler(logging.Handler): possible to route messages, therefore messages are buffered until :meth:`uncork` is called by :class:`ExternalContext`. """ - self._send = self.context.send - for msg in self._buffer: - self._send(msg) - self._buffer = None + self._buffer_lock.acquire() + try: + self._send = self.context.send + for msg in self._buffer: + self._send(msg) + self._buffer = None + finally: + self._buffer_lock.release() def _send(self, msg): - self._buffer.append(msg) + self._buffer_lock.acquire() + try: + if self._buffer is None: + # uncork() may run concurrent to _send() + self._send(msg) + else: + self._buffer.append(msg) + finally: + self._buffer_lock.release() def emit(self, rec): + """ + Send a :data:`FORWARD_LOG` message towards the target context. 
+        """
         if rec.name == 'mitogen.io' or \
            getattr(self.local, 'in_emit', False):
             return
@@ -1422,45 +1608,372 @@
             self.local.in_emit = False
 
 
+class Stream(object):
+    """
+    A :class:`Stream` is one readable and optionally one writeable file
+    descriptor (represented by :class:`Side`) aggregated alongside an
+    associated :class:`Protocol` that knows how to respond to IO readiness
+    events for those descriptors.
+
+    Streams are registered with :class:`Broker`, and callbacks are invoked on
+    the broker thread in response to IO activity. When registered using
+    :meth:`Broker.start_receive` or :meth:`Broker._start_transmit`, the broker
+    may call any of :meth:`on_receive`, :meth:`on_transmit`,
+    :meth:`on_shutdown` or :meth:`on_disconnect`.
+
+    It is expected that the :class:`Protocol` associated with a stream will
+    change over its life. For example during connection setup, the initial
+    protocol may be :class:`mitogen.parent.BootstrapProtocol` that knows how to
+    enter SSH and sudo passwords and transmit the :mod:`mitogen.core` source to
+    the target, before handing off to :class:`MitogenProtocol` when the target
+    process is initialized.
+
+    Streams connecting to children are in turn aggregated by
+    :class:`mitogen.parent.Connection`, which contains additional logic for
+    managing any child process, and a reference to any separate ``stderr``
+    :class:`Stream` connected to that process.
+    """
+    #: A :class:`Side` representing the stream's receive file descriptor.
+    receive_side = None
+
+    #: A :class:`Side` representing the stream's transmit file descriptor.
+    transmit_side = None
+
+    #: A :class:`Protocol` representing the protocol active on the stream.
+    protocol = None
+
+    #: In parents, the :class:`mitogen.parent.Connection` instance.
+    conn = None
+
+    #: The stream name. This is used in the :meth:`__repr__` output and in
+    #: any log messages; it may be any descriptive string.
+    name = u'default'
+
+    def set_protocol(self, protocol):
+        """
+        Bind a :class:`Protocol` to this stream, by updating
+        :attr:`Protocol.stream` to refer to this stream, and updating this
+        stream's :attr:`Stream.protocol` to refer to the protocol. Any prior
+        protocol's :attr:`Protocol.stream` is set to :data:`None`.
+        """
+        if self.protocol:
+            self.protocol.stream = None
+        self.protocol = protocol
+        self.protocol.stream = self
+
+    def accept(self, rfp, wfp):
+        """
+        Attach a pair of file objects to :attr:`receive_side` and
+        :attr:`transmit_side`, after wrapping them in :class:`Side` instances.
+        :class:`Side` will call :func:`set_nonblock` and :func:`set_cloexec`
+        on the underlying file descriptors during construction.
+
+        The same file object may be used for both sides. The default
+        :meth:`on_disconnect` handles the possibility that only one
+        descriptor may need to be closed.
+
+        :param file rfp:
+            The file object to receive from.
+        :param file wfp:
+            The file object to transmit to.
+        """
+        self.receive_side = Side(self, rfp)
+        self.transmit_side = Side(self, wfp)
+
+    def __repr__(self):
+        return "<Stream %s #%04x>" % (self.name, id(self) & 0xffff,)
+
+    def on_receive(self, broker):
+        """
+        Invoked by :class:`Broker` when the stream's :attr:`receive_side` has
+        been marked readable using :meth:`Broker.start_receive` and the broker
+        has detected the associated file descriptor is ready for reading.
+
+        Subclasses must implement this if they are registered using
+        :meth:`Broker.start_receive`, and the method must invoke
+        :meth:`on_disconnect` if reading produces an empty string.
+
+        The default implementation reads :attr:`Protocol.read_size` bytes and
+        passes the resulting bytestring to :meth:`Protocol.on_receive`. If the
+        bytestring is 0 bytes, invokes :meth:`on_disconnect` instead.
+        """
+        buf = self.receive_side.read(self.protocol.read_size)
+        if not buf:
+            LOG.debug('%r: empty read, disconnecting', self.receive_side)
+            return self.on_disconnect(broker)
+
+        self.protocol.on_receive(broker, buf)
+
+    def on_transmit(self, broker):
+        """
+        Invoked by :class:`Broker` when the stream's :attr:`transmit_side` has
+        been marked writeable using :meth:`Broker._start_transmit` and the
+        broker has detected the associated file descriptor is ready for
+        writing.
+
+        Subclasses must implement this if they are ever registered with
+        :meth:`Broker._start_transmit`.
+
+        The default implementation invokes :meth:`Protocol.on_transmit`.
+        """
+        self.protocol.on_transmit(broker)
+
+    def on_shutdown(self, broker):
+        """
+        Invoked by :meth:`Broker.shutdown` to allow the stream time to
+        shut down gracefully.
+
+        The default implementation emits a ``shutdown`` signal before
+        invoking :meth:`on_disconnect`.
+        """
+        fire(self, 'shutdown')
+        self.protocol.on_shutdown(broker)
+
+    def on_disconnect(self, broker):
+        """
+        Invoked by :class:`Broker` to force disconnect the stream during
+        shutdown, invoked by the default :meth:`on_shutdown` implementation,
+        and usually invoked by any subclass :meth:`on_receive` implementation
+        in response to a 0-byte read.
+
+        The base implementation fires a ``disconnect`` event, then closes
+        :attr:`receive_side` and :attr:`transmit_side` after unregistering the
+        stream from the broker.
+        """
+        fire(self, 'disconnect')
+        self.protocol.on_disconnect(broker)
+
+
+class Protocol(object):
+    """
+    Implement the program behaviour associated with activity on a
+    :class:`Stream`. The protocol in use may vary over a stream's life, for
+    example to allow :class:`mitogen.parent.BootstrapProtocol` to initialize
+    the connected child before handing it off to :class:`MitogenProtocol`. A
+    stream's active protocol is tracked in the :attr:`Stream.protocol`
+    attribute, and modified via :meth:`Stream.set_protocol`.
+
+    Protocols do not handle IO; they are entirely reliant on the interface
+    provided by :class:`Stream` and :class:`Side`, allowing the underlying IO
+    implementation to be replaced without modifying behavioural logic.
+    """
+    stream_class = Stream
+
+    #: The :class:`Stream` this protocol is currently bound to, or
+    #: :data:`None`.
+    stream = None
+
+    #: The size of the read buffer used by :class:`Stream` when this is the
+    #: active protocol for the stream.
+    read_size = CHUNK_SIZE
+
+    @classmethod
+    def build_stream(cls, *args, **kwargs):
+        stream = cls.stream_class()
+        stream.set_protocol(cls(*args, **kwargs))
+        return stream
+
+    def __repr__(self):
+        return '%s(%s)' % (
+            self.__class__.__name__,
+            self.stream and self.stream.name,
+        )
+
+    def on_shutdown(self, broker):
+        _v and LOG.debug('%r: shutting down', self)
+        self.stream.on_disconnect(broker)
+
+    def on_disconnect(self, broker):
+        # Normally both sides share an FD, so it is important that
+        # transmit_side is deregistered from Poller before closing the receive
+        # side, as pollers like epoll and kqueue unregister all events on FD
+        # close, causing a subsequent attempt to unregister the transmit side
+        # to fail.
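A minimal standalone sketch of the epoll behaviour the comment above relies on (illustrative only, not part of the patch, and Linux-specific): the kernel silently drops a registration when its file descriptor is closed, so a later attempt to unregister the same descriptor fails.

import os
import select

r, w = os.pipe()
poller = select.epoll()
poller.register(r, select.EPOLLIN)
os.close(r)                 # the kernel drops the registration here
try:
    poller.unregister(r)    # too late: the descriptor no longer exists
except OSError as exc:
    print('unregister after close fails:', exc)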
+ LOG.debug('%r: disconnecting', self) + broker.stop_receive(self.stream) + if self.stream.transmit_side: + broker._stop_transmit(self.stream) + + self.stream.receive_side.close() + if self.stream.transmit_side: + self.stream.transmit_side.close() + + +class DelimitedProtocol(Protocol): + """ + Provide a :meth:`Protocol.on_receive` implementation for protocols that are + delimited by a fixed string, like text based protocols. Each message is + passed to :meth:`on_line_received` as it arrives, with incomplete messages + passed to :meth:`on_partial_line_received`. + + When emulating user input it is often necessary to respond to incomplete + lines, such as when a "Password: " prompt is sent. + :meth:`on_partial_line_received` may be called repeatedly with an + increasingly complete message. When a complete message is finally received, + :meth:`on_line_received` will be called once for it before the buffer is + discarded. + + If :func:`on_line_received` returns :data:`False`, remaining data is passed + unprocessed to the stream's current protocol's :meth:`on_receive`. This + allows switching from line-oriented to binary while the input buffer + contains both kinds of data. + """ + #: The delimiter. Defaults to newline. + delimiter = b('\n') + _trailer = b('') + + def on_receive(self, broker, buf): + _vv and IOLOG.debug('%r.on_receive()', self) + stream = self.stream + self._trailer, cont = mitogen.core.iter_split( + buf=self._trailer + buf, + delim=self.delimiter, + func=self.on_line_received, + ) + + if self._trailer: + if cont: + self.on_partial_line_received(self._trailer) + else: + assert stream.protocol is not self, \ + 'stream protocol is no longer %r' % (self,) + stream.protocol.on_receive(broker, self._trailer) + + def on_line_received(self, line): + """ + Receive a line from the stream. + + :param bytes line: + The encoded line, excluding the delimiter. + :returns: + :data:`False` to indicate this invocation modified the stream's + active protocol, and any remaining buffered data should be passed + to the new protocol's :meth:`on_receive` method. + + Any other return value is ignored. + """ + pass + + def on_partial_line_received(self, line): + """ + Receive a trailing unterminated partial line from the stream. + + :param bytes line: + The encoded partial line. + """ + pass + + +class BufferedWriter(object): + """ + Implement buffered output while avoiding quadratic string operations. This + is currently constructed by each protocol, in future it may become fixed + for each stream instead. + """ + def __init__(self, broker, protocol): + self._broker = broker + self._protocol = protocol + self._buf = collections.deque() + self._len = 0 + + def write(self, s): + """ + Transmit `s` immediately, falling back to enqueuing it and marking the + stream writeable if no OS buffer space is available. + """ + if not self._len: + # Modifying epoll/Kqueue state is expensive, as are needless broker + # loops. Rather than wait for writeability, just write immediately, + # and fall back to the broker loop on error or full buffer. + try: + n = self._protocol.stream.transmit_side.write(s) + if n: + if n == len(s): + return + s = s[n:] + except OSError: + pass + + self._broker._start_transmit(self._protocol.stream) + self._buf.append(s) + self._len += len(s) + + def on_transmit(self, broker): + """ + Respond to stream writeability by retrying previously buffered + :meth:`write` calls. 
+ """ + if self._buf: + buf = self._buf.popleft() + written = self._protocol.stream.transmit_side.write(buf) + if not written: + _v and LOG.debug('disconnected during write to %r', self) + self._protocol.stream.on_disconnect(broker) + return + elif written != len(buf): + self._buf.appendleft(BufferType(buf, written)) + + _vv and IOLOG.debug('transmitted %d bytes to %r', written, self) + self._len -= written + + if not self._buf: + broker._stop_transmit(self._protocol.stream) + + class Side(object): """ - Represent a single side of a :class:`BasicStream`. This exists to allow - streams implemented using unidirectional (e.g. UNIX pipe) and bidirectional - (e.g. UNIX socket) file descriptors to operate identically. + Represent one side of a :class:`Stream`. This allows unidirectional (e.g. + pipe) and bidirectional (e.g. socket) streams to operate identically. + + Sides are also responsible for tracking the open/closed state of the + underlying FD, preventing erroneous duplicate calls to :func:`os.close` due + to duplicate :meth:`Stream.on_disconnect` calls, which would otherwise risk + silently succeeding by closing an unrelated descriptor. For this reason, it + is crucial only one file object exists per unique descriptor. :param mitogen.core.Stream stream: The stream this side is associated with. - - :param int fd: - Underlying file descriptor. - + :param object fp: + The file or socket object managing the underlying file descriptor. Any + object may be used that supports `fileno()` and `close()` methods. + :param bool cloexec: + If :data:`True`, the descriptor has its :data:`fcntl.FD_CLOEXEC` flag + enabled using :func:`fcntl.fcntl`. :param bool keep_alive: - Value for :attr:`keep_alive` - - During construction, the file descriptor has its :data:`os.O_NONBLOCK` flag - enabled using :func:`fcntl.fcntl`. + If :data:`True`, the continued existence of this side will extend the + shutdown grace period until it has been unregistered from the broker. + :param bool blocking: + If :data:`False`, the descriptor has its :data:`os.O_NONBLOCK` flag + enabled using :func:`fcntl.fcntl`. """ _fork_refs = weakref.WeakValueDictionary() + closed = False - def __init__(self, stream, fd, cloexec=True, keep_alive=True, blocking=False): + def __init__(self, stream, fp, cloexec=True, keep_alive=True, blocking=False): #: The :class:`Stream` for which this is a read or write side. self.stream = stream + # File or socket object responsible for the lifetime of its underlying + # file descriptor. + self.fp = fp #: Integer file descriptor to perform IO on, or :data:`None` if - #: :meth:`close` has been called. - self.fd = fd - self.closed = False + #: :meth:`close` has been called. This is saved separately from the + #: file object, since :meth:`file.fileno` cannot be called on it after + #: it has been closed. + self.fd = fp.fileno() #: If :data:`True`, causes presence of this side in #: :class:`Broker`'s active reader set to defer shutdown until the #: side is disconnected. 
self.keep_alive = keep_alive self._fork_refs[id(self)] = self if cloexec: - set_cloexec(fd) + set_cloexec(self.fd) if not blocking: - set_nonblock(fd) + set_nonblock(self.fd) def __repr__(self): - return '<Side of %r fd %s>' % (self.stream, self.fd) + return '<Side of %s fd %s>' % ( + self.stream.name or repr(self.stream), + self.fd + ) @classmethod def _on_fork(cls): @@ -1471,13 +1984,13 @@ class Side(object): def close(self): """ - Call :func:`os.close` on :attr:`fd` if it is not :data:`None`, + Call :meth:`file.close` on :attr:`fp` if it is not :data:`None`, then set it to :data:`None`. """ + _vv and IOLOG.debug('%r.close()', self) if not self.closed: - _vv and IOLOG.debug('%r.close()', self) self.closed = True - os.close(self.fd) + self.fp.close() def read(self, n=CHUNK_SIZE): """ @@ -1490,7 +2003,7 @@ class Side(object): in a 0-sized read like a regular file. :returns: - Bytes read, or the empty to string to indicate disconnection was + Bytes read, or the empty string to indicate disconnection was detected. """ if self.closed: @@ -1499,7 +2012,7 @@ class Side(object): return b('') s, disconnected = io_op(os.read, self.fd, n) if disconnected: - LOG.debug('%r.read(): disconnected: %s', self, disconnected) + LOG.debug('%r: disconnected during read: %s', self, disconnected) return b('') return s @@ -1513,107 +2026,63 @@ class Side(object): Number of bytes written, or :data:`None` if disconnection was detected. """ - if self.closed or self.fd is None: - # Refuse to touch the handle after closed, it may have been reused - # by another thread. + if self.closed: + # Don't touch the handle after close, it may be reused elsewhere. return None written, disconnected = io_op(os.write, self.fd, s) if disconnected: - LOG.debug('%r.write(): disconnected: %s', self, disconnected) + LOG.debug('%r: disconnected during write: %s', self, disconnected) return None return written -class BasicStream(object): - #: A :class:`Side` representing the stream's receive file descriptor. - receive_side = None - - #: A :class:`Side` representing the stream's transmit file descriptor. - transmit_side = None - - def on_receive(self, broker): - """ - Called by :class:`Broker` when the stream's :attr:`receive_side` has - been marked readable using :meth:`Broker.start_receive` and the broker - has detected the associated file descriptor is ready for reading. - - Subclasses must implement this if :meth:`Broker.start_receive` is ever - called on them, and the method must call :meth:`on_disconect` if - reading produces an empty string. - """ - pass - - def on_transmit(self, broker): - """ - Called by :class:`Broker` when the stream's :attr:`transmit_side` - has been marked writeable using :meth:`Broker._start_transmit` and - the broker has detected the associated file descriptor is ready for - writing. - - Subclasses must implement this if :meth:`Broker._start_transmit` is - ever called on them. - """ - pass - - def on_shutdown(self, broker): - """ - Called by :meth:`Broker.shutdown` to allow the stream time to - gracefully shutdown. The base implementation simply called - :meth:`on_disconnect`. - """ - _v and LOG.debug('%r.on_shutdown()', self) - fire(self, 'shutdown') - self.on_disconnect(broker) - - def on_disconnect(self, broker): - """ - Called by :class:`Broker` to force disconnect the stream. The base - implementation simply closes :attr:`receive_side` and - :attr:`transmit_side` and unregisters the stream from the broker. 
-        """
-        LOG.debug('%r.on_disconnect()', self)
-        if self.receive_side:
-            broker.stop_receive(self)
-            self.receive_side.close()
-        if self.transmit_side:
-            broker._stop_transmit(self)
-            self.transmit_side.close()
-        fire(self, 'disconnect')
-
-
-class Stream(BasicStream):
+class MitogenProtocol(Protocol):
     """
-    :class:`BasicStream` subclass implementing mitogen's :ref:`stream
-    protocol <stream-protocol>`.
+    :class:`Protocol` implementing mitogen's :ref:`stream protocol
+    <stream-protocol>`.
     """
-    #: If not :data:`None`, :class:`Router` stamps this into
-    #: :attr:`Message.auth_id` of every message received on this stream.
-    auth_id = None
-
     #: If not :data:`False`, indicates the stream has :attr:`auth_id` set and
     #: its value is the same as :data:`mitogen.context_id` or appears in
     #: :data:`mitogen.parent_ids`.
     is_privileged = False
 
-    def __init__(self, router, remote_id, **kwargs):
+    #: Invoked as `on_message(stream, msg)` for each message received from
+    #: the peer.
+    on_message = None
+
+    def __init__(self, router, remote_id, auth_id=None,
+                 local_id=None, parent_ids=None):
         self._router = router
         self.remote_id = remote_id
-        self.name = u'default'
+        #: If not :data:`None`, :class:`Router` stamps this into
+        #: :attr:`Message.auth_id` of every message received on this stream.
+        self.auth_id = auth_id
+
+        if parent_ids is None:
+            parent_ids = mitogen.parent_ids
+        if local_id is None:
+            local_id = mitogen.context_id
+
+        self.is_privileged = (
+            (remote_id in parent_ids) or
+            auth_id in ([local_id] + parent_ids)
+        )
         self.sent_modules = set(['mitogen', 'mitogen.core'])
-        self.construct(**kwargs)
         self._input_buf = collections.deque()
-        self._output_buf = collections.deque()
         self._input_buf_len = 0
-        self._output_buf_len = 0
+        self._writer = BufferedWriter(router.broker, self)
+
         #: Routing records the dst_id of every message arriving from this
         #: stream. Any arriving DEL_ROUTE is rebroadcast for any such ID.
         self.egress_ids = set()
 
-    def construct(self):
-        pass
-
-    def _internal_receive(self, broker, buf):
+    def on_receive(self, broker, buf):
+        """
+        Handle the next complete message on the stream. Raise
+        :class:`StreamError` on failure.
+        """
+        _vv and IOLOG.debug('%r.on_receive()', self)
         if self._input_buf and self._input_buf_len < 128:
             self._input_buf[0] += buf
         else:
@@ -1623,60 +2092,45 @@ class Stream(BasicStream):
         while self._receive_one(broker):
             pass
 
-    def on_receive(self, broker):
-        """Handle the next complete message on the stream. Raise
-        :class:`StreamError` on failure."""
-        _vv and IOLOG.debug('%r.on_receive()', self)
-
-        buf = self.receive_side.read()
-        if not buf:
-            return self.on_disconnect(broker)
-
-        self._internal_receive(broker, buf)
-
-    HEADER_FMT = '>hLLLLLL'
-    HEADER_LEN = struct.calcsize(HEADER_FMT)
-    HEADER_MAGIC = 0x4d49  # 'MI'
-
     corrupt_msg = (
-        'Corruption detected: frame signature incorrect. This likely means '
-        'some external process is interfering with the connection. Received:'
+        '%s: Corruption detected: frame signature incorrect. This likely means'
+        ' some external process is interfering with the connection. 
Received:' '\n\n' '%r' ) def _receive_one(self, broker): - if self._input_buf_len < self.HEADER_LEN: + if self._input_buf_len < Message.HEADER_LEN: return False msg = Message() msg.router = self._router (magic, msg.dst_id, msg.src_id, msg.auth_id, msg.handle, msg.reply_to, msg_len) = struct.unpack( - self.HEADER_FMT, - self._input_buf[0][:self.HEADER_LEN], + Message.HEADER_FMT, + self._input_buf[0][:Message.HEADER_LEN], ) - if magic != self.HEADER_MAGIC: - LOG.error(self.corrupt_msg, self._input_buf[0][:2048]) - self.on_disconnect(broker) + if magic != Message.HEADER_MAGIC: + LOG.error(self.corrupt_msg, self.stream.name, self._input_buf[0][:2048]) + self.stream.on_disconnect(broker) return False if msg_len > self._router.max_message_size: - LOG.error('Maximum message size exceeded (got %d, max %d)', - msg_len, self._router.max_message_size) - self.on_disconnect(broker) + LOG.error('%r: Maximum message size exceeded (got %d, max %d)', + self, msg_len, self._router.max_message_size) + self.stream.on_disconnect(broker) return False - total_len = msg_len + self.HEADER_LEN + total_len = msg_len + Message.HEADER_LEN if self._input_buf_len < total_len: _vv and IOLOG.debug( '%r: Input too short (want %d, got %d)', - self, msg_len, self._input_buf_len - self.HEADER_LEN + self, msg_len, self._input_buf_len - Message.HEADER_LEN ) return False - start = self.HEADER_LEN + start = Message.HEADER_LEN prev_start = start remain = total_len bits = [] @@ -1691,7 +2145,7 @@ class Stream(BasicStream): msg.data = b('').join(bits) self._input_buf.appendleft(buf[prev_start+len(bit):]) self._input_buf_len -= total_len - self._router._async_route(msg, self) + self._router._async_route(msg, self.stream) return True def pending_bytes(self): @@ -1703,68 +2157,31 @@ class Stream(BasicStream): For an accurate result, this method should be called from the Broker thread, for example by using :meth:`Broker.defer_sync`. """ - return self._output_buf_len + return self._writer._len def on_transmit(self, broker): - """Transmit buffered messages.""" + """ + Transmit buffered messages. + """ _vv and IOLOG.debug('%r.on_transmit()', self) - - if self._output_buf: - buf = self._output_buf.popleft() - written = self.transmit_side.write(buf) - if not written: - _v and LOG.debug('%r.on_transmit(): disconnection detected', self) - self.on_disconnect(broker) - return - elif written != len(buf): - self._output_buf.appendleft(BufferType(buf, written)) - - _vv and IOLOG.debug('%r.on_transmit() -> len %d', self, written) - self._output_buf_len -= written - - if not self._output_buf: - broker._stop_transmit(self) + self._writer.on_transmit(broker) def _send(self, msg): _vv and IOLOG.debug('%r._send(%r)', self, msg) - pkt = struct.pack(self.HEADER_FMT, self.HEADER_MAGIC, msg.dst_id, - msg.src_id, msg.auth_id, msg.handle, - msg.reply_to or 0, len(msg.data)) + msg.data - - if not self._output_buf_len: - # Modifying epoll/Kqueue state is expensive, as are needless broker - # loops. Rather than wait for writeability, just write immediately, - # and fall back to the broker loop on error or full buffer. - try: - n = self.transmit_side.write(pkt) - if n: - if n == len(pkt): - return - pkt = pkt[n:] - except OSError: - pass - - self._router.broker._start_transmit(self) - self._output_buf.append(pkt) - self._output_buf_len += len(pkt) + self._writer.write(msg.pack()) def send(self, msg): - """Send `data` to `handle`, and tell the broker we have output. 
May - be called from any thread.""" + """ + Send `data` to `handle`, and tell the broker we have output. May be + called from any thread. + """ self._router.broker.defer(self._send, msg) def on_shutdown(self, broker): - """Override BasicStream behaviour of immediately disconnecting.""" - _v and LOG.debug('%r.on_shutdown(%r)', self, broker) - - def accept(self, rfd, wfd): - # TODO: what is this os.dup for? - self.receive_side = Side(self, os.dup(rfd)) - self.transmit_side = Side(self, os.dup(wfd)) - - def __repr__(self): - cls = type(self) - return "%s.%s('%s')" % (cls.__module__, cls.__name__, self.name) + """ + Disable :class:`Protocol` immediate disconnect behaviour. + """ + _v and LOG.debug('%r: shutting down', self) class Context(object): @@ -1783,28 +2200,27 @@ class Context(object): explicitly, as that method is deduplicating, and returns the only context instance :ref:`signals` will be raised on. - :param Router router: + :param mitogen.core.Router router: Router to emit messages through. :param int context_id: Context ID. :param str name: Context name. """ + name = None remote_name = None def __init__(self, router, context_id, name=None): self.router = router self.context_id = context_id - self.name = name + if name: + self.name = to_text(name) def __reduce__(self): - name = self.name - if name and not isinstance(name, UnicodeType): - name = UnicodeType(name, 'utf-8') - return _unpickle_context, (self.context_id, name) + return _unpickle_context, (self.context_id, self.name) def on_disconnect(self): - _v and LOG.debug('%r.on_disconnect()', self) + _v and LOG.debug('%r: disconnecting', self) fire(self, 'disconnect') def send_async(self, msg, persist=False): @@ -1825,24 +2241,21 @@ class Context(object): :class:`Receiver` configured to receive any replies sent to the message's `reply_to` handle. """ - if self.router.broker._thread == threading.currentThread(): # TODO - raise SystemError('Cannot making blocking call on broker thread') - receiver = Receiver(self.router, persist=persist, respondent=self) msg.dst_id = self.context_id msg.reply_to = receiver.handle - _v and LOG.debug('%r.send_async(%r)', self, msg) + _v and LOG.debug('sending message to %r: %r', self, msg) self.send(msg) return receiver def call_service_async(self, service_name, method_name, **kwargs): - _v and LOG.debug('%r.call_service_async(%r, %r, %r)', - self, service_name, method_name, kwargs) if isinstance(service_name, BytesType): service_name = service_name.encode('utf-8') elif not isinstance(service_name, UnicodeType): service_name = service_name.name() # Service.name() + _v and LOG.debug('calling service %s.%s of %r, args: %r', + service_name, method_name, self, kwargs) tup = (service_name, to_text(method_name), Kwargs(kwargs)) msg = Message.pickled(tup, handle=CALL_SERVICE) return self.send_async(msg) @@ -1946,7 +2359,7 @@ class Poller(object): self._wfds = {} def __repr__(self): - return '%s(%#x)' % (type(self).__name__, id(self)) + return '%s' % (type(self).__name__,) def _update(self, fd): """ @@ -2029,9 +2442,6 @@ class Poller(object): if gen and gen < self._generation: yield data - if timeout: - timeout *= 1000 - def poll(self, timeout=None): """ Block the calling thread until one or more FDs are ready for IO. @@ -2063,8 +2473,18 @@ class Latch(object): See :ref:`waking-sleeping-threads` for further discussion. """ + #: The :class:`Poller` implementation to use for waiting. 
Since the poller + #: will be very short-lived, we prefer :class:`mitogen.parent.PollPoller` + #: if it is available, or :class:`mitogen.core.Poller` otherwise, since + #: these implementations require no system calls to create, configure or + #: destroy. poller_class = Poller + #: If not :data:`None`, a function invoked as `notify(latch)` after a + #: successful call to :meth:`put`. The function is invoked on the + #: :meth:`put` caller's thread, which may be the :class:`Broker` thread, + #: therefore it must not block. Used by :class:`mitogen.select.Select` to + #: efficiently implement waiting on multiple event sources. notify = None # The _cls_ prefixes here are to make it crystal clear in the code which @@ -2117,19 +2537,17 @@ class Latch(object): finally: self._lock.release() - def empty(self): + def size(self): """ - Return :data:`True` if calling :meth:`get` would block. + Return the number of items currently buffered. - As with :class:`Queue.Queue`, :data:`True` may be returned even - though a subsequent call to :meth:`get` will succeed, since a - message may be posted at any moment between :meth:`empty` and - :meth:`get`. + As with :class:`Queue.Queue`, `0` may be returned even though a + subsequent call to :meth:`get` will succeed, since a message may be + posted at any moment between :meth:`size` and :meth:`get`. - As with :class:`Queue.Queue`, :data:`False` may be returned even - though a subsequent call to :meth:`get` will block, since another - waiting thread may be woken at any moment between :meth:`empty` and - :meth:`get`. + As with :class:`Queue.Queue`, `>0` may be returned even though a + subsequent call to :meth:`get` will block, since another waiting thread + may be woken at any moment between :meth:`size` and :meth:`get`. :raises LatchError: The latch has already been marked closed. @@ -2138,10 +2556,22 @@ class Latch(object): try: if self.closed: raise LatchError() - return len(self._queue) == 0 + return len(self._queue) finally: self._lock.release() + def empty(self): + """ + Return `size() == 0`. + + .. deprecated:: 0.2.8 + Use :meth:`size` instead. + + :raises LatchError: + The latch has already been marked closed. + """ + return self.size() == 0 + def _get_socketpair(self): """ Return an unused socketpair, creating one if none exist. @@ -2150,6 +2580,7 @@ class Latch(object): return self._cls_idle_socketpairs.pop() # pop() must be atomic except IndexError: rsock, wsock = socket.socketpair() + rsock.setblocking(False) set_cloexec(rsock.fileno()) set_cloexec(wsock.fileno()) self._cls_all_sockets.extend((rsock, wsock)) @@ -2219,14 +2650,13 @@ class Latch(object): :meth:`put` to write a byte to our socket pair. 
""" _vv and IOLOG.debug( - '%r._get_sleep(timeout=%r, block=%r, rfd=%d, wfd=%d)', + '%r._get_sleep(timeout=%r, block=%r, fd=%d/%d)', self, timeout, block, rsock.fileno(), wsock.fileno() ) e = None - woken = None try: - woken = list(poller.poll(timeout)) + list(poller.poll(timeout)) except Exception: e = sys.exc_info()[1] @@ -2234,11 +2664,19 @@ class Latch(object): try: i = self._sleeping.index((wsock, cookie)) del self._sleeping[i] - if not woken: - raise e or TimeoutError() - got_cookie = rsock.recv(self.COOKIE_SIZE) + try: + got_cookie = rsock.recv(self.COOKIE_SIZE) + except socket.error: + e2 = sys.exc_info()[1] + if e2.args[0] == errno.EAGAIN: + e = TimeoutError() + else: + e = e2 + self._cls_idle_socketpairs.append((rsock, wsock)) + if e: + raise e assert cookie == got_cookie, ( "Cookie incorrect; got %r, expected %r" \ @@ -2274,17 +2712,20 @@ class Latch(object): raise LatchError() self._queue.append(obj) + wsock = None if self._waking < len(self._sleeping): wsock, cookie = self._sleeping[self._waking] self._waking += 1 _vv and IOLOG.debug('%r.put() -> waking wfd=%r', self, wsock.fileno()) - self._wake(wsock, cookie) elif self.notify: self.notify(self) finally: self._lock.release() + if wsock: + self._wake(wsock, cookie) + def _wake(self, wsock, cookie): written, disconnected = io_op(os.write, wsock.fileno(), cookie) assert written == len(cookie) and not disconnected @@ -2297,30 +2738,31 @@ class Latch(object): ) -class Waker(BasicStream): +class Waker(Protocol): """ - :class:`BasicStream` subclass implementing the `UNIX self-pipe trick`_. - Used to wake the multiplexer when another thread needs to modify its state - (via a cross-thread function call). + :class:`Protocol` implementing the `UNIX self-pipe trick`_. Used to wake + :class:`Broker` when another thread needs to modify its state, by enqueing + a function call to run on the :class:`Broker` thread. .. _UNIX self-pipe trick: https://cr.yp.to/docs/selfpipe.html """ + read_size = 1 broker_ident = None + @classmethod + def build_stream(cls, broker): + stream = super(Waker, cls).build_stream(broker) + stream.accept(*pipe()) + return stream + def __init__(self, broker): self._broker = broker - self._lock = threading.Lock() - self._deferred = [] - - rfd, wfd = os.pipe() - self.receive_side = Side(self, rfd) - self.transmit_side = Side(self, wfd) + self._deferred = collections.deque() def __repr__(self): - return 'Waker(%r rfd=%r, wfd=%r)' % ( - self._broker, - self.receive_side and self.receive_side.fd, - self.transmit_side and self.transmit_side.fd, + return 'Waker(fd=%r/%r)' % ( + self.stream.receive_side and self.stream.receive_side.fd, + self.stream.transmit_side and self.stream.transmit_side.fd, ) @property @@ -2328,34 +2770,27 @@ class Waker(BasicStream): """ Prevent immediate Broker shutdown while deferred functions remain. """ - self._lock.acquire() - try: - return len(self._deferred) - finally: - self._lock.release() + return len(self._deferred) - def on_receive(self, broker): + def on_receive(self, broker, buf): """ Drain the pipe and fire callbacks. Since :attr:`_deferred` is synchronized, :meth:`defer` and :meth:`on_receive` can conspire to ensure only one byte needs to be pending regardless of queue length. 
""" _vv and IOLOG.debug('%r.on_receive()', self) - self._lock.acquire() - try: - self.receive_side.read(1) - deferred = self._deferred - self._deferred = [] - finally: - self._lock.release() + while True: + try: + func, args, kwargs = self._deferred.popleft() + except IndexError: + return - for func, args, kwargs in deferred: try: func(*args, **kwargs) except Exception: LOG.exception('defer() crashed: %r(*%r, **%r)', func, args, kwargs) - self._broker.shutdown() + broker.shutdown() def _wake(self): """ @@ -2363,10 +2798,10 @@ class Waker(BasicStream): teardown, the FD may already be closed, so ignore EBADF. """ try: - self.transmit_side.write(b(' ')) + self.stream.transmit_side.write(b(' ')) except OSError: e = sys.exc_info()[1] - if e.args[0] != errno.EBADF: + if e.args[0] in (errno.EBADF, errno.EWOULDBLOCK): raise broker_shutdown_msg = ( @@ -2390,65 +2825,67 @@ class Waker(BasicStream): if self._broker._exitted: raise Error(self.broker_shutdown_msg) - _vv and IOLOG.debug('%r.defer() [fd=%r]', self, self.transmit_side.fd) - self._lock.acquire() - try: - if not self._deferred: - self._wake() - self._deferred.append((func, args, kwargs)) - finally: - self._lock.release() + _vv and IOLOG.debug('%r.defer() [fd=%r]', self, + self.stream.transmit_side.fd) + self._deferred.append((func, args, kwargs)) + self._wake() -class IoLogger(BasicStream): +class IoLoggerProtocol(DelimitedProtocol): """ - :class:`BasicStream` subclass that sets up redirection of a standard - UNIX file descriptor back into the Python :mod:`logging` package. - """ - _buf = '' - - def __init__(self, broker, name, dest_fd): - self._broker = broker - self._name = name - self._rsock, self._wsock = socket.socketpair() - os.dup2(self._wsock.fileno(), dest_fd) - set_cloexec(self._wsock.fileno()) + Attached to one end of a socket pair whose other end overwrites one of the + standard ``stdout`` or ``stderr`` file descriptors in a child context. + Received data is split up into lines, decoded as UTF-8 and logged to the + :mod:`logging` package as either the ``stdout`` or ``stderr`` logger. + Logging in child contexts is in turn forwarded to the master process using + :class:`LogHandler`. + """ + @classmethod + def build_stream(cls, name, dest_fd): + """ + Even though the file descriptor `dest_fd` will hold the opposite end of + the socket open, we must keep a separate dup() of it (i.e. wsock) in + case some code decides to overwrite `dest_fd` later, which would + prevent break :meth:`on_shutdown` from calling :meth:`shutdown() + <socket.socket.shutdown>` on it. + """ + rsock, wsock = socket.socketpair() + os.dup2(wsock.fileno(), dest_fd) + stream = super(IoLoggerProtocol, cls).build_stream(name) + stream.name = name + stream.accept(rsock, wsock) + return stream + + def __init__(self, name): self._log = logging.getLogger(name) # #453: prevent accidental log initialization in a child creating a # feedback loop. 
         self._log.propagate = False
         self._log.handlers = logging.getLogger().handlers[:]
 
-        self.receive_side = Side(self, self._rsock.fileno())
-        self.transmit_side = Side(self, dest_fd, cloexec=False, blocking=True)
-        self._broker.start_receive(self)
-
-    def __repr__(self):
-        return '<IoLogger %s>' % (self._name,)
-
-    def _log_lines(self):
-        while self._buf.find('\n') != -1:
-            line, _, self._buf = str_partition(self._buf, '\n')
-            self._log.info('%s', line.rstrip('\n'))
-
     def on_shutdown(self, broker):
-        """Shut down the write end of the logging socket."""
-        _v and LOG.debug('%r.on_shutdown()', self)
+        """
+        Shut down the write end of the socket, preventing any further writes
+        to it by this process, or by any subprocess that inherited it. This
+        allows any remaining kernel-buffered data to be drained during
+        graceful shutdown without the buffer continuously refilling due to
+        some out of control child process.
+        """
+        _v and LOG.debug('%r: shutting down', self)
         if not IS_WSL:
-            # #333: WSL generates invalid readiness indication on shutdown()
-            self._wsock.shutdown(socket.SHUT_WR)
-        self._wsock.close()
-        self.transmit_side.close()
-
-    def on_receive(self, broker):
-        _vv and IOLOG.debug('%r.on_receive()', self)
-        buf = self.receive_side.read()
-        if not buf:
-            return self.on_disconnect(broker)
+            # #333: WSL generates invalid readiness indication on shutdown().
+            # This modifies the *kernel object* inherited by children, causing
+            # EPIPE on subsequent writes to any dupped FD in any process. The
+            # read side can then drain completely of prior buffered data.
+            self.stream.transmit_side.fp.shutdown(socket.SHUT_WR)
+        self.stream.transmit_side.close()
 
-        self._buf += buf.decode('latin1')
-        self._log_lines()
+    def on_line_received(self, line):
+        """
+        Decode the received line as UTF-8 and pass it to the logging framework.
+        """
+        self._log.info('%s', line.decode('utf-8', 'replace'))
 
 
 class Router(object):
@@ -2460,7 +2897,12 @@ class Router(object):
     **Note:** This is the somewhat limited core version of the Router class
     used by child contexts. The master subclass is documented below this one.
     """
+    #: The :class:`mitogen.core.Context` subclass to use when constructing new
+    #: :class:`Context` objects in :meth:`myself` and :meth:`context_by_id`.
+    #: Permits :class:`Router` subclasses to extend the :class:`Context`
+    #: interface, as done in :class:`mitogen.parent.Router`.
     context_class = Context
+
     max_message_size = 128 * 1048576
 
     #: When :data:`True`, permit children to only communicate with the current
@@ -2480,6 +2922,18 @@ class Router(object):
     #: parameter.
     unidirectional = False
 
+    duplicate_handle_msg = 'cannot register a handle that already exists'
+    refused_msg = 'refused by policy'
+    invalid_handle_msg = 'invalid handle'
+    too_large_msg = 'message too large (max %d bytes)'
+    respondent_disconnect_msg = 'the respondent Context has disconnected'
+    broker_exit_msg = 'Broker has exited'
+    no_route_msg = 'no route to %r, my ID is %r'
+    unidirectional_msg = (
+        'routing mode prevents forward of message from context %d to '
+        'context %d via context %d'
+    )
+
     def __init__(self, broker):
         self.broker = broker
         listen(broker, 'exit', self._on_broker_exit)
@@ -2518,12 +2972,13 @@ class Router(object):
         corresponding :attr:`_context_by_id` member. This is replaced by
         :class:`mitogen.parent.RouteMonitor` in an upgraded context. 
""" - LOG.error('%r._on_del_route() %r', self, msg) if msg.is_dead: return target_id_s, _, name = bytes_partition(msg.data, b(':')) target_id = int(target_id_s, 10) + LOG.error('%r: deleting route to %s (%d)', + self, to_text(name), target_id) context = self._context_by_id.get(target_id) if context: fire(context, 'disconnect') @@ -2546,16 +3001,21 @@ class Router(object): for context in notify: context.on_disconnect() - broker_exit_msg = 'Broker has exitted' - def _on_broker_exit(self): + """ + Called prior to broker exit, informs callbacks registered with + :meth:`add_handler` the connection is dead. + """ + _v and LOG.debug('%r: broker has exitted', self) while self._handle_map: _, (_, func, _, _) = self._handle_map.popitem() func(Message.dead(self.broker_exit_msg)) def myself(self): """ - Return a :class:`Context` referring to the current process. + Return a :class:`Context` referring to the current process. Since + :class:`Context` is serializable, this is convenient to use in remote + function call parameter lists. """ return self.context_class( router=self, @@ -2565,8 +3025,25 @@ class Router(object): def context_by_id(self, context_id, via_id=None, create=True, name=None): """ - Messy factory/lookup function to find a context by its ID, or construct - it. This will eventually be replaced by a more sensible interface. + Return or construct a :class:`Context` given its ID. An internal + mapping of ID to the canonical :class:`Context` representing that ID, + so that :ref:`signals` can be raised. + + This may be called from any thread, lookup and construction are atomic. + + :param int context_id: + The context ID to look up. + :param int via_id: + If the :class:`Context` does not already exist, set its + :attr:`Context.via` to the :class:`Context` matching this ID. + :param bool create: + If the :class:`Context` does not already exist, create it. + :param str name: + If the :class:`Context` does not already exist, set its name. + + :returns: + :class:`Context`, or return :data:`None` if `create` is + :data:`False` and no :class:`Context` previously existed. """ context = self._context_by_id.get(context_id) if context: @@ -2595,7 +3072,8 @@ class Router(object): the stream's receive side to the I/O multiplexer. This method remains public while the design has not yet settled. """ - _v and LOG.debug('register(%r, %r)', context, stream) + _v and LOG.debug('%s: registering %r to stream %r', + self, context, stream) self._write_lock.acquire() try: self._stream_by_id[context.context_id] = stream @@ -2610,7 +3088,13 @@ class Router(object): """ Return the :class:`Stream` that should be used to communicate with `dst_id`. If a specific route for `dst_id` is not known, a reference to - the parent context's stream is returned. + the parent context's stream is returned. If the parent is disconnected, + or when running in the master context, return :data:`None` instead. + + This can be used from any thread, but its output is only meaningful + from the context of the :class:`Broker` thread, as disconnection or + replacement could happen in parallel on the broker thread at any + moment. """ return ( self._stream_by_id.get(dst_id) or @@ -2646,7 +3130,7 @@ class Router(object): If :data:`False`, the handler will be unregistered after a single message has been received. - :param Context respondent: + :param mitogen.core.Context respondent: Context that messages to this handle are expected to be sent from. 
             If specified, arranges for a dead message to be delivered to `fn`
             when disconnection of the context is detected.
@@ -2700,55 +3184,61 @@ class Router(object):
 
         return handle
 
-    duplicate_handle_msg = 'cannot register a handle that is already exists'
-    refused_msg = 'refused by policy'
-    invalid_handle_msg = 'invalid handle'
-    too_large_msg = 'message too large (max %d bytes)'
-    respondent_disconnect_msg = 'the respondent Context has disconnected'
-    broker_shutdown_msg = 'Broker is shutting down'
-    no_route_msg = 'no route to %r, my ID is %r'
-    unidirectional_msg = (
-        'routing mode prevents forward of message from context %d via '
-        'context %d'
-    )
-
     def _on_respondent_disconnect(self, context):
         for handle in self._handles_by_respondent.pop(context, ()):
             _, fn, _, _ = self._handle_map[handle]
             fn(Message.dead(self.respondent_disconnect_msg))
             del self._handle_map[handle]
 
-    def on_shutdown(self, broker):
-        """Called during :meth:`Broker.shutdown`, informs callbacks registered
-        with :meth:`add_handle_cb` the connection is dead."""
-        _v and LOG.debug('%r.on_shutdown(%r)', self, broker)
-        fire(self, 'shutdown')
-        for handle, (persist, fn) in self._handle_map.iteritems():
-            _v and LOG.debug('%r.on_shutdown(): killing %r: %r', self, handle, fn)
-            fn(Message.dead(self.broker_shutdown_msg))
+    def _maybe_send_dead(self, unreachable, msg, reason, *args):
+        """
+        Send a dead message to the original sender of `msg` if the sender was
+        expecting a reply (because its `reply_to` was set); otherwise assume
+        `msg` is itself a reply of some sort, and send the dead message to its
+        original destination.
 
-    def _maybe_send_dead(self, msg, reason, *args):
+        :param bool unreachable:
+            If :data:`True`, the recipient is known to be dead or routing
+            failed due to a security precaution, so don't attempt to fall back
+            to sending the dead message to the recipient if the original sender
+            did not include a reply address.
+        :param mitogen.core.Message msg:
+            Message that triggered the dead message.
+        :param str reason:
+            Human-readable error reason.
+        :param tuple args:
+            Elements to interpolate with `reason`.
+        """
         if args:
             reason %= args
         LOG.debug('%r: %r is dead: %r', self, msg, reason)
         if msg.reply_to and not msg.is_dead:
             msg.reply(Message.dead(reason=reason), router=self)
+        elif not unreachable:
+            self._async_route(
+                Message.dead(
+                    dst_id=msg.dst_id,
+                    handle=msg.handle,
+                    reason=reason,
+                )
+            )
 
     def _invoke(self, msg, stream):
         # IOLOG.debug('%r._invoke(%r)', self, msg)
         try:
             persist, fn, policy, respondent = self._handle_map[msg.handle]
         except KeyError:
-            self._maybe_send_dead(msg, reason=self.invalid_handle_msg)
+            self._maybe_send_dead(True, msg, reason=self.invalid_handle_msg)
            return
 
         if respondent and not (msg.is_dead or
                                msg.src_id == respondent.context_id):
-            self._maybe_send_dead(msg, 'reply from unexpected context')
+            self._maybe_send_dead(True, msg, 'reply from unexpected context')
            return
 
         if policy and not policy(msg, stream):
-            self._maybe_send_dead(msg, self.refused_msg)
+            self._maybe_send_dead(True, msg, self.refused_msg)
             return
 
         if not persist:
@@ -2776,52 +3266,71 @@ class Router(object):
         _vv and IOLOG.debug('%r._async_route(%r, %r)', self, msg, in_stream)
 
         if len(msg.data) > self.max_message_size:
-            self._maybe_send_dead(msg, self.too_large_msg % (
+            self._maybe_send_dead(False, msg, self.too_large_msg % (
                 self.max_message_size,
             ))
             return
 
-        # Perform source verification.
+        parent_stream = self._stream_by_id.get(mitogen.parent_id)
+        src_stream = self._stream_by_id.get(msg.src_id, parent_stream)
+
+        # When the ingress stream is known, verify the message was received
+        # on the same stream we would expect to receive messages on from its
+        # src_id and auth_id. This is like Reverse Path Filtering in IP, and
+        # ensures messages from a privileged context cannot be spoofed by a
+        # child.
         if in_stream:
-            parent = self._stream_by_id.get(mitogen.parent_id)
-            expect = self._stream_by_id.get(msg.auth_id, parent)
-            if in_stream != expect:
+            auth_stream = self._stream_by_id.get(msg.auth_id, parent_stream)
+            if in_stream != auth_stream:
                 LOG.error('%r: bad auth_id: got %r via %r, not %r: %r',
-                          self, msg.auth_id, in_stream, expect, msg)
+                          self, msg.auth_id, in_stream, auth_stream, msg)
                 return
 
-            if msg.src_id != msg.auth_id:
-                expect = self._stream_by_id.get(msg.src_id, parent)
-                if in_stream != expect:
-                    LOG.error('%r: bad src_id: got %r via %r, not %r: %r',
-                              self, msg.src_id, in_stream, expect, msg)
-                    return
+            if msg.src_id != msg.auth_id and in_stream != src_stream:
+                LOG.error('%r: bad src_id: got %r via %r, not %r: %r',
+                          self, msg.src_id, in_stream, src_stream, msg)
+                return
 
-            if in_stream.auth_id is not None:
-                msg.auth_id = in_stream.auth_id
+            # If the stream's MitogenProtocol has auth_id set, copy it to the
+            # message. This allows subtrees to become privileged by stamping a
+            # parent's context ID. It is used by mitogen.unix to mark client
+            # streams (like Ansible WorkerProcess) as having the same rights as
+            # the parent.
+            if in_stream.protocol.auth_id is not None:
+                msg.auth_id = in_stream.protocol.auth_id
+            if in_stream.protocol.on_message is not None:
+                in_stream.protocol.on_message(in_stream, msg)
 
-        # Maintain a set of IDs the source ever communicated with.
-        in_stream.egress_ids.add(msg.dst_id)
+            # Record the IDs the source ever communicated with.
+            in_stream.protocol.egress_ids.add(msg.dst_id)
 
         if msg.dst_id == mitogen.context_id:
             return self._invoke(msg, in_stream)
 
         out_stream = self._stream_by_id.get(msg.dst_id)
-        if out_stream is None:
-            out_stream = self._stream_by_id.get(mitogen.parent_id)
+        if (not out_stream) and (parent_stream != src_stream or not in_stream):
+            # No downstream route exists. The message could be from a child or
+            # ourselves for a parent, in which case we must forward it
+            # upstream, or it could be from a parent for a dead child, in which
+            # case its src_id/auth_id would fail verification if returned to
+            # the parent, so in that case reply with a dead message instead.
+            out_stream = parent_stream
 
         if out_stream is None:
-            self._maybe_send_dead(msg, self.no_route_msg,
+            self._maybe_send_dead(True, msg, self.no_route_msg,
                                   msg.dst_id, mitogen.context_id)
             return
 
         if in_stream and self.unidirectional and not \
-                (in_stream.is_privileged or out_stream.is_privileged):
-            self._maybe_send_dead(msg, self.unidirectional_msg,
-                                  in_stream.remote_id, out_stream.remote_id)
+                (in_stream.protocol.is_privileged or
+                 out_stream.protocol.is_privileged):
+            self._maybe_send_dead(True, msg, self.unidirectional_msg,
+                                  in_stream.protocol.remote_id,
+                                  out_stream.protocol.remote_id,
+                                  mitogen.context_id)
            return
 
-        out_stream._send(msg)
+        out_stream.protocol._send(msg)
 
     def route(self, msg):
         """
@@ -2836,17 +3345,26 @@ class Router(object):
         self.broker.defer(self._async_route, msg)
 
 
+class NullTimerList(object):
+    def get_timeout(self):
+        return None
+
+
 class Broker(object):
     """
     Responsible for handling I/O multiplexing in a private thread.
- **Note:** This is the somewhat limited core version of the Broker class - used by child contexts. The master subclass is documented below. + **Note:** This somewhat limited core version is used by children. The + master subclass is documented below. """ poller_class = Poller _waker = None _thread = None + # :func:`mitogen.parent._upgrade_broker` replaces this with + # :class:`mitogen.parent.TimerList` during upgrade. + timers = NullTimerList() + #: Seconds grace to allow :class:`streams <Stream>` to shutdown gracefully #: before force-disconnecting them during :meth:`shutdown`. shutdown_timeout = 3.0 @@ -2854,11 +3372,11 @@ class Broker(object): def __init__(self, poller_class=None, activate_compat=True): self._alive = True self._exitted = False - self._waker = Waker(self) + self._waker = Waker.build_stream(self) #: Arrange for `func(\*args, \**kwargs)` to be executed on the broker #: thread, or immediately if the current thread is the broker thread. #: Safe to call from any thread. - self.defer = self._waker.defer + self.defer = self._waker.protocol.defer self.poller = self.poller_class() self.poller.start_receive( self._waker.receive_side.fd, @@ -2881,7 +3399,7 @@ class Broker(object): if sys.version_info < (2, 6): # import_module() is used to avoid dep scanner. os_fork = import_module('mitogen.os_fork') - mitogen.os_fork._notice_broker_or_pool(self) + os_fork._notice_broker_or_pool(self) def start_receive(self, stream): """ @@ -2892,7 +3410,7 @@ class Broker(object): """ _vv and IOLOG.debug('%r.start_receive(%r)', self, stream) side = stream.receive_side - assert side and side.fd is not None + assert side and not side.closed self.defer(self.poller.start_receive, side.fd, (side, stream.on_receive)) @@ -2913,7 +3431,7 @@ class Broker(object): """ _vv and IOLOG.debug('%r._start_transmit(%r)', self, stream) side = stream.transmit_side - assert side and side.fd is not None + assert side and not side.closed self.poller.start_transmit(side.fd, (side, stream.on_transmit)) def _stop_transmit(self, stream): @@ -2932,7 +3450,7 @@ class Broker(object): progress (e.g. log draining). """ it = (side.keep_alive for (_, (side, _)) in self.poller.readers) - return sum(it, 0) + return sum(it, 0) > 0 or self.timers.get_timeout() is not None def defer_sync(self, func): """ @@ -2975,10 +3493,19 @@ class Broker(object): """ _vv and IOLOG.debug('%r._loop_once(%r, %r)', self, timeout, self.poller) + + timer_to = self.timers.get_timeout() + if timeout is None: + timeout = timer_to + elif timer_to is not None and timer_to < timeout: + timeout = timer_to + #IOLOG.debug('readers =\n%s', pformat(self.poller.readers)) #IOLOG.debug('writers =\n%s', pformat(self.poller.writers)) for side, func in self.poller.poll(timeout): self._call(side.stream, func) + if timer_to is not None: + self.timers.expire() def _broker_exit(self): """ @@ -2986,7 +3513,7 @@ class Broker(object): to shut down gracefully, then discard the :class:`Poller`. 
""" for _, (side, _) in self.poller.readers + self.poller.writers: - LOG.debug('_broker_main() force disconnecting %r', side) + LOG.debug('%r: force disconnecting %r', self, side) side.stream.on_disconnect(self) self.poller.close() @@ -3001,15 +3528,15 @@ class Broker(object): for _, (side, _) in self.poller.readers + self.poller.writers: self._call(side.stream, side.stream.on_shutdown) - deadline = time.time() + self.shutdown_timeout - while self.keep_alive() and time.time() < deadline: - self._loop_once(max(0, deadline - time.time())) + deadline = now() + self.shutdown_timeout + while self.keep_alive() and now() < deadline: + self._loop_once(max(0, deadline - now())) if self.keep_alive(): - LOG.error('%r: some streams did not close gracefully. ' - 'The most likely cause for this is one or ' - 'more child processes still connected to ' - 'our stdout/stderr pipes.', self) + LOG.error('%r: pending work still existed %d seconds after ' + 'shutdown began. This may be due to a timer that is yet ' + 'to expire, or a child connection that did not fully ' + 'shut down.', self, self.shutdown_timeout) def _do_broker_main(self): """ @@ -3017,30 +3544,37 @@ class Broker(object): :meth:`shutdown` is called. """ # For Python 2.4, no way to retrieve ident except on thread. - self._waker.broker_ident = thread.get_ident() + self._waker.protocol.broker_ident = thread.get_ident() try: while self._alive: self._loop_once() + fire(self, 'before_shutdown') fire(self, 'shutdown') self._broker_shutdown() except Exception: - LOG.exception('_broker_main() crashed') + e = sys.exc_info()[1] + LOG.exception('broker crashed') + syslog.syslog(syslog.LOG_ERR, 'broker crashed: %s' % (e,)) + syslog.closelog() # prevent test 'fd leak'. self._alive = False # Ensure _alive is consistent on crash. self._exitted = True self._broker_exit() def _broker_main(self): - _profile_hook('mitogen.broker', self._do_broker_main) - fire(self, 'exit') + try: + _profile_hook('mitogen.broker', self._do_broker_main) + finally: + # 'finally' to ensure _on_broker_exit() can always SIGTERM. + fire(self, 'exit') def shutdown(self): """ Request broker gracefully disconnect streams and stop. Safe to call from any thread. """ - _v and LOG.debug('%r.shutdown()', self) + _v and LOG.debug('%r: shutting down', self) def _shutdown(): self._alive = False if self._alive and not self._exitted: @@ -3054,7 +3588,7 @@ class Broker(object): self._thread.join() def __repr__(self): - return 'Broker(%#x)' % (id(self),) + return 'Broker(%04x)' % (id(self) & 0xffff,) class Dispatcher(object): @@ -3068,14 +3602,38 @@ class Dispatcher(object): mode, any exception that occurs is recorded, and causes all subsequent calls with the same `chain_id` to fail with the same exception. """ + _service_recv = None + + def __repr__(self): + return 'Dispatcher' + def __init__(self, econtext): self.econtext = econtext #: Chain ID -> CallError if prior call failed. self._error_by_chain_id = {} - self.recv = Receiver(router=econtext.router, - handle=CALL_FUNCTION, - policy=has_parent_authority) - listen(econtext.broker, 'shutdown', self.recv.close) + self.recv = Receiver( + router=econtext.router, + handle=CALL_FUNCTION, + policy=has_parent_authority, + ) + #: The :data:`CALL_SERVICE` :class:`Receiver` that will eventually be + #: reused by :class:`mitogen.service.Pool`, should it ever be loaded. + #: This is necessary for race-free reception of all service requests + #: delivered regardless of whether the stub or real service pool are + #: loaded. See #547 for related sorrows. 
+        Dispatcher._service_recv = Receiver(
+            router=econtext.router,
+            handle=CALL_SERVICE,
+            policy=has_parent_authority,
+        )
+        self._service_recv.notify = self._on_call_service
+        listen(econtext.broker, 'shutdown', self._on_broker_shutdown)
+
+    def _on_broker_shutdown(self):
+        if self._service_recv.notify == self._on_call_service:
+            self._service_recv.notify = None
+        self.recv.close()
+
     @classmethod
     @takes_econtext
@@ -3084,7 +3642,7 @@ class Dispatcher(object):
 
     def _parse_request(self, msg):
         data = msg.unpickle(throw=False)
-        _v and LOG.debug('_dispatch_one(%r)', data)
+        _v and LOG.debug('%r: dispatching %r', self, data)
 
         chain_id, modname, klass, func, args, kwargs = data
         obj = import_module(modname)
@@ -3115,10 +3673,47 @@ class Dispatcher(object):
             self._error_by_chain_id[chain_id] = e
             return chain_id, e
 
+    def _on_call_service(self, recv):
+        """
+        Notifier for the :data:`CALL_SERVICE` receiver. This is called on the
+        :class:`Broker` thread for any service messages arriving at this
+        context, for as long as no real service pool implementation is loaded.
+
+        In order to safely bootstrap the service pool implementation a sentinel
+        message is enqueued on the :data:`CALL_FUNCTION` receiver in order to
+        wake the main thread, where the importer can run without any
+        possibility of suffering deadlock due to concurrent uses of the
+        importer.
+
+        If the main thread is blocked indefinitely and the import can
+        therefore never run, but it is blocked waiting on a service call, then
+        :mod:`mitogen.service` has already been imported and
+        :func:`mitogen.service.get_or_create_pool` has already run, meaning the
+        service pool is already active and the duplicate initialization was not
+        needed anyway.
+
+        #547: This trickery is needed to avoid the alternate option of spinning
+        a temporary thread to import the service pool, which could deadlock if
+        a custom import hook executing on the main thread (under the importer
+        lock) would block waiting for some data that was in turn received by a
+        service: the main thread's import lock cannot be released until the
+        service is running, and the service cannot satisfy the request until
+        the import lock is released.
+        """
+        self.recv._on_receive(Message(handle=STUB_CALL_SERVICE))
+
+    def _init_service_pool(self):
+        import mitogen.service
+        mitogen.service.get_or_create_pool(router=self.econtext.router)
+
     def _dispatch_calls(self):
         for msg in self.recv:
+            if msg.handle == STUB_CALL_SERVICE:
+                if msg.src_id == mitogen.context_id:
+                    self._init_service_pool()
+                continue
+
             chain_id, ret = self._dispatch_one(msg)
-            _v and LOG.debug('_dispatch_calls: %r -> %r', msg, ret)
+            _v and LOG.debug('%r: %r -> %r', self, msg, ret)
             if msg.reply_to:
                 msg.reply(ret)
             elif isinstance(ret, CallError) and chain_id is None:
@@ -3135,30 +3730,36 @@ class Dispatcher(object):
 
 class ExternalContext(object):
     """
     External context implementation.
 
+    This class contains the main program implementation for new children. It is
+    responsible for setting up everything about the process environment, import
+    hooks, standard IO redirection, logging, configuring a :class:`Router` and
+    :class:`Broker`, and finally arranging for :class:`Dispatcher` to take over
+    the main thread after initialization is complete.
+
     .. attribute:: broker
+
         The :class:`mitogen.core.Broker` instance.
 
     .. attribute:: context
+
         The :class:`mitogen.core.Context` instance.
 
     .. attribute:: channel
+
         The :class:`mitogen.core.Channel` over which :data:`CALL_FUNCTION`
         requests are received.
 
-    .. 
attribute:: stdout_log - The :class:`mitogen.core.IoLogger` connected to ``stdout``. - .. attribute:: importer + The :class:`mitogen.core.Importer` instance. .. attribute:: stdout_log - The :class:`IoLogger` connected to ``stdout``. + + The :class:`IoLogger` connected to :data:`sys.stdout`. .. attribute:: stderr_log - The :class:`IoLogger` connected to ``stderr``. - .. method:: _dispatch_calls - Implementation for the main thread in every child context. + The :class:`IoLogger` connected to :data:`sys.stderr`. """ detached = False @@ -3169,37 +3770,9 @@ class ExternalContext(object): if not self.config['profiling']: os.kill(os.getpid(), signal.SIGTERM) - #: On Python >3.4, the global importer lock has been sharded into a - #: per-module lock, meaning there is no guarantee the import statement in - #: service_stub_main will be truly complete before a second thread - #: attempting the same import will see a partially initialized module. - #: Sigh. Therefore serialize execution of the stub itself. - service_stub_lock = threading.Lock() - - def _service_stub_main(self, msg): - self.service_stub_lock.acquire() - try: - import mitogen.service - pool = mitogen.service.get_or_create_pool(router=self.router) - pool._receiver._on_receive(msg) - finally: - self.service_stub_lock.release() - - def _on_call_service_msg(self, msg): - """ - Stub service handler. Start a thread to import the mitogen.service - implementation from, and deliver the message to the newly constructed - pool. This must be done as CALL_SERVICE for e.g. PushFileService may - race with a CALL_FUNCTION blocking the main thread waiting for a result - from that service. - """ - if not msg.is_dead: - th = threading.Thread(target=self._service_stub_main, args=(msg,)) - th.start() - def _on_shutdown_msg(self, msg): - _v and LOG.debug('_on_shutdown_msg(%r)', msg) if not msg.is_dead: + _v and LOG.debug('shutdown request from context %d', msg.src_id) self.broker.shutdown() def _on_parent_disconnect(self): @@ -3208,7 +3781,7 @@ class ExternalContext(object): mitogen.parent_id = None LOG.info('Detachment complete') else: - _v and LOG.debug('%r: parent stream is gone, dying.', self) + _v and LOG.debug('parent stream is gone, dying.') self.broker.shutdown() def detach(self): @@ -3219,7 +3792,7 @@ class ExternalContext(object): self.parent.send_await(Message(handle=DETACHING)) LOG.info('Detaching from %r; parent is %s', stream, self.parent) for x in range(20): - pending = self.broker.defer_sync(lambda: stream.pending_bytes()) + pending = self.broker.defer_sync(stream.protocol.pending_bytes) if not pending: break time.sleep(0.05) @@ -3234,17 +3807,12 @@ class ExternalContext(object): self.broker = Broker(activate_compat=False) self.router = Router(self.broker) self.router.debug = self.config.get('debug', False) - self.router.undirectional = self.config['unidirectional'] + self.router.unidirectional = self.config['unidirectional'] self.router.add_handler( fn=self._on_shutdown_msg, handle=SHUTDOWN, policy=has_parent_authority, ) - self.router.add_handler( - fn=self._on_call_service_msg, - handle=CALL_SERVICE, - policy=has_parent_authority, - ) self.master = Context(self.router, 0, 'master') parent_id = self.config['parent_ids'][0] if parent_id == 0: @@ -3253,17 +3821,23 @@ class ExternalContext(object): self.parent = Context(self.router, parent_id, 'parent') in_fd = self.config.get('in_fd', 100) - out_fd = self.config.get('out_fd', 1) - self.stream = Stream(self.router, parent_id) + in_fp = os.fdopen(os.dup(in_fd), 'rb', 0) + os.close(in_fd) + + 
out_fp = os.fdopen(os.dup(self.config.get('out_fd', 1)), 'wb', 0) + self.stream = MitogenProtocol.build_stream( + self.router, + parent_id, + local_id=self.config['context_id'], + parent_ids=self.config['parent_ids'] + ) + self.stream.accept(in_fp, out_fp) self.stream.name = 'parent' - self.stream.accept(in_fd, out_fd) self.stream.receive_side.keep_alive = False listen(self.stream, 'disconnect', self._on_parent_disconnect) listen(self.broker, 'exit', self._on_broker_exit) - os.close(in_fd) - def _reap_first_stage(self): try: os.wait() # Reap first stage. @@ -3331,31 +3905,33 @@ class ExternalContext(object): def _nullify_stdio(self): """ - Open /dev/null to replace stdin, and stdout/stderr temporarily. In case - of odd startup, assume we may be allocated a standard handle. + Open /dev/null to replace stdio temporarily. In case of odd startup, + assume we may be allocated a standard handle. """ - fd = os.open('/dev/null', os.O_RDWR) - try: - for stdfd in (0, 1, 2): - if fd != stdfd: - os.dup2(fd, stdfd) - finally: - if fd not in (0, 1, 2): + for stdfd, mode in ((0, os.O_RDONLY), (1, os.O_RDWR), (2, os.O_RDWR)): + fd = os.open('/dev/null', mode) + if fd != stdfd: + os.dup2(fd, stdfd) os.close(fd) - def _setup_stdio(self): - # #481: when stderr is a TTY due to being started via - # tty_create_child()/hybrid_tty_create_child(), and some privilege - # escalation tool like prehistoric versions of sudo exec this process - # over the top of itself, there is nothing left to keep the slave PTY - # open after we replace our stdio. Therefore if stderr is a TTY, keep - # around a permanent dup() to avoid receiving SIGHUP. + def _preserve_tty_fp(self): + """ + #481: when stderr is a TTY due to being started via tty_create_child() + or hybrid_tty_create_child(), and some privilege escalation tool like + prehistoric versions of sudo exec this process over the top of itself, + there is nothing left to keep the slave PTY open after we replace our + stdio. Therefore if stderr is a TTY, keep around a permanent dup() to + avoid receiving SIGHUP. + """ try: if os.isatty(2): - self.reserve_tty_fd = os.dup(2) - set_cloexec(self.reserve_tty_fd) + self.reserve_tty_fp = os.fdopen(os.dup(2), 'r+b', 0) + set_cloexec(self.reserve_tty_fp.fileno()) except OSError: pass + + def _setup_stdio(self): + self._preserve_tty_fp() # When sys.stdout was opened by the runtime, overwriting it will not # close FD 1. However when forking from a child that previously used # fdopen(), overwriting it /will/ close FD 1. So we must swallow the @@ -3368,8 +3944,12 @@ class ExternalContext(object): sys.stdout.close() self._nullify_stdio() - self.stdout_log = IoLogger(self.broker, 'stdout', 1) - self.stderr_log = IoLogger(self.broker, 'stderr', 2) + self.loggers = [] + for name, fd in (('stdout', 1), ('stderr', 2)): + log = IoLoggerProtocol.build_stream(name, fd) + self.broker.start_receive(log) + self.loggers.append(log) + # Reopen with line buffering. 
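The Dispatcher._on_call_service() docstring earlier in this hunk describes a subtle bootstrap: the broker thread never imports mitogen.service itself, it only enqueues a STUB_CALL_SERVICE sentinel so the import runs on the main thread, where the import lock can be taken without deadlocking against a custom import hook. Below is a minimal, self-contained sketch of that wake-the-main-thread pattern; every name is a hypothetical stand-in, not mitogen's API:

    import queue

    STUB = object()          # stands in for Message(handle=STUB_CALL_SERVICE)
    calls = queue.Queue()    # stands in for the CALL_FUNCTION receiver
    pool = None

    def on_service_msg(msg):
        # Broker thread: do not import the service implementation here. A
        # custom import hook on the main thread may hold the import lock
        # while blocked waiting on a service reply; importing from this
        # thread could then deadlock. Just wake the main thread instead.
        calls.put(STUB)

    def dispatch_one(msg):
        # Main thread: the only place the heavyweight import is safe to run.
        global pool
        if msg is STUB:
            if pool is None:
                pool = 'service pool'  # placeholder for get_or_create_pool()
            return
        print('dispatching', msg)

    on_service_msg('service call arriving on the broker thread')
    calls.put('ordinary CALL_FUNCTION message')
    while not calls.empty():
        dispatch_one(calls.get())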
sys.stdout = os.fdopen(1, 'w', 1) @@ -3389,18 +3969,23 @@ class ExternalContext(object): self.dispatcher = Dispatcher(self) self.router.register(self.parent, self.stream) self.router._setup_logging() - self.log_handler.uncork() - sys.executable = os.environ.pop('ARGV0', sys.executable) - _v and LOG.debug('Connected to context %s; my ID is %r', - self.parent, mitogen.context_id) + _v and LOG.debug('Python version is %s', sys.version) + _v and LOG.debug('Parent is context %r (%s); my ID is %r', + self.parent.context_id, self.parent.name, + mitogen.context_id) _v and LOG.debug('pid:%r ppid:%r uid:%r/%r, gid:%r/%r host:%r', os.getpid(), os.getppid(), os.geteuid(), os.getuid(), os.getegid(), os.getgid(), socket.gethostname()) + + sys.executable = os.environ.pop('ARGV0', sys.executable) _v and LOG.debug('Recovered sys.executable: %r', sys.executable) + if self.config.get('send_ec2', True): + self.stream.transmit_side.write(b('MITO002\n')) self.broker._py24_25_compat() + self.log_handler.uncork() self.dispatcher.run() _v and LOG.debug('ExternalContext.main() normal exit') except KeyboardInterrupt: @@ -3410,4 +3995,3 @@ class ExternalContext(object): raise finally: self.broker.shutdown() - self.broker.join() diff --git a/ansible/plugins/mitogen-0.2.8-pre/mitogen/debug.py b/ansible/plugins/mitogen-0.2.9/mitogen/debug.py similarity index 99% rename from ansible/plugins/mitogen-0.2.8-pre/mitogen/debug.py rename to ansible/plugins/mitogen-0.2.9/mitogen/debug.py index 3d13347f07bbb072702a6d8c7ebd14ac85a30ae4..dbab550ec71bc5b2b4523bb7d4d84f52414ae867 100644 --- a/ansible/plugins/mitogen-0.2.8-pre/mitogen/debug.py +++ b/ansible/plugins/mitogen-0.2.9/mitogen/debug.py @@ -230,7 +230,7 @@ class ContextDebugger(object): def _handle_debug_msg(self, msg): try: method, args, kwargs = msg.unpickle() - msg.reply(getattr(cls, method)(*args, **kwargs)) + msg.reply(getattr(self, method)(*args, **kwargs)) except Exception: e = sys.exc_info()[1] msg.reply(mitogen.core.CallError(e)) diff --git a/ansible/plugins/mitogen-0.2.9/mitogen/doas.py b/ansible/plugins/mitogen-0.2.9/mitogen/doas.py new file mode 100644 index 0000000000000000000000000000000000000000..5b212b9bbf61779cfe2ffe9dcac4af44097547fa --- /dev/null +++ b/ansible/plugins/mitogen-0.2.9/mitogen/doas.py @@ -0,0 +1,142 @@ +# Copyright 2019, David Wilson +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. Neither the name of the copyright holder nor the names of its contributors +# may be used to endorse or promote products derived from this software without +# specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. + +# !mitogen: minify_safe + +import logging +import re + +import mitogen.core +import mitogen.parent + + +LOG = logging.getLogger(__name__) + +password_incorrect_msg = 'doas password is incorrect' +password_required_msg = 'doas password is required' + + +class PasswordError(mitogen.core.StreamError): + pass + + +class Options(mitogen.parent.Options): + username = u'root' + password = None + doas_path = 'doas' + password_prompt = u'Password:' + incorrect_prompts = ( + u'doas: authentication failed', # slicer69/doas + u'doas: Authorization failed', # openbsd/src + ) + + def __init__(self, username=None, password=None, doas_path=None, + password_prompt=None, incorrect_prompts=None, **kwargs): + super(Options, self).__init__(**kwargs) + if username is not None: + self.username = mitogen.core.to_text(username) + if password is not None: + self.password = mitogen.core.to_text(password) + if doas_path is not None: + self.doas_path = doas_path + if password_prompt is not None: + self.password_prompt = mitogen.core.to_text(password_prompt) + if incorrect_prompts is not None: + self.incorrect_prompts = [ + mitogen.core.to_text(p) + for p in incorrect_prompts + ] + + +class BootstrapProtocol(mitogen.parent.RegexProtocol): + password_sent = False + + def setup_patterns(self, conn): + prompt_pattern = re.compile( + re.escape(conn.options.password_prompt).encode('utf-8'), + re.I + ) + incorrect_prompt_pattern = re.compile( + u'|'.join( + re.escape(s) + for s in conn.options.incorrect_prompts + ).encode('utf-8'), + re.I + ) + + self.PATTERNS = [ + (incorrect_prompt_pattern, type(self)._on_incorrect_password), + ] + self.PARTIAL_PATTERNS = [ + (prompt_pattern, type(self)._on_password_prompt), + ] + + def _on_incorrect_password(self, line, match): + if self.password_sent: + self.stream.conn._fail_connection( + PasswordError(password_incorrect_msg) + ) + + def _on_password_prompt(self, line, match): + if self.stream.conn.options.password is None: + self.stream.conn._fail_connection( + PasswordError(password_required_msg) + ) + return + + if self.password_sent: + self.stream.conn._fail_connection( + PasswordError(password_incorrect_msg) + ) + return + + LOG.debug('sending password') + self.stream.transmit_side.write( + (self.stream.conn.options.password + '\n').encode('utf-8') + ) + self.password_sent = True + + +class Connection(mitogen.parent.Connection): + options_class = Options + diag_protocol_class = BootstrapProtocol + + create_child = staticmethod(mitogen.parent.hybrid_tty_create_child) + child_is_immediate_subprocess = False + + def _get_name(self): + return u'doas.' 
+ self.options.username + + def stderr_stream_factory(self): + stream = super(Connection, self).stderr_stream_factory() + stream.protocol.setup_patterns(self) + return stream + + def get_boot_command(self): + bits = [self.options.doas_path, '-u', self.options.username, '--'] + return bits + super(Connection, self).get_boot_command() diff --git a/ansible/plugins/mitogen-0.2.8-pre/mitogen/docker.py b/ansible/plugins/mitogen-0.2.9/mitogen/docker.py similarity index 67% rename from ansible/plugins/mitogen-0.2.8-pre/mitogen/docker.py rename to ansible/plugins/mitogen-0.2.9/mitogen/docker.py index 0c0d40e778e73ac03bd2a8aac7ca10170109b953..48848c89348e43d68987cbcaa36be801a6402488 100644 --- a/ansible/plugins/mitogen-0.2.8-pre/mitogen/docker.py +++ b/ansible/plugins/mitogen-0.2.9/mitogen/docker.py @@ -37,45 +37,47 @@ import mitogen.parent LOG = logging.getLogger(__name__) -class Stream(mitogen.parent.Stream): - child_is_immediate_subprocess = False - +class Options(mitogen.parent.Options): container = None image = None username = None - docker_path = 'docker' - - # TODO: better way of capturing errors such as "No such container." - create_child_args = { - 'merge_stdio': True - } + docker_path = u'docker' - def construct(self, container=None, image=None, - docker_path=None, username=None, - **kwargs): + def __init__(self, container=None, image=None, docker_path=None, + username=None, **kwargs): + super(Options, self).__init__(**kwargs) assert container or image - super(Stream, self).construct(**kwargs) if container: - self.container = container + self.container = mitogen.core.to_text(container) if image: - self.image = image + self.image = mitogen.core.to_text(image) if docker_path: - self.docker_path = docker_path + self.docker_path = mitogen.core.to_text(docker_path) if username: - self.username = username + self.username = mitogen.core.to_text(username) + + +class Connection(mitogen.parent.Connection): + options_class = Options + child_is_immediate_subprocess = False + + # TODO: better way of capturing errors such as "No such container." + create_child_args = { + 'merge_stdio': True + } def _get_name(self): - return u'docker.' + (self.container or self.image) + return u'docker.' 
+ (self.options.container or self.options.image) def get_boot_command(self): args = ['--interactive'] - if self.username: - args += ['--user=' + self.username] + if self.options.username: + args += ['--user=' + self.options.username] - bits = [self.docker_path] - if self.container: - bits += ['exec'] + args + [self.container] - elif self.image: - bits += ['run'] + args + ['--rm', self.image] + bits = [self.options.docker_path] + if self.options.container: + bits += ['exec'] + args + [self.options.container] + elif self.options.image: + bits += ['run'] + args + ['--rm', self.options.image] - return bits + super(Stream, self).get_boot_command() + return bits + super(Connection, self).get_boot_command() diff --git a/ansible/plugins/mitogen-0.2.8-pre/mitogen/fakessh.py b/ansible/plugins/mitogen-0.2.9/mitogen/fakessh.py similarity index 93% rename from ansible/plugins/mitogen-0.2.8-pre/mitogen/fakessh.py rename to ansible/plugins/mitogen-0.2.9/mitogen/fakessh.py index d39a710d0df7c1b89c5ab5b45753c6edb0a2fcf7..e62cf84a71852011fac1da9ed2dbad79ceba120b 100644 --- a/ansible/plugins/mitogen-0.2.8-pre/mitogen/fakessh.py +++ b/ansible/plugins/mitogen-0.2.9/mitogen/fakessh.py @@ -117,14 +117,12 @@ SSH_GETOPTS = ( _mitogen = None -class IoPump(mitogen.core.BasicStream): +class IoPump(mitogen.core.Protocol): _output_buf = '' _closed = False - def __init__(self, broker, stdin_fd, stdout_fd): + def __init__(self, broker): self._broker = broker - self.receive_side = mitogen.core.Side(self, stdout_fd) - self.transmit_side = mitogen.core.Side(self, stdin_fd) def write(self, s): self._output_buf += s @@ -134,13 +132,13 @@ class IoPump(mitogen.core.BasicStream): self._closed = True # If local process hasn't exitted yet, ensure its write buffer is # drained before lazily triggering disconnect in on_transmit. - if self.transmit_side.fd is not None: + if self.transmit_side.fp.fileno() is not None: self._broker._start_transmit(self) - def on_shutdown(self, broker): + def on_shutdown(self, stream, broker): self.close() - def on_transmit(self, broker): + def on_transmit(self, stream, broker): written = self.transmit_side.write(self._output_buf) IOLOG.debug('%r.on_transmit() -> len %r', self, written) if written is None: @@ -153,8 +151,8 @@ class IoPump(mitogen.core.BasicStream): if self._closed: self.on_disconnect(broker) - def on_receive(self, broker): - s = self.receive_side.read() + def on_receive(self, stream, broker): + s = stream.receive_side.read() IOLOG.debug('%r.on_receive() -> len %r', self, len(s)) if s: mitogen.core.fire(self, 'receive', s) @@ -163,8 +161,8 @@ class IoPump(mitogen.core.BasicStream): def __repr__(self): return 'IoPump(%r, %r)' % ( - self.receive_side.fd, - self.transmit_side.fd, + self.receive_side.fp.fileno(), + self.transmit_side.fp.fileno(), ) @@ -173,14 +171,15 @@ class Process(object): Manages the lifetime and pipe connections of the SSH command running in the slave. 
""" - def __init__(self, router, stdin_fd, stdout_fd, proc=None): + def __init__(self, router, stdin, stdout, proc=None): self.router = router - self.stdin_fd = stdin_fd - self.stdout_fd = stdout_fd + self.stdin = stdin + self.stdout = stdout self.proc = proc self.control_handle = router.add_handler(self._on_control) self.stdin_handle = router.add_handler(self._on_stdin) - self.pump = IoPump(router.broker, stdin_fd, stdout_fd) + self.pump = IoPump.build_stream(router.broker) + self.pump.accept(stdin, stdout) self.stdin = None self.control = None self.wake_event = threading.Event() @@ -193,7 +192,7 @@ class Process(object): pmon.add(proc.pid, self._on_proc_exit) def __repr__(self): - return 'Process(%r, %r)' % (self.stdin_fd, self.stdout_fd) + return 'Process(%r, %r)' % (self.stdin, self.stdout) def _on_proc_exit(self, status): LOG.debug('%r._on_proc_exit(%r)', self, status) @@ -202,12 +201,12 @@ class Process(object): def _on_stdin(self, msg): if msg.is_dead: IOLOG.debug('%r._on_stdin() -> %r', self, data) - self.pump.close() + self.pump.protocol.close() return data = msg.unpickle() IOLOG.debug('%r._on_stdin() -> len %d', self, len(data)) - self.pump.write(data) + self.pump.protocol.write(data) def _on_control(self, msg): if not msg.is_dead: @@ -279,13 +278,7 @@ def _start_slave(src_id, cmdline, router): stdout=subprocess.PIPE, ) - process = Process( - router, - proc.stdin.fileno(), - proc.stdout.fileno(), - proc, - ) - + process = Process(router, proc.stdin, proc.stdout, proc) return process.control_handle, process.stdin_handle @@ -361,7 +354,9 @@ def _fakessh_main(dest_context_id, econtext): LOG.debug('_fakessh_main: received control_handle=%r, stdin_handle=%r', control_handle, stdin_handle) - process = Process(econtext.router, 1, 0) + process = Process(econtext.router, + stdin=os.fdopen(1, 'w+b', 0), + stdout=os.fdopen(0, 'r+b', 0)) process.start_master( stdin=mitogen.core.Sender(dest, stdin_handle), control=mitogen.core.Sender(dest, control_handle), @@ -427,7 +422,7 @@ def run(dest, router, args, deadline=None, econtext=None): stream = mitogen.core.Stream(router, context_id) stream.name = u'fakessh' - stream.accept(sock1.fileno(), sock1.fileno()) + stream.accept(sock1, sock1) router.register(fakessh, stream) # Held in socket buffer until process is booted. diff --git a/ansible/plugins/mitogen-0.2.8-pre/mitogen/fork.py b/ansible/plugins/mitogen-0.2.9/mitogen/fork.py similarity index 74% rename from ansible/plugins/mitogen-0.2.8-pre/mitogen/fork.py rename to ansible/plugins/mitogen-0.2.9/mitogen/fork.py index d6685d70b5268bc4a92c06079fc286f8adcc3c01..f0c2d7e7c74c6a6222eea0f01b945100b07320d0 100644 --- a/ansible/plugins/mitogen-0.2.8-pre/mitogen/fork.py +++ b/ansible/plugins/mitogen-0.2.9/mitogen/fork.py @@ -28,6 +28,7 @@ # !mitogen: minify_safe +import errno import logging import os import random @@ -37,9 +38,10 @@ import traceback import mitogen.core import mitogen.parent +from mitogen.core import b -LOG = logging.getLogger('mitogen') +LOG = logging.getLogger(__name__) # Python 2.4/2.5 cannot support fork+threads whatsoever, it doesn't even fix up # interpreter state. So 2.4/2.5 interpreters start .local() contexts for @@ -71,22 +73,14 @@ def reset_logging_framework(): threads in the parent may have been using the logging package at the moment of fork. - It is not possible to solve this problem in general; see - https://github.com/dw/mitogen/issues/150 for a full discussion. + It is not possible to solve this problem in general; see :gh:issue:`150` + for a full discussion. 
""" logging._lock = threading.RLock() # The root logger does not appear in the loggerDict. - for name in [None] + list(logging.Logger.manager.loggerDict): - for handler in logging.getLogger(name).handlers: - handler.createLock() - - root = logging.getLogger() - root.handlers = [ - handler - for handler in root.handlers - if not isinstance(handler, mitogen.core.LogHandler) - ] + logging.Logger.manager.loggerDict = {} + logging.getLogger().handlers = [] def on_fork(): @@ -119,32 +113,53 @@ def handle_child_crash(): os._exit(1) -class Stream(mitogen.parent.Stream): - child_is_immediate_subprocess = True +def _convert_exit_status(status): + """ + Convert a :func:`os.waitpid`-style exit status to a :mod:`subprocess` style + exit status. + """ + if os.WIFEXITED(status): + return os.WEXITSTATUS(status) + elif os.WIFSIGNALED(status): + return -os.WTERMSIG(status) + elif os.WIFSTOPPED(status): + return -os.WSTOPSIG(status) + + +class Process(mitogen.parent.Process): + def poll(self): + try: + pid, status = os.waitpid(self.pid, os.WNOHANG) + except OSError: + e = sys.exc_info()[1] + if e.args[0] == errno.ECHILD: + LOG.warn('%r: waitpid(%r) produced ECHILD', self, self.pid) + return + raise + + if not pid: + return + return _convert_exit_status(status) + +class Options(mitogen.parent.Options): #: Reference to the importer, if any, recovered from the parent. importer = None #: User-supplied function for cleaning up child process state. on_fork = None - python_version_msg = ( - "The mitogen.fork method is not supported on Python versions " - "prior to 2.6, since those versions made no attempt to repair " - "critical interpreter state following a fork. Please use the " - "local() method instead." - ) - - def construct(self, old_router, max_message_size, on_fork=None, - debug=False, profiling=False, unidirectional=False, - on_start=None): + def __init__(self, old_router, max_message_size, on_fork=None, debug=False, + profiling=False, unidirectional=False, on_start=None, + name=None): if not FORK_SUPPORTED: raise Error(self.python_version_msg) # fork method only supports a tiny subset of options. - super(Stream, self).construct(max_message_size=max_message_size, - debug=debug, profiling=profiling, - unidirectional=False) + super(Options, self).__init__( + max_message_size=max_message_size, debug=debug, + profiling=profiling, unidirectional=unidirectional, name=name, + ) self.on_fork = on_fork self.on_start = on_start @@ -152,17 +167,26 @@ class Stream(mitogen.parent.Stream): if isinstance(responder, mitogen.parent.ModuleForwarder): self.importer = responder.importer + +class Connection(mitogen.parent.Connection): + options_class = Options + child_is_immediate_subprocess = True + + python_version_msg = ( + "The mitogen.fork method is not supported on Python versions " + "prior to 2.6, since those versions made no attempt to repair " + "critical interpreter state following a fork. Please use the " + "local() method instead." + ) + name_prefix = u'fork' def start_child(self): parentfp, childfp = mitogen.parent.create_socketpair() - self.pid = os.fork() - if self.pid: + pid = os.fork() + if pid: childfp.close() - # Decouple the socket from the lifetime of the Python socket object. 
- fd = os.dup(parentfp.fileno()) - parentfp.close() - return self.pid, fd, None + return Process(pid, stdin=parentfp, stdout=parentfp) else: parentfp.close() self._wrap_child_main(childfp) @@ -173,12 +197,24 @@ class Stream(mitogen.parent.Stream): except BaseException: handle_child_crash() + def get_econtext_config(self): + config = super(Connection, self).get_econtext_config() + config['core_src_fd'] = None + config['importer'] = self.options.importer + config['send_ec2'] = False + config['setup_package'] = False + if self.options.on_start: + config['on_start'] = self.options.on_start + return config + def _child_main(self, childfp): on_fork() - if self.on_fork: - self.on_fork() + if self.options.on_fork: + self.options.on_fork() mitogen.core.set_block(childfp.fileno()) + childfp.send(b('MITO002\n')) + # Expected by the ExternalContext.main(). os.dup2(childfp.fileno(), 1) os.dup2(childfp.fileno(), 100) @@ -201,23 +237,14 @@ class Stream(mitogen.parent.Stream): if childfp.fileno() not in (0, 1, 100): childfp.close() - config = self.get_econtext_config() - config['core_src_fd'] = None - config['importer'] = self.importer - config['setup_package'] = False - if self.on_start: - config['on_start'] = self.on_start + mitogen.core.IOLOG.setLevel(logging.INFO) try: try: - mitogen.core.ExternalContext(config).main() + mitogen.core.ExternalContext(self.get_econtext_config()).main() except Exception: # TODO: report exception somehow. os._exit(72) finally: # Don't trigger atexit handlers, they were copied from the parent. os._exit(0) - - def _connect_bootstrap(self): - # None required. - pass diff --git a/ansible/plugins/mitogen-0.2.8-pre/mitogen/jail.py b/ansible/plugins/mitogen-0.2.9/mitogen/jail.py similarity index 72% rename from ansible/plugins/mitogen-0.2.8-pre/mitogen/jail.py rename to ansible/plugins/mitogen-0.2.9/mitogen/jail.py index 6e0ac68be24668f1b7de6683c3d39e5ecd59d096..4da7eb0df25a82916dc41e305170ccb80283372b 100644 --- a/ansible/plugins/mitogen-0.2.8-pre/mitogen/jail.py +++ b/ansible/plugins/mitogen-0.2.9/mitogen/jail.py @@ -28,38 +28,38 @@ # !mitogen: minify_safe -import logging - import mitogen.core import mitogen.parent -LOG = logging.getLogger(__name__) +class Options(mitogen.parent.Options): + container = None + username = None + jexec_path = u'/usr/sbin/jexec' + + def __init__(self, container, jexec_path=None, username=None, **kwargs): + super(Options, self).__init__(**kwargs) + self.container = mitogen.core.to_text(container) + if username: + self.username = mitogen.core.to_text(username) + if jexec_path: + self.jexec_path = jexec_path + +class Connection(mitogen.parent.Connection): + options_class = Options -class Stream(mitogen.parent.Stream): child_is_immediate_subprocess = False create_child_args = { 'merge_stdio': True } - container = None - username = None - jexec_path = '/usr/sbin/jexec' - - def construct(self, container, jexec_path=None, username=None, **kwargs): - super(Stream, self).construct(**kwargs) - self.container = container - self.username = username - if jexec_path: - self.jexec_path = jexec_path - def _get_name(self): - return u'jail.' + self.container + return u'jail.' 
+ self.options.container def get_boot_command(self): - bits = [self.jexec_path] - if self.username: - bits += ['-U', self.username] - bits += [self.container] - return bits + super(Stream, self).get_boot_command() + bits = [self.options.jexec_path] + if self.options.username: + bits += ['-U', self.options.username] + bits += [self.options.container] + return bits + super(Connection, self).get_boot_command() diff --git a/ansible/plugins/mitogen-0.2.8-pre/mitogen/kubectl.py b/ansible/plugins/mitogen-0.2.9/mitogen/kubectl.py similarity index 79% rename from ansible/plugins/mitogen-0.2.8-pre/mitogen/kubectl.py rename to ansible/plugins/mitogen-0.2.9/mitogen/kubectl.py index ef626e1bc34414bae5f3cc61d5fe087642bb573e..374ab7470c174e744c3fb6097cd0b7325ac7c9e0 100644 --- a/ansible/plugins/mitogen-0.2.8-pre/mitogen/kubectl.py +++ b/ansible/plugins/mitogen-0.2.9/mitogen/kubectl.py @@ -28,38 +28,40 @@ # !mitogen: minify_safe -import logging - import mitogen.core import mitogen.parent -LOG = logging.getLogger(__name__) - - -class Stream(mitogen.parent.Stream): - child_is_immediate_subprocess = True - +class Options(mitogen.parent.Options): pod = None kubectl_path = 'kubectl' kubectl_args = None - # TODO: better way of capturing errors such as "No such container." - create_child_args = { - 'merge_stdio': True - } - - def construct(self, pod, kubectl_path=None, kubectl_args=None, **kwargs): - super(Stream, self).construct(**kwargs) + def __init__(self, pod, kubectl_path=None, kubectl_args=None, **kwargs): + super(Options, self).__init__(**kwargs) assert pod self.pod = pod if kubectl_path: self.kubectl_path = kubectl_path self.kubectl_args = kubectl_args or [] + +class Connection(mitogen.parent.Connection): + options_class = Options + child_is_immediate_subprocess = True + + # TODO: better way of capturing errors such as "No such container." 
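The same refactor repeats across docker.py, jail.py and kubectl.py in this patch: the old Stream.construct() is split into an Options class that normalises parameters once, and a Connection that only reads self.options when composing the boot command. A condensed sketch of the shape, using a made-up "echoexec" transport and simplified base classes rather than mitogen's real ones:

    class Options(object):
        username = None
        echoexec_path = u'echoexec'

        def __init__(self, container, echoexec_path=None, username=None):
            self.container = container
            if username:
                self.username = username
            if echoexec_path:
                self.echoexec_path = echoexec_path

    class Connection(object):
        options_class = Options

        def __init__(self, **kwargs):
            # Validate/normalise once; everything later reads self.options.
            self.options = self.options_class(**kwargs)

        def get_boot_command(self):
            bits = [self.options.echoexec_path]
            if self.options.username:
                bits += ['-u', self.options.username]
            return bits + [self.options.container, '--', 'python', '-c', '...']

    print(Connection(container='box1', username='root').get_boot_command())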
+ create_child_args = { + 'merge_stdio': True + } + def _get_name(self): - return u'kubectl.%s%s' % (self.pod, self.kubectl_args) + return u'kubectl.%s%s' % (self.options.pod, self.options.kubectl_args) def get_boot_command(self): - bits = [self.kubectl_path] + self.kubectl_args + ['exec', '-it', self.pod] - return bits + ["--"] + super(Stream, self).get_boot_command() + bits = [ + self.options.kubectl_path + ] + self.options.kubectl_args + [ + 'exec', '-it', self.options.pod + ] + return bits + ["--"] + super(Connection, self).get_boot_command() diff --git a/ansible/plugins/mitogen-0.2.8-pre/mitogen/lxc.py b/ansible/plugins/mitogen-0.2.9/mitogen/lxc.py similarity index 85% rename from ansible/plugins/mitogen-0.2.8-pre/mitogen/lxc.py rename to ansible/plugins/mitogen-0.2.9/mitogen/lxc.py index 879d19a168df398b28cccb8d3db5412c72a530f7..a86ce5f0fbd75aa35e1c663855853e9d2f0d957b 100644 --- a/ansible/plugins/mitogen-0.2.8-pre/mitogen/lxc.py +++ b/ansible/plugins/mitogen-0.2.9/mitogen/lxc.py @@ -28,16 +28,24 @@ # !mitogen: minify_safe -import logging - import mitogen.core import mitogen.parent -LOG = logging.getLogger(__name__) +class Options(mitogen.parent.Options): + container = None + lxc_attach_path = 'lxc-attach' + + def __init__(self, container, lxc_attach_path=None, **kwargs): + super(Options, self).__init__(**kwargs) + self.container = container + if lxc_attach_path: + self.lxc_attach_path = lxc_attach_path + +class Connection(mitogen.parent.Connection): + options_class = Options -class Stream(mitogen.parent.Stream): child_is_immediate_subprocess = False create_child_args = { # If lxc-attach finds any of stdin, stdout, stderr connected to a TTY, @@ -47,29 +55,20 @@ class Stream(mitogen.parent.Stream): 'merge_stdio': True } - container = None - lxc_attach_path = 'lxc-attach' - eof_error_hint = ( 'Note: many versions of LXC do not report program execution failure ' 'meaningfully. Please check the host logs (/var/log) for more ' 'information.' ) - def construct(self, container, lxc_attach_path=None, **kwargs): - super(Stream, self).construct(**kwargs) - self.container = container - if lxc_attach_path: - self.lxc_attach_path = lxc_attach_path - def _get_name(self): - return u'lxc.' + self.container + return u'lxc.' 
+ self.options.container def get_boot_command(self): bits = [ - self.lxc_attach_path, + self.options.lxc_attach_path, '--clear-env', - '--name', self.container, + '--name', self.options.container, '--', ] - return bits + super(Stream, self).get_boot_command() + return bits + super(Connection, self).get_boot_command() diff --git a/ansible/plugins/mitogen-0.2.8-pre/mitogen/lxd.py b/ansible/plugins/mitogen-0.2.9/mitogen/lxd.py similarity index 85% rename from ansible/plugins/mitogen-0.2.8-pre/mitogen/lxd.py rename to ansible/plugins/mitogen-0.2.9/mitogen/lxd.py index faea2561f2809c0c43837a8de06d9e822c6763f8..675dddcdc738a5e509893217f6d32913d4130225 100644 --- a/ansible/plugins/mitogen-0.2.8-pre/mitogen/lxd.py +++ b/ansible/plugins/mitogen-0.2.9/mitogen/lxd.py @@ -28,16 +28,25 @@ # !mitogen: minify_safe -import logging - import mitogen.core import mitogen.parent -LOG = logging.getLogger(__name__) +class Options(mitogen.parent.Options): + container = None + lxc_path = 'lxc' + python_path = 'python' + + def __init__(self, container, lxc_path=None, **kwargs): + super(Options, self).__init__(**kwargs) + self.container = container + if lxc_path: + self.lxc_path = lxc_path + +class Connection(mitogen.parent.Connection): + options_class = Options -class Stream(mitogen.parent.Stream): child_is_immediate_subprocess = False create_child_args = { # If lxc finds any of stdin, stdout, stderr connected to a TTY, to @@ -47,31 +56,21 @@ class Stream(mitogen.parent.Stream): 'merge_stdio': True } - container = None - lxc_path = 'lxc' - python_path = 'python' - eof_error_hint = ( 'Note: many versions of LXC do not report program execution failure ' 'meaningfully. Please check the host logs (/var/log) for more ' 'information.' ) - def construct(self, container, lxc_path=None, **kwargs): - super(Stream, self).construct(**kwargs) - self.container = container - if lxc_path: - self.lxc_path = lxc_path - def _get_name(self): - return u'lxd.' + self.container + return u'lxd.' + self.options.container def get_boot_command(self): bits = [ - self.lxc_path, + self.options.lxc_path, 'exec', '--mode=noninteractive', - self.container, + self.options.container, '--', ] - return bits + super(Stream, self).get_boot_command() + return bits + super(Connection, self).get_boot_command() diff --git a/ansible/plugins/mitogen-0.2.8-pre/mitogen/master.py b/ansible/plugins/mitogen-0.2.9/mitogen/master.py similarity index 81% rename from ansible/plugins/mitogen-0.2.8-pre/mitogen/master.py rename to ansible/plugins/mitogen-0.2.9/mitogen/master.py index fb4f505b5f413076f250627d0208dcaccc5b5d8d..f9ddf3ddaabc9c4780d31c4e7eb5988df6373440 100644 --- a/ansible/plugins/mitogen-0.2.8-pre/mitogen/master.py +++ b/ansible/plugins/mitogen-0.2.9/mitogen/master.py @@ -47,7 +47,6 @@ import re import string import sys import threading -import time import types import zlib @@ -91,7 +90,8 @@ RLOG = logging.getLogger('mitogen.ctx') def _stdlib_paths(): - """Return a set of paths from which Python imports the standard library. + """ + Return a set of paths from which Python imports the standard library. """ attr_candidates = [ 'prefix', @@ -111,8 +111,8 @@ def _stdlib_paths(): def is_stdlib_name(modname): - """Return :data:`True` if `modname` appears to come from the standard - library. + """ + Return :data:`True` if `modname` appears to come from the standard library. 
""" if imp.is_builtin(modname) != 0: return True @@ -139,7 +139,8 @@ def is_stdlib_path(path): def get_child_modules(path): - """Return the suffixes of submodules directly neated beneath of the package + """ + Return the suffixes of submodules directly neated beneath of the package directory at `path`. :param str path: @@ -301,8 +302,10 @@ class ThreadWatcher(object): @classmethod def _reset(cls): - """If we have forked since the watch dictionaries were initialized, all - that has is garbage, so clear it.""" + """ + If we have forked since the watch dictionaries were initialized, all + that has is garbage, so clear it. + """ if os.getpid() != cls._cls_pid: cls._cls_pid = os.getpid() cls._cls_instances_by_target.clear() @@ -383,18 +386,18 @@ class LogForwarder(object): if msg.is_dead: return - logger = self._cache.get(msg.src_id) - if logger is None: - context = self._router.context_by_id(msg.src_id) - if context is None: - LOG.error('%s: dropping log from unknown context ID %d', - self, msg.src_id) - return + context = self._router.context_by_id(msg.src_id) + if context is None: + LOG.error('%s: dropping log from unknown context %d', + self, msg.src_id) + return - name = '%s.%s' % (RLOG.name, context.name) - self._cache[msg.src_id] = logger = logging.getLogger(name) + name, level_s, s = msg.data.decode('utf-8', 'replace').split('\x00', 2) - name, level_s, s = msg.data.decode('latin1').split('\x00', 2) + logger_name = '%s.[%s]' % (name, context.name) + logger = self._cache.get(logger_name) + if logger is None: + self._cache[logger_name] = logger = logging.getLogger(logger_name) # See logging.Handler.makeRecord() record = logging.LogRecord( @@ -402,7 +405,7 @@ class LogForwarder(object): level=int(level_s), pathname='(unknown file)', lineno=0, - msg=('%s: %s' % (name, s)), + msg=s, args=(), exc_info=None, ) @@ -426,8 +429,8 @@ class FinderMethod(object): def find(self, fullname): """ - Accept a canonical module name and return `(path, source, is_pkg)` - tuples, where: + Accept a canonical module name as would be found in :data:`sys.modules` + and return a `(path, source, is_pkg)` tuple, where: * `path`: Unicode string containing path to source file. * `source`: Bytestring containing source file's content. @@ -443,10 +446,13 @@ class DefectivePython3xMainMethod(FinderMethod): """ Recent versions of Python 3.x introduced an incomplete notion of importer specs, and in doing so created permanent asymmetry in the - :mod:`pkgutil` interface handling for the `__main__` module. Therefore - we must handle `__main__` specially. + :mod:`pkgutil` interface handling for the :mod:`__main__` module. Therefore + we must handle :mod:`__main__` specially. """ def find(self, fullname): + """ + Find :mod:`__main__` using its :data:`__file__` attribute. + """ if fullname != '__main__': return None @@ -455,7 +461,7 @@ class DefectivePython3xMainMethod(FinderMethod): return None path = getattr(mod, '__file__', None) - if not (os.path.exists(path) and _looks_like_script(path)): + if not (path is not None and os.path.exists(path) and _looks_like_script(path)): return None fp = open(path, 'rb') @@ -473,6 +479,9 @@ class PkgutilMethod(FinderMethod): be the only required implementation of get_module(). """ def find(self, fullname): + """ + Find `fullname` using :func:`pkgutil.find_loader`. + """ try: # Pre-'import spec' this returned None, in Python3.6 it raises # ImportError. 
@@ -518,27 +527,33 @@ class PkgutilMethod(FinderMethod): class SysModulesMethod(FinderMethod): """ - Attempt to fetch source code via sys.modules. This is specifically to - support __main__, but it may catch a few more cases. + Attempt to fetch source code via :data:`sys.modules`. This was originally + specifically to support :mod:`__main__`, but it may catch a few more cases. """ def find(self, fullname): + """ + Find `fullname` using its :data:`__file__` attribute. + """ module = sys.modules.get(fullname) - LOG.debug('_get_module_via_sys_modules(%r) -> %r', fullname, module) - if getattr(module, '__name__', None) != fullname: - LOG.debug('sys.modules[%r].__name__ does not match %r, assuming ' - 'this is a hacky module alias and ignoring it', - fullname, fullname) + if not isinstance(module, types.ModuleType): + LOG.debug('%r: sys.modules[%r] absent or not a regular module', + self, fullname) return - if not isinstance(module, types.ModuleType): - LOG.debug('sys.modules[%r] absent or not a regular module', - fullname) + LOG.debug('_get_module_via_sys_modules(%r) -> %r', fullname, module) + alleged_name = getattr(module, '__name__', None) + if alleged_name != fullname: + LOG.debug('sys.modules[%r].__name__ is incorrect, assuming ' + 'this is a hacky module alias and ignoring it. ' + 'Got %r, module object: %r', + fullname, alleged_name, module) return path = _py_filename(getattr(module, '__file__', '')) if not path: return + LOG.debug('%r: sys.modules[%r]: found %s', self, fullname, path) is_pkg = hasattr(module, '__path__') try: source = inspect.getsource(module) @@ -560,40 +575,57 @@ class SysModulesMethod(FinderMethod): class ParentEnumerationMethod(FinderMethod): """ Attempt to fetch source code by examining the module's (hopefully less - insane) parent package. Required for older versions of - ansible.compat.six and plumbum.colors, and Ansible 2.8 - ansible.module_utils.distro. - - For cases like module_utils.distro, this must handle cases where a package - transmuted itself into a totally unrelated module during import and vice - versa. + insane) parent package, and if no insane parents exist, simply use + :mod:`sys.path` to search for it from scratch on the filesystem using the + normal Python lookup mechanism. + + This is required for older versions of :mod:`ansible.compat.six`, + :mod:`plumbum.colors`, Ansible 2.8 :mod:`ansible.module_utils.distro` and + its submodule :mod:`ansible.module_utils.distro._distro`. + + When some package dynamically replaces itself in :data:`sys.modules`, but + only conditionally according to some program logic, it is possible that + children may attempt to load modules and subpackages from it that can no + longer be resolved by examining a (corrupted) parent. + + For cases like :mod:`ansible.module_utils.distro`, this must handle cases + where a package transmuted itself into a totally unrelated module during + import and vice versa, where :data:`sys.modules` is replaced with junk that + makes it impossible to discover the loaded module using the in-memory + module object or any parent package's :data:`__path__`, since they have all + been overwritten. Some men just want to watch the world burn. """ - def find(self, fullname): - if fullname not in sys.modules: - # Don't attempt this unless a module really exists in sys.modules, - # else we could return junk. 
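Condensing the reworked SysModulesMethod.find() above: source for an already-imported module is recovered straight from sys.modules, but only for genuine module objects whose __name__ agrees with the key they sit under, which filters out hacky module aliases. A standalone approximation:

    import inspect
    import sys
    import types

    def find_via_sys_modules(fullname):
        module = sys.modules.get(fullname)
        if not isinstance(module, types.ModuleType):
            return None                  # absent, or not a regular module
        if getattr(module, '__name__', None) != fullname:
            return None                  # hacky module alias; ignore it
        path = getattr(module, '__file__', None)
        if not path:
            return None
        is_pkg = hasattr(module, '__path__')
        try:
            source = inspect.getsource(module)
        except (OSError, TypeError):     # simplification of the original
            return None
        return path, source, is_pkg

    print(find_via_sys_modules('types')[0])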
- return - - pkgname, _, modname = str_rpartition(to_text(fullname), u'.') - pkg = sys.modules.get(pkgname) - if pkg is None or not hasattr(pkg, '__file__'): - LOG.debug('%r: %r is not a package or lacks __file__ attribute', - self, pkgname) - return - - pkg_path = [os.path.dirname(pkg.__file__)] - try: - fp, path, (suffix, _, kind) = imp.find_module(modname, pkg_path) - except ImportError: - e = sys.exc_info()[1] - LOG.debug('%r: imp.find_module(%r, %r) -> %s', - self, modname, [pkg_path], e) - return None - - if kind == imp.PKG_DIRECTORY: - return self._found_package(fullname, path) - else: - return self._found_module(fullname, path, fp) + def _find_sane_parent(self, fullname): + """ + Iteratively search :data:`sys.modules` for the least indirect parent of + `fullname` that is loaded and contains a :data:`__path__` attribute. + + :return: + `(parent_name, path, modpath)` tuple, where: + + * `modname`: canonical name of the found package, or the empty + string if none is found. + * `search_path`: :data:`__path__` attribute of the least + indirect parent found, or :data:`None` if no indirect parent + was found. + * `modpath`: list of module name components leading from `path` + to the target module. + """ + path = None + modpath = [] + while True: + pkgname, _, modname = str_rpartition(to_text(fullname), u'.') + modpath.insert(0, modname) + if not pkgname: + return [], None, modpath + + pkg = sys.modules.get(pkgname) + path = getattr(pkg, '__path__', None) + if pkg and path: + return pkgname.split('.'), path, modpath + + LOG.debug('%r: %r lacks __path__ attribute', self, pkgname) + fullname = pkgname def _found_package(self, fullname, path): path = os.path.join(path, '__init__.py') @@ -622,6 +654,47 @@ class ParentEnumerationMethod(FinderMethod): source = source.encode('utf-8') return path, source, is_pkg + def _find_one_component(self, modname, search_path): + try: + #fp, path, (suffix, _, kind) = imp.find_module(modname, search_path) + return imp.find_module(modname, search_path) + except ImportError: + e = sys.exc_info()[1] + LOG.debug('%r: imp.find_module(%r, %r) -> %s', + self, modname, [search_path], e) + return None + + def find(self, fullname): + """ + See implementation for a description of how this works. + """ + #if fullname not in sys.modules: + # Don't attempt this unless a module really exists in sys.modules, + # else we could return junk. + #return + + fullname = to_text(fullname) + modname, search_path, modpath = self._find_sane_parent(fullname) + while True: + tup = self._find_one_component(modpath.pop(0), search_path) + if tup is None: + return None + + fp, path, (suffix, _, kind) = tup + if modpath: + # Still more components to descent. Result must be a package + if fp: + fp.close() + if kind != imp.PKG_DIRECTORY: + LOG.debug('%r: %r appears to be child of non-package %r', + self, fullname, path) + return None + search_path = [path] + elif kind == imp.PKG_DIRECTORY: + return self._found_package(fullname, path) + else: + return self._found_module(fullname, path, fp) + class ModuleFinder(object): """ @@ -667,7 +740,8 @@ class ModuleFinder(object): ] def get_module_source(self, fullname): - """Given the name of a loaded module `fullname`, attempt to find its + """ + Given the name of a loaded module `fullname`, attempt to find its source code. 
:returns: @@ -691,9 +765,10 @@ class ModuleFinder(object): return tup def resolve_relpath(self, fullname, level): - """Given an ImportFrom AST node, guess the prefix that should be tacked - on to an alias name to produce a canonical name. `fullname` is the name - of the module in which the ImportFrom appears. + """ + Given an ImportFrom AST node, guess the prefix that should be tacked on + to an alias name to produce a canonical name. `fullname` is the name of + the module in which the ImportFrom appears. """ mod = sys.modules.get(fullname, None) if hasattr(mod, '__path__'): @@ -722,7 +797,7 @@ class ModuleFinder(object): The list is determined by retrieving the source code of `fullname`, compiling it, and examining all IMPORT_NAME ops. - :param fullname: Fully qualified name of an _already imported_ module + :param fullname: Fully qualified name of an *already imported* module for which source code can be retrieved :type fullname: str """ @@ -770,7 +845,7 @@ class ModuleFinder(object): This method is like :py:meth:`find_related_imports`, but also recursively searches any modules which are imported by `fullname`. - :param fullname: Fully qualified name of an _already imported_ module + :param fullname: Fully qualified name of an *already imported* module for which source code can be retrieved :type fullname: str """ @@ -789,6 +864,7 @@ class ModuleFinder(object): class ModuleResponder(object): def __init__(self, router): + self._log = logging.getLogger('mitogen.responder') self._router = router self._finder = ModuleFinder() self._cache = {} # fullname -> pickled @@ -817,11 +893,11 @@ class ModuleResponder(object): ) def __repr__(self): - return 'ModuleResponder(%r)' % (self._router,) + return 'ModuleResponder' def add_source_override(self, fullname, path, source, is_pkg): """ - See :meth:`ModuleFinder.add_source_override. + See :meth:`ModuleFinder.add_source_override`. """ self._finder.add_source_override(fullname, path, source, is_pkg) @@ -844,9 +920,11 @@ class ModuleResponder(object): self.blacklist.append(fullname) def neutralize_main(self, path, src): - """Given the source for the __main__ module, try to find where it - begins conditional execution based on a "if __name__ == '__main__'" - guard, and remove any code after that point.""" + """ + Given the source for the __main__ module, try to find where it begins + conditional execution based on a "if __name__ == '__main__'" guard, and + remove any code after that point. + """ match = self.MAIN_RE.search(src) if match: return src[:match.start()] @@ -854,7 +932,7 @@ class ModuleResponder(object): if b('mitogen.main(') in src: return src - LOG.error(self.main_guard_msg, path) + self._log.error(self.main_guard_msg, path) raise ImportError('refused') def _make_negative_response(self, fullname): @@ -873,8 +951,7 @@ class ModuleResponder(object): if path and is_stdlib_path(path): # Prevent loading of 2.x<->3.x stdlib modules! This costs one # RTT per hit, so a client-side solution is also required. - LOG.debug('%r: refusing to serve stdlib module %r', - self, fullname) + self._log.debug('refusing to serve stdlib module %r', fullname) tup = self._make_negative_response(fullname) self._cache[fullname] = tup return tup @@ -882,21 +959,21 @@ class ModuleResponder(object): if source is None: # TODO: make this .warning() or similar again once importer has its # own logging category. 
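neutralize_main(), shown above, rewrites or refuses a __main__ module before serving it to children, so that importing the script remotely cannot re-run its entry point. The guard regex below is only a plausible stand-in; mitogen's actual MAIN_RE is defined elsewhere in master.py:

    import re

    MAIN_RE = re.compile(br'^if\s+__name__\s*==\s*.__main__.\s*:', re.M)

    def neutralize_main(src):
        # Truncate everything from the main guard onward.
        match = MAIN_RE.search(src)
        if match:
            return src[:match.start()]
        return src

    src = b"X = 1\nif __name__ == '__main__':\n    print('side effect')\n"
    print(neutralize_main(src).decode())     # -> "X = 1"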
- LOG.debug('_build_tuple(%r): could not locate source', fullname) + self._log.debug('could not find source for %r', fullname) tup = self._make_negative_response(fullname) self._cache[fullname] = tup return tup if self.minify_safe_re.search(source): # If the module contains a magic marker, it's safe to minify. - t0 = time.time() + t0 = mitogen.core.now() source = mitogen.minify.minimize_source(source).encode('utf-8') - self.minify_secs += time.time() - t0 + self.minify_secs += mitogen.core.now() - t0 if is_pkg: pkg_present = get_child_modules(path) - LOG.debug('_build_tuple(%r, %r) -> %r', - path, fullname, pkg_present) + self._log.debug('%s is a package at %s with submodules %r', + fullname, path, pkg_present) else: pkg_present = None @@ -920,17 +997,17 @@ class ModuleResponder(object): return tup def _send_load_module(self, stream, fullname): - if fullname not in stream.sent_modules: + if fullname not in stream.protocol.sent_modules: tup = self._build_tuple(fullname) msg = mitogen.core.Message.pickled( tup, - dst_id=stream.remote_id, + dst_id=stream.protocol.remote_id, handle=mitogen.core.LOAD_MODULE, ) - LOG.debug('%s: sending module %s (%.2f KiB)', - stream.name, fullname, len(msg.data) / 1024.0) + self._log.debug('sending %s (%.2f KiB) to %s', + fullname, len(msg.data) / 1024.0, stream.name) self._router._async_route(msg) - stream.sent_modules.add(fullname) + stream.protocol.sent_modules.add(fullname) if tup[2] is not None: self.good_load_module_count += 1 self.good_load_module_size += len(msg.data) @@ -939,23 +1016,23 @@ class ModuleResponder(object): def _send_module_load_failed(self, stream, fullname): self.bad_load_module_count += 1 - stream.send( + stream.protocol.send( mitogen.core.Message.pickled( self._make_negative_response(fullname), - dst_id=stream.remote_id, + dst_id=stream.protocol.remote_id, handle=mitogen.core.LOAD_MODULE, ) ) def _send_module_and_related(self, stream, fullname): - if fullname in stream.sent_modules: + if fullname in stream.protocol.sent_modules: return try: tup = self._build_tuple(fullname) for name in tup[4]: # related parent, _, _ = str_partition(name, '.') - if parent != fullname and parent not in stream.sent_modules: + if parent != fullname and parent not in stream.protocol.sent_modules: # Parent hasn't been sent, so don't load submodule yet. 
continue @@ -974,25 +1051,25 @@ class ModuleResponder(object): return fullname = msg.data.decode() - LOG.debug('%s requested module %s', stream.name, fullname) + self._log.debug('%s requested module %s', stream.name, fullname) self.get_module_count += 1 - if fullname in stream.sent_modules: + if fullname in stream.protocol.sent_modules: LOG.warning('_on_get_module(): dup request for %r from %r', fullname, stream) - t0 = time.time() + t0 = mitogen.core.now() try: self._send_module_and_related(stream, fullname) finally: - self.get_module_secs += time.time() - t0 + self.get_module_secs += mitogen.core.now() - t0 def _send_forward_module(self, stream, context, fullname): - if stream.remote_id != context.context_id: - stream.send( + if stream.protocol.remote_id != context.context_id: + stream.protocol._send( mitogen.core.Message( data=b('%s\x00%s' % (context.context_id, fullname)), handle=mitogen.core.FORWARD_MODULE, - dst_id=stream.remote_id, + dst_id=stream.protocol.remote_id, ) ) @@ -1061,6 +1138,7 @@ class Broker(mitogen.core.Broker): on_join=self.shutdown, ) super(Broker, self).__init__() + self.timers = mitogen.parent.TimerList() def shutdown(self): super(Broker, self).shutdown() @@ -1206,6 +1284,21 @@ class Router(mitogen.parent.Router): class IdAllocator(object): + """ + Allocate IDs for new contexts constructed locally, and blocks of IDs for + children to allocate their own IDs using + :class:`mitogen.parent.ChildIdAllocator` without risk of conflict, and + without necessitating network round-trips for each new context. + + This class responds to :data:`mitogen.core.ALLOCATE_ID` messages received + from children by replying with fresh block ID allocations. + + The master's :class:`IdAllocator` instance can be accessed via + :attr:`mitogen.master.Router.id_allocator`. + """ + #: Block allocations are made in groups of 1000 by default. + BLOCK_SIZE = 1000 + def __init__(self, router): self.router = router self.next_id = 1 @@ -1218,14 +1311,12 @@ class IdAllocator(object): def __repr__(self): return 'IdAllocator(%r)' % (self.router,) - BLOCK_SIZE = 1000 - def allocate(self): """ - Arrange for a unique context ID to be allocated and associated with a - route leading to the active context. In masters, the ID is generated - directly, in children it is forwarded to the master via a - :data:`mitogen.core.ALLOCATE_ID` message. + Allocate a context ID by directly incrementing an internal counter. + + :returns: + The new context ID. """ self.lock.acquire() try: @@ -1236,6 +1327,15 @@ class IdAllocator(object): self.lock.release() def allocate_block(self): + """ + Allocate a block of IDs for use in a child context. + + This function is safe to call from any thread. + + :returns: + Tuple of the form `(id, end_id)` where `id` is the first usable ID + and `end_id` is the last usable ID. 
+ """ self.lock.acquire() try: id_ = self.next_id diff --git a/ansible/plugins/mitogen-0.2.8-pre/mitogen/minify.py b/ansible/plugins/mitogen-0.2.9/mitogen/minify.py similarity index 94% rename from ansible/plugins/mitogen-0.2.8-pre/mitogen/minify.py rename to ansible/plugins/mitogen-0.2.9/mitogen/minify.py index dc9f517c5bf80f00700648c52ecaf3054d04abb2..09fdc4eb20367a8e185b35f805333af12c1eb060 100644 --- a/ansible/plugins/mitogen-0.2.8-pre/mitogen/minify.py +++ b/ansible/plugins/mitogen-0.2.9/mitogen/minify.py @@ -44,7 +44,8 @@ else: def minimize_source(source): - """Remove comments and docstrings from Python `source`, preserving line + """ + Remove comments and docstrings from Python `source`, preserving line numbers and syntax of empty blocks. :param str source: @@ -62,7 +63,8 @@ def minimize_source(source): def strip_comments(tokens): - """Drop comment tokens from a `tokenize` stream. + """ + Drop comment tokens from a `tokenize` stream. Comments on lines 1-2 are kept, to preserve hashbang and encoding. Trailing whitespace is remove from all lines. @@ -84,7 +86,8 @@ def strip_comments(tokens): def strip_docstrings(tokens): - """Replace docstring tokens with NL tokens in a `tokenize` stream. + """ + Replace docstring tokens with NL tokens in a `tokenize` stream. Any STRING token not part of an expression is deemed a docstring. Indented docstrings are not yet recognised. @@ -119,7 +122,8 @@ def strip_docstrings(tokens): def reindent(tokens, indent=' '): - """Replace existing indentation in a token steam, with `indent`. + """ + Replace existing indentation in a token steam, with `indent`. """ old_levels = [] old_level = 0 diff --git a/ansible/plugins/mitogen-0.2.8-pre/mitogen/os_fork.py b/ansible/plugins/mitogen-0.2.9/mitogen/os_fork.py similarity index 95% rename from ansible/plugins/mitogen-0.2.8-pre/mitogen/os_fork.py rename to ansible/plugins/mitogen-0.2.9/mitogen/os_fork.py index b27cfd5c3f448e02453addd9d7343745d7aa0611..da832c65e35970a0398b238ca8a39f5a09050c8c 100644 --- a/ansible/plugins/mitogen-0.2.8-pre/mitogen/os_fork.py +++ b/ansible/plugins/mitogen-0.2.9/mitogen/os_fork.py @@ -35,6 +35,7 @@ Support for operating in a mixed threading/forking environment. import os import socket import sys +import threading import weakref import mitogen.core @@ -157,6 +158,7 @@ class Corker(object): held. This will not return until each thread acknowledges it has ceased execution. """ + current = threading.currentThread() s = mitogen.core.b('CORK') * ((128 // 4) * 1024) self._rsocks = [] @@ -164,12 +166,14 @@ class Corker(object): # participation of a broker in order to complete. for pool in self.pools: if not pool.closed: - for x in range(pool.size): - self._cork_one(s, pool) + for th in pool._threads: + if th != current: + self._cork_one(s, pool) for broker in self.brokers: if broker._alive: - self._cork_one(s, broker) + if broker._thread != current: + self._cork_one(s, broker) # Pause until we can detect every thread has entered write(). 
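The new IdAllocator docstrings above describe handing each child a contiguous block of BLOCK_SIZE context IDs, so children can name new contexts locally without a per-context network round-trip. A minimal sketch of that scheme, simplified from the real class:

    import threading

    class IdAllocator(object):
        BLOCK_SIZE = 1000

        def __init__(self):
            self.next_id = 1
            self.lock = threading.Lock()

        def allocate(self):
            # One fresh context ID, thread-safely.
            with self.lock:
                id_, self.next_id = self.next_id, self.next_id + 1
                return id_

        def allocate_block(self):
            # (first_id, last_id) of a block reserved for one child.
            with self.lock:
                id_ = self.next_id
                self.next_id += self.BLOCK_SIZE
                return id_, id_ + self.BLOCK_SIZE - 1

    alloc = IdAllocator()
    print(alloc.allocate())        # 1
    print(alloc.allocate_block())  # (2, 1001)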
for rsock in self._rsocks: diff --git a/ansible/plugins/mitogen-0.2.8-pre/mitogen/parent.py b/ansible/plugins/mitogen-0.2.9/mitogen/parent.py similarity index 60% rename from ansible/plugins/mitogen-0.2.8-pre/mitogen/parent.py rename to ansible/plugins/mitogen-0.2.9/mitogen/parent.py index 113fdc2e9d7ed6741f28cf606e79e066d051e883..630e3de1928eafb71d699ee4e6a5b9f559185d8d 100644 --- a/ansible/plugins/mitogen-0.2.8-pre/mitogen/parent.py +++ b/ansible/plugins/mitogen-0.2.9/mitogen/parent.py @@ -38,9 +38,11 @@ import codecs import errno import fcntl import getpass +import heapq import inspect import logging import os +import re import signal import socket import struct @@ -49,7 +51,6 @@ import sys import termios import textwrap import threading -import time import zlib # Absolute imports for <2.5. @@ -63,9 +64,22 @@ except ImportError: import mitogen.core from mitogen.core import b from mitogen.core import bytes_partition -from mitogen.core import LOG from mitogen.core import IOLOG + +LOG = logging.getLogger(__name__) + +# #410: we must avoid the use of socketpairs if SELinux is enabled. +try: + fp = open('/sys/fs/selinux/enforce', 'rb') + try: + SELINUX_ENABLED = bool(int(fp.read())) + finally: + fp.close() +except IOError: + SELINUX_ENABLED = False + + try: next except NameError: @@ -89,6 +103,10 @@ try: except ValueError: SC_OPEN_MAX = 1024 +BROKER_SHUTDOWN_MSG = ( + 'Connection cancelled because the associated Broker began to shut down.' +) + OPENPTY_MSG = ( "Failed to create a PTY: %s. It is likely the maximum number of PTYs has " "been reached. Consider increasing the 'kern.tty.ptmx_max' sysctl on OS " @@ -136,9 +154,12 @@ SIGNAL_BY_NUM = dict( if name.startswith('SIG') and not name.startswith('SIG_') ) +_core_source_lock = threading.Lock() +_core_source_partial = None + def get_log_level(): - return (LOG.level or logging.getLogger().level or logging.INFO) + return (LOG.getEffectiveLevel() or logging.INFO) def get_sys_executable(): @@ -157,10 +178,6 @@ def get_sys_executable(): return '/usr/bin/python' -_core_source_lock = threading.Lock() -_core_source_partial = None - - def _get_core_source(): """ In non-masters, simply fetch the cached mitogen.core source code via the @@ -208,27 +225,33 @@ def is_immediate_child(msg, stream): Handler policy that requires messages to arrive only from immediately connected children. """ - return msg.src_id == stream.remote_id + return msg.src_id == stream.protocol.remote_id def flags(names): - """Return the result of ORing a set of (space separated) :py:mod:`termios` - module constants together.""" + """ + Return the result of ORing a set of (space separated) :py:mod:`termios` + module constants together. + """ return sum(getattr(termios, name, 0) for name in names.split()) def cfmakeraw(tflags): - """Given a list returned by :py:func:`termios.tcgetattr`, return a list + """ + Given a list returned by :py:func:`termios.tcgetattr`, return a list modified in a manner similar to the `cfmakeraw()` C library function, but - additionally disabling local echo.""" - # BSD: https://github.com/freebsd/freebsd/blob/master/lib/libc/gen/termios.c#L162 - # Linux: https://github.com/lattera/glibc/blob/master/termios/cfmakeraw.c#L20 + additionally disabling local echo. 
+ """ + # BSD: github.com/freebsd/freebsd/blob/master/lib/libc/gen/termios.c#L162 + # Linux: github.com/lattera/glibc/blob/master/termios/cfmakeraw.c#L20 iflag, oflag, cflag, lflag, ispeed, ospeed, cc = tflags - iflag &= ~flags('IMAXBEL IXOFF INPCK BRKINT PARMRK ISTRIP INLCR ICRNL IXON IGNPAR') + iflag &= ~flags('IMAXBEL IXOFF INPCK BRKINT PARMRK ' + 'ISTRIP INLCR ICRNL IXON IGNPAR') iflag &= ~flags('IGNBRK BRKINT PARMRK') oflag &= ~flags('OPOST') - lflag &= ~flags('ECHO ECHOE ECHOK ECHONL ICANON ISIG IEXTEN NOFLSH TOSTOP PENDIN') + lflag &= ~flags('ECHO ECHOE ECHOK ECHONL ICANON ISIG ' + 'IEXTEN NOFLSH TOSTOP PENDIN') cflag &= ~flags('CSIZE PARENB') cflag |= flags('CS8 CREAD') return [iflag, oflag, cflag, lflag, ispeed, ospeed, cc] @@ -245,128 +268,141 @@ def disable_echo(fd): termios.tcsetattr(fd, flags, new) -def close_nonstandard_fds(): - for fd in xrange(3, SC_OPEN_MAX): - try: - os.close(fd) - except OSError: - pass - - def create_socketpair(size=None): """ - Create a :func:`socket.socketpair` to use for use as a child process's UNIX - stdio channels. As socket pairs are bidirectional, they are economical on - file descriptor usage as the same descriptor can be used for ``stdin`` and + Create a :func:`socket.socketpair` for use as a child's UNIX stdio + channels. As socketpairs are bidirectional, they are economical on file + descriptor usage as one descriptor can be used for ``stdin`` and ``stdout``. As they are sockets their buffers are tunable, allowing large - buffers to be configured in order to improve throughput for file transfers - and reduce :class:`mitogen.core.Broker` IO loop iterations. + buffers to improve file transfer throughput and reduce IO loop iterations. """ + if size is None: + size = mitogen.core.CHUNK_SIZE + parentfp, childfp = socket.socketpair() - parentfp.setsockopt(socket.SOL_SOCKET, - socket.SO_SNDBUF, - size or mitogen.core.CHUNK_SIZE) - childfp.setsockopt(socket.SOL_SOCKET, - socket.SO_RCVBUF, - size or mitogen.core.CHUNK_SIZE) + for fp in parentfp, childfp: + fp.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, size) + return parentfp, childfp -def detach_popen(**kwargs): +def create_best_pipe(escalates_privilege=False): """ - Use :class:`subprocess.Popen` to construct a child process, then hack the - Popen so that it forgets the child it created, allowing it to survive a - call to Popen.__del__. + By default we prefer to communicate with children over a UNIX socket, as a + single file descriptor can represent bidirectional communication, and a + cross-platform API exists to align buffer sizes with the needs of the + library. - If the child process is not detached, there is a race between it exitting - and __del__ being called. If it exits before __del__ runs, then __del__'s - call to :func:`os.waitpid` will capture the one and only exit event - delivered to this process, causing later 'legitimate' calls to fail with - ECHILD. + SELinux prevents us setting up a privileged process to inherit an AF_UNIX + socket, a facility explicitly designed as a better replacement for pipes, + because at some point in the mid 90s it might have been commonly possible + for AF_INET sockets to end up undesirably connected to a privileged + process, so let's make up arbitrary rules breaking all sockets instead. - :param list close_on_error: - Array of integer file descriptors to close on exception. + If SELinux is detected, fall back to using pipes. 
+ + :param bool escalates_privilege: + If :data:`True`, the target program may escalate privileges, causing + SELinux to disconnect AF_UNIX sockets, so avoid those. :returns: - Process ID of the new child. + `(parent_rfp, child_wfp, child_rfp, parent_wfp)` + """ + if (not escalates_privilege) or (not SELINUX_ENABLED): + parentfp, childfp = create_socketpair() + return parentfp, childfp, childfp, parentfp + + parent_rfp, child_wfp = mitogen.core.pipe() + try: + child_rfp, parent_wfp = mitogen.core.pipe() + return parent_rfp, child_wfp, child_rfp, parent_wfp + except: + parent_rfp.close() + child_wfp.close() + raise + + +def popen(**kwargs): + """ + Wrap :class:`subprocess.Popen` to ensure any global :data:`_preexec_hook` + is invoked in the child. """ - # This allows Popen() to be used for e.g. graceful post-fork error - # handling, without tying the surrounding code into managing a Popen - # object, which isn't possible for at least :mod:`mitogen.fork`. This - # should be replaced by a swappable helper class in a future version. real_preexec_fn = kwargs.pop('preexec_fn', None) def preexec_fn(): if _preexec_hook: _preexec_hook() if real_preexec_fn: real_preexec_fn() - proc = subprocess.Popen(preexec_fn=preexec_fn, **kwargs) - proc._child_created = False - return proc.pid + return subprocess.Popen(preexec_fn=preexec_fn, **kwargs) -def create_child(args, merge_stdio=False, stderr_pipe=False, preexec_fn=None): +def create_child(args, merge_stdio=False, stderr_pipe=False, + escalates_privilege=False, preexec_fn=None): """ Create a child process whose stdin/stdout is connected to a socket. - :param args: - Argument vector for execv() call. + :param list args: + Program argument vector. :param bool merge_stdio: If :data:`True`, arrange for `stderr` to be connected to the `stdout` socketpair, rather than inherited from the parent process. This may be - necessary to ensure that not TTY is connected to any stdio handle, for + necessary to ensure that no TTY is connected to any stdio handle, for instance when using LXC. :param bool stderr_pipe: If :data:`True` and `merge_stdio` is :data:`False`, arrange for `stderr` to be connected to a separate pipe, to allow any ongoing debug - logs generated by e.g. SSH to be outpu as the session progresses, + logs generated by e.g. SSH to be output as the session progresses, without interfering with `stdout`. + :param bool escalates_privilege: + If :data:`True`, the target program may escalate privileges, causing + SELinux to disconnect AF_UNIX sockets, so avoid those. + :param function preexec_fn: + If not :data:`None`, a function to run within the post-fork child + before executing the target program. :returns: - `(pid, socket_obj, :data:`None` or pipe_fd)` + :class:`Process` instance. """ - parentfp, childfp = create_socketpair() - # When running under a monkey patches-enabled gevent, the socket module - # yields file descriptors who already have O_NONBLOCK, which is - # persisted across fork, totally breaking Python. Therefore, drop - # O_NONBLOCK from Python's future stdin fd. 
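To make the four-way return value of create_best_pipe() concrete, a small
sketch (editor's illustration, not part of the vendored change) of how the
ends pair up:

    parent_rfp, child_wfp, child_rfp, parent_wfp = create_best_pipe()
    # Child stdout is written to child_wfp and read by the parent from
    # parent_rfp; the parent writes the child's stdin into parent_wfp,
    # which the child reads from child_rfp. Without SELinux all four names
    # refer to the two ends of one socketpair; with SELinux they are four
    # distinct pipe ends.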
- mitogen.core.set_block(childfp.fileno()) + parent_rfp, child_wfp, child_rfp, parent_wfp = create_best_pipe( + escalates_privilege=escalates_privilege + ) + stderr = None stderr_r = None - extra = {} if merge_stdio: - extra = {'stderr': childfp} + stderr = child_wfp elif stderr_pipe: - stderr_r, stderr_w = os.pipe() - mitogen.core.set_cloexec(stderr_r) - mitogen.core.set_cloexec(stderr_w) - extra = {'stderr': stderr_w} + stderr_r, stderr = mitogen.core.pipe() + mitogen.core.set_cloexec(stderr_r.fileno()) try: - pid = detach_popen( + proc = popen( args=args, - stdin=childfp, - stdout=childfp, + stdin=child_rfp, + stdout=child_wfp, + stderr=stderr, close_fds=True, preexec_fn=preexec_fn, - **extra ) - except Exception: - childfp.close() - parentfp.close() + except: + child_rfp.close() + child_wfp.close() + parent_rfp.close() + parent_wfp.close() if stderr_pipe: - os.close(stderr_r) - os.close(stderr_w) + stderr.close() + stderr_r.close() raise + child_rfp.close() + child_wfp.close() if stderr_pipe: - os.close(stderr_w) - childfp.close() - # Decouple the socket from the lifetime of the Python socket object. - fd = os.dup(parentfp.fileno()) - parentfp.close() + stderr.close() - LOG.debug('create_child() child %d fd %d, parent %d, cmd: %s', - pid, fd, os.getpid(), Argv(args)) - return pid, fd, stderr_r + return PopenProcess( + proc=proc, + stdin=parent_wfp, + stdout=parent_rfp, + stderr=stderr_r, + ) def _acquire_controlling_tty(): @@ -431,15 +467,22 @@ def openpty(): :raises mitogen.core.StreamError: Creating a PTY failed. :returns: - See :func`os.openpty`. + `(master_fp, slave_fp)` file-like objects. """ try: - return os.openpty() + master_fd, slave_fd = os.openpty() except OSError: e = sys.exc_info()[1] - if IS_LINUX and e.args[0] == errno.EPERM: - return _linux_broken_devpts_openpty() - raise mitogen.core.StreamError(OPENPTY_MSG, e) + if not (IS_LINUX and e.args[0] == errno.EPERM): + raise mitogen.core.StreamError(OPENPTY_MSG, e) + master_fd, slave_fd = _linux_broken_devpts_openpty() + + master_fp = os.fdopen(master_fd, 'r+b', 0) + slave_fp = os.fdopen(slave_fd, 'r+b', 0) + disable_echo(master_fd) + disable_echo(slave_fd) + mitogen.core.set_block(slave_fd) + return master_fp, slave_fp def tty_create_child(args): @@ -451,130 +494,187 @@ def tty_create_child(args): slave end. :param list args: - :py:func:`os.execl` argument list. - + Program argument vector. :returns: - `(pid, tty_fd, None)` + :class:`Process` instance. """ - master_fd, slave_fd = openpty() + master_fp, slave_fp = openpty() try: - mitogen.core.set_block(slave_fd) - disable_echo(master_fd) - disable_echo(slave_fd) - - pid = detach_popen( + proc = popen( args=args, - stdin=slave_fd, - stdout=slave_fd, - stderr=slave_fd, + stdin=slave_fp, + stdout=slave_fp, + stderr=slave_fp, preexec_fn=_acquire_controlling_tty, close_fds=True, ) - except Exception: - os.close(master_fd) - os.close(slave_fd) + except: + master_fp.close() + slave_fp.close() raise - os.close(slave_fd) - LOG.debug('tty_create_child() child %d fd %d, parent %d, cmd: %s', - pid, master_fd, os.getpid(), Argv(args)) - return pid, master_fd, None + slave_fp.close() + return PopenProcess( + proc=proc, + stdin=master_fp, + stdout=master_fp, + ) -def hybrid_tty_create_child(args): +def hybrid_tty_create_child(args, escalates_privilege=False): """ Like :func:`tty_create_child`, except attach stdin/stdout to a socketpair like :func:`create_child`, but leave stderr and the controlling TTY attached to a TTY. - :param list args: - :py:func:`os.execl` argument list. 
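Under the reworked API, create_child() no longer returns a `(pid, fd, ...)`
tuple; a hedged usage sketch (editor's illustration, the command is made
up):

    proc = create_child(['/bin/echo', 'hello'], stderr_pipe=True)
    print(proc.pid)                             # plain integer PID
    data = os.read(proc.stdout.fileno(), 4096)  # parent end of the stdio channel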
+ This permits high throughput communication with programs that are reached + via some program that requires a TTY for password input, like many + configurations of sudo. The UNIX TTY layer tends to have tiny (no more than + 14KiB) buffers, forcing many IO loop iterations when transferring bulk + data, causing significant performance loss. + :param bool escalates_privilege: + If :data:`True`, the target program may escalate privileges, causing + SELinux to disconnect AF_UNIX sockets, so avoid those. + :param list args: + Program argument vector. :returns: - `(pid, socketpair_fd, tty_fd)` + :class:`Process` instance. """ - master_fd, slave_fd = openpty() - + master_fp, slave_fp = openpty() try: - disable_echo(master_fd) - disable_echo(slave_fd) - mitogen.core.set_block(slave_fd) - - parentfp, childfp = create_socketpair() + parent_rfp, child_wfp, child_rfp, parent_wfp = create_best_pipe( + escalates_privilege=escalates_privilege, + ) try: - mitogen.core.set_block(childfp) - pid = detach_popen( + mitogen.core.set_block(child_rfp) + mitogen.core.set_block(child_wfp) + proc = popen( args=args, - stdin=childfp, - stdout=childfp, - stderr=slave_fd, + stdin=child_rfp, + stdout=child_wfp, + stderr=slave_fp, preexec_fn=_acquire_controlling_tty, close_fds=True, ) - except Exception: - parentfp.close() - childfp.close() + except: + parent_rfp.close() + child_wfp.close() + parent_wfp.close() + child_rfp.close() raise - except Exception: - os.close(master_fd) - os.close(slave_fd) + except: + master_fp.close() + slave_fp.close() raise - os.close(slave_fd) - childfp.close() - # Decouple the socket from the lifetime of the Python socket object. - stdio_fd = os.dup(parentfp.fileno()) - parentfp.close() - - LOG.debug('hybrid_tty_create_child() pid=%d stdio=%d, tty=%d, cmd: %s', - pid, stdio_fd, master_fd, Argv(args)) - return pid, stdio_fd, master_fd - - -def write_all(fd, s, deadline=None): - """Arrange for all of bytestring `s` to be written to the file descriptor - `fd`. - - :param int fd: - File descriptor to write to. - :param bytes s: - Bytestring to write to file descriptor. - :param float deadline: - If not :data:`None`, absolute UNIX timestamp after which timeout should - occur. - - :raises mitogen.core.TimeoutError: - Bytestring could not be written entirely before deadline was exceeded. - :raises mitogen.parent.EofError: - Stream indicated EOF, suggesting the child process has exitted. - :raises mitogen.core.StreamError: - File descriptor was disconnected before write could complete. + slave_fp.close() + child_rfp.close() + child_wfp.close() + return PopenProcess( + proc=proc, + stdin=parent_wfp, + stdout=parent_rfp, + stderr=master_fp, + ) + + +class Timer(object): + """ + Represents a future event. """ - timeout = None - written = 0 - poller = PREFERRED_POLLER() - poller.start_transmit(fd) + #: Set to :data:`False` if :meth:`cancel` has been called, or immediately + #: prior to being executed by :meth:`TimerList.expire`. 
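The rich comparison methods below exist so Timer instances can live
directly in a heap; a quick ordering sketch (editor's illustration):

    import heapq
    heap = []
    heapq.heappush(heap, Timer(7.0, lambda: None))
    heapq.heappush(heap, Timer(3.0, lambda: None))
    assert heapq.heappop(heap).when == 3.0   # __lt__ compares `when` only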
+ active = True - try: - while written < len(s): - if deadline is not None: - timeout = max(0, deadline - time.time()) - if timeout == 0: - raise mitogen.core.TimeoutError('write timed out') - - if mitogen.core.PY3: - window = memoryview(s)[written:] - else: - window = buffer(s, written) + def __init__(self, when, func): + self.when = when + self.func = func - for fd in poller.poll(timeout): - n, disconnected = mitogen.core.io_op(os.write, fd, window) - if disconnected: - raise EofError('EOF on stream during write') + def __repr__(self): + return 'Timer(%r, %r)' % (self.when, self.func) - written += n - finally: - poller.close() + def __eq__(self, other): + return self.when == other.when + + def __lt__(self, other): + return self.when < other.when + + def __le__(self, other): + return self.when <= other.when + + def cancel(self): + """ + Cancel this event. If it has not yet executed, it will not execute + during any subsequent :meth:`TimerList.expire` call. + """ + self.active = False + + +class TimerList(object): + """ + Efficiently manage a list of cancellable future events relative to wall + clock time. An instance of this class is installed as + :attr:`mitogen.master.Broker.timers` by default, and as + :attr:`mitogen.core.Broker.timers` in children after a call to + :func:`mitogen.parent.upgrade_router`. + + You can use :class:`TimerList` to cause the broker to wake at arbitrary + future moments, useful for implementing timeouts and polling in an + asynchronous context. + + :class:`TimerList` methods can only be called from asynchronous context, + for example via :meth:`mitogen.core.Broker.defer`. + + The broker automatically adjusts its sleep delay according to the installed + timer list, and arranges for timers to expire via automatic calls to + :meth:`expire`. The main user interface to :class:`TimerList` is + :meth:`schedule`. + """ + _now = mitogen.core.now + + def __init__(self): + self._lst = [] + + def get_timeout(self): + """ + Return the floating point seconds until the next event is due. + + :returns: + Floating point delay, or 0.0, or :data:`None` if no events are + scheduled. + """ + while self._lst and not self._lst[0].active: + heapq.heappop(self._lst) + if self._lst: + return max(0, self._lst[0].when - self._now()) + + def schedule(self, when, func): + """ + Schedule a future event. + + :param float when: + UNIX time in seconds when event should occur. + :param callable func: + Callable to invoke on expiry. + :returns: + A :class:`Timer` instance, exposing :meth:`Timer.cancel`, which may + be used to cancel the future invocation. + """ + timer = Timer(when, func) + heapq.heappush(self._lst, timer) + return timer + + def expire(self): + """ + Invoke callbacks for any events in the past. 
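A usage sketch (editor's illustration; `func` is a made-up callable, and
all calls are assumed to run on the broker thread):

    timers = TimerList()
    timer = timers.schedule(mitogen.core.now() + 30.0, func)
    timers.get_timeout()   # ~30.0, used by the broker as its sleep delay
    timer.cancel()         # func will now never be invoked
    timers.expire()        # runs any timers whose `when` has passed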
+ """ + now = self._now() + while self._lst and self._lst[0].when <= now: + timer = heapq.heappop(self._lst) + if timer.active: + timer.active = False + timer.func() class PartialZlib(object): @@ -614,103 +714,6 @@ class PartialZlib(object): return out + compressor.flush() -class IteratingRead(object): - def __init__(self, fds, deadline=None): - self.deadline = deadline - self.timeout = None - self.poller = PREFERRED_POLLER() - for fd in fds: - self.poller.start_receive(fd) - - self.bits = [] - self.timeout = None - - def close(self): - self.poller.close() - - def __iter__(self): - return self - - def next(self): - while self.poller.readers: - if self.deadline is not None: - self.timeout = max(0, self.deadline - time.time()) - if self.timeout == 0: - break - - for fd in self.poller.poll(self.timeout): - s, disconnected = mitogen.core.io_op(os.read, fd, 4096) - if disconnected or not s: - LOG.debug('iter_read(%r) -> disconnected: %s', - fd, disconnected) - self.poller.stop_receive(fd) - else: - IOLOG.debug('iter_read(%r) -> %r', fd, s) - self.bits.append(s) - return s - - if not self.poller.readers: - raise EofError(u'EOF on stream; last 300 bytes received: %r' % - (b('').join(self.bits)[-300:].decode('latin1'),)) - - raise mitogen.core.TimeoutError('read timed out') - - __next__ = next - - -def iter_read(fds, deadline=None): - """Return a generator that arranges for up to 4096-byte chunks to be read - at a time from the file descriptor `fd` until the generator is destroyed. - - :param int fd: - File descriptor to read from. - :param float deadline: - If not :data:`None`, an absolute UNIX timestamp after which timeout - should occur. - - :raises mitogen.core.TimeoutError: - Attempt to read beyond deadline. - :raises mitogen.parent.EofError: - All streams indicated EOF, suggesting the child process has exitted. - :raises mitogen.core.StreamError: - Attempt to read past end of file. - """ - return IteratingRead(fds=fds, deadline=deadline) - - -def discard_until(fd, s, deadline): - """Read chunks from `fd` until one is encountered that ends with `s`. This - is used to skip output produced by ``/etc/profile``, ``/etc/motd`` and - mandatory SSH banners while waiting for :attr:`Stream.EC0_MARKER` to - appear, indicating the first stage is ready to receive the compressed - :mod:`mitogen.core` source. - - :param int fd: - File descriptor to read from. - :param bytes s: - Marker string to discard until encountered. - :param float deadline: - Absolute UNIX timestamp after which timeout should occur. - - :raises mitogen.core.TimeoutError: - Attempt to read beyond deadline. - :raises mitogen.parent.EofError: - All streams indicated EOF, suggesting the child process has exitted. - :raises mitogen.core.StreamError: - Attempt to read past end of file. - """ - it = iter_read([fd], deadline) - try: - for buf in it: - if IOLOG.level == logging.DEBUG: - for line in buf.splitlines(): - IOLOG.debug('discard_until: discarding %r', line) - if buf.endswith(s): - return - finally: - it.close() # ensure Poller.close() is called. - - def _upgrade_broker(broker): """ Extract the poller state from Broker and replace it with the industrial @@ -719,25 +722,28 @@ def _upgrade_broker(broker): # This function is deadly! The act of calling start_receive() generates log # messages which must be silenced as the upgrade progresses, otherwise the # poller state will change as it is copied, resulting in write fds that are - # lost. 
(Due to LogHandler->Router->Stream->Broker->Poller, where Stream - # only calls start_transmit() when transitioning from empty to non-empty - # buffer. If the start_transmit() is lost, writes from the child hang - # permanently). + # lost. (Due to LogHandler->Router->Stream->Protocol->Broker->Poller, where + # Stream only calls start_transmit() when transitioning from empty to + # non-empty buffer. If the start_transmit() is lost, writes from the child + # hang permanently). root = logging.getLogger() old_level = root.level root.setLevel(logging.CRITICAL) + try: + old = broker.poller + new = PREFERRED_POLLER() + for fd, data in old.readers: + new.start_receive(fd, data) + for fd, data in old.writers: + new.start_transmit(fd, data) + + old.close() + broker.poller = new + finally: + root.setLevel(old_level) - old = broker.poller - new = PREFERRED_POLLER() - for fd, data in old.readers: - new.start_receive(fd, data) - for fd, data in old.writers: - new.start_transmit(fd, data) - - old.close() - broker.poller = new - root.setLevel(old_level) - LOG.debug('replaced %r with %r (new: %d readers, %d writers; ' + broker.timers = TimerList() + LOG.debug('upgraded %r with %r (new: %d readers, %d writers; ' 'old: %d readers, %d writers)', old, new, len(new.readers), len(new.writers), len(old.readers), len(old.writers)) @@ -754,7 +760,7 @@ def upgrade_router(econtext): ) -def stream_by_method_name(name): +def get_connection_class(name): """ Given the name of a Mitogen connection method, import its implementation module and return its Stream subclass. @@ -762,14 +768,14 @@ def stream_by_method_name(name): if name == u'local': name = u'parent' module = mitogen.core.import_module(u'mitogen.' + name) - return module.Stream + return module.Connection @mitogen.core.takes_econtext def _proxy_connect(name, method_name, kwargs, econtext): """ Implements the target portion of Router._proxy_connect() by upgrading the - local context to a parent if it was not already, then calling back into + local process to a parent if it was not already, then calling back into Router._connect() using the arguments passed to the parent's Router.connect(). @@ -783,7 +789,7 @@ def _proxy_connect(name, method_name, kwargs, econtext): try: context = econtext.router._connect( - klass=stream_by_method_name(method_name), + klass=get_connection_class(method_name), name=name, **kwargs ) @@ -804,30 +810,32 @@ def _proxy_connect(name, method_name, kwargs, econtext): } -def wstatus_to_str(status): +def returncode_to_str(n): """ Parse and format a :func:`os.waitpid` exit status. """ - if os.WIFEXITED(status): - return 'exited with return code %d' % (os.WEXITSTATUS(status),) - if os.WIFSIGNALED(status): - n = os.WTERMSIG(status) - return 'exited due to signal %d (%s)' % (n, SIGNAL_BY_NUM.get(n)) - if os.WIFSTOPPED(status): - n = os.WSTOPSIG(status) - return 'stopped due to signal %d (%s)' % (n, SIGNAL_BY_NUM.get(n)) - return 'unknown wait status (%d)' % (status,) + if n < 0: + return 'exited due to signal %d (%s)' % (-n, SIGNAL_BY_NUM.get(-n)) + return 'exited with return code %d' % (n,) class EofError(mitogen.core.StreamError): """ - Raised by :func:`iter_read` and :func:`write_all` when EOF is detected by - the child process. + Raised by :class:`Connection` when an empty read is detected from the + remote process before bootstrap completes. """ # inherits from StreamError to maintain compatibility. 
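As a sketch of the subprocess-style convention consumed by
returncode_to_str() above (editor's illustration):

    returncode_to_str(0)     # -> 'exited with return code 0'
    returncode_to_str(-15)   # -> 'exited due to signal 15 (SIGTERM)'

Negative values follow :attr:`subprocess.Popen.returncode` semantics, in
contrast to the raw wait status parsed by the deleted wstatus_to_str().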
pass +class CancelledError(mitogen.core.StreamError): + """ + Raised by :class:`Connection` when :meth:`mitogen.core.Broker.shutdown` is + called before bootstrap completes. + """ + pass + + class Argv(object): """ Wrapper to defer argv formatting when debug logging is disabled. @@ -893,8 +901,9 @@ class CallSpec(object): class PollPoller(mitogen.core.Poller): """ - Poller based on the POSIX poll(2) interface. Not available on some versions - of OS X, otherwise it is the preferred poller for small FD counts. + Poller based on the POSIX :linux:man2:`poll` interface. Not available on + some versions of OS X, otherwise it is the preferred poller for small FD + counts, as there is no setup/teardown/configuration system call overhead. """ SUPPORTED = hasattr(select, 'poll') _repr = 'PollPoller()' @@ -940,7 +949,7 @@ class PollPoller(mitogen.core.Poller): class KqueuePoller(mitogen.core.Poller): """ - Poller based on the FreeBSD/Darwin kqueue(2) interface. + Poller based on the FreeBSD/Darwin :freebsd:man2:`kqueue` interface. """ SUPPORTED = hasattr(select, 'kqueue') _repr = 'KqueuePoller()' @@ -1018,7 +1027,7 @@ class KqueuePoller(mitogen.core.Poller): class EpollPoller(mitogen.core.Poller): """ - Poller based on the Linux epoll(2) interface. + Poller based on the Linux :linux:man2:`epoll` interface. """ SUPPORTED = hasattr(select, 'epoll') _repr = 'EpollPoller()' @@ -1096,90 +1105,256 @@ for _klass in mitogen.core.Poller, PollPoller, KqueuePoller, EpollPoller: if _klass.SUPPORTED: PREFERRED_POLLER = _klass -# For apps that start threads dynamically, it's possible Latch will also get -# very high-numbered wait fds when there are many connections, and so select() -# becomes useless there too. So swap in our favourite poller. +# For processes that start many threads or connections, it's possible Latch +# will also get high-numbered FDs, and so select() becomes useless there too. +# So swap in our favourite poller. if PollPoller.SUPPORTED: mitogen.core.Latch.poller_class = PollPoller else: mitogen.core.Latch.poller_class = PREFERRED_POLLER -class DiagLogStream(mitogen.core.BasicStream): +class LineLoggingProtocolMixin(object): + def __init__(self, **kwargs): + super(LineLoggingProtocolMixin, self).__init__(**kwargs) + self.logged_lines = [] + self.logged_partial = None + + def on_line_received(self, line): + self.logged_partial = None + self.logged_lines.append((mitogen.core.now(), line)) + self.logged_lines[:] = self.logged_lines[-100:] + return super(LineLoggingProtocolMixin, self).on_line_received(line) + + def on_partial_line_received(self, line): + self.logged_partial = line + return super(LineLoggingProtocolMixin, self).on_partial_line_received(line) + + def on_disconnect(self, broker): + if self.logged_partial: + self.logged_lines.append((mitogen.core.now(), self.logged_partial)) + self.logged_partial = None + super(LineLoggingProtocolMixin, self).on_disconnect(broker) + + +def get_history(streams): + history = [] + for stream in streams: + if stream: + history.extend(getattr(stream.protocol, 'logged_lines', [])) + history.sort() + + s = b('\n').join(h[1] for h in history) + return mitogen.core.to_text(s) + + +class RegexProtocol(LineLoggingProtocolMixin, mitogen.core.DelimitedProtocol): + """ + Implement a delimited protocol where messages matching a set of regular + expressions are dispatched to individual handler methods. 
Input is
+ dispatched using :attr:`PATTERNS` and :attr:`PARTIAL_PATTERNS`, before
+ falling back to :meth:`on_unrecognized_line_received` and
+ :meth:`on_unrecognized_partial_line_received`.
 """
- For "hybrid TTY/socketpair" mode, after a connection has been setup, a
- spare TTY file descriptor will exist that cannot be closed, and to which
- SSH or sudo may continue writing log messages.
+ #: A sequence of 2-tuples of the form `(compiled pattern, method)` for
+ #: patterns that should be matched against complete (delimited) messages,
+ #: i.e. full lines.
+ PATTERNS = []
+
+ #: Like :attr:`PATTERNS`, but patterns that are matched against incomplete
+ #: lines.
+ PARTIAL_PATTERNS = []
+
+ def on_line_received(self, line):
+ super(RegexProtocol, self).on_line_received(line)
+ for pattern, func in self.PATTERNS:
+ match = pattern.search(line)
+ if match is not None:
+ return func(self, line, match)
+
+ return self.on_unrecognized_line_received(line)
+
+ def on_unrecognized_line_received(self, line):
+ LOG.debug('%s: (unrecognized): %s',
+ self.stream.name, line.decode('utf-8', 'replace'))
+
+ def on_partial_line_received(self, line):
+ super(RegexProtocol, self).on_partial_line_received(line)
+ LOG.debug('%s: (partial): %s',
+ self.stream.name, line.decode('utf-8', 'replace'))
+ for pattern, func in self.PARTIAL_PATTERNS:
+ match = pattern.search(line)
+ if match is not None:
+ return func(self, line, match)

- The descriptor cannot be closed since the UNIX TTY layer will send a
- termination signal to any processes whose controlling TTY is the TTY that
- has been closed.
+ return self.on_unrecognized_partial_line_received(line)

- DiagLogStream takes over this descriptor and creates corresponding log
- messages for anything written to it.
+ def on_unrecognized_partial_line_received(self, line):
+ LOG.debug('%s: (unrecognized partial): %s',
+ self.stream.name, line.decode('utf-8', 'replace'))
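To spell out the dispatch contract before the real bootstrap protocol
below, a hypothetical subclass (editor's sketch, not part of the diff):

    class PromptProtocol(RegexProtocol):
        def _on_password_prompt(self, line, match):
            LOG.debug('%s: saw a password prompt', self.stream.name)

        # Each entry is (compiled pattern, handler); the handler is invoked
        # as func(self, line, match) for the first pattern that matches.
        PATTERNS = [
            (re.compile(b'password:'), _on_password_prompt),
        ]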
+
+
+class BootstrapProtocol(RegexProtocol):
+ """
+ Respond to stdout of a child during bootstrap. Wait for :attr:`EC0_MARKER`
+ to be written by the first stage to indicate it can receive the bootstrap,
+ then await :attr:`EC1_MARKER` to indicate success, and
+ :class:`MitogenProtocol` can be enabled.
 """
+ #: Sentinel value emitted by the first stage to indicate it is ready to
+ #: receive the compressed bootstrap. For :mod:`mitogen.ssh` this must have
+ #: length of at least `max(len('password'), len('debug1:'))`
+ EC0_MARKER = b('MITO000')
+ EC1_MARKER = b('MITO001')
+ EC2_MARKER = b('MITO002')

- def __init__(self, fd, stream):
- self.receive_side = mitogen.core.Side(self, fd)
- self.transmit_side = self.receive_side
- self.stream = stream
- self.buf = ''
+ def __init__(self, broker):
+ super(BootstrapProtocol, self).__init__()
+ self._writer = mitogen.core.BufferedWriter(broker, self)

- def __repr__(self):
- return "mitogen.parent.DiagLogStream(fd=%r, '%s')" % (
- self.receive_side.fd,
- self.stream.name,
- )
+ def on_transmit(self, broker):
+ self._writer.on_transmit(broker)
+
+ def _on_ec0_received(self, line, match):
+ LOG.debug('%r: first stage started successfully', self)
+ self._writer.write(self.stream.conn.get_preamble())
+
+ def _on_ec1_received(self, line, match):
+ LOG.debug('%r: first stage received mitogen.core source', self)
+
+ def _on_ec2_received(self, line, match):
+ LOG.debug('%r: new child booted successfully', self)
+ self.stream.conn._complete_connection()
+ return False

- def on_receive(self, broker):
+ def on_unrecognized_line_received(self, line):
+ LOG.debug('%s: stdout: %s', self.stream.name,
+ line.decode('utf-8', 'replace'))
+
+ PATTERNS = [
+ (re.compile(EC0_MARKER), _on_ec0_received),
+ (re.compile(EC1_MARKER), _on_ec1_received),
+ (re.compile(EC2_MARKER), _on_ec2_received),
+ ]
+
+
+class LogProtocol(LineLoggingProtocolMixin, mitogen.core.DelimitedProtocol):
+ """
+ For "hybrid TTY/socketpair" mode, after connection setup a spare TTY master
+ FD exists that cannot be closed, and to which SSH or sudo may continue
+ writing log messages.
+
+ The descriptor cannot be closed since the UNIX TTY layer sends SIGHUP to
+ processes whose controlling TTY is the slave whose master side was closed.
+ LogProtocol takes over this FD and creates log messages for anything
+ written to it.
+ """
+ def on_line_received(self, line):
 """
- This handler is only called after the stream is registered with the IO
- loop, the descriptor is manually read/written by _connect_bootstrap()
- prior to that.
+ Read a line, decode it as UTF-8, and log it.
 """
- buf = self.receive_side.read()
- if not buf:
- return self.on_disconnect(broker)
-
- self.buf += buf.decode('utf-8', 'replace')
- while u'\n' in self.buf:
- lines = self.buf.split('\n')
- self.buf = lines[-1]
- for line in lines[:-1]:
- LOG.debug('%s: %s', self.stream.name, line.rstrip())
+ super(LogProtocol, self).on_line_received(line)
+ LOG.info(u'%s: %s', self.stream.name, line.decode('utf-8', 'replace'))


-class Stream(mitogen.core.Stream):
+class MitogenProtocol(mitogen.core.MitogenProtocol):
 """
- Base for streams capable of starting new slaves.
+ Extend core.MitogenProtocol to cause SHUTDOWN to be sent to the child
+ during graceful shutdown.
 """
+ def on_shutdown(self, broker):
+ """
+ Respond to the broker's request for the stream to shut down by sending
+ SHUTDOWN to the child.
+ """
+ LOG.debug('%r: requesting child shutdown', self)
+ self._send(
+ mitogen.core.Message(
+ src_id=mitogen.context_id,
+ dst_id=self.remote_id,
+ handle=mitogen.core.SHUTDOWN,
+ )
+ )
+
+
+class Options(object):
+ name = None
+
 #: The path to the remote Python interpreter.
 python_path = get_sys_executable()

 #: Maximum time to wait for a connection attempt.
 connect_timeout = 30.0

- #: Derived from :py:attr:`connect_timeout`; absolute floating point
- #: UNIX timestamp after which the connection attempt should be abandoned.
- connect_deadline = None - #: True to cause context to write verbose /tmp/mitogen.<pid>.log. debug = False #: True to cause context to write /tmp/mitogen.stats.<pid>.<thread>.log. profiling = False - #: Set to the child's PID by connect(). - pid = None + #: True if unidirectional routing is enabled in the new child. + unidirectional = False #: Passed via Router wrapper methods, must eventually be passed to #: ExternalContext.main(). max_message_size = None - #: If :attr:`create_child` supplied a diag_fd, references the corresponding - #: :class:`DiagLogStream`, allowing it to be disconnected when this stream - #: is disconnected. Set to :data:`None` if no `diag_fd` was present. - diag_stream = None + #: Remote name. + remote_name = None + + #: Derived from :py:attr:`connect_timeout`; absolute floating point + #: UNIX timestamp after which the connection attempt should be abandoned. + connect_deadline = None + + def __init__(self, max_message_size, name=None, remote_name=None, + python_path=None, debug=False, connect_timeout=None, + profiling=False, unidirectional=False, old_router=None): + self.name = name + self.max_message_size = max_message_size + if python_path: + self.python_path = python_path + if connect_timeout: + self.connect_timeout = connect_timeout + if remote_name is None: + remote_name = get_default_remote_name() + if '/' in remote_name or '\\' in remote_name: + raise ValueError('remote_name= cannot contain slashes') + if remote_name: + self.remote_name = mitogen.core.to_text(remote_name) + self.debug = debug + self.profiling = profiling + self.unidirectional = unidirectional + self.max_message_size = max_message_size + self.connect_deadline = mitogen.core.now() + self.connect_timeout + + +class Connection(object): + """ + Manage the lifetime of a set of :class:`Streams <Stream>` connecting to a + remote Python interpreter, including bootstrap, disconnection, and external + tool integration. + + Base for streams capable of starting children. + """ + options_class = Options + + #: The protocol attached to stdio of the child. + stream_protocol_class = BootstrapProtocol + + #: The protocol attached to stderr of the child. + diag_protocol_class = LogProtocol + + #: :class:`Process` + proc = None + + #: :class:`mitogen.core.Stream` with sides connected to stdin/stdout. + stdio_stream = None + + #: If `proc.stderr` is set, referencing either a plain pipe or the + #: controlling TTY, this references the corresponding + #: :class:`LogProtocol`'s stream, allowing it to be disconnected when this + #: stream is disconnected. + stderr_stream = None #: Function with the semantics of :func:`create_child` used to create the #: child process. @@ -1201,93 +1376,30 @@ class Stream(mitogen.core.Stream): #: Prefix given to default names generated by :meth:`connect`. name_prefix = u'local' - _reaped = False + #: :class:`Timer` that runs :meth:`_on_timer_expired` when connection + #: timeout occurs. 
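A construction sketch for the new Options container (editor's
illustration; all values are made up):

    options = Options(
        max_message_size=128 * 1024,   # required
        name='demo',
        python_path='/usr/bin/python',
        connect_timeout=10.0,
    )
    # __init__ derives options.connect_deadline = now() + connect_timeout.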
+ _timer = None - def __init__(self, *args, **kwargs): - super(Stream, self).__init__(*args, **kwargs) - self.sent_modules = set(['mitogen', 'mitogen.core']) - - def construct(self, max_message_size, remote_name=None, python_path=None, - debug=False, connect_timeout=None, profiling=False, - unidirectional=False, old_router=None, **kwargs): - """Get the named context running on the local machine, creating it if - it does not exist.""" - super(Stream, self).construct(**kwargs) - self.max_message_size = max_message_size - if python_path: - self.python_path = python_path - if connect_timeout: - self.connect_timeout = connect_timeout - if remote_name is None: - remote_name = get_default_remote_name() - if '/' in remote_name or '\\' in remote_name: - raise ValueError('remote_name= cannot contain slashes') - self.remote_name = remote_name - self.debug = debug - self.profiling = profiling - self.unidirectional = unidirectional - self.max_message_size = max_message_size - self.connect_deadline = time.time() + self.connect_timeout + #: When disconnection completes, instance of :class:`Reaper` used to wait + #: on the exit status of the subprocess. + _reaper = None - def on_shutdown(self, broker): - """Request the slave gracefully shut itself down.""" - LOG.debug('%r closing CALL_FUNCTION channel', self) - self._send( - mitogen.core.Message( - src_id=mitogen.context_id, - dst_id=self.remote_id, - handle=mitogen.core.SHUTDOWN, - ) - ) + #: On failure, the exception object that should be propagated back to the + #: user. + exception = None - def _reap_child(self): - """ - Reap the child process during disconnection. - """ - if self.detached and self.child_is_immediate_subprocess: - LOG.debug('%r: immediate child is detached, won\'t reap it', self) - return - - if self.profiling: - LOG.info('%r: wont kill child because profiling=True', self) - return - - if self._reaped: - # on_disconnect() may be invoked more than once, for example, if - # there is still a pending message to be sent after the first - # on_disconnect() call. - return - - try: - pid, status = os.waitpid(self.pid, os.WNOHANG) - except OSError: - e = sys.exc_info()[1] - if e.args[0] == errno.ECHILD: - LOG.warn('%r: waitpid(%r) produced ECHILD', self, self.pid) - return - raise - - self._reaped = True - if pid: - LOG.debug('%r: PID %d %s', self, pid, wstatus_to_str(status)) - return + #: Extra text appended to :class:`EofError` if that exception is raised on + #: a failed connection attempt. May be used in subclasses to hint at common + #: problems with a particular connection method. + eof_error_hint = None - if not self._router.profiling: - # For processes like sudo we cannot actually send sudo a signal, - # because it is setuid, so this is best-effort only. - LOG.debug('%r: child process still alive, sending SIGTERM', self) - try: - os.kill(self.pid, signal.SIGTERM) - except OSError: - e = sys.exc_info()[1] - if e.args[0] != errno.EPERM: - raise + def __init__(self, options, router): + #: :class:`Options` + self.options = options + self._router = router - def on_disconnect(self, broker): - super(Stream, self).on_disconnect(broker) - if self.diag_stream is not None: - self.diag_stream.on_disconnect(broker) - self._reap_child() + def __repr__(self): + return 'Connection(%r)' % (self.stdio_stream,) # Minimised, gzipped, base64'd and passed to 'python -c'. 
It forks, dups # file descriptor 0 as 100, creates a pipe, then execs a new interpreter @@ -1346,15 +1458,15 @@ class Stream(mitogen.core.Stream): This allows emulation of existing tools where the Python invocation may be set to e.g. `['/usr/bin/env', 'python']`. """ - if isinstance(self.python_path, list): - return self.python_path - return [self.python_path] + if isinstance(self.options.python_path, list): + return self.options.python_path + return [self.options.python_path] def get_boot_command(self): source = inspect.getsource(self._first_stage) source = textwrap.dedent('\n'.join(source.strip().split('\n')[2:])) source = source.replace(' ', '\t') - source = source.replace('CONTEXT_NAME', self.remote_name) + source = source.replace('CONTEXT_NAME', self.options.remote_name) preamble_compressed = self.get_preamble() source = source.replace('PREAMBLE_COMPRESSED_LEN', str(len(preamble_compressed))) @@ -1372,19 +1484,19 @@ class Stream(mitogen.core.Stream): ] def get_econtext_config(self): - assert self.max_message_size is not None + assert self.options.max_message_size is not None parent_ids = mitogen.parent_ids[:] parent_ids.insert(0, mitogen.context_id) return { 'parent_ids': parent_ids, - 'context_id': self.remote_id, - 'debug': self.debug, - 'profiling': self.profiling, - 'unidirectional': self.unidirectional, + 'context_id': self.context.context_id, + 'debug': self.options.debug, + 'profiling': self.options.profiling, + 'unidirectional': self.options.unidirectional, 'log_level': get_log_level(), 'whitelist': self._router.get_module_whitelist(), 'blacklist': self._router.get_module_blacklist(), - 'max_message_size': self.max_message_size, + 'max_message_size': self.options.max_message_size, 'version': mitogen.__version__, } @@ -1396,93 +1508,233 @@ class Stream(mitogen.core.Stream): partial = get_core_source_partial() return partial.append(suffix.encode('utf-8')) + def _get_name(self): + """ + Called by :meth:`connect` after :attr:`pid` is known. Subclasses can + override it to specify a default stream name, or set + :attr:`name_prefix` to generate a default format. + """ + return u'%s.%s' % (self.name_prefix, self.proc.pid) + def start_child(self): args = self.get_boot_command() + LOG.debug('command line for %r: %s', self, Argv(args)) try: - return self.create_child(args, **self.create_child_args) + return self.create_child(args=args, **self.create_child_args) except OSError: e = sys.exc_info()[1] msg = 'Child start failed: %s. Command was: %s' % (e, Argv(args)) raise mitogen.core.StreamError(msg) - eof_error_hint = None - def _adorn_eof_error(self, e): """ - Used by subclasses to provide additional information in the case of a - failed connection. + Subclasses may provide additional information in the case of a failed + connection. """ if self.eof_error_hint: e.args = ('%s\n\n%s' % (e.args[0], self.eof_error_hint),) - def _get_name(self): + def _complete_connection(self): + self._timer.cancel() + if not self.exception: + mitogen.core.unlisten(self._router.broker, 'shutdown', + self._on_broker_shutdown) + self._router.register(self.context, self.stdio_stream) + self.stdio_stream.set_protocol( + MitogenProtocol( + router=self._router, + remote_id=self.context.context_id, + ) + ) + self._router.route_monitor.notice_stream(self.stdio_stream) + self.latch.put() + + def _fail_connection(self, exc): """ - Called by :meth:`connect` after :attr:`pid` is known. Subclasses can - override it to specify a default stream name, or set - :attr:`name_prefix` to generate a default format. 
+ Fail the connection attempt. + """ + LOG.debug('failing connection %s due to %r', + self.stdio_stream and self.stdio_stream.name, exc) + if self.exception is None: + self._adorn_eof_error(exc) + self.exception = exc + mitogen.core.unlisten(self._router.broker, 'shutdown', + self._on_broker_shutdown) + for stream in self.stdio_stream, self.stderr_stream: + if stream and not stream.receive_side.closed: + stream.on_disconnect(self._router.broker) + self._complete_connection() + + eof_error_msg = 'EOF on stream; last 100 lines received:\n' + + def on_stdio_disconnect(self): + """ + Handle stdio stream disconnection by failing the Connection if the + stderr stream has already been closed. Otherwise, wait for it to close + (or timeout), to allow buffered diagnostic logs to be consumed. + + It is normal that when a subprocess aborts, stdio has nothing buffered + when it is closed, thus signalling readability, causing an empty read + (interpreted as indicating disconnection) on the next loop iteration, + even if its stderr pipe has lots of diagnostic logs still buffered in + the kernel. Therefore we must wait for both pipes to indicate they are + empty before triggering connection failure. """ - return u'%s.%s' % (self.name_prefix, self.pid) + stderr = self.stderr_stream + if stderr is None or stderr.receive_side.closed: + self._on_streams_disconnected() - def connect(self): - LOG.debug('%r.connect()', self) - self.pid, fd, diag_fd = self.start_child() - self.name = self._get_name() - self.receive_side = mitogen.core.Side(self, fd) - self.transmit_side = mitogen.core.Side(self, os.dup(fd)) - if diag_fd is not None: - self.diag_stream = DiagLogStream(diag_fd, self) - else: - self.diag_stream = None + def on_stderr_disconnect(self): + """ + Inverse of :func:`on_stdio_disconnect`. + """ + if self.stdio_stream.receive_side.closed: + self._on_streams_disconnected() + + def _on_streams_disconnected(self): + """ + When disconnection has been detected for both streams, cancel the + connection timer, mark the connection failed, and reap the child + process. Do nothing if the timer has already been cancelled, indicating + some existing failure has already been noticed. + """ + if self._timer.active: + self._timer.cancel() + self._fail_connection(EofError( + self.eof_error_msg + get_history( + [self.stdio_stream, self.stderr_stream] + ) + )) + + if self._reaper: + return + + self._reaper = Reaper( + broker=self._router.broker, + proc=self.proc, + kill=not ( + (self.detached and self.child_is_immediate_subprocess) or + # Avoid killing so child has chance to write cProfile data + self._router.profiling + ), + # Don't delay shutdown waiting for a detached child, since the + # detached child may expect to live indefinitely after its parent + # exited. + wait_on_shutdown=(not self.detached), + ) + self._reaper.reap() + + def _on_broker_shutdown(self): + """ + Respond to broker.shutdown() being called by failing the connection + attempt. 
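The two disconnect handlers above reduce to a single invariant; in sketch
form (editor's illustration, assuming `conn` is a Connection):

    def both_streams_closed(conn):
        stderr = conn.stderr_stream
        return (conn.stdio_stream.receive_side.closed and
                (stderr is None or stderr.receive_side.closed))
    # _on_streams_disconnected() runs only once this returns True.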
+ """ + self._fail_connection(CancelledError(BROKER_SHUTDOWN_MSG)) + + def stream_factory(self): + return self.stream_protocol_class.build_stream( + broker=self._router.broker, + ) + + def stderr_stream_factory(self): + return self.diag_protocol_class.build_stream() + + def _setup_stdio_stream(self): + stream = self.stream_factory() + stream.conn = self + stream.name = self.options.name or self._get_name() + stream.accept(self.proc.stdout, self.proc.stdin) + + mitogen.core.listen(stream, 'disconnect', self.on_stdio_disconnect) + self._router.broker.start_receive(stream) + return stream + + def _setup_stderr_stream(self): + stream = self.stderr_stream_factory() + stream.conn = self + stream.name = self.options.name or self._get_name() + stream.accept(self.proc.stderr, self.proc.stderr) + + mitogen.core.listen(stream, 'disconnect', self.on_stderr_disconnect) + self._router.broker.start_receive(stream) + return stream + + def _on_timer_expired(self): + self._fail_connection( + mitogen.core.TimeoutError( + 'Failed to setup connection after %.2f seconds', + self.options.connect_timeout, + ) + ) - LOG.debug('%r.connect(): pid:%r stdin:%r, stdout:%r, diag:%r', - self, self.pid, self.receive_side.fd, self.transmit_side.fd, - self.diag_stream and self.diag_stream.receive_side.fd) + def _async_connect(self): + LOG.debug('creating connection to context %d using %s', + self.context.context_id, self.__class__.__module__) + mitogen.core.listen(self._router.broker, 'shutdown', + self._on_broker_shutdown) + self._timer = self._router.broker.timers.schedule( + when=self.options.connect_deadline, + func=self._on_timer_expired, + ) try: - self._connect_bootstrap() - except EofError: - self.on_disconnect(self._router.broker) - e = sys.exc_info()[1] - self._adorn_eof_error(e) - raise + self.proc = self.start_child() except Exception: - self.on_disconnect(self._router.broker) - self._reap_child() - raise + LOG.debug('failed to start child', exc_info=True) + self._fail_connection(sys.exc_info()[1]) + return - #: Sentinel value emitted by the first stage to indicate it is ready to - #: receive the compressed bootstrap. For :mod:`mitogen.ssh` this must have - #: length of at least `max(len('password'), len('debug1:'))` - EC0_MARKER = mitogen.core.b('MITO000\n') - EC1_MARKER = mitogen.core.b('MITO001\n') + LOG.debug('child for %r started: pid:%r stdin:%r stdout:%r stderr:%r', + self, self.proc.pid, + self.proc.stdin.fileno(), + self.proc.stdout.fileno(), + self.proc.stderr and self.proc.stderr.fileno()) - def _ec0_received(self): - LOG.debug('%r._ec0_received()', self) - write_all(self.transmit_side.fd, self.get_preamble()) - discard_until(self.receive_side.fd, self.EC1_MARKER, - self.connect_deadline) - if self.diag_stream: - self._router.broker.start_receive(self.diag_stream) + self.stdio_stream = self._setup_stdio_stream() + if self.context.name is None: + self.context.name = self.stdio_stream.name + self.proc.name = self.stdio_stream.name + if self.proc.stderr: + self.stderr_stream = self._setup_stderr_stream() - def _connect_bootstrap(self): - discard_until(self.receive_side.fd, self.EC0_MARKER, - self.connect_deadline) - self._ec0_received() + def connect(self, context): + self.context = context + self.latch = mitogen.core.Latch() + self._router.broker.defer(self._async_connect) + self.latch.get() + if self.exception: + raise self.exception class ChildIdAllocator(object): + """ + Allocate new context IDs from a block of unique context IDs allocated by + the master process. 
+ """ def __init__(self, router): self.router = router self.lock = threading.Lock() self.it = iter(xrange(0)) def allocate(self): + """ + Allocate an ID, requesting a fresh block from the master if the + existing block is exhausted. + + :returns: + The new context ID. + + .. warning:: + + This method is not safe to call from the :class:`Broker` thread, as + it may block on IO of its own. + """ self.lock.acquire() try: for id_ in self.it: return id_ - master = mitogen.core.Context(self.router, 0) + master = self.router.context_by_id(0) start, end = master.send_await( mitogen.core.Message(dst_id=0, handle=mitogen.core.ALLOCATE_ID) ) @@ -1570,7 +1822,7 @@ class CallChain(object): socket.gethostname(), os.getpid(), thread.get_ident(), - int(1e6 * time.time()), + int(1e6 * mitogen.core.now()), ) def __repr__(self): @@ -1643,7 +1895,9 @@ class CallChain(object): pipelining is disabled, the exception will be logged to the target context's logging framework. """ - LOG.debug('%r.call_no_reply(): %r', self, CallSpec(fn, args, kwargs)) + LOG.debug('starting no-reply function call to %r: %r', + self.context.name or self.context.context_id, + CallSpec(fn, args, kwargs)) self.context.send(self.make_msg(fn, *args, **kwargs)) def call_async(self, fn, *args, **kwargs): @@ -1699,7 +1953,9 @@ class CallChain(object): contexts and consumed as they complete using :class:`mitogen.select.Select`. """ - LOG.debug('%r.call_async(): %r', self, CallSpec(fn, args, kwargs)) + LOG.debug('starting function call to %s: %r', + self.context.name or self.context.context_id, + CallSpec(fn, args, kwargs)) return self.context.send_async(self.make_msg(fn, *args, **kwargs)) def call(self, fn, *args, **kwargs): @@ -1739,9 +1995,11 @@ class Context(mitogen.core.Context): return not (self == other) def __eq__(self, other): - return (isinstance(other, mitogen.core.Context) and - (other.context_id == self.context_id) and - (other.router == self.router)) + return ( + isinstance(other, mitogen.core.Context) and + (other.context_id == self.context_id) and + (other.router == self.router) + ) def __hash__(self): return hash((self.router, self.context_id)) @@ -1819,15 +2077,16 @@ class RouteMonitor(object): RouteMonitor lives entirely on the broker thread, so its data requires no locking. - :param Router router: + :param mitogen.master.Router router: Router to install handlers on. - :param Context parent: + :param mitogen.core.Context parent: :data:`None` in the master process, or reference to the parent context we should propagate route updates towards. """ def __init__(self, router, parent=None): self.router = router self.parent = parent + self._log = logging.getLogger('mitogen.route_monitor') #: Mapping of Stream instance to integer context IDs reachable via the #: stream; used to cleanup routes during disconnection. self._routes_by_stream = {} @@ -1869,11 +2128,11 @@ class RouteMonitor(object): data = str(target_id) if name: data = '%s:%s' % (target_id, name) - stream.send( + stream.protocol.send( mitogen.core.Message( handle=handle, data=data.encode('utf-8'), - dst_id=stream.remote_id, + dst_id=stream.protocol.remote_id, ) ) @@ -1907,20 +2166,20 @@ class RouteMonitor(object): ID of the connecting or disconnecting context. 
""" for stream in self.router.get_streams(): - if target_id in stream.egress_ids and ( + if target_id in stream.protocol.egress_ids and ( (self.parent is None) or - (self.parent.context_id != stream.remote_id) + (self.parent.context_id != stream.protocol.remote_id) ): self._send_one(stream, mitogen.core.DEL_ROUTE, target_id, None) def notice_stream(self, stream): """ When this parent is responsible for a new directly connected child - stream, we're also responsible for broadcasting DEL_ROUTE upstream - if/when that child disconnects. + stream, we're also responsible for broadcasting + :data:`mitogen.core.DEL_ROUTE` upstream when that child disconnects. """ - self._routes_by_stream[stream] = set([stream.remote_id]) - self._propagate_up(mitogen.core.ADD_ROUTE, stream.remote_id, + self._routes_by_stream[stream] = set([stream.protocol.remote_id]) + self._propagate_up(mitogen.core.ADD_ROUTE, stream.protocol.remote_id, stream.name) mitogen.core.listen( obj=stream, @@ -1948,8 +2207,8 @@ class RouteMonitor(object): if routes is None: return - LOG.debug('%r: %r is gone; propagating DEL_ROUTE for %r', - self, stream, routes) + self._log.debug('stream %s is gone; propagating DEL_ROUTE for %r', + stream.name, routes) for target_id in routes: self.router.del_route(target_id) self._propagate_up(mitogen.core.DEL_ROUTE, target_id) @@ -1972,15 +2231,15 @@ class RouteMonitor(object): target_name = target_name.decode() target_id = int(target_id_s) self.router.context_by_id(target_id).name = target_name - stream = self.router.stream_by_id(msg.auth_id) + stream = self.router.stream_by_id(msg.src_id) current = self.router.stream_by_id(target_id) - if current and current.remote_id != mitogen.parent_id: - LOG.error('Cannot add duplicate route to %r via %r, ' - 'already have existing route via %r', - target_id, stream, current) + if current and current.protocol.remote_id != mitogen.parent_id: + self._log.error('Cannot add duplicate route to %r via %r, ' + 'already have existing route via %r', + target_id, stream, current) return - LOG.debug('Adding route to %d via %r', target_id, stream) + self._log.debug('Adding route to %d via %r', target_id, stream) self._routes_by_stream[stream].add(target_id) self.router.add_route(target_id, stream) self._propagate_up(mitogen.core.ADD_ROUTE, target_id, target_name) @@ -2000,24 +2259,24 @@ class RouteMonitor(object): if registered_stream is None: return - stream = self.router.stream_by_id(msg.auth_id) + stream = self.router.stream_by_id(msg.src_id) if registered_stream != stream: - LOG.error('%r: received DEL_ROUTE for %d from %r, expected %r', - self, target_id, stream, registered_stream) + self._log.error('received DEL_ROUTE for %d from %r, expected %r', + target_id, stream, registered_stream) return context = self.router.context_by_id(target_id, create=False) if context: - LOG.debug('%r: firing local disconnect for %r', self, context) + self._log.debug('firing local disconnect signal for %r', context) mitogen.core.fire(context, 'disconnect') - LOG.debug('%r: deleting route to %d via %r', self, target_id, stream) + self._log.debug('deleting route to %d via %r', target_id, stream) routes = self._routes_by_stream.get(stream) if routes: routes.discard(target_id) self.router.del_route(target_id) - if stream.remote_id != mitogen.parent_id: + if stream.protocol.remote_id != mitogen.parent_id: self._propagate_up(mitogen.core.DEL_ROUTE, target_id) self._propagate_down(mitogen.core.DEL_ROUTE, target_id) @@ -2033,7 +2292,7 @@ class Router(mitogen.core.Router): route_monitor = None def 
upgrade(self, importer, parent): - LOG.debug('%r.upgrade()', self) + LOG.debug('upgrading %r with capabilities to start new children', self) self.id_allocator = ChildIdAllocator(router=self) self.responder = ModuleForwarder( router=self, @@ -2051,16 +2310,17 @@ class Router(mitogen.core.Router): if msg.is_dead: return stream = self.stream_by_id(msg.src_id) - if stream.remote_id != msg.src_id or stream.detached: + if stream.protocol.remote_id != msg.src_id or stream.conn.detached: LOG.warning('bad DETACHING received on %r: %r', stream, msg) return LOG.debug('%r: marking as detached', stream) - stream.detached = True + stream.conn.detached = True msg.reply(None) def get_streams(self): """ - Return a snapshot of all streams in existence at time of call. + Return an atomic snapshot of all streams in existence at time of call. + This is safe to call from any thread. """ self._write_lock.acquire() try: @@ -2068,17 +2328,42 @@ class Router(mitogen.core.Router): finally: self._write_lock.release() + def disconnect(self, context): + """ + Disconnect a context and forget its stream, assuming the context is + directly connected. + """ + stream = self.stream_by_id(context) + if stream is None or stream.protocol.remote_id != context.context_id: + return + + l = mitogen.core.Latch() + mitogen.core.listen(stream, 'disconnect', l.put) + def disconnect(): + LOG.debug('Starting disconnect of %r', stream) + stream.on_disconnect(self.broker) + self.broker.defer(disconnect) + l.get() + def add_route(self, target_id, stream): """ - Arrange for messages whose `dst_id` is `target_id` to be forwarded on - the directly connected stream for `via_id`. This method is called - automatically in response to :data:`mitogen.core.ADD_ROUTE` messages, - but remains public while the design has not yet settled, and situations - may arise where routing is not fully automatic. + Arrange for messages whose `dst_id` is `target_id` to be forwarded on a + directly connected :class:`Stream`. Safe to call from any thread. + + This is called automatically by :class:`RouteMonitor` in response to + :data:`mitogen.core.ADD_ROUTE` messages, but remains public while the + design has not yet settled, and situations may arise where routing is + not fully automatic. + + :param int target_id: + Target context ID to add a route for. + :param mitogen.core.Stream stream: + Stream over which messages to the target should be routed. """ - LOG.debug('%r.add_route(%r, %r)', self, target_id, stream) + LOG.debug('%r: adding route to context %r via %r', + self, target_id, stream) assert isinstance(target_id, int) - assert isinstance(stream, Stream) + assert isinstance(stream, mitogen.core.Stream) self._write_lock.acquire() try: @@ -2087,7 +2372,20 @@ class Router(mitogen.core.Router): self._write_lock.release() def del_route(self, target_id): - LOG.debug('%r.del_route(%r)', self, target_id) + """ + Delete any route that exists for `target_id`. It is not an error to + delete a route that does not currently exist. Safe to call from any + thread. + + This is called automatically by :class:`RouteMonitor` in response to + :data:`mitogen.core.DEL_ROUTE` messages, but remains public while the + design has not yet settled, and situations may arise where routing is + not fully automatic. + + :param int target_id: + Target context ID to delete route for. 
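For example (editor's sketch; the ID and stream are illustrative):

    router.add_route(123, stream)   # messages for context 123 go via stream
    router.del_route(123)           # forget it again; never an error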
+ """ + LOG.debug('%r: deleting route to %r', self, target_id) # DEL_ROUTE may be sent by a parent if it knows this context sent # messages to a peer that has now disconnected, to let us raise # 'disconnect' event on the appropriate Context instance. In that case, @@ -2114,35 +2412,36 @@ class Router(mitogen.core.Router): connection_timeout_msg = u"Connection timed out." - def _connect(self, klass, name=None, **kwargs): + def _connect(self, klass, **kwargs): context_id = self.allocate_id() context = self.context_class(self, context_id) + context.name = kwargs.get('name') + kwargs['old_router'] = self kwargs['max_message_size'] = self.max_message_size - stream = klass(self, context_id, **kwargs) - if name is not None: - stream.name = name + conn = klass(klass.options_class(**kwargs), self) try: - stream.connect() + conn.connect(context=context) except mitogen.core.TimeoutError: raise mitogen.core.StreamError(self.connection_timeout_msg) - context.name = stream.name - self.route_monitor.notice_stream(stream) - self.register(context, stream) + return context def connect(self, method_name, name=None, **kwargs): - klass = stream_by_method_name(method_name) + if name: + name = mitogen.core.to_text(name) + + klass = get_connection_class(method_name) kwargs.setdefault(u'debug', self.debug) kwargs.setdefault(u'profiling', self.profiling) kwargs.setdefault(u'unidirectional', self.unidirectional) + kwargs.setdefault(u'name', name) via = kwargs.pop(u'via', None) if via is not None: - return self.proxy_connect(via, method_name, name=name, - **mitogen.core.Kwargs(kwargs)) - return self._connect(klass, name=name, - **mitogen.core.Kwargs(kwargs)) + return self.proxy_connect(via, method_name, + **mitogen.core.Kwargs(kwargs)) + return self._connect(klass, **mitogen.core.Kwargs(kwargs)) def proxy_connect(self, via_context, method_name, name=None, **kwargs): resp = via_context.call(_proxy_connect, @@ -2203,49 +2502,187 @@ class Router(mitogen.core.Router): return self.connect(u'ssh', **kwargs) -class ProcessMonitor(object): - """ - Install a :data:`signal.SIGCHLD` handler that generates callbacks when a - specific child process has exitted. This class is obsolete, do not use. - """ - def __init__(self): - # pid -> callback() - self.callback_by_pid = {} - signal.signal(signal.SIGCHLD, self._on_sigchld) +class Reaper(object): + """ + Asynchronous logic for reaping :class:`Process` objects. This is necessary + to prevent uncontrolled buildup of zombie processes in long-lived parents + that will eventually reach an OS limit, preventing creation of new threads + and processes, and to log the exit status of the child in the case of an + error. + + To avoid modifying process-global state such as with + :func:`signal.set_wakeup_fd` or installing a :data:`signal.SIGCHLD` handler + that might interfere with the user's ability to use those facilities, + Reaper polls for exit with backoff using timers installed on an associated + :class:`Broker`. + + :param mitogen.core.Broker broker: + The :class:`Broker` on which to install timers + :param mitogen.parent.Process proc: + The process to reap. + :param bool kill: + If :data:`True`, send ``SIGTERM`` and ``SIGKILL`` to the process. + :param bool wait_on_shutdown: + If :data:`True`, delay :class:`Broker` shutdown if child has not yet + exited. If :data:`False` simply forget the child. + """ + #: :class:`Timer` that invokes :meth:`reap` after some polling delay. 
+    _timer = None
+
+    def __init__(self, broker, proc, kill, wait_on_shutdown):
+        self.broker = broker
+        self.proc = proc
+        self.kill = kill
+        self.wait_on_shutdown = wait_on_shutdown
+        self._tries = 0
+
+    def _signal_child(self, signum):
+        # For processes like sudo we cannot actually send sudo a signal,
+        # because it is setuid, so this is best-effort only.
+        LOG.debug('%r: sending %s', self.proc, SIGNAL_BY_NUM[signum])
+        try:
+            os.kill(self.proc.pid, signum)
+        except OSError:
+            e = sys.exc_info()[1]
+            if e.args[0] != errno.EPERM:
+                raise
+
+    def _calc_delay(self, count):
+        """
+        Calculate a poll delay given that `count` attempts have already been
+        made. The constants have no principled basis; they simply produce
+        rapid but still relatively conservative retries.
+        """
+        delay = 0.05
+        for _ in xrange(count):
+            delay *= 1.72
+        return delay
+
+    def _on_broker_shutdown(self):
+        """
+        Respond to :class:`Broker` shutdown by cancelling the reap timer if
+        :attr:`Router.await_children_at_shutdown` is disabled. Otherwise
+        shutdown is delayed for up to :attr:`Broker.shutdown_timeout`, since
+        subprocesses may have no intention of exiting any time soon.
+        """
+        if not self.wait_on_shutdown:
+            self._timer.cancel()
+
+    def _install_timer(self, delay):
+        new = self._timer is None
+        self._timer = self.broker.timers.schedule(
+            when=mitogen.core.now() + delay,
+            func=self.reap,
+        )
+        if new:
+            mitogen.core.listen(self.broker, 'shutdown',
+                                self._on_broker_shutdown)

-    def _on_sigchld(self, _signum, _frame):
-        for pid, callback in self.callback_by_pid.items():
-            pid, status = os.waitpid(pid, os.WNOHANG)
-            if pid:
-                callback(status)
-                del self.callback_by_pid[pid]

+    def _remove_timer(self):
+        if self._timer and self._timer.active:
+            self._timer.cancel()
+            mitogen.core.unlisten(self.broker, 'shutdown',
+                                  self._on_broker_shutdown)

-    def add(self, pid, callback):
+    def reap(self):
         """
-        Add a callback function to be notified of the exit status of a process.
+        Reap the child process during disconnection.
+        """
+        status = self.proc.poll()
+        if status is not None:
+            LOG.debug('%r: %s', self.proc, returncode_to_str(status))
+            mitogen.core.fire(self.proc, 'exit')
+            self._remove_timer()
+            return

-        :param int pid:
-            Process ID to be notified of.
+        self._tries += 1
+        if self._tries > 20:
+            LOG.warning('%r: child will not exit, giving up', self)
+            self._remove_timer()
+            return
+
+        delay = self._calc_delay(self._tries - 1)
+        LOG.debug('%r still running after IO disconnect, recheck in %.03fs',
+                  self.proc, delay)
+        self._install_timer(delay)

-        :param callback:
-            Function invoked as `callback(status)`, where `status` is the raw
-            exit status of the child process.
+        if not self.kill:
+            pass
+        elif self._tries == 2:
+            self._signal_child(signal.SIGTERM)
+        elif self._tries == 6:  # roughly 4 seconds
+            self._signal_child(signal.SIGKILL)
+
+
+class Process(object):
+    """
+    Process objects provide a uniform interface to children created via
+    :mod:`subprocess` and :mod:`mitogen.fork`. This class is extended by
+    :class:`PopenProcess` and :class:`mitogen.fork.Process`.
+
+    :param int pid:
+        The process ID.
+    :param file stdin:
+        File object attached to standard input.
+    :param file stdout:
+        File object attached to standard output.
+    :param file stderr:
+        File object attached to standard error, or :data:`None`.
+    """
+    #: Name of the process used in logs. Set to the stream/context name by
+    #: :class:`Connection`.
+    name = None
+
+    def __init__(self, pid, stdin, stdout, stderr=None):
+        #: The process ID.
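        # Editorial sketch, not from the source: Reaper above needs only a
        # working poll(), so any Process subclass can be driven the same way::
        #
        #     status = proc.poll()
        #     if status is None:
        #         pass            # still running, poll again later
        #     elif status < 0:
        #         print('killed by signal %d' % (-status,))
        #     else:
        #         print('exited with status %d' % (status,))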
+ self.pid = pid + #: File object attached to standard input. + self.stdin = stdin + #: File object attached to standard output. + self.stdout = stdout + #: File object attached to standard error. + self.stderr = stderr + + def __repr__(self): + return '%s %s pid %d' % ( + type(self).__name__, + self.name, + self.pid, + ) + + def poll(self): """ - self.callback_by_pid[pid] = callback + Fetch the child process exit status, or :data:`None` if it is still + running. This should be overridden by subclasses. - _instance = None + :returns: + Exit status in the style of the :attr:`subprocess.Popen.returncode` + attribute, i.e. with signals represented by a negative integer. + """ + raise NotImplementedError() - @classmethod - def instance(cls): - if cls._instance is None: - cls._instance = cls() - return cls._instance + +class PopenProcess(Process): + """ + :class:`Process` subclass wrapping a :class:`subprocess.Popen` object. + + :param subprocess.Popen proc: + The subprocess. + """ + def __init__(self, proc, stdin, stdout, stderr=None): + super(PopenProcess, self).__init__(proc.pid, stdin, stdout, stderr) + #: The subprocess. + self.proc = proc + + def poll(self): + return self.proc.poll() class ModuleForwarder(object): """ - Respond to GET_MODULE requests in a slave by forwarding the request to our - parent context, or satisfying the request from our local Importer cache. + Respond to :data:`mitogen.core.GET_MODULE` requests in a child by + forwarding the request to our parent context, or satisfying the request + from our local Importer cache. """ def __init__(self, router, parent_context, importer): self.router = router @@ -2265,7 +2702,7 @@ class ModuleForwarder(object): ) def __repr__(self): - return 'ModuleForwarder(%r)' % (self.router,) + return 'ModuleForwarder' def _on_forward_module(self, msg): if msg.is_dead: @@ -2275,38 +2712,38 @@ class ModuleForwarder(object): fullname = mitogen.core.to_text(fullname) context_id = int(context_id_s) stream = self.router.stream_by_id(context_id) - if stream.remote_id == mitogen.parent_id: + if stream.protocol.remote_id == mitogen.parent_id: LOG.error('%r: dropping FORWARD_MODULE(%d, %r): no route to child', self, context_id, fullname) return - if fullname in stream.sent_modules: + if fullname in stream.protocol.sent_modules: return LOG.debug('%r._on_forward_module() sending %r to %r via %r', - self, fullname, context_id, stream.remote_id) + self, fullname, context_id, stream.protocol.remote_id) self._send_module_and_related(stream, fullname) - if stream.remote_id != context_id: - stream._send( + if stream.protocol.remote_id != context_id: + stream.protocol._send( mitogen.core.Message( data=msg.data, handle=mitogen.core.FORWARD_MODULE, - dst_id=stream.remote_id, + dst_id=stream.protocol.remote_id, ) ) def _on_get_module(self, msg): - LOG.debug('%r._on_get_module(%r)', self, msg) if msg.is_dead: return fullname = msg.data.decode('utf-8') + LOG.debug('%r: %s requested by context %d', self, fullname, msg.src_id) callback = lambda: self._on_cache_callback(msg, fullname) self.importer._request_module(fullname, callback) def _on_cache_callback(self, msg, fullname): - LOG.debug('%r._on_get_module(): sending %r', self, fullname) stream = self.router.stream_by_id(msg.src_id) + LOG.debug('%r: sending %s to %r', self, fullname, stream) self._send_module_and_related(stream, fullname) def _send_module_and_related(self, stream, fullname): @@ -2316,18 +2753,18 @@ class ModuleForwarder(object): if rtup: self._send_one_module(stream, rtup) else: - 
LOG.debug('%r._send_module_and_related(%r): absent: %r', - self, fullname, related) + LOG.debug('%r: %s not in cache (for %s)', + self, related, fullname) self._send_one_module(stream, tup) def _send_one_module(self, stream, tup): - if tup[0] not in stream.sent_modules: - stream.sent_modules.add(tup[0]) + if tup[0] not in stream.protocol.sent_modules: + stream.protocol.sent_modules.add(tup[0]) self.router._async_route( mitogen.core.Message.pickled( tup, - dst_id=stream.remote_id, + dst_id=stream.protocol.remote_id, handle=mitogen.core.LOAD_MODULE, ) ) diff --git a/ansible/plugins/mitogen-0.2.8-pre/mitogen/profiler.py b/ansible/plugins/mitogen-0.2.9/mitogen/profiler.py similarity index 96% rename from ansible/plugins/mitogen-0.2.8-pre/mitogen/profiler.py rename to ansible/plugins/mitogen-0.2.9/mitogen/profiler.py index 74bbdb235526c54f234fa2a9db5e7279397419ef..bbf6086ade6649b9f7a5c1bf76961284213ab028 100644 --- a/ansible/plugins/mitogen-0.2.8-pre/mitogen/profiler.py +++ b/ansible/plugins/mitogen-0.2.9/mitogen/profiler.py @@ -28,7 +28,8 @@ # !mitogen: minify_safe -"""mitogen.profiler +""" +mitogen.profiler Record and report cProfile statistics from a run. Creates one aggregated output file, one aggregate containing only workers, and one for the top-level process. @@ -56,28 +57,25 @@ Example: from __future__ import print_function import os import pstats -import cProfile import shutil import subprocess import sys import tempfile import time -import mitogen.core - def try_merge(stats, path): try: stats.add(path) return True except Exception as e: - print('Failed. Race? Will retry. %s' % (e,)) + print('%s failed. Will retry. %s' % (path, e)) return False def merge_stats(outpath, inpaths): first, rest = inpaths[0], inpaths[1:] - for x in range(5): + for x in range(1): try: stats = pstats.Stats(first) except EOFError: @@ -152,7 +150,7 @@ def do_stat(tmpdir, sort, *args): def main(): if len(sys.argv) < 2 or sys.argv[1] not in ('record', 'report', 'stat'): - sys.stderr.write(__doc__) + sys.stderr.write(__doc__.lstrip()) sys.exit(1) func = globals()['do_' + sys.argv[1]] diff --git a/ansible/plugins/mitogen-0.2.8-pre/mitogen/select.py b/ansible/plugins/mitogen-0.2.9/mitogen/select.py similarity index 86% rename from ansible/plugins/mitogen-0.2.8-pre/mitogen/select.py rename to ansible/plugins/mitogen-0.2.9/mitogen/select.py index 51aebc227eb25d6a0f4e6497cee69a3ccfaabef9..2d87574f3e9cc3e844c13ec1e76c2e45aef07661 100644 --- a/ansible/plugins/mitogen-0.2.8-pre/mitogen/select.py +++ b/ansible/plugins/mitogen-0.2.9/mitogen/select.py @@ -57,9 +57,7 @@ class Select(object): If `oneshot` is :data:`True`, then remove each receiver as it yields a result; since :meth:`__iter__` terminates once the final receiver is - removed, this makes it convenient to respond to calls made in parallel: - - .. code-block:: python + removed, this makes it convenient to respond to calls made in parallel:: total = 0 recvs = [c.call_async(long_running_operation) for c in contexts] @@ -98,7 +96,7 @@ class Select(object): for msg in mitogen.select.Select(selects): print(msg.unpickle()) - :class:`Select` may be used to mix inter-thread and inter-process IO: + :class:`Select` may be used to mix inter-thread and inter-process IO:: latch = mitogen.core.Latch() start_thread(latch) @@ -124,9 +122,10 @@ class Select(object): @classmethod def all(cls, receivers): """ - Take an iterable of receivers and retrieve a :class:`Message` from - each, returning the result of calling `msg.unpickle()` on each in turn. 
- Results are returned in the order they arrived. + Take an iterable of receivers and retrieve a :class:`Message + <mitogen.core.Message>` from each, returning the result of calling + :meth:`Message.unpickle() <mitogen.core.Message.unpickle>` on each in + turn. Results are returned in the order they arrived. This is sugar for handling batch :meth:`Context.call_async <mitogen.parent.Context.call_async>` invocations: @@ -226,8 +225,15 @@ class Select(object): raise Error(self.owned_msg) recv.notify = self._put - # Avoid race by polling once after installation. - if not recv.empty(): + # After installing the notify function, _put() will potentially begin + # receiving calls from other threads immediately, but not for items + # they already had buffered. For those we call _put(), possibly + # duplicating the effect of other _put() being made concurrently, such + # that the Select ends up with more items in its buffer than exist in + # the underlying receivers. We handle the possibility of receivers + # marked notified yet empty inside Select.get(), so this should be + # robust. + for _ in range(recv.size()): self._put(recv) not_present_msg = 'Instance is not a member of this Select' @@ -261,18 +267,26 @@ class Select(object): self.remove(recv) self._latch.close() - def empty(self): + def size(self): + """ + Return the number of items currently buffered. + + As with :class:`Queue.Queue`, `0` may be returned even though a + subsequent call to :meth:`get` will succeed, since a message may be + posted at any moment between :meth:`size` and :meth:`get`. + + As with :class:`Queue.Queue`, `>0` may be returned even though a + subsequent call to :meth:`get` will block, since another waiting thread + may be woken at any moment between :meth:`size` and :meth:`get`. """ - Return :data:`True` if calling :meth:`get` would block. + return sum(recv.size() for recv in self._receivers) - As with :class:`Queue.Queue`, :data:`True` may be returned even though - a subsequent call to :meth:`get` will succeed, since a message may be - posted at any moment between :meth:`empty` and :meth:`get`. + def empty(self): + """ + Return `size() == 0`. - :meth:`empty` may return :data:`False` even when :meth:`get` would - block if another thread has drained a receiver added to this select. - This can be avoided by only consuming each receiver from a single - thread. + .. deprecated:: 0.2.8 + Use :meth:`size` instead. """ return self._latch.empty() @@ -310,13 +324,13 @@ class Select(object): if not self._receivers: raise Error(self.empty_msg) - event = Event() while True: recv = self._latch.get(timeout=timeout, block=block) try: if isinstance(recv, Select): event = recv.get_event(block=False) else: + event = Event() event.source = recv event.data = recv.get(block=False) if self._oneshot: @@ -329,5 +343,6 @@ class Select(object): # A receiver may have been queued with no result if another # thread drained it before we woke up, or because another # thread drained it between add() calling recv.empty() and - # self._put(). In this case just sleep again. + # self._put(), or because Select.add() caused duplicate _put() + # calls. In this case simply retry. 
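                # Editorial sketch, not from the source: because of these
                # benign races, a non-blocking consumer must treat a timeout
                # as "nothing buffered right now" rather than an error::
                #
                #     while True:
                #         try:
                #             event = sel.get_event(block=False)
                #         except mitogen.core.TimeoutError:
                #             break
                #         handle(event.source, event.data)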
continue diff --git a/ansible/plugins/mitogen-0.2.8-pre/mitogen/service.py b/ansible/plugins/mitogen-0.2.9/mitogen/service.py similarity index 87% rename from ansible/plugins/mitogen-0.2.8-pre/mitogen/service.py rename to ansible/plugins/mitogen-0.2.9/mitogen/service.py index 942ed4f7f4ef8885336c36ba6beac696b5378727..6bd64eb0c03ed0d1676ad7354bcd11e391badafc 100644 --- a/ansible/plugins/mitogen-0.2.8-pre/mitogen/service.py +++ b/ansible/plugins/mitogen-0.2.9/mitogen/service.py @@ -29,6 +29,7 @@ # !mitogen: minify_safe import grp +import logging import os import os.path import pprint @@ -36,12 +37,10 @@ import pwd import stat import sys import threading -import time import mitogen.core import mitogen.select from mitogen.core import b -from mitogen.core import LOG from mitogen.core import str_rpartition try: @@ -54,7 +53,8 @@ except NameError: return True -DEFAULT_POOL_SIZE = 16 +LOG = logging.getLogger(__name__) + _pool = None _pool_pid = None #: Serialize pool construction. @@ -77,19 +77,51 @@ else: def get_or_create_pool(size=None, router=None): global _pool global _pool_pid - _pool_lock.acquire() - try: - if _pool_pid != os.getpid(): - _pool = Pool(router, [], size=size or DEFAULT_POOL_SIZE, - overwrite=True) - # In case of Broker shutdown crash, Pool can cause 'zombie' - # processes. - mitogen.core.listen(router.broker, 'shutdown', - lambda: _pool.stop(join=False)) - _pool_pid = os.getpid() - return _pool - finally: - _pool_lock.release() + + my_pid = os.getpid() + if _pool is None or _pool.closed or my_pid != _pool_pid: + # Avoid acquiring heavily contended lock if possible. + _pool_lock.acquire() + try: + if _pool_pid != my_pid: + _pool = Pool( + router, + services=[], + size=size or 2, + overwrite=True, + recv=mitogen.core.Dispatcher._service_recv, + ) + # In case of Broker shutdown crash, Pool can cause 'zombie' + # processes. + mitogen.core.listen(router.broker, 'shutdown', + lambda: _pool.stop(join=True)) + _pool_pid = os.getpid() + finally: + _pool_lock.release() + + return _pool + + +def get_thread_name(): + return threading.currentThread().getName() + + +def call(service_name, method_name, call_context=None, **kwargs): + """ + Call a service registered with this pool, using the calling thread as a + host. 
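
    A hedged usage sketch (editorial; the service name is hypothetical and
    ``remote_ctx`` is any :class:`mitogen.parent.Context`)::

        mitogen.service.call(
            u'mypkg.services.Echo',   # registered with the remote pool
            'echo',
            call_context=remote_ctx,  # dispatches via call_service()
            text=u'hi',
        )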
+ """ + if isinstance(service_name, mitogen.core.BytesType): + service_name = service_name.encode('utf-8') + elif not isinstance(service_name, mitogen.core.UnicodeType): + service_name = service_name.name() # Service.name() + + if call_context: + return call_context.call_service(service_name, method_name, **kwargs) + else: + pool = get_or_create_pool() + invoker = pool.get_invoker(service_name, msg=None) + return getattr(invoker.service, method_name)(**kwargs) def validate_arg_spec(spec, args): @@ -239,12 +271,13 @@ class Invoker(object): if not policies: raise mitogen.core.CallError('Method has no policies set.') - if not all(p.is_authorized(self.service, msg) for p in policies): - raise mitogen.core.CallError( - self.unauthorized_msg, - method_name, - self.service.name() - ) + if msg is not None: + if not all(p.is_authorized(self.service, msg) for p in policies): + raise mitogen.core.CallError( + self.unauthorized_msg, + method_name, + self.service.name() + ) required = getattr(method, 'mitogen_service__arg_spec', {}) validate_arg_spec(required, kwargs) @@ -264,7 +297,7 @@ class Invoker(object): except Exception: if no_reply: LOG.exception('While calling no-reply method %s.%s', - type(self.service).__name__, + self.service.name(), func_name(method)) else: raise @@ -445,13 +478,19 @@ class Pool(object): program's configuration or its input data. :param mitogen.core.Router router: - Router to listen for ``CALL_SERVICE`` messages on. + :class:`mitogen.core.Router` to listen for + :data:`mitogen.core.CALL_SERVICE` messages. :param list services: Initial list of services to register. + :param mitogen.core.Receiver recv: + :data:`mitogen.core.CALL_SERVICE` receiver to reuse. This is used by + :func:`get_or_create_pool` to hand off a queue of messages from the + Dispatcher stub handler while avoiding a race. """ activator_class = Activator - def __init__(self, router, services=(), size=1, overwrite=False): + def __init__(self, router, services=(), size=1, overwrite=False, + recv=None): self.router = router self._activator = self.activator_class() self._ipc_latch = mitogen.core.Latch() @@ -472,12 +511,22 @@ class Pool(object): } self._invoker_by_name = {} + if recv is not None: + # When inheriting from mitogen.core.Dispatcher, we must remove its + # stub notification function before adding it to our Select. We + # always overwrite this receiver since the standard service.Pool + # handler policy differs from the one inherited from + # core.Dispatcher. + recv.notify = None + self._select.add(recv) + self._func_by_source[recv] = self._on_service_call + for service in services: self.add(service) self._py_24_25_compat() self._threads = [] for x in range(size): - name = 'mitogen.service.Pool.%x.worker-%d' % (id(self), x,) + name = 'mitogen.Pool.%04x.%d' % (id(self) & 0xffff, x,) thread = threading.Thread( name=name, target=mitogen.core._profile_hook, @@ -485,7 +534,6 @@ class Pool(object): ) thread.start() self._threads.append(thread) - LOG.debug('%r: initialized', self) def _py_24_25_compat(self): @@ -524,15 +572,18 @@ class Pool(object): invoker.service.on_shutdown() def get_invoker(self, name, msg): - self._lock.acquire() - try: - invoker = self._invoker_by_name.get(name) - if not invoker: - service = self._activator.activate(self, name, msg) - invoker = service.invoker_class(service=service) - self._invoker_by_name[name] = invoker - finally: - self._lock.release() + invoker = self._invoker_by_name.get(name) + if invoker is None: + # Avoid acquiring lock if possible. 
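            # Editorial note: this is the classic double-checked locking
            # shape; the GIL makes the unlocked read safe, and the locked
            # re-check prevents two threads activating the same service.
            # Generic sketch, not library code::
            #
            #     value = cache.get(key)
            #     if value is None:
            #         with lock:
            #             value = cache.get(key)
            #             if value is None:
            #                 value = cache[key] = expensive(key)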
+ self._lock.acquire() + try: + invoker = self._invoker_by_name.get(name) + if not invoker: + service = self._activator.activate(self, name, msg) + invoker = service.invoker_class(service=service) + self._invoker_by_name[name] = invoker + finally: + self._lock.release() return invoker @@ -582,9 +633,12 @@ class Pool(object): while not self.closed: try: event = self._select.get_event() - except (mitogen.core.ChannelError, mitogen.core.LatchError): - e = sys.exc_info()[1] - LOG.debug('%r: channel or latch closed, exitting: %s', self, e) + except mitogen.core.LatchError: + LOG.debug('thread %s exiting gracefully', get_thread_name()) + return + except mitogen.core.ChannelError: + LOG.debug('thread %s exiting with error: %s', + get_thread_name(), sys.exc_info()[1]) return func = self._func_by_source[event.source] @@ -597,16 +651,14 @@ class Pool(object): try: self._worker_run() except Exception: - th = threading.currentThread() - LOG.exception('%r: worker %r crashed', self, th.getName()) + LOG.exception('%r: worker %r crashed', self, get_thread_name()) raise def __repr__(self): - th = threading.currentThread() - return 'mitogen.service.Pool(%#x, size=%d, th=%r)' % ( - id(self), + return 'Pool(%04x, size=%d, th=%r)' % ( + id(self) & 0xffff, len(self._threads), - th.getName(), + get_thread_name(), ) @@ -658,10 +710,12 @@ class PushFileService(Service): def _forward(self, context, path): stream = self.router.stream_by_id(context.context_id) - child = mitogen.core.Context(self.router, stream.remote_id) + child = self.router.context_by_id(stream.protocol.remote_id) sent = self._sent_by_stream.setdefault(stream, set()) if path in sent: if child.context_id != context.context_id: + LOG.debug('requesting %s forward small file to %s: %s', + child, context, path) child.call_service_async( service_name=self.name(), method_name='forward', @@ -669,6 +723,8 @@ class PushFileService(Service): context=context ).close() else: + LOG.debug('requesting %s cache and forward small file to %s: %s', + child, context, path) child.call_service_async( service_name=self.name(), method_name='store_and_forward', @@ -691,7 +747,7 @@ class PushFileService(Service): """ for path in paths: self.propagate_to(context, mitogen.core.to_text(path)) - self.router.responder.forward_modules(context, modules) + #self.router.responder.forward_modules(context, modules) TODO @expose(policy=AllowParents()) @arg_spec({ @@ -699,8 +755,8 @@ class PushFileService(Service): 'path': mitogen.core.FsPathTypes, }) def propagate_to(self, context, path): - LOG.debug('%r.propagate_to(%r, %r)', self, context, path) if path not in self._cache: + LOG.debug('caching small file %s', path) fp = open(path, 'rb') try: self._cache[path] = mitogen.core.Blob(fp.read()) @@ -718,7 +774,7 @@ class PushFileService(Service): def store_and_forward(self, path, data, context): LOG.debug('%r.store_and_forward(%r, %r, %r) %r', self, path, data, context, - threading.currentThread().getName()) + get_thread_name()) self._lock.acquire() try: self._cache[path] = data @@ -891,7 +947,7 @@ class FileService(Service): # The IO loop pumps 128KiB chunks. An ideal message is a multiple of this, # odd-sized messages waste one tiny write() per message on the trailer. # Therefore subtract 10 bytes pickle overhead + 24 bytes header. 
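    # Editorial sketch of the computation below: the overhead is measured
    # empirically by pickling a CHUNK_SIZE payload and subtracting, so any
    # change to the wire format is picked up automatically::
    #
    #     payload = mitogen.core.Blob(b' ' * mitogen.core.CHUNK_SIZE)
    #     pickled = mitogen.core.Message.pickled(payload)
    #     overhead = (mitogen.core.Message.HEADER_LEN
    #                 + len(pickled.data) - mitogen.core.CHUNK_SIZE)
    #     IO_SIZE = mitogen.core.CHUNK_SIZE - overhead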
- IO_SIZE = mitogen.core.CHUNK_SIZE - (mitogen.core.Stream.HEADER_LEN + ( + IO_SIZE = mitogen.core.CHUNK_SIZE - (mitogen.core.Message.HEADER_LEN + ( len( mitogen.core.Message.pickled( mitogen.core.Blob(b(' ') * mitogen.core.CHUNK_SIZE) @@ -965,7 +1021,11 @@ class FileService(Service): :raises Error: Unregistered path, or Sender did not match requestee context. """ - if path not in self._paths and not self._prefix_is_authorized(path): + if ( + (path not in self._paths) and + (not self._prefix_is_authorized(path)) and + (not mitogen.core._has_parent_authority(msg.auth_id)) + ): msg.reply(mitogen.core.CallError( Error(self.unregistered_msg % (path,)) )) @@ -1047,7 +1107,7 @@ class FileService(Service): :meth:`fetch`. """ LOG.debug('get_file(): fetching %r from %r', path, context) - t0 = time.time() + t0 = mitogen.core.now() recv = mitogen.core.Receiver(router=context.router) metadata = context.call_service( service_name=cls.name(), @@ -1081,5 +1141,6 @@ class FileService(Service): path, metadata['size'], received_bytes) LOG.debug('target.get_file(): fetched %d bytes of %r from %r in %dms', - metadata['size'], path, context, 1000 * (time.time() - t0)) + metadata['size'], path, context, + 1000 * (mitogen.core.now() - t0)) return ok, metadata diff --git a/ansible/plugins/mitogen-0.2.8-pre/mitogen/setns.py b/ansible/plugins/mitogen-0.2.9/mitogen/setns.py similarity index 83% rename from ansible/plugins/mitogen-0.2.8-pre/mitogen/setns.py rename to ansible/plugins/mitogen-0.2.9/mitogen/setns.py index b1d6978383e56e473c00c332557ce9aca9d69c7a..46a50301d8f5cbece89132c4cb1b8dfe04ce5e84 100644 --- a/ansible/plugins/mitogen-0.2.8-pre/mitogen/setns.py +++ b/ansible/plugins/mitogen-0.2.9/mitogen/setns.py @@ -116,9 +116,15 @@ def get_machinectl_pid(path, name): raise Error("could not find PID from machinectl output.\n%s", output) -class Stream(mitogen.parent.Stream): - child_is_immediate_subprocess = False +GET_LEADER_BY_KIND = { + 'docker': ('docker_path', get_docker_pid), + 'lxc': ('lxc_info_path', get_lxc_pid), + 'lxd': ('lxc_path', get_lxd_pid), + 'machinectl': ('machinectl_path', get_machinectl_pid), +} + +class Options(mitogen.parent.Options): container = None username = 'root' kind = None @@ -128,24 +134,17 @@ class Stream(mitogen.parent.Stream): lxc_info_path = 'lxc-info' machinectl_path = 'machinectl' - GET_LEADER_BY_KIND = { - 'docker': ('docker_path', get_docker_pid), - 'lxc': ('lxc_info_path', get_lxc_pid), - 'lxd': ('lxc_path', get_lxd_pid), - 'machinectl': ('machinectl_path', get_machinectl_pid), - } - - def construct(self, container, kind, username=None, docker_path=None, - lxc_path=None, lxc_info_path=None, machinectl_path=None, - **kwargs): - super(Stream, self).construct(**kwargs) - if kind not in self.GET_LEADER_BY_KIND: + def __init__(self, container, kind, username=None, docker_path=None, + lxc_path=None, lxc_info_path=None, machinectl_path=None, + **kwargs): + super(Options, self).__init__(**kwargs) + if kind not in GET_LEADER_BY_KIND: raise Error('unsupported container kind: %r', kind) - self.container = container + self.container = mitogen.core.to_text(container) self.kind = kind if username: - self.username = username + self.username = mitogen.core.to_text(username) if docker_path: self.docker_path = docker_path if lxc_path: @@ -155,6 +154,11 @@ class Stream(mitogen.parent.Stream): if machinectl_path: self.machinectl_path = machinectl_path + +class Connection(mitogen.parent.Connection): + options_class = Options + child_is_immediate_subprocess = False + # Order matters. 
https://github.com/karelzak/util-linux/commit/854d0fe/ NS_ORDER = ('ipc', 'uts', 'net', 'pid', 'mnt', 'user') @@ -189,15 +193,15 @@ class Stream(mitogen.parent.Stream): try: os.setgroups([grent.gr_gid for grent in grp.getgrall() - if self.username in grent.gr_mem]) - pwent = pwd.getpwnam(self.username) + if self.options.username in grent.gr_mem]) + pwent = pwd.getpwnam(self.options.username) os.setreuid(pwent.pw_uid, pwent.pw_uid) # shadow-4.4/libmisc/setupenv.c. Not done: MAIL, PATH os.environ.update({ 'HOME': pwent.pw_dir, 'SHELL': pwent.pw_shell or '/bin/sh', - 'LOGNAME': self.username, - 'USER': self.username, + 'LOGNAME': self.options.username, + 'USER': self.options.username, }) if ((os.path.exists(pwent.pw_dir) and os.access(pwent.pw_dir, os.X_OK))): @@ -217,7 +221,7 @@ class Stream(mitogen.parent.Stream): # namespaces, meaning starting new threads in the exec'd program will # fail. The solution is forking, so inject a /bin/sh call to achieve # this. - argv = super(Stream, self).get_boot_command() + argv = super(Connection, self).get_boot_command() # bash will exec() if a single command was specified and the shell has # nothing left to do, so "; exit $?" gives bash a reason to live. return ['/bin/sh', '-c', '%s; exit $?' % (mitogen.parent.Argv(argv),)] @@ -226,13 +230,12 @@ class Stream(mitogen.parent.Stream): return mitogen.parent.create_child(args, preexec_fn=self.preexec_fn) def _get_name(self): - return u'setns.' + self.container + return u'setns.' + self.options.container - def connect(self): - self.name = self._get_name() - attr, func = self.GET_LEADER_BY_KIND[self.kind] - tool_path = getattr(self, attr) - self.leader_pid = func(tool_path, self.container) + def connect(self, **kwargs): + attr, func = GET_LEADER_BY_KIND[self.options.kind] + tool_path = getattr(self.options, attr) + self.leader_pid = func(tool_path, self.options.container) LOG.debug('Leader PID for %s container %r: %d', - self.kind, self.container, self.leader_pid) - super(Stream, self).connect() + self.options.kind, self.options.container, self.leader_pid) + return super(Connection, self).connect(**kwargs) diff --git a/ansible/plugins/mitogen-0.2.9/mitogen/ssh.py b/ansible/plugins/mitogen-0.2.9/mitogen/ssh.py new file mode 100644 index 0000000000000000000000000000000000000000..b276dd28e46b94de6c74ceb65ac056d6e3914604 --- /dev/null +++ b/ansible/plugins/mitogen-0.2.9/mitogen/ssh.py @@ -0,0 +1,294 @@ +# Copyright 2019, David Wilson +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. Neither the name of the copyright holder nor the names of its contributors +# may be used to endorse or promote products derived from this software without +# specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. + +# !mitogen: minify_safe + +""" +Construct new children via the OpenSSH client. +""" + +import logging +import re + +try: + from shlex import quote as shlex_quote +except ImportError: + from pipes import quote as shlex_quote + +import mitogen.parent +from mitogen.core import b + +try: + any +except NameError: + from mitogen.core import any + + +LOG = logging.getLogger(__name__) + +auth_incorrect_msg = 'SSH authentication is incorrect' +password_incorrect_msg = 'SSH password is incorrect' +password_required_msg = 'SSH password was requested, but none specified' +hostkey_config_msg = ( + 'SSH requested permission to accept unknown host key, but ' + 'check_host_keys=ignore. This is likely due to ssh_args= ' + 'conflicting with check_host_keys=. Please correct your ' + 'configuration.' +) +hostkey_failed_msg = ( + 'Host key checking is enabled, and SSH reported an unrecognized or ' + 'mismatching host key.' +) + +# sshpass uses 'assword' because it doesn't lowercase the input. +PASSWORD_PROMPT_PATTERN = re.compile( + b('password'), + re.I +) + +HOSTKEY_REQ_PATTERN = re.compile( + b(r'are you sure you want to continue connecting \(yes/no\)\?'), + re.I +) + +HOSTKEY_FAIL_PATTERN = re.compile( + b(r'host key verification failed\.'), + re.I +) + +# [user@host: ] permission denied +# issue #271: work around conflict with user shell reporting 'permission +# denied' e.g. during chdir($HOME) by only matching it at the start of the +# line. +PERMDENIED_PATTERN = re.compile( + b('^(?:[^@]+@[^:]+: )?' # Absent in OpenSSH <7.5 + 'Permission denied'), + re.I +) + +DEBUG_PATTERN = re.compile(b('^debug[123]:')) + + +class PasswordError(mitogen.core.StreamError): + pass + + +class HostKeyError(mitogen.core.StreamError): + pass + + +class SetupProtocol(mitogen.parent.RegexProtocol): + """ + This protocol is attached to stderr of the SSH client. It responds to + various interactive prompts as required. + """ + password_sent = False + + def _on_host_key_request(self, line, match): + if self.stream.conn.options.check_host_keys == 'accept': + LOG.debug('%s: accepting host key', self.stream.name) + self.stream.transmit_side.write(b('yes\n')) + return + + # _host_key_prompt() should never be reached with ignore or enforce + # mode, SSH should have handled that. User's ssh_args= is conflicting + # with ours. 
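        # Editorial aside, not from the source: the anchored
        # PERMDENIED_PATTERN above accepts the optional 'user@host: ' prefix
        # printed by OpenSSH >= 7.5 while ignoring mid-line shell noise::
        #
        #     assert PERMDENIED_PATTERN.search(b'Permission denied (publickey).')
        #     assert PERMDENIED_PATTERN.search(b'u@h: Permission denied')
        #     assert not PERMDENIED_PATTERN.search(b'cd: Permission denied')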
+        self.stream.conn._fail_connection(HostKeyError(hostkey_config_msg))
+
+    def _on_host_key_failed(self, line, match):
+        self.stream.conn._fail_connection(HostKeyError(hostkey_failed_msg))
+
+    def _on_permission_denied(self, line, match):
+        if self.stream.conn.options.password is not None and \
+                self.password_sent:
+            self.stream.conn._fail_connection(
+                PasswordError(password_incorrect_msg)
+            )
+        elif PASSWORD_PROMPT_PATTERN.search(line) and \
+                self.stream.conn.options.password is None:
+            # Permission denied (password,pubkey)
+            self.stream.conn._fail_connection(
+                PasswordError(password_required_msg)
+            )
+        else:
+            self.stream.conn._fail_connection(
+                PasswordError(auth_incorrect_msg)
+            )
+
+    def _on_password_prompt(self, line, match):
+        LOG.debug('%s: (password prompt): %s', self.stream.name, line)
+        if self.stream.conn.options.password is None:
+            self.stream.conn._fail_connection(
+                PasswordError(password_required_msg)
+            )
+            return
+
+        self.stream.transmit_side.write(
+            (self.stream.conn.options.password + '\n').encode('utf-8')
+        )
+        self.password_sent = True
+
+    def _on_debug_line(self, line, match):
+        text = mitogen.core.to_text(line.rstrip())
+        LOG.debug('%s: %s', self.stream.name, text)
+
+    PATTERNS = [
+        (DEBUG_PATTERN, _on_debug_line),
+        (HOSTKEY_FAIL_PATTERN, _on_host_key_failed),
+        (PERMDENIED_PATTERN, _on_permission_denied),
+    ]
+
+    PARTIAL_PATTERNS = [
+        (PASSWORD_PROMPT_PATTERN, _on_password_prompt),
+        (HOSTKEY_REQ_PATTERN, _on_host_key_request),
+    ]
+
+
+class Options(mitogen.parent.Options):
+    #: Default to whatever is available as 'python' on the remote machine,
+    #: overriding sys.executable use.
+    python_path = 'python'
+
+    #: Number of -v invocations to pass on command line.
+    ssh_debug_level = 0
+
+    #: The path to the SSH binary.
+    ssh_path = 'ssh'
+
+    hostname = None
+    username = None
+    port = None
+    identity_file = None
+    password = None
+    ssh_args = None
+
+    check_host_keys_msg = 'check_host_keys= must be set to accept, enforce or ignore'
+
+    def __init__(self, hostname, username=None, ssh_path=None, port=None,
+                 check_host_keys='enforce', password=None, identity_file=None,
+                 compression=True, ssh_args=None, keepalive_enabled=True,
+                 keepalive_count=3, keepalive_interval=15,
+                 identities_only=True, ssh_debug_level=None, **kwargs):
+        super(Options, self).__init__(**kwargs)
+
+        if check_host_keys not in ('accept', 'enforce', 'ignore'):
+            raise ValueError(self.check_host_keys_msg)
+
+        self.hostname = hostname
+        self.username = username
+        self.port = port
+        self.check_host_keys = check_host_keys
+        self.password = password
+        self.identity_file = identity_file
+        self.identities_only = identities_only
+        self.compression = compression
+        self.keepalive_enabled = keepalive_enabled
+        self.keepalive_count = keepalive_count
+        self.keepalive_interval = keepalive_interval
+        if ssh_path:
+            self.ssh_path = ssh_path
+        if ssh_args:
+            self.ssh_args = ssh_args
+        if ssh_debug_level:
+            self.ssh_debug_level = ssh_debug_level
+
+
+class Connection(mitogen.parent.Connection):
+    options_class = Options
+    diag_protocol_class = SetupProtocol
+
+    child_is_immediate_subprocess = False
+
+    def _get_name(self):
+        s = u'ssh.' + mitogen.core.to_text(self.options.hostname)
+        if self.options.port and self.options.port != 22:
+            s += u':%s' % (self.options.port,)
+        return s
+
+    def _requires_pty(self):
+        """
+        Return :data:`True` if a PTY is required for this configuration,
+        because it must interactively accept host keys or type a password.
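
        Illustration (editorial, derived directly from the expression
        below)::

            check_host_keys='accept',  password=None      -> PTY required
            check_host_keys='enforce', password=None      -> no PTY
            check_host_keys='ignore',  password='secret'  -> PTY required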
+ """ + return ( + self.options.check_host_keys == 'accept' or + self.options.password is not None + ) + + def create_child(self, **kwargs): + """ + Avoid PTY use when possible to avoid a scaling limitation. + """ + if self._requires_pty(): + return mitogen.parent.hybrid_tty_create_child(**kwargs) + else: + return mitogen.parent.create_child(stderr_pipe=True, **kwargs) + + def get_boot_command(self): + bits = [self.options.ssh_path] + if self.options.ssh_debug_level: + bits += ['-' + ('v' * min(3, self.options.ssh_debug_level))] + else: + # issue #307: suppress any login banner, as it may contain the + # password prompt, and there is no robust way to tell the + # difference. + bits += ['-o', 'LogLevel ERROR'] + if self.options.username: + bits += ['-l', self.options.username] + if self.options.port is not None: + bits += ['-p', str(self.options.port)] + if self.options.identities_only and (self.options.identity_file or + self.options.password): + bits += ['-o', 'IdentitiesOnly yes'] + if self.options.identity_file: + bits += ['-i', self.options.identity_file] + if self.options.compression: + bits += ['-o', 'Compression yes'] + if self.options.keepalive_enabled: + bits += [ + '-o', 'ServerAliveInterval %s' % ( + self.options.keepalive_interval, + ), + '-o', 'ServerAliveCountMax %s' % ( + self.options.keepalive_count, + ), + ] + if not self._requires_pty(): + bits += ['-o', 'BatchMode yes'] + if self.options.check_host_keys == 'enforce': + bits += ['-o', 'StrictHostKeyChecking yes'] + if self.options.check_host_keys == 'accept': + bits += ['-o', 'StrictHostKeyChecking ask'] + elif self.options.check_host_keys == 'ignore': + bits += [ + '-o', 'StrictHostKeyChecking no', + '-o', 'UserKnownHostsFile /dev/null', + '-o', 'GlobalKnownHostsFile /dev/null', + ] + if self.options.ssh_args: + bits += self.options.ssh_args + bits.append(self.options.hostname) + base = super(Connection, self).get_boot_command() + return bits + [shlex_quote(s).strip() for s in base] diff --git a/ansible/plugins/mitogen-0.2.9/mitogen/su.py b/ansible/plugins/mitogen-0.2.9/mitogen/su.py new file mode 100644 index 0000000000000000000000000000000000000000..080c978293e701c81e174ecde61088a6962eebbc --- /dev/null +++ b/ansible/plugins/mitogen-0.2.9/mitogen/su.py @@ -0,0 +1,160 @@ +# Copyright 2019, David Wilson +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# 1. Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# +# 2. Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# 3. Neither the name of the copyright holder nor the names of its contributors +# may be used to endorse or promote products derived from this software without +# specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. + +# !mitogen: minify_safe + +import logging +import re + +import mitogen.core +import mitogen.parent + +try: + any +except NameError: + from mitogen.core import any + + +LOG = logging.getLogger(__name__) + +password_incorrect_msg = 'su password is incorrect' +password_required_msg = 'su password is required' + + +class PasswordError(mitogen.core.StreamError): + pass + + +class SetupBootstrapProtocol(mitogen.parent.BootstrapProtocol): + password_sent = False + + def setup_patterns(self, conn): + """ + su options cause the regexes used to vary. This is a mess, requires + reworking. + """ + incorrect_pattern = re.compile( + mitogen.core.b('|').join( + re.escape(s.encode('utf-8')) + for s in conn.options.incorrect_prompts + ), + re.I + ) + prompt_pattern = re.compile( + re.escape( + conn.options.password_prompt.encode('utf-8') + ), + re.I + ) + + self.PATTERNS = mitogen.parent.BootstrapProtocol.PATTERNS + [ + (incorrect_pattern, type(self)._on_password_incorrect), + ] + self.PARTIAL_PATTERNS = mitogen.parent.BootstrapProtocol.PARTIAL_PATTERNS + [ + (prompt_pattern, type(self)._on_password_prompt), + ] + + def _on_password_prompt(self, line, match): + LOG.debug('%r: (password prompt): %r', + self.stream.name, line.decode('utf-8', 'replace')) + + if self.stream.conn.options.password is None: + self.stream.conn._fail_connection( + PasswordError(password_required_msg) + ) + return + + if self.password_sent: + self.stream.conn._fail_connection( + PasswordError(password_incorrect_msg) + ) + return + + self.stream.transmit_side.write( + (self.stream.conn.options.password + '\n').encode('utf-8') + ) + self.password_sent = True + + def _on_password_incorrect(self, line, match): + self.stream.conn._fail_connection( + PasswordError(password_incorrect_msg) + ) + + +class Options(mitogen.parent.Options): + username = u'root' + password = None + su_path = 'su' + password_prompt = u'password:' + incorrect_prompts = ( + u'su: sorry', # BSD + u'su: authentication failure', # Linux + u'su: incorrect password', # CentOS 6 + u'authentication is denied', # AIX + ) + + def __init__(self, username=None, password=None, su_path=None, + password_prompt=None, incorrect_prompts=None, **kwargs): + super(Options, self).__init__(**kwargs) + if username is not None: + self.username = mitogen.core.to_text(username) + if password is not None: + self.password = mitogen.core.to_text(password) + if su_path is not None: + self.su_path = su_path + if password_prompt is not None: + self.password_prompt = password_prompt + if incorrect_prompts is not None: + self.incorrect_prompts = [ + mitogen.core.to_text(p) + for p in incorrect_prompts + ] + + +class Connection(mitogen.parent.Connection): + options_class = Options + stream_protocol_class = SetupBootstrapProtocol + + # TODO: BSD su cannot handle stdin being a socketpair, but it does let the + # child inherit fds from the parent. 
So we can still pass a socketpair in + # for hybrid_tty_create_child(), there just needs to be either a shell + # snippet or bootstrap support for fixing things up afterwards. + create_child = staticmethod(mitogen.parent.tty_create_child) + child_is_immediate_subprocess = False + + def _get_name(self): + return u'su.' + self.options.username + + def stream_factory(self): + stream = super(Connection, self).stream_factory() + stream.protocol.setup_patterns(self) + return stream + + def get_boot_command(self): + argv = mitogen.parent.Argv(super(Connection, self).get_boot_command()) + return [self.options.su_path, self.options.username, '-c', str(argv)] diff --git a/ansible/plugins/mitogen-0.2.8-pre/mitogen/sudo.py b/ansible/plugins/mitogen-0.2.9/mitogen/sudo.py similarity index 79% rename from ansible/plugins/mitogen-0.2.8-pre/mitogen/sudo.py rename to ansible/plugins/mitogen-0.2.9/mitogen/sudo.py index 868d4d76c30e5c5ad2867947e207e0645699e0cb..ea07d0c1926a1a19908973246e523342049c5fa3 100644 --- a/ansible/plugins/mitogen-0.2.8-pre/mitogen/sudo.py +++ b/ansible/plugins/mitogen-0.2.9/mitogen/sudo.py @@ -35,11 +35,13 @@ import re import mitogen.core import mitogen.parent -from mitogen.core import b LOG = logging.getLogger(__name__) +password_incorrect_msg = 'sudo password is incorrect' +password_required_msg = 'sudo password is required' + # These are base64-encoded UTF-8 as our existing minifier/module server # struggles with Unicode Python source in some (forgotten) circumstances. PASSWORD_PROMPTS = [ @@ -99,14 +101,13 @@ PASSWORD_PROMPTS = [ PASSWORD_PROMPT_RE = re.compile( - u'|'.join( - base64.b64decode(s).decode('utf-8') + mitogen.core.b('|').join( + base64.b64decode(s) for s in PASSWORD_PROMPTS - ) + ), + re.I ) - -PASSWORD_PROMPT = b('password') SUDO_OPTIONS = [ #(False, 'bool', '--askpass', '-A') #(False, 'str', '--auth-type', '-a') @@ -181,10 +182,7 @@ def option(default, *args): return default -class Stream(mitogen.parent.Stream): - create_child = staticmethod(mitogen.parent.hybrid_tty_create_child) - child_is_immediate_subprocess = False - +class Options(mitogen.parent.Options): sudo_path = 'sudo' username = 'root' password = None @@ -195,15 +193,16 @@ class Stream(mitogen.parent.Stream): selinux_role = None selinux_type = None - def construct(self, username=None, sudo_path=None, password=None, - preserve_env=None, set_home=None, sudo_args=None, - login=None, selinux_role=None, selinux_type=None, **kwargs): - super(Stream, self).construct(**kwargs) + def __init__(self, username=None, sudo_path=None, password=None, + preserve_env=None, set_home=None, sudo_args=None, + login=None, selinux_role=None, selinux_type=None, **kwargs): + super(Options, self).__init__(**kwargs) opts = parse_sudo_flags(sudo_args or []) self.username = option(self.username, username, opts.user) self.sudo_path = option(self.sudo_path, sudo_path) - self.password = password or None + if password: + self.password = mitogen.core.to_text(password) self.preserve_env = option(self.preserve_env, preserve_env, opts.preserve_env) self.set_home = option(self.set_home, set_home, opts.set_home) @@ -211,67 +210,62 @@ class Stream(mitogen.parent.Stream): self.selinux_role = option(self.selinux_role, selinux_role, opts.role) self.selinux_type = option(self.selinux_type, selinux_type, opts.type) + +class SetupProtocol(mitogen.parent.RegexProtocol): + password_sent = False + + def _on_password_prompt(self, line, match): + LOG.debug('%s: (password prompt): %s', + self.stream.name, line.decode('utf-8', 'replace')) + + if 
self.stream.conn.options.password is None: + self.stream.conn._fail_connection( + PasswordError(password_required_msg) + ) + return + + if self.password_sent: + self.stream.conn._fail_connection( + PasswordError(password_incorrect_msg) + ) + return + + self.stream.transmit_side.write( + (self.stream.conn.options.password + '\n').encode('utf-8') + ) + self.password_sent = True + + PARTIAL_PATTERNS = [ + (PASSWORD_PROMPT_RE, _on_password_prompt), + ] + + +class Connection(mitogen.parent.Connection): + diag_protocol_class = SetupProtocol + options_class = Options + create_child = staticmethod(mitogen.parent.hybrid_tty_create_child) + create_child_args = { + 'escalates_privilege': True, + } + child_is_immediate_subprocess = False + def _get_name(self): - return u'sudo.' + mitogen.core.to_text(self.username) + return u'sudo.' + mitogen.core.to_text(self.options.username) def get_boot_command(self): # Note: sudo did not introduce long-format option processing until July # 2013, so even though we parse long-format options, supply short-form # to the sudo command. - bits = [self.sudo_path, '-u', self.username] - if self.preserve_env: + bits = [self.options.sudo_path, '-u', self.options.username] + if self.options.preserve_env: bits += ['-E'] - if self.set_home: + if self.options.set_home: bits += ['-H'] - if self.login: + if self.options.login: bits += ['-i'] - if self.selinux_role: - bits += ['-r', self.selinux_role] - if self.selinux_type: - bits += ['-t', self.selinux_type] - - bits = bits + ['--'] + super(Stream, self).get_boot_command() - LOG.debug('sudo command line: %r', bits) - return bits - - password_incorrect_msg = 'sudo password is incorrect' - password_required_msg = 'sudo password is required' - - def _connect_input_loop(self, it): - password_sent = False - - for buf in it: - LOG.debug('%s: received %r', self.name, buf) - if buf.endswith(self.EC0_MARKER): - self._ec0_received() - return - - match = PASSWORD_PROMPT_RE.search(buf.decode('utf-8').lower()) - if match is not None: - LOG.debug('%s: matched password prompt %r', - self.name, match.group(0)) - if self.password is None: - raise PasswordError(self.password_required_msg) - if password_sent: - raise PasswordError(self.password_incorrect_msg) - self.diag_stream.transmit_side.write( - (mitogen.core.to_text(self.password) + '\n').encode('utf-8') - ) - password_sent = True - - raise mitogen.core.StreamError('bootstrap failed') - - def _connect_bootstrap(self): - fds = [self.receive_side.fd] - if self.diag_stream is not None: - fds.append(self.diag_stream.receive_side.fd) - - it = mitogen.parent.iter_read( - fds=fds, - deadline=self.connect_deadline, - ) + if self.options.selinux_role: + bits += ['-r', self.options.selinux_role] + if self.options.selinux_type: + bits += ['-t', self.options.selinux_type] - try: - self._connect_input_loop(it) - finally: - it.close() + return bits + ['--'] + super(Connection, self).get_boot_command() diff --git a/ansible/plugins/mitogen-0.2.8-pre/mitogen/unix.py b/ansible/plugins/mitogen-0.2.9/mitogen/unix.py similarity index 56% rename from ansible/plugins/mitogen-0.2.8-pre/mitogen/unix.py rename to ansible/plugins/mitogen-0.2.9/mitogen/unix.py index 66141eec1f0be0e86143e0d2ddd5aa975bb9d3de..1af1c0ec6b66522ccdaa603778a48f45502f81cc 100644 --- a/ansible/plugins/mitogen-0.2.8-pre/mitogen/unix.py +++ b/ansible/plugins/mitogen-0.2.9/mitogen/unix.py @@ -36,6 +36,7 @@ have the same privilege (auth_id) as the current process. 
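
Editorial usage sketch (not part of the original docstring), assuming the
helpers defined below and a broker already running in the listening process::

    # Listening (parent) process:
    stream = mitogen.unix.Listener.build_stream(router)
    print(stream.protocol.path)      # hand this path to clients out of band

    # Unrelated process on the same machine:
    router, parent_context = mitogen.unix.connect(path)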
""" import errno +import logging import os import socket import struct @@ -45,7 +46,24 @@ import tempfile import mitogen.core import mitogen.master -from mitogen.core import LOG + +LOG = logging.getLogger(__name__) + + +class Error(mitogen.core.Error): + """ + Base for errors raised by :mod:`mitogen.unix`. + """ + pass + + +class ConnectError(Error): + """ + Raised when :func:`mitogen.unix.connect` fails to connect to the listening + socket. + """ + #: UNIX error number reported by underlying exception. + errno = None def is_path_dead(path): @@ -65,9 +83,38 @@ def make_socket_path(): return tempfile.mktemp(prefix='mitogen_unix_', suffix='.sock') -class Listener(mitogen.core.BasicStream): +class ListenerStream(mitogen.core.Stream): + def on_receive(self, broker): + sock, _ = self.receive_side.fp.accept() + try: + self.protocol.on_accept_client(sock) + except: + sock.close() + raise + + +class Listener(mitogen.core.Protocol): + stream_class = ListenerStream keep_alive = True + @classmethod + def build_stream(cls, router, path=None, backlog=100): + if not path: + path = make_socket_path() + sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) + if os.path.exists(path) and is_path_dead(path): + LOG.debug('%r: deleting stale %r', cls.__name__, path) + os.unlink(path) + + sock.bind(path) + os.chmod(path, int('0600', 8)) + sock.listen(backlog) + + stream = super(Listener, cls).build_stream(router, path) + stream.accept(sock, sock) + router.broker.start_receive(stream) + return stream + def __repr__(self): return '%s.%s(%r)' % ( __name__, @@ -75,20 +122,9 @@ class Listener(mitogen.core.BasicStream): self.path, ) - def __init__(self, router, path=None, backlog=100): + def __init__(self, router, path): self._router = router - self.path = path or make_socket_path() - self._sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) - - if os.path.exists(self.path) and is_path_dead(self.path): - LOG.debug('%r: deleting stale %r', self, self.path) - os.unlink(self.path) - - self._sock.bind(self.path) - os.chmod(self.path, int('0600', 8)) - self._sock.listen(backlog) - self.receive_side = mitogen.core.Side(self, self._sock.fileno()) - router.broker.start_receive(self) + self.path = path def _unlink_socket(self): try: @@ -100,69 +136,91 @@ class Listener(mitogen.core.BasicStream): raise def on_shutdown(self, broker): - broker.stop_receive(self) + broker.stop_receive(self.stream) self._unlink_socket() - self._sock.close() - self.receive_side.closed = True + self.stream.receive_side.close() - def _accept_client(self, sock): + def on_accept_client(self, sock): sock.setblocking(True) try: pid, = struct.unpack('>L', sock.recv(4)) except (struct.error, socket.error): - LOG.error('%r: failed to read remote identity: %s', - self, sys.exc_info()[1]) + LOG.error('listener: failed to read remote identity: %s', + sys.exc_info()[1]) return context_id = self._router.id_allocator.allocate() - context = mitogen.parent.Context(self._router, context_id) - stream = mitogen.core.Stream(self._router, context_id) - stream.name = u'unix_client.%d' % (pid,) - stream.auth_id = mitogen.context_id - stream.is_privileged = True - try: sock.send(struct.pack('>LLL', context_id, mitogen.context_id, os.getpid())) except socket.error: - LOG.error('%r: failed to assign identity to PID %d: %s', - self, pid, sys.exc_info()[1]) + LOG.error('listener: failed to assign identity to PID %d: %s', + pid, sys.exc_info()[1]) return - LOG.debug('%r: accepted %r', self, stream) - stream.accept(sock.fileno(), sock.fileno()) + context = 
mitogen.parent.Context(self._router, context_id) + stream = mitogen.core.MitogenProtocol.build_stream( + router=self._router, + remote_id=context_id, + auth_id=mitogen.context_id, + ) + stream.name = u'unix_client.%d' % (pid,) + stream.accept(sock, sock) + LOG.debug('listener: accepted connection from PID %d: %s', + pid, stream.name) self._router.register(context, stream) - def on_receive(self, broker): - sock, _ = self._sock.accept() - try: - self._accept_client(sock) - finally: - sock.close() +def _connect(path, broker, sock): + try: + # ENOENT, ECONNREFUSED + sock.connect(path) + + # ECONNRESET + sock.send(struct.pack('>L', os.getpid())) + mitogen.context_id, remote_id, pid = struct.unpack('>LLL', sock.recv(12)) + except socket.error: + e = sys.exc_info()[1] + ce = ConnectError('could not connect to %s: %s', path, e.args[1]) + ce.errno = e.args[0] + raise ce -def connect(path, broker=None): - LOG.debug('unix.connect(path=%r)', path) - sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) - sock.connect(path) - sock.send(struct.pack('>L', os.getpid())) - mitogen.context_id, remote_id, pid = struct.unpack('>LLL', sock.recv(12)) mitogen.parent_id = remote_id mitogen.parent_ids = [remote_id] - LOG.debug('unix.connect(): local ID is %r, remote is %r', + LOG.debug('client: local ID is %r, remote is %r', mitogen.context_id, remote_id) router = mitogen.master.Router(broker=broker) - stream = mitogen.core.Stream(router, remote_id) - stream.accept(sock.fileno(), sock.fileno()) + stream = mitogen.core.MitogenProtocol.build_stream(router, remote_id) + stream.accept(sock, sock) stream.name = u'unix_listener.%d' % (pid,) + mitogen.core.listen(stream, 'disconnect', _cleanup) + mitogen.core.listen(router.broker, 'shutdown', + lambda: router.disconnect_stream(stream)) + context = mitogen.parent.Context(router, remote_id) router.register(context, stream) + return router, context - mitogen.core.listen(router.broker, 'shutdown', - lambda: router.disconnect_stream(stream)) - sock.close() - return router, context +def connect(path, broker=None): + LOG.debug('client: connecting to %s', path) + sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) + try: + return _connect(path, broker, sock) + except: + sock.close() + raise + + +def _cleanup(): + """ + Reset mitogen.context_id and friends when our connection to the parent is + lost. Per comments on #91, these globals need to move to the Router so + fix-ups like this become unnecessary. 
+    """
+    mitogen.context_id = 0
+    mitogen.parent_id = None
+    mitogen.parent_ids = []
diff --git a/ansible/plugins/mitogen-0.2.8-pre/mitogen/utils.py b/ansible/plugins/mitogen-0.2.9/mitogen/utils.py
similarity index 99%
rename from ansible/plugins/mitogen-0.2.8-pre/mitogen/utils.py
rename to ansible/plugins/mitogen-0.2.9/mitogen/utils.py
index 94a171fb081fe5cea288b630c4387828fde2a908..b1347d022fdeb1717b9bc880b4e8f474d14b1855 100644
--- a/ansible/plugins/mitogen-0.2.8-pre/mitogen/utils.py
+++ b/ansible/plugins/mitogen-0.2.9/mitogen/utils.py
@@ -39,7 +39,6 @@ import mitogen.master
 import mitogen.parent
 
-LOG = logging.getLogger('mitogen')
 iteritems = getattr(dict, 'iteritems', dict.items)
 
 if mitogen.core.PY3:
diff --git a/ansible/plugins/mitogen-0.2.8-pre/preamble_size.py b/ansible/plugins/mitogen-0.2.9/preamble_size.py
similarity index 76%
rename from ansible/plugins/mitogen-0.2.8-pre/preamble_size.py
rename to ansible/plugins/mitogen-0.2.9/preamble_size.py
index f5f1adc1a7b402eba07b35fe95b1a3a21ad62c8c..f0d1e8041ad749cfa69364430b960a76daa28ec0 100644
--- a/ansible/plugins/mitogen-0.2.8-pre/preamble_size.py
+++ b/ansible/plugins/mitogen-0.2.9/preamble_size.py
@@ -19,15 +19,19 @@ import mitogen.sudo
 
 router = mitogen.master.Router()
 context = mitogen.parent.Context(router, 0)
-stream = mitogen.ssh.Stream(router, 0, max_message_size=0, hostname='foo')
+options = mitogen.ssh.Options(max_message_size=0, hostname='foo')
+conn = mitogen.ssh.Connection(options, router)
+conn.context = context
 
-print('SSH command size: %s' % (len(' '.join(stream.get_boot_command())),))
-print('Preamble size: %s (%.2fKiB)' % (
-    len(stream.get_preamble()),
-    len(stream.get_preamble()) / 1024.0,
+print('SSH command size: %s' % (len(' '.join(conn.get_boot_command())),))
+print('Bootstrap (mitogen.core) size: %s (%.2fKiB)' % (
+    len(conn.get_preamble()),
+    len(conn.get_preamble()) / 1024.0,
 ))
+print('')
+
 if '--dump' in sys.argv:
-    print(zlib.decompress(stream.get_preamble()))
+    print(zlib.decompress(conn.get_preamble()))
     exit()
 
@@ -55,7 +59,7 @@ for mod in (
     original_size = len(original)
     minimized = mitogen.minify.minimize_source(original)
     minimized_size = len(minimized)
-    compressed = zlib.compress(minimized, 9)
+    compressed = zlib.compress(minimized.encode(), 9)
     compressed_size = len(compressed)
     print(
         '%-25s'
diff --git a/ansible/plugins/mitogen-0.2.9/run_tests b/ansible/plugins/mitogen-0.2.9/run_tests
new file mode 100755
index 0000000000000000000000000000000000000000..b583af3b1f9cda22a50e8dddf65d3d35ba0411ca
--- /dev/null
+++ b/ansible/plugins/mitogen-0.2.9/run_tests
@@ -0,0 +1,82 @@
+#!/usr/bin/env bash
+
+# From https://unix.stackexchange.com/a/432145
+# Return the maximum of one or more integer arguments
+max() {
+    local max number
+
+    max="$1"
+
+    for number in "${@:2}"; do
+        if ((number > max)); then
+            max="$number"
+        fi
+    done
+
+    printf '%d\n' "$max"
+}
+
+echo '----- ulimits -----'
+ulimit -a
+echo '-------------------'
+echo
+
+# Don't use errexit, so coverage report is still generated when tests fail
+set -o pipefail
+
+NOCOVERAGE="${NOCOVERAGE:-}"
+NOCOVERAGE_ERASE="${NOCOVERAGE_ERASE:-$NOCOVERAGE}"
+NOCOVERAGE_REPORT="${NOCOVERAGE_REPORT:-$NOCOVERAGE}"
+
+if [ ! "$UNIT2" ]; then
+    UNIT2="$(which unit2)"
+fi
+
+if [ ! "$NOCOVERAGE_ERASE" ]; then
+    coverage erase
+fi
+
+# First run overwrites coverage output.
+[ "$SKIP_MITOGEN" ] || {
+    if [ !
"$NOCOVERAGE" ]; then + coverage run -a "${UNIT2}" discover \ + --start-directory "tests" \ + --pattern '*_test.py' \ + "$@" + else + "${UNIT2}" discover \ + --start-directory "tests" \ + --pattern '*_test.py' \ + "$@" + fi + MITOGEN_TEST_STATUS=$? +} + +# Second run appends. This is since 'discover' treats subdirs as packages and +# the 'ansible' subdir shadows the real Ansible package when it contains +# __init__.py, so hack around it by just running again with 'ansible' as the +# start directory. Alternative seems to be renaming tests/ansible/ and making a +# mess of Git history. +[ "$SKIP_ANSIBLE" ] || { + export PYTHONPATH=`pwd`/tests:$PYTHONPATH + if [ ! "$NOCOVERAGE" ]; then + coverage run -a "${UNIT2}" discover \ + --start-directory "tests/ansible" \ + --pattern '*_test.py' \ + "$@" + else + "${UNIT2}" discover \ + --start-directory "tests/ansible" \ + --pattern '*_test.py' \ + "$@" + fi + ANSIBLE_TEST_STATUS=$? +} + +if [ ! "$NOCOVERAGE_REPORT" ]; then + coverage html + echo "coverage report is at file://$(pwd)/htmlcov/index.html" +fi + +# Exit with a non-zero status if any test run did so +exit "$(max $MITOGEN_TEST_STATUS $ANSIBLE_TEST_STATUS)" diff --git a/ansible/plugins/mitogen-0.2.9/scripts/affin.sh b/ansible/plugins/mitogen-0.2.9/scripts/affin.sh new file mode 100755 index 0000000000000000000000000000000000000000..34c03d8b98bc9bc938641b3be5896051e3eb5658 --- /dev/null +++ b/ansible/plugins/mitogen-0.2.9/scripts/affin.sh @@ -0,0 +1,4 @@ +# show process affinities for running ansible-playbook +who="$1" +[ ! "$who" ] && who=ansible-playbook +for i in $(pgrep -f "$who") ; do taskset -c -p $i ; done|cut -d: -f2|sort -n |uniq -c diff --git a/ansible/plugins/mitogen-0.2.8-pre/scripts/debug-helpers.sh b/ansible/plugins/mitogen-0.2.9/scripts/debug-helpers.sh similarity index 100% rename from ansible/plugins/mitogen-0.2.8-pre/scripts/debug-helpers.sh rename to ansible/plugins/mitogen-0.2.9/scripts/debug-helpers.sh diff --git a/ansible/plugins/mitogen-0.2.8-pre/scripts/pogrep.py b/ansible/plugins/mitogen-0.2.9/scripts/pogrep.py similarity index 100% rename from ansible/plugins/mitogen-0.2.8-pre/scripts/pogrep.py rename to ansible/plugins/mitogen-0.2.9/scripts/pogrep.py diff --git a/ansible/plugins/mitogen-0.2.9/scripts/release-notes.py b/ansible/plugins/mitogen-0.2.9/scripts/release-notes.py new file mode 100644 index 0000000000000000000000000000000000000000..08b60c0c315298342386da16f59bfe7cef3eb71f --- /dev/null +++ b/ansible/plugins/mitogen-0.2.9/scripts/release-notes.py @@ -0,0 +1,47 @@ +# coding=UTF-8 + +# Generate the fragment used to make email release announcements +# usage: release-notes.py 0.2.6 + +import sys +import urllib +import lxml.html + +import subprocess + + +response = urllib.urlopen('https://mitogen.networkgenomics.com/changelog.html') +tree = lxml.html.parse(response) + +prefix = 'v' + sys.argv[1].replace('.', '-') + +for elem in tree.getroot().cssselect('div.section[id]'): + if elem.attrib['id'].startswith(prefix): + break +else: + print('cant find') + + + +for child in tree.getroot().cssselect('body > *'): + child.getparent().remove(child) + +body, = tree.getroot().cssselect('body') +body.append(elem) + +proc = subprocess.Popen( + args=['w3m', '-T', 'text/html', '-dump', '-cols', '72'], + stdin=subprocess.PIPE, + stdout=subprocess.PIPE, +) + +stdout, _ = proc.communicate(input=(lxml.html.tostring(tree))) +stdout = stdout.decode('UTF-8') +stdout = stdout.translate({ + ord(u'¶'): None, + ord(u'•'): ord(u'*'), + ord(u'’'): ord(u"'"), + ord(u'“'): ord(u'"'), + 
ord(u'”'): ord(u'"'),
+})
+print(stdout)
diff --git a/ansible/plugins/mitogen-0.2.8-pre/setup.cfg b/ansible/plugins/mitogen-0.2.9/setup.cfg
similarity index 100%
rename from ansible/plugins/mitogen-0.2.8-pre/setup.cfg
rename to ansible/plugins/mitogen-0.2.9/setup.cfg
diff --git a/ansible/plugins/mitogen-0.2.8-pre/setup.py b/ansible/plugins/mitogen-0.2.9/setup.py
similarity index 100%
rename from ansible/plugins/mitogen-0.2.8-pre/setup.py
rename to ansible/plugins/mitogen-0.2.9/setup.py
diff --git a/ansible/roles/apps/tasks/cert-manager.yml b/ansible/roles/apps/tasks/cert-manager.yml
index f8e0adff4f80a3cd2c543bea6218607de07dc7ec..4f08f585ce39cf25272b313973fc89197dfaf4a9 100644
--- a/ansible/roles/apps/tasks/cert-manager.yml
+++ b/ansible/roles/apps/tasks/cert-manager.yml
@@ -14,7 +14,7 @@
     - "'NotFound' not in cert_manager_label_namespace.stderr"
     - "cert_manager_label_namespace.rc != 0"
 
-- name: Install LetsEncrypt {{ item.name }} ClusterIssuer
+- name: Install LetsEncrypt ClusterIssuers
   tags:
     - cert-manager
   k8s:
@@ -40,19 +40,16 @@
       - name: production
         server: "https://acme-v02.api.letsencrypt.org/directory"
 
-- name: Apply cert-manager helmfile
+- name: Install cert-manager
   tags:
     - helmfile
     - cert-manager
-  shell: |
-    set -e -x -o pipefail
-    /usr/local/bin/helmfile \
-      -b /usr/local/bin/helm \
-      -e oas \
-      -f {{ data_directory }}/source/helmfiles/helmfile.d/05-cert-manager.yaml \
-      apply \
-      --suppress-secrets \
-      | sed 's/\x1B\[[0-9;]*[JKmsu]//g' \
-      >> {{ log_directory }}/helmfile.log
-  args:
-    executable: /bin/bash
+  include_role:
+    name: "helmfile"
+    tasks_from: "apply"
+    apply:
+      tags:
+        - helmfile
+        - cert-manager
+  vars:
+    helmfile: '05-cert-manager'
diff --git a/ansible/roles/apps/tasks/helmfiles.yml b/ansible/roles/apps/tasks/helmfiles.yml
deleted file mode 100644
index 7575095b10c1b57ea2350ac878038972fe2d6681..0000000000000000000000000000000000000000
--- a/ansible/roles/apps/tasks/helmfiles.yml
+++ /dev/null
@@ -1,62 +0,0 @@
----
-- name: Clone nextcloud repo
-  tags:
-    - git
-    - nextcloud
-  git:
-    repo: 'https://open.greenhost.net/openappstack/nextcloud'
-    dest: '{{ data_directory }}/source/repos/nextcloud'
-    version: '{{ git_nextcloud_version }}'
-
-- name: Remove requirements.lock file
-  tags:
-    - git
-    - nextcloud
-    - helmfile
-  file:
-    path: '{{ data_directory }}/source/repos/nextcloud/nextcloud-onlyoffice/requirements.lock'
-    state: absent
-
-- name: Clone local-storage repo
-  tags:
-    - git
-    - local-storage
-  git:
-    repo: 'https://open.greenhost.net/openappstack/local-storage'
-    dest: '{{ data_directory }}/source/repos/local-storage'
-    version: '{{ git_local_storage_version }}'
-
-- name: Make Prometheus custom resource definitions
-  tags:
-    - helmfile
-    - monitoring
-  command: '/snap/bin/kubectl apply -f https://raw.githubusercontent.com/coreos/prometheus-operator/master/example/prometheus-operator-crd/{{ item }}'
-  loop:
-    - alertmanager.crd.yaml
-    - prometheus.crd.yaml
-    - prometheusrule.crd.yaml
-    - servicemonitor.crd.yaml
-    - podmonitor.crd.yaml
-
-- name: Apply helmfiles
-  tags:
-    - helmfile
-  environment:
-    - NEXTCLOUD_PASSWORD: "{{ nextcloud_password }}"
-    - NEXTCLOUD_MARIADB_PASSWORD: "{{ nextcloud_mariadb_password }}"
-    - NEXTCLOUD_MARIADB_ROOT_PASSWORD: "{{ nextcloud_mariadb_root_password }}"
-    - ONLYOFFICE_JWT_SECRET: "{{ onlyoffice_jwt_secret }}"
-    - ONLYOFFICE_POSTGRESQL_PASSWORD: "{{ onlyoffice_postgresql_password }}"
-    - ONLYOFFICE_RABBITMQ_PASSWORD: "{{ onlyoffice_rabbitmq_password }}"
-    - GRAFANA_ADMIN_PASSWORD: "{{ grafana_admin_password }}"
-  shell: |
set -e -x -o pipefail - /usr/local/bin/helmfile -b /usr/local/bin/helm -e oas \ - -f {{ data_directory }}/source/helmfiles/helmfile.d/{{ item }}.yaml \ - apply --suppress-secrets \ - | sed 's/\x1B\[[0-9;]*[JKmsu]//g' \ - >> {{ log_directory }}/helmfile.log - args: - executable: /bin/bash - loop: "{{ helmfiles }}" - when: item is not search("cert-manager") diff --git a/ansible/roles/apps/tasks/init.yml b/ansible/roles/apps/tasks/init.yml index 4a7ed7210494a6758c69debd2c3d20040c155401..bc6aa8874ec0f1b6b0962393da4a0e9960c6aa54 100644 --- a/ansible/roles/apps/tasks/init.yml +++ b/ansible/roles/apps/tasks/init.yml @@ -11,15 +11,6 @@ delete: true become: true -- name: Clone charts repo - tags: - - git - git: - repo: 'https://open.greenhost.net/openappstack/charts' - dest: '{{ data_directory }}/source/repos/charts' - version: '{{ git_charts_version }}' - become: true - - name: Create OAS namespaces tags: - kubernetes diff --git a/ansible/roles/apps/tasks/local-storage.yml b/ansible/roles/apps/tasks/local-storage.yml new file mode 100644 index 0000000000000000000000000000000000000000..7c5c8a86109fb7c8005aa8f81a1f105ffa032a47 --- /dev/null +++ b/ansible/roles/apps/tasks/local-storage.yml @@ -0,0 +1,24 @@ +--- +- name: Clone local-storage repo + tags: + - git + - helmfile + - local-storage + git: + repo: 'https://open.greenhost.net/openappstack/local-storage' + dest: '{{ data_directory }}/source/repos/local-storage' + version: '{{ git_local_storage_version }}' + +- name: Install local-storage provisioner + tags: + - helmfile + - local-storage + include_role: + name: "helmfile" + tasks_from: "apply" + apply: + tags: + - helmfile + - local-storage + vars: + helmfile: '00-storage' diff --git a/ansible/roles/apps/tasks/main.yml b/ansible/roles/apps/tasks/main.yml index a366c9362eafedbb90090cb7ffea89272789066d..2c1afae58ae53bd7ec8bbee5105d471b00f65380 100644 --- a/ansible/roles/apps/tasks/main.yml +++ b/ansible/roles/apps/tasks/main.yml @@ -1,4 +1,29 @@ --- -- import_tasks: init.yml -- import_tasks: cert-manager.yml -- import_tasks: helmfiles.yml +- name: Import tasks from init.yml + import_tasks: init.yml + tags: [ helmfile ] + +- name: Install local-storage + import_tasks: local-storage.yml + tags: [ helmfile ] + when: '"00-storage" in helmfiles' + +- name: Install cert-manager + import_tasks: cert-manager.yml + tags: [ helmfile ] + when: '"05-cert-manager" in helmfiles' + +- name: Install nginx + import_tasks: nginx.yml + tags: [ helmfile ] + when: '"10-nginx" in helmfiles' + +- name: Install prometheus + import_tasks: prometheus.yml + tags: [ helmfile ] + when: '"15-monitoring" in helmfiles' + +- name: Install nextcloud + import_tasks: nextcloud.yml + tags: [ helmfile ] + when: '"20-nextcloud" in helmfiles' diff --git a/ansible/roles/apps/tasks/nextcloud.yml b/ansible/roles/apps/tasks/nextcloud.yml new file mode 100644 index 0000000000000000000000000000000000000000..acb05aedd10b04940321766eafe33fb1238cda9f --- /dev/null +++ b/ansible/roles/apps/tasks/nextcloud.yml @@ -0,0 +1,42 @@ +--- +- name: Clone nextcloud repo + tags: + - git + - helmfile + - nextcloud + git: + repo: 'https://open.greenhost.net/openappstack/nextcloud' + dest: '{{ data_directory }}/source/repos/nextcloud' + version: '{{ git_nextcloud_version }}' + +- name: Remove requirements.lock file + tags: + - git + - nextcloud + - helmfile + file: + path: '{{ data_directory }}/source/repos/nextcloud/nextcloud-onlyoffice/requirements.lock' + state: absent + +- name: Install nextcloud and onlyoffice + tags: + - helmfile + - nextcloud + - 
onlyoffice
+  include_role:
+    name: "helmfile"
+    tasks_from: "apply"
+    apply:
+      tags:
+        - helmfile
+        - nextcloud
+        - onlyoffice
+  environment:
+    - NEXTCLOUD_PASSWORD: "{{ nextcloud_password }}"
+    - NEXTCLOUD_MARIADB_PASSWORD: "{{ nextcloud_mariadb_password }}"
+    - NEXTCLOUD_MARIADB_ROOT_PASSWORD: "{{ nextcloud_mariadb_root_password }}"
+    - ONLYOFFICE_JWT_SECRET: "{{ onlyoffice_jwt_secret }}"
+    - ONLYOFFICE_POSTGRESQL_PASSWORD: "{{ onlyoffice_postgresql_password }}"
+    - ONLYOFFICE_RABBITMQ_PASSWORD: "{{ onlyoffice_rabbitmq_password }}"
+  vars:
+    helmfile: '20-nextcloud'
diff --git a/ansible/roles/apps/tasks/nginx.yml b/ansible/roles/apps/tasks/nginx.yml
new file mode 100644
index 0000000000000000000000000000000000000000..a3a60176429f6227ee84a7b75d4b81a704b8e298
--- /dev/null
+++ b/ansible/roles/apps/tasks/nginx.yml
@@ -0,0 +1,14 @@
+---
+- name: Install nginx ingress controller
+  tags:
+    - helmfile
+    - nginx
+  include_role:
+    name: "helmfile"
+    tasks_from: "apply"
+    apply:
+      tags:
+        - helmfile
+        - nginx
+  vars:
+    helmfile: '10-nginx'
diff --git a/ansible/roles/apps/tasks/prometheus.yml b/ansible/roles/apps/tasks/prometheus.yml
new file mode 100644
index 0000000000000000000000000000000000000000..db9e49a8dd6611404832ac3de12819a4e9ff2414
--- /dev/null
+++ b/ansible/roles/apps/tasks/prometheus.yml
@@ -0,0 +1,52 @@
+---
+
+- name: Make Prometheus custom resource definitions
+  tags:
+    - helmfile
+    - prometheus
+  # NOTE: Change the version in the URL when upgrading Prometheus
+  command: '/snap/bin/kubectl apply -f https://raw.githubusercontent.com/coreos/prometheus-operator/v0.31.1/example/prometheus-operator-crd/{{ item }}'
+  loop:
+    - alertmanager.crd.yaml
+    - prometheus.crd.yaml
+    - prometheusrule.crd.yaml
+    - servicemonitor.crd.yaml
+    - podmonitor.crd.yaml
+
+- name: Get prometheus PV name
+  tags:
+    - prometheus
+  shell: "kubectl -n oas get pvc prometheus-prometheus-oas-{{ release_name }}-prometheus-promet-prometheus-0 -o=jsonpath='{.spec.volumeName}'"
+  register: prometheus_pv_name
+  failed_when: false
+  changed_when: false
+
+# Needed because previously we ran prometheus as root
+- name: Ensure prometheus volume is accessible by the prometheus pod
+  tags:
+    - prometheus
+  file:
+    dest: "{{ data_directory }}/local-storage/{{ prometheus_pv_name.stdout }}"
+    owner: '1000'
+    group: '2000'
+    recurse: true
+  when: prometheus_pv_name.stdout
+
+- name: Install prometheus and grafana
+  include_role:
+    name: "helmfile"
+    tasks_from: "apply"
+    apply:
+      tags:
+        - monitoring
+        - prometheus
+      environment:
+        - GRAFANA_ADMIN_PASSWORD: "{{ grafana_admin_password }}"
+  tags:
+    - monitoring
+    - prometheus
+  vars:
+    helmfile: '15-monitoring'
+    # Force needed for upgrading from 5 to 6, see
+    # https://github.com/helm/charts/tree/master/stable/prometheus-operator#upgrading-from-5xx-to-6xx
+    helmfile_apply_args: '--args="--force"'
diff --git a/ansible/roles/helmfile/defaults/main.yml b/ansible/roles/helmfile/defaults/main.yml
new file mode 100644
index 0000000000000000000000000000000000000000..5521ff672faea62718869f8fd4e0ad18132bee09
--- /dev/null
+++ b/ansible/roles/helmfile/defaults/main.yml
@@ -0,0 +1,2 @@
+# Additional arguments passed to helmfile apply
+helmfile_apply_args: ''
diff --git a/ansible/roles/helmfile/tasks/apply.yml b/ansible/roles/helmfile/tasks/apply.yml
new file mode 100644
index 0000000000000000000000000000000000000000..82957b2a68fa506c5a43d5eca1b65c0f459aeea5
--- /dev/null
+++ b/ansible/roles/helmfile/tasks/apply.yml
@@ -0,0 +1,13 @@
+---
+- name: Apply helmfile
+  tags:
+    - helmfile
+  shell: |
+    set -e -x -o pipefail
+    /usr/local/bin/helmfile -b /usr/local/bin/helm -e oas \
+      -f {{ data_directory }}/source/helmfiles/helmfile.d/{{ helmfile }}.yaml \
+      apply --suppress-secrets {{ helmfile_apply_args }} \
+      | sed 's/\x1B\[[0-9;]*[JKmsu]//g' \
+      >> {{ log_directory }}/helmfile.log
+  args:
+    executable: /bin/bash
diff --git a/ansible/roles/rke_configuration/files/cluster-defaults.yml b/ansible/roles/rke_configuration/files/cluster-defaults.yml
index 4e3f9b50aeb01783c183e48a75120eda52538fd9..ced099187c91c3afc039f4768fad1eda86b44594 100644
--- a/ansible/roles/rke_configuration/files/cluster-defaults.yml
+++ b/ansible/roles/rke_configuration/files/cluster-defaults.yml
@@ -24,7 +24,9 @@ ingress:
   options: {}
   # Set this to none, so we can install nginx ourselves.
   provider: none
-kubernetes_version: 'v1.14.3-rancher1-1'
+# If `kubernetes_version` is not set, the default Kubernetes version for the
+# specified rke version in `ansible/group_vars/all/oas.yml` is used.
+# kubernetes_version: ''
 monitoring:
   options: {}
   provider: ''
diff --git a/docs/index.rst b/docs/index.rst
index c22e834267c8d3a36e540096784ea03ad9c8771e..b7638b7b6017f6b22841ec5e6321fd7cacc7b163 100644
--- a/docs/index.rst
+++ b/docs/index.rst
@@ -27,5 +27,6 @@ For more information, go to `the OpenAppStack website`_.
    :caption: Contents:
 
    installation_instructions
+   testing_instructions
    design
    reference
diff --git a/docs/installation_instructions.md b/docs/installation_instructions.md
index d25f61b0ee37f7f48428c0404533c9bb71dc54c4..531d4e0c52dec0bfdf4c68549e8cfb1cb0febaa4 100644
--- a/docs/installation_instructions.md
+++ b/docs/installation_instructions.md
@@ -42,9 +42,9 @@ guide][https://openappstack.net/contact.html).
 ## Install OpenAppStack command line tool
 
 On your **provisioning machine**, clone the OpenAppStack git repository and
-checkout the latest tagged version (currently `0.2.0`):
+check out the latest tagged version (currently `0.2.1`):
 
-    $ git clone -b 0.2.0 https://open.greenhost.net/openappstack/openappstack.git
+    $ git clone -b 0.2.1 https://open.greenhost.net/openappstack/openappstack.git
 
     $ cd openappstack
 
@@ -74,7 +74,8 @@ Now you can run the OpenAppStack CLI as follows:
 
 The CLI *always* needs a `CLUSTER_NAME` argument. Even for getting subcommand
 help messages. Be sure to run this command in the root directory of the git
-repository. Try it out by running
+repository. In this tutorial, we're using `my-cluster` as the cluster name. Try
+it out by running
 
     $ python -m openappstack my-cluster --help
 
@@ -100,12 +101,10 @@ Setting up OpenAppStack on your VPS happens in three steps:
 
 ### Set up cluster
 
 To set up your cluster, use the `create` subcommand of the OpenAppStack CLI.
-First, choose a name for your cluster. Then run the following command to get
+First, choose a name for your cluster (we chose `my-cluster`). Then run the following command to get
 information about the `create` subcommand:
 
-    $ python -m openappstack CLUSTER_NAME create --help
-
-Replace `CLUSTER_NAME` with your chosen name.
+    $ python -m openappstack my-cluster create --help
 
 There are two options to create a cluster:
 
@@ -118,18 +117,18 @@ There are two options to create a cluster:
 4. Click "New API key"
 5. Click "Generate new key"
 6. Give the key "Customer", "CloudCustomer" or "API" access rights. You will
-   need "Customer" rights if you want to automatically generate DNS rules.
-   If you do not have this right, you have to manually set the right DNS
-   rules.
+   need "Customer" rights if you want to automatically generate DNS rules. If
If + you do not have this right, you have to [manually set the right DNS + rules](http://docs.openappstack.net/en/latest/installation_instructions.html#dns-entries) + later. 7. Copy the generated key and run export it to this variable in a terminal: ``` $ export COSMOS_API_TOKEN=paste your API key here ``` 8. In *the same terminal*, you can now use the `create` subcommand -1. Based on an already existing [Greenhost](https://greenhost.net) or - [Eclips.is](https://portal.eclips.is/portal/) VPS, using the `--droplet-id` - argument. +1. Based on an already existing [Greenhost](https://greenhost.net) VPS, using + the `--droplet-id` argument. Find the ID of your VPS either in the Greenhost Cosmos interface (it is the numeric part of the URL in the "Manage VPS" screen). @@ -142,10 +141,8 @@ There are two options to create a cluster: checking the link under "Show key". The numerical part is your SSH key ID. - *Note: You can also use the API to list ssh keys and find it there. Read the - [Eclips.is API - documentation](https://portal.eclips.is/portal/cloud/ApiDoc#/default) or - [Greenhost API + *Note: You can also use the API to list ssh keys and find it there. Read + the [Greenhost API documentation](https://service.greenhost.net/cloud/ApiDoc#/default) for more information* - In both cases you need to provide the `DOMAIN_NAME` positional argument. @@ -154,15 +151,19 @@ There are two options to create a cluster: command as follows: ``` - $ python -m openappstack create --subdomain oas example.org`. + $ python -m openappstack my-cluster create --subdomain oas example.org ``` -- Here is an example of the complete creation command: +- Here is an example of a complete creation command: ``` - python -m openappstack my-cluster create --create-droplet --hostname oas.example.org --ssh-key-id 112 --create-domain-records --subdomain oas example.org + $ python -m openappstack my-cluster create --create-droplet --hostname oas.example.org --ssh-key-id 112 --acme-live-environment --create-domain-records --subdomain oas example.org ``` + > **NOTE:** We use the `--acme-live-environment` argument. This ensures you + > get real (instead of "staging") Let's Encrypt TLS certificates. This is + > necessary for ONLYOFFICE integration to work. + This will create configuration files for a cluster named `my-cluster`. It will also create a Greenhost VPS with the hostname `oas.example.org` and on which you can log in with SSH key with ID `112`. @@ -186,9 +187,13 @@ its *hostname* and its *IP address*. Also check that your VPS meets our Create the OpenAppStack settings for your VPS by running the following command: ``` -$ python -m openappstack create --ip-address IP_ADDRESS --hostname HOSTNAME --subdomain oas example.org +$ python -m openappstack my-cluster create --ip-address IP_ADDRESS --hostname HOSTNAME --subdomain oas example.org --acme-live-environment ``` +> **NOTE:** We use the `--acme-live-environment` argument. This ensures you get +> real (instead of "staging") Let's Encrypt TLS certificates. This is necessary +> for ONLYOFFICE integration to work. + ### DNS entries Before you continue, if you have not made DNS entries with the CLI tool, you @@ -229,7 +234,7 @@ To start the installation process, run: $ python -m openappstack my-cluster install -This will take approximately 5-10 minutes. It generates secrets that will be +This will take between 5 and 20 minutes. It generates secrets that will be added to the `clusters/my-cluster/secrets` directory. 
 If you ever need any credentials after this installation, you can probably
 find them there. **Make sure this directory stays safe.** Feel free to
 encrypt it when you are not using
@@ -254,7 +259,7 @@ When the installation is completed, you will have access to these
 applications:
 You can access Nextcloud via https://files.example.org. Use the username
 `admin` with the automatically generated Nextcloud password that you can find in
-`clusters/maarten/secrets/nextcloud_admin_password` on your local machine.
+`clusters/my-cluster/secrets/nextcloud_admin_password` on your local machine.
 ONLYOFFICE is already integrated in your Nextcloud installation which allows
 you to create and share ONLYOFFICE documents within Nextcloud. ONLYOFFICE runs
 on https://office.oas.example.org.
diff --git a/docs/testing_instructions.md b/docs/testing_instructions.md
new file mode 100644
index 0000000000000000000000000000000000000000..ceca5b79a549d549d1cbf70fa033181b21fd8264
--- /dev/null
+++ b/docs/testing_instructions.md
@@ -0,0 +1,52 @@
+# A testing guide and test reporting form for testers of the OpenAppStack project
+
+Great that you want to take OpenAppStack for a test drive.
+This tutorial contains instructions to get you going,
+some pointers on what we think would be useful to test, and some guesses at which test results would be
+useful to write down.
+At any point please feel invited to test whatever functionality you come across, and to report whatever you
+think is interesting. Our contact details are listed [here](https://openappstack.net/contact.html), and we'll
+describe how to give feedback via our issue tracker at the [end of these instructions](#providing-feedback).
+
+## Installation
+
+First we'd like you to set up an OpenAppStack cluster by yourself, following the
+[installation tutorial](https://docs.openappstack.net/en/latest/installation_instructions.html).
+
+## Nextcloud
+
+### Logging into Nextcloud
+
+Please browse to https://files.$YOURPUBLICDOMAIN and try to log in.
+The username is `admin`. The password was generated as part of the installation process and is stored in the file `clusters/CLUSTER_NAME/secrets/nextcloud_admin_password` in the `openappstack` directory of your provisioning machine (the `clusters` folder was created in the directory from which you ran the installation commands during the tutorial).
+
+### Creating users
+
+* After logging in to Nextcloud as `admin`, please try to create another Nextcloud user. Ideally, you would create accounts for a few of your coworkers who are willing to try the collaboration features of Nextcloud and ONLYOFFICE with you.
+
+### Nextcloud client application
+
+* If you feel like it, please try the [Nextcloud desktop client](https://nextcloud.com/clients/), connect it to your OpenAppStack instance, and use it to manage some files.
+* If you feel like it, please try the [Nextcloud mobile client](https://nextcloud.com/clients/) for your smartphone, connect it to your OpenAppStack instance, and use it to download and/or open some files, upload a new file, etc.
+
+
+## ONLYOFFICE
+
+### Creating a new office document
+
+* From the main Nextcloud webpage, please try to create a new office document by clicking the round `plus` button near the top of the screen, picking the `Document` type with the blue icon (third one from below on my screen), and entering a name for it. After that, please try some basic editing of the document, and save it. Maybe check that you can open it again afterwards, and that it has the contents that you saved earlier.
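+
+If anything in this section fails, it can help to first check from a terminal that both web endpoints respond and serve valid certificates before digging deeper (a quick sketch, assuming the `files.` and `office.` subdomains chosen in the installation tutorial):
+
+```
+$ curl -sSI https://files.$YOURPUBLICDOMAIN | head -n1
+$ curl -sSI https://office.$YOURPUBLICDOMAIN | head -n1
+```
+
+Each command should print an HTTP status line; if `curl` reports a certificate problem instead, the issue lies with the TLS setup rather than with Nextcloud or ONLYOFFICE itself.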
+
+
+### Collaborating on an office document
+
+This part of the test requires the cooperation of another person; feel free to skip it for now if that's not convenient at this point.
+
+* Please try to open the same office document from a few different user accounts simultaneously, and let all participants edit the document mercilessly. There are also some collaboration features that you may want to try: on the left of the ONLYOFFICE screen there are buttons for chat and for text comments.
+
+
+## Providing feedback
+
+If you have not done so already, please create an account on [https://open.greenhost.net](https://open.greenhost.net)
+(or log in with your existing GitHub account) and [create a new issue](https://open.greenhost.net/openappstack/openappstack/issues/new) using the `Feedback` template.
+
+Thanks a lot for your testing work! We'll use your input to try to improve OpenAppStack.
diff --git a/helmfiles/helmfile.d/15-monitoring.yaml b/helmfiles/helmfile.d/15-monitoring.yaml
index 971134a83c931715d3e9fde93f0f3dd44fa9cccf..1db3eaa6a9f47f555a4c537d26af5ce881654d25 100644
--- a/helmfiles/helmfile.d/15-monitoring.yaml
+++ b/helmfiles/helmfile.d/15-monitoring.yaml
@@ -7,6 +7,8 @@ releases:
   - name: "oas-{{ .Environment.Values.releaseName }}-prometheus"
     namespace: "oas"
     chart: "stable/prometheus-operator"
+    # NOTE: If you change this version, also change the version in the CRD URL
+    # in ansible/roles/apps/tasks/prometheus.yml
     version: 5.15.0
     values:
       - "../values/prometheus.yaml.gotmpl"
diff --git a/helmfiles/helmfile.d/20-nextcloud.yaml b/helmfiles/helmfile.d/20-nextcloud.yaml
index f1392b4be4bca8410596ba78f69a5ae5fcad0b03..d144945c7e7514b66e5280316a02010d23ed0681 100644
--- a/helmfiles/helmfile.d/20-nextcloud.yaml
+++ b/helmfiles/helmfile.d/20-nextcloud.yaml
@@ -17,4 +17,4 @@ releases:
       - "../values/nextcloud.yaml.gotmpl"
       - "/etc/OpenAppStack/values/apps/nextcloud.yaml.gotmpl"
     wait: true
-    timeout: 600
+    timeout: 900
diff --git a/helmfiles/values/nextcloud.yaml.gotmpl b/helmfiles/values/nextcloud.yaml.gotmpl
index ced81909cbf7ddfb6c9bd7489c43bfd96becdd65..eb0b641c184ad8cad754dc52c95e19262b29203d 100644
--- a/helmfiles/values/nextcloud.yaml.gotmpl
+++ b/helmfiles/values/nextcloud.yaml.gotmpl
@@ -49,8 +49,16 @@ nextcloud:
       password: "{{ requiredEnv "NEXTCLOUD_MARIADB_ROOT_PASSWORD" }}"
   livenessProbe:
     initialDelaySeconds: 120
+    timeoutSeconds: 20
+    periodSeconds: 30
+    successThreshold: 1
+    failureThreshold: 6
   readinessProbe:
-    initialDelaySeconds: 120
+    initialDelaySeconds: 10
+    timeoutSeconds: 20
+    periodSeconds: 30
+    successThreshold: 1
+    failureThreshold: 3
 
 onlyoffice-documentserver:
   ingress:
@@ -75,3 +83,16 @@ onlyoffice-documentserver:
   rabbitmq:
     rabbitmq:
       password: "{{ requiredEnv "ONLYOFFICE_RABBITMQ_PASSWORD" }}"
+
+  livenessProbe:
+    initialDelaySeconds: 120
+    timeoutSeconds: 20
+    periodSeconds: 30
+    successThreshold: 1
+    failureThreshold: 6
+  readinessProbe:
+    initialDelaySeconds: 10
+    timeoutSeconds: 20
+    periodSeconds: 30
+    successThreshold: 1
+    failureThreshold: 3
diff --git a/helmfiles/values/prometheus.yaml.gotmpl b/helmfiles/values/prometheus.yaml.gotmpl
index 27645ba0f60d79cc53176ebc74ee4518531b7031..4f05cd179e3fe0e6f1aa1c0456686109f7fa854b 100644
--- a/helmfiles/values/prometheus.yaml.gotmpl
+++ b/helmfiles/values/prometheus.yaml.gotmpl
@@ -7,6 +7,7 @@ kubeDns:
 
 alertmanager:
   alertmanagerSpec:
+    logFormat: logfmt
     storage:
       volumeClaimTemplate:
         metadata: