Skip to content
Snippets Groups Projects
Verified Commit 4275f1c9 authored by Maarten de Waard's avatar Maarten de Waard :angel:
Browse files

Merge branch 'master' into fix-linting-errors-in-cosmos.py

parents fd39a03e a34deb35
Branches
Tags
No related merge requests found
Showing
with 175 additions and 432 deletions
include:
- .gitlab/ci_templates/kaniko.yml
- .gitlab/ci_templates/ssh_setup.yml
stages:
- build
- setup-cluster
......@@ -22,11 +23,6 @@ default:
ci_test_image:
stage: build
image:
# We need a shell to provide the registry credentials, so we need to use the
# kaniko debug image (https://github.com/GoogleContainerTools/kaniko#debug-image)
name: gcr.io/kaniko-project/executor:debug
entrypoint: [""]
only:
changes:
- .gitlab-ci.yml
......@@ -36,14 +32,7 @@ ci_test_image:
bootstrap:
stage: setup-cluster
before_script:
- ansible --version
script:
# Ensure test/ is not world-writable otherwise ansible-playbook refuses to run, see
# https://docs.ansible.com/ansible/devel/reference_appendices/config.html#cfg-in-world-writable-dir
- chmod 755 ansible/
- eval $(ssh-agent -s)
- echo "$SSH_PRIVATE_KEY" | tr -d '\r' | ssh-add - > /dev/null
- echo "hostname $HOSTNAME, subdomain $SUBDOMAIN, domain $DOMAIN, address $ADDRESS"
- python3 -m openappstack $HOSTNAME create --create-droplet $DOMAIN --hostname $HOSTNAME --ssh-key-id $SSH_KEY_ID --create-domain-records --subdomain $SUBDOMAIN
- python3 -m openappstack $HOSTNAME install --ansible-param='--skip-tags=helmfile'
......@@ -59,17 +48,11 @@ bootstrap:
- helmfiles/**/*
- test/**/*
- openappstack/**/*
extends: .ssh_setup
install:
stage: install-apps
variables:
ANSIBLE_HOST_KEY_CHECKING: 'False'
script:
# Ensure test/ is not world-writable otherwise ansible-playbook refuses to run, see
# https://docs.ansible.com/ansible/devel/reference_appendices/config.html#cfg-in-world-writable-dir
- chmod 755 ansible/
- eval $(ssh-agent -s)
- echo "$SSH_PRIVATE_KEY" | tr -d '\r' | ssh-add - > /dev/null
- python3 -m openappstack $HOSTNAME install --ansible-param='--tags=helmfile'
# Show versions of installed apps/binaries
- ansible master -m shell -a 'oas-version-info.sh 2>&1'
......@@ -85,14 +68,11 @@ install:
- helmfiles/**/*
- test/**/*
- openappstack/**/*
extends: .ssh_setup
testinfra:
stage: health-test
script:
- mkdir ~/.ssh
- eval $(ssh-agent -s)
- echo "$SSH_PRIVATE_KEY" | tr -d '\r' | ssh-add - > /dev/null
- echo -e 'Host *\n stricthostkeychecking no' > ~/.ssh/config
- cd ansible/
- pytest -v -m 'testinfra' --connection=ansible --ansible-inventory=../clusters/${HOSTNAME}/inventory.yml --hosts='ansible://*'
only:
......@@ -102,15 +82,12 @@ testinfra:
- helmfiles/**/*
- test/**/*
- openappstack/**/*
extends: .ssh_setup
certs:
stage: health-test
allow_failure: true
script:
- mkdir ~/.ssh
- eval $(ssh-agent -s)
- echo "$SSH_PRIVATE_KEY" | tr -d '\r' | ssh-add - > /dev/null
- echo -e 'Host *\n stricthostkeychecking no' > ~/.ssh/config
- cd ansible/
- pytest -s -m 'certs' --connection=ansible --ansible-inventory=../clusters/${HOSTNAME}/inventory.yml --hosts='ansible://*'
only:
......@@ -120,6 +97,7 @@ certs:
- helmfiles/**/*
- test/**/*
- openappstack/**/*
extends: .ssh_setup
prometheus-alerts:
stage: health-test
......@@ -127,10 +105,6 @@ prometheus-alerts:
OAS_DOMAIN: 'ci-${CI_PIPELINE_ID}.ci.openappstack.net'
allow_failure: true
script:
- mkdir ~/.ssh
- eval $(ssh-agent -s)
- echo "$SSH_PRIVATE_KEY" | tr -d '\r' | ssh-add - > /dev/null
- echo -e 'Host *\n stricthostkeychecking no' > ~/.ssh/config
- cd test/
- pytest -s -m 'prometheus' --connection=ansible --ansible-inventory=../clusters/${HOSTNAME}/inventory.yml --hosts='ansible://*'
only:
......@@ -139,6 +113,7 @@ prometheus-alerts:
- ansible/**/*
- helmfiles/**/*
- test/**/*
extends: .ssh_setup
behave-nextcloud:
stage: integration-test
......@@ -175,9 +150,32 @@ behave-grafana:
- test/**/*
- openappstack/**/*
# Remove droplet after merge
terminate_droplet_after_merge:
terminate_current_pipeline_droplet:
variables:
HOSTNAME: "ci-${CI_PIPELINE_ID}"
stage: cleanup
before_script:
- echo "Terminate the current pipeline droplet on success but only for \
  the master branch, since we want to have MR droplets running even for \
  successful deploys to be able to investigate."
script:
- python3 -c "import greenhost_cloud; greenhost_cloud.terminate_droplets_by_name(\"^${HOSTNAME}\$\")"
only:
changes:
- .gitlab-ci.yml
- ansible/**/*
- helmfiles/**/*
- test/**/*
- openappstack/**/*
refs:
- master
terminate_mr_droplet_after_merge:
stage: cleanup
before_script:
- echo "We leave MR droplets running even when the pipeline is successful \
to be able to investigate a MR. We need to terminate them when the MR \
is merged into master."
script: |
if [ "$(git show -s --pretty=%p HEAD | wc -w)" -gt 1 ]
then
......@@ -196,7 +194,7 @@ terminate_droplet_after_merge:
terminate_old_droplets:
stage: cleanup
script:
# Remove droplet older than 2 days
- echo "Terminate stale, old droplets after 2 days."
- python3 -c "import greenhost_cloud; greenhost_cloud.terminate_droplets_by_name(\"^ci-\", 2)"
only:
changes:
......
# Optional environment variables:
# - KANIKO_BUILD_IMAGENAME: Build/target image image
#
# - KANIKO_BUILD_IMAGENAME: Build/target image name.
#   If empty, the image URL will be the root of the gitlab project path, i.e.
#   `open.greenhost.net:4567/GROUP/PROJECT:TAG`
#   If set, images will be named like
#   `open.greenhost.net:4567/GROUP/PROJECT/KANIKO_BUILD_IMAGENAME:TAG`
#
# - KANIKO_CONTEXT: The subdir which holds the Dockerfile, leave unset if
# the Dockerfile is located at root level of the project.
.kaniko_build:
......@@ -11,4 +17,5 @@
entrypoint: [""]
script:
- echo "{\"auths\":{\"$CI_REGISTRY\":{\"username\":\"$CI_REGISTRY_USER\",\"password\":\"$CI_REGISTRY_PASSWORD\"}}}" > /kaniko/.docker/config.json
- /kaniko/executor --context ${CI_PROJECT_DIR}/${KANIKO_CONTEXT:-.} --dockerfile ${CI_PROJECT_DIR}/${KANIKO_CONTEXT:-.}/Dockerfile --destination $CI_REGISTRY_IMAGE/${KANIKO_BUILD_IMAGENAME/#//}:${CI_COMMIT_REF_NAME}
- if [ -n "${KANIKO_BUILD_IMAGENAME}" ]; then export IMAGENAME="/${KANIKO_BUILD_IMAGENAME}"; fi
- /kaniko/executor --context ${CI_PROJECT_DIR}/${KANIKO_CONTEXT:-.} --dockerfile ${CI_PROJECT_DIR}/${KANIKO_CONTEXT:-.}/Dockerfile --destination ${CI_REGISTRY_IMAGE}${IMAGENAME}:${CI_COMMIT_REF_NAME}
.ssh_setup:
before_script:
- mkdir ~/.ssh
- echo -e 'Host *\n stricthostkeychecking no' > ~/.ssh/config
- eval $(ssh-agent -s)
- echo "$SSH_PRIVATE_KEY" | tr -d '\r' | ssh-add - > /dev/null
This feedback template can be used to provide feedback based on the [OAS testing instructions](https://docs.openappstack.net/en/latest/testing_instructions.html).
# Installation
> What problems did you encounter during installation? (If these problems prevent you from continuing with the testing process please provide some details what went wrong so we may try to help you get unstuck!)
> We know that the installation process as it is right now is too technical, especially given that we want OpenAppStack to be easy to use for people that are only moderately computer-savvy.
> We plan to make the installation process easier in the future, probably using some kind of web-based installer.
> Even so, what do you think could be improved about the installation process or tutorial? Did anything bother you, was something extra hard to do, or unclear?
# Nextcloud
> Is there anything that didn't work properly when logging in or creating users, or something that you'd like to remark?
> Did you have any issues using the Nextcloud desktop client?
> Did you have any issues using the Nextcloud mobile client?
> Anything else you tried: How did you do it? What was hard and what did you like?
# Onlyoffice
> Creating and editing a document: Is there anything that doesn't work properly, or something that you'd like to remark?
> Collaboration on the same document: Did this work properly? Is the collaboration smooth in a technical sense (i.e., no lag)?
> Something you like: Please think of something else you would do with OnlyOffice and describe it here. Now try to do it. How did you do it? What was hard and what did you like?
> Anything else you tried: How did you do it? What was hard and what did you like?
# Closing questions
> What's missing? Now that you have gotten an idea of OpenAppStack's initial offering (Nextcloud and OnlyOffice), think of what the next thing is your organisation would need to collaborate more efficiently. Or think of a tool that you or your organisation use a lot that is also "centralised" (e.g., something exclusively provided by Google, Microsoft or a different company)
> Do you have any other questions, comments, remarks, suggestions?
# Release checklist:
- [ ] update/review documentation and make sure it matches the current state
- Update how to checkout the latest stable tag
- [ ] update [CHANGELOG.md](https://keepachangelog.com/en/1.0.0/)
- Include `Known issues`
- [ ] update the version number in the `VERSION` file
- [ ] commit
- [ ] create signed tag (`git tag -s 0.2.0 -m 'Release 0.2.0'`)
- [ ] Push to MR, including tag
......@@ -4,7 +4,20 @@
No unreleased changes yet.
## [0.2.0] - 2091-10-22
## [0.2.1] - 2019-10-29
This is the release for the second user test.
Fixes:
* [#354](https://open.greenhost.net/openappstack/openappstack/issues/354) Nextcloud timeout during helmchart install
* [#305](https://open.greenhost.net/openappstack/openappstack/issues/305) Use prometheus default securitycontext
* [#351](https://open.greenhost.net/openappstack/openappstack/issues/351) Prometheus installation cannot be repeated
* [#349](https://open.greenhost.net/openappstack/openappstack/issues/349) Improve test instructions
* [#350](https://open.greenhost.net/openappstack/openappstack/issues/350) Increase nextcloud livenessprobe's leniency
* [#346](https://open.greenhost.net/openappstack/openappstack/issues/346) Creating cluster fails if the hostname is oas-dev
* [#319](https://open.greenhost.net/openappstack/openappstack/issues/319) kube_config_cluster.yml should be copied to local machine earlier
## [0.2.0] - 2019-10-22
* Release candidate for second user test
* Added features:
......
0.2.0
0.2.1
......@@ -3,5 +3,5 @@ callback_whitelist = profile_tasks, timer
inventory = inventory.yml
nocows = 1
stdout_callback = yaml
strategy_plugins = plugins/mitogen-0.2.8-pre/ansible_mitogen/plugins/strategy
strategy_plugins = plugins/mitogen-0.2.9/ansible_mitogen/plugins/strategy
strategy = mitogen_linear
......@@ -22,11 +22,7 @@ onlyoffice_rabbitmq_password: "{{ lookup('password', '{{ cluster_dir }}/secrets/
grafana_admin_password: "{{ lookup('password', '{{ cluster_dir }}/secrets/grafana_admin_password chars=ascii_letters') }}"
# Kubernetes version
kubernetes_version: "v1.14.3-rancher1-1"
# git repo versions
git_charts_version: 'HEAD'
git_local_storage_version: 'HEAD'
# version of the https://open.greenhost.net/openappstack/nextcloud repo
git_nextcloud_version: 'd882b6952c32b5cce03e6ad9a534035ce6f01230'
......@@ -50,11 +46,14 @@ krew:
checksum: 'sha256:dc2f2e1ec8a0acb6f3e23580d4a8b38c44823e948c40342e13ff6e8e12edb15a'
rke:
# You can change the kubernetes version used by rke in
# `ansible/group_vars/all/settings.yml.example`
#
# https://github.com/rancher/rke/releases
version: '0.2.7'
version: '0.3.2'
# Also possible:
# checksum: 'sha256:https://github.com/rancher/rke/releases/download/v0.2.4/sha256sum.txt'
checksum: 'sha256:7c05727aa3d6f8c4b5f60b057f1fe7883af48d5a778e3b1668f178dda84883ee'
checksum: 'sha256:96b366fe1faaa668b3e47f5b6d4bfd6334224e33c21e55dc79ec96f85e0e48e8'
cert_manager:
# cert-manager requires custom resource definitions applied before installing
......
......@@ -23,6 +23,7 @@ helmfiles:
# Optional, custom rke config.
# I.e. you can set the desired Kubernetes version but please be aware of
# the [every rke release has only a few supported kubernetes versions](https://rancher.com/docs/rke/latest/en/config-options/#kubernetes-version).
# See also https://rancher.com/blog/2019/keeping-k8s-current-with-rancher
#
# rke_custom_config:
# kubernetes_version: "v1.14.3-rancher1-1"
......
# Copyright 2019, David Wilson
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from __future__ import absolute_import
import atexit
import errno
import logging
import os
import signal
import socket
import sys
import time
try:
import faulthandler
except ImportError:
faulthandler = None
import mitogen
import mitogen.core
import mitogen.debug
import mitogen.master
import mitogen.parent
import mitogen.service
import mitogen.unix
import mitogen.utils
import ansible
import ansible.constants as C
import ansible_mitogen.logging
import ansible_mitogen.services
from mitogen.core import b
import ansible_mitogen.affinity
LOG = logging.getLogger(__name__)
ANSIBLE_PKG_OVERRIDE = (
u"__version__ = %r\n"
u"__author__ = %r\n"
)
def clean_shutdown(sock):
"""
Shut the write end of `sock`, causing `recv` in the worker process to wake
up with a 0-byte read and initiate mux process exit, then wait for a 0-byte
read from the read end, which will occur after the the child closes the
descriptor on exit.
This is done using :mod:`atexit` since Ansible lacks any more sensible hook
to run code during exit, and unless some synchronization exists with
MuxProcess, debug logs may appear on the user's terminal *after* the prompt
has been printed.
"""
sock.shutdown(socket.SHUT_WR)
sock.recv(1)
def getenv_int(key, default=0):
"""
Get an integer-valued environment variable `key`, if it exists and parses
as an integer, otherwise return `default`.
"""
try:
return int(os.environ.get(key, str(default)))
except ValueError:
return default
def save_pid(name):
"""
When debugging and profiling, it is very annoying to poke through the
process list to discover the currently running Ansible and MuxProcess IDs,
especially when trying to catch an issue during early startup. So here, if
a magic environment variable set, stash them in hidden files in the CWD::
alias muxpid="cat .ansible-mux.pid"
alias anspid="cat .ansible-controller.pid"
gdb -p $(muxpid)
perf top -p $(anspid)
"""
if os.environ.get('MITOGEN_SAVE_PIDS'):
with open('.ansible-%s.pid' % (name,), 'w') as fp:
fp.write(str(os.getpid()))
class MuxProcess(object):
"""
Implement a subprocess forked from the Ansible top-level, as a safe place
to contain the Mitogen IO multiplexer thread, keeping its use of the
logging package (and the logging package's heavy use of locks) far away
from the clutches of os.fork(), which is used continuously by the
multiprocessing package in the top-level process.
The problem with running the multiplexer in that process is that should the
multiplexer thread be in the process of emitting a log entry (and holding
its lock) at the point of fork, in the child, the first attempt to log any
log entry using the same handler will deadlock the child, as in the memory
image the child received, the lock will always be marked held.
See https://bugs.python.org/issue6721 for a thorough description of the
class of problems this worker is intended to avoid.
"""
#: In the top-level process, this references one end of a socketpair(),
#: which the MuxProcess blocks reading from in order to determine when
#: the master process dies. Once the read returns, the MuxProcess will
#: begin shutting itself down.
worker_sock = None
#: In the worker process, this references the other end of
#: :py:attr:`worker_sock`.
child_sock = None
#: In the top-level process, this is the PID of the single MuxProcess
#: that was spawned.
worker_pid = None
#: A copy of :data:`os.environ` at the time the multiplexer process was
#: started. It's used by mitogen_local.py to find changes made to the
#: top-level environment (e.g. vars plugins -- issue #297) that must be
#: applied to locally executed commands and modules.
original_env = None
#: In both processes, this is the temporary UNIX socket used for
#: forked WorkerProcesses to contact the MuxProcess
unix_listener_path = None
#: Singleton.
_instance = None
@classmethod
def start(cls, _init_logging=True):
"""
Arrange for the subprocess to be started, if it is not already running.
The parent process picks a UNIX socket path the child will use prior to
fork, creates a socketpair used essentially as a semaphore, then blocks
waiting for the child to indicate the UNIX socket is ready for use.
:param bool _init_logging:
For testing, if :data:`False`, don't initialize logging.
"""
if cls.worker_sock is not None:
return
if faulthandler is not None:
faulthandler.enable()
mitogen.utils.setup_gil()
cls.unix_listener_path = mitogen.unix.make_socket_path()
cls.worker_sock, cls.child_sock = socket.socketpair()
atexit.register(lambda: clean_shutdown(cls.worker_sock))
mitogen.core.set_cloexec(cls.worker_sock.fileno())
mitogen.core.set_cloexec(cls.child_sock.fileno())
cls.profiling = os.environ.get('MITOGEN_PROFILING') is not None
if cls.profiling:
mitogen.core.enable_profiling()
if _init_logging:
ansible_mitogen.logging.setup()
cls.original_env = dict(os.environ)
cls.child_pid = os.fork()
if cls.child_pid:
save_pid('controller')
ansible_mitogen.logging.set_process_name('top')
ansible_mitogen.affinity.policy.assign_controller()
cls.child_sock.close()
cls.child_sock = None
mitogen.core.io_op(cls.worker_sock.recv, 1)
else:
save_pid('mux')
ansible_mitogen.logging.set_process_name('mux')
ansible_mitogen.affinity.policy.assign_muxprocess()
cls.worker_sock.close()
cls.worker_sock = None
self = cls()
self.worker_main()
def worker_main(self):
"""
The main function of for the mux process: setup the Mitogen broker
thread and ansible_mitogen services, then sleep waiting for the socket
connected to the parent to be closed (indicating the parent has died).
"""
self._setup_master()
self._setup_services()
try:
# Let the parent know our listening socket is ready.
mitogen.core.io_op(self.child_sock.send, b('1'))
# Block until the socket is closed, which happens on parent exit.
mitogen.core.io_op(self.child_sock.recv, 1)
finally:
self.broker.shutdown()
self.broker.join()
# Test frameworks living somewhere higher on the stack of the
# original parent process may try to catch sys.exit(), so do a C
# level exit instead.
os._exit(0)
def _enable_router_debug(self):
if 'MITOGEN_ROUTER_DEBUG' in os.environ:
self.router.enable_debug()
def _enable_stack_dumps(self):
secs = getenv_int('MITOGEN_DUMP_THREAD_STACKS', default=0)
if secs:
mitogen.debug.dump_to_logger(secs=secs)
def _setup_simplejson(self, responder):
"""
We support serving simplejson for Python 2.4 targets on Ansible 2.3, at
least so the package's own CI Docker scripts can run without external
help, however newer versions of simplejson no longer support Python
2.4. Therefore override any installed/loaded version with a
2.4-compatible version we ship in the compat/ directory.
"""
responder.whitelist_prefix('simplejson')
# issue #536: must be at end of sys.path, in case existing newer
# version is already loaded.
compat_path = os.path.join(os.path.dirname(__file__), 'compat')
sys.path.append(compat_path)
for fullname, is_pkg, suffix in (
(u'simplejson', True, '__init__.py'),
(u'simplejson.decoder', False, 'decoder.py'),
(u'simplejson.encoder', False, 'encoder.py'),
(u'simplejson.scanner', False, 'scanner.py'),
):
path = os.path.join(compat_path, 'simplejson', suffix)
fp = open(path, 'rb')
try:
source = fp.read()
finally:
fp.close()
responder.add_source_override(
fullname=fullname,
path=path,
source=source,
is_pkg=is_pkg,
)
def _setup_responder(self, responder):
"""
Configure :class:`mitogen.master.ModuleResponder` to only permit
certain packages, and to generate custom responses for certain modules.
"""
responder.whitelist_prefix('ansible')
responder.whitelist_prefix('ansible_mitogen')
self._setup_simplejson(responder)
# Ansible 2.3 is compatible with Python 2.4 targets, however
# ansible/__init__.py is not. Instead, executor/module_common.py writes
# out a 2.4-compatible namespace package for unknown reasons. So we
# copy it here.
responder.add_source_override(
fullname='ansible',
path=ansible.__file__,
source=(ANSIBLE_PKG_OVERRIDE % (
ansible.__version__,
ansible.__author__,
)).encode(),
is_pkg=True,
)
def _setup_master(self):
"""
Construct a Router, Broker, and mitogen.unix listener
"""
self.broker = mitogen.master.Broker(install_watcher=False)
self.router = mitogen.master.Router(
broker=self.broker,
max_message_size=4096 * 1048576,
)
self._setup_responder(self.router.responder)
mitogen.core.listen(self.broker, 'shutdown', self.on_broker_shutdown)
mitogen.core.listen(self.broker, 'exit', self.on_broker_exit)
self.listener = mitogen.unix.Listener(
router=self.router,
path=self.unix_listener_path,
backlog=C.DEFAULT_FORKS,
)
self._enable_router_debug()
self._enable_stack_dumps()
def _setup_services(self):
"""
Construct a ContextService and a thread to service requests for it
arriving from worker processes.
"""
self.pool = mitogen.service.Pool(
router=self.router,
services=[
mitogen.service.FileService(router=self.router),
mitogen.service.PushFileService(router=self.router),
ansible_mitogen.services.ContextService(self.router),
ansible_mitogen.services.ModuleDepService(self.router),
],
size=getenv_int('MITOGEN_POOL_SIZE', default=32),
)
LOG.debug('Service pool configured: size=%d', self.pool.size)
def on_broker_shutdown(self):
"""
Respond to broker shutdown by beginning service pool shutdown. Do not
join on the pool yet, since that would block the broker thread which
then cannot clean up pending handlers, which is required for the
threads to exit gracefully.
"""
# In normal operation we presently kill the process because there is
# not yet any way to cancel connect().
self.pool.stop(join=self.profiling)
def on_broker_exit(self):
"""
Respond to the broker thread about to exit by sending SIGTERM to
ourself. In future this should gracefully join the pool, but TERM is
fine for now.
"""
if not self.profiling:
# In normal operation we presently kill the process because there is
# not yet any way to cancel connect(). When profiling, threads
# including the broker must shut down gracefully, otherwise pstats
# won't be written.
os.kill(os.getpid(), signal.SIGTERM)
path_classifiers:
library:
- "mitogen/compat"
- "ansible_mitogen/compat"
queries:
# Mitogen 2.4 compatibility trips this query everywhere, so just disable it
- exclude: py/unreachable-statement
- exclude: py/should-use-with
# mitogen.core.b() trips this query everywhere, so just disable it
- exclude: py/import-and-import-from
include LICENSE
......@@ -73,7 +73,9 @@ necessarily involves preventing the scheduler from making load balancing
decisions.
"""
from __future__ import absolute_import
import ctypes
import logging
import mmap
import multiprocessing
import os
......@@ -83,41 +85,44 @@ import mitogen.core
import mitogen.parent
LOG = logging.getLogger(__name__)
try:
_libc = ctypes.CDLL(None, use_errno=True)
_strerror = _libc.strerror
_strerror.restype = ctypes.c_char_p
_pthread_mutex_init = _libc.pthread_mutex_init
_pthread_mutex_lock = _libc.pthread_mutex_lock
_pthread_mutex_unlock = _libc.pthread_mutex_unlock
_sem_init = _libc.sem_init
_sem_wait = _libc.sem_wait
_sem_post = _libc.sem_post
_sched_setaffinity = _libc.sched_setaffinity
except (OSError, AttributeError):
_libc = None
_strerror = None
_pthread_mutex_init = None
_pthread_mutex_lock = None
_pthread_mutex_unlock = None
_sem_init = None
_sem_wait = None
_sem_post = None
_sched_setaffinity = None
class pthread_mutex_t(ctypes.Structure):
class sem_t(ctypes.Structure):
"""
Wrap pthread_mutex_t to allow storing a lock in shared memory.
Wrap sem_t to allow storing a lock in shared memory.
"""
_fields_ = [
('data', ctypes.c_uint8 * 512),
('data', ctypes.c_uint8 * 128),
]
def init(self):
if _pthread_mutex_init(self.data, 0):
if _sem_init(self.data, 1, 1):
raise Exception(_strerror(ctypes.get_errno()))
def acquire(self):
if _pthread_mutex_lock(self.data):
if _sem_wait(self.data):
raise Exception(_strerror(ctypes.get_errno()))
def release(self):
if _pthread_mutex_unlock(self.data):
if _sem_post(self.data):
raise Exception(_strerror(ctypes.get_errno()))
......@@ -128,7 +133,7 @@ class State(ctypes.Structure):
the context of the new child process.
"""
_fields_ = [
('lock', pthread_mutex_t),
('lock', sem_t),
('counter', ctypes.c_uint8),
]
......@@ -142,7 +147,7 @@ class Policy(object):
Assign the Ansible top-level policy to this process.
"""
def assign_muxprocess(self):
def assign_muxprocess(self, index):
"""
Assign the MuxProcess policy to this process.
"""
......@@ -177,9 +182,9 @@ class FixedPolicy(Policy):
cores, before reusing the second hyperthread of an existing core.
A hook is installed that causes :meth:`reset` to run in the child of any
process created with :func:`mitogen.parent.detach_popen`, ensuring
CPU-intensive children like SSH are not forced to share the same core as
the (otherwise potentially very busy) parent.
process created with :func:`mitogen.parent.popen`, ensuring CPU-intensive
children like SSH are not forced to share the same core as the (otherwise
potentially very busy) parent.
"""
def __init__(self, cpu_count=None):
#: For tests.
......@@ -207,11 +212,13 @@ class FixedPolicy(Policy):
self._reserve_mask = 3
self._reserve_shift = 2
def _set_affinity(self, mask):
def _set_affinity(self, descr, mask):
if descr:
LOG.debug('CPU mask for %s: %#08x', descr, mask)
mitogen.parent._preexec_hook = self._clear
self._set_cpu_mask(mask)
def _balance(self):
def _balance(self, descr):
self.state.lock.acquire()
try:
n = self.state.counter
......@@ -219,28 +226,28 @@ class FixedPolicy(Policy):
finally:
self.state.lock.release()
self._set_cpu(self._reserve_shift + (
self._set_cpu(descr, self._reserve_shift + (
(n % (self.cpu_count - self._reserve_shift))
))
def _set_cpu(self, cpu):
self._set_affinity(1 << cpu)
def _set_cpu(self, descr, cpu):
self._set_affinity(descr, 1 << (cpu % self.cpu_count))
def _clear(self):
all_cpus = (1 << self.cpu_count) - 1
self._set_affinity(all_cpus & ~self._reserve_mask)
self._set_affinity(None, all_cpus & ~self._reserve_mask)
def assign_controller(self):
if self._reserve_controller:
self._set_cpu(1)
self._set_cpu('Ansible top-level process', 1)
else:
self._balance()
self._balance('Ansible top-level process')
def assign_muxprocess(self):
self._set_cpu(0)
def assign_muxprocess(self, index):
self._set_cpu('MuxProcess %d' % (index,), index)
def assign_worker(self):
self._balance()
self._balance('WorkerProcess')
def assign_subprocess(self):
self._clear()
......@@ -258,9 +265,19 @@ class LinuxPolicy(FixedPolicy):
mask >>= 64
return mitogen.core.b('').join(chunks)
def _get_thread_ids(self):
try:
ents = os.listdir('/proc/self/task')
except OSError:
LOG.debug('cannot fetch thread IDs for current process')
return [os.getpid()]
return [int(s) for s in ents if s.isdigit()]
def _set_cpu_mask(self, mask):
s = self._mask_to_bytes(mask)
_sched_setaffinity(os.getpid(), len(s), s)
for tid in self._get_thread_ids():
_sched_setaffinity(tid, len(s), s)
if _sched_setaffinity is not None:
......
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Please register or to comment