diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index 5a89e5a24d56912490c22f34cdaa9a7fe77df979..0e4bcfec4fce5e63a2bdcc6dcaf1328aa3e55371 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -432,6 +432,8 @@ single-sign-on-helm-release:
   stage: install-apps
   script:
     - *debug_information
+    # Add optional override values we need for the CI pipeline only
+    - '[ -f ./install/overrides/oas-${APP}-override.yaml ] && kubectl apply -n oas-apps -f ./install/overrides/oas-${APP}-override.yaml'
     - bash ./install/install-${APP}.sh
   extends:
     - .ssh_setup
diff --git a/CHANGELOG.md b/CHANGELOG.md
index a7ad5551644c7e3533b3e38946a68f194c7bcab0..bfe5e983fe880270b6e05fc96fe868ee7d3eeffd 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,15 @@
 # Changelog
 
+## [0.6.0] - 2021-07-14
+
+* Overhauled installation procedure
+  * Secrets are now only managed on the cluster
+  * settings.yml has been replaced with .flux.env and is saved as a secret in
+    the cluster
+  * Applications can be installed after first install of the cluster
+  * Switched to Flux 2
+* Updated all applications
+
 ## [0.5.0] - 2020-09-17
 
 * Added automatic smtp config for nextcloud
diff --git a/docs/advanced_installation.rst b/docs/advanced_installation.rst
index be70becfa4f26e43967c429ba1ce8d781abc058e..afb92a7ea8da60afeb6eddc8af4480b8065dc563 100644
--- a/docs/advanced_installation.rst
+++ b/docs/advanced_installation.rst
@@ -5,16 +5,112 @@ Advanced installation
 In this guide we show how to customize your cluster installation, e.g. if you
 want to install additional applications or change the configuration of existing
 apps installed by OAS, this is the right place. Customizing other parts of your
-cluster might be possible but not yet covered by this guide.
+cluster is possible but not yet covered by this guide.
 As the name of this guide implies, it is written for users with advanced knowledge
-of the tools behind Openappstack, most importantly: Kubernetes, Helm, Ansible and Flux.
+of the tools behind OpenAppStack, most importantly: Kubernetes, Helm, Ansible and Flux 2.
+
+
+.. _setup-with-greenhost-api:
+
+Advanced cluster creation: Setup with the Greenhost API
+-------------------------------------------------------
+
+-  Before you can start, you need to have an API key with Customer
+   rights.
+
+   1. In the Cosmos service centre, click your webmaster account name on
+      the top right corner
+   2. Go to "User settings"
+   3. Click "API keys"
+   4. Click "New API key"
+   5. Click "Generate new key"
+   6. Give the key "Customer", "CloudCustomer" or "API" access rights. You
+      will need "Customer" rights if you want to automatically generate DNS
+      rules. If you do not have this right, you have to `manually set the
+      right DNS rules <#dns-entries>`__
+      later.
+   7. Copy the generated key and export it to this variable in a
+      terminal:
+
+      ::
+
+        $ export COSMOS_API_TOKEN=<paste your API key here>
+
+   8. In *the same terminal*, you can now use the ``create`` subcommand
+
+- There are two ways to let the installation program know which VPS to use:
+
+   1. Based on an already existing `Greenhost <https://greenhost.net>`__
+      VPS, using the ``--droplet-id`` argument.
+
+      Find the ID of your VPS either in the Greenhost Cosmos interface (it is
+      the numeric part of the URL in the "Manage VPS" screen).
+
+   2. By creating a new VPS through the API, using the ``--create-droplet``
+      argument.
+
+      In that case, make sure to also provide the ``--create-hostname`` and
+      ``--ssh-key-id`` arguments.
+
+      You can find your SSH key ID by going to VPS Cloud -> SSH keys and
+      checking the link under "Show key". The numerical part is your SSH
+      key ID.
+
+      *Note: You can also use the API to list ssh keys and find it there.
+      Read the `Greenhost API
+      documentation <https://service.greenhost.net/cloud/ApiDoc#/default>`__
+      for more information*
+
+- In both cases you need to provide the ``DOMAIN_NAME`` positional
+  argument.
+
+  If you use a subdomain (e.g. ``oas.yourdomain.com``), use the
+  ``--subdomain`` argument as follows:
+
+  ::
+
+    $ python -m openappstack oas.example.org create --subdomain oas example.org
+
+- Here is an example of a complete creation command:
+
+  ::
+
+    $ python -m openappstack oas.example.org create \
+      --create-droplet \
+      --create-hostname oas.example.org \
+      --ssh-key-id 112 \
+      --create-domain-records \
+      --subdomain oas \
+      example.org
+
+  Let's break down the arguments:
+
+  - ``--create-droplet``: Use the Greenhost API to create a new VPS
+  - ``--create-hostname oas.example.org``: Create a VPS with hostname ``oas.example.org``
+  - ``--ssh-key-id 112``: Use SSH key ID 112 (you can find your SSH key ID in
+    the `Cosmos Service Centre <https://service.greenhost.net>`__ under *VPS Cloud* -> *Installation SSH Keys*. Hover over a button there to see the ID in the URL it uses.
+  - ``--create-domain-records``: Use the Greenhost API to create DNS records.
+    If you do this, you can skip :ref:`step-2`. The following records are
+    created:
+
+    - An ``A`` record ``oas.example.org`` pointing to the VPS's IP address,
+    - A ``CNAME`` record ``*.oas.example.org`` pointing to ``oas.example.org``.
+
+  - ``--subdomain oas``: Only needed when you use ``--create-domain-records`` so
+    the Greenhost API can find your domain. Instead of passing
+    ``oas.example.org`` as the positional argument, you provide the root domain
+    ``example.org`` and set the subdomain with ``--subdomain oas``.
+
+You can now continue to :ref:`step-2`, or :ref:`step-3` if you used the API to
+create the DNS records.
+
+Customization
+-------------
 
 .. warning::
 
   Customizing your OAS cluster could break it in a way that is not easy to
   recover from. Please be aware of this risk before proceeding.
 
-
 Prerequisites
 =============
 
@@ -25,116 +121,45 @@ Customize OAS applications
 ==========================
 
 Apps deployed by OAS are configured using helm values from templates in
-``ansible/roles/apps/templates/settings/APPNAME.yaml``. To add new helm values
-or override existing values you can use ``APPNAME_extra_values`` in your
-``CLUSTERDIR/group_vars/all/settings.yaml``.
-Look for the correct ``APPNAME`` in ``flux/<namespace>/APPNAME.yaml``.
-
-We use the `ansible combine filter with "recursive=True" <https://docs.ansible.com/ansible/latest/user_guide/playbooks_filters.html#combining-hashes-dictionaries>`__
-to merge the ``APPNAME_extra_values`` variable into our default settings.
-A possible pitfall with customizing values from ansible templates is that
-you need to replace all dynamic expressions which would be templated.
-We will cover this in the example below.
-
-Example: Customize prometheus data retention
---------------------------------------------
-
-If you want to lower the data retention value (currently 10 days)
-to lets say 5 days you need to add the following addition to
-``prometheus_extra_values`` in ``CLUSTERDIR/group_vars/all/settings.yaml``.
-(For all prometheus default values set by Openappstack see
-``ansible/roles/apps/templates/settings/prometheus.yaml``.)
-
-.. code-block:: yaml
-
-   prometheus_extra_values:
-     server:
-       retention: "5d"
-
-After this, run the
-`Openappstack installation procedure <installation_instructions.html#step-4-installation>`__
-again. Until
-`helm-operator detects changes to configmaps and secrets <https://open.greenhost.net/openappstack/openappstack/-/issues/617>`__
-by itself and updates a helm release you need to manully restart the
-helm-operator pod in order to apply the config change to prometheus:
-
-::
-
-   kubectl -n oas delete pod -l 'app=helm-operator'
-
-
-Deploying custom apps with an extra flux instance
-=================================================
-
-**NOTE**: Since we updated to flux 2, an extra flux instance is not necessary
-anymore. This documentation will be updated soon, but ignore what it says below
-for now, unless you are running version 0.5.0 or below.
-
-Openappstack uses `Flux <https://fluxcd.io>`__ to deploy and auto-update
-applications. See the `Automatic updates <design.html#automatic-updates>`__
-section in the design doc for more information abouot Flux.
-
-You can configure an extra Flux instance running on your OAS cluster,
-which can monitor your own git repo with flux definitions.
-Be aware that you only need a second Flux instance, the existing helm-operator
-will deploy the helm releases from both Flux installations.
-
-In order to do that you first need to enable ``flux-custom`` in the list of
-``enabled_applications`` in your ``settings.yml`` file. Then configure
-``flux-custom_extra_values`` to your needs (see the `Flux helm chart <https://github.com/fluxcd/flux/blob/master/chart/flux/README.md>`__
-for all possible values). An example is shown below:
-
-.. code-block:: yaml
-
-   flux-custom_extra_values:
-     # https://github.com/fluxcd/flux/tree/master/chart/flux#configuration
-     git:
-       url: git@my.git.server.org:myself/flux-config.git
-       branch: master
-       readonly: true
-       pollInterval: 1h
-     registry:
-       excludeImage: '*'
-     # If you use ssh instead of https, like in above git repo URL you need to
-     # provide the ssh hostkey pringerprint shown with
-     #   `ssh-keyscan -t ssh-rsa my.git.server.org`:
-     ssh:
-       known_hosts: 'my.git.server.org ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDZ2JHLwWuixvUvx630E3fInoJKyABLPzgDH4k4dM6ptQXVfK0Hu53nhIjsCbp/i4u4GLH2B8Tv2umVh8EejvjbqsZpD4fX6PWbM131vA3sPSD4q5qJV1xkZdc4+3STrndRD02py96OtjBl/f6EJW3Upz/xWX/iiL6crp8RZzOsSn9dv/gg4Bn42G3gcNZJVgNJHO8yPAYf1fMVlDYpiKlib6Vuow8EKerqByDnLm1vbnnMHes+F6Pt1JkLzURGE83AwAUfZZTRGyzZEkXVNpcV1Pq3esg1bBAW6dfxeyzF9SJrpYkv0lbUkuBFEz9ExzWpiKe/q/O61W0u9Q6sKwev'
-     sync:
-       state: secret
-     syncGarbageCollection:
-       enabled: true
-
-I your Flux definitions in your git repo contain ssh git URLs you also
-need to configure ``helm-operator`` to trust the fingerprints of the
-servers your git repositories are hosted at.  If they are hosted on the same
-server your Flux repo is at i.e.:
-
-.. code-block:: yaml
-
-   helm_operator:
-     extra_opts: "--set git.ssh.known_hosts='my.git.server.org ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDZ2JHLwWuixvUvx630E3fInoJKyABLPzgDH4k4dM6ptQXVfK0Hu53nhIjsCbp/i4u4GLH2B8Tv2umVh8EejvjbqsZpD4fX6PWbM131vA3sPSD4q5qJV1xkZdc4+3STrndRD02py96OtjBl/f6EJW3Upz/xWX/iiL6crp8RZzOsSn9dv/gg4Bn42G3gcNZJVgNJHO8yPAYf1fMVlDYpiKlib6Vuow8EKerqByDnLm1vbnnMHes+F6Pt1JkLzURGE83AwAUfZZTRGyzZEkXVNpcV1Pq3esg1bBAW6dfxeyzF9SJrpYkv0lbUkuBFEz9ExzWpiKe/q/O61W0u9Q6sKwev'
-
-After this, run the
-`Openappstack installation procedure <installation_instructions.html#step-4-installation>`__
-again.
-
-
-Using a different storage class
-===============================
-
-Openappstack uses the `local path
-provisioner <https://github.com/rancher/local-path-provisioner>`__ to
-create local path persistent volumes on the host. If you want to use a
-different storage provisioner you need to first set the setting
-``storageClass.defaultClass`` to ``false`` (see
-``ansible/roles/apps/templates/settings/local-path-provisioner.yaml``) and
-can then remove the ``local-path-provisioner`` from the list of
-``enabled_applications``
-
-After this, run the
-`Openappstack installation procedure <installation_instructions.html#step-4-installation>`__
-again.
-
-Now you can configure your Kubernetes cluster to use a different default storage
-provisioner which newly created persistent volumes can use.
+``flux2/apps/<application>/release.yaml``. It is possible to override values
+from the helmrelease by adding a custom ``ConfigMap`` or ``Secret`` to the
+cluster. The secret or configmap name is specified in the ``valuesFrom`` section
+of the ``release.yaml`` file. Read more in the `Flux documentation
+<https://fluxcd.io/docs/migration/helm-operator-migration/#values-from-sources>`__.
+
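+As a sketch, a ``HelmRelease`` that reads optional override values from a
+``ConfigMap`` could look like this (names are illustrative; check the actual
+``release.yaml`` files under ``flux2/apps/`` for the real ones):
+
+.. code:: yaml
+
+   apiVersion: helm.toolkit.fluxcd.io/v2beta1
+   kind: HelmRelease
+   metadata:
+     name: nextcloud
+     namespace: oas-apps
+   spec:
+     # chart, interval and other settings omitted for brevity
+     valuesFrom:
+       # Values from this ConfigMap override the chart defaults; the release
+       # still installs when the ConfigMap is absent
+       - kind: ConfigMap
+         name: oas-nextcloud-override
+         optional: true
+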
+Example: Customize Nextcloud to work with staging certificates
+--------------------------------------------------------------
+
+Our CI pipeline works with staging certificates from Let's Encrypt. For that
+reason we need to allow insecure connections for the integration with
+ONLYOFFICE. You can find the override file at ``install/overrides/oas-nextcloud-override.yaml``.
+
+To apply it, run the following commands:
+
+.. code:: bash
+
+   # If you want to run this on your provisioning machine, tell kubectl to use
+   # your cluster:
+   export KUBECONFIG=$PWD/clusters/oas.example.org/kube_config_cluster.yml
+   # Check the current state of the helmrelease you want to modify:
+   flux get helmrelease -A
+   # If all is OK, make sure to apply your override configmap or secret in the
+   # same namespace as your helmrelease with the '-n' argument
+   kubectl apply \
+     -n oas-apps \
+     -f ./install/overrides/oas-nextcloud-override.yaml
+
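+Instead of waiting for Flux's next sync interval, you can then ask it to
+reconcile the modified helmrelease right away (assuming the release is named
+``nextcloud``):
+
+.. code:: bash
+
+   flux reconcile helmrelease -n oas-apps nextcloud
+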
+Adding custom apps to the cluster
+=================================
+
+OpenAppStack uses Flux 2 to install and auto-update applications. If you want to
+install extra applications or other things into the Kubernetes cluster, our
+advice would be to set up your own GitRepository and add it to the Flux system.
+
+When you do this, you are fully responsible for keeping those applications
+secure and updated. If any of those applications is insecure, that can also
+invalidate the security of your OpenAppStack applications, because they are part
+of the same cluster and VPS.
+
+Refer to the `Flux 2 documentation <https://fluxcd.io/docs>`__ for more
+information.
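+
+As a minimal sketch (assuming you have the ``flux`` command line tool installed
+and a git repository with Kubernetes manifests in a ``deploy`` folder),
+registering your own repository could look like this:
+
+.. code:: bash
+
+   flux create source git my-apps \
+     --url=https://example.org/me/my-apps.git \
+     --branch=main
+   flux create kustomization my-apps \
+     --source=GitRepository/my-apps \
+     --path="./deploy" \
+     --prune=true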
diff --git a/docs/conf.py b/docs/conf.py
index c36ddd4c334aff681437ba0d08737dcdd394e355..ad6133c985e24133f81574f85c5a35c61764925d 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -31,7 +31,8 @@ with open('../VERSION') as version_file:
 # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
 # ones.
 extensions = [
-    'recommonmark'
+    'recommonmark',
+    'sphinx.ext.autosectionlabel'
 ]
 
 # Add any paths that contain templates here, relative to this directory.
diff --git a/docs/design.md b/docs/design.md
index f441ee8b14c364ad5530398e5567ba0b6cf2c42b..8edfd1baf785a7c033be3f347988cb7de8ca32e5 100644
--- a/docs/design.md
+++ b/docs/design.md
@@ -103,119 +103,20 @@ inside the containers). However, it is possible to mount persistent volumes to
 specific directories in the container, basically adding a persistent layer on
 top of the containerised application. To provide this in OAS's simple setup, we
 use a [local storage
-provisioner](https://github.com/rancher/local-path-provisioner) that
+provisioner](https://github.com/greenhost/local-path-provisioner) that
 automatically provides persistent data on the VPS running OAS to an application
 that requests it.
 
 ## Automatic updates
 
 OpenAppStack has an auto-update mechanism that performs unattended upgrades to
-applications. [Flux](https://fluxcd.io/) is the system running in the cluster
+applications. [Flux 2](https://fluxcd.io/) is the system running in the cluster
 that is responsible for these updates.
 
-Technically, flux is split up in two components: `flux` and `helm-operator`.
-`flux` watches a git repository (or subdirectory thereof) for [source
-files](https://docs.fluxcd.io/projects/helm-operator/en/latest/references/helmrelease-custom-resource.html)
-that prescribe which application versions should be installed, and stores a copy
-of those prescriptions inside the cluster as a Kubernetes manifest (of kind
-`HelmRelease`).
-
-`helm-operator` watches those in-cluster `HelmRelease` manifests, checks whether
-the listed applications are already installed – including correct versions and
-other settings – and performs any actions that are necessary to make sure that
-the cluster state matches the prescriptions: installing new applications,
-upgrading others.
-
-Which git repository is watched by flux, is configurable. For typical
-production OpenAppStack deployments, this is set to be
-`https://open.greenhost.net/openappstack/openappstack` – the `HelmRelease` files
-are stored in the `flux` directory. The OpenAppStack team considers available
-upstream updates (say an update to Nextcloud). If the new Nextcloud version
-passes our tests, the team changes the corresponding application description
-file in the git repository (in this case `flux/nextcloud.yaml`) to reference the
-new version. OpenAppStack deployments that are configured to watch this git
-repository will see the change, record it in the in-cluster `HelmRelease`
-manifest, and have their `helm-operator` perform the Nextcloud upgrade.
-
-### Customising which applications are installed
-
-The `HelmRelease` files in the `flux` directory form the applications that are
-available for installation. There is an additional mechanism though, to allow
-the cluster administrator to choose which applications are actually installed.
-You might want to leave out some apps that you think you won't use, and save
-some resources that way. You can choose which apps to enable/disable by modifying the `enabled_applications` list in `CLUSTERDIR/group_vars/all/settings.yml`, and afterwards running the OAS install procedure.
-
-Every `HelmRelease` file should have a corresponding Kubernetes Secret, which
-we call a "settings secret". For the apps that are part of OpenAppStack, these
-secrets are created by the OpenAppStack installation procedure, so you don't
-need to handle them unless you want to customise the set of installed
-applications.
-
-The subdirectory of `/flux` where the `HelmRelease` file is located corresponds
-to the namespace of the secret. For example, the `HelmRelease` file
-`/flux/oas-apps/nextcloud.yml` corresponds to the Kubernetes secret
-`nextcloud-settings` in the namespace `oas-apps`.
-
-This Kubernetes secret contains two keys:
-* `enabled`: this should contain a simple string: `true` to enable the application, or
-  `false` to disable it;
-* `values.yaml`: this contains a yaml-formatted string with Helm values that
-  are supplied to Helm when the application is installed.
-
-### Local development
-
-**NOTE**: Local flux was removed. Documentation will be updated soon.
-
-When developing OpenAppStack, it's nice to be able to change the application
-versions for your development cluster only, without affecting production
-clusters. One way to do that is to set the `flux_source.repo` and/or
-`flux_source.branch` ansible variables to point to another branch of the
-`open.greenhost.net/openappstack/openappstack` repository, or to a different
-repository.
-
-To make this easier, we included a way to serve up a git repository with
-`HelmRelease` manifests from the cluster itself, so you don't need an external
-Gitlab or Github project. This feature is disabled by default, and can be
-enabled by setting the `local_flux` ansible variable to `true`. If enabled, this
-will change several things:
-* when you run the OpenAppStack installation ansible playbook from your
-  workstation, the current contents of the `flux` directory from your
-  workstation are copied to a directory (`/var/lib/OpenAppStack/local-flux`) on
-  the OpenAppStack host machine; also, a git commit is created in that directory
-  on the host machine from these updated contents;
-* as part of the OpenAppStack installation, an nginx instance is deployed that
-  serves up the contents of `/var/lib/OpenAppStack/local-flux` over http;
-* `flux` is configured to read the `HelmRelease` files from that git repository,
-  served by the in-cluster nginx. In particular, the `flux_source` variables are
-  ignored if `local_flux` is enabled.
-
-The `local-flux` feature is also used by our CI pipeline, in order to be as
-similar as possible to a production installation, in particular using `flux` and
-`helm-operator` for the installation process, while still being completely
-separate from the production applications versions as prescribed by the master
-repository at `open.greenhost.net/openappstack/openappstack`.
-
-#### Triggering an update
-
-Both `flux` and `helm-operator` check at regular intervals (currently 1 hour
-and 20 minutes, respectively) whether there is an upgrade to perform. If you
-don't want to wait for that after making a change, you can trigger an update:
-* to let `flux` re-read the `HelmRelease` files from the git repo (be it the
-  OpenAppStack master one, a `local-flux` one, or yet another one), log in to
-  the host machine and do
-
-      $ fluxctl --k8s-fwd-ns=oas sync
-
-  If there are any changes to `HelmRelease` manifests, `helm-operator` is
-  notified of that through the Kubernetes API and should act on them more or
-  less instantly (though the actual installation or upgrade could take quite a
-  while, depending on what needs to be done).
-* If, for some reason, the `HelmRelease` manifests are still in sync between git
-  repository and cluster, but you'd still like to let `helm-operator` check
-  whether these `HelmRelease`s match what's actually installed, you can force
-  that by deleting the `helm-operator` pod:
-
-      $ kubectl delete pod -n oas -l app=helm-operator
-
-  A new `helm-operator` pod should be created automatically, and in our
-  experience will do a reconciliation run soon.
+Flux 2 tracks all the files in the `flux2` directory of the [OpenAppStack code
+repository](https://open.greenhost.net/openappstack/openappstack). Once changes
+are pushed to the branch that Flux tracks, they are applied to the cluster.
+
+We use Flux 2 in "read-only" mode, which means that your OpenAppStack cluster
+does not push changes to our Git repository. You can read more about Flux 2 and
+its components in the [Flux 2 documentation](https://fluxcd.io/docs).
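+
+For example, you can inspect what Flux 2 manages on a running cluster with the
+`flux` command line tool (assuming it is installed):
+
+    $ flux get kustomizations --all-namespaces
+    $ flux get helmreleases --all-namespaces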
diff --git a/docs/index.rst b/docs/index.rst
index 4e5b919a5a8f88ae594bdeba147cf3a369d859f1..3176794a39d07197351fa174e8b46527f8d1853e 100644
--- a/docs/index.rst
+++ b/docs/index.rst
@@ -30,8 +30,8 @@ For more information, go to `the OpenAppStack website`_.
    advanced_installation
    testing_instructions
    usage
-   troubleshooting
    maintenance
+   troubleshooting
    security
    upgrading
    comparable_projects
diff --git a/docs/installation_instructions.rst b/docs/installation_instructions.rst
index 77b9656e556fb1d887094afd4e69e893b419aad4..a3895dd2241bb3cd0b8a5842dafa1a30187fe998 100644
--- a/docs/installation_instructions.rst
+++ b/docs/installation_instructions.rst
@@ -18,8 +18,8 @@ Kubernetes cluster on more than one VPS) will come in the future.
   will generate some secrets that will be saved to this machine.
 
 If you encounter any difficulties while following these instructions,
-please [open an issue following our contact
-guide][https://openappstack.net/contact.html).
+please `let us know by following our contact
+guide <https://openappstack.net/contact.html>`__.
 
 .. warning::
   -  OpenAppStack is still under heavy development and is not ready for
@@ -42,7 +42,7 @@ bare metal server ready. The server should meet these requirements:
 -  Current Debian stable "buster"
 -  A public IP address
 -  The ability to create DNS records for this IP
--  8GB of RAM
+-  6 cores and 12 GB of RAM
 -  At least 25GB of disk space for installation, plus more for
    application data. We recommend starting with 30GB.
 -  Root ssh access
@@ -50,10 +50,10 @@ bare metal server ready. The server should meet these requirements:
 
    ``apt install python3``
 
--  A trusted machine to run the installer on (we call this the
-   ``provisioning   machine``). All the commands listed in these
-   instructions should be run on the provisioning machine, unless
-   specified otherwise.
+You should also have a trusted machine to run the installer on (we call this the
+``provisioning machine``). All the commands listed in these instructions should
+be run on the provisioning machine, unless specified otherwise:
+
 -  You need Python 3 with its development files, Pip and Git installed
    (``apt install python3-pip python3-dev git``)
 -  We recommend using a `python
@@ -67,11 +67,11 @@ Getting the installation script
 -------------------------------
 
 On your **provisioning machine**, clone the OpenAppStack git repository
-and checkout the latest release branch (currently ``v0.5``):
+and checkout the latest release branch (currently ``v0.6``):
 
 ::
 
-    $ git clone -b v0.5 https://open.greenhost.net/openappstack/openappstack.git
+    $ git clone -b v0.6 https://open.greenhost.net/openappstack/openappstack.git
     $ cd openappstack
 
 Create a python virtual environment called "env" that uses python 3.
@@ -104,225 +104,171 @@ Now you can run the OpenAppStack CLI as follows:
 The CLI *always* needs a ``CLUSTER_NAME`` argument. Even for getting
 subcommand help messages. Be sure to run this command in the root
 directory of the git repository. In this tutorial, we're using
-``my-cluster`` as the cluster name. Try it out by running
+``oas.example.org`` as the cluster name. Try it out by running
 
 ::
 
-    $ python -m openappstack my-cluster --help
+    $ python -m openappstack oas.example.org --help
 
 Install OpenAppStack
 --------------------
 
 Setting up OpenAppStack happens in six steps:
 
-1. **Set up cluster**
+1. :ref:`step-1`
+
+   Create configuration files.
 
-   Create configuration files, and optionally create VPS.
+2. :ref:`step-2`
 
-2. **Optional: enable backups using Velero**
+   Configure an ``A`` record for your domain as well as a wildcard record for the
+   applications hosted on subdomains.
 
-   Optionally add settings and credentials to enable backups of your
-   cluster to external S3 storage.
+3. :ref:`step-3`
 
-3. **Optional: Configure outgoing email**
+   Configure settings for application installations.
 
-   Optionally add settings and credentials to enable outgoing emails
-   from applications to your users
+   Optionally configure outgoing email and/or automated backup settings.
 
+4. :ref:`step-4`
 
-4. **Install OpenAppStack**
+   Set up the VPS for OpenAppStack and install k3s, a lightweight Kubernetes
+   distribution.
 
-   Install Kubernetes and all the other software that comes with
-   OpenAppStack. See `Usage <#usage>`__ for more information on which
-   applications are installed.
+5. :ref:`step-5`
 
-5. **Validate setup**
+   Install the OpenAppStack core applications as well as optional applications.
+
+6. :ref:`step-6`
 
    This runs a test in the browser to validate that the installation was
    successful.
 
+.. _step-1:
+
 Step 1: Set up cluster
 ~~~~~~~~~~~~~~~~~~~~~~
 
+In this guide, we will set up a cluster with IP address ``1.2.3.4`` on the
+domain ``oas.example.org``. Substitute these two values with your own IP
+address and domain.
+
 To set up your cluster, use the ``create`` subcommand of the
-OpenAppStack CLI. First, choose a name (we chose ``my-cluster``) for
+OpenAppStack CLI. First, choose a name (we chose ``oas.example.org``) for
 your cluster. Then run the following command to get information about
 the ``create`` subcommand:
 
 ::
 
-    $ python -m openappstack my-cluster create --help
-
-There are two options to create a cluster, `using the Greenhost
-API <#option-1--setup-with-the-greenhost-api>`__ or `using any other
-machine <#option-2--setup-any-vps-or-bare-metal-server>`__. Choose one
-of these options, and skip to the "`Installation <#installation>`__"
-subchapter afterwards.
-
-Option A: Setup with the Greenhost API
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
--  Before you can start, you need to have an API key with Customer
-   rights.
-
-   1. In the Cosmos service centre, click your webmaster account name on
-      the top right corner
-   2. Go to "User settings"
-   3. Click "API keys"
-   4. Click "New API key"
-   5. Click "Generate new key"
-   6. Give the key "Customer", "CloudCustomer" or "API" access rights. You
-      will need "Customer" rights if you want to automatically generate DNS
-      rules. If you do not have this right, you have to `manually set the
-      right DNS rules <#dns-entries>`__
-      later.
-   7. Copy the generated key and run export it to this variable in a
-      terminal:
-
-      ::
-
-        $ export COSMOS_API_TOKEN=<paste your API key here>
-
-   8. In *the same terminal*, you can now use the ``create`` subcommand
-
-- There are two ways to let the installation program know which VPS to use:
-
-   1. Based on an already existing `Greenhost <https://greenhost.net>`__
-      VPS, using the ``--droplet-id`` argument.
-
-      Find the ID of your VPS either in the Greenhost Cosmos interface (it is
-      the numeric part of the URL in the "Manage VPS" screen).
-
-   2. By creating a new VPS through the API, using the ``--create-droplet``
-      argument.
-
-      In that case, make sure to also provide the ``--create-hostname`` and
-      ``--ssh-key-id`` arguments.
-
-      You can find your SSH key ID by going to VPS Cloud -> SSH keys and
-      checking the link under "Show key". The numerical part is your SSH
-      key ID.
-
-      *Note: You can also use the API to list ssh keys and find it there.
-      Read the `Greenhost API
-      documentation <https://service.greenhost.net/cloud/ApiDoc#/default>`__
-      for more information*
-
-- In both cases you need to provide the ``DOMAIN_NAME`` positional
-  argument.
-
-  If you use a subdomain (e.g. ``oas.yourdomain.com``), use the
-  ``--subdomain`` command as follows:
-
-  ::
-
-    $ python -m openappstack my-cluster create --subdomain oas example.org
-
-- Here is an example of a complete creation command:
-
-  ::
-
-    $ python -m openappstack my-cluster create \
-      --create-droplet \
-      --create-hostname oas.example.org \
-      --ssh-key-id 112 \
-      --create-domain-records \
-      --subdomain oas \
-      example.org
-
-  .. note::
-    You can use the ``--acme-staging`` argument for testing
-    purposes. This ensures you use "staging" certificates from Let's
-    Encrypt, to reduce the stress on their servers. However, ONLYOFFICE
-    integration requires valid (live) certificates to work.
-
-  This will create configuration files for a cluster named ``my-cluster``.
-  It will also create a Greenhost VPS with the hostname
-  ``oas.example.org`` and on which you can log in with SSH key with ID
-  ``112``.
-
-  These DNS records will also be created by Greenhost (assuming you own
-  the domain ``example.org`` at Greenhost):
-
-  -  An ``A`` record ``oas.example.org`` pointing to the VPSs IP address
-  -  A ``CNAME`` record ``*.oas.example.org`` pointing to
-     ``oas.example.org``.
-
-Option B: Setup any VPS or bare metal server
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+    $ python -m openappstack oas.example.org create --help
 
-Skip this step and continue to `Installation <#installation>`__ if you
-already set up a Greenhost VPS.
+If you want the installation script to automatically create a VPS for you, check
+:ref:`setup-with-greenhost-api`.
+Otherwise, continue here.
 
 If you want to install OpenAppStack on a non-Greenhost VPS, we assume
 you already have a machine with a world-facing IP address. Make sure
 that your VPS meets our `prerequisites <#prerequisites>`__. You'll need
 its *hostname* and its *IP address*.
 
-.. note::
-  You can use the ``--acme-staging`` argument for testing
-  purposes. If you are automating this, please use this to ensure you
-  use "staging" certificates from Let's Encrypt, to reduce the stress
-  on their servers. However, ONLYOFFICE and single sign-on integration
-  require valid (live) certificates to work properly so please don't
-  use this option by default.
+Create the initial OpenAppStack configuration for your VPS by running the
+following command:
 
-If you want your cluster to be reachable under the fully qualified
-domain name (``FQDN``) ``oas.example.org``, the corresponding parameters
-would be:
+::
 
--  ``--subdomain``: ``oas``
--  the ``DOMAIN`` positional argument: ``example.org``
+    $ python -m openappstack oas.example.org create \
+      oas.example.org \
+      --ip-address 1.2.3.4
 
-Create the OpenAppStack settings for your VPS by running the following
-command:
+This configures your cluster under the fully qualified domain name (FQDN)
+``oas.example.org``. To break down the command:
 
-::
+- The positional argument ``oas.example.org`` tells the script the domain the
+  cluster will be hosted on. This should be a (subdomain of a) domain you own.
+- ``--ip-address 1.2.3.4`` tells the script the IP address of your VPS. This
+  will be used to find the VPS during the installation procedure.
 
-    $ python -m openappstack my-cluster create \
-      --ip-address IP_ADDRESS \
-      --create-hostname my-clusters-hostname \
-      --subdomain oas \
-      example.org
+The configuration has now been written to the ``clusters/oas.example.org``
+directory on your provisioning machine.
 
-DNS entries
-'''''''''''
+.. _step-2:
 
-Before you continue, if you have not made DNS entries with the CLI tool,
-you need to make them now. It is important to start with configuring DNS
-because depending on your DNS setup/provider, it takes a while
-(sometimes hours) to propagate.
+Step 2: Configure DNS
+~~~~~~~~~~~~~~~~~~~~~
 
-You need one dedicated (sub)domain entry and a wildcard entry for
-everything inside it. For example, create an A record for these domains:
+Next, make sure that you have two DNS records that point to your
+cluster. Create these two DNS records:
 
--  An ``A`` record ``oas.example.org`` pointing to the VPSs IP address,
--  A ``CNAME`` record ``*.oas.example.org`` pointing to
-   ``oas.example.org``.
+-  An ``A`` record ``oas.example.org`` pointing to the VPS's IP address,
+-  A ``CNAME`` record ``*.oas.example.org`` pointing to ``oas.example.org``.
 
 .. Note::
    It is also possible to host OpenAppStack on a domain (with
-   no dedicated subdomain). That does imply that the included Wordpress site
+   no dedicated subdomain). That does imply that the included WordPress site
    will be hosted on your root domain ``example.org``. In that case, make these
    DNS records instead:
 
-   - An ``A`` record ``example.org`` pointing to the VPSs IP address,
+   - An ``A`` record ``example.org`` pointing to the VPS's IP address,
    - A ``CNAME`` record ``*.example.org`` pointing to ``example.org``.
 
 OpenAppStack will fetch https certificates with `Let's
 Encrypt <https://letsencrypt.org>`__ by default. In order to do this, DNS
-entries need to be created. If you don't need https certificates for
-your cluster while testing you can skip this step. Please be aware of
-the limitations of this:
+entries need to be created.
+
+.. _step-3:
 
--  Onlyoffice won't work since it requires a valid certificate
-   connecting to Nextcloud.
--  You need to be able to resolve the domain names locally.
+Step 3: Additional configuration
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
-Step 2: Optional: cluster backups using Velero
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+Copy the file ``install/.flux.env.example`` to
+``clusters/oas.example.org/.flux.env`` in your cluster directory (see the
+example command after this list). This file contains the last bit of
+information you need to configure. You **have to** configure the following
+values; the rest are optional.
 
-At this point you can enable `Velero <https://velero.io>`__, a program that
+- ``ip_address``: The IP address of your cluster
+- ``domain``: The FQDN of your cluster
+- ``admin_email``: A valid email address for the system administrator of your
+  cluster.
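+
+For example, copying the example file could look like this (run from the root
+of the git repository):
+
+.. code:: bash
+
+   cp install/.flux.env.example clusters/oas.example.org/.flux.env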
+
+Outgoing email
+''''''''''''''
+
+If you want apps like Nextcloud, RocketChat and Prometheus to be able to send
+email notifications, you need to provide an email account.
+
+.. Note::
+    OpenAppStack does not set up an email server for you. In order to enable
+    outgoing emails you need to provide an already existing email account.
+
+OpenAppStack uses SMTP to send emails. Check your email provider's
+documentation for SMTP configuration details and enter them in the
+``clusters/oas.example.org/.flux.env`` file as follows:
+
+.. code::
+
+   # Enable sending mails
+   outgoing_mail_enabled=true
+   # Email address that the cluster sends mails from. Needs to be an existing SMTP
+   # login
+   outgoing_mail_from_address=admin@example.org
+   # Same outgoing mail address, but only the part before the '@'
+   outgoing_mail_from_local_part=admin
+   # Same outgoing mail address, but only the part after the '@'
+   outgoing_mail_domain=example.org
+   # SMTP password for the outgoing mail address
+   outgoing_mail_smtp_password=CHANGEME
+   # SMTP username, often the same as the outgoing email address
+   outgoing_mail_smtp_user=admin@example.org
+   # SMTP login data.
+   outgoing_mail_smtp_host=smtp.greenhost.nl
+   outgoing_mail_smtp_authtype=LOGIN
+   outgoing_mail_smtp_port=587
+
+.. _backups-with-velero:
+
+Backups with Velero
+'''''''''''''''''''
+
+You can enable `Velero <https://velero.io>`__, a program that
 runs on your cluster and uploads backups of your cluster and user data to an
 S3 storage service of your choice.
 
@@ -334,8 +280,8 @@ upload it to the S3 storage you configure. This includes:
   like which applications are installed, including their version number and
   installation-time settings;
 - persistent data of all applications: for example, single sign-on users that
-  you created, Nextcloud files and metadata, Wordpress site data and comments,
-  Rocketchat chat history, etc. A single exception to this is Prometheus data
+  you created, Nextcloud files and metadata, WordPress site data and comments,
+  Rocket.Chat chat history, etc. A single exception to this is Prometheus data
   (statistics of system properties), which takes up a lot of space and we
   consider not valuable enough to back up.
 
@@ -343,54 +289,16 @@ It does not include anything on the VPS that you may have set up but is not
 part of OpenAppStack, like programs installed via ``apt``, or data added to the
 VPS disk not through OpenAppStack.
 
-To enable Velero:
+To configure Velero, edit the file ``clusters/oas.example.org/.flux.env``
+and configure the settings prefixed with ``backup_s3_``.
 
-- edit the file ``clusters/my-cluster/group_vars/all/settings.yml``,
-  reviewing all settings under the ``backup`` field and entering the data
-  specific to your backup storage location;
-- in that same file ``clusters/my-cluster/group_vars/all/settings.yml``,
-  add ``'velero'`` to the list of ``enabled_applications``;
-- create the directory ``clusters/my-cluster/secrets``;
-- create the file ``clusters/my-cluster/secrets/s3_access_key``, with as
-  only contents your S3 access key;
-- create the file ``clusters/my-cluster/secrets/s3_secret_key``, with as
-  only contents your S3 secret key.
+Then continue with the installation procedure as described below. At the end of
+the installation procedure, you have to install the ``velero`` application.
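+
+As described in :ref:`step-5`, that means running the Velero installation
+script once the core installation has finished:
+
+.. code:: bash
+
+   ./install/install-velero.sh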
 
-Then continue with the installation procedure as described below.
+.. _step-4:
 
-Step 3. Optional: Configure outgoing email
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-If you want apps like Nextcloud to be able to send email notifications, you need to
-provide an email account. If you provide an account, OpenAppStack will configure Nextcloud
-to send emails from that account.
-
-.. Note::
-    OpenAppStack does not set up an email server for you. In order to enable outgoing emails you
-    need to provide an already existing email account.
-
-After using the create command, open the file ``./clusters/my-cluster/group-vars/all/settings.yml``
-and set the details of your email server. The details that you need to enter here
-depend on your email provider.
-
-.. code:: yaml
-
-   outgoing_mail:
-     enabled: true
-     fromAddress: "noreply@example.org"
-     smtp:
-       user: "noreply@example.org"
-       password: "**********"
-       host: "mail.example.org"
-       # Can be one of ["PLAIN", "LOGIN", "NONE"]
-       authtype: "PLAIN"
-       ssl: true
-       port: 465
-
-Right now OpenAppStack only configures  Nextcloud to send emails to users.
-
-Step 4: Installation
-~~~~~~~~~~~~~~~~~~~~
+Step 4: Set up cluster
+~~~~~~~~~~~~~~~~~~~~~~
 
 You're almost ready to start the OpenAppStack installation script.
 First, make sure your DNS configuration is propagated. To do so, make
@@ -403,43 +311,88 @@ sure 'ping' shows your VPS's IP address:
 The ``install`` command will try to log into your machine as the
 ``root`` user using SSH.
 
-To start the installation process, run:
+Run the ``install`` command with the CLI to completely configure your VPS for
+OpenAppStack.
 
 ::
 
-    $ python -m openappstack my-cluster install --install-kubernetes
+    $ python -m openappstack oas.example.org install
 
-This will take between 5 and 20 minutes. It generates secrets that will
-be added to the ``clusters/my-cluster/secrets`` directory. If you ever
-need any credentials after this installation, you can probably find them
-there. **Make sure this directory stays safe.** Feel free to encrypt it
-when you are not using the ``openappstack`` CLI tool.
 
-OpenAppStack uses `Flux <https://fluxcd.io>`__ to install applications.
-After the installation process has completed, Flux has not necessarily
-finished installing and integrating all the applications. This process
-usually takes 10-20 minutes to complete, but can also take longer
-depending on your machine's compute and/or network resources.
+This will take a few minutes. It installs k3s, a lightweight Kubernetes
+distribution. `Flux`_ is installed to manage applications and keep them updated
+automatically.
+
+.. _flux: https://fluxcd.io
 
 In the future, we will add commands that show you the status of the
 application installation. For now, just wait half an hour for everything
 to settle, and then continue to the next step (validating your setup).
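+
+If you are curious, you can already peek at the pods coming up with a generic
+``kubectl`` command (this is optional):
+
+.. code:: bash
+
+   export KUBECONFIG=$PWD/clusters/oas.example.org/kube_config_cluster.yml
+   kubectl get pods --all-namespaces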
 
 .. Note::
-   It is possible to re-run the ``install`` command. Make
-   sure you re-run it on a machine with the same ``secrets`` as
-   generated the first time. You can achieve this by making sure you
-   have the ``clusters/my-cluster`` directory and it contains the same
-   ``secrets`` directory before you run the installation command.
+   It is possible to re-run the ``install`` command with a newer version of the
+   installation script. This usually updates k3s and can have other benefits.
 
-.. Note::
-   The ``--install-kubernetes`` argument installs or updates your VPS's K3s
-   installation. If you only want to run (or re-run) the part that installs OpenAppStack
-   on your Kubernetes clusters, leave it out.
+.. _step-5:
+
+Step 5: Install applications
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Before you can start, you need to execute a few commands from the root of the
+git repository **on your provisioning machine**. Don't forget to replace
+``oas.example.org`` with your domain.
 
-Step 5: Validate setup
+.. code:: bash
+
+   export CLUSTER_DIR=clusters/oas.example.org
+   # Copy the installation kustomization to your cluster directory
+   cp install/kustomization.yaml $CLUSTER_DIR/
+   # Tell kubectl to use your cluster's kube_config
+   export KUBECONFIG=$CLUSTER_DIR/kube_config_cluster.yml
+   # This inserts the configuration from .flux.env into your cluster as a "secret"
+   kubectl apply -k $CLUSTER_DIR
+
+After you have executed that code, your terminal should show:
+
+.. code:: bash
+
+   secret/oas-cluster-variables configured
+
+Next, run:
+
+.. code::
+
+   ./install/install-openappstack.sh
+
+This installs the *core* of OpenAppStack into your ``k3s`` cluster. To see
+what's included, check the ``flux2/infrastructure`` and the ``flux2/core``
+folders. In addition, it sets up Prometheus and Grafana to monitor your cluster.
+
+After the script completes, you can install applications by running the other
+installation scripts in the ``install`` folder, as shown in the example after
+this list. At the moment, we have scripts to install:
+
+- Nextcloud and ONLYOFFICE with ``install-nextcloud.sh``
+- Rocket.Chat with ``install-rocketchat.sh``
+- WordPress with ``install-wordpress.sh``
+- Velero with ``install-velero.sh`` (only if you have configured it in
+  :ref:`backups-with-velero`).
+
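+For example, installing Nextcloud and ONLYOFFICE is a single command (make sure
+``KUBECONFIG`` still points to your cluster):
+
+.. code:: bash
+
+   ./install/install-nextcloud.sh
+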
+.. _step-6:
+
+Step 6: Validate setup
 ~~~~~~~~~~~~~~~~~~~~~~
 
 Because OpenAppStack is still under development, we would like you to
 follow our `testing instructions <testing_instructions.html>`__ to make sure
 that the setup process went well.
+
+Step 7: Let us know!
+~~~~~~~~~~~~~~~~~~~~
+
+We would love to hear about your experience installing OpenAppStack. If you
+encountered any problems, please create an issue in our `issue tracker
+<https://open.greenhost.net/groups/openappstack/-/issues>`__. If you didn't,
+please still reach out as described on our `contact page
+<https://openappstack.net/contact.html>`__ and tell us how you like OpenAppStack
+so far. We want to be in communication with our users, and we want to help you
+if you run into problems.
diff --git a/docs/maintenance.md b/docs/maintenance.md
deleted file mode 100644
index 29b8bd815f93198f3f2135ce41db877a0f462171..0000000000000000000000000000000000000000
--- a/docs/maintenance.md
+++ /dev/null
@@ -1,140 +0,0 @@
-# Maintaining an Openappstack cluster
-
-## Logging
-
-Logs from pods and containers can be read in different ways:
-
-* In the cluster filesystem at `/var/log/pods/` or `/var/logs/containers/`.
-* Using [kubectl logs](https://kubernetes.io/docs/concepts/cluster-administration/logging)
-* Querying aggregated logs with Grafana, see below.
-
-## Central log aggregation
-
-We use [Promtail](https://grafana.com/docs/loki/latest/clients/promtail/),
-[Loki](https://grafana.com/oss/loki/) and [Grafana](https://grafana.com/) for
-easy access of aggregated logs.
-The [Loki documentation](https://grafana.com/docs/loki/latest/) is a
-good starting point how this setup works, and the [Using Loki in Grafana](https://grafana.com/docs/grafana/latest/datasources/loki)
-gets you started with querying your cluster logs with Grafana.
-
-You will find the Loki Grafana integration on your cluster at https://grafana.oas.example.org/explore
-together with some generic query examples.
-
-### LogQL query examples
-
-Please also refer to the [LogQL documentation](https://grafana.com/docs/loki/latest/logql).
-
-Query all aggregated logs (unfortunatly we can't find a better way of doing this
-since LogQL always expects a stream label to get queried):
-
-    logcli query '{foo!="bar"}'
-
-Query all logs for a keyword:
-
-    logcli query '{foo!="bar"} |= "error"'
-
-Query all k8s apps for errors using a regular expression:
-
-    logcli query '{job=~".*"} |~ "error|fail|exception|fatal"'
-
-
-#### Flux
-
-Flux is responsible for installing applications. It used `helm-operator` to
-deploy the desired helm releases.
-
-Query all messages from `flux`:
-
-    {app="flux"}
-
-Query all messages from `flux` and `helm-operator`:
-
-    {app=~"(flux|helm-operator)"}
-
-`flux` messages containing `wordpress`:
-
-    {app = "flux"} |= "wordpress"
-
-`flux` messages containing `wordpress` without `unchanged` events (to only show
-the installation messages):
-
-    {app = "flux"} |= "wordpress" != "unchanged"
-
-Filter out redundant `flux` messages:
-
-    { app = "flux" } !~ "(unchanged | event=refreshed | method=Sync | component=checkpoint)"
-
-Debug oauth2 single sign-on with rocketchat:
-
-    {container_name=~"(hydra|rocketchat)"}
-
-Query kubernetes events processed by the `eventrouter` app containing `warning`:
-
-    logcli query '{app="eventrouter"} |~ "warning"'
-
-#### Cert-manager
-
-Cert manager is responsible for requesting Let's Encrypt TLS certificates.
-
-Query `cert-manager` messages containing `chat`:
-
-    {app="cert-manager"} |= "chat"
-
-
-#### Hydra
-
-Hydra is the single sign-on system.
-
-Show only warnings and errors from `hydra`:
-
-    {container_name="hydra"} != "level=info"
-
-## Backup
-
-### On your provisioning machine
-
-During the installation process, a cluster config directory is created on your
-provisioning machine, located in the top-level sub-directory `clusters` in your
-clone of the openappstack git repository. You may want to back this up. It
-contains all files generated during the `create` and `install` commands of the
-CLI, together with the generated secrets that are stored during installation.
-
-### On your cluster
-
-OpenAppStack supports using the program Velero to make backups of your
-OpenAppStack instance to external storage via the S3 API. See the [installation
-instructions](installation_instructions.html#step-2-optional-cluster-backups-using-velero)
-for setup details. By default this will make nightly backups of the entire
-cluster (minus Prometheus data). To make a manual backup, run
-
-    velero create backup BACKUP_NAME --exclude-namespaces velero --wait
-
-from your VPS. See `velero --help` for other commands, and [Velero's
-documentation](https://velero.io/docs/v1.4/) for more information.
-
-Note: in case you want to make an (additional) backup of application data via
-alternate means, all persistent volume data of the cluster are stored in directories
-under `/var/lib/OpenAppStack/local-storage`.
-
-## Restore
-
-Restore instructions will follow, please [reach out to us](https://openappstack.net/contact.html)
-if you need assistance.
-
-## Change the IP of your cluster
-
-In case your cluster needs to migrate to another IP, make sure to update the IP
-address in `/etc/rancher/k3s/k3s.yaml` and, if applicable, your local kube
-config.
-
-## Delete evicted pods
-
-In case your cluster disk usage is over 80%, kubernetes [taints](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/)
-the node with `DiskPressure`. Then it tries to evict pods, which is pointless in
-a single node setup but still happened anyway. Sometimes hundreds of pods will end
-up in `evicted` state but still showed up after `DiskPressure` recovered.
-See also the [out of resource handling with kubelet](https://kubernetes.io/docs/tasks/administer-cluster/out-of-resource/) documentation.
-
-You can delete all evicted pods with this:
-
-    kubectl get pods --all-namespaces -ojson | jq -r '.items[] | select(.status.reason!=null) | select(.status.reason | contains("Evicted")) | .metadata.name + " " + .metadata.namespace' | xargs -n2 -l bash -c 'kubectl delete pods $0 --namespace=$1'
diff --git a/docs/maintenance.rst b/docs/maintenance.rst
new file mode 100644
index 0000000000000000000000000000000000000000..61a7a5233a2c49e32684200de080d23a1fcdfb2f
--- /dev/null
+++ b/docs/maintenance.rst
@@ -0,0 +1,205 @@
+Maintaining an OpenAppStack cluster
+===================================
+
+Logging
+-------
+
+Logs from pods and containers can be read in different ways:
+
+-  In the cluster filesystem at ``/var/log/pods/`` or
+   ``/var/log/containers/``.
+-  Using `kubectl logs`_ (see the example after this list)
+-  Querying aggregated logs with Grafana, see below.
+
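+For example, reading the logs of a single pod in the ``oas`` namespace could
+look like this:
+
+.. code:: bash
+
+   kubectl -n oas get pods
+   kubectl -n oas logs <pod-name>
+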
+Central log aggregation
+-----------------------
+
+We use `Promtail`_, `Loki`_ and `Grafana`_ for easy access to aggregated
+logs. The `Loki documentation`_ is a good starting point for how this setup
+works, and `Using Loki in Grafana`_ gets you started with querying
+your cluster logs with Grafana.
+
+You will find the Loki Grafana integration on your cluster at
+https://grafana.oas.example.org/explore together with some generic query
+examples.
+
+LogQL query examples
+~~~~~~~~~~~~~~~~~~~~
+
+Please also refer to the `LogQL documentation`_.
+
+Query all aggregated logs (unfortunately we can't find a better way of
+doing this since LogQL always expects a stream label to be queried):
+
+.. code:: bash
+
+   logcli query '{foo!="bar"}'
+
+Query all logs for a keyword:
+
+.. code:: bash
+
+   logcli query '{foo!="bar"} |= "error"'
+
+Query all k8s apps for errors using a regular expression:
+
+.. code:: bash
+
+   logcli query '{job=~".*"} |~ "error|fail|exception|fatal"'
+
+Flux
+^^^^
+
+`Flux`_ is responsible for installing applications. It uses four
+controllers:
+
+-  ``source-controller`` that tracks Helm and Git repositories like
+   https://open.greenhost.net/openappstack/openappstack for updates.
+-  ``kustomize-controller`` to deploy ``kustomizations`` that often
+   install ``helmreleases``.
+-  ``helm-controller`` to deploy the ``helmreleases``.
+-  ``notification-controller`` that is responsible for inbound and
+   outbound Flux messages.
+
+Query all messages from the ``source-controller``:
+
+.. code:: bash
+
+   {app="source-controller"}
+
+Query all messages from ``flux`` and ``helm-controller``:
+
+.. code:: bash
+
+   {app=~"(source-controller|helm-controller)"}
+
+``helm-controller`` messages containing ``wordpress``:
+
+.. code:: bash
+
+   {app = "helm-controller"} |= "wordpress"
+
+``helm-controller`` messages containing ``wordpress`` without
+``unchanged`` events (to only show the installation messages):
+
+.. code:: bash
+
+   {app = "helm-controller"} |= "wordpress" != "unchanged"
+
+Filter out redundant ``helm-controller`` messages:
+
+.. code:: bash
+
+   { app = "helm-controller" } !~ "(unchanged | event=refreshed | method=Sync | component=checkpoint)"
+
+Debug OAuth 2 single sign-on with Rocket.Chat:
+
+.. code:: bash
+
+   {container_name=~"(hydra|rocketchat)"}
+
+Query kubernetes events processed by the ``eventrouter`` app containing
+``warning``:
+
+.. code:: bash
+
+   logcli query '{app="eventrouter"} |~ "warning"'
+
+Cert-manager
+^^^^^^^^^^^^
+
+Cert manager is responsible for requesting Let’s Encrypt TLS
+certificates.
+
+Query ``cert-manager`` messages containing ``chat``:
+
+.. code:: bash
+
+   {app="cert-manager"} |= "chat"
+
+Hydra
+^^^^^
+
+Hydra is the single sign-on system.
+
+Show only warnings and errors from ``hydra``:
+
+.. code:: bash
+
+   {container_name="hydra"} != "level=info"
+
+Backup
+------
+
+On your provisioning machine
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+During the installation process, a cluster config directory is created
+on your provisioning machine, located in the top-level sub-directory
+``clusters`` in your clone of the OpenAppStack git repository. Although
+these files are not essential for your OpenAppStack cluster to continue
+functioning, you may want to back this folder up because it allows easy
+access to your cluster.
+
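+For example, a simple snapshot of all cluster config directories could be made
+like this:
+
+.. code:: bash
+
+   tar -czf oas-clusters-backup.tar.gz clusters/
+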
+On your cluster
+~~~~~~~~~~~~~~~
+
+OpenAppStack supports using the program Velero to make backups of your
+OpenAppStack instance to external storage via the S3 API. See
+:ref:`backups-with-velero` in the installation instructions for setup details.
+By default this will make nightly backups of the entire cluster (minus
+Prometheus data). To make a manual backup, run
+
+.. code:: bash
+
+   cluster$ velero create backup BACKUP_NAME --exclude-namespaces velero --wait
+
+from your VPS. See ``velero --help`` for other commands, and `Velero’s
+documentation`_ for more information.
+
+Note: in case you want to make an (additional) backup of application
+data via alternate means, all persistent volume data of the cluster are
+stored in directories under ``/var/lib/OpenAppStack/local-storage``.
+
+Restore
+-------
+
+Restore instructions will follow, please `reach out to us`_ if you need
+assistance.
+
+Change the IP of your cluster
+-----------------------------
+
+In case your cluster needs to migrate to another IP, make sure to update
+the IP address in ``/etc/rancher/k3s/k3s.yaml`` and, if applicable, your
+local kube config and ``inventory.yml`` in the cluster directory
+``clusters/oas.example.org``.
+
+Delete evicted pods
+-------------------
+
+In case your cluster disk is full, Kubernetes `taints`_ the node with
+``DiskPressure``. Then it tries to evict pods, which is pointless in a
+single-node setup but can still happen. We have experienced hundreds of pods in
+``evicted`` state that still showed up after ``DiskPressure`` had recovered. See
+also the `out of resource handling with kubelet`_ documentation.
+
+You can delete all evicted pods with this command:
+
+.. code:: bash
+
+   kubectl get pods --all-namespaces -ojson | jq -r '.items[] | select(.status.reason!=null) | select(.status.reason | contains("Evicted")) | .metadata.name + " " + .metadata.namespace' | xargs -n2 -l bash -c 'kubectl delete pods $0 --namespace=$1'
+
+
+.. _kubectl logs: https://kubernetes.io/docs/concepts/cluster-administration/logging
+.. _Promtail: https://grafana.com/docs/loki/latest/clients/promtail/
+.. _Loki: https://grafana.com/oss/loki/
+.. _Grafana: https://grafana.com/
+.. _Loki documentation: https://grafana.com/docs/loki/latest/
+.. _Using Loki in Grafana: https://grafana.com/docs/grafana/latest/datasources/loki
+.. _LogQL documentation: https://grafana.com/docs/loki/latest/logql
+.. _Flux: https://fluxcd.io/
+.. _Velero’s documentation: https://velero.io/docs/v1.4/
+.. _reach out to us: https://openappstack.net/contact.html
+.. _taints: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/
+.. _out of resource handling with kubelet: https://kubernetes.io/docs/tasks/administer-cluster/out-of-resource/
diff --git a/docs/reference.md b/docs/reference.md
index aac0d01f4735710e4d2cf404af2d28caad9146db..7bf4d95c6ef2afc6aef7742f4ca4fed1a7955766 100644
--- a/docs/reference.md
+++ b/docs/reference.md
@@ -8,9 +8,6 @@ created:
 - `/var/lib/OpenAppStack/local-storage`: all application data (e.g., Nextcloud
   files) are stored here.
 
-- `/var/lib/OpenAppStack/source`: On development setups, this contains the
-  `local-flux` git repository.
-
 - `/var/lib/rancher/k3s`: this contains the files related to your "Kubernetes"
   cluster.
   - The kubectl configuration file is located at `/etc/rancher/k3s/k3s.yaml`
diff --git a/docs/requirements.in b/docs/requirements.in
new file mode 100644
index 0000000000000000000000000000000000000000..e83495e54952ea96dc9297e327b386cf6a5f6b84
--- /dev/null
+++ b/docs/requirements.in
@@ -0,0 +1,10 @@
+# Please add dependencies here and run
+# pip install -r requirements-dev.txt && pip-compile
+# to update requirements.txt
+#
+# Please add developer dependencies which are not needed to install
+# OpenAppStack to requirements-dev.txt!
+#
+sphinx
+sphinx-rtd-theme
+recommonmark
diff --git a/docs/requirements.txt b/docs/requirements.txt
new file mode 100644
index 0000000000000000000000000000000000000000..7165db72bb602691edb2d1478b9e86c4fbd6fc07
--- /dev/null
+++ b/docs/requirements.txt
@@ -0,0 +1,67 @@
+#
+# This file is autogenerated by pip-compile
+# To update, run:
+#
+#    pip-compile
+#
+alabaster==0.7.12
+    # via sphinx
+babel==2.9.1
+    # via sphinx
+certifi==2021.5.30
+    # via requests
+chardet==4.0.0
+    # via requests
+commonmark==0.9.1
+    # via recommonmark
+docutils==0.16
+    # via
+    #   recommonmark
+    #   sphinx
+    #   sphinx-rtd-theme
+idna==2.10
+    # via requests
+imagesize==1.2.0
+    # via sphinx
+jinja2==3.0.1
+    # via sphinx
+markupsafe==2.0.1
+    # via jinja2
+packaging==20.9
+    # via sphinx
+pygments==2.9.0
+    # via sphinx
+pyparsing==2.4.7
+    # via packaging
+pytz==2021.1
+    # via babel
+recommonmark==0.7.1
+    # via -r requirements.in
+requests==2.25.1
+    # via sphinx
+snowballstemmer==2.1.0
+    # via sphinx
+sphinx-rtd-theme==0.5.2
+    # via -r requirements.in
+sphinx==4.0.2
+    # via
+    #   -r requirements.in
+    #   recommonmark
+    #   sphinx-rtd-theme
+sphinxcontrib-applehelp==1.0.2
+    # via sphinx
+sphinxcontrib-devhelp==1.0.2
+    # via sphinx
+sphinxcontrib-htmlhelp==2.0.0
+    # via sphinx
+sphinxcontrib-jsmath==1.0.1
+    # via sphinx
+sphinxcontrib-qthelp==1.0.3
+    # via sphinx
+sphinxcontrib-serializinghtml==1.1.5
+    # via sphinx
+urllib3==1.26.5
+    # via requests
+
+# The following packages are considered to be unsafe in a requirements file:
+# setuptools
diff --git a/docs/security.rst b/docs/security.rst
index d6d5b9cfb8c064d89817ab2b8e4b6b844cadbfdd..3dbb4ebf6d282248b54d81a5f5192ededf70d5a8 100644
--- a/docs/security.rst
+++ b/docs/security.rst
@@ -5,23 +5,29 @@ Security
 Access control
 ==============
 
-By default, the resources of your OAS cluster will be exposed to the whole
-internet (although they are password protected by the single-sign-on system).
+By default, the applications on your OAS cluster will be exposed to the whole
+internet (although they are password protected).
+
 If you like to limit who can access your cluster resources you can configure
 the OAS ingress (`ingress-nginx <https://kubernetes.github.io/ingress-nginx>`__)
 to only accept connections from a certain IP address or range.
-Add a file in the ``CLUSTER_DIR/group_vars/all/`` directory, i.e. named
-``ingress.yml`` with the following content:
 
-.. code-block:: yaml
+Follow the :ref:`Customize OAS applications` instructions, but use the following
+secret as ``install/overrides/oas-nginx-override.yml`` and apply the secret in
+the ``oas`` namespace instead of ``oas-apps``. Replace the source range with the
+IP address ranges you want to allow.
 
-  ingress_extra_values:
-    controller:
-      config:
-        # https://kubernetes.github.io/ingress-nginx/user-guide/nginx-configuration/annotations/#whitelist-source-range
-        # comma separated list of CIDRs, e.g. 10.0.0.0/24,172.10.0.1.
-        whitelist-source-range: 1.2.3.4/24
+.. code-block:: yaml
 
-After this, run the
-`Openappstack installation procedure <installation_instructions.html#step-4-installation>`__
-again.
+   ---
+   apiVersion: v1
+   kind: Secret
+   metadata:
+     name: oas-nginx-override
+   stringData:
+     values.yaml: |
+       controller:
+         config:
+           # https://kubernetes.github.io/ingress-nginx/user-guide/nginx-configuration/annotations/#whitelist-source-range
+           # comma separated list of CIDRs, e.g. 10.0.0.0/24,172.10.0.1.
+           whitelist-source-range: 1.2.3.4/24
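+
+For example, to apply the override from the root of the repository (note the
+``oas`` namespace):
+
+.. code:: bash
+
+   kubectl apply -n oas -f ./install/overrides/oas-nginx-override.yml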
diff --git a/docs/testing_instructions.md b/docs/testing_instructions.md
index a3ae01b6ca2db9dda8e8fcb84fcff93b45e44128..164b519558defb793d62b47421e3616e220edb7e 100644
--- a/docs/testing_instructions.md
+++ b/docs/testing_instructions.md
@@ -80,7 +80,7 @@ machine's config directory.
 
 ## Providing feedback
 
-If you have not done already, please create an account on [https://open.greenhost.net](https://open.greenhost.net)
+If you have not done so already, please create an account on [https://open.greenhost.net](https://open.greenhost.net)
 (or login with your existing github account) and [create a new issue](https://open.greenhost.net/openappstack/openappstack/issues/new) using the `Feedback` template.
 
 Thanks a lot for your testing work! We'll use your input to try to improve OpenAppStack.
diff --git a/docs/troubleshooting.md b/docs/troubleshooting.md
deleted file mode 100644
index 697c14c6788ccf8a44412882b21c63a122310137..0000000000000000000000000000000000000000
--- a/docs/troubleshooting.md
+++ /dev/null
@@ -1,266 +0,0 @@
-# Troubleshooting
-
-If you run into problems, there are a few things you can do to research the
-problem. This document describes what you can do.
-
-> **NOTE:** `cluster$` indicates that the commands should be run as root on your
-> OAS machine.
-
-## Known issues
-
-Take a look if the problem you have encountered is already in our [issue
-tracker](https://open.greenhost.net/groups/openappstack/-/issues).
-
-## Run the cli tests
-
-To get an overall status of your cluster you can run the tests from the
-command line.
-
-There are two types of tests: [testinfra](https://testinfra.readthedocs.io/en/latest/)
-tests, and [taiko](https://taiko.dev) tests.
-
-### Testinfra tests
-
-Testinfra tests are split into two groups, lets call them `blackbox` and
-`clearbox` tests.  The blackbox tests run on your provisioning machine and test
-the OAS cluster from the outside. For example, the certificate check will check
-if the OAS will return valid certificates for the provided services.
-The clearbox tests run on the OAS host and check i.e. if docker is installed
-in the right version etc.
-
-First, enter the `test` directory in the Git repository on your provisioning
-machine.
-
-    cd test
-
-To run the test against your cluster, first export the `CLUSTER_DIR` environment
-variabel with the location of your cluster config directory:
-
-    export CLUSTER_DIR="../clusters/CLUSTERNAME"
-
-Run all tests:
-
-    py.test -s --ansible-inventory=${CLUSTER_DIR}/inventory.yml --hosts='ansible://*'
-
-Test all applications, that will check for:
-
-  * proper certificate
-  * helm release successfully installed
-  * all app pods are running and healthy
-
-```
-pytest -s -m 'app' --connection=ansible --ansible-inventory=${CLUSTER_DIR}/inventory.yml --hosts='ansible://*'
-```
-
-Test a specific application:
-
-    pytest -s -m 'app' --app="wordpress" --connection=ansible --ansible-inventory=${CLUSTER_DIR}/inventory.yml --hosts='ansible://*'
-
-
-#### Known Issues
-
-- Default ssh backend for testinfra tests is `paramiko`, which doesn't work oout
-  of the box. It fails to connect to the host because the `ed25519` hostkey was
-  not verified. Therefore we need to force plain ssh:// with either
-  `connection=ssh` or `--hosts=ssh://…`
-
-### Taiko tests
-
-Taiko tests run in a browser and test if all the interfaces are up
-and running and correctly connected to each other. They are integrated in the
-`openappstack` CLI command suite.
-
-#### Prerequisites
-
-Install [taiko](https://taiko.dev):
-
-    npm install -g taiko
-
-#### Usage
-
-To run all taiko tests, run the following command in this repository:
-
-    python -m openappstack CLUSTERNAME test
-
-In the future, this command will run all tests, but now only *taiko* is
-implemented. To learn more about the `test` subcommand, run:
-
-    python -m openappstack CLUSTERNAME test --help
-
-You can also only run a taiko test for a specific application, i.e.:
-
-    python -m openappstack CLUSTERNAME test --taiko-tags nextcloud
-
-### Advanced usage
-
-#### Testinfra tests
-
-Specify host manually:
-
-    py.test -s --hosts='ssh://root@example.openappstack.net'
-
-Run only tests tagged with `prometheus`:
-
-    py.test -s --ansible-inventory=${CLUSTER_DIR}/inventory.yml --hosts='ansible://*' -m prometheus
-
-Run cert test manually using the ansible inventory file:
-
-    py.test -s --ansible-inventory=${CLUSTER_DIR}/inventory.yml --hosts='ansible://*' -m certs
-
-Run cert test manually against a different cluster, not configured in any
-ansible inventory file, either by using pytest:
-
-    FQDN='example.openappstack.net' py.test -sv -m 'certs'
-
-or directly:
-
-    FQDN='example.openappstack.net' pytest/test_certs.py
-
-#### Running testinfra tests with local gitlab-runner docker executor
-
-Export the following environment variables like this:
-
-    export CI_REGISTRY_IMAGE='open.greenhost.net:4567/openappstack/openappstack'
-    export SSH_PRIVATE_KEY="$(cat ~/.ssh/id_ed25519_oas_ci)"
-    export COSMOS_API_TOKEN='…'
-
-then:
-
-    gitlab-runner exec docker --env CI_REGISTRY_IMAGE="$CI_REGISTRY_IMAGE" --env SSH_PRIVATE_KEY="$SSH_PRIVATE_KEY" --env COSMOS_API_TOKEN="$COSMOS_API_TOKEN" bootstrap
-
-#### Taiko tests
-
-##### Using Taiko without the OpenAppStack CLI
-
-Go to the `test/taiko` directory and run:
-
-For nextcloud & onlyoffice tests:
-
-    export DOMAIN='oas.example.net'
-    export SSO_USERNAME='user1'
-    export SSO_USER_PW='...'
-    export TAIKO_TESTS='nextcloud'
-    taiko --observe taiko-tests.js
-
-You can replace `nextcloud` with `grafana` or `wordpress` to test the other
-applications, or with `all` to test all applications.
-
-## SSH access
-
-You can SSH login to your VPS. Some programs that are available to the root user
-on the VPS:
-
-* `kubectl`, the Kubernetes control program. The root user is connected to the
-  cluster automatically.
-* `helm` is the "Kubernetes package manager". Use i.e. `helm ls --all-namespaces`
-  to see what apps are installed in your cluster. You can also use it to perform
-  manual upgrades; see `helm --help`.
-
-## Using kubectl to debug your cluster
-
-You can use `kubectl`, the Kubernetes control program, to find and manipulate
-your Kubernetes cluster. Once you have installed `kubectl`, to get access to your
-cluster with the OAS CLI:
-
-    $ python -m openappstack my-cluster info
-
-Look for these lines:
-
-    To use kubectl with this cluster, copy-paste this in your terminal:
-
-    export KUBECONFIG=/home/you/projects/openappstack/clusters/my-cluster/secrets/kube_config_cluster.yml
-
-Copy the whole `export` line into your terminal. In *the same terminal window*,
-kubectl will connect to your cluster.
-
-## HTTPS Certificates
-
-OAS uses [cert-manager](http://docs.cert-manager.io/en/latest/) to automatically
-fetch [Let's Encrypt](https://letsencrypt.org/) certificates for all deployed
-services. If you experience invalid SSL certificates, i.e. your browser warns you
-when visiting Rocketchat (`https://chat.example.org`), here's how to
-debug this. A useful resource for troubleshooting is also the official cert-manager
-[Troubleshooting Issuing ACME Certificates](https://cert-manager.io/docs/faq/acme/)
-documentation.
-
-In this example we fix a failed certificate request for `chat.example.org`.
-We will start by checking if `cert-manager` is set up correctly.
-
-Did you create your cluster using the `--acme-staging` argument?
-Please check the resulting value of the `acme_staging` key in
-`clusters/YOUR_CLUSTERNAME/group_vars/all/settings.yml`. If this is set to `true`, certificates
-are fetched from the [Let's Encrypt staging API](https://letsencrypt.org/docs/staging-environment/),
-which can't be validated by default in your browser.
-
-Are all cert-manager pods in the `oas` namespace in the `READY` state ?
-
-    $ kubectl -n oas get pods | grep cert-manager
-
-Are there any `cm-acme-http-solver-*` pods still running, indicating that there
-are unfinished certificate requests ?
-
-    $ kubectl get pods --all-namespaces | grep cm-acme-http-solver
-
-Show the logs of the main `cert-manager` pod:
-
-    $ kubectl -n oas logs -l "app.kubernetes.io/name=cert-manager"
-
-You can `grep` for your cluster domain or for any specific subdomain to narrow
-down results.
-
-Query for failed certificates, -requests, challenges or orders:
-
-    $ kubectl get --all-namespaces certificate,certificaterequest,challenge,order | grep -iE '(false|pending)'
-    oas-apps    certificate.cert-manager.io/oas-rocketchat                 False   oas-rocketchat                 15h
-    oas-apps    certificaterequest.cert-manager.io/oas-rocketchat-2045852889                 False   15h
-    oas-apps    challenge.acme.cert-manager.io/oas-rocketchat-2045852889-1775447563-837515681   pending   chat.example.org   15h
-    oas-apps    order.acme.cert-manager.io/oas-rocketchat-2045852889-1775447563                 pending   15h
-
-We see that the Rocketchat certificate resources are in a bad state since 15h.
-
-Show certificate resource status message:
-
-    $ kubectl -n oas-apps get certificate oas-rocketchat -o jsonpath="{.status.conditions[*]['message']}"
-    Waiting for CertificateRequest "oas-rocketchat-2045852889" to complete
-
-We see that the `certificate` is waiting for the `certificaterequest`, lets
-query it's status message:
-
-    $ kubectl -n oas-apps get certificaterequest oas-rocketchat-2045852889 -o jsonpath="{.status.conditions[*]['message']}"
-    Waiting on certificate issuance from order oas-apps/oas-rocketchat-2045852889-1775447563: "pending"
-
-Show the related order resource and look at the status and events:
-
-		kubectl -n oas-apps describe order oas-rocketchat-2045852889-1775447563
-
-Show the failed challenge resource reason:
-
-		$ kubectl -n oas-apps get challenge oas-rocketchat-2045852889-1775447563-837515681 -o jsonpath='{.status.reason}'
-		Waiting for http-01 challenge propagation: wrong status code '503', expected '200'
-
-In this example, deleting the challenge fixed the issue and a proper certificate
-could get fetched:
-
-    $ kubectl -n oas-apps delete challenges.acme.cert-manager.io oas-rocketchat-2045852889-1775447563-837515681
-
-
-## Application installation fails
-
-Find applications that fail to install:
-
-    helm ls --all-namespaces | grep -i -v DEPLOYED
-    kubectl get helmreleases --all-namespaces | grep -i -v DEPLOYED
-
-Especially the nextcloud installation process is brittle and error-prone.
-Lets take it as an example how to debug the root cause.
-
-
-## Purge OAS and install from scratch
-
-If ever things fail beyond possible recovery, here's how to completely purge an OAS installation in order to start from scratch:
-
-    cluster$ /usr/local/bin/k3s-killall.sh
-    cluster$ systemctl disable k3s
-    cluster$ mount | egrep '(kubelet|nsfs|k3s)' | cut -d' ' -f 3 | xargs --no-run-if-empty -n 1 umount
-    cluster$ rm -rf /var/lib/{rancher,OpenAppStack,kubelet,cni,docker,etcd} /etc/{kubernetes,rancher} /var/log/{OpenAppStack,containers,pods} /tmp/k3s /etc/systemd/system/k3s.service
-    cluster$ systemctl reboot
diff --git a/docs/troubleshooting.rst b/docs/troubleshooting.rst
new file mode 100644
index 0000000000000000000000000000000000000000..35db24398839b80be386c63411e99547056ba67e
--- /dev/null
+++ b/docs/troubleshooting.rst
@@ -0,0 +1,418 @@
+Troubleshooting
+===============
+
+If you run into problems, there are a few things you can do to investigate
+them. This document describes your options.
+
+.. note::
+   ``cluster$`` indicates that the commands should be run as root on your
+   OAS machine.
+
+**We would love to hear from you!** If you have problems, please create an issue
+in our `issue tracker
+<https://open.greenhost.net/groups/openappstack/-/issues>`__ or reach out as
+described on our `contact page <https://openappstack.net/contact.html>`__. We
+want to be in communication with our users, and we want to help you if you run
+into problems.
+
+Known issues
+------------
+
+If you run into a problem, please check our `issue
+tracker <https://open.greenhost.net/groups/openappstack/-/issues>`__ to see if
+others have run into the same problem. We might have suggested a workaround or
+temporary solution in one of our issues. If your problem is not described in an
+issue, please open a new one so we can solve the problems you encounter.
+
+Run the CLI tests
+-----------------
+
+To get an overall status of your cluster you can run the tests from the
+command line.
+
+There are two types of tests: `testinfra
+<https://testinfra.readthedocs.io/en/latest/>`__ tests and `Taiko
+<https://taiko.dev>`__ tests.
+
+Testinfra tests
+~~~~~~~~~~~~~~~
+
+Testinfra tests are split into two groups, let's call them *blackbox* and
+*clearbox* tests. The blackbox tests run on your provisioning machine and test
+the OAS cluster from the outside. For example, the certificate check verifies
+that the OAS cluster returns valid certificates for the provided services.
+The clearbox tests run on the OAS host and check, for example, whether Docker
+is installed in the right version. Our testinfra tests are a combination of
+blackbox and clearbox tests.
+
+First, enter the ``test`` directory in the Git repository **on your provisioning
+machine**.
+
+.. code:: bash
+
+   cd test
+
+To run the test against your cluster, first export the ``CLUSTER_DIR``
+environment variable with the location of your cluster config directory (replace
+``oas.example.org`` with your cluster name):
+
+.. code:: bash
+
+   export CLUSTER_DIR="../clusters/oas.example.org"
+
+Run all tests
+'''''''''''''
+
+.. code:: bash
+
+   py.test -s --ansible-inventory=${CLUSTER_DIR}/inventory.yml --hosts='ansible://*'
+
+Test all applications
+'''''''''''''''''''''
+
+This will check that:
+
+* The applications return proper certificates
+* All helm releases are successfully installed
+* All app pods are running and healthy
+
+These tests include all optional applications and will fail for optional
+applications that are not installed.
+
+.. code:: bash
+
+   pytest -s -m 'app' --connection=ansible --ansible-inventory=${CLUSTER_DIR}/inventory.yml --hosts='ansible://*'
+
+
+Test a specific application
+'''''''''''''''''''''''''''
+
+.. code:: bash
+
+    pytest -s -m 'app' --app="wordpress" --connection=ansible --ansible-inventory=${CLUSTER_DIR}/inventory.yml --hosts='ansible://*'
+
+
+Known Issues
+''''''''''''
+
+The default SSH backend for testinfra tests is ``paramiko``, which doesn't work
+out of the box. It fails to connect to the host because the ``ed25519`` hostkey
+was not verified. Therefore we need to force plain ssh:// with either
+``connection=ssh`` or ``--hosts=ssh://…``.
+
+Taiko tests
+~~~~~~~~~~~
+
+Taiko tests run in a browser and test if all the interfaces are up
+and running and correctly connected to each other. They are integrated in the
+``openappstack`` CLI command suite.
+
+Prerequisites
+'''''''''''''
+
+Install `Taiko <https://taiko.dev>`__ on your provisioning machine:
+
+.. code:: bash
+
+   npm install -g taiko
+
+Run Taiko tests
+'''''''''''''''
+
+To run all Taiko tests, run the following command in this repository:
+
+.. code:: bash
+
+   python -m openappstack CLUSTERNAME test
+
+To learn more about the ``test`` subcommand, run:
+
+.. code:: bash
+
+   python -m openappstack CLUSTERNAME test --help
+
+You can also run a Taiko test for a specific application only, e.g.:
+
+.. code:: bash
+
+   python -m openappstack CLUSTERNAME test --taiko-tags nextcloud
+
+Advanced usage
+--------------
+
+Testinfra tests
+~~~~~~~~~~~~~~~
+
+Specify host manually:
+
+.. code:: bash
+
+   py.test -s --hosts='ssh://root@example.openappstack.net'
+
+Run only tests tagged with ``prometheus``:
+
+.. code:: bash
+
+   py.test -s --ansible-inventory=${CLUSTER_DIR}/inventory.yml --hosts='ansible://*' -m prometheus
+
+Run cert test manually using the ansible inventory file:
+
+.. code:: bash
+
+   py.test -s --ansible-inventory=${CLUSTER_DIR}/inventory.yml --hosts='ansible://*' -m certs
+
+Run the cert test manually against a different cluster that is not configured
+in any ansible inventory file, either by using pytest:
+
+.. code:: bash
+
+   FQDN='example.openappstack.net' py.test -sv -m 'certs'
+
+or directly:
+
+.. code:: bash
+
+   FQDN='example.openappstack.net' pytest/test_certs.py
+
+Running Testinfra tests with local gitlab-runner docker executor
+''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''
+
+Export the following environment variables like this:
+
+.. code:: bash
+
+    export CI_REGISTRY_IMAGE='open.greenhost.net:4567/openappstack/openappstack'
+    export SSH_PRIVATE_KEY="$(cat ~/.ssh/id_ed25519_oas_ci)"
+    export COSMOS_API_TOKEN='…'
+
+then:
+
+.. code:: bash
+
+    gitlab-runner exec docker --env CI_REGISTRY_IMAGE="$CI_REGISTRY_IMAGE" --env SSH_PRIVATE_KEY="$SSH_PRIVATE_KEY" --env COSMOS_API_TOKEN="$COSMOS_API_TOKEN" bootstrap
+
+Taiko tests
+~~~~~~~~~~~
+
+Using Taiko without the OpenAppStack CLI
+''''''''''''''''''''''''''''''''''''''''
+
+Go to the ``test/taiko`` directory. For the Nextcloud and ONLYOFFICE tests,
+run:
+
+.. code:: bash
+
+    export DOMAIN='oas.example.net'
+    export SSO_USERNAME='user1'
+    export SSO_USER_PW='...'
+    export TAIKO_TESTS='nextcloud'
+    taiko --observe taiko-tests.js
+
+You can replace ``nextcloud`` with ``grafana`` or ``wordpress`` to test the
+other applications, or with ``all`` to test all applications.
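+
+For example, to test all applications in one run:
+
+.. code:: bash
+
+   export TAIKO_TESTS='all'
+   taiko --observe taiko-tests.js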
+
+SSH access
+----------
+
+You can SSH login to your VPS. Some programs that are available to the root user
+on the VPS:
+
+* ``kubectl``, the Kubernetes control program. The root user is connected to the
+  cluster automatically.
+* ``helm`` is the "Kubernetes package manager". Use e.g. ``helm ls --all-namespaces``
+  to see what apps are installed in your cluster. You can also use it to perform
+  manual upgrades; see ``helm --help``.
+* ``flux`` is the `flux`_ command-line tool.
+
+.. _flux: https://fluxcd.io
+
+Using kubectl to debug your cluster
+-----------------------------------
+
+You can use ``kubectl``, the Kubernetes control program, to find and manipulate
+your Kubernetes cluster. Once you have installed ``kubectl``, to get access to
+your cluster with the OAS CLI:
+
+.. code:: bash
+
+    $ python -m openappstack oas.example.org info
+
+Look for these lines:
+
+.. code::
+
+   To use kubectl with this cluster, copy-paste this in your terminal:
+   export KUBECONFIG=/home/you/projects/openappstack/clusters/oas.example.org/kube_config_cluster.yml
+
+Copy the whole ``export`` line into your terminal. In *the same terminal
+window*, ``kubectl`` will connect to your cluster.
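+
+For example, a quick check that the connection works:
+
+.. code:: bash
+
+   kubectl get nodes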
+
+HTTPS Certificates
+------------------
+
+OAS uses `cert-manager <https://docs.cert-manager.io/en/latest/>`__ to
+automatically fetch `Let's Encrypt <https://letsencrypt.org/>`__ certificates
+for all deployed services. If you experience invalid SSL certificates, e.g. your
+browser warns you when visiting Rocketchat (https://chat.oas.example.org),
+a useful resource for troubleshooting is the official cert-manager
+`Troubleshooting Issuing ACME Certificates
+<https://cert-manager.io/docs/faq/acme/>`__ documentation.
+
+In this example we fix a failed certificate request for
+*https://chat.oas.example.org*. We will start by checking if ``cert-manager``
+is set up correctly.
+
+Is your cluster using the live ACME server? 
+
+.. code:: bash
+
+   $ kubectl get clusterissuers -o yaml | grep 'server:'
+
+Should return ``server: https://acme-v02.api.letsencrypt.org/directory`` and not
+something with the word *staging* in it.
+
+Are all cert-manager pods in the ``cert-manager`` namespace in the ``READY`` state?
+
+.. code:: bash
+
+   $ kubectl -n cert-manager get pods
+
+Cert-manager uses a "custom resource" to keep track of your certificates, so you
+can also check the status of your certificates by running the command below.
+It returns all the certificates for all applications on your system; the
+example output shows healthy certificates.
+
+.. code:: bash
+
+   $ kubectl get certificates -A
+   NAMESPACE   NAME                           READY   SECRET                         AGE
+   oas         hydra-public.tls               True    hydra-public.tls               14d
+   oas         single-sign-on-userpanel.tls   True    single-sign-on-userpanel.tls   14d
+   oas-apps    oas-nextcloud-files            True    oas-nextcloud-files            14d
+   oas-apps    oas-nextcloud-office           True    oas-nextcloud-office           14d
+   oas         grafana-tls                    True    grafana-tls                    13d
+   oas         alertmanager-tls               True    alertmanager-tls               13d
+   oas         prometheus-tls                 True    prometheus-tls                 13d
+
+If there are problems, you can check for the specific ``certificaterequests``:
+
+.. code:: bash
+
+   $ kubectl get certificaterequests -A
+
+If you still need more information, you can dig into the logs of the
+``cert-manager`` pod:
+
+.. code:: bash
+
+   $ kubectl -n oas logs -l "app.kubernetes.io/name=cert-manager"
+
+You can ``grep`` for your cluster domain or for any specific subdomain to narrow
+down results.
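+
+For example, to only show log lines that mention a specific subdomain:
+
+.. code:: bash
+
+   $ kubectl -n oas logs -l "app.kubernetes.io/name=cert-manager" | grep 'chat.oas.example.org'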
+
+Example
+'''''''
+
+Query for failed certificates, certificate requests, challenges or orders:
+
+.. code:: bash
+
+    $ kubectl get --all-namespaces certificate,certificaterequest,challenge,order | grep -iE '(false|pending)'
+    oas-apps    certificate.cert-manager.io/oas-rocketchat                 False   oas-rocketchat                 15h
+    oas-apps    certificaterequest.cert-manager.io/oas-rocketchat-2045852889                 False   15h
+    oas-apps    challenge.acme.cert-manager.io/oas-rocketchat-2045852889-1775447563-837515681   pending   chat.oas.example.org   15h
+    oas-apps    order.acme.cert-manager.io/oas-rocketchat-2045852889-1775447563                 pending   15h
+
+We see that the Rocketchat certificate resources have been in a bad state for 15 hours.
+
+Show certificate resource status message:
+
+.. code:: bash
+
+    $ kubectl -n oas-apps get certificate oas-rocketchat -o jsonpath="{.status.conditions[*]['message']}"
+    Waiting for CertificateRequest "oas-rocketchat-2045852889" to complete
+
+We see that the ``certificate`` is waiting for the ``certificaterequest``;
+let's query its status message:
+
+.. code:: bash
+
+    $ kubectl -n oas-apps get certificaterequest oas-rocketchat-2045852889 -o jsonpath="{.status.conditions[*]['message']}"
+    Waiting on certificate issuance from order oas-apps/oas-rocketchat-2045852889-1775447563: "pending"
+
+Show the related order resource and look at the status and events:
+
+.. code:: bash
+
+    $ kubectl -n oas-apps describe order oas-rocketchat-2045852889-1775447563
+
+Show the failed challenge resource reason:
+
+.. code:: bash
+
+    $ kubectl -n oas-apps get challenge oas-rocketchat-2045852889-1775447563-837515681 -o jsonpath='{.status.reason}'
+    Waiting for http-01 challenge propagation: wrong status code '503', expected '200'
+
+In this example, deleting the challenge fixed the issue and a proper certificate
+could get fetched:
+
+.. code:: bash
+
+    $ kubectl -n oas-apps delete challenges.acme.cert-manager.io oas-rocketchat-2045852889-1775447563-837515681
+
+Application installation or upgrade failures
+--------------------------------------------
+
+Application installations and upgrades are managed by `flux`_. Flux uses
+``helm-controller`` to install and upgrade applications packaged as Helm charts.
+
+An application installed with Flux consists of a ``kustomization``. This is a
+resource that defines where the information about the application is stored in
+our Git repository. The ``kustomization`` contains a ``helmrelease``, which is
+an object that represents an installation of a Helm chart. Read more about the
+difference between ``kustomizations`` and ``helmreleases`` in the `flux
+documentation <https://fluxcd.io/docs>`__.
+
+To find out if all ``kustomizations`` have been applied correctly, run the
+following flux command in your cluster:
+
+.. code:: bash
+
+   cluster$ flux get kustomizations -A
+
+If all your ``kustomizations`` are in a ``Ready`` state, take a look at your
+``helmreleases``:
+
+.. code:: bash
+
+   cluster$ flux get helmreleases -A
+
+Often, you can resolve complications with ``kustomizations`` or ``helmreleases``
+by telling Flux to *reconcile* them: 
+
+.. code:: bash
+
+   cluster$ flux reconcile helmrelease nextcloud
+
+This makes sure that the Nextcloud ``helmrelease`` is brought back into the
+state that OpenAppStack wants it to be in.
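+
+The same works for ``kustomizations``. For example (the kustomization name is
+an example; list them with ``flux get kustomizations -A``):
+
+.. code:: bash
+
+   cluster$ flux reconcile kustomization nextcloud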
+
+Purge OAS and install from scratch
+----------------------------------
+
+If ever things fail beyond possible recovery, here's how to completely purge an
+OAS installation in order to start from scratch:
+
+.. warning::
+
+   **You will lose all your data!** This completely destroys OpenAppStack and
+   takes everything offline. If you choose to do this, you will need to
+   re-install OpenAppStack and make sure that your data is stored somewhere
+   other than the VPS that runs OpenAppStack.
+
+.. code:: bash
+
+    cluster$ /usr/local/bin/k3s-killall.sh
+    cluster$ systemctl disable k3s
+    cluster$ mount | egrep '(kubelet|nsfs|k3s)' | cut -d' ' -f 3 | xargs --no-run-if-empty -n 1 umount
+    cluster$ rm -rf /var/lib/{rancher,OpenAppStack,kubelet,cni,docker,etcd} /etc/{kubernetes,rancher} /var/log/{OpenAppStack,containers,pods} /tmp/k3s /etc/systemd/system/k3s.service
+    cluster$ systemctl reboot
diff --git a/docs/upgrading.md b/docs/upgrading.md
deleted file mode 100644
index a626152e4bc8d4c6079a6ffbc87d59b3f4868bfa..0000000000000000000000000000000000000000
--- a/docs/upgrading.md
+++ /dev/null
@@ -1,119 +0,0 @@
-# Upgrading OpenAppStack
-
-## General considerations before upgrading
-
-Include all potential new values of `ansible/group_vars/all/settings.yml.example`
-in your `clusters/YOUR_CLUSTERNAME/group_vars/all/settings.yml`.
-To find the differences (requires [python yq package](https://pypi.org/project/yq/)):
-
-    yq -y -rS . clusters/YOURCLUSTER/group_vars/all/settings.yml > /tmp/mine.yml
-    yq -y -rS . ansible/group_vars/all/settings.yml.example > /tmp/upstream.yml
-    diff /tmp/mine.yml /tmp/upstream.yml
-
-Before you upgrade please look at the `CHANGELOG.md` file to review what changed
-between the versions.
-
-## Upgrading from 0.4.0 to 0.5.0
-
-Unfortunatly we can't ensure a smooth upgrade for this version neither.
-Please read the section below on how to do an upgrade by installing a the new
-OAS version from scratch after backing up your data.
-
-## Upgrading from 0.3.0 to 0.4.0
-
-There is no easy upgrade path from version 0.3.0 to version 0.4.0. As far as we
-know, nobody was running OpenAppStack apart from the developers, so we assume
-this is not a problem.
-
-If you do need to upgrade, this is how you can migrate your data. Backup all the
-data available under `/var/lib/OpenAppStack/local-storage`, create a new cluster
-using the installation instructions, and putting back the data. This migration
-procedure might not work perfectly.
-
-Use `kubectl get pvc -A` on your old cluster to get a mapping of all the PVC
-uuids (and thus their folder names in `/var/lib/OpenAppStack/local-storage`) to
-the pods they are bound to.
-
-Then, delete your old OpenAppStack, and install a new one with version number
-0.4.0 or higher. You can upload your backed up data into
-`/var/lib/OpenAppStack/local-storage`. All PVCs will have new unique IDs (and
-thus different folder names). You have to manually match the folders from your
-backup with the new folders.
-
-Additionally, if you want to re-use your old `settings.yaml` file, this data
-needs to be added to it:
-
-```
-backup:
-  s3:
-    # Disabled by default. To enable, change to `true` and configure the
-    # settings below. You'll also want to add "velero" to the enabled
-    # applications a bit further in this file.
-    # Finally, you'll also need to provide access credentials as
-    # secrets; see the documentation:
-    # https://docs.openappstack.net/en/latest/installation_instructions.html#step-2-optional-cluster-backups-using-velero
-    enabled: false
-    # URL of S3 service. Please use the principal domain name here, without the
-    # bucket name.
-    url: "https://store.greenhost.net"
-    # Region of S3 service that's used for backups.
-    # For some on-premise providers this may be irrelevant, but the S3
-    # apparently requires it at some point.
-    region: "ceph"
-    # Name of the S3 bucket that backups will be stored in.
-    # This has to exist already: Velero will not create it for you.
-    bucket: "openappstack-backup"
-    # Prefix that's added to backup filenames.
-    prefix: "test-instance"
-
-# A whitelist of applications that will be enabled.
-enabled_applications:
-  # System components, necessary for the system to function.
-  - 'cert-manager'
-  - 'letsencrypt-production'
-  - 'letsencrypt-staging'
-  - 'ingress'
-  - 'local-path-provisioner'
-  - 'single-sign-on'
-  # The backup system Velero is disabled by default, see settings under `backup` above.
-  # - 'velero'
-  # Applications.
-  - 'grafana'
-  - 'loki'
-  - 'promtail'
-  - 'nextcloud'
-  - 'prometheus'
-  - 'rocketchat'
-  - 'wordpress'
-```
-
-## Upgrading to 0.3.0
-
-Upgrading from versions earlier than `0.3.0` requires manual
-intervention.
-
-* Move your local `settings.yml` file to a different location:
-
-    ```
-    cd CLUSTER_DIR
-    mkdir -p ./group_vars/all/
-    mv settings.yml ./group_vars/all/
-    ```
-
-* [Flux](https://fluxcd.io) is now used to install and update applications. For
-  that reason, we need you to remove all helm charts (WARNING: You will lose
-  your data!):
-
-  ```
-  helm delete --purge oas-test-cert-manager oas-test-local-storage \
-      oas-test-prometheus oas-test-proxy oas-test-files`
-  ```
-  - After removing all helm charts, you probably also want to remove all the
-    `pvc`s that are left behind. Flux will not re-use the database PVCs created
-    for these applications. Find all the pvcs by running `kubectl get pvc
-    --namespace oas-apps` and `kubectl get pvc --namespace oas`
-
-## Troubleshooting
-
-If you encounter problems when you upgrade your cluster, please try again
-running the installation procedure before reporting an issue.
diff --git a/docs/upgrading.rst b/docs/upgrading.rst
new file mode 100644
index 0000000000000000000000000000000000000000..17bc83dcd4a630615ab2a077b2d462c83699ac62
--- /dev/null
+++ b/docs/upgrading.rst
@@ -0,0 +1,126 @@
+Upgrading OpenAppStack
+======================
+
+Upgrading to 0.6.0
+------------------
+
+A few things are important when upgrading to 0.6.0:
+
+- We now use Flux 2 and the installation procedure has been overhauled. For this
+  reason we advise you to set up a completely new cluster.
+- Copy your configuration details from ``settings.yaml`` to a new ``.flux.env``
+  (a short excerpt is shown below). See ``install/.flux.env.example`` and the
+  :ref:`OpenAppStack installation instructions` for more information.
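+
+For example, the first few lines of a ``.flux.env`` might look like this
+(values are placeholders; see ``install/.flux.env.example`` for the full list):
+
+::
+
+   ip_address=1.2.3.4
+   domain=example.org
+   admin_email=admin@example.org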
+
+Please `reach out to us`_ if you are using, or plan to use OAS in
+production.
+
+Upgrading from 0.4.0 to 0.5.0
+-----------------------------
+
+Unfortunately we can’t ensure a smooth upgrade for this version either.
+Please read the section below on how to do an upgrade by installing the
+new OAS version from scratch after backing up your data.
+
+Upgrading from 0.3.0 to 0.4.0
+-----------------------------
+
+There is no easy upgrade path from version 0.3.0 to version 0.4.0. As
+far as we know, nobody was running OpenAppStack apart from the
+developers, so we assume this is not a problem.
+
+If you do need to upgrade, this is how you can migrate your data: back up
+all the data available under ``/var/lib/OpenAppStack/local-storage``,
+create a new cluster using the installation instructions, and put
+back the data. This migration procedure might not work perfectly.
+
+Use ``kubectl get pvc -A`` on your old cluster to get a mapping of all
+the PVC UUIDs (and thus their folder names in
+``/var/lib/OpenAppStack/local-storage``) to the pods they are bound to.
+
+Then, delete your old OpenAppStack, and install a new one with version
+number 0.4.0 or higher. You can upload your backed up data into
+``/var/lib/OpenAppStack/local-storage``. All PVCs will have new unique
+IDs (and thus different folder names). You have to manually match the
+folders from your backup with the new folders.
+
+Additionally, if you want to re-use your old ``settings.yaml`` file,
+this data needs to be added to it:
+
+::
+
+   backup:
+     s3:
+       # Disabled by default. To enable, change to `true` and configure the
+       # settings below. You'll also want to add "velero" to the enabled
+       # applications a bit further in this file.
+       # Finally, you'll also need to provide access credentials as
+       # secrets; see the documentation:
+       # https://docs.openappstack.net/en/latest/installation_instructions.html#step-2-optional-cluster-backups-using-velero
+       enabled: false
+       # URL of S3 service. Please use the principal domain name here, without the
+       # bucket name.
+       url: "https://store.greenhost.net"
+       # Region of S3 service that's used for backups.
+       # For some on-premise providers this may be irrelevant, but the S3
+       # apparently requires it at some point.
+       region: "ceph"
+       # Name of the S3 bucket that backups will be stored in.
+       # This has to exist already: Velero will not create it for you.
+       bucket: "openappstack-backup"
+       # Prefix that's added to backup filenames.
+       prefix: "test-instance"
+
+   # A whitelist of applications that will be enabled.
+   enabled_applications:
+     # System components, necessary for the system to function.
+     - 'cert-manager'
+     - 'letsencrypt-production'
+     - 'letsencrypt-staging'
+     - 'ingress'
+     - 'local-path-provisioner'
+     - 'single-sign-on'
+     # The backup system Velero is disabled by default, see settings under `backup` above.
+     # - 'velero'
+     # Applications.
+     - 'grafana'
+     - 'loki'
+     - 'promtail'
+     - 'nextcloud'
+     - 'prometheus'
+     - 'rocketchat'
+     - 'wordpress'
+
+Upgrading to 0.3.0
+------------------
+
+Upgrading from versions earlier than ``0.3.0`` requires manual
+intervention.
+
+-  Move your local ``settings.yml`` file to a different location:
+
+   ::
+
+      cd CLUSTER_DIR
+      mkdir -p ./group_vars/all/
+      mv settings.yml ./group_vars/all/
+
+-  `Flux`_ is now used to install and update applications. For that
+   reason, we need you to remove all helm charts (WARNING: You will lose
+   your data!):
+
+   ::
+
+      helm delete --purge oas-test-cert-manager oas-test-local-storage \
+          oas-test-prometheus oas-test-proxy oas-test-files`
+
+   -  After removing all helm charts, you probably also want to remove
+      all the ``pvc``\ s that are left behind. Flux will not re-use the
+      database PVCs created for these applications. Find all the PVCs by
+      running ``kubectl get pvc --namespace oas-apps`` and
+      ``kubectl get pvc --namespace oas``.
+
+.. _reach out to us: https://openappstack.net/contact.html
+.. _Flux: https://fluxcd.io
+
diff --git a/docs/usage.md b/docs/usage.md
deleted file mode 100644
index b4a88290bf76f9a3f0a1fcb53d639bb7572e03e3..0000000000000000000000000000000000000000
--- a/docs/usage.md
+++ /dev/null
@@ -1,117 +0,0 @@
-# Usage
-
-After all the applications are installed, the first thing to do is log into
-https://admin.oas.example.org. Here you can find the "user panel", a place where
-you can create, edit and delete users. You can log in with the user "admin". The
-password can be found in
-`clusters/my-cluster/secrets/userbackend_admin_password`. After logging in, you
-will see an overview of all the applications your user has access to. For more
-information on how to create users and give them access to applications, take a
-look at the [user panel
-documentation](https://docs.openappstack.net/projects/user-panel/en/latest/).
-
-> **NOTE:** at the moment none of the applications are available at
-> `oas.example.org`, we only provide applications in subdomains. In the future
-> this might change.
-
-## Applications
-
-These applications are available after the installation is completed
-successfully:
-
-
-### OAS User panel
-
-The [OAS user panel](https://open.greenhost.net/openappstack/user-panel/)
-can be used to create and edit users. These users can be used to log into the
-applications listed below.
-The user panel is available at https://admin.oas.example.org. You can login
-as `admin` using the `userbackend_admin_password` password from your secrets
-folder.
-
-After logging in to the user panel follow the [user panel documentation](https://docs.openappstack.net/projects/user-panel/en/latest/#creating-a-new-user)
-to create a new user.
-
-*Note*: The email address is important because some applications need a valid
-email address for notification mails.
-Single sign-on with Grafana will fail for users lacking an email address.
-
-You can now use the new user to log in to all apps which were granted access to
-in the last step using single sign-on.
-
-
-### Nextcloud
-
-[Nextcloud](https://nextcloud.com/) is a file sharing and communication
-platform and is available at https://files.oas.example.org.
-
-#### Single sign-on
-
-Nextcloud needs to be configured to properly send out emails.
-You can do so by logging in as `admin` using signle sign-on and then going to
-`Settings -> Basic settings -> Email server` and entering your SMTP email
-config and credentials.
-Please complete this configuration before you login as non-admin user using
-single sign-on, otherwise the [first login will not succeed](https://open.greenhost.net/openappstack/openappstack/issues/508).
-
-
-### Onlyoffice
-
-[Onlyoffice](https://www.onlyoffice.com/connectors-nextcloud.aspx) is an online
-document editing suite. Your can open documents in Onlyoffice by clicking them in Nextcloud. You can open new documents by clicking the "Plus" button in Nextcloud and selecting Document, Spreadsheet or Presentation.
-
-
-### Rocketchat
-
-[Rocketchat](https://rocket.chat/) is a team chat application and available at
-https://chat.oas.example.org.
-
-### Known issues
-
-- [Rocketchat isn't configured yet to send out email notifications](https://open.greenhost.net/openappstack/openappstack/issues/510)
-
-
-### Wordpress
-
-[Wordpress](https://wordpress.com) is a website content management system and
-available at https://www.oas.example.org.
-Click the `Log in` button and then click `Login with OpenID Connect` to use
-single sign-on.
-
-#### Single sign-on
-
-- If you [log in as `admin` using single sign-on, you will not have
-admin rights within Wordpress](https://open.greenhost.net/openappstack/single-sign-on/issues/33).
-In order to use admin rights you need to log in without single sign-on using the
-`wordpress_admin_password` password in the `secrets` folder.
-
-
-### Grafana
-
-[Grafana](https://grafana.com) that shows you information about the status of
-your cluster.
-Read more about Grafana in the [monitoring chapter below](#monitoring)
-
-#### Single sign-on
-
-- If you [log in as `admin` using single sign-on, you will not have
-admin rights within Grafana](https://open.greenhost.net/openappstack/single-sign-on/issues/32).
-In order to use admin rights you need to log in without signgle sign-on using the
-`grafana_admin_password` password in the `secrets` folder.
-
-
-### Other applications installed into the cluster
-
-Besides these applications, some other components are installed.
-These are part of the OpenAppStack back end and they dont't have a user facing
-web interfaces, but we like to list them here for reference:
-
-* [rancher's local-path-provisioner](https://github.com/rancher/local-path-provisioner) provides an easy way for the cluster to use a directory on
-  the node (by default `/var/lib/OpenAppStack/local-storage`) for storage;
-* [NGINX](https://www.nginx.com) is a webserver that functions as a so-called ingress controller,
-  routing web traffic that enters the cluster to the various applications;
-* [cert-manager](https://cert-manager.io) acquires and stores [Let's
-  Encrypt](https://letsencrypt.org/) certificates, enabling encrypted web
-  traffic to all applications running in the cluster;
-* [Flux](https://fluxcd.io) checks for application updates approved by the
-  OpenAppStack team and installs them automatically.
diff --git a/docs/usage.rst b/docs/usage.rst
new file mode 100644
index 0000000000000000000000000000000000000000..299bcdd4608b9bbe27d6adb697e54e1b25862456
--- /dev/null
+++ b/docs/usage.rst
@@ -0,0 +1,104 @@
+Usage
+=====
+
+After all the applications are installed, the first thing to do is log into
+https://admin.oas.example.org. This is the "user panel", a place where
+you can create, edit and delete users. You can log in with the user "admin". The
+password can be found by running 
+
+.. code:: bash
+
+   python3 -m openappstack oas.example.org secrets
+
+Search for ``userbackend_admin_password``.
+
+After logging in, you will see an overview of all the installed applications
+your user has access to. For more information on how to create users and give
+them access to applications, take a look at the `user panel documentation
+<https://docs.openappstack.net/projects/user-panel/en/latest/>`__.
+
+.. note::
+
+   If you don't see applications, make sure you have installed at least one
+   optional application in :ref:`step-5` of the installation procedure.
+
+Core applications
+-----------------
+
+These applications are available after the installation is completed
+successfully:
+
+OAS User panel
+~~~~~~~~~~~~~~
+
+The `OAS user panel <https://open.greenhost.net/openappstack/user-panel/>`__
+can be used to create and edit users. These users can be used to log into the
+applications listed below.
+The user panel is available at https://admin.oas.example.org. You can log in
+as ``admin`` using the ``userbackend_admin_password`` password retrieved with
+the ``secrets`` command shown above.
+
+After logging in to the user panel, follow the `user panel documentation
+<https://docs.openappstack.net/projects/user-panel/en/latest/#creating-a-new-user>`__
+to create a new user.
+
+.. note::
+
+   The email address is important because some applications need a valid
+   email address for notification mails. Single sign-on with Grafana will
+   fail for users lacking an email address.
+
+You can now use the new user to log in, via single sign-on, to all apps it was
+granted access to in the last step.
+
+Grafana
+~~~~~~~
+
+`Grafana <https://grafana.com>`__ is a dashboard application that shows you
+information about the status of your cluster collected by `Prometheus
+<https://prometheus.io>`__. Grafana is available at
+https://grafana.oas.example.org.
+
+Single sign-on users
+''''''''''''''''''''
+
+Users that have the "Admin" label turned on in the user panel, will have admin
+rights in Grafana. Other users are able to see graphs, but can not change
+anything.
+
+Optional applications
+---------------------
+
+Nextcloud
+~~~~~~~~~
+
+`Nextcloud <https://nextcloud.com/>`__ is a file sharing and communication
+platform and is available at https://files.oas.example.org.
+
+Onlyoffice
+~~~~~~~~~~
+
+`Onlyoffice <https://www.onlyoffice.com/connectors-nextcloud.aspx>`__ is an
+online document editing suite. You can open documents in Onlyoffice by clicking
+them in Nextcloud. You can open new documents by clicking the "Plus" button in
+Nextcloud and selecting Document, Spreadsheet or Presentation.
+
+.. warning::
+
+   OpenAppStack has a known issue that can cause loss of ONLYOFFICE
+   documents when the machine gets restarted or loses access to its disk.
+
+   Track the following issue to see if it is solved: https://open.greenhost.net/openappstack/nextcloud/-/issues/967
+
+   We are working hard to resolve the issue.
+
+RocketChat
+~~~~~~~~~~
+
+`RocketChat <https://rocket.chat/>`__ is a team chat application and available
+at https://chat.oas.example.org.
+
+
+WordPress
+~~~~~~~~~
+
+`WordPress <https://wordpress.com>`__ is a website content management system and
+available at https://www.oas.example.org. Click the ``Log in`` button and then
+click ``Login with OpenID Connect`` to use single sign-on.
diff --git a/flux2/apps/nextcloud/release.yaml b/flux2/apps/nextcloud/release.yaml
index 99fbb120c8f4636ed772d653f639555ada9edf14..a458708d6125d23c5621905c573f4bd1df571b87 100644
--- a/flux2/apps/nextcloud/release.yaml
+++ b/flux2/apps/nextcloud/release.yaml
@@ -33,9 +33,6 @@ spec:
             name: "${outgoing_mail_smtp_user}"
             password: "${outgoing_mail_smtp_password}"
             authtype: "${outgoing_mail_smtp_authtype}"
-      cronjob:
-        # Set curl to accept insecure connections when acme staging is used
-        curlInsecure: "${acme_staging}"
 
       ingress:
         enabled: true
diff --git a/install/.flux.env.example b/install/.flux.env.example
index c1fe971ecbff12573234a6da6435a98c04d2da1d..a52a4079a7155af41a5237ff374c69e6e70ae709 100755
--- a/install/.flux.env.example
+++ b/install/.flux.env.example
@@ -1,32 +1,40 @@
+# IP address of the cluster
 ip_address=1.2.3.4
+# FQDN of the cluster
 domain=example.org
-# Needs to be a real email address (or at least not @example.com) for LE
+# Email address of the system administrator. Is used for Prometheus alerts,
+# Let's Encrypt notifications, etc.
 admin_email=admin@example.org
 
-# Outgoing mail: even though we disable it, we need values for them, because
-# Kustomize still wants to substitute them.
+# Outgoing mail: even if this is disabled, we need values for it.
 outgoing_mail_enabled=false
-outgoing_mail_from_local_part=admin
+# Email address that the cluster sends mails from. Needs to be an existing SMTP
+# login
 outgoing_mail_from_address=admin@example.org
-outgoing_mail_domain=oas.example.org
+# Same outgoing mail address, but only the part before the '@'
+outgoing_mail_from_local_part=admin
+# Same outgoing mail address, but only the part after the '@'
+outgoing_mail_domain=example.org
+# SMTP password for the outgoing mail address
 outgoing_mail_smtp_password=CHANGEME
-# Example data for Greenhost SMTP login
+# SMTP username, often the same as the outgoing email address
+outgoing_mail_smtp_user=admin@example.org
+# SMTP login data. No need to change this for Greenhost email accounts.
 outgoing_mail_smtp_host=smtp.greenhost.nl
 outgoing_mail_smtp_authtype=LOGIN
 outgoing_mail_smtp_port=587
-outgoing_mail_smtp_user=info@example.org
 
-# ACME staging server address
+# Let's Encrypt ACME server address, keep this for production clusters
 acme_server=https://acme-v02.api.letsencrypt.org/directory
-# Used to let some programs accept insecure certificates
-acme_staging=false
-# On development setups please use Letsencrypts staging API *AND*
-# set the `acme_staging` var to true
-# acme_server=https://acme-staging-v02.api.letsencrypt.org/directory
-# acme_staging=true
 
+# On development setups please use Let's Encrypt's staging API
+# - Note that single sign-on will not work, because it requires valid
+#   certificates.
+# - Use `nextcloud-values-override.yml` for Nextcloud and ONLYOFFICE to work
+#   with ACME staging certificates.
+# acme_server=https://acme-staging-v02.api.letsencrypt.org/directory
 
-# (Example) backup data
+# Save backups to an S3 bucket.
 backup_s3_bucket=oas.greenhost.net
 backup_s3_prefix=ci-prefix
 backup_s3_url=https://store.greenhost.net
diff --git a/install/install-nextcloud.sh b/install/install-nextcloud.sh
index 3714658b1ba9c6f7f48c7ba4bd8f8bb122c75165..1f9998817e88e506abb2e46103defd012be5830b 100755
--- a/install/install-nextcloud.sh
+++ b/install/install-nextcloud.sh
@@ -2,9 +2,6 @@
 
 set -euo pipefail
 
-# First, add some overrides for values that are only useful in CI
-kubectl apply -n oas-apps -f $( dirname "${BASH_SOURCE[0]}" )/oas-nextcloud-override.yaml
-
 # This kustomization's only purpose is to add the kustomization that is in the
 # flxu2/cluster/optional/nextcloud folder. After this kustomization is applied
 # an `add-nextcloud` kustomization will be present on the cluster, as well as a
diff --git a/install/oas-nextcloud-override.yaml b/install/oas-nextcloud-override.yaml
deleted file mode 100644
index 5ad5657aea91c73c72d1db5ee08debbb856ca9c1..0000000000000000000000000000000000000000
--- a/install/oas-nextcloud-override.yaml
+++ /dev/null
@@ -1,10 +0,0 @@
----
-apiVersion: v1
-kind: ConfigMap
-metadata:
-  name: oas-nextcloud-override
-data:
-  values.yaml: |
-    onlyoffice:
-      unauthorizedStorage: true
-      httpsHstsEnabled: false
diff --git a/install/overrides/oas-nextcloud-override.yaml b/install/overrides/oas-nextcloud-override.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..f81c0f172aa2bbf85f89fef49dc2490878893680
--- /dev/null
+++ b/install/overrides/oas-nextcloud-override.yaml
@@ -0,0 +1,15 @@
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: oas-nextcloud-override
+data:
+  values.yaml: |
+    # By overriding these values, Nextcloud and ONLYOFFICE will work on ACME
+    # staging certificates.
+    onlyoffice:
+      unauthorizedStorage: true
+      httpsHstsEnabled: false
+    cronjob:
+      # Set curl to accept insecure connections when acme staging is used
+      curlInsecure: true