diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index 182379129f94661891021229cf2376795e0ca534..6be974def3c78910be766b2be62d2bee3216f62b 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -152,7 +152,8 @@ testinfra:
   script:
     - *debug_information
     - cd ansible/
-    - pytest -v -m 'testinfra' --connection=ansible --ansible-inventory=${CLUSTER_DIR}/inventory.yml --hosts='ansible://*'
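+    # -s disables pytest output capture so the failure details printed by the tests appear in the job log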
+    - pytest -v -s -m 'testinfra' --connection=ansible --ansible-inventory=${CLUSTER_DIR}/inventory.yml --hosts='ansible://*'
   only:
     changes:
       - .gitlab-ci.yml
diff --git a/test/pytest/test_system.py b/test/pytest/test_system.py
index be7192f4be31869bfaef9e814ab75c62410fcf2c..e405511e44bdd3ecc32becfb7074759dd4b85004 100644
--- a/test/pytest/test_system.py
+++ b/test/pytest/test_system.py
@@ -1,7 +1,94 @@
 import pytest
+import json
+import pprint
 
 
 @pytest.mark.testinfra
 def test_os_release(host):
     system_info = host.system_info
     assert system_info.release == '10'
+
+
+@pytest.mark.testinfra
+def test_kubernetes_setup(host):
+    """
+    Kube-bench checks whether the setup conforms to the CIS security benchmark.
+    Not all checks are relevant for the Rancher/RKE setup; for example, section
+    1.1 checks the permissions of config files that do not exist on our system.
+    """
+    # Instantiate PrettyPrinter to get readable test output if it fails
+    pp = pprint.PrettyPrinter()
+
+    # Only run these tests
+    # 1. Master tests
+    # 1.1: Ignore, because it's about config files we don't have
+    # 1.2: Ignore 1.2.32 - 1.2.35 because an encryption provider is not needed
+    #      in a single-node setup
+    tests = []
+    tests += ['1.2.{}'.format(x) for x in range(1, 15)]
+    # TODO: Add PodSecurityPolicy so 1.2.16 can succeed
+    tests += ['1.2.{}'.format(x) for x in range(17, 32)]
+    # 1.3: Controller manager, all tests added
+    tests += ['1.3.{}'.format(x) for x in range(1, 7)]
+    # 1.4: Scheduler, all tests added
+    tests += ['1.4.{}'.format(x) for x in range(1, 2)]
+    # 2. Etcd, all tests added
+    tests += ['2.{}'.format(x) for x in range(1, 7)]
+    # 3. Control plane configuration, all tests added
+    tests += ['3.1.1', '3.2.1', '3.2.2']
+    # 4. Node tests
+    # 4.1: Ignore, because it's about config files we don't have
+    # 4.2:
+    # 4.2.8: can't fix because we can't unset it
+    # 4.2.10: seems related to the TLS connection between nodes, so not relevant for us right now
+    tests += ['4.2.{}'.format(x) for x in range(1, 7)]
+    tests += ['4.2.9']
+    tests += ['4.2.{}'.format(x) for x in range(11, 13)]
+    # 5: Kubernetes policies, not added for now because they are mainly
+    #    relevant to multi-user clusters.
+
+    result_data = []
+    check_arg = ",".join(tests)
+
+    result = host.run(" ".join([
+        "docker",
+        "run",
+        "--pid=host",
+        "-v",
+        "/etc:/etc:ro",
+        "-v",
+        "/var:/var:ro",
+        "-t",
+        "aquasec/kube-bench:latest",
+        "--version=1.15",
+        '--check="{}"'.format(check_arg),
+        "--noremediations",
+        "--noresults",
+        "--nosummary",
+        "--json"]), capture_output=True)
+
+    if result.rc != 0:
+        print("Docker run failed:")
+        print(result.stderr)
+        # Fail explicitly; an empty stdout would otherwise let the assertions below pass vacuously
+        pytest.fail("kube-bench could not be run via docker")
+
+    # kube-bench does not emit a single valid JSON document; it prints one line
+    # of valid JSON per group of checks
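+    # An abridged sketch of one such line, limited to the fields read below:
+    # {"total_fail": 1, "tests": [{"section": "1.2", "fail": 1,
+    #                              "results": [{"status": "FAIL", ...}]}]}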
+    for line in result.stdout.splitlines():
+        output_data = json.loads(line)
+        if output_data['total_fail'] > 0:
+            print("Failed tests: ")
+            for test_output in output_data['tests']:
+                if test_output['fail'] > 0:
+                    print("Section {}".format(test_output['section']))
+                    for check_result in test_output['results']:
+                        if check_result['status'] == 'FAIL':
+                            pp.pprint(check_result)
+        result_data.append(output_data)
+
+    for data in result_data:
+        assert data['total_fail'] == 0