diff --git a/backend/app.py b/backend/app.py index b25e68020e3e62d8b3a2296944e67d7805f5c997..ddb226c979350081a16866bcbf1efaa0e050329d 100644 --- a/backend/app.py +++ b/backend/app.py @@ -5,6 +5,8 @@ from flask_jwt_extended import JWTManager import flask_migrate from jsonschema.exceptions import ValidationError from NamedAtomicLock import NamedAtomicLock +import threading +import traceback from werkzeug.exceptions import BadRequest # These imports are required @@ -19,7 +21,9 @@ from areas import resources from areas import roles from areas import tags from cliapp import cliapp +import helpers.kubernetes import helpers.provision +import helpers.threads from web import login from database import db @@ -75,10 +79,12 @@ app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = SQLALCHEMY_TRACK_MODIFICATIONS app.logger.setLevel(logging.INFO) cors = CORS(app) -provisioner = helpers.provision.Provision() db.init_app(app) +with app.app_context(): + provisioner = helpers.provision.Provision() + def init_routines(): with app.app_context(): # We have reset the alembic migration history at Stackspin version 2.2. @@ -102,27 +108,53 @@ def init_routines(): app.logger.info(f"upgrade failed: {type(e)}: {e}") sys.exit(2) - # We need this app context in order to talk the database, which is managed by - # flask-sqlalchemy, which assumes a flask app context. + def reload(): + # We need this app context in order to talk the database, which is managed by + # flask-sqlalchemy, which assumes a flask app context. + with app.app_context(): + app.logger.info("Reloading dashboard config from cluster resources.") + # Load the list of apps from a configmap and store any missing ones in the + # database. + app_slugs = cluster_config.populate_apps() + # Same for the list of oauthclients. + cluster_config.populate_oauthclients() + # Load per-app scim config if present. + cluster_config.populate_scim_config(app_slugs) + # We could call `reload` here manually, but actually the watch also at its + # start creates events for existing secrets so we don't need to. with app.app_context(): - # Load the list of apps from a configmap and store any missing ones in the - # database. - cluster_config.populate_apps() - # Same for the list of oauthclients. - cluster_config.populate_oauthclients() - - if provisioner.enabled: - # We define this wrapper because the SCIM provisioning code needs to access the - # database, which needs a flask app context. - def provision(): + # Set watch for dashboard SCIM config secrets. Any time those change, + # we reload so we can do SCIM for newly installed apps. + try: + helpers.kubernetes.watch_dashboard_config(app, reload) + except Exception as e: + app.logger.error(f"Error watching dashboard config: {e}") + + # Set up a generic task scheduler (cron-like). + scheduler = BackgroundScheduler() + scheduler.start() + # Add a job to run the provisioning reconciliation routine regularly. + # We'll also run it when we make changes that should be propagated + # immediately. + scheduler.add_job(helpers.threads.request_provision, 'interval', id='provision', hours=24) + # We'll run this in a separate thread so it can be done in the background. + # We have this single "provisioning worker" so there will be only one + # provisioning operation at a time. We use an Event to signal a + # provisioning request. 
+ def provisioning_loop(): + while True: + app.logger.info("Waiting for next provisioning run.") + # helpers.threads.provision_event.wait() + # helpers.threads.provision_event.clear() + helpers.threads.wait_provision() + app.logger.info("Starting provisioning.") with app.app_context(): - provisioner.reconcile() - # Set up a generic task scheduler (cron-like). - scheduler = BackgroundScheduler() - scheduler.start() - # Add a job to run the provisioning reconciliation routine regularly. - # TODO: decrease the frequency once development settles. - scheduler.add_job(provision, 'interval', minutes=1) + try: + provisioner.reconcile() + except Exception as e: + app.logger.warn(f"Exception during user provisioning:") + app.logger.warn(traceback.format_exc()) + threading.Thread(target=provisioning_loop).start() # `init_routines` should only run once per dashboard instance. To enforce this # we have different behaviour for production and development mode: diff --git a/backend/areas/apps/__init__.py b/backend/areas/apps/__init__.py deleted file mode 100644 index c798e1591650493727541bc252166abdf252a585..0000000000000000000000000000000000000000 --- a/backend/areas/apps/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -from .apps import * -from .apps_service import * -from .models import * diff --git a/backend/areas/apps/apps_service.py b/backend/areas/apps/apps_service.py index 04958aa884b76119ac390dbda33927438ae950fa..98971d8ce13e2bd0e4ad07a387645470bb9b43bd 100644 --- a/backend/areas/apps/apps_service.py +++ b/backend/areas/apps/apps_service.py @@ -1,12 +1,19 @@ +import threading + from flask import current_app from flask_jwt_extended import get_jwt import ory_kratos_client from ory_kratos_client.api import identity_api from .models import App, AppRole +from areas.roles.models import Role +from areas.users.models import User from config import * +from database import db from helpers.access_control import user_has_access from helpers.kratos_user import KratosUser +import helpers.kubernetes as k8s +from helpers.threads import request_provision class AppsService: @staticmethod @@ -42,3 +49,39 @@ class AppsService: def get_app_roles(): app_roles = AppRole.query.all() return [{"user_id": app_role.user_id, "app_id": app_role.app_id, "role_id": app_role.role_id} for app_role in app_roles] + + @classmethod + def install_app(cls, app): + app.install() + # Create app roles for the new app for all admins, and reprovision. We + # do this asynchronously, because we first need to wait for the app + # installation to be finished -- otherwise the SCIM config for user + # provisioning is not ready yet. + current_app.logger.info("Starting thread for creating app roles.") + # We need to pass the app context to the thread, because it needs that + # for database operations. + ca = current_app._get_current_object() + threading.Thread(target=cls.create_admin_app_roles, args=(ca, app,)).start() + + @staticmethod + def create_admin_app_roles(ca, app): + """Create AppRole objects for the given app for all admins.""" + with ca.app_context(): + ca.logger.info("Waiting for kustomizations to be ready.") + k8s.wait_kustomization_ready(app) + for user in User.get_all(): + if not user['stackspin_data']['stackspin_admin']: + # We are only dealing with admin users here. 
+ continue + existing_app_role = AppRole.query.filter_by(app_id=app.id, user_id=user['id']).first() + if existing_app_role is None: + ca.logger.info(f"Creating app role for app {app.slug} for admin user {user['id']}") + app_role = AppRole( + user_id=user['id'], + app_id=app.id, + role_id=Role.ADMIN_ROLE_ID + ) + db.session.add(app_role) + db.session.commit() + ca.logger.info("Requesting user provisioning.") + request_provision() diff --git a/backend/areas/apps/models.py b/backend/areas/apps/models.py index 5d983dee629394f13a2159a4e8f406d2b26ebb1a..e9330ab92bafd1eeb36cd93235f405ce68cceb34 100644 --- a/backend/areas/apps/models.py +++ b/backend/areas/apps/models.py @@ -30,6 +30,9 @@ class App(db.Model): # The URL is only stored in the DB for external applications; otherwise the # URL is stored in a configmap (see get_url) url = db.Column(String(length=128), unique=False) + scim_url = db.Column(String(length=1024), nullable=True) + scim_token = db.Column(String(length=1024), nullable=True) + scim_group_support = db.Column(Boolean, nullable=False, server_default='0') oauthclients = relationship("OAuthClientApp", back_populates="app") def __init__(self, slug, name, external=False, url=None): @@ -93,12 +96,12 @@ class App(db.Model): def uninstall(self): """ - Delete the app kustomization. + Delete the `add-$app` kustomization. - In our case, this triggers a deletion of the app's PVCs (so deletes all - data), as well as any other Kustomizations and HelmReleases related to - the app. It also triggers a deletion of the OAuth2Client object. It - also does not remove the TLS secret generated by cert-manager. + This triggers a deletion of the app's PVCs (so deletes all data), as + well as any other Kustomizations and HelmReleases related to the app. + It also triggers a deletion of the OAuth2Client object. It does not + remove the TLS secret generated by cert-manager. """ self.__delete_kustomization() @@ -191,6 +194,9 @@ class ProvisionStatus(enum.Enum): NotSupported = "NotSupported" # This user needs to be deleted from this app. ToDelete = "ToDelete" + # This app role entry corresponds to a Stackspin user that no longer + # exists. + Orphaned = "Orphaned" # Something went wrong; more information can be found in the # `last_provision_message`. 
Error = "Error" @@ -217,6 +223,7 @@ class AppRole(db.Model): # pylint: disable=too-few-public-methods ) last_provision_attempt = db.Column(DateTime, nullable=True) last_provision_message = db.Column(Unicode(length=256), nullable=True) + scim_id = db.Column(Unicode(length=256), nullable=True) role = relationship("Role") app = relationship("App") @@ -251,7 +258,7 @@ class AppStatus(): # pylint: disable=too-few-public-methods kustomization = app.kustomization if kustomization is not None and "status" in kustomization: - ks_ready, ks_message = AppStatus.check_condition(kustomization['status']) + ks_ready, ks_message = k8s.check_condition(kustomization['status']) self.installed = True if ks_ready: self.ready = ks_ready @@ -268,7 +275,7 @@ class AppStatus(): # pylint: disable=too-few-public-methods helmreleases = app.helmreleases for helmrelease in helmreleases: hr_status = helmrelease['status'] - hr_ready, hr_message = AppStatus.check_condition(hr_status) + hr_ready, hr_message = k8s.check_condition(hr_status) # For now, only show the message of the first HR that isn't ready if not hr_ready: @@ -283,26 +290,6 @@ class AppStatus(): # pylint: disable=too-few-public-methods def __repr__(self): return f"Installed: {self.installed}\tReady: {self.ready}\tMessage: {self.message}" - @staticmethod - def check_condition(status): - """ - Returns a tuple that has true/false for readiness and a message - - Ready, in this case means that the condition's type == "Ready" and its - status == "True". If the condition type "Ready" does not occur, the - status is interpreted as not ready. - - The message that is returned is the message that comes with the - condition with type "Ready" - - :param status: Kubernetes resource's "status" object. - :type status: dict - """ - for condition in status["conditions"]: - if condition["type"] == "Ready": - return condition["status"] == "True", condition["message"] - return False, "Condition with type 'Ready' not found" - def to_dict(self): """Represents this app status as a dict""" return { diff --git a/backend/areas/auth/auth.py b/backend/areas/auth/auth.py index 6cd27d343e77514b37f9147d93c31d42d9d775d0..861081a403b69b505beb506a3240f99529ac73dc 100644 --- a/backend/areas/auth/auth.py +++ b/backend/areas/auth/auth.py @@ -4,7 +4,7 @@ from flask_cors import cross_origin from datetime import timedelta from areas import api_v1 -from areas.apps import App, AppRole +from areas.apps.models import App, AppRole from config import * from helpers import HydraOauth, BadRequest, KratosApi diff --git a/backend/areas/users/models.py b/backend/areas/users/models.py new file mode 100644 index 0000000000000000000000000000000000000000..b8669895ee274d56fb9126567a6423488d4488a6 --- /dev/null +++ b/backend/areas/users/models.py @@ -0,0 +1,77 @@ +from areas.apps.models import App, AppRole +from areas.roles.models import Role +from areas.tags.models import TagUser +from database import db +from helpers import KratosApi + +class User(): + @staticmethod + def get_all(): + page = 0 + userList = [] + # Get all associated user data (Stackspin roles, tags). + stackspinData = UserStackspinData() + while page >= 0: + if page == 0: + res = KratosApi.get("/admin/identities?per_page=1000").json() + else: + res = KratosApi.get("/admin/identities?per_page=1000&page={}".format(page)).json() + for r in res: + # Inject information from the `stackspin` database that's associated to this user. 
+ r["stackspin_data"] = stackspinData.getData(r["id"]) + userList.append(r) + if len(res) == 0: + page = -1 + else: + page = page + 1 + + return userList + +class UserStackspinData(): + # TODO: we currently ignore the userID parameter, so we always get all + # associated information even if we only need it for a single user. + # That should be changed. + def __init__(self, userID=None): + self.dashboardRoles = self.__getDashboardRoles() + self.userTags = self.__getUserTags() + + def getData(self, userID): + stackspinData = {} + dashboardRole = self.dashboardRoles.get(userID) + if dashboardRole is not None: + stackspinData["stackspin_admin"] = dashboardRole == Role.ADMIN_ROLE_ID + # Also, user tags. + stackspinData["tags"] = self.userTags.get(userID, []) + return stackspinData + + @staticmethod + def setTags(userID, tags): + # Delete all existing tags, because the new set of tags is interpreted + # to overwrite the previous set. + db.session.query(TagUser).filter(TagUser.user_id == userID).delete() + # Now create an entry for every tag in the new list. + for tagID in tags: + tagUser = TagUser(user_id=userID, tag_id=tagID) + db.session.add(tagUser) + + @staticmethod + def __getDashboardRoles(): + dashboardRoles = {} + for appRole, app in ( + db.session.query(AppRole, App) + .filter(AppRole.app_id == App.id) + .filter(App.slug == "dashboard") + .all() + ): + dashboardRoles[appRole.user_id] = appRole.role_id + return dashboardRoles + + @staticmethod + def __getUserTags(): + userTags = {} + for tagUser in db.session.query(TagUser).all(): + if tagUser.user_id in userTags: + userTags[tagUser.user_id].append(tagUser.tag_id) + else: + userTags[tagUser.user_id] = [tagUser.tag_id] + return userTags diff --git a/backend/areas/users/user_service.py b/backend/areas/users/user_service.py index af7e60fd37cbc8bfb4c0067533109e73a2e94e98..02b2d7afde3b1683c64c98efc7893850c89ac28a 100644 --- a/backend/areas/users/user_service.py +++ b/backend/areas/users/user_service.py @@ -12,14 +12,15 @@ import time from flask import current_app +from .models import UserStackspinData from config import KRATOS_ADMIN_URL from database import db -from areas.apps import App, AppRole, AppsService -from areas.apps.models import ProvisionStatus -from areas.roles import Role, RoleService -from areas.tags import TagUser +from areas.apps.models import App, AppRole, ProvisionStatus +from areas.apps.apps_service import AppsService +from areas.roles import Role from helpers import KratosApi from helpers.error_handler import KratosError +from helpers.threads import request_provision kratos_admin_api_configuration = \ @@ -31,25 +32,7 @@ kratos_identity_api = identity_api.IdentityApi(kratos_client) class UserService: @classmethod def get_users(cls): - page = 0 - userList = [] - # Get all associated user data (Stackspin roles, tags). - stackspinData = UserStackspinData() - while page >= 0: - if page == 0: - res = KratosApi.get("/admin/identities?per_page=1000").json() - else: - res = KratosApi.get("/admin/identities?per_page=1000&page={}".format(page)).json() - for r in res: - # Inject information from the `stackspin` database that's associated to this user. - r["stackspin_data"] = stackspinData.getData(r["id"]) - userList.append(r) - if len(res) == 0: - page = -1 - else: - page = page + 1 - - return userList + return User.get_all() @classmethod def get_user(cls, id): @@ -105,6 +88,7 @@ class UserService: # Commit all changes to the stackspin database. 
db.session.commit() + request_provision() # We start a recovery flow immediately after creating the # user, so the user can set their initial password. @@ -155,6 +139,7 @@ class UserService: UserStackspinData.setTags(id, data["tags"]) db.session.commit() + request_provision() return cls.get_user(id) @@ -177,44 +162,7 @@ class UserService: app_id=app_id, ) db.session.add(appRole) - - @classmethod - def put_multiple_users(cls, user_editing_id, data): - for user_data in data["users"]: - kratos_data = { - # "schema_id": "default", - "traits": {"email": user_data["email"]}, - } - KratosApi.put("/admin/identities/{}".format(user_data["id"]), kratos_data) - - is_admin = RoleService.is_user_admin(user_editing_id) - - if is_admin and user_data["app_roles"]: - app_roles = user_data["app_roles"] - for ar in app_roles: - app = App.query.filter_by(slug=ar["name"]).first() - app_role = AppRole.query.filter_by( - user_id=user_data["id"], app_id=app.id).first() - - if app_role: - app_role.role_id = ar["role_id"] if "role_id" in ar else None - # Mark the app role so the SCIM routine will pick it up at - # its next run. - app_role.provision_status = ProvisionStatus.SyncNeeded - db.session.commit() - else: - appRole = AppRole( - user_id=user_Data["id"], - role_id=ar["role_id"] if "role_id" in ar else None, - app_id=app.id, - ) - db.session.add(appRole) - db.session.commit() - - if user_data.get("tags") is not None: - UserStackspinData.setTags(user_data["id"], user_data["tags"]) - - return cls.get_user(user_data["id"]) + request_provision() @staticmethod def delete_user(id): @@ -222,6 +170,7 @@ class UserService: for ar in app_role: ar.provision_status = ProvisionStatus.ToDelete db.session.commit() + request_provision() @classmethod def post_multiple_users(cls, data): @@ -252,6 +201,7 @@ class UserService: f"Exception: {error} on creating user: {user_email}") creation_failed_users.append(user_email) + request_provision() success_response = {} existing_response = {} failed_response = {} @@ -306,52 +256,3 @@ class UserService: userRes["traits"]["app_roles"] = app_roles return userRes - -class UserStackspinData(): - # TODO: we currently ignore the userID parameter, so we always get all - # associated information even if we only need it for a single user. - # That should be changed. - def __init__(self, userID=None): - self.dashboardRoles = self.__getDashboardRoles() - self.userTags = self.__getUserTags() - - def getData(self, userID): - stackspinData = {} - dashboardRole = self.dashboardRoles.get(userID) - if dashboardRole is not None: - stackspinData["stackspin_admin"] = dashboardRole == Role.ADMIN_ROLE_ID - # Also, user tags. - stackspinData["tags"] = self.userTags.get(userID, []) - return stackspinData - - @staticmethod - def setTags(userID, tags): - # Delete all existing tags, because the new set of tags is interpreted - # to overwrite the previous set. - db.session.query(TagUser).filter(TagUser.user_id == userID).delete() - # Now create an entry for every tag in the new list. 
- for tagID in tags: - tagUser = TagUser(user_id=userID, tag_id=tagID) - db.session.add(tagUser) - - @staticmethod - def __getDashboardRoles(): - dashboardRoles = {} - for appRole, app in ( - db.session.query(AppRole, App) - .filter(AppRole.app_id == App.id) - .filter(App.slug == "dashboard") - .all() - ): - dashboardRoles[appRole.user_id] = appRole.role_id - return dashboardRoles - - @staticmethod - def __getUserTags(): - userTags = {} - for tagUser in db.session.query(TagUser).all(): - if tagUser.user_id in userTags: - userTags[tagUser.user_id].append(tagUser.tag_id) - else: - userTags[tagUser.user_id] = [tagUser.tag_id] - return userTags diff --git a/backend/areas/users/users.py b/backend/areas/users/users.py index 9427a1d13c205790a5743c8608c694ee56ce841f..d7dba6269706f68314d7d466cec37c969c0af83e 100644 --- a/backend/areas/users/users.py +++ b/backend/areas/users/users.py @@ -118,18 +118,6 @@ def post_multiple_users(): return jsonify(res) -# multi-user editing of app roles -@api_v1.route("/users-multi-edit", methods=["PUT"]) -@jwt_required() -@cross_origin() -@expects_json(schema_multi_edit) -@admin_required() -def put_multiple_users(): - data = request.get_json() - user_id = __get_user_id_from_jwt() - res = UserService.put_multiple_users(user_id, data) - return jsonify(res) - @api_v1.route("/me", methods=["GET"]) @jwt_required() @cross_origin() @@ -146,7 +134,7 @@ def get_personal_info(): def update_personal_info(): data = request.get_json() user_id = __get_user_id_from_jwt() - res = UserService.put_user(user_id, user_id, data) + res = UserService.put_user(user_id, data) return jsonify(res) diff --git a/backend/cliapp/cliapp/cli.py b/backend/cliapp/cliapp/cli.py index 41dec63ec81f0ee3159661842be42e5aa8164f87..38d5a20f82ea43cf321c3e9ef3344e59792b1a84 100644 --- a/backend/cliapp/cliapp/cli.py +++ b/backend/cliapp/cliapp/cli.py @@ -18,7 +18,8 @@ from sqlalchemy import func from config import HYDRA_ADMIN_URL, KRATOS_ADMIN_URL, KRATOS_PUBLIC_URL from helpers import KratosUser from cliapp import cli -from areas.apps import AppRole, App +from areas.apps.apps_service import AppsService +from areas.apps.models import AppRole, App from areas.roles import Role from areas.users import UserService from database import db @@ -195,14 +196,14 @@ def install_app(slug): if app.external: current_app.logger.info( - f"App {slug} is an external app and can not be provisioned automatically") + f"App {slug} is an external app and cannot be provisioned automatically") sys.exit(1) current_status = app.get_status() if not current_status.installed: - app.install() + AppsService.install_app(app) current_app.logger.info( - f"App {slug} installing... use `status` to see status") + f"App {slug} installing...") else: current_app.logger.error(f"App {slug} is already installed") diff --git a/backend/cluster_config.py b/backend/cluster_config.py index f814e1c4351e9e3ab56cefc07ce77146fb7d6d57..4ced17f74f38b6d73d859ade225c8bfaa17214be 100644 --- a/backend/cluster_config.py +++ b/backend/cluster_config.py @@ -1,12 +1,15 @@ +import base64 +import jwt +import logging +from string import Template +import yaml + from database import db from areas.apps.models import App, OAuthClientApp import helpers.kubernetes as k8s -import logging -import yaml - # Read in two configmaps from the cluster, which specify which apps should be -# present in the database. +# present in the database. Returns the list of app slugs. 
def populate_apps(): logging.info("cluster_config: populating apps") database_apps = {} @@ -14,20 +17,23 @@ def populate_apps(): slug = app.slug database_apps[slug] = app logging.debug(f"database app: {slug}") - _populate_apps_from(database_apps, "stackspin-apps") - _populate_apps_from(database_apps, "stackspin-apps-custom") + core_apps = _populate_apps_from(database_apps, "stackspin-apps") + custom_apps = _populate_apps_from(database_apps, "stackspin-apps-custom") + return (core_apps + custom_apps) # Read a list of apps from a configmap. Check if they are already present in # the database, and if not, add missing ones there. Properties `name`, # `external` and `url` can be specified in yaml format in the configmap value -# contents. +# contents. Returns the list of app slugs found. def _populate_apps_from(database_apps, configmap_name): + slugs = [] cm_apps = k8s.get_kubernetes_config_map_data(configmap_name, "flux-system") if cm_apps is None: logging.info(f"Could not find configmap '{configmap_name}' in namespace 'flux-system'; ignoring.") else: for app_slug, app_data in cm_apps.items(): logging.debug(f"configmap app: {app_slug}") + slugs.append(app_slug) if app_slug in database_apps: logging.debug(f" already present in database") else: @@ -43,6 +49,7 @@ def _populate_apps_from(database_apps, configmap_name): new_app = App(slug=app_slug, name=name, external=external, url=url) db.session.add(new_app) db.session.commit() + return slugs # Read in two configmaps from the cluster, which specify which oauthclients # should be present in the database. @@ -81,3 +88,50 @@ def _populate_oauthclients_from(database_oauthclients, configmap_name): logging.debug(f" new oauth client: {new_client}") db.session.add(new_client) db.session.commit() + +# Read optional per-app SCIM configuration (URL and token) from secrets. +# Store the results in the database. Needs the list of app slugs to be passed. +def populate_scim_config(apps): + for app in apps: + secret_name = f"stackspin-scim-{app}" + scim_config = k8s.get_kubernetes_secret_data(secret_name, "flux-system") + if scim_config is None: + logging.info(f"Could not find secret '{secret_name}' in namespace 'flux-system'; ignoring.") + continue + logging.info(f"Processing secret stackspin-scim-{app}") + app = App.query.filter_by(slug=app).first() + if not app: + logging.error(f" could not find app with slug {app}") + continue + scim_url = scim_config.get("scim_url") + if scim_url is None: + logging.error(f" 'scim_url' is not set") + continue + scim_token = scim_config.get("scim_token") + if scim_token is None: + logging.error(f" 'scim_token' is not set") + continue + scim_url = base64.b64decode(scim_url).decode() + # We substitute the string `$BASE` or `${BASE}` in the `scim_url` by + # the app's base url. + scim_url = Template(scim_url).substitute(BASE=app.get_url()) + app.scim_url = scim_url + scim_token = base64.b64decode(scim_token).decode() + scim_jwt = scim_config.get("scim_jwt") + if scim_jwt is not None: + scim_jwt = base64.b64decode(scim_jwt).decode() + if scim_jwt == "nextcloud": + # Nextcloud wants a JWT token containing the username of an existing user. 
+ scim_token = jwt.encode({"sub":"admin"}, scim_token, algorithm="HS256") + else: + logging.error(f" 'jwt' has unknown value {scim_jwt}") + continue + app.scim_token = scim_token + scim_group_support = scim_config.get("scim_group_support") + if scim_group_support is None: + app.scim_group_support = False + else: + scim_group_support = base64.b64decode(scim_group_support).decode() + app.scim_group_support = scim_group_support.lower() in ['1', 'true', 'yes'] + logging.info(f"Configuring SCIM for {app} with url {app.scim_url}.") + db.session.commit() diff --git a/backend/config.py b/backend/config.py index f43ee28b16e2ce8ca7a648d21bc25d28d1d16e1a..ffa0967779be7c91f92e8a0909da91d3d0a4435a 100644 --- a/backend/config.py +++ b/backend/config.py @@ -39,6 +39,3 @@ else: DEMO_INSTANCE = os.environ.get("DASHBOARD_DEMO_INSTANCE", "False").lower() in ('true', '1') ENFORCE_2FA = os.environ.get("DASHBOARD_ENFORCE_2FA", "False").lower() in ('true', '1') - -SCIM_URL = os.environ.get("SCIM_URL") -SCIM_TOKEN = os.environ.get("SCIM_TOKEN") diff --git a/backend/helpers/exceptions.py b/backend/helpers/exceptions.py index bae5711c57fcf9dcc9a8fc96ccc03ef6cc77f6c7..177592de4ca5b81d42f3c9ab0d87c64e8b10b4c6 100644 --- a/backend/helpers/exceptions.py +++ b/backend/helpers/exceptions.py @@ -6,3 +6,9 @@ class BackendError(Exception): """The backend error is raised when interacting with the backend fails or gives an unexpected result. The error contains a oneliner description of the problem""" + def __init__(self, message, upstream_exception=None): + # Init of standard Exception class. + super().__init__(message) + # We save the original exception in case the handler wants to inspect + # it. + self.upstream_exception = upstream_exception diff --git a/backend/helpers/kratos_user.py b/backend/helpers/kratos_user.py index 13a10e3a2f703cf86ac21c156154c98709236c47..877aaca68ca70376b3245191163bdd9d3544e8b9 100644 --- a/backend/helpers/kratos_user.py +++ b/backend/helpers/kratos_user.py @@ -56,7 +56,7 @@ class KratosUser(): self.created_at = obj.created_at self.updated_at = obj.updated_at except KratosApiException as error: - raise BackendError(f"Unable to get entry, kratos replied with: {error}") from error + raise BackendError(f"Unable to get entry, kratos replied with: {error}", error) from error def __repr__(self): diff --git a/backend/helpers/kubernetes.py b/backend/helpers/kubernetes.py index cfdcbf7d53145558fb5d636b969b698c8cf28aa1..de35673ac9430e6cce2c03f2a5d34074a5c192c3 100644 --- a/backend/helpers/kubernetes.py +++ b/backend/helpers/kubernetes.py @@ -2,12 +2,14 @@ List of functions to get data from Flux Kustomizations and Helmreleases """ import crypt +import functools import secrets import string +import threading import jinja2 import yaml -from kubernetes import client, config +from kubernetes import client, config, watch from kubernetes.config.incluster_config import InClusterConfigLoader from kubernetes.client import api_client from kubernetes.client.exceptions import ApiException @@ -421,3 +423,74 @@ def get_gitrepo(name, namespace='flux-system'): # Raise all non-404 errors raise error return resource + +def debounce(timeout: float): + def decorator(func): + @functools.wraps(func) + def wrapper(*args, **kwargs): + wrapper.func.cancel() + wrapper.func = threading.Timer(timeout, func, args, kwargs) + wrapper.func.start() + wrapper.func = threading.Timer(timeout, lambda: None) + return wrapper + return decorator + +def watch_dashboard_config(app, reload): + w = watch.Watch() + api_instance = 
client.CoreV1Api(api_client.ApiClient()) + def p(): + with app.app_context(): + # Number of seconds to wait before reloading in case more secrets show up. + # In particular this prevents us from reloading once for every + # secret that exists at startup in succession. + debounce_timeout = 1 + @debounce(debounce_timeout) + def debounced_reload(): + reload() + for event in w.stream( + api_instance.list_namespaced_secret, + 'flux-system', + label_selector="stackspin.net/scim-config=1", + watch=True + ): + current_app.logger.info(f"{event['type']} SCIM config secret: {event['object'].metadata.name}") + debounced_reload() + thread = threading.Thread(target=p) + thread.start() + +def check_condition(status): + """ + Returns a tuple that has true/false for readiness and a message + + Ready, in this case means that the condition's type == "Ready" and its + status == "True". If the condition type "Ready" does not occur, the + status is interpreted as not ready. + + The message that is returned is the message that comes with the + condition with type "Ready" + + :param status: Kubernetes resource's "status" object. + :type status: dict + """ + if status["observedGeneration"] == -1: + return False, "Kustomization is not yet seen by controller" + for condition in status["conditions"]: + if condition["type"] == "Ready": + return condition["status"] == "True", condition["message"] + return False, "Condition with type 'Ready' not found" + +def wait_kustomization_ready(app): + w = watch.Watch() + api_instance = client.CustomObjectsApi() + for event in w.stream(api_instance.list_namespaced_custom_object, 'kustomize.toolkit.fluxcd.io', 'v1', 'flux-system', 'kustomizations'): + ks = event['object'] + if ks['metadata']['name'] != app.slug: + # We're currently only interested in the `app` app. + continue + ks_ready, ks_message = check_condition(ks['status']) + if not ks_ready: + # There is some data on the app kustomization, but it's not ready + # yet. + continue + print(f"Kustomization {app.slug} is now ready.") + return diff --git a/backend/helpers/provision.py b/backend/helpers/provision.py index 3b1f01f000286597f65b274aaa224dc54ef143fe..001b75f7787ea17ae1a9a7f598759c83f922ad70 100644 --- a/backend/helpers/provision.py +++ b/backend/helpers/provision.py @@ -3,6 +3,7 @@ import json.decoder import logging import ory_kratos_client from ory_kratos_client.api import identity_api +import ory_kratos_client.exceptions import requests from areas.apps.models import App, AppRole, ProvisionStatus @@ -10,6 +11,7 @@ from areas.roles.models import Role import config from database import db from helpers.kratos_user import KratosUser +from helpers.exceptions import BackendError class ProvisionError(Exception): pass @@ -20,8 +22,8 @@ class User: self.scim_id = scim_id self.displayName = displayName - def ref(self): - return f"{config.SCIM_URL}Users/{self.scim_id}" + def ref(self, base_url): + return f"{base_url}Users/{self.scim_id}" class Group: def __init__(self, scim_id, displayName, members): @@ -51,53 +53,88 @@ class Group: # do the corresponding SCIM calls to those apps to do the actual provisioning. 
class Provision: def __init__(self): - if config.SCIM_TOKEN is None: - logging.warn("SCIM_TOKEN is not set, so disabling SCIM") - self.enabled = False - return - if config.SCIM_URL is None: - logging.warn("SCIM_URL is not set, so disabling SCIM") - self.enabled = False - return - self.enabled = True - logging.info(f"Enabling SCIM, with URL {config.SCIM_URL}") + self.initialised = False + + # We don't do this in init, because at the moment this object is created we + # might not have performed the database migration yet that creates the scim + # config columns. This function will be called once automatically at the + # start of `reconcile`. + def _load_config(self): + logging.info("Loading SCIM configuration from database for all apps.") + database_apps = App.query.filter(App.scim_url != None).all() + enabled = False + apps = {} + for app in database_apps: + if app.scim_token is None: + logging.warn(f"scim_token is not set, so disabling SCIM for {app.slug}") + continue + apps[app.id] = app + logging.info(f"Enabling SCIM for {app.slug}, with URL {app.scim_url}") + enabled = True + self.enabled = enabled + self.scim_apps = apps # Set up kratos API client. kratos_admin_api_configuration = ory_kratos_client.Configuration(host=config.KRATOS_ADMIN_URL, discard_unknown_keys=True) kratos_admin_client = ory_kratos_client.ApiClient(kratos_admin_api_configuration) self.kratos_identity_api = identity_api.IdentityApi(kratos_admin_client) + self.initialised = True + def app_supported(self, app): - return (app.slug == "nextcloud") + return (app.id in self.scim_apps) def reconcile(self): logging.info("Reconciling user provisioning") - existing_users = self._get_existing_users() - for userId, u in existing_users.items(): - logging.info(f"Existing user: {u.displayName} ({userId})") - existing_groups = self._get_existing_groups() - for _, g in existing_groups.items(): - g.debug() - # We will modify this group in-memory over the course of the user reconciliation. - # After we have reconciled all users, we will update the group in a single operation. - # We could also use PATCH operations instead to add single users as we - # go, but the Nextcloud SCIM app does not support that. - admin_group = existing_groups.get('admin', None) - if admin_group is None: - raise ProvisionError("Admin group could not be found, aborting.") - # TODO: we later need to retry Error ones as well. + if not self.initialised: + self._load_config() + # Collect existing in-app users and groups in advance so we can compare to that + # efficiently when we go over the dashboard users. + existing_users = {} + existing_groups = {} + admin_group = {} + for app_id, app in self.scim_apps.items(): + existing_users[app_id] = self._get_existing_users(app) + for userId, u in existing_users[app_id].items(): + logging.info(f"Existing user in {app.slug}: {u.displayName} ({userId})") + if app.scim_group_support: + existing_groups[app_id] = self._get_existing_groups(app) + for _, g in existing_groups[app_id].items(): + g.debug() + # We will modify this group in-memory over the course of the + # user reconciliation. After we have reconciled all users, we + # will update the group in a single operation. We could also + # use PATCH operations instead to add single users as we go, + # but the Nextcloud SCIM app does not support that. 
+ admin_group[app_id] = existing_groups[app_id].get('admin') + if admin_group[app_id] is None: + raise ProvisionError("Admin group for {app.slug} could not be found, aborting.") + # TODO: we later need to retry some failed ones as well: at least Error + # in case it was a temporary error, and NotSupported in case the app + # supports SCIM now whereas it did not before. Maybe do that only for + # regular full (re)provision runs, not for ones requested because of + # changes. app_roles = db.session.query(AppRole).filter( AppRole.provision_status.in_((ProvisionStatus.SyncNeeded, ProvisionStatus.ToDelete)) ) for app_role in app_roles: # logging.info(f"found app_role: {app_role}") + # Check if this app supports SCIM at all and is configured for it. app_role.last_provision_attempt = datetime.now() - existing_user = existing_users.get(app_role.user_id, None) + app = app_role.app + if not self.app_supported(app): + app_role.provision_status = ProvisionStatus.NotSupported + app_role.last_provision_message = f"App does not support automatic user provisioning." + db.session.commit() + continue + # Look up any existing in-app user with this ID from the + # pre-fetched list we got via SCIM. + existing_user = existing_users[app.id].get(app_role.user_id) if existing_user is not None: - logging.info(f"User {app_role.user_id} already exists in the app.") + logging.info(f"User {app_role.user_id} already exists in the app {app.slug}.") if app_role.role_id == Role.NO_ACCESS_ROLE_ID: if existing_user is None: - logging.info("User without access does not exist yet, so nothing to do.") + logging.info(f"User without access does not exist yet in {app.slug}, so nothing to do.") # We have not provisioned this user in this app yet, so we # don't have to do anything at this point. app_role.provision_status = ProvisionStatus.Provisioned @@ -105,32 +142,21 @@ class Provision: db.session.commit() continue else: - logging.info("User without access exists in the app; we continue so we can disable the user in the app.") - app = app_role.app - if not self.app_supported(app): - app_role.provision_status = ProvisionStatus.NotSupported - app_role.last_provision_message = f"App does not support automatic user provisioning." - db.session.commit() - continue + logging.info(f"User without access exists in the app {app.slug}; we continue so we can disable the user in the app.") try: - self._provision_user(app_role, app, existing_user, admin_group) - new_status = ProvisionStatus.Provisioned - new_message = "User successfully provisioned." 
+ self._provision_user(app_role, app, existing_user, admin_group.get(app.id)) except ProvisionError as ex: - new_status = ProvisionStatus.Error - new_message = str(ex) - app_role.provision_status = new_status - app_role.last_provision_message = new_message - db.session.commit() - # let's do only one at a time for now - # break - logging.info("Before provisioning admin group:") - admin_group.debug() - if admin_group.modified: - logging.info("Admin group was modified, so updating it via SCIM.") - self._provision_group(admin_group) - else: - logging.info("Admin group was not modified.") + app_role.provision_status = ProvisionStatus.Error + app_role.last_provision_message = str(ex) + db.session.commit() + for app_id, app in self.scim_apps.items(): + if app.scim_group_support: + if admin_group[app_id].modified: + logging.info(f"Admin group for {app.slug} was modified, so updating it via SCIM.") + admin_group[app_id].debug() + self._provision_group(admin_group[app_id], app) + else: + logging.info(f"Admin group for {app.slug} was not modified.") # Provision the user via SCIM PUT or POST, based on the user and role # information in `app_role`, and on the existing user object in the app @@ -139,29 +165,39 @@ class Provision: # admin group. def _provision_user(self, app_role, app, existing_user, admin_group): logging.info(f"Reconciling user {app_role.user_id}") + app = self.scim_apps[app.id] scim_headers = { - 'Authorization': 'Bearer ' + config.SCIM_TOKEN + 'Authorization': 'Bearer ' + app.scim_token } if app_role.provision_status == ProvisionStatus.ToDelete: if existing_user is None: db.session.delete(app_role) db.session.commit() - return else: logging.info(f"Deleting user {app_role.user_id} from {app.slug}") - url = f"{config.SCIM_URL}/Users/{existing_user.scim_id}" + url = f"{app.scim_url}Users/{existing_user.scim_id}" response = requests.delete(url, headers=scim_headers) logging.info(f"SCIM http status: {response.status_code}") if response.status_code == 204: db.session.delete(app_role) db.session.commit() - return else: logging.info(f"Error returned by SCIM deletion: {response.content}") + raise ProvisionError("App cannot delete user via SCIM.") + return # Get the related user object - logging.info(f"Info: Getting user data from Kratos.") - kratos_user = KratosUser(self.kratos_identity_api, app_role.user_id) + logging.info(f"Getting user data from Kratos.") + try: + kratos_user = KratosUser(self.kratos_identity_api, app_role.user_id) + except BackendError as e: + if isinstance(e.upstream_exception, ory_kratos_client.exceptions.NotFoundException): + app_role.provision_status = ProvisionStatus.Orphaned + app_role.last_provision_message = "Cannot provision because this Stackspin user does not exist (anymore)." + db.session.commit() + return + else: + raise if app_role.role_id == Role.NO_ACCESS_ROLE_ID: active = False else: @@ -171,26 +207,47 @@ class Provision: 'schemas': ['urn:ietf:params:scim:schemas:core:2.0:User'], 'externalId': app_role.user_id, 'active': active, - # Sadly Nextcloud doesn't allow changing the userName, so we set it - # to something unique and stable. - # https://github.com/nextcloud/server/issues/5488 - # We add the `stackspin-` prefix to make this compatible with the - # username generated by the sociallogin (SSO) Nextcloud app. - 'userName': f"stackspin-{app_role.user_id}", + # Zulip does not read the `emails` property, instead getting the + # email from the `userName` property. 
+ 'userName': kratos_user.email, 'displayName': kratos_user.name, 'emails': [{ 'value': kratos_user.email, 'primary': True }], + 'name': { + 'formatted': kratos_user.name, + }, } + + # Make some app-specific additions and modifications to the SCIM data. + if app.slug == 'nextcloud': + # Sadly Nextcloud doesn't allow changing the userName, so we set it + # to something unique and stable. + # https://github.com/nextcloud/server/issues/5488 + # We add the `stackspin-` prefix to make this compatible with the + # username generated by the sociallogin (SSO) Nextcloud app. + data['userName'] = f"stackspin-{app_role.user_id}" + if app.slug == 'zulip': + # Zulip does not accept an empty formatted name. + if kratos_user.name is None or kratos_user.name == '': + data['name']['formatted'] = " " + # Zulip doesn't support SCIM user groups, but we can set the user + # role as a field on the user object. + if app_role.role_id == Role.ADMIN_ROLE_ID: + data['role'] = 'owner' + else: + data['role'] = 'member' + + # Now format the URL and make the SCIM request. if existing_user is None: - url = f"{config.SCIM_URL}/Users" + url = f"{app.scim_url}/Users" response = requests.post(url, headers=scim_headers, json=data) + logging.info(f"Post SCIM user: {url} with data: {data} getting status: {response.status_code}") else: - url = f"{config.SCIM_URL}/Users/{existing_user.scim_id}" + url = f"{app.scim_url}/Users/{existing_user.scim_id}" response = requests.put(url, headers=scim_headers, json=data) - logging.info(f"SCIM url: {url}") - logging.info(f"SCIM http status: {response.status_code}") + logging.info(f"Put SCIM user: {url} with data: {data} getting status: {response.status_code}") try: response_json = response.json() except json.decoder.JSONDecodeError as e: @@ -198,67 +255,126 @@ class Provision: logging.info(response.content) raise ProvisionError("App returned non-json data in SCIM user put/post.") logging.info(f"got: {response_json}") - # {'schemas': ['urn:ietf:params:scim:schemas:core:2.0:User'], 'id': - # 'Greenhostie', 'externalId': '316cbd5c-7b69-4a27-8a3b-96b3ec056e99', - # 'meta': None, 'userName': 'Greenhostie', 'name': {'formatted': - # 'Greenhostie', 'familyName': None, 'givenName': None, 'middleName': None, - # 'honorificPrefix': None, 'honorificSuffix': None}, 'displayName': - # 'Greenhostie', 'nickName': None, 'profileUrl': None, 'title': None, - # 'userType': None, 'preferredLanguage': None, 'locale': None, 'timezone': - # None, 'active': True, 'password': None, 'emails': [{'type': None, - # 'primary': True, 'display': None, 'value': 'arie+scim@greenhost.nl', - # '$ref': None}], 'phoneNumbers': None, 'ims': None, 'photos': None, - # 'addresses': None, 'groups': None, 'entitlements': None, 'roles': None, - # 'x509Certificates': None} + if existing_user is None: + # Because this is a new user for the app, we should read off its + # SCIM ID and store that in the Stackspin database. 
+ app_role.scim_id = response_json['id'] + db.session.commit() user = User(app_role.user_id, response_json['id'], kratos_user.name) - if app_role.role_id == Role.ADMIN_ROLE_ID: - logging.info(f"Adding user to admin group: {user.displayName} ({user.kratos_id})") - admin_group.add_member(user) - else: - logging.info(f"Removing user from admin group: {user.displayName} ({user.kratos_id})") - admin_group.remove_member(user) - logging.info("After adding/removing user:") - admin_group.debug() + if app.scim_group_support: + if app_role.role_id == Role.ADMIN_ROLE_ID: + logging.info(f"Adding user to admin group: {user.displayName} ({user.kratos_id})") + admin_group.add_member(user) + else: + logging.info(f"Removing user from admin group: {user.displayName} ({user.kratos_id})") + admin_group.remove_member(user) + logging.info("After adding/removing user:") + admin_group.debug() - def _get_existing_users(self): - logging.info(f"Info: Getting list of current users from app via SCIM.") - url = f"{config.SCIM_URL}/Users" - scim_headers = { - 'Authorization': 'Bearer ' + config.SCIM_TOKEN - } - response = requests.get(url, headers=scim_headers) - logging.info(f"SCIM http status: {response.status_code}") - try: - response_json = response.json() - except json.decoder.JSONDecodeError as e: - logging.info("SCIM result was not json") - logging.info(response.content) - raise ProvisionError("Failed to get existing users from Nextcloud") - # logging.info(f"got: {response_json}") + app_role.status = ProvisionStatus.Provisioned + app_role.status.last_provision_message = "User successfully provisioned." + db.session.commit() + return + + def _scim_list_users(self, app): + logging.info(f"Info: Getting list of current users from {app.slug} via SCIM.") + # SCIM prescribes a 1-based index. + startIndex = 1 + # Get this many items per request. The application might further reduce + # this number so we can't count on it. + count = 100 + users = [] + # Track how many users we've received thus far so we know when to stop. + running_total = 0 + while True: + url = f"{app.scim_url}Users?count={count}&startIndex={startIndex}" + scim_headers = { + 'Authorization': 'Bearer ' + app.scim_token + } + response = requests.get(url, headers=scim_headers) + logging.info(f"SCIM http status: {response.status_code}") + try: + response_json = response.json() + except json.decoder.JSONDecodeError as e: + logging.info("SCIM result was not json") + logging.info(response.content) + raise ProvisionError(f"Failed to get existing users from {app.slug}") + new_users = response_json['Resources'] + users = users + new_users + added = len(new_users) + running_total = running_total + added + if running_total == response_json['totalResults'] or added == 0: + # We've got them all. + logging.info(f"All existing users for {app.slug}: {users}") + return users + else: + startIndex = startIndex + added + + def _get_existing_users(self, app): + scim_users = self._scim_list_users(app) # Make a dictionary of the users, using their externalId as key, which # is the kratos user ID. users = {} - for u in response_json['Resources']: - kratos_id = u['externalId'] + for u in scim_users: + kratos_id = u.get('externalId') if not kratos_id: logging.info(f"Got user without externalId: {u}") - # Users that were created in Nextcloud by SSO, before SCIM was - # introduced in Stackspin, will not have `externalId` set, so - # we get the Kratos ID from the `id` attribute instead. 
- if u['id'].startswith('stackspin-'): - kratos_id = u['id'][len('stackspin-'):] + # Users that were created just-in-time when logging in to the + # app will not have `externalId` set, so we attempt to look up + # the user from our Stackspin database based on the app ID and + # SCIM ID. + app_role = db.session.query(AppRole).filter_by( + app_id=app.id, + scim_id=u['id'] + ).first() + if app_role is None: + logging.info(f" SCIM ID {u['id']} not listed in database.") + # We can't find this app user in our Stackspin database, at + # least based on the SCIM ID. It could be that it was + # created before the introduction of SCIM, or was created + # on-the-fly on login by the app before SCIM got a chance + # to create it. To cover that case, we try to find the + # matching Stackspin user by email address. + try: + if app.slug == 'zulip': + email_address = u['userName'] + else: + email_address = u['emails'][0]['value'] + kratos_user = KratosUser.find_by_email(self.kratos_identity_api, email_address) + except KeyError: + # The `emails` field is not set, so give up. + kratos_user = None + except IndexError: + # The list of email addresses is empty, so give up. + kratos_user = None + if kratos_user is None: + # This user is not known at all by Stackspin, so + # we'll ignore it. + logging.info(f" SCIM user unknown, ignoring.") + continue + # We found the user based on email address. We'll + # store the SCIM ID for this user in the Stackspin + # database so we don't need to do this email + # address matching again next time. + app_role = db.session.query(AppRole).filter_by( + app_id=app.id, + user_id=kratos_user.uuid + ).first() + if app_role is not None: + app_role.scim_id = u['id'] + db.session.commit() + logging.info(f" Stored SCIM ID {u['id']} for user {kratos_user.uuid} for app {app.slug}") + kratos_id = kratos_user.uuid else: - # This is a user that was not created by Stackspin (either - # SSO or SCIM), so we'll ignore it. 
- continue + kratos_id = app_role.user_id users[kratos_id] = User(kratos_id, u['id'], u['displayName']) return users - def _get_existing_groups(self): - logging.info(f"Info: Getting list of current groups from Nextcloud via SCIM.") - url = f"{config.SCIM_URL}/Groups" + def _get_existing_groups(self, app): + logging.info(f"Info: Getting list of current groups from {app.slug} via SCIM.") + url = f"{app.scim_url}/Groups" scim_headers = { - 'Authorization': 'Bearer ' + config.SCIM_TOKEN + 'Authorization': 'Bearer ' + app.scim_token } response = requests.get(url, headers=scim_headers) logging.info(f"SCIM http status: {response.status_code}") @@ -267,7 +383,7 @@ class Provision: except json.decoder.JSONDecodeError as e: logging.info("SCIM result was not json") logging.info(response.content) - raise ProvisionError("Failed to get existing groups from Nextcloud") + raise ProvisionError("Failed to get existing groups from {app.slug}") logging.info(f"got: {response_json}") groups = {} for group in response_json['Resources']: @@ -278,21 +394,21 @@ class Provision: groups[group['id']] = Group(group['id'], group['displayName'], members) return groups - def _provision_group(self, group): + def _provision_group(self, group, app): logging.info(f"Reconciling group {group.scim_id}") scim_headers = { - 'Authorization': 'Bearer ' + config.SCIM_TOKEN + 'Authorization': 'Bearer ' + app.scim_token } member_data = [ - {'value': member.scim_id, 'display': member.displayName, '$ref': member.ref()} + {'value': member.scim_id, 'display': member.displayName, '$ref': member.ref(app.scim_url)} for _, member in group.members.items()] - logging.info(f"Will update admin group with member data {member_data}") + logging.info(f"Will update admin group for {app.slug} with member data {member_data}") data = { 'schemas': ['urn:ietf:params:scim:schemas:core:2.0:Group'], 'displayName': group.displayName, 'members': member_data, } - url = f"{config.SCIM_URL}/Groups/{group.scim_id}" + url = f"{app.scim_url}/Groups/{group.scim_id}" response = requests.put(url, headers=scim_headers, json=data) logging.info(f"SCIM http status: {response.status_code}") try: @@ -300,5 +416,5 @@ class Provision: except json.decoder.JSONDecodeError as e: logging.info("SCIM result was not json") logging.info(response.content) - raise ProvisionError("App returned non-json data in SCIM group put.") + raise ProvisionError("{app.slug} returned non-json data to SCIM group PUT.") logging.info(f"got: {response_json}") diff --git a/backend/helpers/threads.py b/backend/helpers/threads.py new file mode 100644 index 0000000000000000000000000000000000000000..5679b9863a96959b0601e6b296d491507f1c8d5c --- /dev/null +++ b/backend/helpers/threads.py @@ -0,0 +1,42 @@ +import functools +from posix_ipc import MessageQueue, O_CREAT, BusyError +import threading + +# Signal to provisioning loop that we want to provision now. +provisioning_queue = MessageQueue('/stackspin-dashboard-provision-queue', O_CREAT) + +def debounce(timeout: float): + def decorator(func): + @functools.wraps(func) + def wrapper(*args, **kwargs): + wrapper.func.cancel() + wrapper.func = threading.Timer(timeout, func, args, kwargs) + wrapper.func.start() + wrapper.func = threading.Timer(timeout, lambda: None) + return wrapper + return decorator + +@debounce(1) +def request_provision(): + try: + provisioning_queue.send("provision", timeout=0) + except BusyError: + # If we can't signal for some reason (queue limit reached), silently + # fail. 
+ pass + +def wait_provision(): + # We first wait until there's any message in the queue. + provisioning_queue.receive() + # After that, we check if there are any more messages, to prevent a couple + # of (long) provisioning runs to be done back-to-back in case of multiple + # provisioning requests. Note however that if a request comes in during the + # middle of a provisioning run, we still do another one right after to make + # sure we propagate the latest changes right away. + try: + while True: + # We read with zero timeout, so we get an exception right away if + # the queue is empty. + provisioning_queue.receive(timeout=0) + except BusyError: + pass diff --git a/backend/migrations/versions/267d280db490dd94_dynamic_scim_apps.py b/backend/migrations/versions/267d280db490dd94_dynamic_scim_apps.py new file mode 100644 index 0000000000000000000000000000000000000000..7bba4df76e5399d1a24c9c7d3a9e0215cafc5bf3 --- /dev/null +++ b/backend/migrations/versions/267d280db490dd94_dynamic_scim_apps.py @@ -0,0 +1,65 @@ +"""Extend SCIM support to dynamic apps + +Revision ID: 267d280db490 +Revises: 825262488cd9 +Create Date: 2024-04-12 11:49:00 + +""" +from alembic import op +import sqlalchemy as sa + +# revision identifiers, used by Alembic. +revision = '267d280db490' +down_revision = '825262488cd9' +branch_labels = None +depends_on = None + +def upgrade(): + op.add_column( + "app", + sa.Column( + "scim_url", + sa.Unicode(length=1024), + nullable=True + ), + ) + op.add_column( + "app", + sa.Column( + "scim_token", + sa.Unicode(length=1024), + nullable=True + ), + ) + op.add_column( + "app", + sa.Column( + "scim_group_support", + sa.Boolean(), + server_default='0', + nullable=False + ), + ) + # ID of user in app for SCIM purposes. The dashboard needs this so it can + # construct the SCIM URL to the app identifying the user. 
+ op.add_column( + "app_role", + sa.Column( + "scim_id", + sa.Unicode(length=256), + nullable=True + ), + ) + op.create_index( + "app_role__app_id__scim_id", + "app_role", + ["app_id", "scim_id"], + unique=False + ) + +def downgrade(): + op.drop_column("app", "scim_url") + op.drop_column("app", "scim_token") + op.drop_column("app", "scim_group_support") + op.drop_index("app_role__app_id__scim_id", "app_role") + op.drop_column("app_role", "scim_id") diff --git a/backend/requirements.txt b/backend/requirements.txt index 244e4e8b576dc4e66de300dfda2b6cc84ab89137..b06a97967da972a7109d0132353666c5ed723f5c 100644 --- a/backend/requirements.txt +++ b/backend/requirements.txt @@ -28,6 +28,7 @@ ory-kratos-client==1.0.0 ory-hydra-client==1.11.8 pathspec==0.9.0 platformdirs==2.5.1 +posix-ipc==1.1.1 pycparser==2.21 PyJWT==2.3.0 pymysql==1.0.2 diff --git a/backend/web/login/login.py b/backend/web/login/login.py index 6198d85140b5ff0b95f473dc9e361fa089f17bc1..c6bcd70792a3e983ec895e1340688f9bef109902 100644 --- a/backend/web/login/login.py +++ b/backend/web/login/login.py @@ -24,7 +24,7 @@ from database import db from helpers import KratosUser from config import * from web import web -from areas.apps import AppRole, App, OAuthClientApp +from areas.apps.models import AppRole, App, OAuthClientApp from areas.roles import RoleService from areas.roles.models import Role from areas.users.user_service import UserService diff --git a/deployment/helmchart/Chart.yaml b/deployment/helmchart/Chart.yaml index 1205fad396e74e43c8a3ebc3cb66e1dbd7ac61af..2167bf63f89278dbcbfb020c645bd94e889e695d 100644 --- a/deployment/helmchart/Chart.yaml +++ b/deployment/helmchart/Chart.yaml @@ -23,4 +23,4 @@ name: stackspin-dashboard sources: - https://open.greenhost.net/stackspin/dashboard/ - https://open.greenhost.net/stackspin/dashboard-backend/ -version: 1.9.2 +version: 1.9.3-scim-2 diff --git a/deployment/helmchart/templates/rbac/clusterrole.yaml b/deployment/helmchart/templates/rbac/clusterrole.yaml index b4b8874817640a3eb0b45c8b4672bae13df31944..b6a7857d7c47acb718a607f05bc2b59bd5bf0d14 100644 --- a/deployment/helmchart/templates/rbac/clusterrole.yaml +++ b/deployment/helmchart/templates/rbac/clusterrole.yaml @@ -22,6 +22,7 @@ rules: - get - patch - create + - watch - apiGroups: - helm.toolkit.fluxcd.io resources: @@ -46,6 +47,7 @@ rules: - configmaps verbs: - list + - watch - get - patch - delete diff --git a/deployment/helmchart/values.yaml b/deployment/helmchart/values.yaml index 62a9860260181407ba64511a2b7db7659dbd0128..d0607ba3aab5dba9aa719b2806ec1c955bdaf919 100644 --- a/deployment/helmchart/values.yaml +++ b/deployment/helmchart/values.yaml @@ -68,7 +68,7 @@ dashboard: image: registry: open.greenhost.net:4567 repository: stackspin/dashboard/dashboard - tag: 0.9.2 + tag: 201-extend-scim-to-support-zulip digest: "" ## Optionally specify an array of imagePullSecrets. ## Secrets must be manually created in the namespace. @@ -236,7 +236,7 @@ backend: image: registry: open.greenhost.net:4567 repository: stackspin/dashboard/dashboard-backend - tag: 0.9.2 + tag: 201-extend-scim-to-support-zulip digest: "" ## Optionally specify an array of imagePullSecrets. ## Secrets must be manually created in the namespace. 
@@ -723,7 +723,7 @@ tests: image: registry: open.greenhost.net:4567 repository: stackspin/dashboard/cypress-test - tag: 0.9.2 + tag: 201-extend-scim-to-support-zulip pullPolicy: IfNotPresent credentials: user: "" diff --git a/dev.sh b/dev.sh index 486097c783e0b79fe476c1a923b97fd9e5201232..38591317eb5b0cce65b96670ee3a8b7c615ff71f 100755 --- a/dev.sh +++ b/dev.sh @@ -114,6 +114,15 @@ runBackend() { esac } +runBackendShell() { + echo "Running shell in the local dashboard backend environment." + pushd backend > /dev/null + source venv/bin/activate + env $(xargs <backend.env) bash + deactivate + popd > /dev/null +} + runFrontend() { echo "Running dashboard frontend locally and connecting to cluster." case $mode in @@ -172,6 +181,12 @@ then exit 0 fi +if [ "$1" == "backend-shell" ] +then + runBackendShell + exit 0 +fi + if [ $# -eq 1 ] then mode="native"
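
Note on configuration (not part of the patch above): `populate_scim_config` reads one secret per app, named `stackspin-scim-<slug>`, from the `flux-system` namespace, with keys `scim_url` (in which `$BASE`/`${BASE}` is substituted with the app's base URL), `scim_token`, and optionally `scim_jwt` and `scim_group_support`; `watch_dashboard_config` only watches secrets labelled `stackspin.net/scim-config=1`. The sketch below, using the Kubernetes Python client the backend already depends on, shows what such a secret could look like. The secret name pattern, label and keys come from the patch; the app slug, SCIM path and token value are placeholders, not values defined by Stackspin.

# Illustrative sketch only: creates a per-app SCIM config secret of the shape
# read by cluster_config.populate_scim_config() and picked up by
# helpers.kubernetes.watch_dashboard_config(). The slug "zulip", the SCIM path
# and the token value are placeholders.
from kubernetes import client, config

config.load_kube_config()  # use config.load_incluster_config() when running in-cluster

secret = client.V1Secret(
    metadata=client.V1ObjectMeta(
        name="stackspin-scim-zulip",  # stackspin-scim-<app slug>
        namespace="flux-system",
        # The dashboard's secret watch selects only this label.
        labels={"stackspin.net/scim-config": "1"},
    ),
    # string_data is stored base64-encoded by the API server;
    # populate_scim_config() base64-decodes the values it reads back.
    string_data={
        # "$BASE" or "${BASE}" is replaced by the app's base URL; a trailing
        # slash matches the f"{app.scim_url}Users/..." URL construction.
        "scim_url": "${BASE}/scim/v2/",
        "scim_token": "REPLACE_WITH_BEARER_TOKEN",
        # Optional: "nextcloud" makes the dashboard wrap the token in a JWT
        # with {"sub": "admin"}.
        # "scim_jwt": "nextcloud",
        # Optional: whether the app supports SCIM groups; defaults to false.
        "scim_group_support": "false",
    },
)

client.CoreV1Api().create_namespaced_secret(namespace="flux-system", body=secret)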
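
Similarly, `_populate_apps_from` reads app entries from the `stackspin-apps` and `stackspin-apps-custom` configmaps in `flux-system`: each key is an app slug, and the value may contain YAML with the optional `name`, `external` and `url` properties. A sketch of registering a custom external app that way, with made-up values:

# Illustrative sketch only: the configmap name and namespace come from the
# patch; the "wiki" slug, display name and URL are invented examples.
import yaml
from kubernetes import client, config

config.load_kube_config()

configmap = client.V1ConfigMap(
    metadata=client.V1ObjectMeta(
        name="stackspin-apps-custom",
        namespace="flux-system",
    ),
    data={
        # Key: app slug. Value: YAML with the optional `name`, `external`
        # and `url` properties understood by _populate_apps_from().
        "wiki": yaml.dump({
            "name": "Team wiki",
            "external": True,
            "url": "https://wiki.example.org",
        }),
    },
)

client.CoreV1Api().create_namespaced_config_map(namespace="flux-system", body=configmap)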