# app.py — Stackspin dashboard backend: Flask application entry point.
from apscheduler.schedulers.background import BackgroundScheduler
from flask import Flask, jsonify
from flask_cors import CORS
from flask_jwt_extended import JWTManager
import flask_migrate
from jsonschema.exceptions import ValidationError
from NamedAtomicLock import NamedAtomicLock
import threading
import traceback
from werkzeug.exceptions import BadRequest

# These imports are required
from areas import api_v1
from cliapp import cli
from web import web

from areas import users
from areas.apps.apps import *
from areas import auth
from areas import resources
from areas import roles
from areas import tags
from cliapp import cliapp
import config
import helpers.kubernetes
import helpers.provision
import helpers.threads
from web import login

from database import db

from helpers import (
    BadRequest,
    KratosError,
    HydraError,
    Unauthorized,
    bad_request_error,
    validation_error,
    kratos_error,
    global_error,
    hydra_error,
    unauthorized_error,
)

import cluster_config
from config import *
import logging
import migration_reset
import os
import sys

# Configure logging.
# Fall back to INFO when config.LOG_LEVEL is unset or empty.
log_level = logging.getLevelName(config.LOG_LEVEL or 'INFO')
from logging.config import dictConfig
dictConfig({
    'version': 1,
    'formatters': {'default': {
        # Include logger name, source file and line number to ease debugging.
        'format': '[%(asctime)s] %(levelname)s in %(name)s (%(filename)s+%(lineno)d): %(message)s',
    }},
    'handlers': {'wsgi': {
        'class': 'logging.StreamHandler',
        # Send records to the stream Flask uses for WSGI errors.
        'stream': 'ext://flask.logging.wsgi_errors_stream',
        'formatter': 'default',
        'level': log_level,
    }},
    'root': {
        'handlers': ['wsgi'],
        'level': log_level,
    },
    # Loggers are created also by alembic, flask_migrate, etc. Without this
    # setting, those loggers seem to be ignored.
    'disable_existing_loggers': False,
})

app = Flask(__name__)

# Application configuration; these names come from `config` (star-imported).
app.config["SECRET_KEY"] = SECRET_KEY
app.config["SQLALCHEMY_DATABASE_URI"] = SQLALCHEMY_DATABASE_URI
# Ping pooled connections before use so stale ones are transparently replaced.
app.config["SQLALCHEMY_ENGINE_OPTIONS"] = {'pool_pre_ping': True}
app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = SQLALCHEMY_TRACK_MODIFICATIONS

# Enable cross-origin requests for the API.
cors = CORS(app)

db.init_app(app)

# Single shared Provision instance, used by the provisioning worker thread
# started in `init_routines`. Constructed inside an app context because its
# setup apparently needs one (e.g. for database access).
with app.app_context():
    provisioner = helpers.provision.Provision()

def init_routines():
    """Run one-time initialization for a dashboard instance.

    This performs, in order:
      1. a possible alembic migration-history reset (Stackspin 2.2 change),
      2. database migrations via flask_migrate,
      3. a watch on dashboard config secrets that reloads cluster config,
      4. a cron-like scheduler for periodic provisioning,
      5. a background worker thread that performs provisioning runs.

    Must run exactly once per instance; see the guard at module level below
    for how that is enforced in production vs. development mode.
    """
    with app.app_context():
        # We have reset the alembic migration history at Stackspin version 2.2.
        # This checks whether we need to prepare the database to follow that
        # change.
        migration_reset.reset()
    app.logger.info("Loading flask_migrate.")
    # flask_migrate exits the program when it encounters an error, for example
    # when the version set in the database is not found in the
    # `migrations/versions` directory. We could prevent that by catching the
    # `SystemExit` exception here, but actually it's not safe to continue in
    # that case.
    flask_migrate.Migrate(app, db)
    app.logger.info("Attempting flask_migrate database upgrade.")
    try:
        with app.app_context():
            flask_migrate.upgrade()
    # TODO: actually flask_migrate.upgrade will catch any errors and
    # exit the program :/
    except Exception as e:
        app.logger.info(f"upgrade failed: {type(e)}: {e}")
        sys.exit(2)

    def reload():
        """Reload apps, oauthclients and SCIM config from cluster resources."""
        # We need this app context in order to talk to the database, which is
        # managed by flask-sqlalchemy, which assumes a flask app context.
        with app.app_context():
            app.logger.info("Reloading dashboard config from cluster resources.")
            # Load the list of apps from a configmap and store any missing ones
            # in the database.
            app_slugs = cluster_config.populate_apps()
            # Same for the list of oauthclients.
            cluster_config.populate_oauthclients()
            # Load per-app scim config if present.
            cluster_config.populate_scim_config(app_slugs)

    # We could call `reload` here manually, but actually the watch also at its
    # start creates events for existing secrets so we don't need to.
    with app.app_context():
        # Set watch for dashboard SCIM config secrets. Any time those change,
        # we reload so we can do SCIM for newly installed apps.
        try:
            helpers.kubernetes.watch_dashboard_config(app, reload)
        except Exception as e:
            app.logger.error(f"Error watching dashboard config: {e}")

    # Set up a generic task scheduler (cron-like).
    scheduler = BackgroundScheduler()
    scheduler.start()
    # Add a job to run the provisioning reconciliation routine regularly.
    # We'll also run it when we make changes that should be propagated
    # immediately.
    scheduler.add_job(helpers.threads.request_provision, 'interval', id='provision', hours=24)

    # We'll run this in a separate thread so it can be done in the background.
    # We have this single "provisioning worker" so there will be only one
    # provisioning operation at a time. We use an Event to signal a
    # provisioning request.
    def provisioning_loop():
        """Wait for provisioning requests forever, reconciling on each one."""
        while True:
            app.logger.info("Waiting for next provisioning run.")
            helpers.threads.wait_provision()
            app.logger.info("Starting provisioning.")
            with app.app_context():
                try:
                    provisioner.reconcile()
                except Exception:
                    # A failed run should not kill the worker; log and wait
                    # for the next request. (`warning`, not the deprecated
                    # `warn` alias.)
                    app.logger.warning("Exception during user provisioning:")
                    app.logger.warning(traceback.format_exc())

    threading.Thread(target=provisioning_loop).start()

# `init_routines` should only run once per dashboard instance. To enforce this
# we have different behaviour for production and development mode:
# * We have "preload" on for gunicorn, so this file is loaded only once, before
#   workers are forked (production).
# * We make sure that in development mode we run this only once, even though
#   this file is loaded twice by flask for some reason.
if RUN_BY_GUNICORN:
    logging.info("Running initialization code (production mode).")
    init_routines()
else:
    # WERKZEUG_RUN_MAIN is presumably set by werkzeug's reloader in the child
    # process that actually serves requests — TODO confirm against the
    # werkzeug version in use.
    logging.info("WERKZEUG_RUN_MAIN: {}".format(os.environ.get("WERKZEUG_RUN_MAIN", "unset")))
    if os.environ.get("WERKZEUG_RUN_MAIN") == "true":
        logging.info("Running initialization code (dev mode).")
        init_routines()
    else:
        logging.info("Not running initialization code (dev or cli mode).")
        # This should not perform any actual migration, just load the
        # flask_migrate extension so we can use `flask db` commands from the
        # cli.
        flask_migrate.Migrate(app, db)

# Now that we've done all database interactions in the initialisation routines,
# we need to drop all connections to the database to prevent those from being
# shared among different worker processes.
logging.info("Disposing of database connections.")
with app.app_context():
    db.engine.dispose()

# Register URL routes: JSON API, browser-facing pages, and CLI blueprint.
app.register_blueprint(api_v1)
app.register_blueprint(web)
app.register_blueprint(cli)

# Error handlers
# Map exception types to their handlers; `global_error` is the catch-all for
# any otherwise unhandled Exception.
# NOTE(review): `BadRequest` here resolves to the one from `helpers` (the
# `from helpers import ...` block shadows the earlier werkzeug import).
app.register_error_handler(Exception, global_error)
app.register_error_handler(BadRequest, bad_request_error)
app.register_error_handler(ValidationError, validation_error)
app.register_error_handler(KratosError, kratos_error)
app.register_error_handler(HydraError, hydra_error)
app.register_error_handler(Unauthorized, unauthorized_error)

# JWT-based authentication for API endpoints.
jwt = JWTManager(app)

# Respond with a generic 401 whenever a JWT is missing, invalid or expired,
# so clients get a uniform "Unauthorized" answer in all three cases.
@jwt.invalid_token_loader
@jwt.unauthorized_loader
@jwt.expired_token_loader
def expired_token_callback(*args):
    """Return a JSON 401 response for any JWT authentication failure."""
    payload = {"errorMessage": "Unauthorized"}
    return jsonify(payload), 401

@app.route("/")
def index():
    """Landing endpoint: identify the API by name and version."""
    banner = "Stackspin API v1.0"
    return banner