# Standard library.
import logging
import os
import sys
import threading
import traceback

# Third party.
from apscheduler.schedulers.background import BackgroundScheduler
from flask import Flask, jsonify
from flask_cors import CORS
from flask_jwt_extended import JWTManager
import flask_migrate
from jsonschema.exceptions import ValidationError
from NamedAtomicLock import NamedAtomicLock
from werkzeug.exceptions import BadRequest

# Local. Some of these imports are required only for their side effects
# (blueprint/route registration).
from areas import api_v1
from areas import auth
from areas import resources
from areas import tags
from areas import users
from areas.apps.apps import *
from cliapp import cli
from cliapp import cliapp
import cluster_config
import config
from config import *
from database import db
import helpers.kubernetes
import helpers.provision
import helpers.threads
# NOTE(review): `helpers.BadRequest` shadows `werkzeug.exceptions.BadRequest`
# imported above; the error-handler registration below therefore binds the
# helpers variant. Confirm this is intentional.
from helpers import (
    BadRequest,
    KratosError,
    HydraError,
    Unauthorized,
    bad_request_error,
    validation_error,
    kratos_error,
    global_error,
    hydra_error,
    unauthorized_error,
)
import migration_reset
from web import login
from web import web

# Configure logging.
log_level = logging.getLevelName(config.LOG_LEVEL or 'INFO')
from logging.config import dictConfig
dictConfig({
    'version': 1,
    'formatters': {'default': {
        'format': '[%(asctime)s] %(levelname)s in %(name)s (%(filename)s+%(lineno)d): %(message)s',
    }},
    'handlers': {'wsgi': {
        'class': 'logging.StreamHandler',
        'stream': 'ext://flask.logging.wsgi_errors_stream',
        'formatter': 'default',
        'level': log_level,
    }},
    'root': {
        'handlers': ['wsgi'],
        'level': log_level,
    },
    # Loggers are created also by alembic, flask_migrate, etc. Without this
    # setting, those loggers seem to be ignored.
    'disable_existing_loggers': False,
logging.getLogger("kubernetes.client.rest").setLevel(logging.WARNING)
Luka's avatar
Luka committed
app = Flask(__name__)
app.config["SECRET_KEY"] = SECRET_KEY
app.config["SQLALCHEMY_DATABASE_URI"] = SQLALCHEMY_DATABASE_URI
app.config["SQLALCHEMY_ENGINE_OPTIONS"] = {'pool_pre_ping': True}
Luka's avatar
Luka committed
app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = SQLALCHEMY_TRACK_MODIFICATIONS
Luka's avatar
Luka committed
cors = CORS(app)

db.init_app(app)
with app.app_context():
    provisioner = helpers.provision.Provision()

Arie Peterson's avatar
Arie Peterson committed
def init_routines():
    with app.app_context():
        # We have reset the alembic migration history at Stackspin version 2.2.
        # This checks whether we need to prepare the database to follow that
        # change.
        migration_reset.reset()
    app.logger.info("Loading flask_migrate.")
    # flask_migrate exits the program when it encounters an error, for example
    # when the version set in the database is not found in the
    # `migrations/versions` directory. We could prevent that by catching the
    # `SystemExit` exception here, but actually it's not safe to continue in
    # that case.
    flask_migrate.Migrate(app, db)
    app.logger.info("Attempting flask_migrate database upgrade.")
    try:
        with app.app_context():
            flask_migrate.upgrade()
    # TODO: actually flask_migrate.upgrade will catch any errors and
    # exit the program :/
    except Exception as e:
        app.logger.info(f"upgrade failed: {type(e)}: {e}")
        sys.exit(2)

    def reload():
        # We need this app context in order to talk the database, which is managed by
        # flask-sqlalchemy, which assumes a flask app context.
        with app.app_context():
            app.logger.info("Reloading dashboard config from cluster resources.")
            # Load the list of apps from a configmap and store any missing ones in the
            # database.
            app_slugs = cluster_config.populate_apps()
            # Same for the list of oauthclients.
            cluster_config.populate_oauthclients()
            # Load per-app scim config if present.
            cluster_config.populate_scim_config(app_slugs)
    # We could call `reload` here manually, but actually the watch also at its
    # start creates events for existing secrets so we don't need to.
    with app.app_context():
        # Set watch for dashboard SCIM config secrets. Any time those change,
        # we reload so we can do SCIM for newly installed apps.
        try:
            helpers.kubernetes.watch_dashboard_config(app, reload)
        except Exception as e:
            app.logger.error(f"Error watching dashboard config: {e}")
    # Set up a generic task scheduler (cron-like).
    scheduler = BackgroundScheduler()
    scheduler.start()
    # Add a job to run the provisioning reconciliation routine regularly.
    # We'll also run it when we make changes that should be propagated
    # immediately.
    scheduler.add_job(helpers.threads.request_provision, 'interval', id='provision', hours=24)
    # We'll run this in a separate thread so it can be done in the background.
    # We have this single "provisioning worker" so there will be only one
    # provisioning operation at a time. We use an Event to signal a
    # provisioning request.
    def provisioning_loop():
            app.logger.info("Waiting for next provisioning run.")
            # helpers.threads.provision_event.wait()
            # helpers.threads.provision_event.clear()
            helpers.threads.wait_provision()
            app.logger.info("Starting provisioning.")
            with app.app_context():
                try:
                    provisioner.reconcile()
                except Exception as e:
                    app.logger.warn(f"Exception during user provisioning:")
                    app.logger.warn(traceback.format_exc())
    threading.Thread(target=provisioning_loop).start()
# `init_routines` should only run once per dashboard instance. To enforce this
# we have different behaviour for production and development mode:
# * We have "preload" on for gunicorn, so this file is loaded only once, before
#   workers are forked (production).
# * We make sure that in development mode we run this only once, even though
#   this file is loaded twice by flask for some reason.
if RUN_BY_GUNICORN:
    logging.info("Running initialization code (production mode).")
    init_routines()
else:
Arie Peterson's avatar
Arie Peterson committed
    logging.info("WERKZEUG_RUN_MAIN: {}".format(os.environ.get("WERKZEUG_RUN_MAIN", "unset")))
    if os.environ.get("WERKZEUG_RUN_MAIN") == "true":
        logging.info("Running initialization code (dev mode).")
Arie Peterson's avatar
Arie Peterson committed
        init_routines()
        logging.info("Not running initialization code (dev or cli mode).")
        # This should not perform any actual migration, just load the
        # flask_migrate extension so we can use `flask db` commands from the
        # cli.
        flask_migrate.Migrate(app, db)
# Now that we've done all database interactions in the initialisation routines,
# we need to drop all connections to the database to prevent those from being
# shared among different worker processes.
logging.info("Disposing of database connections.")
with app.app_context():
    db.engine.dispose()

Luka's avatar
Luka committed
app.register_blueprint(api_v1)
app.register_blueprint(web)
Mart van Santen's avatar
Mart van Santen committed
app.register_blueprint(cli)
Luka's avatar
Luka committed

# Error handlers
app.register_error_handler(Exception, global_error)
app.register_error_handler(BadRequest, bad_request_error)
app.register_error_handler(ValidationError, validation_error)
app.register_error_handler(KratosError, kratos_error)
Luka's avatar
Luka committed
app.register_error_handler(HydraError, hydra_error)
Davor's avatar
Davor committed
app.register_error_handler(Unauthorized, unauthorized_error)
Luka's avatar
Luka committed
jwt = JWTManager(app)

# When token is not valid or missing handler
@jwt.invalid_token_loader
def invalid_token_callback(reason):
    logging.info(f"Invalid token: {reason}.")
    return jsonify({"errorMessage": "Unauthorized (invalid token)"}), 401

Luka's avatar
Luka committed
@jwt.unauthorized_loader
def unauthorized_callback(reason):
    logging.info(f"No token: {reason}.")
    return jsonify({"errorMessage": "Unauthorized (no token)"}), 401

Luka's avatar
Luka committed
@jwt.expired_token_loader
def expired_token_callback(reason):
    logging.info(f"Expired token: {reason}.")
    return jsonify({"errorMessage": "Unauthorized (expired token)"}), 401
Luka's avatar
Luka committed

Luka's avatar
Luka committed
def index():
Varac's avatar
Varac committed
    return "Stackspin API v1.0"