from apscheduler.schedulers.background import BackgroundScheduler
from flask import Flask, jsonify
from flask_jwt_extended import JWTManager
from jsonschema.exceptions import ValidationError
from NamedAtomicLock import NamedAtomicLock
import flask_migrate
import logging
import os
import sys
import threading
import traceback
from werkzeug.exceptions import BadRequest

# These imports are required. The local module paths below are assumed from
# how the names are used further down in this file.
from areas import roles
import cluster_config
import config
from config import *
import helpers.kubernetes
import helpers.provision
import helpers.threads
import migration_reset
from database import db  # assumed home of the flask-sqlalchemy instance
from helpers import (
    BadRequest,  # shadows werkzeug's BadRequest imported above
    KratosError,
    bad_request_error,
    validation_error,
    kratos_error,
    global_error,
)
log_level = logging.getLevelName(config.LOG_LEVEL or 'INFO')

from logging.config import dictConfig

dictConfig({
    'version': 1,
    'formatters': {'default': {
        'format': '[%(asctime)s] %(levelname)s in %(name)s (%(filename)s+%(lineno)d): %(message)s',
    }},
    'handlers': {'wsgi': {
        'class': 'logging.StreamHandler',
        'stream': 'ext://flask.logging.wsgi_errors_stream',
        'formatter': 'default',
    }},
    'root': {
        'level': log_level,
        'handlers': ['wsgi'],
    },
    # Loggers are created also by alembic, flask_migrate, etc. Without this
    # setting, those loggers seem to be ignored.
    'disable_existing_loggers': False,
})

# Reduce noise from the kubernetes API client by only logging warnings and errors.
logging.getLogger("kubernetes.client.rest").setLevel(logging.WARNING)
app.config["SECRET_KEY"] = SECRET_KEY
app.config["SQLALCHEMY_DATABASE_URI"] = SQLALCHEMY_DATABASE_URI
app.config["SQLALCHEMY_ENGINE_OPTIONS"] = {'pool_pre_ping': True}
app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = SQLALCHEMY_TRACK_MODIFICATIONS
with app.app_context():
    provisioner = helpers.provision.Provision()
def init_routines():
    with app.app_context():
        # We have reset the alembic migration history at Stackspin version 2.2.
        # This checks whether we need to prepare the database to follow that
        # change.
        migration_reset.reset()

    app.logger.info("Loading flask_migrate.")
    # flask_migrate exits the program when it encounters an error, for example
    # when the version set in the database is not found in the
    # `migrations/versions` directory. We could prevent that by catching the
    # `SystemExit` exception here, but actually it's not safe to continue in
    # that case.
    flask_migrate.Migrate(app, db)

    app.logger.info("Attempting flask_migrate database upgrade.")
    try:
        with app.app_context():
            flask_migrate.upgrade()
            # TODO: actually flask_migrate.upgrade will catch any errors and
            # exit the program :/
    except Exception as e:
        app.logger.info(f"upgrade failed: {type(e)}: {e}")
        sys.exit(2)
    def reload():
        # We need this app context in order to talk to the database, which is
        # managed by flask-sqlalchemy, which assumes a flask app context.
        with app.app_context():
            app.logger.info("Reloading dashboard config from cluster resources.")
            # Load the list of apps from a configmap and store any missing ones
            # in the database.
            app_slugs = cluster_config.populate_apps()
            # Same for the list of oauthclients.
            cluster_config.populate_oauthclients()
            # Load per-app scim config if present.
            cluster_config.populate_scim_config(app_slugs)

    # We could call `reload` here manually, but actually the watch also at its
    # start creates events for existing secrets so we don't need to.

    with app.app_context():
        # Set watch for dashboard SCIM config secrets. Any time those change,
        # we reload so we can do SCIM for newly installed apps.
        try:
            helpers.kubernetes.watch_dashboard_config(app, reload)
        except Exception as e:
            app.logger.error(f"Error watching dashboard config: {e}")

    # Set up a generic task scheduler (cron-like).
    scheduler = BackgroundScheduler()
    scheduler.start()
    # Add a job to run the provisioning reconciliation routine regularly.
    # We'll also run it when we make changes that should be propagated
    # immediately.
    scheduler.add_job(helpers.threads.request_provision, 'interval', id='provision', hours=24)
    # We'll run this in a separate thread so it can be done in the background.
    # We have this single "provisioning worker" so there will be only one
    # provisioning operation at a time. We use an Event to signal a
    # provisioning request.
    def provisioning_loop():
        while True:
            app.logger.info("Waiting for next provisioning run.")
            # helpers.threads.provision_event.wait()
            # helpers.threads.provision_event.clear()
            helpers.threads.wait_provision()
            app.logger.info("Starting provisioning.")
            with app.app_context():
                try:
                    provisioner.reconcile()
                except Exception:
                    app.logger.warning("Exception during user provisioning:")
                    app.logger.warning(traceback.format_exc())

    threading.Thread(target=provisioning_loop).start()
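# For orientation: the Event-based signalling described above suggests that
# helpers.threads looks roughly like the sketch below. This is an assumption
# based on the commented-out provision_event lines and on how
# request_provision/wait_provision are used in this file, not the actual
# implementation.
#
#   # helpers/threads.py (sketch)
#   import threading
#
#   provision_event = threading.Event()
#
#   def request_provision():
#       # Wake up the provisioning worker.
#       provision_event.set()
#
#   def wait_provision():
#       # Block until someone requests provisioning, then reset the flag so
#       # the next wait blocks again.
#       provision_event.wait()
#       provision_event.clear()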
# `init_routines` should only run once per dashboard instance. To enforce this
# we have different behaviour for production and development mode:
# * We have "preload" on for gunicorn, so this file is loaded only once, before
# workers are forked (production).
# * We make sure that in development mode we run this only once, even though
# this file is loaded twice by flask for some reason.
if RUN_BY_GUNICORN:
    logging.info("Running initialization code (production mode).")
    init_routines()
else:
    logging.info("WERKZEUG_RUN_MAIN: {}".format(os.environ.get("WERKZEUG_RUN_MAIN", "unset")))
    if os.environ.get("WERKZEUG_RUN_MAIN") == "true":
        logging.info("Running initialization code (dev mode).")
        init_routines()
    else:
        logging.info("Not running initialization code (dev or cli mode).")
        # This should not perform any actual migration, just load the
        # flask_migrate extension so we can use `flask db` commands from the
        # cli.
        flask_migrate.Migrate(app, db)
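# For orientation: the "run once" logic above assumes gunicorn is started with
# preloading enabled, so this module is imported a single time in the master
# process before workers are forked. A sketch of what that configuration might
# look like (illustrative, not the actual deployment config):
#
#   # gunicorn.conf.py (sketch)
#   preload_app = True
#   workers = 3
#   bind = "0.0.0.0:5000"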
# Now that we've done all database interactions in the initialisation routines,
# we need to drop all connections to the database to prevent those from being
# shared among different worker processes.
logging.info("Disposing of database connections.")
with app.app_context():
    db.engine.dispose()
# Error handlers
app.register_error_handler(Exception, global_error)
app.register_error_handler(BadRequest, bad_request_error)
app.register_error_handler(ValidationError, validation_error)
app.register_error_handler(KratosError, kratos_error)
jwt = JWTManager(app)
# Handlers for requests with an invalid, missing or expired token.
@jwt.invalid_token_loader
def invalid_token_callback(reason):
    logging.info(f"Invalid token: {reason}.")
    return jsonify({"errorMessage": "Unauthorized (invalid token)"}), 401

@jwt.unauthorized_loader
def unauthorized_callback(reason):
    logging.info(f"No token: {reason}.")
    return jsonify({"errorMessage": "Unauthorized (no token)"}), 401

@jwt.expired_token_loader
def expired_token_callback(jwt_header, jwt_payload):
    logging.info("Expired token.")
    return jsonify({"errorMessage": "Unauthorized (expired token)"}), 401
@app.route("/")