* text=auto
*.css linguist-vendored
*.scss linguist-vendored
*.js linguist-vendored
CHANGELOG.md export-ignore
.venv
.idea
.vscode
__pycache__
*.pyc
# See https://help.github.com/articles/ignoring-files/ for more about ignoring files.
# dependencies
/node_modules
/frontend/node_modules
/backend/venv
/backend/web/node_modules
/.pnp
.pnp.js
# testing
/coverage
# production
/build
# local environment
/frontend/local.env
/.vscode
# misc
.DS_Store
*.swp
.envrc
.direnv
run_app.local.sh
npm-debug.log*
yarn-debug.log*
yarn-error.log*
.eslintcache
cypress/videos/
# KUBECONFIG values
backend/kubeconfig/*
# Helm dependencies
deployment/helmchart/charts/
# Helm values
deployment/helmchart/values-local.yaml
include:
  - remote: https://open.greenhost.net/stackspin/stackspin/-/raw/main/.gitlab/ci_templates/helm_package.yml
  - template: 'Workflows/MergeRequest-Pipelines.gitlab-ci.yml'

stages:
  - build-container
  - build-project
  - build-image
  - lint-helm-chart
  - package-helm-chart
  - release-helm-chart

image: node:20-alpine

variables:
  KANIKO_BUILD_IMAGENAME: dashboard-backend
  CHART_NAME: stackspin-dashboard
  CHART_DIR: deployment/helmchart/

yarn:
  stage: build-project
  before_script: []
  script:
    - cd frontend
    - echo "Building app"
    - yarn install
    - echo "REACT_APP_API_URL=/api/v1" > .env
    - echo "EXTEND_ESLINT=true" >> .env
    - yarn build
    - mkdir docker
    - mv build docker/html
    - echo "Build successful"
  artifacts:
    expire_in: 1 hour
    name: web-build
    paths:
      - frontend/docker/html
build-container:
  stage: build-container

.kaniko-build:
  script:
    - cd ${DIRECTORY}
    - echo "{\"auths\":{\"$CI_REGISTRY\":{\"username\":\"$CI_REGISTRY_USER\",\"password\":\"$CI_REGISTRY_PASSWORD\"}}}" > /kaniko/.docker/config.json
    - export CONTAINER_TAG=${CI_COMMIT_TAG:-${CI_COMMIT_REF_SLUG}}
    - /kaniko/executor --cache=true --context ${CI_PROJECT_DIR}/${DIRECTORY} --destination ${CI_REGISTRY_IMAGE}/${KANIKO_BUILD_IMAGENAME}:${CONTAINER_TAG}

build-frontend-image:
  stage: build-image
  needs:
    - yarn
  image:
    # We need a shell to provide the registry credentials, so we need to use the
    # kaniko debug image (https://github.com/GoogleContainerTools/kaniko#debug-image)
    name: gcr.io/kaniko-project/executor:debug
    entrypoint: [""]
  script:
    - echo "{\"auths\":{\"$CI_REGISTRY\":{\"username\":\"$CI_REGISTRY_USER\",\"password\":\"$CI_REGISTRY_PASSWORD\"}}}" > /kaniko/.docker/config.json
    - /kaniko/executor --cache=true --context ${CI_PROJECT_DIR}/ --dockerfile ${CI_PROJECT_DIR}/Dockerfile --destination ${CI_REGISTRY_IMAGE}/${KANIKO_BUILD_IMAGENAME}:${CI_COMMIT_REF_SLUG}
  variables:
    KANIKO_BUILD_IMAGENAME: dashboard
    DIRECTORY: frontend/docker
  before_script:
    - cp deployment/Dockerfile $DIRECTORY
    - cp deployment/nginx.conf $DIRECTORY
  extends:
    .kaniko-build

build-backend-image:
  stage: build-image
  needs: []
  variables:
    KANIKO_BUILD_IMAGENAME: dashboard-backend
    DIRECTORY: backend
  image:
    # We need a shell to provide the registry credentials, so we need to use the
    # kaniko debug image (https://github.com/GoogleContainerTools/kaniko#debug-image)
    name: gcr.io/kaniko-project/executor:debug
    entrypoint: [""]
  extends:
    .kaniko-build

build-test-image:
  stage: build-image
  needs: []
  variables:
    KANIKO_BUILD_IMAGENAME: cypress-test
    DIRECTORY: tests
  image:
    # We need a shell to provide the registry credentials, so we need to use the
    # kaniko debug image (https://github.com/GoogleContainerTools/kaniko#debug-image)
    name: gcr.io/kaniko-project/executor:debug
    entrypoint: [""]
  extends:
    .kaniko-build
php:
  preset: laravel
  disabled:
    - unused_use
  finder:
    not-name:
      - index.php
      - server.php
js:
  finder:
    not-name:
      - webpack.mix.js
css: true
{}
# Changelog
## 0.14.0
- Introduce support for upstream OIDC. With some additional configuration, you
can use an external OIDC provider to allow "social sign-up/sign-in".
## 0.13.3
- Fix creating app roles for users created via the CLI.
## 0.13.2
- Update hydra client library to v2 and adapt to its changed API.
- Change the JWT identity claim: it is now strictly required to be a string,
whereas we previously stored a JSON object in it.
## 0.13.1
- Add the `cryptography` python library as dependency of the backend. This is
necessary for sha256_password and caching_sha2_password auth methods.
## 0.13.0
- Fix the initial recovery flow created automatically for new users, which was
broken by the kratos client lib upgrade.
- Add support for serving arbitrary files from the frontend pod, provided by a
persistent volume claim. Reorganize the assets to make it easier to include
custom icons this way.
- Add support for theming. Customizing colours, logo and background image is
now particularly easy, but mostly anything can be changed through a custom
stylesheet.
- Only show Stackspin version info to admin users.
## 0.12.4
- Prevent database connections from being shared between worker processes.
That sharing may cause intermittent database errors.
## 0.12.3
- Fix broken kratos hooks tracking last recovery and login times.
- Upgrade python to 3.13.
## 0.12.2
- Fix consent page for `ENFORCE_2FA` instances.
## 0.12.1
- Add back `jinja2-base64-filters` to backend for templating kustomizations
during app installation.
## 0.12.0
- Add basic support for WebAuthn as second-factor authentication.
- Only show app version numbers in the dashboard tiles to admin users.
- Do not serve Dockerfile and nginx config from frontend.
- Start renovating python dependencies of the backend. Upgrade many direct and
indirect dependencies.
## 0.11.1
- Fix password reset form in case no email address is pre-filled.
## 0.11.0
- Allow pre-filling user's email address in a link to the password (re)set
form. This is useful when creating new user accounts.
- Fix user provisioning after installing new apps.
## 0.10.5
- Look up users from Kratos by email address using the proper (new) API
mechanism for that, instead of iterating over all users.
- Compare email addresses case insensitively to deal with Stackspin apps
changing case of email address strings.
- Fix broken user accounts when created via the flask CLI.
- Replace slightly off-spec usage of `__repr__` by `__str__`.
## 0.10.4
- Disable Zulip accounts when deleting users, because Zulip doesn't allow
deleting accounts via SCIM.
## 0.10.3
- Fix setting successful provisioning status.
## 0.10.2
- Fine-tune logging levels, and introduce a new environment variable
`LOG_LEVEL` to set the log level at runtime.
- Track when a user's full name has been changed, and only include the name in
the SCIM provisioning call when it has changed, or for newly provisioned
users.
## 0.10.1
- Watch dashboard configmaps with lists of apps and oauthclients, and
reload config on changes. This also makes sure that we always load the config
at dashboard start-up, even when there are no (SCIM-supporting) apps
installed.
## 0.10.0
- Include new "System resources" module with basic stats.
- Implement basic (manual/static) SCIM functionality for automatic user provisioning.
- Implement dynamic (i.e., arbitrary apps) SCIM functionality, tested and
tailored for Nextcloud and Zulip.
- Upgrade to tailwind v3, and update several other javascript dependencies.
- Make info modals slightly wider, to make sure you can see the full contents
also for slightly larger fonts. In particular, this fixes a partially
invisible reset link.
- Add a CLI command for deleting older unused accounts.
- Add logo for Gitea.
## 0.9.2
- Fix saving user properties, which was broken because of the partial tags
implementation.
## 0.9.1
- Do not autocomplete totp input field.
- Allow removing user app roles from CLI.
## 0.9.0
- Improve user listing: show label for admin users, show last login and
password reset times, improved layout.
- Fix rare bug in frontend's idea of admin status in the face of custom apps.
- Prepare backend for user tags.
## 0.8.4
- Allow enforcing 2fa.
- Add button for admin users to reset 2FA of users. Also improve UX of this and
other dangerous operations in the user edit screen.
- Fix logout to include hydra post-logout.
- Do not show link to recovery on TOTP form.
- Fix css of demo sign-up.
- Upgrade to python 3.12.
## 0.8.3
- Introduce backend code for resetting 2FA, and add cli command for that.
- Upgrade Kratos api library `ory-kratos-client` to 1.0.0.
- Patch our usage of Kratos api pagination of identities list.
## 0.8.2
- End the Kratos session in prelogout. This makes sure that we end the "SSO
session" also when logging out from an app. We used to rely on hydra's
post-logout url to get at the kratos logout, but apps sometimes override that
url via an oidc parameter.
## 0.8.1
- Add a couple of attributes to our OIDC tokens to support our switch to
another Nextcloud app for OIDC.
## 0.8.0
- Add feature to easily edit app permissions for multiple users at once.
- Change the way secrets are created for apps, creating them in the stackspin
project (using an existing secrets controller). So remove support for
generating app secrets in the dashboard.
- Fix password reset when 2FA is enabled.
- Fix bug that all Wekan users get admin permissions in Wekan regardless of
role set in Stackspin.
- Enable "pre-ping" for all database connections managed by sqlalchemy in the
dashboard backend, hoping to eliminate or reduce dropped database
connections.
- Fix listing of Velero in app permissions when batch-creating users.
## 0.7.6
- Add Forgejo metadata for use as custom app.
## 0.7.5
- Add Jitsi and Mattermost metadata for use as custom apps.
## 0.7.4
- Make the sign-in UI less wide.
## 0.7.3
Only changes to the helm chart.
## 0.7.2
- Apply Stackspin styling to the login component. This covers the login pages,
recovery page, and profile/authentication settings.
## 0.7.1
- Load the flask_migrate flask extension in dev/cli mode so we may run `flask
db` commands from the cli again.
## 0.7.0
- Improve the UX of the dashboard tiles: adding help texts in modals, add a
status dropdown with version info, add alerts before and after automatic
upgrades, show greeting, show tag when logged in as admin user.
- Make sure we run the initialisation code in the backend only once per run,
both in development and production mode. Also, do not run the init code on
flask cli runs.
- Remember the active tab in the authentication settings when saving.
- No longer send emails to addresses that do not match an existing account.
This was fixed by upgrading Kratos; we're happy to see that the default
Kratos behaviour was changed in this regard.
## 0.6.7
Only changes to the helm chart.
## 0.6.6
Only changes to the helm chart.
## 0.6.5
- Further improve (error) message handling. In particular, show feedback when
saving profile settings. Some of the previous error message changes have been
reverted pending further consideration of the design.
- Disable changing the email address as this is not supported right now.
## 0.6.4
- Fix error messages that were not shown, in particular when providing wrong
credentials when logging in. We redesigned the error handling, considering
that these messages may be translated later on.
## 0.6.3
- Add support for Hedgedoc.
- Add a button for admins for creating a recovery link for a user.
- Automatically log in to dashboard if already authenticated.
- Redirect to the dashboard on successful login if no login redirect is set.
- Fix deletion of apps via the CLI.
- Add special features (sign-up form) for the Stackspin demo instance.
- Show the user UUID in user modal.
- Only show installed apps when configuring roles.
## 0.6.2
- Fix submit button label in the form for verifying your TOTP code.
## 0.6.1
- Add TOTP as second factor authentication. Please note that you'll need to set
a `backend.dashboardUrl` value instead of the old `backend.loginPanelUrl` one
-- typically dropping the `/web` suffix to get the new value.
- Create a new backend endpoint for providing some environment variables to the
frontend, with the URLs of the Kratos and Hydra APIs.
## 0.6.0
- Make it easier to add apps, by reading apps and oauthclients from configmaps
at startup.
- Reset alembic migration history.
## 0.5.2
- Fix login welcome message
- Clarify "set new password" button (#94)
- Show error messages when login fails, for example when a wrong password was
entered (#96)
- Fix access checking for monitoring (#105)
## 0.5.1
- Fix bug of missing "Monitoring" app access when creating a new user.
- Add Velero to the list of installable apps, but hide it from the dashboard
## 0.5.0
- Merge dashboard-backend repository into this repository, released as 0.5.0
FROM python:3.10-slim
RUN apt-get update
RUN apt-get install -y gcc
# make a local directory
RUN mkdir /app
# set "app" as the working directory from which CMD, RUN, ADD references
WORKDIR /app
# copy requirements.txt to /app
ADD requirements.txt .
# pip install the local requirements.txt
RUN pip install -r requirements.txt
# now copy all the files in this directory to /app
ADD . .
# Listen on port 5000 at runtime
EXPOSE 5000
# Define our command to be run when launching the container
CMD ["gunicorn", "app:app", "-b", "0.0.0.0:5000", "--workers", "4", "--reload", "--capture-output", "--enable-stdio-inheritance", "--log-level", "DEBUG"]
# Stackspin Dashboard
This repo hosts the Stackspin Dashboard, both frontend and backend code.
## Login application
## Project structure
### Frontend
The frontend code lives in the `frontend` directory.
### Backend
The backend code lives in the `backend` directory. Apart from the dashboard
backend itself, it also contains a flask application that functions as the
identity provider, login, consent and logout endpoints for the OpenID Connect
(OIDC) process.
The application relies on the following components:
- **Hydra**: Hydra is an open-source OIDC server.
  This means that applications can connect to Hydra to start a session with a user.
  Hydra provides the application with the username
  and other roles/claims for the application.
  Hydra is developed by Ory, which has security as one of its top priorities.
- **Kratos**: This is the identity manager,
  which contains all the user profiles and secrets (passwords).
  Kratos is designed to work mostly between the UI (browser) and Kratos directly,
  over a public API endpoint.
  Authentication, form validation, etc. are all handled by Kratos.
  Kratos only provides an API, not a UI.
  Kratos also provides an admin API,
  which is only used from the server-side flask app to create/delete users.
- **MariaDB**: The login application, as well as Hydra and Kratos, needs to store data.
  This is done in a MariaDB database server.
  There is one instance with three databases.
  As all databases are very small, we do not foresee resource limitation problems.
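As an illustration of that admin API, a user-creation call looks roughly like the sketch below. The `KRATOS_ADMIN_URL` variable and the `default` schema id are assumptions; in the dashboard this call is made from the flask backend through the Kratos client library rather than with curl.

```
# Sketch only: create an identity via the Kratos admin API.
# KRATOS_ADMIN_URL and the "default" schema id are assumptions; adjust to your setup.
curl -X POST "$KRATOS_ADMIN_URL/admin/identities" \
  -H "Content-Type: application/json" \
  -d '{"schema_id": "default", "traits": {"email": "user@example.org", "name": "Example User"}}'
```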
When Hydra encounters a new session/user, it has to know whether this user has access.
To do so, the user has to log in through a login application.
The login application has a UI component which relies heavily on JavaScript.
As this is a relatively small application,
it is based on traditional Bootstrap + jQuery.
# Development
To develop the Dashboard,
you need a Stackspin cluster that is set up as a development environment.
Follow the instructions [in the dashboard-dev-overrides
repository](https://open.greenhost.net/stackspin/dashboard-dev-overrides#dashboard-dev-overrides)
in order to set up a development-capable cluster.
The end-points for the Dashboard,
as well as Kratos and Hydra, will point to `http://stackspin_proxy:8081` in that cluster.
As a result, you can run components using the `docker-compose` file in
this repository, and still log into Stackspin applications that run on the cluster.
## Setting up the local development environment
After this process is finished, the following will run locally:
- The [dashboard](https://open.greenhost.net/stackspin/dashboard)
- The
[dashboard-backend](https://open.greenhost.net/stackspin/dashboard-backend)
The following will be available locally through a proxy and port-forwards:
- Hydra admin
- Kratos admin and public
- The MariaDB database connections
These need to be available locally, because Kratos wants to run on the same
domain as the front-end that serves the login interface.
### 1. Setup hosts file
The application will run on `http://stackspin_proxy`. Add the following line to
`/etc/hosts` to be able to access that from your browser:
```
127.0.0.1 stackspin_proxy
```
### 2. Kubernetes access
The script needs you to have access to the Kubernetes cluster that runs
Stackspin. Point the `KUBECONFIG` environment variable to a kubectl config. That
kubeconfig will be mounted inside docker containers, so also make sure your
Docker user can read it.
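For example (the kubeconfig path below is just a placeholder):

```
# Hypothetical example; substitute the path to your own kubeconfig.
export KUBECONFIG="$HOME/.kube/stackspin-dev.yaml"
# The file is mounted into the docker containers, so make sure it is readable there.
chmod +r "$KUBECONFIG"
```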
### 3. Run it all
Now, run this script that sets a few environment variables based on what is in
your cluster secrets, and starts `docker-compose` to start a reverse proxy as
well as the flask application in this repository.
```
./run_app.sh
```
### 4. Front-end development
Start the [dashboard front-end app](https://open.greenhost.net/stackspin/dashboard/#yarn-start).
## Development environment
The development environment is a hybrid one, where one or both of the dashboard
frontend and backend run locally, but the rest of the cluster runs on a remote
machine.
The remote should be a regular Stackspin cluster, though preferably one that's
dedicated to development purposes.
The local dashboard frontend and/or backend can run in a docker container or
directly ("native mode"). (At this time it's not possible to mix the two, for
example by having the dashboard backend run directly and the frontend in a
docker container.)
The connection between the local and remote parts is set up by a tool called
telepresence. If you want to develop the frontend for example, telepresence
intercepts traffic that goes into the remote's frontend pod and redirects it to
your copy that's running locally on your machine; responses from your local
frontend are led back via the remote. This interception happens invisibly to
your browser, which you just point at the remote cluster.
### Prerequisites
#### Set up telepresence on your local development machine
You need to do this once for every development machine you're using
(workstation, laptop).
* You need root on your machine: at some point telepresence will ask to perform
  actions as root, in order to make the network changes needed for the two-way
  tunnel. If this is not possible or not desirable, you can try to run your
  local dashboard in a docker container instead.
* Set `user_allow_other` in `/etc/fuse.conf` (see the sketch after this list).
  This is necessary when telepresence adds (FUSE-based) sshfs mounts so your
  local code can access volumes from the kubernetes cluster, in particular the
  one with the service account token (credentials for calling the kubernetes
  api), to let the dashboard interact with the cluster.
  - MacOS users may have to do a little extra work to get a working current
    sshfs: see the [telepresence
    docs](https://www.getambassador.io/docs/telepresence-oss/latest/troubleshooting#volume-mounts-are-not-working-on-macos).
* Download and install the telepresence binary on your development machine:
https://www.getambassador.io/docs/telepresence-oss/latest/install
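One way to apply the `user_allow_other` setting from the list above (just a sketch; you can also edit the file by hand):

```
grep -qxF user_allow_other /etc/fuse.conf || echo user_allow_other | sudo tee -a /etc/fuse.conf
```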
#### Access to development cluster
You need `kubectl` and `helm` binaries, and a `kubectl` configuration file
(often called "kubeconfig") containing credentials needed to authenticate
against your cluster. If the `KUBECONFIG` environment variable is set and
points to the config file, this will be picked up by the various programs.
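A quick way to check that the kubeconfig and binaries work is something like the following (the path is a placeholder; output depends on your cluster):

```
export KUBECONFIG=/path/to/your/kubeconfig.yaml
kubectl get nodes
helm list -A
```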
#### Set up telepresence on your development cluster
You need to do this once for every cluster you want to use as a development cluster.
* Install telepresence on your development cluster:
```
telepresence helm install -f telepresence-values.yaml
```
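To verify the installation, something like the following should show a running traffic manager (the namespace and exact output depend on your telepresence version, so this is only a sketch):

```
telepresence version
kubectl get pods -A | grep traffic-manager
```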
#### Install local dependencies
Before running the frontend in native mode:
* Make sure you have nodejs installed. You may want to use [Node Version
Manager](https://github.com/nvm-sh/nvm) to make it easy to install several
versions side by side.
* Install necessary javascript dependencies (will be placed in
`frontend/node_modules`) using `./dev.sh frontend setup`.
Before running the backend in native mode:
* Make sure you have python3 installed.
* Install necessary python dependencies (in a virtualenv in `backend/venv`)
using `./dev.sh backend setup`.
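Assuming you use nvm, a typical first-time setup could look like this (node 20 matches the image used in CI, but any recent version should do):

```
nvm install 20 && nvm use 20
./dev.sh frontend setup
./dev.sh backend setup
```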
### Run
From the root `dashboard` directory, run for example `./dev.sh frontend`. This
will set up the telepresence tunnel to the cluster, and start the dashboard
frontend server in native mode. `./dev.sh backend` will do the same but for the
backend. You can run both at the same time (in separate terminal windows) if
you want to make changes to both frontend and backend.
If you want to run the local dashboard in docker instead, use `./dev.sh
frontend docker` and/or `./dev.sh backend docker`. Please note that due to a
telepresence limitation it's not currently possible to run the frontend
natively and the backend in docker at the same time, or vice versa.
#### Known issues
* Running the dashboard backend locally with telepresence in docker mode
currently doesn't work because of dns resolution issues in the docker
container: https://github.com/telepresenceio/telepresence/issues/1492 . We
could work around this by using a fully qualified domain name for the
database service -- which doesn't agree with the goal of making the stackspin
namespace variable -- or using the service env vars, but we're hoping that
telepresence will fix this in time.
* Telepresence intercepts traffic to a pod, but the original pod is still
running. In case of the backend, this is sometimes problematic, for example
when you're adding database migrations which the original pod then doesn't
know about and crashes, or with SCIM which involves timer-based actions which
are then performed both by your modified local instance and by the original
remote one. There is some work in progress to allow scaling down the
intercepted pod: https://github.com/telepresenceio/telepresence/issues/1608 .
* If telepresence is giving errors, in particular ones about "an intercept with
the same name already existing" on repeated runs, it may help to reset the
telepresence state by doing `./dev.sh reset`. This will stop the local
telepresence daemon so it can be cleanly restarted on the next try, and will
also restart the "traffic manager" on the remote so it will discard any old
lingering intercepts.
---
## Testing as a part of Stackspin
Sometimes you may want to make more fundamental changes to the dashboard that
might behave differently in the local development environment compared to a
regular Stackspin instance, i.e., one that's not a local/cluster hybrid. In
this case, you'll want to run your new version in a regular Stackspin cluster.
To do that:
* Push your work to an MR.
* Set the image tags in `values.yaml` to the one created for your branch; if
unsure, check the available tags in the Gitlab container registry for the
dashboard project.
* Make sure to increase the chart version number in `Chart.yaml`, preferably
with a suffix to denote that it's not a stable version. For example, if the
last stable release is 1.2.3, make the version 1.2.4-myawesomefeature in your
branch.
The CI pipeline should then publish your new chart version in the Gitlab helm
chart repo for the dashboard project, but in the `unstable` channel -- the
`stable` channel is reserved for chart versions that have been merged to the
`main` branch.
Once your package is published, use it by
1. changing the `spec.url` field of the `flux-system/dashboard`
`HelmRepository` object in the cluster where you want to run this, replacing
`stable` by `unstable`; and
2. changing the `spec.chart.spec.version` field of the `stackspin/dashboard`
`HelmRelease` to your chart version (the one from this chart's `Chart.yaml`).
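For example, assuming you have kubectl access to that cluster, both changes can be made by editing the objects named above:

```
# Change spec.url, replacing "stable" by "unstable":
kubectl -n flux-system edit helmrepository dashboard
# Set spec.chart.spec.version to your chart version:
kubectl -n stackspin edit helmrelease dashboard
```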
## Release process
To publish a new version of the helm chart:
1. Increase the docker image tag in `deployment/helmchart/values.yaml` so it uses the new tag (to be
created in a later step of this release).
2. Update the appVersion in `deployment/helmchart/Chart.yaml` to match that new tag version.
3. Increase the chart version in `deployment/helmchart/Chart.yaml`.
4. Update `CHANGELOG.md` and/or `deployment/helmchart/CHANGELOG.md` and check
that it includes relevant changes, including ones added by renovatebot.
5. Commit and push these changes to `main`.
6. Create a new git tag for the new release and push it to gitlab as well.
The last step will trigger a CI run that will package and publish the helm chart.
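As a rough sketch of the last two steps, with `0.x.y` standing in for the real version number (the exact tag naming may differ):

```
git commit -am "Release 0.x.y"
git push origin main
git tag 0.x.y
git push origin 0.x.y
```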
from flask import Flask, jsonify
from flask_cors import CORS
from flask_jwt_extended import JWTManager
from flask_migrate import Migrate
from jsonschema.exceptions import ValidationError
from werkzeug.exceptions import BadRequest
# These imports are required
from areas import api_v1
from cliapp import cli
from web import web
from areas import users
from areas import apps
from areas import auth
from areas import roles
from cliapp import cliapp
from web import login
from database import db
from helpers import (
    BadRequest,
    KratosError,
    HydraError,
    Unauthorized,
    bad_request_error,
    validation_error,
    kratos_error,
    global_error,
    hydra_error,
    unauthorized_error,
)
from config import *
import logging
app = Flask(__name__)
app.config["SECRET_KEY"] = SECRET_KEY
app.config["SQLALCHEMY_DATABASE_URI"] = SQLALCHEMY_DATABASE_URI
app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = SQLALCHEMY_TRACK_MODIFICATIONS
cors = CORS(app)
Migrate(app, db)
db.init_app(app)
app.logger.setLevel(logging.INFO)
app.register_blueprint(api_v1)
app.register_blueprint(web)
app.register_blueprint(cli)
# Error handlers
app.register_error_handler(Exception, global_error)
app.register_error_handler(BadRequest, bad_request_error)
app.register_error_handler(ValidationError, validation_error)
app.register_error_handler(KratosError, kratos_error)
app.register_error_handler(HydraError, hydra_error)
app.register_error_handler(Unauthorized, unauthorized_error)
jwt = JWTManager(app)
# When token is not valid or missing handler
@jwt.invalid_token_loader
@jwt.unauthorized_loader
@jwt.expired_token_loader
def expired_token_callback(*args):
    return jsonify({"errorMessage": "Unauthorized"}), 401
@app.route("/")
def index():
    return "Stackspin API v1.0"
from flask import Blueprint
api_v1 = Blueprint("api_v1", __name__, url_prefix="/api/v1")
@api_v1.route("/")
@api_v1.route("/health")
def api_index():
    return "Stackspin API v1.0"
from .models import App, AppRole
class AppsService:
    @staticmethod
    def get_all_apps():
        apps = App.query.all()
        return [{"id": app.id, "name": app.name, "slug": app.slug} for app in apps]

    @staticmethod
    def get_app_roles():
        app_roles = AppRole.query.all()
        return [{"user_id": app_role.user_id, "app_id": app_role.app_id, "role_id": app_role.role_id} for app_role in app_roles]
from sqlalchemy import ForeignKey, Integer, String
from sqlalchemy.orm import relationship
from database import db
class App(db.Model):
    """
    The App object, interact with the App database object. Data is stored in
    the local database.
    """

    id = db.Column(Integer, primary_key=True)
    name = db.Column(String(length=64))
    slug = db.Column(String(length=64), unique=True)

    def __repr__(self):
        return f"{self.id} <{self.name}>"


class AppRole(db.Model):
    """
    The AppRole object, stores the roles Users have on Apps
    """

    user_id = db.Column(String(length=64), primary_key=True)
    app_id = db.Column(Integer, ForeignKey("app.id"), primary_key=True)
    role_id = db.Column(Integer, ForeignKey("role.id"))
    role = relationship("Role")

    def __repr__(self):
        return f"role_id: {self.role_id}, user_id: {self.user_id}, app_id: {self.app_id}, role: {self.role}"
.venv
.idea
.vscode
__pycache__
*.pyc
.DS_Store
*.swp
.envrc
.direnv
run_app.local.sh
[MAIN]
# List of plugins (as comma separated values of python module names) to load,
# usually to register additional checkers.
load-plugins=pylint_flask,pylint_flask_sqlalchemy
# List of class names for which member attributes should not be checked (useful
# for classes with dynamically set attributes). This supports the use of
# qualified names.
ignored-classes=optparse.Values,thread._local,_thread._local,argparse.Namespace,scoped_session
FROM python:3.13-slim
# set "app" as the working directory from which CMD, RUN, ADD references
WORKDIR /app
# First install apt packages, so we can cache this even if requirements.txt
# changes.
# hadolint ignore=DL3008
RUN apt-get update \
&& apt-get install --no-install-recommends -y gcc g++ libffi-dev libc6-dev \
&& apt-get clean \
&& rm -rf /var/lib/apt/lists/*
# Now copy the python dependencies specification.
COPY requirements.txt .
# Install python dependencies.
RUN pip install --no-cache-dir -r requirements.txt
# now copy all the files in this directory to /app
COPY . .
EXPOSE 5000
# Define our command to be run when launching the container
CMD ["gunicorn", "app:app", "-b", "0.0.0.0:5000", "--workers", "8", "--preload", "--capture-output", "--enable-stdio-inheritance", "--log-level", "DEBUG"]
List of message codes used in the frontend
Kratos codes:
=============
4000006 The provided credentials are invalid, check for spelling mistakes
in your password or username, email address, or phone number.
1010003 Please confirm this action by verifying that it is you.
Stackspin codes:
================
S_CONFIRM_CREDENTIALS Please confirm your credentials to complete this action.
from apscheduler.schedulers.background import BackgroundScheduler
from flask import Flask, jsonify
from flask_cors import CORS
from flask_jwt_extended import JWTManager
import flask_migrate
from jsonschema.exceptions import ValidationError
from NamedAtomicLock import NamedAtomicLock
import threading
import traceback
from werkzeug.exceptions import BadRequest
# These imports are required
from areas import api_v1
from cliapp import cli
from web import web
from areas import users
from areas.apps.apps import *
from areas import auth
from areas import resources
from areas import roles
from areas import tags
from cliapp import cliapp
import config
import helpers.kubernetes
import helpers.provision
import helpers.threads
from web import login
from database import db
from helpers import (
    BadRequest,
    KratosError,
    HydraError,
    Unauthorized,
    bad_request_error,
    validation_error,
    kratos_error,
    global_error,
    hydra_error,
    unauthorized_error,
)
import cluster_config
from config import *
import logging
import migration_reset
import os
import sys
# Configure logging.
log_level = logging.getLevelName(config.LOG_LEVEL or 'INFO')
from logging.config import dictConfig
dictConfig({
    'version': 1,
    'formatters': {'default': {
        'format': '[%(asctime)s] %(levelname)s in %(name)s (%(filename)s+%(lineno)d): %(message)s',
    }},
    'handlers': {'wsgi': {
        'class': 'logging.StreamHandler',
        'stream': 'ext://flask.logging.wsgi_errors_stream',
        'formatter': 'default',
        'level': log_level,
    }},
    'root': {
        'handlers': ['wsgi'],
        'level': log_level,
    },
    # Loggers are created also by alembic, flask_migrate, etc. Without this
    # setting, those loggers seem to be ignored.
    'disable_existing_loggers': False,
})
logging.getLogger("kubernetes.client.rest").setLevel(logging.WARNING)
app = Flask(__name__)
app.config["SECRET_KEY"] = SECRET_KEY
app.config["SQLALCHEMY_DATABASE_URI"] = SQLALCHEMY_DATABASE_URI
app.config["SQLALCHEMY_ENGINE_OPTIONS"] = {'pool_pre_ping': True}
app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = SQLALCHEMY_TRACK_MODIFICATIONS
cors = CORS(app)
db.init_app(app)
with app.app_context():
    provisioner = helpers.provision.Provision()
def init_routines():
    with app.app_context():
        # We have reset the alembic migration history at Stackspin version 2.2.
        # This checks whether we need to prepare the database to follow that
        # change.
        migration_reset.reset()

    app.logger.info("Loading flask_migrate.")
    # flask_migrate exits the program when it encounters an error, for example
    # when the version set in the database is not found in the
    # `migrations/versions` directory. We could prevent that by catching the
    # `SystemExit` exception here, but actually it's not safe to continue in
    # that case.
    flask_migrate.Migrate(app, db)

    app.logger.info("Attempting flask_migrate database upgrade.")
    try:
        with app.app_context():
            flask_migrate.upgrade()
        # TODO: actually flask_migrate.upgrade will catch any errors and
        # exit the program :/
    except Exception as e:
        app.logger.info(f"upgrade failed: {type(e)}: {e}")
        sys.exit(2)
def reload():
    # We need this app context in order to talk to the database, which is managed
    # by flask-sqlalchemy, which assumes a flask app context.
    with app.app_context():
        app.logger.info("Reloading dashboard config from cluster resources.")
        # Load the list of apps from a configmap and store any missing ones in the
        # database.
        app_slugs = cluster_config.populate_apps()
        # Same for the list of oauthclients.
        cluster_config.populate_oauthclients()
        # Load per-app scim config if present.
        cluster_config.populate_scim_config(app_slugs)
# We could call `reload` here manually, but actually the watch also at its
# start creates events for existing secrets so we don't need to.
with app.app_context():
    # Set watch for dashboard SCIM config secrets. Any time those change,
    # we reload so we can do SCIM for newly installed apps.
    try:
        helpers.kubernetes.watch_dashboard_config(app, reload)
    except Exception as e:
        app.logger.error(f"Error watching dashboard config: {e}")
# Set up a generic task scheduler (cron-like).
scheduler = BackgroundScheduler()
scheduler.start()
# Add a job to run the provisioning reconciliation routine regularly.
# We'll also run it when we make changes that should be propagated
# immediately.
scheduler.add_job(helpers.threads.request_provision, 'interval', id='provision', hours=24)
# We'll run this in a separate thread so it can be done in the background.
# We have this single "provisioning worker" so there will be only one
# provisioning operation at a time. We use an Event to signal a
# provisioning request.
def provisioning_loop():
    while True:
        app.logger.info("Waiting for next provisioning run.")
        # helpers.threads.provision_event.wait()
        # helpers.threads.provision_event.clear()
        helpers.threads.wait_provision()
        app.logger.info("Starting provisioning.")
        with app.app_context():
            try:
                provisioner.reconcile()
            except Exception as e:
                app.logger.warning("Exception during user provisioning:")
                app.logger.warning(traceback.format_exc())
threading.Thread(target=provisioning_loop).start()
# `init_routines` should only run once per dashboard instance. To enforce this
# we have different behaviour for production and development mode:
# * We have "preload" on for gunicorn, so this file is loaded only once, before
# workers are forked (production).
# * We make sure that in development mode we run this only once, even though
# this file is loaded twice by flask for some reason.
if RUN_BY_GUNICORN:
    logging.info("Running initialization code (production mode).")
    init_routines()
else:
    logging.info("WERKZEUG_RUN_MAIN: {}".format(os.environ.get("WERKZEUG_RUN_MAIN", "unset")))
    if os.environ.get("WERKZEUG_RUN_MAIN") == "true":
        logging.info("Running initialization code (dev mode).")
        init_routines()
    else:
        logging.info("Not running initialization code (dev or cli mode).")
        # This should not perform any actual migration, just load the
        # flask_migrate extension so we can use `flask db` commands from the
        # cli.
        flask_migrate.Migrate(app, db)
# Now that we've done all database interactions in the initialisation routines,
# we need to drop all connections to the database to prevent those from being
# shared among different worker processes.
logging.info("Disposing of database connections.")
with app.app_context():
    db.engine.dispose()
app.register_blueprint(api_v1)
app.register_blueprint(web)
app.register_blueprint(cli)
# Error handlers
app.register_error_handler(Exception, global_error)
app.register_error_handler(BadRequest, bad_request_error)
app.register_error_handler(ValidationError, validation_error)
app.register_error_handler(KratosError, kratos_error)
app.register_error_handler(HydraError, hydra_error)
app.register_error_handler(Unauthorized, unauthorized_error)
jwt = JWTManager(app)
# When token is not valid or missing handler
@jwt.invalid_token_loader
def invalid_token_callback(reason):
    logging.info(f"Invalid token: {reason}.")
    return jsonify({"errorMessage": "Unauthorized (invalid token)"}), 401
@jwt.unauthorized_loader
def unauthorized_callback(reason):
    logging.info(f"No token: {reason}.")
    return jsonify({"errorMessage": "Unauthorized (no token)"}), 401
@jwt.expired_token_loader
def expired_token_callback(reason):
    logging.info(f"Expired token: {reason}.")
    return jsonify({"errorMessage": "Unauthorized (expired token)"}), 401
@app.route("/")
def index():
    return "Stackspin API v1.0"
from flask import Blueprint, jsonify
import yaml
from config import *
import helpers.kubernetes as k8s
import requests
api_v1 = Blueprint("api_v1", __name__, url_prefix="/api/v1")
@api_v1.route("/")
@api_v1.route("/health")
def api_index():
    return "Stackspin API v1.0"
@api_v1.route("/environment")
def api_environment():
    environment = {
        "HYDRA_PUBLIC_URL": HYDRA_PUBLIC_URL,
        "KRATOS_PUBLIC_URL": KRATOS_PUBLIC_URL,
        "TELEPRESENCE": TELEPRESENCE,
    }
    return jsonify(environment)
# We want to know if
# 1. A release has happened recently and is already deployed on this cluster.
# 2. A release has happened recently but has not yet been deployed on this
# cluster -- that will then probably happen automatically during the next
# maintenance window.
#
# To get the last release, we get the contents of the `VERSION` file from
# the main branch. The `VERSION` file is only updated as part of the release
# process.
#
# To find out how long ago the currently running version was deployed, we look
# at the `lastUpdateTime` of the stackspin `GitRepo` object on the cluster.
@api_v1.route("/info")
def api_info():
    # Get static info from configmap on cluster.
    static_info = k8s.get_kubernetes_config_map_data(
        "stackspin-static-info",
        "flux-system",
    )
    results = static_info
    # Get app versions from apps configmaps on cluster.
    results['appVersions'] = {}
    apps = k8s.get_kubernetes_config_map_data(
        "stackspin-apps",
        "flux-system",
    )
    for app, app_data in apps.items():
        data = yaml.safe_load(app_data)
        if 'version' in data:
            results['appVersions'][app] = data['version']
    apps_custom = k8s.get_kubernetes_config_map_data(
        "stackspin-apps-custom",
        "flux-system",
    )
    if apps_custom is not None:
        for app, app_data in apps_custom.items():
            data = yaml.safe_load(app_data)
            if 'version' in data:
                results['appVersions'][app] = data['version']
    # Get last update time of stackspin GitRepo object on the cluster; that
    # tells us when flux last updated the cluster based on changes in the
    # stackspin git repo.
    stackspin_repo = k8s.get_gitrepo('stackspin')
    results['lastUpdated'] = stackspin_repo['status']['artifact']['lastUpdateTime']
    # This is the branch (or other git ref, like tag or commit) that this
    # cluster follows.
    flux_ref = stackspin_repo['spec']['ref']
    # `flux_ref` is a structured object, though as far as we've seen always a
    # dict with a single entry. The key can be `branch` or `tag` or `commit`.
    # We reduce this to a single string git ref for simplicity in the
    # front-end.
    ref = next(iter(flux_ref.values()))
    results['followingGit'] = ref
    # Get latest released version from gitlab. Whether it's considered
    # "released" depends on which branch we're following, but usually that's
    # the `vX` "production" branch.
    git_release = 'Unknown'
    result = requests.get(f"https://open.greenhost.net/stackspin/stackspin/-/raw/{ref}/VERSION", timeout=5)
    if result.status_code == 200:
        git_release = result.text.rstrip()
    results['lastRelease'] = git_release
    return jsonify(results)
from flask_jwt_extended import jwt_required
from flask_cors import cross_origin
from areas import api_v1
from .apps_service import AppsService
CONFIG_DATA = [
{
}
]
APPS_DATA = [
{"id": 1, "name": "Nextcloud", "enabled": True, "status": "ON for everyone"},
{"id": 2, "name": "Rocketchat", "enabled": True, "status": "ON for everyone"},
{"id": 3, "name": "Wordpress", "enabled": False, "status": "ON for everyone"}
]
APP_DATA = {"id": 1, "name": "Nextcloud", "selected": True, "status": "ON for everyone", "config": CONFIG_DATA},
@api_v1.route('/apps', methods=['GET'])
@jwt_required()
@cross_origin()
def get_apps():
    return jsonify(APPS_DATA)
    """Return data about all apps"""
    apps = AppsService.get_accessible_apps()
    return jsonify(apps)
@api_v1.route('/apps/<string:slug>', methods=['GET'])
@jwt_required()
@cross_origin()
def get_app(slug):
    return jsonify(APPS_DATA[0])
    """Return data about a single app"""
    app = AppsService.get_app(slug)
    return jsonify(app)
@api_v1.route('/apps', methods=['POST'])
@jwt_required()
@cross_origin()
def post_app():
    return jsonify(APPS_DATA), 201
    """Unused function, returns bogus data for now"""
    return jsonify([]), 201
@api_v1.route('/apps/<string:slug>', methods=['PUT'])
@jwt_required()
@cross_origin()
def put_app(slug):
    return jsonify(APPS_DATA)
    """Unused function, returns bogus data for now"""
    return jsonify([])
@api_v1.route('/apps/<string:slug>/config', methods=['GET'])
@jwt_required()
@cross_origin()
def get_config(slug):
    """Returns bogus config data"""
    return jsonify(CONFIG_DATA)
@jwt_required()
@cross_origin()
def delete_config(slug):
    """Does nothing, then returns bogus config data"""
    return jsonify(CONFIG_DATA)