Compare revisions

Changes are shown as if the source revision was being merged into the target revision.
Commits on Source (875), showing 2179 additions and 79 deletions.
@@ -2,6 +2,10 @@
# dependencies
/node_modules
/frontend/node_modules
/backend/venv
/backend/web/node_modules
/backend/venv
/.pnp
.pnp.js
@@ -11,13 +15,12 @@
# production
/build
# local environment
/frontend/local.env
/.vscode
# misc
.DS_Store
.env
.env.local
.env.development.local
.env.test.local
.env.production.local
*.swp
npm-debug.log*
@@ -27,6 +30,9 @@ yarn-error.log*
.eslintcache
cypress/videos/
# KUBECONFIG values
backend/kubeconfig/*
# Helm dependencies
deployment/helmchart/charts/
@@ -4,45 +4,85 @@ include:
stages:
- build-project
- build-container
- build-image
- lint-helm-chart
- package-helm-chart
- release-helm-chart
image: node:14-alpine
image: node:20-alpine
variables:
CHART_NAME: stackspin-dashboard
CHART_DIR: deployment/helmchart/
KANIKO_BUILD_IMAGENAME: dashboard
build-project:
yarn:
stage: build-project
before_script: []
script:
- cd frontend
- echo "Building app"
- yarn install
- echo "REACT_APP_API_URL=/api/v1" > .env
- echo "EXTEND_ESLINT=true" >> .env
- yarn build
- mv build web-build
- mkdir docker
- mv build docker/html
- echo "Build successful"
artifacts:
expire_in: 1 hour
name: web-build
paths:
- web-build
- frontend/docker/html
build-container:
stage: build-container
.kaniko-build:
script:
- cd ${DIRECTORY}
- echo "{\"auths\":{\"$CI_REGISTRY\":{\"username\":\"$CI_REGISTRY_USER\",\"password\":\"$CI_REGISTRY_PASSWORD\"}}}" > /kaniko/.docker/config.json
- export CONTAINER_TAG=${CI_COMMIT_TAG:-${CI_COMMIT_REF_SLUG}}
- /kaniko/executor --cache=true --context ${CI_PROJECT_DIR}/${DIRECTORY} --destination ${CI_REGISTRY_IMAGE}/${KANIKO_BUILD_IMAGENAME}:${CONTAINER_TAG}
build-frontend-image:
stage: build-image
needs:
- yarn
image:
# We need a shell to provide the registry credentials, so we need to use the
# kaniko debug image (https://github.com/GoogleContainerTools/kaniko#debug-image)
name: gcr.io/kaniko-project/executor:debug
entrypoint: [""]
script:
- cp deployment/Dockerfile web-build
- cp deployment/nginx.conf web-build
- cd web-build
- echo "{\"auths\":{\"$CI_REGISTRY\":{\"username\":\"$CI_REGISTRY_USER\",\"password\":\"$CI_REGISTRY_PASSWORD\"}}}" > /kaniko/.docker/config.json
- /kaniko/executor --cache=true --context ${CI_PROJECT_DIR}/web-build --destination ${CI_REGISTRY_IMAGE}/${KANIKO_BUILD_IMAGENAME}:${CI_COMMIT_REF_SLUG}
variables:
KANIKO_BUILD_IMAGENAME: dashboard
DIRECTORY: frontend/docker
before_script:
- cp deployment/Dockerfile $DIRECTORY
- cp deployment/nginx.conf $DIRECTORY
extends:
.kaniko-build
build-backend-image:
stage: build-image
needs: []
variables:
KANIKO_BUILD_IMAGENAME: dashboard-backend
DIRECTORY: backend
image:
# We need a shell to provide the registry credentials, so we need to use the
# kaniko debug image (https://github.com/GoogleContainerTools/kaniko#debug-image)
name: gcr.io/kaniko-project/executor:debug
entrypoint: [""]
extends:
.kaniko-build
build-test-image:
stage: build-image
needs: []
variables:
KANIKO_BUILD_IMAGENAME: cypress-test
DIRECTORY: tests
image:
# We need a shell to provide the registry credentials, so we need to use the
# kaniko debug image (https://github.com/GoogleContainerTools/kaniko#debug-image)
name: gcr.io/kaniko-project/executor:debug
entrypoint: [""]
extends:
.kaniko-build
{}
# Changelog
## 0.14.0
- Introduce support for upstream OIDC. With some additional configuration, you
can use an external OIDC provider to allow "social sign-up/sign-in".
## 0.13.3
- Fix creating app roles for users created via the CLI.
## 0.13.2
- Update hydra client library to v2 and adapt to its changed API.
- Change the JWT identity claim, because it is now strictly required to be a
string and we previously stored a JSON object there.
## 0.13.1
- Add the `cryptography` python library as a dependency of the backend. This is
necessary for the sha256_password and caching_sha2_password auth methods.
## 0.13.0
- Fix the initial recovery flow created automatically for new users, which was
broken by the kratos client lib upgrade.
- Add support for serving arbitrary files from the frontend pod, provided by a
persistent volume claim. Reorganize the assets to make it easier to include
custom icons this way.
- Add support for theming. Customizing colours, logo and background image is
now particularly easy, but mostly anything can be changed through a custom
stylesheet.
- Only show Stackspin version info to admin users.
## 0.12.4
- Prevent database connections from being shared between worker processes.
That sharing may cause intermittent database errors.
## 0.12.3
- Fix broken kratos hooks tracking last recovery and login times.
- Upgrade python to 3.13.
## 0.12.2
- Fix consent page for `ENFORCE_2FA` instances.
## 0.12.1
- Add back `jinja2-base64-filters` to backend for templating kustomizations
during app installation.
## 0.12.0
- Add basic support for WebAuthn as second-factor authentication.
- Only show app version numbers in the dashboard tiles to admin users.
- Do not serve Dockerfile and nginx config from frontend.
- Start renovating python dependencies of the backend. Upgrade many direct and
indirect dependencies.
## 0.11.1
- Fix password reset form in case no email address is pre-filled.
## 0.11.0
- Allow pre-filling user's email address in a link to the password (re)set
form. This is useful when creating new user accounts.
- Fix user provisioning after installing new apps.
## 0.10.5
- Look up users from Kratos by email address using the proper (new) API
mechanism for that, instead of iterating over all users.
- Compare email addresses case insensitively to deal with Stackspin apps
changing case of email address strings.
- Fix broken user accounts when created via the flask CLI.
- Replace slightly off-spec usage of `__repr__` by `__str__`.
## 0.10.4
- Disable Zulip accounts when deleting users, because Zulip doesn't allow
deleting accounts via SCIM.
## 0.10.3
- Fix setting successful provisioning status.
## 0.10.2
- Fine-tune logging levels, and introduce a new environment variable
`LOG_LEVEL` to set the log level at runtime.
- Track when a user's full name has been changed, and only include the name in
the SCIM provisioning call when it has changed, or for newly provisioned
users.
## 0.10.1
- Watch dashboard configmaps with lists of apps and oauthclients, and
reload config on changes. This also makes sure that we always load the config
at dashboard start-up, even when there are no (SCIM-supporting) apps
installed.
## 0.10.0
- Include new "System resources" module with basic stats.
- Implement basic (manual/static) SCIM functionality for automatic user provisioning.
- Implement dynamic (i.e., arbitrary apps) SCIM functionality, tested and
tailored for Nextcloud and Zulip.
- Upgrade to tailwind v3, and update several other javascript dependencies.
- Make info modals slightly wider, to make sure you can see the full contents
also for slightly larger fonts. In particular, this fixes a partially
invisible reset link.
- Add a CLI command for deleting older unused accounts.
- Add logo for Gitea.
## 0.9.2
- Fix saving user properties, which was broken because of the partial tags
implementation.
## 0.9.1
- Do not autocomplete totp input field.
- Allow removing user app roles from CLI.
## 0.9.0
- Improve user listing: show label for admin users, show last login and
password reset times, improved layout.
- Fix rare bug in frontend's idea of admin status in the face of custom apps.
- Prepare backend for user tags.
## 0.8.4
- Allow enforcing 2fa.
- Add button for admin users to reset 2FA of users. Also improve UX of this and
other dangerous operations in the user edit screen.
- Fix logout to include hydra post-logout.
- Do not show link to recovery on TOTP form.
- Fix css of demo sign-up.
- Upgrade to python 3.12.
## 0.8.3
- Introduce backend code for resetting 2FA, and add cli command for that.
- Upgrade Kratos api library `ory-kratos-client` to 1.0.0.
- Patch our usage of Kratos api pagination of identities list.
## 0.8.2
- End the Kratos session in prelogout. This makes sure that we end the "SSO
session" also when logging out from an app. We used to rely on hydra's
post-logout url to get at the kratos logout, but apps sometimes override that
url via an oidc parameter.
## 0.8.1
- Add a couple of attributes to our OIDC tokens to support our switch to
another Nextcloud app for OIDC.
## 0.8.0
- Add feature to easily edit app permissions for multiple users at once.
- Change the way secrets are created for apps, creating them in the stackspin
project (using an existing secrets controller). So remove support for
generating app secrets in the dashboard.
- Fix password reset when 2FA is enabled.
- Fix a bug where all Wekan users got admin permissions in Wekan regardless of
the role set in Stackspin.
- Enable "pre-ping" for all database connections managed by sqlalchemy in the
dashboard backend, hoping to eliminate or reduce dropped database
connections.
- Fix listing of Velero in app permissions when batch-creating users.
## 0.7.6
- Add Forgejo metadata for use as custom app.
## 0.7.5
- Add Jitsi and Mattermost metadata for use as custom apps.
## 0.7.4
- Make the sign-in UI less wide.
## 0.7.3
Only changes to the helm chart.
## 0.7.2
- Apply Stackspin styling to the login component. This covers the login pages,
recovery page, and profile/authentication settings.
## 0.7.1
- Load the flask_migrate flask extension in dev/cli mode so we may run `flask
db` commands from the cli again.
## 0.7.0
- Improve the UX of the dashboard tiles: adding help texts in modals, add a
status dropdown with version info, add alerts before and after automatic
upgrades, show greeting, show tag when logged in as admin user.
- Make sure we run the initialisation code in the backend only once per run,
both in development and production mode. Also, do not run the init code on
flask cli runs.
- Remember the active tab in the authentication settings when saving.
- No longer send emails to addresses that do not match an existing account.
This was fixed by upgrading Kratos; we're happy to see that the default
Kratos behaviour was changed in this regard.
## 0.6.7
Only changes to the helm chart.
## 0.6.6
Only changes to the helm chart.
## 0.6.5
- Further improve (error) message handling. In particular, show feedback when
saving profile settings. Some of the previous error message changes have been
reverted pending further consideration of the design.
- Disable changing the email address as this is not supported right now.
## 0.6.4
- Fix error messages that were not shown, in particular when providing wrong
credentials when logging in. We redesigned the error handling, considering
that these messages may be translated later on.
## 0.6.3
- Add support for Hedgedoc.
- Add a button for admins for creating a recovery link for a user.
- Automatically log in to dashboard if already authenticated.
- Redirect to the dashboard on successful login if no redirect URL is set.
- Fix deletion of apps via the CLI.
- Add special features (sign-up form) for the Stackspin demo instance.
- Show the user UUID in user modal.
- Only show installed apps when configuring roles.
## 0.6.2
- Fix submit button label in the form for verifying your TOTP code.
## 0.6.1
- Add TOTP as second factor authentication. Please note that you'll need to set
a `backend.dashboardUrl` value instead of the old `backend.loginPanelUrl` one
-- typically dropping the `/web` suffix to get the new value.
- Create a new backend endpoint for providing some environment variables to the
frontend, with the URLs of the Kratos and Hydra APIs.
## 0.6.0
- Make it easier to add apps, by reading apps and oauthclients from configmaps
at startup.
- Reset alembic migration history.
## 0.5.2
- Fix login welcome message
- Clarify "set new password" button (#94)
- Show error messages when login fails, for example when a wrong password was
entered (#96)
- Fix access checking for monitoring (#105)
## 0.5.1
- Fix bug of missing "Monitoring" app access when creating a new user.
- Add Velero to the list of installable apps, but hide it from the dashboard
## 0.5.0
- Merge dashboard-backend repository into this repository, released as 0.5.0
# Stackspin Dashboard
This repo hosts the Stackspin Dashboard, both frontend and backend code.
## Project structure
### Frontend
The frontend code lives in the `frontend` directory.
### Backend
The backend code lives in the `backend` directory. Apart from the dashboard
backend itself, it also contains a flask application that functions as the
identity provider, login, consent and logout endpoints for the OpenID Connect
(OIDC) process.
The application relies on the following components:
- **Hydra**: Hydra is an open source OIDC server.
This means applications can connect to Hydra to start a session with a user.
Hydra provides the application with the username
and other roles/claims for the application.
Hydra is developed by Ory, which counts security among its top priorities.
- **Kratos**: This is the identity manager,
which stores all user profiles and secrets (passwords).
Kratos is designed so that the UI (browser) talks to Kratos directly,
over a public API endpoint.
Authentication, form validation, etc. are all handled by Kratos.
Kratos only provides an API, not a UI itself.
Kratos also provides an admin API,
which is only used from the server-side flask app to create/delete users
(a minimal usage sketch follows this list).
- **MariaDB**: The login application, as well as Hydra and Kratos, need to store data.
This is done in a MariaDB database server.
There is one instance with three databases.
As all databases are very small we do not foresee resource limitation problems.
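As a usage illustration, here is a minimal sketch of how the server-side code
can talk to the Kratos admin API, following the same `ory_kratos_client`
pattern used by the backend code in this repository (`KRATOS_ADMIN_URL` is the
backend's existing config value; pagination details depend on the installed
`ory-kratos-client` version):

```
# Minimal sketch: list identities via the Kratos admin API.
import ory_kratos_client
from ory_kratos_client.api import identity_api

from config import KRATOS_ADMIN_URL

kratos_admin_api_configuration = ory_kratos_client.Configuration(host=KRATOS_ADMIN_URL)
with ory_kratos_client.ApiClient(kratos_admin_api_configuration) as kratos_admin_client:
    kratos_identity_api = identity_api.IdentityApi(kratos_admin_client)
    # Iterate over identities known to Kratos; each identity carries the
    # user's traits (e.g. email address and name).
    for identity in kratos_identity_api.list_identities():
        print(identity.id, identity.traits)
```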
When Hydra encounters a new session/user, it has to know whether this user has access.
To determine that, the user has to log in through a login application.
This application is developed by the Stackspin team (Greenhost)
and is part of this repository.
It is a Python Flask application.
The application follows flows defined in Kratos,
and as such a lot of the interaction happens in the web browser
rather than server-side.
As a result,
the login application has a UI component which relies heavily on JavaScript.
As this is a relatively small application,
it is based on traditional Bootstrap + jQuery.
## Development environment
### Setup
Create a `.env` file in the project root directory by running `cp .env.example .env`,
and adjust `REACT_APP_HYDRA_PUBLIC_URL` to the SSO URL of your cluster.
#### `yarn start`
Runs the app in the development mode.
Open [http://localhost:3000](http://localhost:3000) to view it in the browser.
The page will reload if you make edits.
You will also see any lint errors in the console.
#### `yarn test`
Launches the test runner in the interactive watch mode.
See the section about [running tests](https://facebook.github.io/create-react-app/docs/running-tests)
for more information.
#### `yarn build`
Builds the app for production to the `build` folder.
It correctly bundles React in production mode and optimizes the build
for the best performance.
The build is minified and the filenames include the hashes.
Your app is ready to be deployed!
See the section about [deployment](https://facebook.github.io/create-react-app/docs/deployment)
for more information.
#### `yarn eject`
**Note: this is a one-way operation. Once you `eject`, you can’t go back!**
If you aren’t satisfied with the build tool and configuration choices,
you can `eject` at any time. This command will remove the single build dependency
from your project.
Instead, it will copy all the configuration files and the transitive dependencies
(webpack, Babel, ESLint, etc) right into your project so you have full control
over them.
All of the commands except `eject` will still work, but they will point to the
copied scripts so you can tweak them. At this point you’re on your own.
You don’t have to ever use `eject`. The curated feature set is suitable for
small and middle deployments, and you shouldn’t feel obligated
to use this feature.
However we understand that this tool wouldn’t be useful
if you couldn’t customize it when you are ready for it.
## Learn More
You can learn more in the [Create React App documentation](https://facebook.github.io/create-react-app/docs/getting-started).
The development environment is a hybrid one, where one or both of the dashboard
frontend and backend run locally, but the rest of the cluster runs on a remote
machine.
The remote should be a regular Stackspin cluster, though preferably one that's
dedicated to development purposes.
The local dashboard frontend and/or backend can run in a docker container or
directly ("native mode"). (At this time it's not possible to mix the two, for
example by having the dashboard backend run directly and the frontend in a
docker container.)
The connection between the local and remote parts is set up by a tool called
telepresence. If you want to develop the frontend for example, telepresence
intercepts traffic that goes into the remote's frontend pod and redirects it to
your copy that's running locally on your machine; responses from your local
frontend are led back via the remote. This interception happens invisibly to
your browser, which you just point at the remote cluster.
### Prerequisites
#### Set up telepresence on your local development machine
You need to do this once for every development machine you're using
(workstation, laptop).
* You need root on your machine, and at some point you will need to allow
telepresence to perform actions as root in order to make the network changes
for the two-way tunnel. If this is not possible or not desirable, you can try
to run your local dashboard in a docker container instead.
* Set `user_allow_other` in `/etc/fuse.conf`. This is necessary when
telepresence adds (FUSE-based) sshfs mounts so your local code can access
volumes from the kubernetes cluster, in particular the one with the service
account token (credentials for calling the kubernetes api), to let the
dashboard interact with the cluster.
- MacOS users may have to do a little extra work to get a working current
sshfs: see [telepresence
docs](https://www.getambassador.io/docs/telepresence-oss/latest/troubleshooting#volume-mounts-are-not-working-on-macos).
* Download and install the telepresence binary on your development machine:
https://www.getambassador.io/docs/telepresence-oss/latest/install
#### Access to development cluster
You need `kubectl` and `helm` binaries, and a `kubectl` configuration file
(often called "kubeconfig") containing credentials needed to authenticate
against your cluster. If the `KUBECONFIG` environment variable is set and
points to the config file, this will be picked up by the various programs.
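For instance, the backend's Kubernetes helpers are built on the official
`kubernetes` Python client, which picks up the kubeconfig referenced by
`KUBECONFIG` when running outside the cluster. A minimal sketch (assuming the
`kubernetes` package is installed; the helper functions in this repository wrap
calls like this one):

```
# Minimal sketch: read a ConfigMap from the development cluster using the
# credentials referenced by the KUBECONFIG environment variable.
from kubernetes import client, config

# Loads the kubeconfig pointed to by KUBECONFIG (falling back to ~/.kube/config).
config.load_kube_config()

core = client.CoreV1Api()
config_map = core.read_namespaced_config_map("stackspin-static-info", "flux-system")
print(config_map.data)
```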
#### Set up telepresence on your development cluster
You need to do this once for every cluster you want to use as a development cluster.
* Install telepresence on your development cluster:
```
telepresence helm install -f telepresence-values.yaml
```
#### Install local dependencies
Before running the frontend in native mode:
* Make sure you have nodejs installed. You may want to use [Node Version
Manager](https://github.com/nvm-sh/nvm) to make it easy to install several
versions side by side.
* Install necessary javascript dependencies (will be placed in
`frontend/node_modules`) using `./dev.sh frontend setup`.
Before running the backend in native mode:
* Make sure you have python3 installed.
* Install necessary python dependencies (in a virtualenv in `backend/venv`)
using `./dev.sh backend setup`.
### Run
From the root `dashboard` directory, run for example `./dev.sh frontend`. This
will set up the telepresence tunnel to the cluster, and start the dashboard
frontend server in native mode. `./dev.sh backend` will do the same but for the
backend. You can run both at the same time (in separate terminal windows) if
you want to make changes to both frontend and backend.
If you want to run the local dashboard in docker instead, use `./dev.sh
frontend docker` and/or `./dev.sh backend docker`. Please note that due to a
telepresence limitation it's not currently possible to run the frontend
natively and the backend in docker at the same time, or vice versa.
#### Known issues
* Running the dashboard backend locally with telepresence in docker mode
currently doesn't work because of dns resolution issues in the docker
container: https://github.com/telepresenceio/telepresence/issues/1492 . We
could work around this by using a fully qualified domain name for the
database service -- which doesn't agree with the goal of making the stackspin
namespace variable -- or using the service env vars, but we're hoping that
telepresence will fix this in time.
* Telepresence intercepts traffic to a pod, but the original pod is still
running. In case of the backend, this is sometimes problematic, for example
when you're adding database migrations which the original pod then doesn't
know about and crashes, or with SCIM which involves timer-based actions which
are then performed both by your modified local instance and by the original
remote one. There is some work in progress to allow scaling down the
intercepted pod: https://github.com/telepresenceio/telepresence/issues/1608 .
* If telepresence is giving errors, in particular ones about "an intercept with
the same name already existing" on repeated runs, it may help to reset the
telepresence state by doing `./dev.sh reset`. This will stop the local
telepresence daemon so it can be cleanly restarted on the next try, and will
also restart the "traffic manager" on the remote so it will discard any old
lingering intercepts.
---
## Testing as a part of Stackspin
Sometimes you may want to make more fundamental changes to the dashboard that
might behave differently in the local development environment compared to a
regular Stackspin instance, i.e., one that's not a local/cluster hybrid. In
this case, you'll want to run your new version in a regular Stackspin cluster.
To do that:
* Push your work to an MR.
* Set the image tags in `values.yaml` to the ones created for your branch; if
unsure, check the available tags in the Gitlab container registry for the
dashboard project.
* Make sure to increase the chart version number in `Chart.yaml`, preferably
with a suffix to denote that it's not a stable version. For example, if the
last stable release is 1.2.3, make the version 1.2.4-myawesomefeature in your
branch.
The CI pipeline should then publish your new chart version in the Gitlab helm
chart repo for the dashboard project, but in the `unstable` channel -- the
`stable` channel is reserved for chart versions that have been merged to the
`main` branch.
Once your package is published, use it by
1. changing the `spec.url` field of the `flux-system/dashboard`
`HelmRepository` object in the cluster where you want to run this, replacing
`stable` by `unstable`; and
2. changing the `spec.chart.spec.version` field of the `stackspin/dashboard`
`HelmRelease` to your chart version (the one from this chart's `Chart.yaml`).
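The two changes above can also be scripted. Below is a hedged sketch using the
`kubernetes` Python client (the same client the backend uses); the flux API
group versions and the example chart version are assumptions and may need
adjusting for your cluster:

```
# Sketch: switch the dashboard HelmRepository to the unstable channel and pin
# the HelmRelease to an unstable chart version. Adjust API versions to match
# the flux version installed on your cluster.
from kubernetes import client, config

config.load_kube_config()
custom = client.CustomObjectsApi()

# 1. Replace "stable" by "unstable" in the flux-system/dashboard HelmRepository URL.
repo = custom.get_namespaced_custom_object(
    "source.toolkit.fluxcd.io", "v1beta2", "flux-system", "helmrepositories", "dashboard")
repo["spec"]["url"] = repo["spec"]["url"].replace("stable", "unstable")
custom.replace_namespaced_custom_object(
    "source.toolkit.fluxcd.io", "v1beta2", "flux-system", "helmrepositories", "dashboard", repo)

# 2. Pin the stackspin/dashboard HelmRelease to the chart version from your branch
#    (example version taken from the text above).
release = custom.get_namespaced_custom_object(
    "helm.toolkit.fluxcd.io", "v2beta1", "stackspin", "helmreleases", "dashboard")
release["spec"]["chart"]["spec"]["version"] = "1.2.4-myawesomefeature"
custom.replace_namespaced_custom_object(
    "helm.toolkit.fluxcd.io", "v2beta1", "stackspin", "helmreleases", "dashboard", release)
```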
## Release process
To publish a new version of the helm chart:
1. Update the docker image tag in `deployment/helmchart/values.yaml` to the new version tag (to be
created in a later step of this release).
2. Update the appVersion in `deployment/helmchart/Chart.yaml` to match that new tag version.
3. Increase the chart version in `deployment/helmchart/Chart.yaml`.
4. Update `CHANGELOG.md` and/or `deployment/helmchart/CHANGELOG.md` and check
that it includes relevant changes, including ones added by renovatebot.
5. Commit and push these changes to `main`.
6. Create a new git tag for the new release and push it to gitlab as well.
The last step will trigger a CI run that will package and publish the helm chart.
.venv
.idea
.vscode
__pycache__
*.pyc
.DS_Store
*.swp
.envrc
.direnv
run_app.local.sh
[MAIN]
# List of plugins (as comma separated values of python module names) to load,
# usually to register additional checkers.
load-plugins=pylint_flask,pylint_flask_sqlalchemy
# List of class names for which member attributes should not be checked (useful
# for classes with dynamically set attributes). This supports the use of
# qualified names.
ignored-classes=optparse.Values,thread._local,_thread._local,argparse.Namespace,scoped_session
FROM python:3.13-slim
# set "app" as the working directory from which CMD, RUN, ADD references
WORKDIR /app
# First install apt packages, so we can cache this even if requirements.txt
# changes.
# hadolint ignore=DL3008
RUN apt-get update \
&& apt-get install --no-install-recommends -y gcc g++ libffi-dev libc6-dev \
&& apt-get clean \
&& rm -rf /var/lib/apt/lists/*
# Now copy the python dependencies specification.
COPY requirements.txt .
# Install python dependencies.
RUN pip install --no-cache-dir -r requirements.txt
# now copy all the files in this directory to /app
COPY . .
EXPOSE 5000
# Define our command to be run when launching the container
CMD ["gunicorn", "app:app", "-b", "0.0.0.0:5000", "--workers", "8", "--preload", "--capture-output", "--enable-stdio-inheritance", "--log-level", "DEBUG"]
List of message codes used in the frontend
Kratos codes:
=============
4000006 The provided credentials are invalid, check for spelling mistakes
in your password or username, email address, or phone number.
1010003 Please confirm this action by verifying that it is you.
Stackspin codes:
================
S_CONFIRM_CREDENTIALS Please confirm your credentials to complete this action.
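As an illustration only, a client could resolve these codes with a simple lookup
table; the mapping below is hypothetical (the real handling lives in the frontend):

```
# Hypothetical lookup table mapping message codes to user-facing strings.
MESSAGE_CODES = {
    # Kratos codes
    4000006: ("The provided credentials are invalid, check for spelling mistakes "
              "in your password or username, email address, or phone number."),
    1010003: "Please confirm this action by verifying that it is you.",
    # Stackspin codes
    "S_CONFIRM_CREDENTIALS": "Please confirm your credentials to complete this action.",
}

def message_for(code):
    """Return the user-facing message for a code, or a generic fallback."""
    return MESSAGE_CODES.get(code, "Something went wrong, please try again.")
```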
from apscheduler.schedulers.background import BackgroundScheduler
from flask import Flask, jsonify
from flask_cors import CORS
from flask_jwt_extended import JWTManager
import flask_migrate
from jsonschema.exceptions import ValidationError
from NamedAtomicLock import NamedAtomicLock
import threading
import traceback
from werkzeug.exceptions import BadRequest
# These imports are required
from areas import api_v1
from cliapp import cli
from web import web
from areas import users
from areas.apps.apps import *
from areas import auth
from areas import resources
from areas import roles
from areas import tags
from cliapp import cliapp
import config
import helpers.kubernetes
import helpers.provision
import helpers.threads
from web import login
from database import db
from helpers import (
BadRequest,
KratosError,
HydraError,
Unauthorized,
bad_request_error,
validation_error,
kratos_error,
global_error,
hydra_error,
unauthorized_error,
)
import cluster_config
from config import *
import logging
import migration_reset
import os
import sys
# Configure logging.
log_level = logging.getLevelName(config.LOG_LEVEL or 'INFO')
from logging.config import dictConfig
dictConfig({
'version': 1,
'formatters': {'default': {
'format': '[%(asctime)s] %(levelname)s in %(name)s (%(filename)s+%(lineno)d): %(message)s',
}},
'handlers': {'wsgi': {
'class': 'logging.StreamHandler',
'stream': 'ext://flask.logging.wsgi_errors_stream',
'formatter': 'default',
'level': log_level,
}},
'root': {
'handlers': ['wsgi'],
'level': log_level,
},
# Loggers are created also by alembic, flask_migrate, etc. Without this
# setting, those loggers seem to be ignored.
'disable_existing_loggers': False,
})
logging.getLogger("kubernetes.client.rest").setLevel(logging.WARNING)
app = Flask(__name__)
app.config["SECRET_KEY"] = SECRET_KEY
app.config["SQLALCHEMY_DATABASE_URI"] = SQLALCHEMY_DATABASE_URI
app.config["SQLALCHEMY_ENGINE_OPTIONS"] = {'pool_pre_ping': True}
app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = SQLALCHEMY_TRACK_MODIFICATIONS
cors = CORS(app)
db.init_app(app)
with app.app_context():
provisioner = helpers.provision.Provision()
def init_routines():
with app.app_context():
# We have reset the alembic migration history at Stackspin version 2.2.
# This checks whether we need to prepare the database to follow that
# change.
migration_reset.reset()
app.logger.info("Loading flask_migrate.")
# flask_migrate exits the program when it encounters an error, for example
# when the version set in the database is not found in the
# `migrations/versions` directory. We could prevent that by catching the
# `SystemExit` exception here, but actually it's not safe to continue in
# that case.
flask_migrate.Migrate(app, db)
app.logger.info("Attempting flask_migrate database upgrade.")
try:
with app.app_context():
flask_migrate.upgrade()
# TODO: actually flask_migrate.upgrade will catch any errors and
# exit the program :/
except Exception as e:
app.logger.info(f"upgrade failed: {type(e)}: {e}")
sys.exit(2)
def reload():
# We need this app context in order to talk to the database, which is managed by
# flask-sqlalchemy, which assumes a flask app context.
with app.app_context():
app.logger.info("Reloading dashboard config from cluster resources.")
# Load the list of apps from a configmap and store any missing ones in the
# database.
app_slugs = cluster_config.populate_apps()
# Same for the list of oauthclients.
cluster_config.populate_oauthclients()
# Load per-app scim config if present.
cluster_config.populate_scim_config(app_slugs)
# We could call `reload` here manually, but the watch already generates events
# for existing secrets when it starts, so we don't need to.
with app.app_context():
# Set watch for dashboard SCIM config secrets. Any time those change,
# we reload so we can do SCIM for newly installed apps.
try:
helpers.kubernetes.watch_dashboard_config(app, reload)
except Exception as e:
app.logger.error(f"Error watching dashboard config: {e}")
# Set up a generic task scheduler (cron-like).
scheduler = BackgroundScheduler()
scheduler.start()
# Add a job to run the provisioning reconciliation routine regularly.
# We'll also run it when we make changes that should be propagated
# immediately.
scheduler.add_job(helpers.threads.request_provision, 'interval', id='provision', hours=24)
# We'll run this in a separate thread so it can be done in the background.
# We have this single "provisioning worker" so there will be only one
# provisioning operation at a time. We use an Event to signal a
# provisioning request.
def provisioning_loop():
while True:
app.logger.info("Waiting for next provisioning run.")
# helpers.threads.provision_event.wait()
# helpers.threads.provision_event.clear()
helpers.threads.wait_provision()
app.logger.info("Starting provisioning.")
with app.app_context():
try:
provisioner.reconcile()
except Exception as e:
app.logger.warning("Exception during user provisioning:")
app.logger.warning(traceback.format_exc())
threading.Thread(target=provisioning_loop).start()
# `init_routines` should only run once per dashboard instance. To enforce this
# we have different behaviour for production and development mode:
# * We have "preload" on for gunicorn, so this file is loaded only once, before
# workers are forked (production).
# * We make sure that in development mode we run this only once, even though
# this file is loaded twice by flask for some reason.
if RUN_BY_GUNICORN:
logging.info("Running initialization code (production mode).")
init_routines()
else:
logging.info("WERKZEUG_RUN_MAIN: {}".format(os.environ.get("WERKZEUG_RUN_MAIN", "unset")))
if os.environ.get("WERKZEUG_RUN_MAIN") == "true":
logging.info("Running initialization code (dev mode).")
init_routines()
else:
logging.info("Not running initialization code (dev or cli mode).")
# This should not perform any actual migration, just load the
# flask_migrate extension so we can use `flask db` commands from the
# cli.
flask_migrate.Migrate(app, db)
# Now that we've done all database interactions in the initialisation routines,
# we need to drop all connections to the database to prevent those from being
# shared among different worker processes.
logging.info("Disposing of database connections.")
with app.app_context():
db.engine.dispose()
app.register_blueprint(api_v1)
app.register_blueprint(web)
app.register_blueprint(cli)
# Error handlers
app.register_error_handler(Exception, global_error)
app.register_error_handler(BadRequest, bad_request_error)
app.register_error_handler(ValidationError, validation_error)
app.register_error_handler(KratosError, kratos_error)
app.register_error_handler(HydraError, hydra_error)
app.register_error_handler(Unauthorized, unauthorized_error)
jwt = JWTManager(app)
# When token is not valid or missing handler
@jwt.invalid_token_loader
def invalid_token_callback(reason):
logging.info(f"Invalid token: {reason}.")
return jsonify({"errorMessage": "Unauthorized (invalid token)"}), 401
@jwt.unauthorized_loader
def unauthorized_callback(reason):
logging.info(f"No token: {reason}.")
return jsonify({"errorMessage": "Unauthorized (no token)"}), 401
@jwt.expired_token_loader
def expired_token_callback(reason):
logging.info(f"Expired token: {reason}.")
return jsonify({"errorMessage": "Unauthorized (expired token)"}), 401
@app.route("/")
def index():
return "Stackspin API v1.0"
from flask import Blueprint, jsonify
import yaml
from config import *
import helpers.kubernetes as k8s
import requests
api_v1 = Blueprint("api_v1", __name__, url_prefix="/api/v1")
@api_v1.route("/")
@api_v1.route("/health")
def api_index():
return "Stackspin API v1.0"
@api_v1.route("/environment")
def api_environment():
environment = {
"HYDRA_PUBLIC_URL": HYDRA_PUBLIC_URL,
"KRATOS_PUBLIC_URL": KRATOS_PUBLIC_URL,
"TELEPRESENCE": TELEPRESENCE,
}
return jsonify(environment)
# We want to know if
# 1. A release has happened recently and is already deployed on this cluster.
# 2. A release has happened recently but has not yet been deployed on this
# cluster -- that will then probably happen automatically during the next
# maintenance window.
#
# To get the last release, we get the contents of the `VERSION` file from
# the main branch. The `VERSION` file is only updated as part of the release
# process.
#
# To find out how long ago the currently running version was deployed, we look
# at the `lastUpdateTime` of the stackspin `GitRepo` object on the cluster.
@api_v1.route("/info")
def api_info():
# Get static info from configmap on cluster.
static_info = k8s.get_kubernetes_config_map_data(
"stackspin-static-info",
"flux-system",
)
results = static_info
# Get app versions from apps configmaps on cluster.
results['appVersions'] = {}
apps = k8s.get_kubernetes_config_map_data(
"stackspin-apps",
"flux-system",
)
for app, app_data in apps.items():
data = yaml.safe_load(app_data)
if 'version' in data:
results['appVersions'][app] = data['version']
apps_custom = k8s.get_kubernetes_config_map_data(
"stackspin-apps-custom",
"flux-system",
)
if apps_custom is not None:
for app, app_data in apps_custom.items():
data = yaml.safe_load(app_data)
if 'version' in data:
results['appVersions'][app] = data['version']
# Get last update time of stackspin GitRepo object on the cluster; that
# tells us when flux last updated the cluster based on changes in the
# stackspin git repo.
stackspin_repo = k8s.get_gitrepo('stackspin')
results['lastUpdated'] = stackspin_repo['status']['artifact']['lastUpdateTime']
# This is the branch (or other git ref, like tag or commit) that this
# cluster follows.
flux_ref = stackspin_repo['spec']['ref']
# `flux_ref` is a structured object, though as far as we've seen always a
# dict with a single entry. The key can be `branch` or `tag` or `commit`.
# We reduce this to a single string git ref for simplicity in the
# front-end.
ref = next(iter(flux_ref.values()))
results['followingGit'] = ref
# Get latest released version from gitlab. Whether it's considered
# "released" depends on which branch we're following, but usually that's
# the `vX` "production" branch.
git_release = 'Unknown'
result = requests.get(f"https://open.greenhost.net/stackspin/stackspin/-/raw/{ref}/VERSION", timeout=5)
if result.status_code == 200:
git_release = result.text.rstrip()
results['lastRelease'] = git_release
return jsonify(results)
from flask import jsonify
from flask_jwt_extended import jwt_required
from flask_cors import cross_origin
from areas import api_v1
from .apps_service import AppsService
CONFIG_DATA = [
{
"id": "values.yml",
"description": "Some user friendly description",
"raw": "cronjob:\n # Set curl to accept insecure connections when acme staging is used\n curlInsecure: false",
"fields": [
{"name": "cronjob", "type": "string", "value": ""},
{"name": "curlInsecure", "type": "boolean", "value": "false"}
]
}
]
@api_v1.route('/apps', methods=['GET'])
@jwt_required()
@cross_origin()
def get_apps():
"""Return data about all apps"""
apps = AppsService.get_accessible_apps()
return jsonify(apps)
@api_v1.route('/apps/<string:slug>', methods=['GET'])
@jwt_required()
@cross_origin()
def get_app(slug):
"""Return data about a single app"""
app = AppsService.get_app(slug)
return jsonify(app)
@api_v1.route('/apps', methods=['POST'])
@jwt_required()
@cross_origin()
def post_app():
"""Unused function, returns bogus data for now"""
return jsonify([]), 201
@api_v1.route('/apps/<string:slug>', methods=['PUT'])
@jwt_required()
@cross_origin()
def put_app(slug):
"""Unused function, returns bogus data for now"""
return jsonify([])
@api_v1.route('/apps/<string:slug>/config', methods=['GET'])
@jwt_required()
@cross_origin()
def get_config(slug):
"""Returns bogus config data"""
return jsonify(CONFIG_DATA)
@api_v1.route('/apps/<string:slug>/config', methods=['DELETE'])
@jwt_required()
@cross_origin()
def delete_config(slug):
"""Does nothing, then returns bogus config data"""
return jsonify(CONFIG_DATA)
import threading
from flask import current_app
from flask_jwt_extended import get_jwt
import ory_kratos_client
from ory_kratos_client.api import identity_api
from .models import App, AppRole
from areas.roles.models import Role
from areas.users.models import User
from config import *
from database import db
from helpers.access_control import user_has_access
from helpers.kratos_user import KratosUser
import helpers.kubernetes as k8s
from helpers.threads import request_provision
class AppsService:
@staticmethod
def get_all_apps():
apps = App.query.all()
return [app.to_dict() for app in apps]
@staticmethod
def get_accessible_apps():
apps = App.query.all()
kratos_admin_api_configuration = ory_kratos_client.Configuration(host=KRATOS_ADMIN_URL)
with ory_kratos_client.ApiClient(kratos_admin_api_configuration) as kratos_admin_client:
kratos_identity_api = identity_api.IdentityApi(kratos_admin_client)
user_id = get_jwt()['user_id']
current_app.logger.info(f"user_id: {user_id}")
# Get the related user object
current_app.logger.info(f"Info: Getting user from admin {user_id}")
user = KratosUser(kratos_identity_api, user_id)
if not user:
current_app.logger.error(f"User not found in database: {user_id}")
return []
return [app.to_dict() for app in apps if user_has_access(user, app)]
@staticmethod
def get_app(slug):
app = App.query.filter_by(slug=slug).first()
return app.to_dict()
@staticmethod
def get_app_roles():
app_roles = AppRole.query.all()
return [{"user_id": app_role.user_id, "app_id": app_role.app_id, "role_id": app_role.role_id} for app_role in app_roles]
@classmethod
def install_app(cls, app):
app.install()
# Create app roles for the new app for all admins, and reprovision. We
# do this asynchronously, because we first need to wait for the app
# installation to be finished -- otherwise the SCIM config for user
# provisioning is not ready yet.
current_app.logger.info("Starting thread for creating app roles.")
# We need to pass the app context to the thread, because it needs that
# for database operations.
ca = current_app._get_current_object()
threading.Thread(target=cls.create_admin_app_roles, args=(ca, app,)).start()
@staticmethod
def create_admin_app_roles(ca, app):
"""Create AppRole objects for the given app for all admins."""
with ca.app_context():
ca.logger.info("Waiting for kustomizations to be ready.")
k8s.wait_kustomization_ready(app)
for user in User.get_all():
if not user['stackspin_data']['stackspin_admin']:
# We are only dealing with admin users here.
continue
existing_app_role = AppRole.query.filter_by(app_id=app.id, user_id=user['id']).first()
if existing_app_role is None:
ca.logger.info(f"Creating app role for app {app.slug} for admin user {user['id']}")
app_role = AppRole(
user_id=user['id'],
app_id=app.id,
role_id=Role.ADMIN_ROLE_ID
)
db.session.add(app_role)
db.session.commit()
ca.logger.info("Requesting user provisioning.")
request_provision()
"""Everything to do with Apps"""
import base64
import enum
import os
from sqlalchemy import Boolean, DateTime, Enum, ForeignKey, Integer, String, Unicode
from sqlalchemy.orm import relationship
from database import db
import helpers.kubernetes as k8s
DEFAULT_APP_SUBDOMAINS = {
"nextcloud": "files",
"wordpress": "www",
"monitoring": "grafana",
}
class App(db.Model):
"""
The App object, interact with the App database object. Data is stored in
the local database.
"""
id = db.Column(Integer, primary_key=True)
name = db.Column(String(length=64))
slug = db.Column(String(length=64), unique=True)
external = db.Column(Boolean, unique=False, nullable=False, server_default='0')
# The URL is only stored in the DB for external applications; otherwise the
# URL is stored in a configmap (see get_url)
url = db.Column(String(length=128), unique=False)
scim_url = db.Column(String(length=1024), nullable=True)
scim_token = db.Column(String(length=1024), nullable=True)
scim_group_support = db.Column(Boolean, nullable=False, server_default='0')
oauthclients = relationship("OAuthClientApp", back_populates="app")
def __init__(self, slug, name, external=False, url=None):
self.slug = slug
self.name = name
self.external = external
self.url = url
def __str__(self):
return f"{self.id} <{self.name}>"
def get_url(self):
"""
Returns the URL where this application is running
For external applications: the URL is stored in the database
For internal applications: the URL is stored in a configmap named
`stackspin-{self.slug}-kustomization-variables` under the key
`{self.slug}_domain`. This function reads that configmap. If the
configmap does not contain a URL for the application (which is
possible, if the app is not installed yet, for example), we return a
default URL.
"""
if self.external:
return self.url
# Get domain name from configmap
ks_config_map = k8s.get_kubernetes_config_map_data(
f"stackspin-{self.slug}-kustomization-variables",
"flux-system")
domain_key = f"{self.slug}_domain"
# If config map found with this domain name for this service, return
# that URL
if ks_config_map and domain_key in ks_config_map.keys():
return f"https://{ks_config_map[domain_key]}"
domain_secret = k8s.get_kubernetes_secret_data(
"stackspin-cluster-variables",
"flux-system")
domain = base64.b64decode(domain_secret['domain']).decode()
# See if there is another default subdomain for this app than just
# "slug.{domain}"
if self.slug in DEFAULT_APP_SUBDOMAINS:
return f"https://{DEFAULT_APP_SUBDOMAINS[self.slug]}.{domain}"
# No default known
return f"https://{self.slug}.{domain}"
def get_status(self):
"""Returns an AppStatus object that describes the current cluster state"""
return AppStatus(self)
def install(self):
"""Creates a Kustomization in the Kubernetes cluster that installs this application"""
# Create add-<app> kustomization
self.__create_kustomization()
def uninstall(self):
"""
Delete the `add-$app` kustomization.
This triggers a deletion of the app's PVCs (so deletes all data), as
well as any other Kustomizations and HelmReleases related to the app.
It also triggers a deletion of the OAuth2Client object. It does not
remove the TLS secret generated by cert-manager.
"""
self.__delete_kustomization()
def delete(self):
"""
Fully deletes an application
This includes user roles, all kubernetes objects and also PVCs, so your
data will be *gone*
"""
# Delete all roles first
for role in self.roles:
db.session.delete(role)
# Delete all related oauthclients
for auth in self.oauthclients:
db.session.delete(auth)
db.session.commit()
db.session.delete(self)
return db.session.commit()
def __create_kustomization(self):
"""Creates the `add-{app_slug}` kustomization in the Kubernetes cluster"""
kustomization_template_filepath = \
os.path.join(self.__get_templates_dir(),
"add-app-kustomization.yaml.jinja")
k8s.store_kustomization(kustomization_template_filepath, self.slug)
def __delete_kustomization(self):
"""Deletes kustomization for this app"""
k8s.delete_kustomization(f"add-{self.slug}")
@property
def namespace(self):
"""
Returns the Kubernetes namespace of this app
FIXME: This should probably become a database field.
"""
if self.slug in ['nextcloud', 'wordpress', 'wekan', 'zulip', 'hedgedoc']:
return 'stackspin-apps'
return 'stackspin'
@property
def roles(self):
"""
All roles that are linked to this app
"""
return AppRole.query.filter_by(
app_id=self.id
).all()
@property
def kustomization(self):
"""Returns the kustomization object for this app"""
return k8s.get_kustomization(self.slug)
def to_dict(self):
"""
represent this object as a dict, compatible for JSON output
"""
return {"id": self.id,
"name": self.name,
"slug": self.slug,
"external": self.external,
"status": self.get_status().to_dict(),
"url": self.get_url()}
@property
def helmreleases(self):
"""Returns the helmreleases associated with the kustomization for this app"""
return k8s.get_all_helmreleases(self.namespace,
f"kustomize.toolkit.fluxcd.io/name={self.slug}")
@staticmethod
def __get_templates_dir():
"""Returns directory that contains the Jinja templates for kubernetes manifests."""
return os.path.join(os.path.dirname(os.path.realpath(__file__)), "templates")
class ProvisionStatus(enum.Enum):
SyncNeeded = "SyncNeeded"
Provisioned = "Provisioned"
# Provisioning is not necessary for this user/role, for
# example because the user has no access to this app.
NotProvisioned = "NotProvisioned"
# SCIM Provisioning is not supported for this particular app.
NotSupported = "NotSupported"
# This user needs to be deleted from this app.
ToDelete = "ToDelete"
# This app role entry corresponds to a Stackspin user that no longer
# exists.
Orphaned = "Orphaned"
# Something went wrong; more information can be found in the
# `last_provision_message`.
Error = "Error"
class AppRole(db.Model): # pylint: disable=too-few-public-methods
"""
The AppRole object, stores the roles Users have on Apps
"""
user_id = db.Column(String(length=64), primary_key=True)
app_id = db.Column(Integer, ForeignKey("app.id"), primary_key=True)
role_id = db.Column(Integer, ForeignKey("role.id"))
provision_status = db.Column(
Enum(
ProvisionStatus,
native_enum=False,
length=32,
values_callable=lambda _: [str(member.value) for member in ProvisionStatus]
),
nullable=False,
default=ProvisionStatus.SyncNeeded,
server_default=ProvisionStatus.SyncNeeded.value
)
last_provision_attempt = db.Column(DateTime, nullable=True)
last_provision_message = db.Column(Unicode(length=256), nullable=True)
scim_id = db.Column(Unicode(length=256), nullable=True)
role = relationship("Role")
app = relationship("App")
def __str__(self):
return (f"role_id: {self.role_id}, user_id: {self.user_id},"
f" app_id: {self.app_id}, role: {self.role}")
class AppStatus(): # pylint: disable=too-few-public-methods
"""
Represents the status of an app in the Kubernetes cluster.
This class can answer a few questions, like "is the app installed?", but
can also return raw status messages from Kustomizations and HelmReleases
This constructor sets three variables:
self.installed (bool): Whether the app should be installed
self.ready (bool): Whether the app is installed correctly
self.message (str): Information about the status
:param app: An app of which the kustomization and helmreleases
property will be used.
:type app: App
"""
def __init__(self, app):
if app.external:
self.installed = True
self.ready = True
self.message = "App is external"
return
kustomization = app.kustomization
if kustomization is not None and "status" in kustomization:
ks_ready, ks_message = k8s.check_condition(kustomization['status'])
self.installed = True
if ks_ready:
self.ready = ks_ready
self.message = "Installed"
return
else:
ks_ready = None
ks_message = "Kustomization does not exist"
self.installed = False
self.ready = False
self.message = "Not installed"
return
helmreleases = app.helmreleases
for helmrelease in helmreleases:
hr_status = helmrelease['status']
hr_ready, hr_message = k8s.check_condition(hr_status)
# For now, only show the message of the first HR that isn't ready
if not hr_ready:
self.ready = False
self.message = f"HelmRelease {helmrelease['metadata']['name']} status: {hr_message}"
return
# If we end up here, all HRs are ready, but the kustomization is not
self.ready = ks_ready
self.message = f"App Kustomization status: {ks_message}"
def __str__(self):
return f"Installed: {self.installed}\tReady: {self.ready}\tMessage: {self.message}"
def to_dict(self):
"""Represents this app status as a dict"""
return {
"installed": self.installed,
"ready": self.ready,
"message": self.message,
}
class OAuthClientApp(db.Model): # pylint: disable=too-few-public-methods
"""
The OAuthClientApp object maps an OAuth client to the corresponding App.
This mapping exists so that
* you can have a different name for the OAuth client than for the app, and
* you can have multiple OAuth clients that belong to the same app.
Also, some apps might have no OAuth client at all.
"""
__tablename__ = "oauthclient_app"
oauthclient_id = db.Column(String(length=64), primary_key=True)
app_id = db.Column(Integer, ForeignKey("app.id"))
app = relationship("App", back_populates="oauthclients")
def __str__(self):
return (f"oauthclient_id: {self.oauthclient_id}, app_id: {self.app_id},"
f" app: {self.app}")
class ScimAttribute(db.Model): # pylint: disable=too-few-public-methods
"""
The ScimAttribute object records that a certain user attribute needs to be
set in a certain app via SCIM.
"""
user_id = db.Column(String(length=64), primary_key=True)
app_id = db.Column(Integer, ForeignKey("app.id"), primary_key=True)
attribute = db.Column(String(length=64), primary_key=True)
def __str__(self):
return (f"attribute: {self.attribute}, user_id: {self.user_id},"
f" app_id: {self.app_id}")
---
apiVersion: kustomize.toolkit.fluxcd.io/v1beta2
kind: Kustomization
metadata:
name: add-{{ app }}
namespace: flux-system
spec:
interval: 1h0m0s
path: ./flux2/cluster/optional/{{ app }}
prune: true
sourceRef:
kind: GitRepository
name: stackspin
from .auth import *
\ No newline at end of file
from flask import current_app, jsonify, request
from flask_jwt_extended import create_access_token
from flask_cors import cross_origin
from datetime import timedelta
from areas import api_v1
from areas.apps.models import App, AppRole
from config import *
from helpers import HydraOauth, BadRequest, KratosApi
@api_v1.route("/login", methods=["POST"])
@cross_origin()
def login():
authorization_url = HydraOauth.authorize()
return jsonify({"authorizationUrl": authorization_url})
@api_v1.route("/hydra/callback")
@cross_origin()
def hydra_callback():
state = request.args.get("state")
code = request.args.get("code")
if state is None:
raise BadRequest("Missing state query param")
if code is None:
raise BadRequest("Missing code query param")
token = HydraOauth.get_token(state, code)
token_id = token["access_token"]
user_info = HydraOauth.get_user_info()
kratos_id = user_info["sub"]
# TODO: add a check to see if this is a valid ID/active account
try:
access_token = create_access_token(
identity=token_id, expires_delta=timedelta(hours=1), additional_claims={"user_id": kratos_id}
)
except Exception as e:
raise BadRequest("Error creating auth token between backend and frontend") from e
apps = App.query.all()
app_roles = []
for app in apps:
tmp_app_role = AppRole.query.filter_by(
user_id=kratos_id, app_id=app.id
).first()
app_roles.append(
{
"name": app.slug,
"role_id": tmp_app_role.role_id if tmp_app_role else None,
}
)
return jsonify(
{
"accessToken": access_token,
"userInfo": {
"id": kratos_id,
"email": user_info["email"],
"name": user_info["name"],
"preferredUsername": user_info["preferred_username"],
"app_roles": app_roles,
},
}
)
from .resources import *
from .resources_service import *
from flask import jsonify, request
from flask_cors import cross_origin
from flask_expects_json import expects_json
from flask_jwt_extended import get_jwt, jwt_required
from areas import api_v1
from helpers.auth_guard import admin_required
from .resources_service import ResourcesService
@api_v1.route("/resources", methods=["GET"])
@jwt_required()
@cross_origin()
@admin_required()
def get_resources():
res = ResourcesService.get_resources()
return jsonify(res)