From 388d2c6d138f96a2bfe9e6917519c1654371e6d5 Mon Sep 17 00:00:00 2001
From: Arie Peterson <arie@greenhost.nl>
Date: Thu, 30 Nov 2023 11:46:29 +0100
Subject: [PATCH] Remove pre-telepresence docker-compose setup

---
 dev.sh             | 28 +++++++++++++++++++++++++-
 docker-compose.yml | 49 ----------------------------------------------
 run_app.sh         | 43 ----------------------------------------
 3 files changed, 27 insertions(+), 93 deletions(-)
 delete mode 100644 docker-compose.yml
 delete mode 100755 run_app.sh

diff --git a/dev.sh b/dev.sh
index f2cac835..e9f383a8 100755
--- a/dev.sh
+++ b/dev.sh
@@ -1,10 +1,30 @@
-#!/bin/bash
+#!/usr/bin/env bash
 
 # TODO:
 # * Check that KUBECONFIG is set, maybe load automatically like before?
 # * env var for native/docker mode
 # * env var for local (or remote) docker image name
 
+set -u
+
+# If KUBECONFIG is not set, we try to find a kubeconfig file in a standard
+# location.
+if ! [[ -v KUBECONFIG ]]
+then
+  if [ -f "$(pwd)/kubeconfig/kube_config_cluster.yml" ]
+  then
+    export KUBECONFIG="$(pwd)/kubeconfig/kube_config_cluster.yml"
+  elif [ -f "$(pwd)/backend/kubeconfig/kube_config_cluster.yml" ]
+  then
+    export KUBECONFIG="$(pwd)/backend/kubeconfig/kube_config_cluster.yml"
+  fi
+  if [[ -v KUBECONFIG ]]
+  then
+    echo "Local kubeconfig file found."
+    echo "Setting KUBECONFIG=$KUBECONFIG"
+  fi
+fi
+
 TELEPRESENCE_NOT_RUNNING=0
 TELEPRESENCE_NATIVE=1
 TELEPRESENCE_DOCKER=2
@@ -139,6 +159,12 @@ cleanCluster() {
   helm uninstall -n ambassador traffic-manager
 }
 
+if [ $# -eq 0 ]
+then
+  echo "No command given. See README.md for help."
+  exit 1
+fi
+
 if [ "$1" == "reset" ]
 then
   telepresence quit -s
diff --git a/docker-compose.yml b/docker-compose.yml
deleted file mode 100644
index f2f3437a..00000000
--- a/docker-compose.yml
+++ /dev/null
@@ -1,49 +0,0 @@
-version: "3"
-services:
-  frontend:
-    build:
-      context: ./frontend
-    env_file: ./frontend/local.env
-    volumes:
-      - ./frontend/src:/home/node/app/src
-      - ./frontend/public:/home/node/app/public
-    ports:
-      - "3000:3000"
-    command: "yarn start --watch --verbose"
-  flask_app:
-    build:
-      context: ./backend
-    environment:
-      - FLASK_APP=app.py
-      - FLASK_ENV=development
-      - HYDRA_CLIENT_ID=dashboard-local
-
-      # Domain-specific URL settings
-      - HYDRA_AUTHORIZATION_BASE_URL=https://sso.$DOMAIN/oauth2/auth
-      - TOKEN_URL=https://sso.$DOMAIN/oauth2/token
-      - HYDRA_PUBLIC_URL=https://sso.$DOMAIN
-
-      # Local path overrides
-      - DASHBOARD_URL=http://localhost:3000
-      - KRATOS_PUBLIC_URL=http://stackspin_proxy:8081/kratos
-      - KRATOS_ADMIN_URL=http://kube_port_kratos_admin:8000
-      - HYDRA_ADMIN_URL=http://kube_port_hydra_admin:4445
-      - LOGIN_PANEL_URL=http://stackspin_proxy:8081/web/
-      - DATABASE_URL=mysql+pymysql://stackspin:$DATABASE_PASSWORD@kube_port_mysql/stackspin
-
-      # ENV variables that are deployment-specific
-      - SECRET_KEY=$FLASK_SECRET_KEY
-      - HYDRA_CLIENT_SECRET=$HYDRA_CLIENT_SECRET
-      - KUBECONFIG=/.kube/config
-
-      # Disable loading config from the service account
-      - LOAD_INCLUSTER_CONFIG=false
-    ports:
-      - "5000:5000"
-    user: "${KUBECTL_UID}:${KUBECTL_GID}"
-    volumes:
-      - ./backend:/app
-      - "$KUBECONFIG:/.kube/config"
-    depends_on:
-      - kube_port_mysql
-    entrypoint: ["bash", "-c", "flask run --host $$(hostname -i)"]
diff --git a/run_app.sh b/run_app.sh
deleted file mode 100755
index fa4d0b3d..00000000
--- a/run_app.sh
+++ /dev/null
@@ -1,43 +0,0 @@
-#!/usr/bin/env bash
-
-if [ -f "./backend/kubeconfig/kube_config_cluster.yml" ]; then
-    echo "Local KUBECONFIG configuration file found, applying custom configuration."
-    export KUBECONFIG=./backend/kubeconfig/kube_config_cluster.yml
-else
-    echo "no Local KUBECONFIG configuration file found, skipping custom configuration."
-fi
-
-set -euo pipefail
-
-
-
-dockerComposeArgs=$@
-
-export DATABASE_PASSWORD=$(kubectl get secret -n flux-system stackspin-single-sign-on-variables -o jsonpath --template '{.data.dashboard_database_password}' | base64 -d)
-export DOMAIN=$(kubectl get secret -n flux-system stackspin-cluster-variables -o jsonpath --template '{.data.domain}' | base64 -d)
-export HYDRA_CLIENT_SECRET=$(kubectl get secret -n flux-system stackspin-dashboard-local-oauth-variables -o jsonpath --template '{.data.client_secret}' | base64 -d)
-export FLASK_SECRET_KEY=$(kubectl get secret -n flux-system stackspin-dashboard-variables -o jsonpath --template '{.data.backend_secret_key}' | base64 -d)
-
-
-if [[ -z "$DATABASE_PASSWORD" ]]; then
-    echo "Could not find database password in stackspin-single-sign-on-variables secret"
-    exit 1
-fi
-
-if [[ -z "$DOMAIN" ]]; then
-    echo "Could not find domain name in stackspin-cluster-variables secret"
-    exit 1
-fi
-
-if [[ -z "$FLASK_SECRET_KEY" ]]; then
-    echo "Could not find backend_secret_key in stackspin-dashboard-variables secret"
-    exit 1
-fi
-
-if [[ -z "$HYDRA_CLIENT_SECRET" ]]; then
-    echo "Could not find client_secret in stackspin-dashboard-local-oauth-variables secret"
-    echo "make sure you add this secret following instructions in the dashboard-dev-overrides repository"
-    exit 1
-fi
-
-KUBECTL_UID=${UID:-1001} KUBECTL_GID=${GID:-0} docker compose up $dockerComposeArgs
-- 
GitLab