#!/usr/bin/env bash

# Ask for the MyNWS Volume Backup name; it is later scanned for the creation
# date and the original PVC name, so validate it up front.
read -rp $'What is the name of the Volume Backup in MyNWS? (https://my.nws.netways.de/kubernetes/backups)\n' name

# Backup names consist only of lowercase alphanumerics, '-' and '_'.
# An empty answer fails this match too ('+' requires at least one char),
# so no separate -z check is needed.
regex='^[-_0-9a-z]+$'
if [[ ! "${name}" =~ $regex ]]; then
  echo "Incorrect input, please make sure to use a valid name" >&2
  exit 1
fi

# Ask for the target namespace of the restored PVC; fall back to "default"
# when the answer is left empty.
read -rp $'What is the name of the namespace where the restored pvc should be in? (default: default)\n' namespace
namespace=${namespace:-default}

# Keep prompting until the namespace actually exists in the cluster.
while ! kubectl get ns "$namespace" &>/dev/null; do
  read -rp $'Namespace does not exist, please try again:\n' namespace
done

# The backup name embeds its creation date (YYYY-MM-DD); extract it for the
# restore job. Abort when no date can be found.
regex='[0-9]{4}-[01][0-9]-[0-3][0-9]'
if [[ "${name}" =~ $regex ]]; then
  date=${BASH_REMATCH[0]}
else
  echo "Cannot determine backup creation date" >&2
  exit 1
fi

# The backup name also embeds the UUID-based name of the original PVC; the
# restore job needs it to locate the volume. Abort when it cannot be found —
# the original code fell through here and continued with an empty $pvc.
regex='pvc-[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}'
if [[ "${name}" =~ $regex ]]; then
  pvc=${BASH_REMATCH[0]}
else
  echo "Cannot determine pvc name" >&2
  exit 1
fi

printf -- '---\nINFO Restoring "%s" to "%s-restore" in Namespace "%s".\n' "$name" "$pvc" "$namespace"

# Read all of stdin (typically a here-doc) into the variable named by $1,
# preserving every newline exactly as received. A $(cat) round trip would
# strip trailing newlines, hence the read-until-NUL approach.
function set-var {
  local target=$1
  shift
  local content
  IFS= read -rd '' content
  printf -v "$target" '%s' "$content"
}

# Render the RBAC objects and the restore Job into $yaml. The unquoted
# here-doc delimiter lets ${namespace}, ${pvc} and ${date} expand. The env
# values are quoted so that date-like or numeric values (e.g. a namespace
# literally named "123", or the YYYY-MM-DD date) stay YAML strings instead
# of being parsed as timestamps/numbers, which would make kubectl reject the
# manifest. serviceAccountName replaces the deprecated serviceAccount field.
set-var yaml <<EOF
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    app.kubernetes.io/name: pvc-restore
  name: pvc-restore
rules:
- apiGroups: [""]
  resources: ["persistentvolumes","persistentvolumeclaims"]
  verbs: ["create", "get"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  labels:
    app.kubernetes.io/name: pvc-restore
  name: pvc-restore
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: pvc-restore
subjects:
- kind: ServiceAccount
  name: pvc-restore
  namespace: kube-system
---
apiVersion: v1
kind: ServiceAccount
metadata:
  labels:
    app.kubernetes.io/name: pvc-restore
  name: pvc-restore
  namespace: kube-system
---
apiVersion: batch/v1
kind: Job
metadata:
  labels:
    app.kubernetes.io/name: pvc-restore
    batch.kubernetes.io/job-name: pvc-restore
    job-name: pvc-restore
  name: pvc-restore
  namespace: kube-system
spec:
  backoffLimit: 1
  completionMode: NonIndexed
  completions: 1
  parallelism: 1
  podReplacementPolicy: Failed
  template:
    metadata:
      labels:
        app.kubernetes.io/name: pvc-restore
        job-name: pvc-restore
    spec:
      containers:
      - name: pvc-restore
        image: registry.nws.netways.de/kubernetes/pvc-backup-restore:latest
        imagePullPolicy: Always
        env:
        - name: NAMESPACE
          value: "${namespace}"
        - name: PVC_NAME
          value: "${pvc}"
        - name: DATE
          value: "${date}"
        resources:
          limits:
            memory: 50Mi
            cpu: 100m
          requests:
            memory: 30Mi
            cpu: 10m
        securityContext:
          allowPrivilegeEscalation: false
          capabilities:
            drop:
            - ALL
        volumeMounts:
        - name: cloud-config
          mountPath: /etc/cloud-config
          readOnly: true
      restartPolicy: Never
      serviceAccountName: pvc-restore
      nodeSelector:
        magnum.openstack.org/role: master
      tolerations:
      - key: node-role.kubernetes.io/control-plane
        effect: NoSchedule
      - key: node-role.kubernetes.io/master
        effect: NoSchedule
      securityContext:
        fsGroup: 123
        runAsGroup: 563
        runAsUser: 563
        runAsNonRoot: true
      volumes:
      - name: cloud-config
        hostPath:
          path: /etc/kubernetes/cloud-config-occm
          type: File
EOF

# Remove any leftovers from a previous run before (re)creating the resources;
# a failing delete (nothing to delete) is expected and intentionally ignored.
kubectl delete -f - <<<"$yaml" &>/dev/null
# An apply failure was previously swallowed, leaving the script to wait two
# minutes for a job that never existed — fail fast instead.
if ! kubectl apply -f - <<<"$yaml" &>/dev/null; then
  echo "ERROR Could not create the restore job resources" >&2
  exit 1
fi

printf "INFO Waiting for job to complete..."
if ! kubectl wait --timeout=2m --for=condition=Complete -n kube-system job/pvc-restore &>/dev/null; then
  # Collect the job logs so the user can attach them to a support ticket.
  printf '\nERROR Unfortunately the restore did not succeed, please open a ticket and provide the generated "debug.log". Sorry for the inconvenience.\n' >&2
  kubectl logs -n kube-system -l job-name=pvc-restore &> debug.log
  exit 1
fi
echo "Done"

echo "INFO Job completed, pvc is now available to be used"
printf "INFO Cleaning Up..."
kubectl delete -f - <<<"$yaml" &>/dev/null
echo "Done"