diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..88d3f1105363420c9c31bf22bbcab8bfc43179c6
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,2 @@
+managed_clusters/*_credentials.sh
+**/keypair.key
diff --git a/README.md b/README.md
index bc3b42f815e589073512e00ecc77f81c689496b9..9803007b19604c2a66ba6afb7a6942c868c1ea3d 100644
--- a/README.md
+++ b/README.md
@@ -1,6 +1,10 @@
 # Kubernetes as a Service administration
 This repo is used to create clusters on [JSC-Cloud](https://cloud.jsc.fz-juelich.de) and deploy software on them.
 
+## Supported Labels
+ - kured: "true"  -> Install [Kured](https://github.com/kubereboot/kured), this will reboot your nodes if necessary on a sunday between 2am and 5am (Timezone: Europe/Berlin). [more](https://gitlab.jsc.fz-juelich.de/kaas/fleet-deployments/-/tree/kured)
+ - cinder-csi: "true"  -> Install [Cinder-CSI Plugin](https://github.com/kubernetes/cloud-provider-openstack/tree/release-1.26/docs/cinder-csi-plugin), this will create a storage class on the cluster, which uses OpenStack Cinder Volumes as persistent storage. [more](https://gitlab.jsc.fz-juelich.de/kaas/fleet-deployments/-/tree/openstack-cinder-csi)
+
 
 ## Create Cluster
 Requirements:
@@ -13,7 +17,7 @@ Create OpenStack environment in users project:
  - `cd fleet_deployments/managed_clusters`
  - Store `jsc-cloud-team` credentials in `managed_clusters/management_credentials.sh`
  - Store `<user>` credentials in `managed_clusters/<NAME>_credentials.sh` (<NAME> must be equal to the Name given in create.sh)
- - # UPDATE create.sh , fill in name, project id and subnet cidr
+ - **Update create.sh: fill in name, project ID and subnet CIDR**
  - `/bin/bash create.sh`
 
 Create NodeTemplate / RKETemplate
@@ -25,8 +29,7 @@ Create NodeTemplate / RKETemplate
  - **IMPORTANT: At the end of the node template creation, `Engine Options` -> `Docker Install URL` must be "None"!**
  - RKE1 Configuration (sidebar) -> RKE Templates
  - Add template (top right), name should be equal to cluster name, revision can be v1
- - Click "Edit as YAML" on the right side, copy the rke.yaml file from this repo into it.
- - Replace the secrets and subnet ID from the output given by create.sh earlier
+ - Click "Edit as YAML" on the right side, copy the ${NAME}/rke.yaml file from this repo into it.
 
 Create Cluster:
  - Browse to https://zam12142.zam.kfa-juelich.de , log in
@@ -35,7 +38,7 @@ Create Cluster:
  - Cluster Name: as before in create.sh, create two nodepools (one for main nodes [check: drain before delete, etcd, control-plane], one for worker nodes [check: drain before delete, worker]). Set "Auto Replace" to 5 minutes. Use the previously created node templates.
  - Cluster Options: "Use an existing RKE Template and revision" -> Choose the previously created one.
  - Member roles (above Cluster Options) -> Add member as owner to this cluster. If user does not exists yet, it can be done later.
- - Labels: can be used to install default software. See List below for available labels
+ - Labels: can be used to install default software. See List above for available labels
  - Scroll down: Create -> Done.
 
 How to Manage Cluster (once it's created, may take up to 10 minutes):
@@ -49,11 +52,7 @@ How to increase/decrease number of nodes:
     - `kubectl drain --ignore-daemonsets --delete-emptydir-data <node>` (or in UI, same as above)
     - In Cluster Management select node and click on `Scale Down`. (Deleted nodes would be replaced otherwise)
 
-## Supported Labels
- - kured: "true"  -> Install [Kured](https://github.com/kubereboot/kured), this will reboot your nodes if necessary on a sunday between 2am and 5am (Timezone: Europe/Berlin). [more](https://gitlab.jsc.fz-juelich.de/kaas/fleet-deployments/-/tree/kured)
- - cinder-csi: "true"  -> Install [Cinder-CSI Plugin](https://github.com/kubernetes/cloud-provider-openstack/tree/release-1.26/docs/cinder-csi-plugin), this will create a storage class on the cluster, which uses OpenStack Cinder Volumes as persistent storage. [more](https://gitlab.jsc.fz-juelich.de/kaas/fleet-deployments/-/tree/openstack-cinder-csi)
-
 
 ## Delete cluster
  - Delete Cluster in Rancher UI
- - Use `delete.sh` to revert all changes done before (network, security-group, static-routes, etc.)
\ No newline at end of file
+ - Use `delete.sh` to revert all changes done before (network, security-group, static-routes, etc.)
diff --git a/managed_clusters/create.sh b/managed_clusters/create.sh
index 95d9af6d5bbdc586a3af9eb4db5080322a9b9e10..e76d463fd3f6c6919b477bc76ce116b4df682fd1 100644
--- a/managed_clusters/create.sh
+++ b/managed_clusters/create.sh
@@ -2,16 +2,21 @@
 
 ### Customization
 
-NAME="" # Enter a (ideally) unique name for the cluster
-PROJECT_ID="" # project id from the users project, where the k8s cluster should be created
-SUBNET_CIDR="" # Unique CIDR (10.0.x.0/24) , each cluster needs a different subnet CIDR.
+NAME="jupyterjsc-production" # Enter a (ideally) unique name for the cluster
+PROJECT_ID="800dd44618eb4fe08f874109d6a54417" # project id from the users project, where the k8s cluster should be created
+SUBNET_CIDR="10.0.101.0/24" # Unique CIDR (10.0.x.0/24) , each cluster needs a different subnet CIDR.
 
+DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
+
+if [[ ! -f ${DIR}/${NAME}_credentials.sh ]] || [[ ! -f ${DIR}/management_credentials.sh ]]; then
+  echo "Missing credentials. Stop script"
+  exit 1
+fi
 
 ###
 # set to false, to get the output at the end without creating anything
 CREATE="true"
 
-DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
 mkdir -p ${DIR}/${NAME}
 
 # Some variables for our `jsc-cloud-team` management project
@@ -114,24 +119,8 @@ sed -e "s@<name>@${NAME}@g" ${DIR}/userdata_worker.yaml > ${DIR}/${NAME}/userdat
 echo "engineInstallUrl:            None"
 echo "----------------------------------"
 
-# You can use the rke.yaml file and create a RKE Template in Rancher 
-echo "----------------------------------"
-echo "--- RkeTemplate (replace in rke.yaml line 16,17,22) ---"
-echo "        [Global]"
-echo "        auth-url=https://cloud.jsc.fz-juelich.de:5000/v3"
-echo "        application-credential-id=$OS_APPLICATION_CREDENTIAL_ID"
-echo "        application-credential-secret=$OS_APPLICATION_CREDENTIAL_SECRET"
-echo "        region=JSCCloud"
-echo "        tls-insecure=true"
-echo "        [LoadBalancer]"
-echo "        use-octavia=true"
-echo "        subnet-id=$USER_SUBNET_ID"
-echo "        floating-network-id=c2ce19a1-ad08-41fb-8dd2-4b97d78815fc"
-echo "        manage-security-groups=false"
-echo "        [BlockStorage]"
-echo "        bs-version=v2"
-echo "        ignore-volume-az=true"
-echo "----------------------------------"
+
+sed -e "s@<credential_id>@${OS_APPLICATION_CREDENTIAL_ID}@g" -e "s@<credential_secret>@${OS_APPLICATION_CREDENTIAL_SECRET}@g" -e "s@<subnet_id>@${USER_SUBNET_ID}@g" ${DIR}/rke.yaml > ${DIR}/${NAME}/rke.yaml
 
 # ssh into the rancher-1 vm.
 # Create a NFS folder for the cluster-backups
diff --git a/managed_clusters/credentials_unset.sh b/managed_clusters/credentials_unset.sh
new file mode 100644
index 0000000000000000000000000000000000000000..a5a796c1f01e8ca342e7bb218feb3a0400e7acd6
--- /dev/null
+++ b/managed_clusters/credentials_unset.sh
@@ -0,0 +1,10 @@
+#!/usr/bin/env bash
+
+unset OS_AUTH_TYPE
+unset OS_AUTH_URL
+unset OS_IDENTITY_API_VERSION
+unset OS_REGION_NAME
+unset OS_INTERFACE
+unset OS_APPLICATION_CREDENTIAL_ID
+unset OS_APPLICATION_CREDENTIAL_SECRET
+
diff --git a/managed_clusters/rke.yaml b/managed_clusters/rke.yaml
index 359378afe91e39c7f9ef360bb5e742990773b5c9..d6cae888c26760b5e77745aedac71ba4cb6c9ce8 100644
--- a/managed_clusters/rke.yaml
+++ b/managed_clusters/rke.yaml
@@ -13,13 +13,13 @@ rancher_kubernetes_engine_config:
       cloud-config: |-
         [Global]
         auth-url=https://cloud.jsc.fz-juelich.de:5000/v3
-        application-credential-id=...
-        application-credential-secret=...
+        application-credential-id=<credential_id>
+        application-credential-secret=<credential_secret>
         region=JSCCloud
         tls-insecure=true
         [LoadBalancer]
         use-octavia=true
-        subnet-id=...
+        subnet-id=<subnet_id>
         floating-network-id=c2ce19a1-ad08-41fb-8dd2-4b97d78815fc
         manage-security-groups=false
         [BlockStorage]