diff --git a/kured/fleet.yaml.all b/kured/fleet.yaml.all
deleted file mode 100644
index dd5ef448714f897b3e516b3880d6a23659e6847a..0000000000000000000000000000000000000000
--- a/kured/fleet.yaml.all
+++ /dev/null
@@ -1,129 +0,0 @@
-defaultNamespace: ingress-nginx
-helm:
-  releaseName: ingress-nginx
-  chart: https://github.com/kubernetes/ingress-nginx/releases/download/helm-chart-4.4.0/ingress-nginx-4.4.0.tgz
-  values:
-    controller:
-      ingressClassResource:
-        default: true
-      config:
-        compute-full-forwarded-for: "true"
-        proxy-real-ip-cidr: 0.0.0.0/0
-        real-ip-header: proxy_protocol
-        use-forwarded-headers: "true"
-        use-proxy-protocol: "true"
-        use-http2: "false"
-        upstream-keepalive-connections: "0"
-        ssl-session-timeout: "1d"
-        ssl-session-cache-size: "50m"
-        ssl-protocols: "TLSv1.2 TLSv1.3"
-        http-snippet: |
-          upstream default-retries {
-              server default-proxy-public.jupyterjsc.svc:443 max_fails=0;
-              server default-proxy-public-0.jupyterjsc.svc:443 max_fails=0;
-              server default-proxy-public-1.jupyterjsc.svc:443 max_fails=0;
-              server default-proxy-public-2.jupyterjsc.svc:443 max_fails=0;
-              server default-proxy-public-3.jupyterjsc.svc:443 max_fails=0;
-              server default-proxy-public-4.jupyterjsc.svc:443 max_fails=0;
-              keepalive 32;
-          }
-          upstream juniq-retries {
-              server juniq-proxy-public.jupyterjsc.svc:443 max_fails=0;
-              server juniq-proxy-public-0.jupyterjsc.svc:443 max_fails=0;
-              server juniq-proxy-public-1.jupyterjsc.svc:443 max_fails=0;
-              server juniq-proxy-public-2.jupyterjsc.svc:443 max_fails=0;
-              server juniq-proxy-public-3.jupyterjsc.svc:443 max_fails=0;
-              server juniq-proxy-public-4.jupyterjsc.svc:443 max_fails=0;
-              keepalive 32;
-          }
-          upstream eurocc-retries {
-              server eurocc-proxy-public.jupyterjsc.svc:443 max_fails=0;
-              server eurocc-proxy-public-0.jupyterjsc.svc:443 max_fails=0;
-              server eurocc-proxy-public-1.jupyterjsc.svc:443 max_fails=0;
-              server eurocc-proxy-public-2.jupyterjsc.svc:443 max_fails=0;
-              server eurocc-proxy-public-3.jupyterjsc.svc:443 max_fails=0;
-              server eurocc-proxy-public-4.jupyterjsc.svc:443 max_fails=0;
-              keepalive 32;
-          }
-          upstream portalgauss-retries {
-              server portalgauss-proxy-public.jupyterjsc.svc:443 max_fails=0;
-              server portalgauss-proxy-public-0.jupyterjsc.svc:443 max_fails=0;
-              server portalgauss-proxy-public-1.jupyterjsc.svc:443 max_fails=0;
-              server portalgauss-proxy-public-2.jupyterjsc.svc:443 max_fails=0;
-              server portalgauss-proxy-public-3.jupyterjsc.svc:443 max_fails=0;
-              server portalgauss-proxy-public-4.jupyterjsc.svc:443 max_fails=0;
-              keepalive 32;
-          }
-          upstream dev1-retries {
-              server dev1-proxy-public.jupyterjscdev.svc:443 max_fails=0;
-              server dev1-proxy-public-0.jupyterjscdev.svc:443 max_fails=0;
-              server dev1-proxy-public-1.jupyterjscdev.svc:443 max_fails=0;
-              server dev1-proxy-public-2.jupyterjscdev.svc:443 max_fails=0;
-              server dev1-proxy-public-3.jupyterjscdev.svc:443 max_fails=0;
-              server dev1-proxy-public-4.jupyterjscdev.svc:443 max_fails=0;
-              keepalive 32;
-          }
-          upstream dev2-retries {
-              server dev2-proxy-public.jupyterjscdev.svc:443 max_fails=0;
-              server dev2-proxy-public-0.jupyterjscdev.svc:443 max_fails=0;
-              server dev2-proxy-public-1.jupyterjscdev.svc:443 max_fails=0;
-              server dev2-proxy-public-2.jupyterjscdev.svc:443 max_fails=0;
-              server dev2-proxy-public-3.jupyterjscdev.svc:443 max_fails=0;
-              server dev2-proxy-public-4.jupyterjscdev.svc:443 max_fails=0;
-              keepalive 32;
-          }
-          upstream coeraise-retries {
-              server coeraise-proxy-public.jupyterjsc.svc:443 max_fails=0;
-              server coeraise-proxy-public-0.jupyterjsc.svc:443 max_fails=0;
-              server coeraise-proxy-public-1.jupyterjsc.svc:443 max_fails=0;
-              server coeraise-proxy-public-2.jupyterjsc.svc:443 max_fails=0;
-              server coeraise-proxy-public-3.jupyterjsc.svc:443 max_fails=0;
-              server coeraise-proxy-public-4.jupyterjsc.svc:443 max_fails=0;
-              keepalive 32;
-          }
-      extraArgs:
-        enable-ssl-passthrough: ""
-      kind: DaemonSet
-      metrics:
-        enabled: true
-        serviceMonitor:
-          additionalLabels:
-            release: prometheus-stack
-          enabled: true
-          namespace: cattle-monitoring-system
-          metricRelabelings:
-          - action: drop
-            regex: '(nginx_ingress_controller_request_duration_seconds_bucket|nginx_ingress_controller_response_size_bucket|nginx_ingress_controller_request_size_bucket|nginx_ingress_controller_response_duration_seconds_bucket|nginx_ingress_controller_bytes_sent_bucket)'
-            sourceLabels: [__name__]
-      service:
-        annotations:
-          loadbalancer.openstack.org/keep-floatingip: "true"
-          loadbalancer.openstack.org/proxy-protocol: "true"
-          #loadbalancer.openstack.org/timeout-client-data: "600000"
-          #loadbalancer.openstack.org/timeout-member-connect: "600000"
-          loadbalancer.openstack.org/timeout-member-data: "600000"
-targetCustomizations:
-  - name: staging
-    clusterSelector:
-      matchLabels:
-        stage: staging
-        provider.cattle.io: rke
-    helm:
-      releaseName: ingress-nginx-staging
-      values:
-        controller:
-          service:
-            loadBalancerIP: 134.94.199.199
-  - name: production
-    clusterSelector:
-      matchLabels:
-        stage: production
-        provider.cattle.io: rke
-    helm:
-      releaseName: ingress-nginx-production
-      values:
-        controller:
-          service:
-            loadBalancerIP: 134.94.199.54
-dependsOn:
-  - name: basics-monitor-crd
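
Note on the http-snippet above: each upstream lists a JupyterHub deployment's main proxy-public Service followed by its per-replica Services (-0 through -4). max_fails=0 keeps nginx from ever marking a peer unavailable, so a failed request is retried against every proxy pod, and keepalive 32 pools idle upstream connections. A sketch of how an Ingress could route traffic through such an upstream; the annotation value below is hypothetical and not part of this change:

    metadata:
      annotations:
        nginx.ingress.kubernetes.io/server-snippet: |
          # hypothetical: serve /hub/ from the retry upstream defined in http-snippet
          location /hub/ {
              proxy_pass https://default-retries;
          }
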
diff --git a/kured/fleet.yaml.dev b/kured/fleet.yaml.dev
deleted file mode 100644
index 2f888e0476dbfbcb092e5a520856caaa41528e32..0000000000000000000000000000000000000000
--- a/kured/fleet.yaml.dev
+++ /dev/null
@@ -1,84 +0,0 @@
-defaultNamespace: ingress-nginx
-helm:
-  releaseName: ingress-nginx
-  chart: https://github.com/kubernetes/ingress-nginx/releases/download/helm-chart-4.4.0/ingress-nginx-4.4.0.tgz
-  values:
-    controller:
-      ingressClassResource:
-        default: true
-      config:
-        compute-full-forwarded-for: "true"
-        proxy-real-ip-cidr: 0.0.0.0/0
-        real-ip-header: proxy_protocol
-        use-forwarded-headers: "true"
-        use-proxy-protocol: "true"
-        use-http2: "false"
-        upstream-keepalive-connections: "0"
-        ssl-session-timeout: "1d"
-        ssl-session-cache-size: "50m"
-        ssl-protocols: "TLSv1.2 TLSv1.3"
-        http-snippet: |
-          upstream dev1-retries {
-              server dev1-proxy-public.jupyterjscdev.svc:443 max_fails=0;
-              server dev1-proxy-public-0.jupyterjscdev.svc:443 max_fails=0;
-              server dev1-proxy-public-1.jupyterjscdev.svc:443 max_fails=0;
-              server dev1-proxy-public-2.jupyterjscdev.svc:443 max_fails=0;
-              server dev1-proxy-public-3.jupyterjscdev.svc:443 max_fails=0;
-              server dev1-proxy-public-4.jupyterjscdev.svc:443 max_fails=0;
-              keepalive 32;
-          }
-          upstream dev2-retries {
-              server dev2-proxy-public.jupyterjscdev.svc:443 max_fails=0;
-              server dev2-proxy-public-0.jupyterjscdev.svc:443 max_fails=0;
-              server dev2-proxy-public-1.jupyterjscdev.svc:443 max_fails=0;
-              server dev2-proxy-public-2.jupyterjscdev.svc:443 max_fails=0;
-              server dev2-proxy-public-3.jupyterjscdev.svc:443 max_fails=0;
-              server dev2-proxy-public-4.jupyterjscdev.svc:443 max_fails=0;
-              keepalive 32;
-          }
-      extraArgs:
-        enable-ssl-passthrough: ""
-      kind: DaemonSet
-      metrics:
-        enabled: true
-        serviceMonitor:
-          additionalLabels:
-            release: prometheus-stack
-          enabled: true
-          namespace: cattle-monitoring-system
-          metricRelabelings:
-          - action: drop
-            regex: '(nginx_ingress_controller_request_duration_seconds_bucket|nginx_ingress_controller_response_size_bucket|nginx_ingress_controller_request_size_bucket|nginx_ingress_controller_response_duration_seconds_bucket|nginx_ingress_controller_bytes_sent_bucket)'
-            sourceLabels: [__name__]
-      service:
-        annotations:
-          loadbalancer.openstack.org/keep-floatingip: "true"
-          loadbalancer.openstack.org/proxy-protocol: "true"
-          #loadbalancer.openstack.org/timeout-client-data: "600000"
-          #loadbalancer.openstack.org/timeout-member-connect: "600000"
-          loadbalancer.openstack.org/timeout-member-data: "600000"
-targetCustomizations:
-  - name: staging
-    clusterSelector:
-      matchLabels:
-        stage: staging
-        provider.cattle.io: rke
-    helm:
-      releaseName: ingress-nginx-staging
-      values:
-        controller:
-          service:
-            loadBalancerIP: 134.94.199.199
-  - name: production
-    clusterSelector:
-      matchLabels:
-        stage: production
-        provider.cattle.io: rke
-    helm:
-      releaseName: ingress-nginx-production
-      values:
-        controller:
-          service:
-            loadBalancerIP: 134.94.199.54
-dependsOn:
-  - name: basics-monitor-crd
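
Note on targetCustomizations: Fleet matches downstream clusters by label and overlays the per-target Helm values on top of the base values, so staging and production each get their own release name and loadBalancerIP. A minimal sketch of the labels a cluster object would need to be picked up by the staging customization; the cluster name and namespace are assumptions:

    apiVersion: fleet.cattle.io/v1alpha1
    kind: Cluster
    metadata:
      name: staging-cluster        # hypothetical
      namespace: fleet-default     # assumed Fleet workspace
      labels:
        stage: staging
        provider.cattle.io: rke
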
diff --git a/kured/fleet.yaml.min b/kured/fleet.yaml.min
deleted file mode 100644
index 6c2407e97222483c91b7c259644140432d864cd1..0000000000000000000000000000000000000000
--- a/kured/fleet.yaml.min
+++ /dev/null
@@ -1,65 +0,0 @@
-defaultNamespace: ingress-nginx
-helm:
-  releaseName: ingress-nginx
-  chart: https://github.com/kubernetes/ingress-nginx/releases/download/helm-chart-4.4.0/ingress-nginx-4.4.0.tgz
-  values:
-    controller:
-      ingressClassResource:
-        default: true
-      config:
-        compute-full-forwarded-for: "true"
-        proxy-real-ip-cidr: 0.0.0.0/0
-        real-ip-header: proxy_protocol
-        use-forwarded-headers: "true"
-        use-proxy-protocol: "true"
-        use-http2: "false"
-        upstream-keepalive-connections: "0"
-        ssl-session-timeout: "1d"
-        ssl-session-cache-size: "50m"
-        ssl-protocols: "TLSv1.2 TLSv1.3"
-      extraArgs:
-        enable-ssl-passthrough: ""
-      kind: DaemonSet
-      metrics:
-        enabled: true
-        serviceMonitor:
-          additionalLabels:
-            release: prometheus-stack
-          enabled: true
-          namespace: cattle-monitoring-system
-          metricRelabelings:
-          - action: drop
-            regex: '(nginx_ingress_controller_request_duration_seconds_bucket|nginx_ingress_controller_response_size_bucket|nginx_ingress_controller_request_size_bucket|nginx_ingress_controller_response_duration_seconds_bucket|nginx_ingress_controller_bytes_sent_bucket)'
-            sourceLabels: [__name__]
-      service:
-        annotations:
-          loadbalancer.openstack.org/keep-floatingip: "true"
-          loadbalancer.openstack.org/proxy-protocol: "true"
-          #loadbalancer.openstack.org/timeout-client-data: "600000"
-          #loadbalancer.openstack.org/timeout-member-connect: "600000"
-          loadbalancer.openstack.org/timeout-member-data: "600000"
-targetCustomizations:
-  - name: staging
-    clusterSelector:
-      matchLabels:
-        stage: staging
-        provider.cattle.io: rke
-    helm:
-      releaseName: ingress-nginx-staging
-      values:
-        controller:
-          service:
-            loadBalancerIP: 134.94.199.199
-  - name: production
-    clusterSelector:
-      matchLabels:
-        stage: production
-        provider.cattle.io: rke
-    helm:
-      releaseName: ingress-nginx-production
-      values:
-        controller:
-          service:
-            loadBalancerIP: 134.94.199.54
-dependsOn:
-  - name: basics-monitor-crd
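
Note on bundle wiring: the dependsOn entry makes Fleet wait for the basics-monitor-crd bundle (presumably the one providing the ServiceMonitor CRD) before deploying, and the bundle itself is only deployed if a GitRepo resource references its directory. A minimal sketch of such a GitRepo; the names, URL, and branch are assumptions:

    apiVersion: fleet.cattle.io/v1alpha1
    kind: GitRepo
    metadata:
      name: cluster-config         # hypothetical
      namespace: fleet-default     # assumed Fleet workspace
    spec:
      repo: https://git.example.com/ops/fleet-bundles.git   # hypothetical URL
      branch: main
      paths:
        - kured
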