Skip to content
Snippets Groups Projects
Commit 3fdd56fc authored by Tim Kreuzer's avatar Tim Kreuzer
Browse files

Merge branch 'kured' of gitlab.jsc.fz-juelich.de:kaas/fleet-deployments into kured

parents b9cff294 94da137e
Branches
No related tags found
No related merge requests found
# Fleet bundle: ingress-nginx deployed from the upstream Helm chart tarball.
# NOTE(review): indentation reconstructed from a flattened paste — verify
# against the original fleet.yaml in the repository before deploying.
defaultNamespace: ingress-nginx
helm:
  releaseName: ingress-nginx
  chart: https://github.com/kubernetes/ingress-nginx/releases/download/helm-chart-4.4.0/ingress-nginx-4.4.0.tgz
  values:
    controller:
      ingressClassResource:
        default: true
      config:
        # These land in the controller ConfigMap — data values must be
        # strings, so everything boolean/numeric-looking is quoted.
        compute-full-forwarded-for: "true"
        proxy-real-ip-cidr: "0.0.0.0/0"
        real-ip-header: proxy_protocol
        use-forwarded-headers: "true"
        use-proxy-protocol: "true"
        use-http2: "false"  # was an unquoted boolean; quoted for ConfigMap string semantics
        upstream-keepalive-connections: "0"
        ssl-session-timeout: "1d"
        ssl-session-cache-size: "50m"
        ssl-protocols: "TLSv1.2 TLSv1.3"
        # Named upstreams for the JupyterHub proxy services; max_fails=0
        # keeps nginx from marking a peer as failed.
        http-snippet: |
          upstream default-retries {
            server default-proxy-public.jupyterjsc.svc:443 max_fails=0;
            server default-proxy-public-0.jupyterjsc.svc:443 max_fails=0;
            server default-proxy-public-1.jupyterjsc.svc:443 max_fails=0;
            server default-proxy-public-2.jupyterjsc.svc:443 max_fails=0;
            server default-proxy-public-3.jupyterjsc.svc:443 max_fails=0;
            server default-proxy-public-4.jupyterjsc.svc:443 max_fails=0;
            keepalive 32;
          }
          upstream juniq-retries {
            server juniq-proxy-public.jupyterjsc.svc:443 max_fails=0;
            server juniq-proxy-public-0.jupyterjsc.svc:443 max_fails=0;
            server juniq-proxy-public-1.jupyterjsc.svc:443 max_fails=0;
            server juniq-proxy-public-2.jupyterjsc.svc:443 max_fails=0;
            server juniq-proxy-public-3.jupyterjsc.svc:443 max_fails=0;
            server juniq-proxy-public-4.jupyterjsc.svc:443 max_fails=0;
            keepalive 32;
          }
          upstream eurocc-retries {
            server eurocc-proxy-public.jupyterjsc.svc:443 max_fails=0;
            server eurocc-proxy-public-0.jupyterjsc.svc:443 max_fails=0;
            server eurocc-proxy-public-1.jupyterjsc.svc:443 max_fails=0;
            server eurocc-proxy-public-2.jupyterjsc.svc:443 max_fails=0;
            server eurocc-proxy-public-3.jupyterjsc.svc:443 max_fails=0;
            server eurocc-proxy-public-4.jupyterjsc.svc:443 max_fails=0;
            keepalive 32;
          }
          upstream portalgauss-retries {
            server portalgauss-proxy-public.jupyterjsc.svc:443 max_fails=0;
            server portalgauss-proxy-public-0.jupyterjsc.svc:443 max_fails=0;
            server portalgauss-proxy-public-1.jupyterjsc.svc:443 max_fails=0;
            server portalgauss-proxy-public-2.jupyterjsc.svc:443 max_fails=0;
            server portalgauss-proxy-public-3.jupyterjsc.svc:443 max_fails=0;
            server portalgauss-proxy-public-4.jupyterjsc.svc:443 max_fails=0;
            keepalive 32;
          }
          upstream dev1-retries {
            server dev1-proxy-public.jupyterjscdev.svc:443 max_fails=0;
            server dev1-proxy-public-0.jupyterjscdev.svc:443 max_fails=0;
            server dev1-proxy-public-1.jupyterjscdev.svc:443 max_fails=0;
            server dev1-proxy-public-2.jupyterjscdev.svc:443 max_fails=0;
            server dev1-proxy-public-3.jupyterjscdev.svc:443 max_fails=0;
            server dev1-proxy-public-4.jupyterjscdev.svc:443 max_fails=0;
            keepalive 32;
          }
          upstream dev2-retries {
            server dev2-proxy-public.jupyterjscdev.svc:443 max_fails=0;
            server dev2-proxy-public-0.jupyterjscdev.svc:443 max_fails=0;
            server dev2-proxy-public-1.jupyterjscdev.svc:443 max_fails=0;
            server dev2-proxy-public-2.jupyterjscdev.svc:443 max_fails=0;
            server dev2-proxy-public-3.jupyterjscdev.svc:443 max_fails=0;
            server dev2-proxy-public-4.jupyterjscdev.svc:443 max_fails=0;
            keepalive 32;
          }
          upstream coeraise-retries {
            server coeraise-proxy-public.jupyterjsc.svc:443 max_fails=0;
            server coeraise-proxy-public-0.jupyterjsc.svc:443 max_fails=0;
            server coeraise-proxy-public-1.jupyterjsc.svc:443 max_fails=0;
            server coeraise-proxy-public-2.jupyterjsc.svc:443 max_fails=0;
            server coeraise-proxy-public-3.jupyterjsc.svc:443 max_fails=0;
            server coeraise-proxy-public-4.jupyterjsc.svc:443 max_fails=0;
            keepalive 32;
          }
      extraArgs:
        enable-ssl-passthrough: ""
      kind: DaemonSet
      metrics:
        enabled: true
        serviceMonitor:
          additionalLabels:
            release: prometheus-stack
          enabled: true
          namespace: cattle-monitoring-system
          # Drop high-cardinality histogram series to keep Prometheus lean.
          metricRelabelings:
            - action: drop
              regex: '(nginx_ingress_controller_request_duration_seconds_bucket|nginx_ingress_controller_response_size_bucket|nginx_ingress_controller_request_size_bucket|nginx_ingress_controller_response_duration_seconds_bucket|nginx_ingress_controller_bytes_sent_bucket)'
              sourceLabels: [__name__]
      service:
        annotations:
          # Annotation values must be strings — "true" quoted deliberately.
          loadbalancer.openstack.org/keep-floatingip: "true"
          loadbalancer.openstack.org/proxy-protocol: "true"
          # loadbalancer.openstack.org/timeout-client-data: "600000"
          # loadbalancer.openstack.org/timeout-member-connect: "600000"
          loadbalancer.openstack.org/timeout-member-data: "600000"
# Per-cluster overrides: staging and production differ only in the Helm
# release name and the load balancer floating IP.
targetCustomizations:
  - name: staging
    clusterSelector:
      matchLabels:
        stage: staging
        provider.cattle.io: rke
    helm:
      releaseName: ingress-nginx-staging
      values:
        controller:
          service:
            loadBalancerIP: 134.94.199.199
  - name: production
    clusterSelector:
      matchLabels:
        stage: production
        provider.cattle.io: rke
    helm:
      releaseName: ingress-nginx-production
      values:
        controller:
          service:
            loadBalancerIP: 134.94.199.54
dependsOn:
  - name: basics-monitor-crd
---
# Second document variant (merge parent): same bundle with only the
# dev1/dev2 upstreams in the http-snippet. Separated with `---` so the
# repeated top-level keys do not form invalid duplicate keys.
# NOTE(review): indentation reconstructed from a flattened paste — verify
# against the original fleet.yaml in the repository before deploying.
defaultNamespace: ingress-nginx
helm:
  releaseName: ingress-nginx
  chart: https://github.com/kubernetes/ingress-nginx/releases/download/helm-chart-4.4.0/ingress-nginx-4.4.0.tgz
  values:
    controller:
      ingressClassResource:
        default: true
      config:
        # ConfigMap data values must be strings — keep everything quoted.
        compute-full-forwarded-for: "true"
        proxy-real-ip-cidr: "0.0.0.0/0"
        real-ip-header: proxy_protocol
        use-forwarded-headers: "true"
        use-proxy-protocol: "true"
        use-http2: "false"  # was an unquoted boolean; quoted for ConfigMap string semantics
        upstream-keepalive-connections: "0"
        ssl-session-timeout: "1d"
        ssl-session-cache-size: "50m"
        ssl-protocols: "TLSv1.2 TLSv1.3"
        http-snippet: |
          upstream dev1-retries {
            server dev1-proxy-public.jupyterjscdev.svc:443 max_fails=0;
            server dev1-proxy-public-0.jupyterjscdev.svc:443 max_fails=0;
            server dev1-proxy-public-1.jupyterjscdev.svc:443 max_fails=0;
            server dev1-proxy-public-2.jupyterjscdev.svc:443 max_fails=0;
            server dev1-proxy-public-3.jupyterjscdev.svc:443 max_fails=0;
            server dev1-proxy-public-4.jupyterjscdev.svc:443 max_fails=0;
            keepalive 32;
          }
          upstream dev2-retries {
            server dev2-proxy-public.jupyterjscdev.svc:443 max_fails=0;
            server dev2-proxy-public-0.jupyterjscdev.svc:443 max_fails=0;
            server dev2-proxy-public-1.jupyterjscdev.svc:443 max_fails=0;
            server dev2-proxy-public-2.jupyterjscdev.svc:443 max_fails=0;
            server dev2-proxy-public-3.jupyterjscdev.svc:443 max_fails=0;
            server dev2-proxy-public-4.jupyterjscdev.svc:443 max_fails=0;
            keepalive 32;
          }
      extraArgs:
        enable-ssl-passthrough: ""
      kind: DaemonSet
      metrics:
        enabled: true
        serviceMonitor:
          additionalLabels:
            release: prometheus-stack
          enabled: true
          namespace: cattle-monitoring-system
          # Drop high-cardinality histogram series to keep Prometheus lean.
          metricRelabelings:
            - action: drop
              regex: '(nginx_ingress_controller_request_duration_seconds_bucket|nginx_ingress_controller_response_size_bucket|nginx_ingress_controller_request_size_bucket|nginx_ingress_controller_response_duration_seconds_bucket|nginx_ingress_controller_bytes_sent_bucket)'
              sourceLabels: [__name__]
      service:
        annotations:
          # Annotation values must be strings — "true" quoted deliberately.
          loadbalancer.openstack.org/keep-floatingip: "true"
          loadbalancer.openstack.org/proxy-protocol: "true"
          # loadbalancer.openstack.org/timeout-client-data: "600000"
          # loadbalancer.openstack.org/timeout-member-connect: "600000"
          loadbalancer.openstack.org/timeout-member-data: "600000"
# Per-cluster overrides: staging and production differ only in the Helm
# release name and the load balancer floating IP.
targetCustomizations:
  - name: staging
    clusterSelector:
      matchLabels:
        stage: staging
        provider.cattle.io: rke
    helm:
      releaseName: ingress-nginx-staging
      values:
        controller:
          service:
            loadBalancerIP: 134.94.199.199
  - name: production
    clusterSelector:
      matchLabels:
        stage: production
        provider.cattle.io: rke
    helm:
      releaseName: ingress-nginx-production
      values:
        controller:
          service:
            loadBalancerIP: 134.94.199.54
dependsOn:
  - name: basics-monitor-crd
---
# Third document variant (merge parent): same bundle without any
# http-snippet. Separated with `---` so the repeated top-level keys do
# not form invalid duplicate keys.
# NOTE(review): indentation reconstructed from a flattened paste — verify
# against the original fleet.yaml in the repository before deploying.
defaultNamespace: ingress-nginx
helm:
  releaseName: ingress-nginx
  chart: https://github.com/kubernetes/ingress-nginx/releases/download/helm-chart-4.4.0/ingress-nginx-4.4.0.tgz
  values:
    controller:
      ingressClassResource:
        default: true
      config:
        # ConfigMap data values must be strings — keep everything quoted.
        compute-full-forwarded-for: "true"
        proxy-real-ip-cidr: "0.0.0.0/0"
        real-ip-header: proxy_protocol
        use-forwarded-headers: "true"
        use-proxy-protocol: "true"
        use-http2: "false"  # was an unquoted boolean; quoted for ConfigMap string semantics
        upstream-keepalive-connections: "0"
        ssl-session-timeout: "1d"
        ssl-session-cache-size: "50m"
        ssl-protocols: "TLSv1.2 TLSv1.3"
      extraArgs:
        enable-ssl-passthrough: ""
      kind: DaemonSet
      metrics:
        enabled: true
        serviceMonitor:
          additionalLabels:
            release: prometheus-stack
          enabled: true
          namespace: cattle-monitoring-system
          # Drop high-cardinality histogram series to keep Prometheus lean.
          metricRelabelings:
            - action: drop
              regex: '(nginx_ingress_controller_request_duration_seconds_bucket|nginx_ingress_controller_response_size_bucket|nginx_ingress_controller_request_size_bucket|nginx_ingress_controller_response_duration_seconds_bucket|nginx_ingress_controller_bytes_sent_bucket)'
              sourceLabels: [__name__]
      service:
        annotations:
          # Annotation values must be strings — "true" quoted deliberately.
          loadbalancer.openstack.org/keep-floatingip: "true"
          loadbalancer.openstack.org/proxy-protocol: "true"
          # loadbalancer.openstack.org/timeout-client-data: "600000"
          # loadbalancer.openstack.org/timeout-member-connect: "600000"
          loadbalancer.openstack.org/timeout-member-data: "600000"
# Per-cluster overrides: staging and production differ only in the Helm
# release name and the load balancer floating IP.
targetCustomizations:
  - name: staging
    clusterSelector:
      matchLabels:
        stage: staging
        provider.cattle.io: rke
    helm:
      releaseName: ingress-nginx-staging
      values:
        controller:
          service:
            loadBalancerIP: 134.94.199.199
  - name: production
    clusterSelector:
      matchLabels:
        stage: production
        provider.cattle.io: rke
    helm:
      releaseName: ingress-nginx-production
      values:
        controller:
          service:
            loadBalancerIP: 134.94.199.54
dependsOn:
  - name: basics-monitor-crd
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Please register or to comment