Commit 94da137e
authored 1 year ago by Tim Kreuzer
remove unused files
parent 1c16ebf6
Changes: 3 changed files with 0 additions and 278 deletions

kured/fleet.yaml.all   +0 −129
kured/fleet.yaml.dev   +0 −84
kured/fleet.yaml.min   +0 −65
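Each of the removed files is a Fleet bundle definition (a fleet.yaml variant) that wraps the upstream ingress-nginx Helm chart. Fleet only acts on such files when their directory is registered through a GitRepo resource; the sketch below shows roughly what that registration looks like. The repository URL, branch, and object name are illustrative assumptions, not values taken from this project.

apiVersion: fleet.cattle.io/v1alpha1
kind: GitRepo
metadata:
  name: fleet-deployments          # illustrative name
  namespace: fleet-default         # Fleet's default workspace for downstream clusters
spec:
  repo: https://gitlab.example.com/kaas/fleet-deployments.git   # assumed URL
  branch: main                     # assumed branch
  paths:
    - kured                        # directory whose fleet.yaml Fleet turns into a bundle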
kured/fleet.yaml.all   deleted 100644 → 0   +0 −129

defaultNamespace: ingress-nginx
helm:
  releaseName: ingress-nginx
  chart: https://github.com/kubernetes/ingress-nginx/releases/download/helm-chart-4.4.0/ingress-nginx-4.4.0.tgz
  values:
    controller:
      ingressClassResource:
        default: true
      config:
        compute-full-forwarded-for: "true"
        proxy-real-ip-cidr: 0.0.0.0/0
        real-ip-header: proxy_protocol
        use-forwarded-headers: "true"
        use-proxy-protocol: "true"
        use-http2: false
        upstream-keepalive-connections: "0"
        ssl-session-timeout: "1d"
        ssl-session-cache-size: "50m"
        ssl-protocols: "TLSv1.2 TLSv1.3"
        http-snippet: |
          upstream default-retries {
            server default-proxy-public.jupyterjsc.svc:443 max_fails=0;
            server default-proxy-public-0.jupyterjsc.svc:443 max_fails=0;
            server default-proxy-public-1.jupyterjsc.svc:443 max_fails=0;
            server default-proxy-public-2.jupyterjsc.svc:443 max_fails=0;
            server default-proxy-public-3.jupyterjsc.svc:443 max_fails=0;
            server default-proxy-public-4.jupyterjsc.svc:443 max_fails=0;
            keepalive 32;
          }
          upstream juniq-retries {
            server juniq-proxy-public.jupyterjsc.svc:443 max_fails=0;
            server juniq-proxy-public-0.jupyterjsc.svc:443 max_fails=0;
            server juniq-proxy-public-1.jupyterjsc.svc:443 max_fails=0;
            server juniq-proxy-public-2.jupyterjsc.svc:443 max_fails=0;
            server juniq-proxy-public-3.jupyterjsc.svc:443 max_fails=0;
            server juniq-proxy-public-4.jupyterjsc.svc:443 max_fails=0;
            keepalive 32;
          }
          upstream eurocc-retries {
            server eurocc-proxy-public.jupyterjsc.svc:443 max_fails=0;
            server eurocc-proxy-public-0.jupyterjsc.svc:443 max_fails=0;
            server eurocc-proxy-public-1.jupyterjsc.svc:443 max_fails=0;
            server eurocc-proxy-public-2.jupyterjsc.svc:443 max_fails=0;
            server eurocc-proxy-public-3.jupyterjsc.svc:443 max_fails=0;
            server eurocc-proxy-public-4.jupyterjsc.svc:443 max_fails=0;
            keepalive 32;
          }
          upstream portalgauss-retries {
            server portalgauss-proxy-public.jupyterjsc.svc:443 max_fails=0;
            server portalgauss-proxy-public-0.jupyterjsc.svc:443 max_fails=0;
            server portalgauss-proxy-public-1.jupyterjsc.svc:443 max_fails=0;
            server portalgauss-proxy-public-2.jupyterjsc.svc:443 max_fails=0;
            server portalgauss-proxy-public-3.jupyterjsc.svc:443 max_fails=0;
            server portalgauss-proxy-public-4.jupyterjsc.svc:443 max_fails=0;
            keepalive 32;
          }
          upstream dev1-retries {
            server dev1-proxy-public.jupyterjscdev.svc:443 max_fails=0;
            server dev1-proxy-public-0.jupyterjscdev.svc:443 max_fails=0;
            server dev1-proxy-public-1.jupyterjscdev.svc:443 max_fails=0;
            server dev1-proxy-public-2.jupyterjscdev.svc:443 max_fails=0;
            server dev1-proxy-public-3.jupyterjscdev.svc:443 max_fails=0;
            server dev1-proxy-public-4.jupyterjscdev.svc:443 max_fails=0;
            keepalive 32;
          }
          upstream dev2-retries {
            server dev2-proxy-public.jupyterjscdev.svc:443 max_fails=0;
            server dev2-proxy-public-0.jupyterjscdev.svc:443 max_fails=0;
            server dev2-proxy-public-1.jupyterjscdev.svc:443 max_fails=0;
            server dev2-proxy-public-2.jupyterjscdev.svc:443 max_fails=0;
            server dev2-proxy-public-3.jupyterjscdev.svc:443 max_fails=0;
            server dev2-proxy-public-4.jupyterjscdev.svc:443 max_fails=0;
            keepalive 32;
          }
          upstream coeraise-retries {
            server coeraise-proxy-public.jupyterjsc.svc:443 max_fails=0;
            server coeraise-proxy-public-0.jupyterjsc.svc:443 max_fails=0;
            server coeraise-proxy-public-1.jupyterjsc.svc:443 max_fails=0;
            server coeraise-proxy-public-2.jupyterjsc.svc:443 max_fails=0;
            server coeraise-proxy-public-3.jupyterjsc.svc:443 max_fails=0;
            server coeraise-proxy-public-4.jupyterjsc.svc:443 max_fails=0;
            keepalive 32;
          }
      extraArgs:
        enable-ssl-passthrough: ""
      kind: DaemonSet
      metrics:
        enabled: true
        serviceMonitor:
          additionalLabels:
            release: prometheus-stack
          enabled: true
          namespace: cattle-monitoring-system
          metricRelabelings:
            - action: drop
              regex: '(nginx_ingress_controller_request_duration_seconds_bucket|nginx_ingress_controller_response_size_bucket|nginx_ingress_controller_request_size_bucket|nginx_ingress_controller_response_duration_seconds_bucket|nginx_ingress_controller_bytes_sent_bucket)'
              sourceLabels: [__name__]
      service:
        annotations:
          loadbalancer.openstack.org/keep-floatingip: true
          loadbalancer.openstack.org/proxy-protocol: "true"
          #loadbalancer.openstack.org/timeout-client-data: "600000"
          #loadbalancer.openstack.org/timeout-member-connect: "600000"
          loadbalancer.openstack.org/timeout-member-data: "600000"
targetCustomizations:
  - name: staging
    clusterSelector:
      matchLabels:
        stage: staging
        provider.cattle.io: rke
    helm:
      releaseName: ingress-nginx-staging
      values:
        controller:
          service:
            loadBalancerIP: 134.94.199.199
  - name: production
    clusterSelector:
      matchLabels:
        stage: production
        provider.cattle.io: rke
    helm:
      releaseName: ingress-nginx-production
      values:
        controller:
          service:
            loadBalancerIP: 134.94.199.54
dependsOn:
  - name: basics-monitor-crd
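
The targetCustomizations entries above select downstream clusters by the labels on their Fleet cluster records (clusters.fleet.cattle.io), which Rancher normally creates when a cluster registers. As a rough, purely illustrative sketch (the object name is an assumption, and only the metadata is shown), the staging override would apply to a cluster record labelled like this:

apiVersion: fleet.cattle.io/v1alpha1
kind: Cluster
metadata:
  name: staging-cluster            # illustrative name; Rancher creates this record at registration
  namespace: fleet-default
  labels:
    stage: staging                 # matched by the "staging" targetCustomization above
    provider.cattle.io: rke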
kured/fleet.yaml.dev   deleted 100644 → 0   +0 −84

defaultNamespace: ingress-nginx
helm:
  releaseName: ingress-nginx
  chart: https://github.com/kubernetes/ingress-nginx/releases/download/helm-chart-4.4.0/ingress-nginx-4.4.0.tgz
  values:
    controller:
      ingressClassResource:
        default: true
      config:
        compute-full-forwarded-for: "true"
        proxy-real-ip-cidr: 0.0.0.0/0
        real-ip-header: proxy_protocol
        use-forwarded-headers: "true"
        use-proxy-protocol: "true"
        use-http2: false
        upstream-keepalive-connections: "0"
        ssl-session-timeout: "1d"
        ssl-session-cache-size: "50m"
        ssl-protocols: "TLSv1.2 TLSv1.3"
        http-snippet: |
          upstream dev1-retries {
            server dev1-proxy-public.jupyterjscdev.svc:443 max_fails=0;
            server dev1-proxy-public-0.jupyterjscdev.svc:443 max_fails=0;
            server dev1-proxy-public-1.jupyterjscdev.svc:443 max_fails=0;
            server dev1-proxy-public-2.jupyterjscdev.svc:443 max_fails=0;
            server dev1-proxy-public-3.jupyterjscdev.svc:443 max_fails=0;
            server dev1-proxy-public-4.jupyterjscdev.svc:443 max_fails=0;
            keepalive 32;
          }
          upstream dev2-retries {
            server dev2-proxy-public.jupyterjscdev.svc:443 max_fails=0;
            server dev2-proxy-public-0.jupyterjscdev.svc:443 max_fails=0;
            server dev2-proxy-public-1.jupyterjscdev.svc:443 max_fails=0;
            server dev2-proxy-public-2.jupyterjscdev.svc:443 max_fails=0;
            server dev2-proxy-public-3.jupyterjscdev.svc:443 max_fails=0;
            server dev2-proxy-public-4.jupyterjscdev.svc:443 max_fails=0;
            keepalive 32;
          }
      extraArgs:
        enable-ssl-passthrough: ""
      kind: DaemonSet
      metrics:
        enabled: true
        serviceMonitor:
          additionalLabels:
            release: prometheus-stack
          enabled: true
          namespace: cattle-monitoring-system
          metricRelabelings:
            - action: drop
              regex: '(nginx_ingress_controller_request_duration_seconds_bucket|nginx_ingress_controller_response_size_bucket|nginx_ingress_controller_request_size_bucket|nginx_ingress_controller_response_duration_seconds_bucket|nginx_ingress_controller_bytes_sent_bucket)'
              sourceLabels: [__name__]
      service:
        annotations:
          loadbalancer.openstack.org/keep-floatingip: true
          loadbalancer.openstack.org/proxy-protocol: "true"
          #loadbalancer.openstack.org/timeout-client-data: "600000"
          #loadbalancer.openstack.org/timeout-member-connect: "600000"
          loadbalancer.openstack.org/timeout-member-data: "600000"
targetCustomizations:
  - name: staging
    clusterSelector:
      matchLabels:
        stage: staging
        provider.cattle.io: rke
    helm:
      releaseName: ingress-nginx-staging
      values:
        controller:
          service:
            loadBalancerIP: 134.94.199.199
  - name: production
    clusterSelector:
      matchLabels:
        stage: production
        provider.cattle.io: rke
    helm:
      releaseName: ingress-nginx-production
      values:
        controller:
          service:
            loadBalancerIP: 134.94.199.54
dependsOn:
  - name: basics-monitor-crd
kured/fleet.yaml.min   deleted 100644 → 0   +0 −65

defaultNamespace: ingress-nginx
helm:
  releaseName: ingress-nginx
  chart: https://github.com/kubernetes/ingress-nginx/releases/download/helm-chart-4.4.0/ingress-nginx-4.4.0.tgz
  values:
    controller:
      ingressClassResource:
        default: true
      config:
        compute-full-forwarded-for: "true"
        proxy-real-ip-cidr: 0.0.0.0/0
        real-ip-header: proxy_protocol
        use-forwarded-headers: "true"
        use-proxy-protocol: "true"
        use-http2: false
        upstream-keepalive-connections: "0"
        ssl-session-timeout: "1d"
        ssl-session-cache-size: "50m"
        ssl-protocols: "TLSv1.2 TLSv1.3"
      extraArgs:
        enable-ssl-passthrough: ""
      kind: DaemonSet
      metrics:
        enabled: true
        serviceMonitor:
          additionalLabels:
            release: prometheus-stack
          enabled: true
          namespace: cattle-monitoring-system
          metricRelabelings:
            - action: drop
              regex: '(nginx_ingress_controller_request_duration_seconds_bucket|nginx_ingress_controller_response_size_bucket|nginx_ingress_controller_request_size_bucket|nginx_ingress_controller_response_duration_seconds_bucket|nginx_ingress_controller_bytes_sent_bucket)'
              sourceLabels: [__name__]
      service:
        annotations:
          loadbalancer.openstack.org/keep-floatingip: true
          loadbalancer.openstack.org/proxy-protocol: "true"
          #loadbalancer.openstack.org/timeout-client-data: "600000"
          #loadbalancer.openstack.org/timeout-member-connect: "600000"
          loadbalancer.openstack.org/timeout-member-data: "600000"
targetCustomizations:
  - name: staging
    clusterSelector:
      matchLabels:
        stage: staging
        provider.cattle.io: rke
    helm:
      releaseName: ingress-nginx-staging
      values:
        controller:
          service:
            loadBalancerIP: 134.94.199.199
  - name: production
    clusterSelector:
      matchLabels:
        stage: production
        provider.cattle.io: rke
    helm:
      releaseName: ingress-nginx-production
      values:
        controller:
          service:
            loadBalancerIP: 134.94.199.54
dependsOn:
  - name: basics-monitor-crd
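
In all three variants the chart renders a ServiceMonitor in cattle-monitoring-system with the extra label release: prometheus-stack, and the metricRelabelings drop the high-cardinality histogram bucket metrics. That release label is what a kube-prometheus-stack Prometheus typically selects on; a minimal sketch of the matching selector follows, with the object name assumed for illustration.

apiVersion: monitoring.coreos.com/v1
kind: Prometheus
metadata:
  name: prometheus-stack           # illustrative name
  namespace: cattle-monitoring-system
spec:
  serviceMonitorSelector:
    matchLabels:
      release: prometheus-stack    # picks up the ServiceMonitor rendered by this chart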
This diff is collapsed.
Click to expand it.
Preview
0%
Loading
Try again
or
attach a new file
.
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Save comment
Cancel
Please
register
or
sign in
to comment