Replies: 2 comments
-
I have the same issue: updating a very old deployment that has worked for a couple of years. For some reason, I can't get the load balancer to be generated.

file: kube.tf

```hcl
locals {
  hcloud_token = "REDACTED"
}

variable "hcloud_token" {
  sensitive = true
  default   = ""
}

provider "hcloud" {
  token = var.hcloud_token != "" ? var.hcloud_token : local.hcloud_token
}

terraform {
  required_version = ">= 1.5.0"
  required_providers {
    hcloud = {
      source  = "hetznercloud/hcloud"
      version = ">= 1.43.0"
    }
  }
}

module "kube-hetzner" {
  providers = {
    hcloud = hcloud
  }
  source  = "kube-hetzner/kube-hetzner/hcloud"
  version = "2.17.0"

  hcloud_token    = var.hcloud_token != "" ? var.hcloud_token : local.hcloud_token
  ssh_public_key  = file("/home/REDACTED/.ssh/id_ed25519.pub")
  ssh_private_key = file("/home/REDACTED/.ssh/id_ed25519")
  network_region  = "eu-central"

  control_plane_nodepools = [
    {
      name        = "control-plane",
      server_type = "cpx21",
      location    = "fsn1",
      labels = [
        "node.kubernetes.io/server-usage=storage"
      ],
      taints = [],
      count  = 1
    }
  ]

  agent_nodepools = [
    {
      name        = "agent",
      server_type = "cpx21",
      location    = "fsn1",
      labels = [
        "node.kubernetes.io/server-usage=storage"
      ],
      taints = [],
      count  = 2,
      longhorn_volume_size = 40
    }
  ]

  load_balancer_type     = "lb11"
  load_balancer_location = "fsn1"
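  # the setting in question: with "none", the module does not create the Hetzner LB (see the next reply)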
  ingress_controller = "none"

  enable_longhorn                   = true
  longhorn_replica_count            = 3
  allow_scheduling_on_control_plane = true
  automatically_upgrade_os          = false
  initial_k3s_channel               = "v1.30"
}

output "kubeconfig" {
  value     = module.kube-hetzner.kubeconfig
  sensitive = true
}
```
-
All the load balancer configuration happens through annotations on a Kubernetes Service. You can get an understanding of it if you look at the files on the control plane under the k3s manifests directory (/var/lib/rancher/k3s/server/manifests). In that directory there is a file containing the HelmChart definition for the ingress controller. The annotations basically create the LB in Hetzner via the hcloud cloud controller manager.

That way, you can create any Service with type LoadBalancer and get a public IP for it, and you could expose any application via a Service of type "LoadBalancer" through its own LB if you wanted to.

Btw, I did the same: basically turned everything off, and now I deploy everything via GitOps and FluxCD. So in theory, I could swap out the Kubernetes cluster fairly easily without many configuration changes and not be too dependent on the Terraform/Hetzner setup.

Anyway, here is my Traefik configuration:
```yaml
---
apiVersion: v1
kind: Namespace
metadata:
  name:
---
apiVersion: helm.cattle.io/v1
kind: HelmChart
metadata:
  name: traefik
  namespace: kube-system
spec:
  chart: traefik
  version: ""
  repo: https://traefik.github.io/charts
  targetNamespace:
  bootstrap: true
  valuesContent: |-
    image:
      tag:
    deployment:
      replicas: 3
    globalArguments: []
    service:
      enabled: true
      type: LoadBalancer
      annotations:
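        # read by the hcloud cloud controller manager, which creates and configures the Hetzner LB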
"load-balancer.hetzner.cloud/name": "ha-cluster-none"
"load-balancer.hetzner.cloud/use-private-ip": "true"
"load-balancer.hetzner.cloud/disable-private-ingress": "true"
"load-balancer.hetzner.cloud/disable-public-network": "false"
"load-balancer.hetzner.cloud/ipv6-disabled": "false"
"load-balancer.hetzner.cloud/location": "nbg1"
"load-balancer.hetzner.cloud/type": "lb11"
"load-balancer.hetzner.cloud/uses-proxyprotocol": "true"
"load-balancer.hetzner.cloud/algorithm-type": "round_robin"
"load-balancer.hetzner.cloud/health-check-interval": "15s"
"load-balancer.hetzner.cloud/health-check-timeout": "10s"
"load-balancer.hetzner.cloud/health-check-retries": "3"
    ports:
      web:
        redirections:
          entryPoint:
            to: websecure
            scheme: https
            permanent: true
        proxyProtocol:
          trustedIPs:
            - 127.0.0.1/32
            - 10.0.0.0/8
        forwardedHeaders:
          trustedIPs:
            - 127.0.0.1/32
            - 10.0.0.0/8
      websecure:
        proxyProtocol:
          trustedIPs:
            - 127.0.0.1/32
            - 10.0.0.0/8
        forwardedHeaders:
          trustedIPs:
            - 127.0.0.1/32
            - 10.0.0.0/8
    podDisruptionBudget:
      enabled: true
      maxUnavailable: 33%
    additionalArguments:
      - "--providers.kubernetesingress.ingressendpoint.publishedservice=/traefik"
      - "--global.sendanonymoususage=false"
    resources:
      requests:
        cpu: "100m"
        memory: "50Mi"
      limits:
        cpu: "300m"
        memory: "150Mi"
    autoscaling:
      enabled: true
      minReplicas: 3
      maxReplicas: 10
```
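To illustrate the "any Service of type LoadBalancer gets its own LB" point, here is a minimal sketch of a standalone Service. The names and the backend selector are hypothetical; the annotations are the same load-balancer.hetzner.cloud/* keys shown above, which the hcloud cloud controller manager reads to provision the LB:

```yaml
# Minimal sketch: a hypothetical app exposed through its own Hetzner LB.
apiVersion: v1
kind: Service
metadata:
  name: my-app                     # hypothetical Service name
  namespace: default
  annotations:
    "load-balancer.hetzner.cloud/name": "my-app-lb"   # hypothetical LB name
    "load-balancer.hetzner.cloud/location": "nbg1"
    "load-balancer.hetzner.cloud/type": "lb11"
    "load-balancer.hetzner.cloud/use-private-ip": "true"
spec:
  type: LoadBalancer
  selector:
    app: my-app                    # must match the labels of the pods to expose
  ports:
    - name: http
      port: 80
      targetPort: 8080             # hypothetical container port
```

Once the LB is provisioned, its public IP shows up in the Service's EXTERNAL-IP column (kubectl get svc my-app).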
-
I was thinking of setting `ingress_controller = "none"` in an attempt to deploy Traefik with Helm separately after the cluster has been created. However, when using `ingress_controller = "none"`, I saw that it does not create the external load balancer (lb11), and on an active cluster it removes the load balancer when applied.

Could someone explain why the `ingress_controller` is a requirement for an external load balancer to be created?