Enable Cilium IPv4 only (#1)

Co-authored-by: Marcel Straub <m@straubs.eu>
Reviewed-on: s5b-private/k8s#1
2025-09-05 12:43:00 +02:00
parent bcb019c17d
commit b2f7eba0fa
9 changed files with 121 additions and 41 deletions

View File

@@ -0,0 +1,20 @@
#!/usr/bin/bash
PROXY_REGISTRY_BASE="harbor.prod.eis-mk8.de.s5b.org/proxy-"
NS="cilium-test-1"
for pod in $(kubectl -n ${NS} get pods --no-headers=true -o custom-columns='NAME:.metadata.name'); do
kubectl -n ${NS} delete pod ${pod}
done
echo "Make cilium Test NS privileged"
kubectl label ns ${NS} pod-security.kubernetes.io/enforce=privileged
cilium connectivity test \
  --curl-image ${PROXY_REGISTRY_BASE}quay.io/cilium/alpine-curl:v1.10.0 \
  --dns-test-server-image ${PROXY_REGISTRY_BASE}registry.k8s.io/coredns/coredns:v1.12.0 \
  --echo-image ${PROXY_REGISTRY_BASE}gcr.io/k8s-staging-gateway-api/echo-advanced:v20240412-v1.0.0-394-g40c666fd \
  --frr-image ${PROXY_REGISTRY_BASE}quay.io/frrouting/frr:10.2.1 \
  --json-mock-image ${PROXY_REGISTRY_BASE}quay.io/cilium/json-mock:v1.3.8 \
  --socat-image ${PROXY_REGISTRY_BASE}docker.io/alpine/socat:1.8.0.1 \
  --test-conn-disrupt-image ${PROXY_REGISTRY_BASE}quay.io/cilium/test-connection-disruption:v0.0.16
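Once the test has finished, the privileged label can be dropped again. A minimal cleanup sketch, assuming the test namespace is no longer needed:

# Remove the PSS label again (the trailing "-" deletes a label)
kubectl label ns ${NS} pod-security.kubernetes.io/enforce-
# ...or drop the whole test namespace
kubectl delete ns ${NS}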

View File

@@ -0,0 +1,29 @@
#!/usr/bin/bash
# Install Cilium CLI
CILIUM_CLI_VERSION=$(curl -s https://raw.githubusercontent.com/cilium/cilium-cli/main/stable.txt)
CLI_ARCH=amd64
if [ "$(uname -m)" = "aarch64" ]; then CLI_ARCH=arm64; fi
echo "CLI Version: $CILIUM_CLI_VERSION"
echo "Aarch: $CLI_ARCH"
curl -L --fail --remote-name-all https://github.com/cilium/cilium-cli/releases/download/${CILIUM_CLI_VERSION}/cilium-linux-${CLI_ARCH}.tar.gz{,.sha256sum}
sha256sum --check cilium-linux-${CLI_ARCH}.tar.gz.sha256sum
sudo tar xzvfC cilium-linux-${CLI_ARCH}.tar.gz /usr/local/bin
rm cilium-linux-${CLI_ARCH}.tar.gz{,.sha256sum}
# Install hubble cli
HUBBLE_VERSION=$(curl -s https://raw.githubusercontent.com/cilium/hubble/master/stable.txt)
HUBBLE_ARCH=amd64
if [ "$(uname -m)" = "aarch64" ]; then HUBBLE_ARCH=arm64; fi
curl -L --fail --remote-name-all https://github.com/cilium/hubble/releases/download/$HUBBLE_VERSION/hubble-linux-${HUBBLE_ARCH}.tar.gz{,.sha256sum}
sha256sum --check hubble-linux-${HUBBLE_ARCH}.tar.gz.sha256sum
sudo tar xzvfC hubble-linux-${HUBBLE_ARCH}.tar.gz /usr/local/bin
rm hubble-linux-${HUBBLE_ARCH}.tar.gz{,.sha256sum}
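A quick smoke test after installation; both commands only touch the local binaries (the --client flag on cilium version is assumed to be present in current CLI releases):

# Confirm both CLIs are on PATH and executable
cilium version --client
hubble version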

View File

@@ -41,6 +41,11 @@ resource "local_file" "talos_config" {
  file_permission = "0600"
}
output "talos_config" {
  value     = module.talos.client_configuration.talos_config
  sensitive = true
}
resource "local_file" "kube_config" {
  content  = module.talos.kube_config.kubeconfig_raw
  filename = "output/kube-config.yaml"
@@ -50,9 +55,4 @@ resource "local_file" "kube_config" {
output "kube_config" {
value = module.talos.kube_config.kubeconfig_raw
sensitive = true
}
output "talos_config" {
value = module.talos.client_configuration.talos_config
sensitive = true
}
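With the duplicate talos_config output consolidated next to its local_file resource, the sensitive values can still be exported on demand. A sketch assuming OpenTofu as the CLI (plain terraform works identically) and a hypothetical output path for the Talos config:

# Write the sensitive outputs to files without echoing them to the terminal
tofu output -raw talos_config > output/talos-config.yaml
tofu output -raw kube_config > output/kube-config.yaml
# Sanity check against the cluster
talosctl --talosconfig output/talos-config.yaml version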

View File

@@ -19,28 +19,28 @@ data "talos_client_configuration" "this" {
endpoints = [for k, v in var.nodes : v.ip if v.machine_type == "controlplane"]
}
# resource "terraform_data" "cilium_bootstrap_inline_manifests" {
# input = [
# {
# name = "cilium-bootstrap"
# contents = file("${path.root}/${var.cluster.cilium.bootstrap_manifest_path}")
# },
# {
# name = "cilium-values"
# contents = yamlencode({
# apiVersion = "v1"
# kind = "ConfigMap"
# metadata = {
# name = "cilium-values"
# namespace = "kube-system"
# }
# data = {
# "values.yaml" = file("${path.root}/${var.cluster.cilium.values_file_path}")
# }
# })
# }
# ]
# }
resource "terraform_data" "cilium_bootstrap_inline_manifests" {
input = [
{
name = "cilium-bootstrap"
contents = file("${path.root}/${var.cluster.cilium.bootstrap_manifest_path}")
},
{
name = "cilium-values"
contents = yamlencode({
apiVersion = "v1"
kind = "ConfigMap"
metadata = {
name = "cilium-values"
namespace = "kube-system"
}
data = {
"values.yaml" = file("${path.root}/${var.cluster.cilium.values_file_path}")
}
})
}
]
}
data "talos_machine_configuration" "this" {
for_each = var.nodes
@@ -70,7 +70,7 @@ data "talos_machine_configuration" "this" {
      vip              = var.cluster.vip
      extra_manifests  = jsonencode(local.extra_manifests)
      api_server       = var.cluster.api_server
      # inline_manifests = jsonencode(terraform_data.cilium_bootstrap_inline_manifests.output)
      inline_manifests = jsonencode(terraform_data.cilium_bootstrap_inline_manifests.output)
    }) :
    templatefile("${path.module}/machine-config/worker.yaml.tftpl", {
      mac_address = lower(each.value.mac_address)

View File

@@ -26,13 +26,15 @@ cluster:
    extraArgs:
      bind-address: 0.0.0.0
  # Let's go with the default network
  # network:
  #   cni:
  #     name: none
  # proxy:
  #   disabled: true
  network:
    cni:
      name: none
  proxy:
    disabled: true
  discovery:
    enabled: true
    registries:
      service:
        disabled: false
  extraManifests: ${extra_manifests}
  inlineManifests: ${inline_manifests}
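With the CNI set to none and kube-proxy disabled, freshly bootstrapped nodes report NotReady until the inline Cilium manifests have been applied and the agent is healthy. A quick way to watch for that:

# Nodes stay NotReady until Cilium comes up
kubectl get nodes -w
# Block until Cilium reports healthy
cilium status --wait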

View File

@@ -23,5 +23,6 @@ talos_cluster_config = {
"gcr.io" = "https://harbor.prod.eis-mk8.de.s5b.org/v2/proxy-gcr.io"
"registry.k8s.io" = "https://harbor.prod.eis-mk8.de.s5b.org/v2/proxy-registry.k8s.io"
"mcr.microsoft.com" = "https://harbor.prod.eis-mk8.de.s5b.org/v2/proxy-mcr.microsoft.com"
"quay.io" = "https://harbor.prod.eis-mk8.de.s5b.org/v2/proxy-quay.io"
}
}
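One way to confirm the new quay.io mirror is serving images, sketched with crane and an image reference taken from the connectivity test script above (assuming the Harbor proxy project allows anonymous pulls):

# Resolve a manifest digest through the Harbor pull-through cache
crane digest harbor.prod.eis-mk8.de.s5b.org/proxy-quay.io/cilium/json-mock:v1.3.8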

View File

@@ -6,8 +6,8 @@ talos_nodes = {
ip = "10.51.10.101"
mac_address = "BC:24:11:7B:76:3E"
vm_id = 301
cpu = 1
ram_dedicated = 4096
cpu = 2
ram_dedicated = 6144
}
"ctrl-02" = {
host_node = "pve02"
@@ -15,8 +15,8 @@ talos_nodes = {
ip = "10.51.10.102"
mac_address = "BC:24:11:16:85:7D"
vm_id = 302
cpu = 1
ram_dedicated = 4096
cpu = 2
ram_dedicated = 6144
}
"ctrl-03" = {
host_node = "pve-oberon"
@@ -24,8 +24,8 @@ talos_nodes = {
ip = "10.51.10.103"
mac_address = "BC:24:11:B8:B6:6F"
vm_id = 303
cpu = 1
ram_dedicated = 4096
cpu = 2
ram_dedicated = 6144
}
# Worker Nodes
"worker-01" = {

View File

@@ -62,4 +62,5 @@ kubectl delete pod debug
## Literature
- [Talos Kubernetes on Proxmox using OpenTofu](https://blog.stonegarden.dev/articles/2024/08/talos-proxmox-tofu/)
- [Talos on Proxmox with Terraform (multiple node pools)](https://github.com/sergelogvinov/terraform-talos/tree/main/proxmox)
- [Infrastructure Automation: Provisioning VMs on Proxmox with Packer, OpenTOFU, GitLab, Vault, and Minio.](https://medium.com/@avishkumar27/infrastructure-automation-provisioning-vms-on-proxmox-with-packer-opentofu-gitlab-vault-and-27fda7d73771)
- [Talos IPv6 only cluster (2023/12/07)](https://www.redpill-linpro.com/techblog/2023/12/07/ipv6-only-k8s.html)

View File

@@ -27,6 +27,33 @@ bpf:
ipam:
  mode: kubernetes
  multiPoolPreAllocation: ""
  # mode: multi-pool
# operator:
#   autoCreateCiliumPodIPPools:
#     default:
#       ipv6:
#         cidrs:
#           - 2a13:fc80:0001:d200::/64
#         maskSize: 120
# # TODO: verify the service subnet CIDRs
# # The service subnet CIDR.
# serviceSubnets:
#   - 10.96.0.0/12
#   - 2a13:fc80:0001:d201::/64
# routingMode: native
# bgpControlPlane:
#   enabled: true
# ipv4:
#   enabled: true
# ipv6:
#   enabled: true
# enableIPv4Masquerade: false
# enableIPv6Masquerade: false
operator:
  rollOutPods: true
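These values normally reach the cluster through the cilium-values ConfigMap baked into the machine config, but for iterating on them without re-provisioning, a sketch assuming Helm and the upstream chart repository:

# Apply the same values file directly against the running cluster
helm repo add cilium https://helm.cilium.io/
helm upgrade --install cilium cilium/cilium --namespace kube-system --values values.yaml

The chart's default image repositories point at quay.io directly; with the registry mirrors configured above, containerd on the Talos nodes rewrites those pulls through the Harbor proxy anyway.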