Skip to content

RKE2 sur OpenStack

Introduction

RKE2 (Rancher Kubernetes Engine 2) est une distribution Kubernetes certifiée CNCF, orientée sécurité et conformité (CIS, FIPS). Idéale pour les environnements gouvernementaux ou régulés nécessitant une sécurité renforcée.

Prérequis

Points à apprendre

Différences RKE2 vs K3s

| Aspect        | RKE2                     | K3s            |
|---------------|--------------------------|----------------|
| Focus         | Sécurité, conformité     | Légèreté       |
| CIS Hardening | Par défaut               | Manuel         |
| Runtime       | containerd               | containerd     |
| Ingress       | Nginx (par défaut)       | Traefik        |
| etcd          | Embedded (HA)            | SQLite ou etcd |
| FIPS 140-2    | Supporté                 | Non            |
| Cible         | Production, gouvernement | Edge, IoT, dev |

Architecture RKE2

%% Topology: 3 HA server (control-plane) nodes behind an Octavia LB;
%% agents join the cluster through the LB, not through a specific server.
graph TB
    subgraph rke2[RKE2 Cluster]
        server1[RKE2 Server 1<br/>VM<br/>API Server<br/>etcd<br/>Controller<br/>Scheduler]
        server2[RKE2 Server 2<br/>VM<br/>HA member]
        server3[RKE2 Server 3<br/>VM<br/>HA member]

        agent1[RKE2 Agent 1<br/>VM<br/>kubelet<br/>containerd<br/>Workloads]
        agent2[RKE2 Agent 2<br/>VM]
        agent3[RKE2 Agent 3<br/>VM]
    end

    %% 6443 = Kubernetes API, 9345 = RKE2 supervisor (join endpoint)
    lb[Load Balancer<br/>Octavia<br/>TCP 6443, 9345]
    cinder[Cinder CSI<br/>PersistentVolumes]

    lb -->|6443, 9345| server1
    lb --> server2
    lb --> server3

    agent1 -->|Join via LB| lb
    agent2 --> lb
    agent3 --> lb

    agent1 -->|CSI| cinder

    %% etcd peers replicate over 2379-2380 (matches the secgroup internal rule)
    server1 <-->|etcd sync 2379-2380| server2
    server2 <--> server3

Déploiement avec Terraform

# rke2/main.tf

# Pin the OpenStack provider; "~> 1.54" permits 1.54.x and later 1.x
# releases but blocks a major-version jump to 2.x.
terraform {
  required_providers {
    openstack = {
      source  = "terraform-provider-openstack/openstack"
      version = "~> 1.54"
    }
  }
}

# Cluster sizing, image and network inputs. Types and descriptions are
# declared so `terraform plan` rejects malformed values early; defaults
# are unchanged, so existing callers are unaffected.

variable "cluster_name" {
  description = "Prefix applied to every OpenStack resource of the cluster"
  type        = string
  default     = "rke2-cluster"
}

variable "server_count" {
  description = "Number of RKE2 server (control-plane) nodes; use an odd number for etcd quorum"
  type        = number
  default     = 3
}

variable "agent_count" {
  description = "Number of RKE2 agent (worker) nodes"
  type        = number
  default     = 3
}

variable "server_flavor" {
  description = "Nova flavor for server nodes"
  type        = string
  default     = "m1.large"
}

variable "agent_flavor" {
  description = "Nova flavor for agent nodes"
  type        = string
  default     = "m1.medium"
}

variable "image_name" {
  description = "Glance image used for all nodes"
  type        = string
  default     = "ubuntu-22.04"
}

variable "network_id" {
  description = "ID of the Neutron network hosting the nodes"
  type        = string
}

variable "subnet_id" {
  description = "ID of the subnet used for the LB VIP and pool members"
  type        = string
}

variable "external_network" {
  description = "Name of the external network providing floating IPs"
  type        = string
  default     = "external"
}

# Shared join token: the same value is rendered into every server and
# agent cloud-init (see the templatefile calls below). special = false
# keeps the token shell/YAML-safe.
resource "random_password" "rke2_token" {
  length  = 64
  special = false
}

# Single security group shared by servers and agents.
resource "openstack_networking_secgroup_v2" "rke2" {
  name        = "${var.cluster_name}-sg"
  description = "Security group for RKE2"
}

# Public TCP ports opened on every node (consumed by the "rules" resource).
locals {
  sg_rules = [
    { port = 22, desc = "SSH" },
    { port = 6443, desc = "Kubernetes API" },
    { port = 9345, desc = "RKE2 supervisor" },
    { port = 80, desc = "HTTP" },
    { port = 443, desc = "HTTPS" },
  ]
}

# One TCP ingress rule per entry in local.sg_rules. for_each is keyed by
# port (numbers are implicitly converted to string keys), so editing one
# entry never forces the other rules to be recreated.
resource "openstack_networking_secgroup_rule_v2" "rules" {
  for_each          = { for r in local.sg_rules : r.port => r }
  # The "desc" field was declared in local.sg_rules but never consumed;
  # surface it as the rule description.
  description       = each.value.desc
  security_group_id = openstack_networking_secgroup_v2.rke2.id
  direction         = "ingress"
  ethertype         = "IPv4"
  protocol          = "tcp"
  port_range_min    = each.value.port
  port_range_max    = each.value.port
  # NOTE(review): 0.0.0.0/0 exposes SSH and the Kubernetes API to the
  # whole Internet — consider restricting to an admin CIDR variable.
  remote_ip_prefix  = "0.0.0.0/0"
}

# Communication interne cluster
resource "openstack_networking_secgroup_rule_v2" "internal" {
  security_group_id = openstack_networking_secgroup_v2.rke2.id
  direction         = "ingress"
  ethertype         = "IPv4"
  remote_group_id   = openstack_networking_secgroup_v2.rke2.id
}

# SSH keypair injected into every node. The public key path is now a
# variable (default keeps the previous behavior), and pathexpand() is
# required because file() does not resolve "~" by itself.
variable "ssh_public_key_file" {
  description = "Path to the SSH public key installed on the nodes"
  type        = string
  default     = "~/.ssh/id_rsa.pub"
}

resource "openstack_compute_keypair_v2" "rke2" {
  name       = "${var.cluster_name}-keypair"
  public_key = file(pathexpand(var.ssh_public_key_file))
}

# Octavia load balancer fronting the control plane, plus a floating IP so
# the API/supervisor endpoints are reachable from outside the tenant network.
resource "openstack_lb_loadbalancer_v2" "rke2" {
  name          = "${var.cluster_name}-lb"
  vip_subnet_id = var.subnet_id
}

resource "openstack_networking_floatingip_v2" "lb" {
  pool = var.external_network
}

# Bind the floating IP to the LB's VIP port.
resource "openstack_networking_floatingip_associate_v2" "lb" {
  floating_ip = openstack_networking_floatingip_v2.lb.address
  port_id     = openstack_lb_loadbalancer_v2.rke2.vip_port_id
}

# Listener + pool + TCP health monitor for each fronted port:
#   6443 = Kubernetes API, 9345 = RKE2 supervisor (join endpoint).
# Without a monitor, Octavia keeps sending connections to members that
# are down; a simple TCP check removes dead servers from rotation.

resource "openstack_lb_listener_v2" "api" {
  name            = "${var.cluster_name}-api"
  protocol        = "TCP"
  protocol_port   = 6443
  loadbalancer_id = openstack_lb_loadbalancer_v2.rke2.id
}

resource "openstack_lb_pool_v2" "api" {
  name        = "${var.cluster_name}-api-pool"
  protocol    = "TCP"
  lb_method   = "ROUND_ROBIN"
  listener_id = openstack_lb_listener_v2.api.id
}

resource "openstack_lb_monitor_v2" "api" {
  name        = "${var.cluster_name}-api-monitor"
  pool_id     = openstack_lb_pool_v2.api.id
  type        = "TCP"
  delay       = 10
  timeout     = 5
  max_retries = 3
}

resource "openstack_lb_listener_v2" "supervisor" {
  name            = "${var.cluster_name}-supervisor"
  protocol        = "TCP"
  protocol_port   = 9345
  loadbalancer_id = openstack_lb_loadbalancer_v2.rke2.id
}

resource "openstack_lb_pool_v2" "supervisor" {
  name        = "${var.cluster_name}-supervisor-pool"
  protocol    = "TCP"
  lb_method   = "ROUND_ROBIN"
  listener_id = openstack_lb_listener_v2.supervisor.id
}

resource "openstack_lb_monitor_v2" "supervisor" {
  name        = "${var.cluster_name}-supervisor-monitor"
  pool_id     = openstack_lb_pool_v2.supervisor.id
  type        = "TCP"
  delay       = 10
  timeout     = 5
  max_retries = 3
}

# RKE2 server (control-plane) nodes. Node 0 bootstraps the cluster
# (cluster_init = true in its cloud-init); the others join through the
# LB on port 9345.
resource "openstack_compute_instance_v2" "server" {
  count           = var.server_count
  name            = "${var.cluster_name}-server-${count.index}"
  flavor_name     = var.server_flavor
  image_name      = var.image_name
  key_pair        = openstack_compute_keypair_v2.rke2.name
  security_groups = [openstack_networking_secgroup_v2.rke2.name]

  network {
    uuid = var.network_id
  }

  # Rendered cloud-init-server.yaml; cluster_init is true only for index 0.
  user_data = templatefile("${path.module}/cloud-init-server.yaml", {
    rke2_token    = random_password.rke2_token.result
    cluster_init  = count.index == 0
    lb_address    = openstack_networking_floatingip_v2.lb.address
    node_name     = "${var.cluster_name}-server-${count.index}"
    server_index  = count.index
  })
}

# Register every server as a backend of both pools (API and supervisor).
resource "openstack_lb_member_v2" "api" {
  count         = var.server_count
  pool_id       = openstack_lb_pool_v2.api.id
  address       = openstack_compute_instance_v2.server[count.index].access_ip_v4
  protocol_port = 6443
  subnet_id     = var.subnet_id
}

resource "openstack_lb_member_v2" "supervisor" {
  count         = var.server_count
  pool_id       = openstack_lb_pool_v2.supervisor.id
  address       = openstack_compute_instance_v2.server[count.index].access_ip_v4
  protocol_port = 9345
  subnet_id     = var.subnet_id
}

# RKE2 agent (worker) nodes. depends_on only orders *creation* after the
# servers — it does not wait for RKE2 to be up; the agent cloud-init polls
# the supervisor endpoint before installing.
resource "openstack_compute_instance_v2" "agent" {
  count           = var.agent_count
  name            = "${var.cluster_name}-agent-${count.index}"
  flavor_name     = var.agent_flavor
  image_name      = var.image_name
  key_pair        = openstack_compute_keypair_v2.rke2.name
  security_groups = [openstack_networking_secgroup_v2.rke2.name]

  depends_on = [openstack_compute_instance_v2.server]

  network {
    uuid = var.network_id
  }

  user_data = templatefile("${path.module}/cloud-init-agent.yaml", {
    rke2_token = random_password.rke2_token.result
    lb_address = openstack_networking_floatingip_v2.lb.address
    node_name  = "${var.cluster_name}-agent-${count.index}"
  })
}

output "lb_address" {
  description = "Floating IP of the load balancer (cluster API endpoint)"
  value       = openstack_networking_floatingip_v2.lb.address
}

# The documented kubeconfig-retrieval snippet reads
# `terraform output -json | jq -r '.server_ips.value[0]'`,
# but this output did not exist — added here.
output "server_ips" {
  description = "IPv4 addresses of the RKE2 server nodes"
  value       = openstack_compute_instance_v2.server[*].access_ip_v4
}

output "kubeconfig_command" {
  description = "Command to fetch a kubeconfig rewritten to point at the LB"
  value       = "ssh ubuntu@${openstack_compute_instance_v2.server[0].access_ip_v4} 'sudo cat /etc/rancher/rke2/rke2.yaml' | sed 's/127.0.0.1/${openstack_networking_floatingip_v2.lb.address}/g' > ~/.kube/rke2-config"
}

Cloud-init Server

# cloud-init-server.yaml — Terraform template: ${...} and %{...} are
# rendered by templatefile() before cloud-init sees the file.
#cloud-config
package_update: true
package_upgrade: true

packages:
  - curl

write_files:
  - path: /etc/rancher/rke2/config.yaml
    content: |
      token: ${rke2_token}
      tls-san:
        - ${lb_address}
        - ${node_name}
      cni: calico
      %{ if cluster_init }
      cluster-init: true
      %{ else }
      server: https://${lb_address}:9345
      %{ endif }
      # CIS Hardening profile
      profile: cis-1.6

runcmd:
  - |
    # Install RKE2 (server role is the default)
    curl -sfL https://get.rke2.io | sh -

    # With "profile: cis-1.6", rke2-server refuses to start unless an
    # unprivileged etcd user exists and the hardening sysctls are applied
    # (see the RKE2 CIS hardening guide). The sysctl file location depends
    # on the install method, so try both known paths.
    useradd -r -c "etcd user" -s /sbin/nologin -M etcd || true
    cp -f /usr/local/share/rke2/rke2-cis-sysctl.conf /etc/sysctl.d/60-rke2-cis.conf 2>/dev/null \
      || cp -f /usr/share/rke2/rke2-cis-sysctl.conf /etc/sysctl.d/60-rke2-cis.conf
    systemctl restart systemd-sysctl

    # Enable and start the server service
    systemctl enable rke2-server.service
    systemctl start rke2-server.service

    # Wait until the local API answers (also blocks until join completes
    # on secondary servers)
    until /var/lib/rancher/rke2/bin/kubectl --kubeconfig /etc/rancher/rke2/rke2.yaml get nodes; do
      sleep 10
    done

    # Expose kubectl on the PATH
    ln -sf /var/lib/rancher/rke2/bin/kubectl /usr/local/bin/kubectl

    echo "RKE2 server ${node_name} ready"

Cloud-init Agent

# cloud-init-agent.yaml — Terraform template: ${...} is rendered by
# templatefile() before cloud-init sees the file.
#cloud-config
package_update: true

packages:
  - curl

write_files:
  - path: /etc/rancher/rke2/config.yaml
    content: |
      token: ${rke2_token}
      server: https://${lb_address}:9345

runcmd:
  - |
    # Wait until the supervisor endpoint answers. -f is required: without
    # it curl exits 0 on any HTTP response (404/503 from a half-up LB),
    # ending the loop before the server is actually ready.
    until curl -skf https://${lb_address}:9345/ping >/dev/null 2>&1; do
      echo "Waiting for RKE2 server..."
      sleep 10
    done

    # Install RKE2 in agent mode
    curl -sfL https://get.rke2.io | INSTALL_RKE2_TYPE="agent" sh -

    # Enable and start the agent service
    systemctl enable rke2-agent.service
    systemctl start rke2-agent.service

    echo "RKE2 agent ${node_name} ready"

Configuration CIS Hardening

# /etc/rancher/rke2/config.yaml (CIS-related options)

# Apply the CIS 1.6 benchmark profile at startup
profile: cis-1.6

# Pod Security Admission configuration (see psa.yaml below)
pod-security-admission-config-file: /etc/rancher/rke2/psa.yaml

# API server audit logging policy
audit-policy-file: /etc/rancher/rke2/audit-policy.yaml

# Encrypt Secrets at rest in etcd
secrets-encryption: true
# /etc/rancher/rke2/psa.yaml
# Cluster-wide Pod Security Admission defaults: every namespace is held
# to the "restricted" level (enforce + audit + warn) unless exempted.
apiVersion: pod-security.admission.config.k8s.io/v1
kind: PodSecurityConfiguration
defaults:
  enforce: "restricted"
  enforce-version: "latest"
  audit: "restricted"
  audit-version: "latest"
  warn: "restricted"
  warn-version: "latest"
exemptions:
  usernames: []
  runtimeClasses: []
  # System namespaces run privileged components and must be exempt
  namespaces: [kube-system, cis-operator-system]

Exemples pratiques

Déploiement complet

cd terraform/rke2

terraform init
terraform apply

# Wait for the deployment (~10-15 min), then fetch the kubeconfig.
# NOTE(review): the jq query below requires a "server_ips" output to be
# defined in main.tf — verify it exists before relying on this snippet.
LB_IP=$(terraform output -raw lb_address)
SERVER_IP=$(terraform output -json | jq -r '.server_ips.value[0]')

# Copy the kubeconfig and rewrite the loopback endpoint to the LB address
ssh ubuntu@$SERVER_IP 'sudo cat /etc/rancher/rke2/rke2.yaml' | \
    sed "s/127.0.0.1/$LB_IP/g" > ~/.kube/rke2-config

export KUBECONFIG=~/.kube/rke2-config
kubectl get nodes

Vérification CIS

# Run kube-bench as a Job to check CIS compliance; give the Job a moment
# to complete before reading its logs.
kubectl apply -f https://raw.githubusercontent.com/aquasecurity/kube-bench/main/job.yaml
kubectl logs -l app=kube-bench

Installation Cloud Provider OpenStack

# LoadBalancer and volume integration via the Cinder CSI plugin.
# NOTE(review): these manifests expect a Secret containing the OpenStack
# cloud.conf credentials (see the cloud-provider-openstack docs) — confirm
# it is created first, otherwise the CSI pods will not start.
kubectl apply -f https://raw.githubusercontent.com/kubernetes/cloud-provider-openstack/master/manifests/cinder-csi-plugin/cinder-csi-controllerplugin.yaml
kubectl apply -f https://raw.githubusercontent.com/kubernetes/cloud-provider-openstack/master/manifests/cinder-csi-plugin/cinder-csi-nodeplugin.yaml

Ressources

Checkpoint

  • Cluster RKE2 HA (3 servers) déployé
  • Load Balancer configuré pour API et supervisor
  • Agents joints au cluster
  • Profil CIS activé
  • kubeconfig récupéré et fonctionnel
  • Cloud Provider OpenStack installé