
Creating the Controller VMs

Introduction

This section covers creating the 6 VMs the lab needs: 3 controllers (which will also host Ceph), 2 computes, and 1 deployment node.

Prerequisites

Learning objectives

IP addressing plan

VM            br-mgmt (enp1s0)   br-storage (enp2s0)   br-external (enp3s0)   Role
controller-1  10.0.0.11          10.0.1.11             192.168.100.11         Ctrl + Ceph
controller-2  10.0.0.12          10.0.1.12             192.168.100.12         Ctrl + Ceph
controller-3  10.0.0.13          10.0.1.13             192.168.100.13         Ctrl + Ceph
compute-1     10.0.0.21          10.0.1.21             192.168.100.21         Compute
compute-2     10.0.0.22          10.0.1.22             192.168.100.22         Compute
deploy        10.0.0.5           10.0.1.5              192.168.100.5          Ansible
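
The verification loops later in this section hard-code these addresses. If you prefer to keep the plan in a single place, a small bash sketch like the following can hold it (the LAB_MGMT_IP array is a hypothetical helper, not used by the scripts below):

# Hypothetical helper: the addressing plan as an associative array (bash 4+)
declare -A LAB_MGMT_IP=(
  [controller-1]=10.0.0.11 [controller-2]=10.0.0.12 [controller-3]=10.0.0.13
  [compute-1]=10.0.0.21    [compute-2]=10.0.0.22    [deploy]=10.0.0.5
)
for vm in "${!LAB_MGMT_IP[@]}"; do
  echo "$vm -> ${LAB_MGMT_IP[$vm]}"
done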

VIPs (Virtual IPs) for HA

Service       VIP              Network
Internal API  10.0.0.10        br-mgmt
External API  192.168.100.10   br-external
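
These VIPs must be free before deployment; they are brought up later by the HA layer (typically HAProxy/keepalived, depending on the deployment tool). A quick sanity check from the host:

# Nothing should answer on the VIPs yet
for vip in 10.0.0.10 192.168.100.10; do
  ping -c 1 -W 1 $vip > /dev/null 2>&1 && echo "$vip: already in use!" || echo "$vip: free"
done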

Creating the VMs

Complete creation script

#!/bin/bash
# /usr/local/bin/create-lab.sh

set -e

echo "=== Création du lab OpenStack ==="

# Clé SSH
SSH_KEY=$(cat ~/.ssh/id_rsa.pub 2>/dev/null || cat ~/.ssh/id_ed25519.pub)

# Fonction création VM avec IPs fixes
create_vm_with_static_ip() {
    local NAME=$1
    local RAM=$2
    local VCPUS=$3
    local DISK=$4
    local EXTRA_DISK=$5
    local IP_MGMT=$6
    local IP_STORAGE=$7
    local IP_EXTERNAL=$8

    echo "Création de $NAME..."

    BASE_IMG="/var/lib/libvirt/images/base/noble-server-cloudimg-amd64.img"
    VM_DIR="/var/lib/libvirt/images"
    CLOUD_INIT_DIR="/var/lib/libvirt/cloud-init"

    # Create the main (root) disk
    qemu-img create -f qcow2 -F qcow2 \
      -b $BASE_IMG \
      $VM_DIR/${NAME}.qcow2 ${DISK}G

    # Create the OSD disk if requested
    EXTRA_DISK_OPT=""
    if [ "$EXTRA_DISK" -gt 0 ]; then
      qemu-img create -f qcow2 $VM_DIR/${NAME}-osd.qcow2 ${EXTRA_DISK}G
      EXTRA_DISK_OPT="--disk $VM_DIR/${NAME}-osd.qcow2,format=qcow2,bus=virtio"
    fi

    # Cloud-init with static IPs
    mkdir -p $CLOUD_INIT_DIR/$NAME

    cat > $CLOUD_INIT_DIR/$NAME/user-data << EOF
#cloud-config
hostname: $NAME
manage_etc_hosts: true
fqdn: ${NAME}.openstack.local

users:
  - name: ubuntu
    sudo: ALL=(ALL) NOPASSWD:ALL
    shell: /bin/bash
    lock_passwd: false
    ssh_authorized_keys:
      - $SSH_KEY

chpasswd:
  list: |
    ubuntu:ubuntu
  expire: false

package_update: true
packages:
  - qemu-guest-agent
  - python3
  - python3-pip
  - vim
  - curl
  - wget
  - chrony
  - lvm2

write_files:
  - path: /etc/netplan/50-cloud-init.yaml
    content: |
      network:
        version: 2
        ethernets:
          enp1s0:
            addresses:
              - ${IP_MGMT}/24
            routes:
              - to: default
                via: 10.0.0.1
            nameservers:
              addresses:
                - 8.8.8.8
                - 8.8.4.4
          enp2s0:
            addresses:
              - ${IP_STORAGE}/24
            mtu: 9000
          enp3s0:
            addresses:
              - ${IP_EXTERNAL}/24

runcmd:
  - systemctl enable --now qemu-guest-agent
  - netplan apply
  - systemctl restart chrony
EOF

    cat > $CLOUD_INIT_DIR/$NAME/meta-data << EOF
instance-id: $NAME
local-hostname: $NAME
EOF

    # Build the cloud-init ISO
    genisoimage -output $CLOUD_INIT_DIR/$NAME/cidata.iso \
      -V cidata -r -J \
      $CLOUD_INIT_DIR/$NAME/user-data \
      $CLOUD_INIT_DIR/$NAME/meta-data 2>/dev/null

    # Create the VM
    virt-install \
      --name $NAME \
      --ram $RAM \
      --vcpus $VCPUS \
      --disk $VM_DIR/${NAME}.qcow2,format=qcow2,bus=virtio \
      $EXTRA_DISK_OPT \
      --disk $CLOUD_INIT_DIR/$NAME/cidata.iso,device=cdrom \
      --os-variant ubuntu24.04 \
      --network bridge=br-mgmt,model=virtio \
      --network bridge=br-storage,model=virtio \
      --network bridge=br-external,model=virtio \
      --graphics none \
      --console pty,target_type=serial \
      --import \
      --noautoconsole

    echo "$NAME créé"
}

# Create the controllers (with a 200G OSD disk)
create_vm_with_static_ip controller-1 16384 4 100 200 10.0.0.11 10.0.1.11 192.168.100.11
create_vm_with_static_ip controller-2 16384 4 100 200 10.0.0.12 10.0.1.12 192.168.100.12
create_vm_with_static_ip controller-3 16384 4 100 200 10.0.0.13 10.0.1.13 192.168.100.13

# Create the computes (no OSD disk)
create_vm_with_static_ip compute-1 24576 4 50 0 10.0.0.21 10.0.1.21 192.168.100.21
create_vm_with_static_ip compute-2 24576 4 50 0 10.0.0.22 10.0.1.22 192.168.100.22

# Create the deploy node (minimal)
create_vm_with_static_ip deploy 4096 2 50 0 10.0.0.5 10.0.1.5 192.168.100.5

echo ""
echo "=== Toutes les VMs créées ==="
virsh list --all

echo ""
echo "Attendre ~60 secondes pour le boot complet, puis tester:"
echo "  ssh ubuntu@10.0.0.11  # controller-1"
echo "  ssh ubuntu@10.0.0.5   # deploy"
# Make the script executable (run on the host after saving the file)
chmod +x /usr/local/bin/create-lab.sh

Running the script

# Create all the VMs
/usr/local/bin/create-lab.sh

# Wait for boot
sleep 90

# Verify
virsh list --all
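
Instead of a fixed sleep, you can poll cloud-init until it reports completion on every VM (a sketch; it assumes the SSH key was injected and the management IPs already answer):

# Optional: wait for cloud-init to finish on each VM
for ip in 10.0.0.{5,11,12,13,21,22}; do
  until ssh -o StrictHostKeyChecking=no -o ConnectTimeout=3 ubuntu@$ip \
      cloud-init status --wait > /dev/null 2>&1; do
    sleep 5
  done
  echo "$ip: cloud-init finished"
done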

Verifying the VMs

# Connectivity test
for ip in 10.0.0.{5,11,12,13,21,22}; do
  echo "Testing $ip..."
  ping -c 1 -W 2 $ip && echo "OK" || echo "FAIL"
done

# Test SSH
for ip in 10.0.0.{5,11,12,13,21,22}; do
  echo "SSH to $ip..."
  ssh -o StrictHostKeyChecking=no -o ConnectTimeout=5 ubuntu@$ip hostname
done
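
The storage interfaces were configured with MTU 9000. A quick way to confirm the storage network and jumbo frames is to ping with a full-size, non-fragmentable packet (a sketch from controller-1; 8972 = 9000 minus 28 bytes of IP/ICMP headers, and this only succeeds if br-storage on the host also carries MTU 9000):

# Check the storage network and jumbo frames from controller-1
ssh ubuntu@10.0.0.11 'ip -br addr show enp2s0'
ssh ubuntu@10.0.0.11 'for ip in 10.0.1.{12,13,21,22}; do ping -M do -s 8972 -c 1 -W 2 $ip > /dev/null && echo "$ip: jumbo OK" || echo "$ip: FAIL"; done'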

Configuring /etc/hosts

On the Hetzner host:

cat >> /etc/hosts << 'EOF'
# OpenStack Lab
10.0.0.5    deploy
10.0.0.10   api.openstack.local
10.0.0.11   controller-1
10.0.0.12   controller-2
10.0.0.13   controller-3
10.0.0.21   compute-1
10.0.0.22   compute-2
EOF
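
A quick check that name resolution on the host now picks up these entries:

# Short names and the API VIP alias should resolve
getent hosts controller-1 api.openstack.local deploy
ping -c 1 controller-1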

SSH configuration from the host

cat >> ~/.ssh/config << 'EOF'
Host controller-* compute-* deploy
  User ubuntu
  StrictHostKeyChecking no
  UserKnownHostsFile /dev/null

Host controller-1
  HostName 10.0.0.11

Host controller-2
  HostName 10.0.0.12

Host controller-3
  HostName 10.0.0.13

Host compute-1
  HostName 10.0.0.21

Host compute-2
  HostName 10.0.0.22

Host deploy
  HostName 10.0.0.5
EOF
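
With this config in place, the short host names work directly from the host:

# The aliases now resolve to the right IP and user
ssh controller-1 hostname
ssh deploy hostname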

Copying the SSH key to all VMs

# Push the key from the host (fallback: cloud-init already injected it at VM creation)
for host in controller-{1,2,3} compute-{1,2} deploy; do
  ssh-copy-id -o StrictHostKeyChecking=no ubuntu@$host
done

Initial VM configuration

Script to run on each VM:

#!/bin/bash
# /usr/local/bin/init-vm.sh (run via SSH on each VM)

# Update /etc/hosts
sudo tee -a /etc/hosts << 'EOF'
# OpenStack Lab
10.0.0.5    deploy
10.0.0.10   api.openstack.local vip
10.0.0.11   controller-1
10.0.0.12   controller-2
10.0.0.13   controller-3
10.0.0.21   compute-1
10.0.0.22   compute-2

# Storage network aliases
10.0.1.11   controller-1-storage
10.0.1.12   controller-2-storage
10.0.1.13   controller-3-storage
10.0.1.21   compute-1-storage
10.0.1.22   compute-2-storage
EOF

# Disable swap (for Kubernetes later)
sudo swapoff -a
sudo sed -i '/swap/d' /etc/fstab

# Configure chrony
sudo tee /etc/chrony/chrony.conf << 'EOF'
pool ntp.ubuntu.com iburst
driftfile /var/lib/chrony/chrony.drift
makestep 1.0 3
rtcsync
EOF
sudo systemctl restart chrony

# Kernel modules for OpenStack
sudo tee /etc/modules-load.d/openstack.conf << 'EOF'
br_netfilter
overlay
ip_vs
ip_vs_rr
ip_vs_wrr
ip_vs_sh
nf_conntrack
EOF

sudo modprobe br_netfilter
sudo modprobe overlay

# Sysctl settings for networking
sudo tee /etc/sysctl.d/99-openstack.conf << 'EOF'
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.ipv4.ip_forward = 1
net.ipv4.conf.all.rp_filter = 0
net.ipv4.conf.default.rp_filter = 0
EOF
sudo sysctl --system

Deploying the initial configuration

# From the Hetzner host
for host in controller-{1,2,3} compute-{1,2} deploy; do
  echo "Configuring $host..."
  ssh ubuntu@$host 'bash -s' << 'SCRIPT'
# Condensed version of the init-vm.sh script above
sudo tee -a /etc/hosts << 'EOF'
10.0.0.5    deploy
10.0.0.10   api.openstack.local vip
10.0.0.11   controller-1
10.0.0.12   controller-2
10.0.0.13   controller-3
10.0.0.21   compute-1
10.0.0.22   compute-2
10.0.1.11   controller-1-storage
10.0.1.12   controller-2-storage
10.0.1.13   controller-3-storage
10.0.1.21   compute-1-storage
10.0.1.22   compute-2-storage
EOF

sudo swapoff -a
sudo sed -i '/swap/d' /etc/fstab

sudo tee /etc/modules-load.d/openstack.conf << 'EOF'
br_netfilter
overlay
EOF

sudo modprobe br_netfilter || true
sudo modprobe overlay || true

sudo tee /etc/sysctl.d/99-openstack.conf << 'EOF'
net.bridge.bridge-nf-call-iptables = 1
net.bridge.bridge-nf-call-ip6tables = 1
net.ipv4.ip_forward = 1
EOF
sudo sysctl --system
SCRIPT
done
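
A quick check that the settings were applied everywhere (modules loaded, IP forwarding on, swap disabled):

# Verify kernel modules, sysctl and swap state on every node
for host in controller-{1,2,3} compute-{1,2} deploy; do
  echo "=== $host ==="
  ssh ubuntu@$host 'lsmod | grep -E "^(br_netfilter|overlay)"; sysctl net.ipv4.ip_forward; free -m | grep -i swap'
done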

Verifying the OSD disks on the controllers

# Check that the OSD disk is present
for host in controller-{1,2,3}; do
  echo "=== $host ==="
  ssh ubuntu@$host 'lsblk'
done

# The 200G vdb disk will be used as the Ceph OSD
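
For Ceph, the OSD disk must be empty: no partitions and no filesystem signatures. Running wipefs without options only lists existing signatures (it erases nothing), so this check is safe:

# vdb should show no partitions, and wipefs should print nothing
for host in controller-{1,2,3}; do
  echo "=== $host ==="
  ssh ubuntu@$host 'lsblk -o NAME,SIZE,TYPE /dev/vdb; sudo wipefs /dev/vdb'
done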

Practical examples

Lab status script

#!/bin/bash
# /usr/local/bin/lab-status.sh

echo "=== Status des VMs ==="
virsh list --all

echo -e "\n=== Ressources utilisées ==="
virsh domstats --raw | grep -E "^Domain|vcpu.current|balloon.current"

echo -e "\n=== Connectivité réseau ==="
for ip in 10.0.0.{5,11,12,13,21,22}; do
  name=$(getent hosts $ip | awk '{print $2}')
  ping -c 1 -W 1 $ip > /dev/null 2>&1 && status="UP" || status="DOWN"
  echo "$name ($ip): $status"
done

echo -e "\n=== Espace disque hôte ==="
df -h /var/lib/libvirt

Snapshotting all the VMs

#!/bin/bash
# /usr/local/bin/snapshot-lab.sh

SNAP_NAME=${1:-"baseline"}

for vm in controller-{1,2,3} compute-{1,2} deploy; do
  echo "Snapshot $vm -> $SNAP_NAME"
  virsh snapshot-create-as $vm $SNAP_NAME "Snapshot $SNAP_NAME"
done

echo "Snapshots créés:"
for vm in controller-{1,2,3} compute-{1,2} deploy; do
  virsh snapshot-list $vm
done
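
To roll the whole lab back to a snapshot later, the reverse operation is virsh snapshot-revert (a sketch; the path /usr/local/bin/restore-lab.sh is a hypothetical companion to the script above):

#!/bin/bash
# /usr/local/bin/restore-lab.sh (hypothetical companion script)
SNAP_NAME=${1:-"baseline"}

for vm in controller-{1,2,3} compute-{1,2} deploy; do
  echo "Reverting $vm to $SNAP_NAME"
  virsh snapshot-revert $vm $SNAP_NAME
done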

Resources

Checkpoint

  • 6 VMs created and started
  • All VMs reachable over SSH
  • Static IPs configured correctly
  • /etc/hosts configured on all VMs
  • OSD disks present on the controllers (vdb, 200G)
  • Kernel modules loaded (br_netfilter, overlay)