CADF Audit

Introduction

CADF (Cloud Auditing Data Federation) is the DMTF standard for cloud audit events. OpenStack emits audit events in CADF format to provide traceability of user actions and to support regulatory compliance.

Prerequisites

  • OpenStack with messaging configured
  • Logging in place
  • Storage for audit events

Learning objectives

CADF Audit Architecture

graph TB
    user["User"]

    subgraph OpenStack
        api["API Service<br/>Nova, Neutron...<br/>Middleware audit"]
        oslo_msg["Oslo.messaging<br/>Notification driver"]
        notifier["Notifier<br/>cadf, log"]
    end

    subgraph Audit_System["Audit System"]
        listener["Audit Listener<br/>Consomme messages"]
        storage[("Audit Storage<br/>Elasticsearch<br/>SQL Database")]
        siem["SIEM<br/>Splunk, ELK<br/>Analyse"]
    end

    user -->|API call| api
    api -->|Audit event<br/>CADF| oslo_msg
    oslo_msg -->|Consume<br/>AMQP| listener
    listener -->|Store| storage
    storage -->|Analyze| siem

CADF event structure

{
  "typeURI": "http://schemas.dmtf.org/cloud/audit/1.0/event",
  "eventType": "activity",
  "id": "openstack:12345678-1234-5678-1234-567812345678",
  "eventTime": "2024-01-15T10:30:00.000000+0000",
  "action": "create",
  "outcome": "success",
  "initiator": {
    "typeURI": "service/security/account/user",
    "id": "user-uuid",
    "name": "admin",
    "project_id": "project-uuid",
    "domain_id": "default",
    "host": {
      "address": "192.168.1.100",
      "agent": "python-novaclient"
    }
  },
  "target": {
    "typeURI": "compute/server",
    "id": "server-uuid",
    "name": "my-instance"
  },
  "observer": {
    "typeURI": "service/compute",
    "id": "nova-api",
    "name": "nova"
  },
  "reason": {
    "reasonCode": "201",
    "reasonType": "HTTP"
  },
  "requestPath": "/v2.1/servers"
}
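
The same event can also be built programmatically. Below is a minimal sketch using the pycadf library (the library OpenStack's audit middleware relies on); it assumes pycadf is installed, and every id, name and address is a placeholder.

# build-cadf-event.py (sketch, assumes: pip install pycadf)
from pycadf import event, host, resource

initiator = resource.Resource(
    typeURI='service/security/account/user',
    id='user-uuid',
    name='admin',
    host=host.Host(address='192.168.1.100', agent='python-novaclient'),
)
target = resource.Resource(typeURI='compute/server', id='server-uuid', name='my-instance')
observer = resource.Resource(typeURI='service/compute', id='nova-api', name='nova')

# id and eventTime are generated automatically when omitted
evt = event.Event(
    eventType='activity',
    action='create',
    outcome='success',
    initiator=initiator,
    target=target,
    observer=observer,
)

print(evt.as_dict())  # roughly the JSON document shown above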

Enabling CADF audit

# /etc/kolla/globals.yml

# Enable notifications globally
enable_ceilometer: "yes"  # or configure oslo.messaging manually per service

# Per-service configuration (oslo.messaging)
# /etc/kolla/config/nova/nova.conf

[oslo_messaging_notifications]
driver = messagingv2
topics = notifications,audit

[audit_middleware_notifications]
driver = log
# or driver = messaging to ship audit events through RabbitMQ

# /etc/kolla/config/keystone/keystone.conf
[oslo_messaging_notifications]
driver = messagingv2
topics = notifications,audit

# Enable the audit middleware in the paste pipeline
[paste_deploy]
config_file = /etc/keystone/keystone-paste.ini
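
To check that audit notifications actually reach RabbitMQ after this configuration, you can peek at the notification queue. A minimal sketch: the audit.info queue name, host and credentials are assumptions to adapt to your deployment.

# check-audit-queue.py (sketch)
import json
import pika

credentials = pika.PlainCredentials('openstack', 'password')
connection = pika.BlockingConnection(
    pika.ConnectionParameters(host='rabbitmq', credentials=credentials))
channel = connection.channel()

# basic_get returns (None, None, None) when the queue is empty
method, properties, body = channel.basic_get(queue='audit.info', auto_ack=True)
if body is None:
    print('No audit message waiting (trigger an API call and retry)')
else:
    print(json.dumps(json.loads(body), indent=2)[:500])

connection.close()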

paste.ini configuration

# /etc/kolla/config/keystone/keystone-paste.ini

[pipeline:public_api]
pipeline = cors sizelimit osprofiler url_normalize request_id admin_token_auth build_auth_context token_auth audit json_body ec2_extension_v3 s3_extension service_v3

[pipeline:admin_api]
pipeline = cors sizelimit osprofiler url_normalize request_id admin_token_auth build_auth_context token_auth audit json_body ec2_extension_v3 s3_extension service_v3

[filter:audit]
paste.filter_factory = keystonemiddleware.audit:filter_factory
audit_map_file = /etc/keystone/keystone_api_audit_map.conf
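
Before restarting keystone, a quick sanity check confirms that keystonemiddleware is installed and that the audit filter is really present in both pipelines (the file path below is the one used on this page):

# check-audit-pipeline.py (sketch)
import configparser

# keystonemiddleware must be importable, otherwise paste cannot load the filter
import keystonemiddleware.audit  # noqa: F401

cfg = configparser.ConfigParser(interpolation=None)
cfg.read('/etc/keystone/keystone-paste.ini')

for section in ('pipeline:public_api', 'pipeline:admin_api'):
    pipeline = cfg.get(section, 'pipeline', fallback='')
    status = 'OK' if 'audit' in pipeline.split() else 'MISSING'
    print(f'{section}: audit filter {status}')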

Audit API Mapping

# /etc/kolla/config/keystone/keystone_api_audit_map.conf

[DEFAULT]
target_endpoint_type = identity

[path_keywords]
users = user
projects = project
domains = domain
roles = role
groups = group
credentials = credential
tokens = token

[service_endpoints]
identity = service/identity

[custom_actions]
validate_token = read/authenticate
authenticate = authenticate

Audit consumer

#!/usr/bin/env python3
# audit-consumer.py

import json
import pika
from datetime import datetime
from elasticsearch import Elasticsearch

# RabbitMQ connection
credentials = pika.PlainCredentials('openstack', 'password')
connection = pika.BlockingConnection(
    pika.ConnectionParameters(
        host='rabbitmq',
        credentials=credentials
    )
)
channel = connection.channel()
channel.queue_declare(queue='audit')
# The exchange name must match oslo.messaging's control_exchange ('openstack' by default)
channel.queue_bind(exchange='openstack', queue='audit', routing_key='audit.*')

# Elasticsearch connection
es = Elasticsearch(['http://elasticsearch:9200'])

def callback(ch, method, properties, body):
    msg = json.loads(body)

    # messagingv2 wraps notifications in an oslo.messaging envelope;
    # unwrap it, then pull the CADF event out of the notification payload
    if 'oslo.message' in msg:
        msg = json.loads(msg['oslo.message'])
    event = msg.get('payload', msg)

    # Flatten the CADF event into an audit record
    audit_record = {
        '@timestamp': event.get('eventTime', datetime.utcnow().isoformat()),
        'action': event.get('action'),
        'outcome': event.get('outcome'),
        'user': event.get('initiator', {}).get('name'),
        'user_id': event.get('initiator', {}).get('id'),
        'project_id': event.get('initiator', {}).get('project_id'),
        'source_ip': event.get('initiator', {}).get('host', {}).get('address'),
        'target_type': event.get('target', {}).get('typeURI'),
        'target_id': event.get('target', {}).get('id'),
        'target_name': event.get('target', {}).get('name'),
        'service': event.get('observer', {}).get('name'),
        'request_path': event.get('requestPath'),
        'raw_event': event
    }

    # Index into Elasticsearch
    es.index(
        index=f"openstack-audit-{datetime.utcnow().strftime('%Y.%m')}",
        document=audit_record
    )

    print(f"Indexed: {audit_record['action']} by {audit_record['user']}")

channel.basic_consume(queue='audit', on_message_callback=callback, auto_ack=True)
print('Waiting for audit events...')
channel.start_consuming()

Grafana audit dashboard

{
  "title": "Audit Dashboard",
  "panels": [
    {
      "title": "Events by Action",
      "type": "piechart",
      "targets": [
        {
          "query": "action.keyword",
          "metrics": [{"type": "count"}],
          "bucketAggs": [{"type": "terms", "field": "action.keyword"}]
        }
      ]
    },
    {
      "title": "Events Timeline",
      "type": "timeseries",
      "targets": [
        {
          "query": "*",
          "metrics": [{"type": "count"}],
          "bucketAggs": [{"type": "date_histogram", "field": "@timestamp"}]
        }
      ]
    },
    {
      "title": "Failed Actions",
      "type": "table",
      "targets": [
        {
          "query": "outcome:failure",
          "fields": ["@timestamp", "user", "action", "target_type", "source_ip"]
        }
      ]
    },
    {
      "title": "Top Users by Activity",
      "type": "bargauge",
      "targets": [
        {
          "query": "*",
          "metrics": [{"type": "count"}],
          "bucketAggs": [{"type": "terms", "field": "user.keyword", "size": 10}]
        }
      ]
    }
  ]
}
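
The dashboard can be provisioned through Grafana's HTTP API instead of the UI. A sketch, assuming the JSON above is saved as audit-dashboard.json and that the URL and admin credentials are adapted to your deployment:

# push-audit-dashboard.py (sketch)
import json
import requests

with open('audit-dashboard.json') as f:
    dashboard = json.load(f)

resp = requests.post(
    'http://grafana:3000/api/dashboards/db',
    auth=('admin', 'password'),
    json={'dashboard': dashboard, 'overwrite': True},
    timeout=10,
)
resp.raise_for_status()
print(resp.json())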

Audit alerts

# /etc/kolla/config/prometheus/rules/audit-alerts.yml
# Note: the expressions below are LogQL, so they must be evaluated by the Loki
# ruler against audit logs shipped to Loki with the label job="audit"
groups:
  - name: audit-security
    rules:
      - alert: HighFailedAuthAttempts
        expr: |
          sum(count_over_time({job="audit"} |= "authenticate" |= "failure"[5m])) > 10
        for: 1m
        labels:
          severity: critical
        annotations:
          summary: "High number of failed authentication attempts"

      - alert: AdminActionFromUnknownIP
        expr: |
          count_over_time({job="audit"} |= "admin" | json | source_ip!~"10.0.0.*"[5m]) > 0
        for: 0m
        labels:
          severity: warning
        annotations:
          summary: "Admin action from unexpected IP"

      - alert: UnusualDeleteActivity
        expr: |
          sum(count_over_time({job="audit"} |= "delete"[1h])) > 50
        for: 5m
        labels:
          severity: warning
        annotations:
          summary: "Unusual number of delete operations"

Audit flow diagram

sequenceDiagram
    actor User
    participant API as API<br/>(+ Audit Middleware)
    participant MQ as RabbitMQ<br/>(audit topic)
    participant Consumer as Audit Consumer
    participant ES as Elasticsearch
    participant SIEM as Grafana/SIEM
    participant Alert as Alerting

    User->>API: API Request
    API->>API: Execute request
    API->>API: Generate CADF event
    API->>MQ: Publish audit event

    MQ->>Consumer: Consume
    Consumer->>Consumer: Parse CADF
    Consumer->>ES: Index event

    SIEM->>ES: Query
    SIEM->>SIEM: Dashboard

    alt Anomaly detected
        ES->>Alert: Trigger alert
        Alert->>Alert: Notify security team
    end

Practical examples

Elasticsearch audit queries

# Failed actions over the last 24 hours
curl -X GET "elasticsearch:9200/openstack-audit-*/_search" -H 'Content-Type: application/json' -d'
{
  "query": {
    "bool": {
      "must": [
        {"match": {"outcome": "failure"}},
        {"range": {"@timestamp": {"gte": "now-24h"}}}
      ]
    }
  },
  "sort": [{"@timestamp": "desc"}]
}'

# Actions per user
curl -X GET "elasticsearch:9200/openstack-audit-*/_search" -H 'Content-Type: application/json' -d'
{
  "size": 0,
  "aggs": {
    "users": {
      "terms": {"field": "user.keyword", "size": 20}
    }
  }
}'

# Search for suspicious deletions
curl -X GET "elasticsearch:9200/openstack-audit-*/_search" -H 'Content-Type: application/json' -d'
{
  "query": {
    "bool": {
      "must": [
        {"match": {"action": "delete"}},
        {"range": {"@timestamp": {"gte": "now-1h"}}}
      ]
    }
  }
}'
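
The same queries can be scripted. Below, the failed-actions query rewritten with the Python elasticsearch client (assuming the 8.x client, as in the consumer above), which is convenient for feeding a report:

# failed-actions.py (sketch)
from elasticsearch import Elasticsearch

es = Elasticsearch(['http://elasticsearch:9200'])

resp = es.search(
    index='openstack-audit-*',
    query={
        'bool': {
            'must': [
                {'match': {'outcome': 'failure'}},
                {'range': {'@timestamp': {'gte': 'now-24h'}}},
            ]
        }
    },
    sort=[{'@timestamp': 'desc'}],
    size=20,
)

for hit in resp['hits']['hits']:
    doc = hit['_source']
    print(doc['@timestamp'], doc.get('user'), doc.get('action'), doc.get('target_type'))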

Compliance report

#!/bin/bash
# audit-report.sh

START_DATE=${1:-$(date -d "30 days ago" +%Y-%m-%d)}
END_DATE=${2:-$(date +%Y-%m-%d)}
RANGE="{\"range\": {\"@timestamp\": {\"gte\": \"$START_DATE\", \"lte\": \"$END_DATE\"}}}"

echo "=== Audit Report: $START_DATE to $END_DATE ==="

echo -e "\n=== Total Events ==="
curl -s "elasticsearch:9200/openstack-audit-*/_count" -H 'Content-Type: application/json' -d"
{
  \"query\": $RANGE
}" | jq .count

echo -e "\n=== Events by Service ==="
curl -s "elasticsearch:9200/openstack-audit-*/_search" -H 'Content-Type: application/json' -d"
{
  \"size\": 0,
  \"query\": $RANGE,
  \"aggs\": {\"services\": {\"terms\": {\"field\": \"service.keyword\"}}}
}" | jq '.aggregations.services.buckets'

echo -e "\n=== Failed Actions ==="
curl -s "elasticsearch:9200/openstack-audit-*/_count" -H 'Content-Type: application/json' -d"
{
  \"query\": {\"bool\": {\"must\": [{\"match\": {\"outcome\": \"failure\"}}, $RANGE]}}
}" | jq .count

echo -e "\n=== Unique Users ==="
curl -s "elasticsearch:9200/openstack-audit-*/_search" -H 'Content-Type: application/json' -d"
{
  \"size\": 0,
  \"query\": $RANGE,
  \"aggs\": {\"users\": {\"cardinality\": {\"field\": \"user_id.keyword\"}}}
}" | jq '.aggregations.users.value'
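
Retention must also be enforced on the audit indices (see the checkpoint below). An ILM policy is the cleaner option; as a minimal fallback, here is a sketch that drops the monthly openstack-audit-YYYY.MM indices older than a configurable age:

# audit-retention.py (sketch, assumes elasticsearch client 8.x)
from datetime import datetime
from elasticsearch import Elasticsearch

RETENTION_MONTHS = 12
es = Elasticsearch(['http://elasticsearch:9200'])

now = datetime.utcnow()
indices = es.indices.get(index='openstack-audit-*').body  # dict: name -> settings

for name in indices:
    # index names look like openstack-audit-2024.01
    year, month = map(int, name.rsplit('-', 1)[1].split('.'))
    age_months = (now.year - year) * 12 + (now.month - month)
    if age_months > RETENTION_MONTHS:
        print(f'Deleting {name} ({age_months} months old)')
        es.indices.delete(index=name)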

Resources

Checkpoint

  • Audit middleware enabled on all services
  • CADF messages published to RabbitMQ
  • Working audit consumer
  • Events indexed in Elasticsearch
  • Audit dashboard in Grafana
  • Security alerts configured
  • Automated compliance report
  • Log retention policy defined