---
# ============================================================
# Global Variables — AI Platform Ansible Automation
# ============================================================

# Domain and networking
domain: example.com
ai_server_ip: 192.168.1.100
nginx_proxy_ip: 192.168.1.30
coredns_host_ip: 192.168.1.29

# SSH user for all managed hosts (override per-host in host_vars if needed)
ansible_user: admin

# Platform identity — used for Keycloak realm, Vault paths, UI display names
platform_name: "AI Platform"
vault_project_slug: "ai-platform"

# Service URLs
vault_url: "https://vault.{{ domain }}"
keycloak_url: "https://idm.{{ domain }}"
openwebui_url: "https://ollama-ui.{{ domain }}"
ollama_api_url: "https://ollama-api.{{ domain }}"

# Storage paths on ai_server
ai_data_root: /mnt/ai_data
ollama_models_path: "{{ ai_data_root }}/ollama_models"
keycloak_data_path: "{{ ai_data_root }}/keycloak"
qdrant_data_path: "{{ ai_data_root }}/qdrant"
openwebui_data_path: "{{ ai_data_root }}/open-webui"
openclaw_data_path: "{{ ai_data_root }}/openclaw"
benchmark_results_path: "{{ ai_data_root }}/benchmarks"

# Storage paths on coredns_host
vault_config_path: /docker_mounts/vault/config
vault_data_path: /docker_mounts/vault/data
vault_scripts_path: /docker_mounts/vault
coredns_zone_file: "/docker_mounts/coredns/{{ domain }}.db"

# Local control-node paths (gitignored)
vault_token_file: "{{ playbook_dir }}/../vault/.vault-token"
vault_init_file: "{{ playbook_dir }}/../vault/.vault-init.json"

# Vault configuration
vault_port: 8202
vault_api_addr: "https://vault.{{ domain }}"
vault_secret_prefix: "secret/data/{{ vault_project_slug }}"
vault_secret_meta_prefix: "secret/metadata/{{ vault_project_slug }}"
vault_approle_name: "ai-services"

# Service ports
keycloak_port: 8180
ollama_port: 11434
ollama_node0_port: 11435
qdrant_http_port: 6333
qdrant_grpc_port: 6334

# Ollama configuration
ollama_host: "0.0.0.0:11434"  # keep host port in sync with ollama_port above
ollama_num_threads: 14
ollama_num_parallel: 2
ollama_max_loaded_models: 3  # 3 per socket (6 total across both NUMA instances)
# Quoted on purpose: these are passed through as environment-variable strings.
ollama_keep_alive: "-1"
ollama_flash_attention: "1"

# NUMA/CPU affinity - Dell M630, 2x E5-2690v4
# CPUs are interleaved: odd = socket 1 (NUMA node 1), even = socket 0.
# Physical cores on node 1: 1,3,...,27 (14 cores). HT siblings: 29,31,...,55.
# Physical cores on node 0: 0,2,...,26 (14 cores). HT siblings: 28,30,...,54.
# Pinning to physical cores only eliminates HT contention on the memory bus.
# NUMA node 1 has ~120 GB free RAM vs node 0's ~75 GB.
ollama_numa_node: "1"
ollama_cpu_affinity: "1 3 5 7 9 11 13 15 17 19 21 23 25 27"
ollama_node0_cpu_affinity: "0 2 4 6 8 10 12 14 16 18 20 22 24 26"
ollama_binary_path: /usr/bin/ollama

# Keycloak configuration
keycloak_realm: "{{ vault_project_slug }}"
keycloak_realm_display: "{{ platform_name }}"
keycloak_client_id: open-webui
keycloak_redirect_uri: "https://ollama-ui.{{ domain }}/*"
keycloak_oidc_url: "https://idm.{{ domain }}/realms/{{ keycloak_realm }}"
keycloak_realm_admin_user: "{{ vault_project_slug }}-admin"

# Benchmark thresholds
benchmark_thresholds:
  min_tokens_per_sec: 5.0
  min_quality_score: 0.6
  min_composite_score: 0.55
benchmark_toks_norm_ceiling: 40  # Conservative dual-socket estimate (was 22 single-socket)
benchmark_coding_threshold: 0.10  # Delta to classify a model as coding-specialized

# Modelfile aliases created by 04_models.yml — excluded from benchmark to prevent
# 32k-token KV cache allocations stalling the run with 285-second response times.
benchmark_skip_aliases:
  - "coder-128k"
  - "coder-32k"
  - "coder-rotate"
  - "llama-family"
  - "gemma-family"

benchmark_small_max_gb: 10  # upper size boundary for small pass (< 10 GB), based on runtime RAM
benchmark_medium_max_gb: 15  # upper size boundary for medium pass (10–15 GB), based on runtime RAM
benchmark_size_overhead_factor: 1.2  # ollama list shows disk size; multiply by this to estimate runtime RAM
benchmark_load_timeout: 180  # seconds — warm-up "Hi" prompt per model before benchmarking
benchmark_small_timeout: 90  # seconds per request, small models (<10 GB)
benchmark_medium_timeout: 240  # seconds per request, medium models (10–15 GB)
benchmark_large_timeout: 480  # seconds per request, large models (>15 GB)
# Cap output tokens; allows full coding responses (def+return+docstring+assert).
# Worst-case: 6.5 tok/s → 77 s, 22 tok/s → 23 s.
benchmark_num_predict: 500

# Explicit category overrides applied before heuristics. Keys are model names as
# returned by `ollama list`. Valid values: 'coding' or 'general'.
# Example: { "deepseek-coder-v2": "coding", "qwen2.5-coder:7b": "coding" }
model_category_overrides: {}

# Baseline models — always pulled before benchmarking regardless of model_selection.json.
# These are the minimum set needed to populate all 4 slots with meaningful candidates.
baseline_models:
  - "llama3.2:3b"
  - "deepseek-coder-v2:16b"
  - "qwen2.5-coder:7b"
  - "llama3.1:8b"

# Candidate models to recommend/pull if benchmark scores are below threshold
candidate_models:
  - name: "qwen2.5-coder:32b-instruct-q4_K_M"
    size_gb: 20
    expected_tokens_sec: 4.5
    reason: "Larger qwen2.5-coder for higher quality"
    category: coding
  - name: "codegemma:7b-instruct-q5_K_M"
    size_gb: 5.5
    expected_tokens_sec: 12.0
    reason: "Fast Google coding model"
    category: coding
  - name: "starcoder2:15b-instruct-q4_K_M"
    size_gb: 9.5
    expected_tokens_sec: 7.0
    reason: "StarCoder2 coding specialist"
    category: coding

# OpenClaw default model — overridden dynamically by 08_openclaw.yml from slot1_general
openclaw_model: "deepseek-coder-v2:16b-lite-instruct-q4_K_M"

# AWS Bedrock (OpenAI-compatible API via Open WebUI)
# Pass bearer_token on first run: -e "bedrock_bearer_token=<value>"
# To rotate: re-run with the new token value.
bedrock_aws_region: "us-east-1"

# NGINX SSL certificate paths (on nginx_proxy)
nginx_ssl_cert: "/etc/nginx/ssl/{{ domain }}.crt"
nginx_ssl_key: "/etc/nginx/ssl/{{ domain }}.key"