---
# playbooks/03_benchmark.yml
# Benchmark installed Ollama models and select optimal models for each slot.
#
# Scoring inputs come from the /api/generate responses (eval_count,
# eval_duration, prompt_eval_duration, response text). External vars expected
# from inventory/group_vars: benchmark_thresholds, benchmark_small_max_gb,
# benchmark_medium_max_gb, benchmark_size_overhead_factor,
# benchmark_*_timeout, benchmark_skip_aliases, benchmark_toks_norm_ceiling,
# benchmark_coding_threshold, vault_secret_prefix, vault_token_file,
# vault_url, and optionally model_category_overrides.
- name: "Benchmark | Evaluate Ollama models"
  hosts: ai_server
  become: false
  gather_facts: true
  tags:
    - benchmark
  vars:
    # Comma-separated subset of models to benchmark; empty = benchmark all
    # installed models (minus benchmark_skip_aliases).
    benchmark_models: ""
    # When true, pull a recommended coding model if no model clears the
    # minimum composite score.
    pull_if_better: false
    min_composite_score: "{{ benchmark_thresholds.min_composite_score }}"
    ollama_api_url: "http://localhost:11434"
    # API key is read from Vault at runtime; the token file lives on the
    # controller.
    ollama_api_key: "{{ lookup('community.hashi_vault.hashi_vault', vault_secret_prefix ~ '/ollama:api_key token=' ~ lookup('ansible.builtin.file', vault_token_file) ~ ' url=' ~ vault_url) }}"
    benchmark_results_dir: "{{ playbook_dir }}/../benchmarks/results"
    # Each prompt exercises one scoring category; 'weight' is recorded for
    # future use (the scoring template below does not currently read it).
    test_prompts:
      code_gen:
        prompt: "Write a Python merge sort with type hints, docstring, and 3 unit tests"
        category: coding
        weight: 1.0
      debug:
        # Folded scalar: the more-indented code lines keep their newlines.
        prompt: >-
          Here is a Python function with 3 bugs. Find and fix all bugs:
            def calculate_average(numbers):
                total = 0
                for n in numbers:
                    total =+ n
                average = total / len(numbers
                return averege
        category: coding
        weight: 1.0
      refactor:
        prompt: >-
          Refactor this for readability and performance:
            def f(l):
                r=[]
                for i in range(len(l)):
                    if l[i]%2==0:
                        r.append(l[i]*2)
                return r
        category: coding
        weight: 1.0
      explain:
        prompt: "Explain how Python's GIL works and when it matters"
        category: general
        weight: 1.0
      creative:
        prompt: "Suggest 5 fun family activities for a rainy weekend"
        category: general
        weight: 1.0
      reasoning:
        prompt: "I have 3 apples. I give away half. Then I get 4 more. How many do I have?"
        category: general
        weight: 1.0
      latency:
        # Minimal prompt used solely for the latency measurement.
        prompt: "Hi"
        category: latency
        weight: 0.5
  tasks:
  61. - name: "Benchmark | Ensure results directory exists on control node"
  62. ansible.builtin.file:
  63. path: "{{ benchmark_results_dir }}"
  64. state: directory
  65. mode: "0755"
  66. delegate_to: localhost
  67. tags:
  68. - benchmark-setup
  69. - name: "Benchmark | Wait for Ollama API to be ready"
  70. ansible.builtin.uri:
  71. url: "http://localhost:11434/api/tags"
  72. method: GET
  73. status_code: 200
  74. timeout: 10
  75. register: ollama_ready
  76. retries: 24
  77. delay: 5
  78. until: ollama_ready.status == 200
  79. tags:
  80. - benchmark-discover
  81. - name: "Benchmark | Discover installed models"
  82. ansible.builtin.command: ollama list
  83. changed_when: false
  84. register: ollama_list_output
  85. retries: 6
  86. delay: 10
  87. until: ollama_list_output.rc == 0
  88. tags:
  89. - benchmark-discover
  90. - name: "Benchmark | Parse model names from ollama list"
  91. ansible.builtin.set_fact:
  92. installed_models: "{{ ollama_list_output.stdout_lines[1:] | map('split') | map('first') | list }}"
  93. tags:
  94. - benchmark-discover
  95. - name: "Benchmark | Parse model sizes from ollama list"
  96. ansible.builtin.set_fact:
  97. _benchmark_sizes_json: |
  98. {% set ns = namespace(d={}) %}
  99. {% for line in ollama_list_output.stdout_lines[1:] %}
  100. {% set p = line.split() %}
  101. {% if p | length >= 4 %}
  102. {% set gb = (p[2] | float) if (p[3] | upper == 'GB') else ((p[2] | float) / 1024) %}
  103. {% set _ = ns.d.update({p[0]: gb}) %}
  104. {% endif %}
  105. {% endfor %}
  106. {{ ns.d | to_json }}
  107. tags:
  108. - benchmark-discover
  109. - name: "Benchmark | Partition models into small, medium, and large passes"
  110. ansible.builtin.set_fact:
  111. _small_models: "{{ _alias_filtered | select('in', _small_ok) | list }}"
  112. _medium_models: "{{ _alias_filtered | select('in', _medium_ok) | list }}"
  113. _large_models: "{{ _alias_filtered | reject('in', _small_ok) | reject('in', _medium_ok) | list }}"
  114. models_to_benchmark: "{{ _alias_filtered | list }}"
  115. vars:
  116. _sizes: "{{ _benchmark_sizes_json | from_json }}"
  117. _small_cut: "{{ (benchmark_small_max_gb | float) / (benchmark_size_overhead_factor | float) }}"
  118. _medium_cut: "{{ (benchmark_medium_max_gb | float) / (benchmark_size_overhead_factor | float) }}"
  119. _small_ok: "{{ _sizes | dict2items | selectattr('value', 'le', _small_cut | float) | map(attribute='key') | list }}"
  120. _medium_ok: "{{ _sizes | dict2items | selectattr('value', 'gt', _small_cut | float)
  121. | selectattr('value', 'le', _medium_cut | float)
  122. | map(attribute='key') | list }}"
  123. _alias_filtered: "{{ installed_models | reject('match', '^(' ~ benchmark_skip_aliases | join('|') ~ ')(:|$)') | list }}"
  124. when: benchmark_models | default('') | length == 0
  125. tags:
  126. - benchmark-discover
  127. - name: "Benchmark | Set models_to_benchmark to specified subset"
  128. ansible.builtin.set_fact:
  129. models_to_benchmark: "{{ _specified }}"
  130. _small_models: "{{ _specified | select('in', _small_ok) | list }}"
  131. _medium_models: "{{ _specified | select('in', _medium_ok) | list }}"
  132. _large_models: "{{ _specified | reject('in', _small_ok) | reject('in', _medium_ok) | list }}"
  133. vars:
  134. _specified: "{{ benchmark_models.split(',') | map('trim') | list }}"
  135. _sizes: "{{ _benchmark_sizes_json | from_json }}"
  136. _small_cut: "{{ (benchmark_small_max_gb | float) / (benchmark_size_overhead_factor | float) }}"
  137. _medium_cut: "{{ (benchmark_medium_max_gb | float) / (benchmark_size_overhead_factor | float) }}"
  138. _small_ok: "{{ _sizes | dict2items | selectattr('value', 'le', _small_cut | float) | map(attribute='key') | list }}"
  139. _medium_ok: "{{ _sizes | dict2items | selectattr('value', 'gt', _small_cut | float)
  140. | selectattr('value', 'le', _medium_cut | float)
  141. | map(attribute='key') | list }}"
  142. when: benchmark_models | default('') | length > 0
  143. tags:
  144. - benchmark-discover
  145. - name: "Benchmark | Display models to benchmark"
  146. ansible.builtin.debug:
  147. msg:
  148. - "Small pass (timeout {{ benchmark_small_timeout }}s, ≤{{ benchmark_small_max_gb }}GB): {{ _small_models }}"
  149. - "Medium pass (timeout {{ benchmark_medium_timeout }}s, {{ benchmark_small_max_gb }}–{{ benchmark_medium_max_gb }}GB): {{ _medium_models }}"
  150. - "Large pass (timeout {{ benchmark_large_timeout }}s, >{{ benchmark_medium_max_gb }}GB): {{ _large_models }}"
  151. tags:
  152. - benchmark-discover
  153. - name: "Benchmark | Run test prompts against small models"
  154. ansible.builtin.uri:
  155. url: "{{ ollama_api_url }}/api/generate"
  156. method: POST
  157. body_format: json
  158. body:
  159. model: "{{ item.0 }}"
  160. prompt: "{{ test_prompts[item.1].prompt }}"
  161. stream: false
  162. headers:
  163. Authorization: "Bearer {{ ollama_api_key }}"
  164. timeout: "{{ benchmark_small_timeout }}"
  165. status_code: 200
  166. loop: "{{ _small_models | product(test_prompts.keys() | list) | list }}"
  167. loop_control:
  168. label: "{{ item.0 }} / {{ item.1 }}"
  169. register: _bench_small
  170. failed_when: false
  171. tags:
  172. - benchmark-run
  173. - name: "Benchmark | Run test prompts against medium models"
  174. ansible.builtin.uri:
  175. url: "{{ ollama_api_url }}/api/generate"
  176. method: POST
  177. body_format: json
  178. body:
  179. model: "{{ item.0 }}"
  180. prompt: "{{ test_prompts[item.1].prompt }}"
  181. stream: false
  182. headers:
  183. Authorization: "Bearer {{ ollama_api_key }}"
  184. timeout: "{{ benchmark_medium_timeout }}"
  185. status_code: 200
  186. loop: "{{ _medium_models | product(test_prompts.keys() | list) | list }}"
  187. loop_control:
  188. label: "{{ item.0 }} / {{ item.1 }}"
  189. register: _bench_medium
  190. failed_when: false
  191. when: _medium_models | length > 0
  192. tags:
  193. - benchmark-run
  194. - name: "Benchmark | Run test prompts against large models"
  195. ansible.builtin.uri:
  196. url: "{{ ollama_api_url }}/api/generate"
  197. method: POST
  198. body_format: json
  199. body:
  200. model: "{{ item.0 }}"
  201. prompt: "{{ test_prompts[item.1].prompt }}"
  202. stream: false
  203. headers:
  204. Authorization: "Bearer {{ ollama_api_key }}"
  205. timeout: "{{ benchmark_large_timeout }}"
  206. status_code: 200
  207. loop: "{{ _large_models | product(test_prompts.keys() | list) | list }}"
  208. loop_control:
  209. label: "{{ item.0 }} / {{ item.1 }}"
  210. register: _bench_large
  211. failed_when: false
  212. when: _large_models | length > 0
  213. tags:
  214. - benchmark-run
  215. - name: "Benchmark | Merge small, medium, and large model results"
  216. ansible.builtin.set_fact:
  217. benchmark_raw_results:
  218. results: >-
  219. {{ (_bench_small.results | default([]))
  220. + (_bench_medium.results | default([]))
  221. + (_bench_large.results | default([])) }}
  222. tags:
  223. - benchmark-run
  224. - name: "Benchmark | Compute per-model metrics"
  225. ansible.builtin.set_fact:
  226. model_metrics: |
  227. {% set ns = namespace(results={}) %}
  228. {% for model in models_to_benchmark %}
  229. {% set ns2 = namespace(coding_quality=0, coding_count=0, general_quality=0, general_count=0, total_toks=0, total_eval_time=0, ttft_sum=0, ttft_count=0, latency_ns=0) %}
  230. {% for result in benchmark_raw_results.results %}
  231. {% if result.item[0] == model and result.status == 200 %}
  232. {% set test_name = result.item[1] %}
  233. {% set resp = result.json | default({}) %}
  234. {% set eval_count = resp.eval_count | default(0) | int %}
  235. {% set eval_duration = resp.eval_duration | default(1) | int %}
  236. {% set prompt_eval_duration = resp.prompt_eval_duration | default(0) | int %}
  237. {% set response_text = resp.response | default('') %}
  238. {% set tok_per_sec = (eval_count / (eval_duration / 1000000000.0)) if eval_duration > 0 else 0 %}
  239. {% set ns2.total_toks = ns2.total_toks + tok_per_sec %}
  240. {% set ns2.ttft_sum = ns2.ttft_sum + prompt_eval_duration %}
  241. {% set ns2.ttft_count = ns2.ttft_count + 1 %}
  242. {% if test_name == 'latency' %}
  243. {% set ns2.latency_ns = eval_duration + prompt_eval_duration %}
  244. {% endif %}
  245. {% set resp_len = response_text | length %}
  246. {% if test_name in ['code_gen', 'debug', 'refactor'] %}
  247. {% set has_def = 1 if 'def ' in response_text else 0 %}
  248. {% set has_return = 1 if 'return' in response_text else 0 %}
  249. {% set has_assert = 1 if 'assert ' in response_text else 0 %}
  250. {% set has_test_def = 1 if 'def test_' in response_text else 0 %}
  251. {% set has_docstring = 1 if '"""' in response_text else 0 %}
  252. {% set has_type_hint = 1 if ' -> ' in response_text else 0 %}
  253. {% set has_code_block = 1 if '```' in response_text else 0 %}
  254. {% set has_import = 1 if ('import ' in response_text or 'from ' in response_text) else 0 %}
  255. {% if test_name == 'code_gen' %}
  256. {% set quality = (has_def * 0.20 + has_return * 0.20 + has_docstring * 0.15 + has_type_hint * 0.15 + has_code_block * 0.10 + has_assert * 0.08 + has_test_def * 0.07 + has_import * 0.05) %}
  257. {% elif test_name == 'debug' %}
  258. {% set quality = (has_def * 0.30 + has_return * 0.30 + has_code_block * 0.25 + has_assert * 0.15) %}
  259. {% else %}
  260. {% set quality = (has_def * 0.25 + has_return * 0.25 + has_code_block * 0.20 + has_type_hint * 0.15 + has_import * 0.15) %}
  261. {% endif %}
  262. {% set ns2.coding_quality = ns2.coding_quality + quality %}
  263. {% set ns2.coding_count = ns2.coding_count + 1 %}
  264. {% elif test_name in ['explain', 'creative', 'reasoning'] %}
  265. {% set length_score = [resp_len / 800.0, 1.0] | min %}
  266. {% set has_structure = 1 if ('\n' in response_text and resp_len > 100) else 0 %}
  267. {% set has_list = 1 if ('\n- ' in response_text or '\n* ' in response_text or '\n1.' in response_text) else 0 %}
  268. {% set has_detail = 1 if '\n\n' in response_text else 0 %}
  269. {% set quality = (length_score * 0.35 + has_structure * 0.40 + has_list * 0.15 + has_detail * 0.10) %}
  270. {% set ns2.general_quality = ns2.general_quality + quality %}
  271. {% set ns2.general_count = ns2.general_count + 1 %}
  272. {% endif %}
  273. {% endif %}
  274. {% endfor %}
  275. {% set coding_avg = (ns2.coding_quality / ns2.coding_count) if ns2.coding_count > 0 else 0 %}
  276. {% set general_avg = (ns2.general_quality / ns2.general_count) if ns2.general_count > 0 else 0 %}
  277. {% set test_count = (ns2.ttft_count) if ns2.ttft_count > 0 else 1 %}
  278. {% set avg_toks = ns2.total_toks / test_count %}
  279. {% set toks_norm = [avg_toks / benchmark_toks_norm_ceiling, 1.0] | min %}
  280. {% set latency_ms = ns2.latency_ns / 1000000.0 if ns2.latency_ns > 0 else 9999 %}
  281. {% set latency_score = [1.0 - (latency_ms / 5000.0), 0] | max %}
  282. {% set coding_composite = coding_avg * 0.45 + toks_norm * 0.30 + latency_score * 0.25 %}
  283. {% set general_composite = general_avg * 0.45 + toks_norm * 0.30 + latency_score * 0.25 %}
  284. {% set _override = (model_category_overrides | default({}))[model] | default('') %}
  285. {% if _override in ['coding', 'general'] %}
  286. {% set category = _override %}
  287. {% elif (coding_avg - general_avg) >= benchmark_coding_threshold %}
  288. {% set category = 'coding' %}
  289. {% elif 'coder' in model | lower or 'codestral' in model | lower or 'codellama' in model | lower or 'starcoder' in model | lower %}
  290. {% set category = 'coding' %}
  291. {% else %}
  292. {% set category = 'general' %}
  293. {% endif %}
  294. {% set _ = ns.results.update({model: {'coding_quality': coding_avg | round(3), 'general_quality': general_avg | round(3), 'avg_tok_per_sec': avg_toks | round(1), 'toks_norm': toks_norm | round(3), 'latency_ms': latency_ms | round(1), 'latency_score': latency_score | round(3), 'coding_composite': coding_composite | round(3), 'general_composite': general_composite | round(3), 'category': category}}) %}
  295. {% endfor %}
  296. {{ ns.results | to_json }}
  297. tags:
  298. - benchmark-compute
  299. - name: "Benchmark | Parse model metrics"
  300. ansible.builtin.set_fact:
  301. parsed_metrics: "{{ model_metrics | from_json }}"
  302. tags:
  303. - benchmark-compute
  304. - name: "Benchmark | Rank models and select slots"
  305. ansible.builtin.set_fact:
  306. model_selection: |
  307. {% set general_models = [] %}
  308. {% set coding_models = [] %}
  309. {% for model, metrics in parsed_metrics.items() %}
  310. {% if metrics.category == 'general' %}
  311. {% set _ = general_models.append({'name': model, 'composite': metrics.general_composite, 'metrics': metrics}) %}
  312. {% else %}
  313. {% set _ = coding_models.append({'name': model, 'composite': metrics.coding_composite, 'metrics': metrics}) %}
  314. {% endif %}
  315. {% endfor %}
  316. {% set general_sorted = general_models | sort(attribute='composite', reverse=true) %}
  317. {% set coding_sorted = coding_models | sort(attribute='composite', reverse=true) %}
  318. {% set slot1 = general_sorted[0].name if general_sorted | length > 0 else 'none' %}
  319. {% set slot2 = general_sorted[1].name if general_sorted | length > 1 else (general_sorted[0].name if general_sorted | length > 0 else 'none') %}
  320. {% set slot5 = general_sorted[2].name if general_sorted | length > 2 else 'none' %}
  321. {% set slot3 = coding_sorted[0].name if coding_sorted | length > 0 else (general_sorted[0].name if general_sorted | length > 0 else 'none') %}
  322. {% set slot4 = coding_sorted[1].name if coding_sorted | length > 1 else (coding_sorted[0].name if coding_sorted | length > 0 else 'none') %}
  323. {% set slot6 = coding_sorted[2].name if coding_sorted | length > 2 else 'none' %}
  324. {{ {'slot1_general': slot1, 'slot2_general': slot2, 'slot5_general_rotate': slot5,
  325. 'slot3_coding': slot3, 'slot4_coding': slot4, 'slot6_coding_rotate': slot6,
  326. 'all_metrics': parsed_metrics, 'general_ranking': general_sorted,
  327. 'coding_ranking': coding_sorted} | to_json }}
  328. tags:
  329. - benchmark-select
  330. - name: "Benchmark | Parse model selection"
  331. ansible.builtin.set_fact:
  332. selection: "{{ model_selection | from_json }}"
  333. tags:
  334. - benchmark-select
  335. - name: "Benchmark | Display model selection results"
  336. ansible.builtin.debug:
  337. msg:
  338. - "============================================="
  339. - " MODEL SELECTION RESULTS (6-slot / 2-socket)"
  340. - "============================================="
  341. - " Node 1 — General (port 11434)"
  342. - " Slot 1 (locked): {{ selection.slot1_general }}"
  343. - " Slot 2 (locked): {{ selection.slot2_general }}"
  344. - " Slot 5 (rotate): {{ selection.slot5_general_rotate }}"
  345. - " Node 0 — Coding (port 11435)"
  346. - " Slot 3 (locked): {{ selection.slot3_coding }}"
  347. - " Slot 4 (locked): {{ selection.slot4_coding }}"
  348. - " Slot 6 (rotate): {{ selection.slot6_coding_rotate }}"
  349. - "============================================="
  350. tags:
  351. - benchmark-select
  352. - name: "Benchmark | Generate timestamp"
  353. ansible.builtin.set_fact:
  354. benchmark_timestamp: "{{ ansible_date_time.iso8601_basic_short }}"
  355. tags:
  356. - benchmark-report
  357. - name: "Benchmark | Save benchmark results markdown"
  358. ansible.builtin.copy:
  359. content: |
  360. # Benchmark Results - {{ benchmark_timestamp }}
  361. ## Model Selection (6-slot / 2-socket)
  362. | Slot | Socket | Role | Model | Composite Score |
  363. |------|--------|------|-------|----------------|
  364. | 1 | Node 1 (port 11434) | General (locked) | {{ selection.slot1_general }} | {{ parsed_metrics[selection.slot1_general].general_composite | default('N/A') }} |
  365. | 2 | Node 1 (port 11434) | General (locked) | {{ selection.slot2_general }} | {{ parsed_metrics[selection.slot2_general].general_composite | default('N/A') }} |
  366. | 5 | Node 1 (port 11434) | General (rotate) | {{ selection.slot5_general_rotate }} | {{ parsed_metrics[selection.slot5_general_rotate].general_composite | default('N/A') }} |
  367. | 3 | Node 0 (port 11435) | Coding (locked) | {{ selection.slot3_coding }} | {{ parsed_metrics[selection.slot3_coding].coding_composite | default('N/A') }} |
  368. | 4 | Node 0 (port 11435) | Coding (locked) | {{ selection.slot4_coding }} | {{ parsed_metrics[selection.slot4_coding].coding_composite | default('N/A') }} |
  369. | 6 | Node 0 (port 11435) | Coding (rotate) | {{ selection.slot6_coding_rotate }} | {{ parsed_metrics[selection.slot6_coding_rotate].coding_composite | default('N/A') }} |
  370. ## Detailed Metrics
  371. {% for model, metrics in parsed_metrics.items() %}
  372. ### {{ model }}
  373. - **Category**: {{ metrics.category }}
  374. - **Coding Quality**: {{ metrics.coding_quality }}
  375. - **General Quality**: {{ metrics.general_quality }}
  376. - **Avg Tokens/sec**: {{ metrics.avg_tok_per_sec }}
  377. - **Latency (ms)**: {{ metrics.latency_ms }}
  378. - **Coding Composite**: {{ metrics.coding_composite }}
  379. - **General Composite**: {{ metrics.general_composite }}
  380. {% endfor %}
  381. ## Scoring Formula
  382. - Composite = quality * 0.45 + token_speed_normalized * 0.30 + latency_score * 0.25
  383. - Speed normalized against {{ benchmark_toks_norm_ceiling }} tok/sec ceiling (hardware-observed max)
  384. - Coding quality (per-prompt):
  385. code_gen: has_def×0.20 + has_return×0.20 + has_docstring×0.15 + has_type_hint×0.15 + has_code_block×0.10 + has_assert×0.08 + has_test_def×0.07 + has_import×0.05
  386. debug: has_def×0.30 + has_return×0.30 + has_code_block×0.25 + has_assert×0.15
  387. refactor: has_def×0.25 + has_return×0.25 + has_code_block×0.20 + has_type_hint×0.15 + has_import×0.15
  388. - Category: override dict → quality delta (coding_avg - general_avg >= {{ benchmark_coding_threshold }}) → name pattern (coder/codestral/codellama/starcoder) → general
  389. dest: "{{ benchmark_results_dir }}/benchmark_{{ benchmark_timestamp }}.md"
  390. mode: "0644"
  391. delegate_to: localhost
  392. tags:
  393. - benchmark-report
  394. - name: "Benchmark | Save model_selection.json"
  395. ansible.builtin.copy:
  396. content: "{{ selection | to_nice_json }}"
  397. dest: "{{ benchmark_results_dir }}/model_selection.json"
  398. mode: "0644"
  399. delegate_to: localhost
  400. tags:
  401. - benchmark-report
  402. - name: "Benchmark | Check minimum composite scores"
  403. ansible.builtin.debug:
  404. msg: >-
  405. WARNING: Best composite score for {{ item.key }} models is below threshold
  406. ({{ min_composite_score }}). Consider pulling additional models.
  407. Recommended candidates: qwen2.5-coder:14b, deepseek-coder-v2:16b, codellama:34b
  408. when: >-
  409. (item.value.coding_composite < min_composite_score | float) and
  410. (item.value.general_composite < min_composite_score | float)
  411. loop: "{{ parsed_metrics | dict2items }}"
  412. loop_control:
  413. label: "{{ item.key }}"
  414. tags:
  415. - benchmark-report
  416. - name: "Benchmark | Pull recommended model if pull_if_better is true"
  417. ansible.builtin.command: "ollama pull qwen2.5-coder:14b"
  418. when:
  419. - pull_if_better | bool
  420. - parsed_metrics.values() | map(attribute='coding_composite') | max < min_composite_score | float
  421. changed_when: true
  422. tags:
  423. - benchmark-pull