mod assertions;
mod example;
mod examples;
mod explorer;
mod ids;
mod instance;
mod tool_metrics;

use assertions::{AssertionsReport, display_error_row};
use instance::{ExampleInstance, JudgeOutput, RunOutput, run_git};
use language_extension::LspAccess;
pub(crate) use tool_metrics::*;

use ::fs::RealFs;
use clap::Parser;
use client::{Client, ProxySettings, UserStore};
use collections::{HashMap, HashSet};
use extension::ExtensionHostProxy;
use futures::future;
use gpui::http_client::read_proxy_from_env;
use gpui::{App, AppContext, Application, AsyncApp, Entity, UpdateGlobal};
use gpui_tokio::Tokio;
use language::LanguageRegistry;
use language_model::{ConfiguredModel, LanguageModel, LanguageModelRegistry, SelectedModel};
use node_runtime::{NodeBinaryOptions, NodeRuntime};
use project::Project;
use project::project_settings::ProjectSettings;
use prompt_store::PromptBuilder;
use release_channel::AppVersion;
use reqwest_client::ReqwestClient;
use settings::{Settings, SettingsStore};
use std::cell::RefCell;
use std::collections::VecDeque;
use std::env;
use std::path::{Path, PathBuf};
use std::rc::Rc;
use std::str::FromStr;
use std::sync::{Arc, LazyLock};
use util::ResultExt as _;

static CARGO_MANIFEST_DIR: LazyLock<PathBuf> =
    LazyLock::new(|| PathBuf::from(env!("CARGO_MANIFEST_DIR")));

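/// Command-line arguments for the agent eval runner.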
#[derive(Parser, Debug)]
#[command(name = "eval", disable_version_flag = true)]
struct Args {
    /// Run only the examples and threads whose names contain one of these substrings. If unspecified, all examples and threads are run.
    #[arg(value_name = "EXAMPLE_SUBSTRING")]
    filter: Vec<String>,
    /// Provider/model to use for the agent, in `provider/model` format.
    #[arg(long, default_value = "anthropic/claude-3-7-sonnet-latest")]
    model: String,
    /// Provider/model to use for the judges, in `provider/model` format.
    #[arg(long, default_value = "anthropic/claude-3-7-sonnet-latest")]
    judge_model: String,
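    /// File extensions of the languages to enable; an example that requires a language server for an extension outside this list will panic.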
    #[arg(long, value_delimiter = ',', default_value = "rs,ts,py")]
    languages: Vec<String>,
    /// How many times to run each example.
    #[arg(long, default_value = "8")]
    repetitions: usize,
    /// Maximum number of examples to run concurrently.
    #[arg(long, default_value = "4")]
    concurrency: usize,
}

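/// Parses CLI arguments, starts a headless GPUI app, prepares example repositories, runs each
/// example against the agent model, judges the results, and prints a report.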
fn main() {
    dotenvy::from_filename(CARGO_MANIFEST_DIR.join(".env")).ok();

    env_logger::init();

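    // Identify this run: persistent system/installation IDs for telemetry, a fresh session ID,
    // and a run ID derived from CI (GITHUB_RUN_ID) or the local timestamp.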
    let system_id = ids::get_or_create_id(&ids::eval_system_id_path()).ok();
    let installation_id = ids::get_or_create_id(&ids::eval_installation_id_path()).ok();
    let session_id = uuid::Uuid::new_v4().to_string();
    let run_timestamp = chrono::Local::now().format("%Y-%m-%d_%H-%M-%S");
    let run_id = match env::var("GITHUB_RUN_ID") {
        Ok(run_id) => format!("github/{}", run_id),
        Err(_) => format!("local/{}", run_timestamp),
    };

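    // Directory layout: cached repos, per-example worktrees, example definitions, and a
    // timestamped output directory for this run.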
    let root_dir = Path::new(std::env!("CARGO_MANIFEST_DIR"))
        .parent()
        .unwrap()
        .parent()
        .unwrap()
        .canonicalize()
        .unwrap();
    let eval_crate_dir = root_dir.join("crates").join("eval");
    let repos_dir = eval_crate_dir.join("repos");
    let worktrees_dir = eval_crate_dir.join("worktrees");
    let examples_dir = eval_crate_dir.join("src").join("examples");
    let run_dir = eval_crate_dir
        .join("runs")
        .join(format!("{}", run_timestamp));
    std::fs::create_dir_all(&run_dir).unwrap();
    std::fs::create_dir_all(&repos_dir).unwrap();
    std::fs::create_dir_all(&worktrees_dir).unwrap();
    std::fs::create_dir_all(&examples_dir).unwrap();
    std::fs::create_dir_all(&paths::config_dir()).unwrap();

    let zed_commit_sha = commit_sha_for_path(&root_dir);
    let zed_branch_name = git_branch_for_path(&root_dir);
    let args = Args::parse();
    let languages: HashSet<String> = args.languages.into_iter().collect();

    let http_client = Arc::new(ReqwestClient::new());
    let app = Application::headless().with_http_client(http_client);
    let all_threads = examples::all(&examples_dir);

    app.run(move |cx| {
        let app_state = init(cx);

        let telemetry = app_state.client.telemetry();
        telemetry.start(system_id, installation_id, session_id, cx);

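        // Telemetry is opt-in: events are sent only when ZED_EVAL_TELEMETRY=1 and the client has a
        // checksum seed.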
        let enable_telemetry = env::var("ZED_EVAL_TELEMETRY").is_ok_and(|value| value == "1")
            && telemetry.has_checksum_seed();
        if enable_telemetry {
            println!("Telemetry enabled");
            telemetry::event!(
                "Agent Eval Started",
                zed_commit_sha = zed_commit_sha,
                zed_branch_name = zed_branch_name,
                run_id = run_id,
            );
        }

        let mut cumulative_tool_metrics = ToolMetrics::default();

        let agent_model = load_model(&args.model, cx).unwrap();
        let judge_model = load_model(&args.judge_model, cx).unwrap();

        LanguageModelRegistry::global(cx).update(cx, |registry, cx| {
            registry.set_default_model(Some(agent_model.clone()), cx);
        });

        let auth1 = agent_model.provider.authenticate(cx);
        let auth2 = judge_model.provider.authenticate(cx);

        cx.spawn(async move |cx| {
            auth1.await?;
            auth2.await?;

            let mut examples = Vec::new();

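            // ANSI color codes used to give each example a distinct log prefix.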
            const COLORS: [&str; 12] = [
                "\x1b[31m", // Red
                "\x1b[32m", // Green
                "\x1b[33m", // Yellow
                "\x1b[34m", // Blue
                "\x1b[35m", // Magenta
                "\x1b[36m", // Cyan
                "\x1b[91m", // Bright Red
                "\x1b[92m", // Bright Green
                "\x1b[93m", // Bright Yellow
                "\x1b[94m", // Bright Blue
                "\x1b[95m", // Bright Magenta
                "\x1b[96m", // Bright Cyan
            ];

            let mut skipped = Vec::new();

            for thread in all_threads {
                let meta = thread.meta();
                if !args.filter.is_empty() && !args.filter.iter().any(|sub| meta.name.contains(sub))
                {
                    skipped.push(meta.name);
                    continue;
                }

                if let Some(language) = meta.language_server
                    && !languages.contains(&language.file_extension)
                {
                    panic!(
                        "Eval for {:?} could not be run because no language server was found for extension {:?}",
                        meta.name, language.file_extension
                    );
                }

                // TODO: This creates a worktree per repetition. Ideally these examples should
                // either be run sequentially on the same worktree, or reuse worktrees when there
                // are more examples to run than the concurrency limit.
                for repetition_number in 0..args.repetitions {
                    let example_instance = ExampleInstance::new(
                        thread.clone(),
                        &repos_dir,
                        &run_dir,
                        &worktrees_dir,
                        repetition_number,
                    );

                    examples.push(example_instance);
                }
            }

            if !skipped.is_empty() {
                println!("Skipped threads: {}", skipped.join(", "));
            }

            if examples.is_empty() {
                eprintln!("Filter matched no examples");
                return cx.update(|cx| cx.quit());
            }

            let mut repo_urls = HashSet::default();
            let mut clone_tasks = Vec::new();

            let max_name_width = examples
                .iter()
                .map(|e| e.worktree_name().len())
                .max()
                .unwrap_or(0);

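            // Assign each example a colored log prefix and prepare each distinct repository:
            // initialize it with the expected origin if missing, otherwise verify the existing
            // origin URL.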
            for (i, example_instance) in examples.iter_mut().enumerate() {
                let color = COLORS[i % COLORS.len()].to_string();
                example_instance.set_log_prefix_style(&color, max_name_width);

                println!(
                    "{}Logging to: {}",
                    example_instance.log_prefix,
                    example_instance.run_directory.display()
                );

                let repo_url = example_instance.repo_url();
                if repo_urls.insert(repo_url.clone()) {
                    let repo_path = example_instance.repo_path.clone();

                    if !repo_path.join(".git").is_dir() {
                        println!(
                            "{:<width$} < {}",
                            "↓ Cloning",
                            repo_url,
                            width = max_name_width
                        );

                        let git_task = cx.spawn(async move |_cx| {
                            std::fs::create_dir_all(&repo_path)?;
                            run_git(&repo_path, &["init"]).await?;
                            run_git(&repo_path, &["remote", "add", "origin", &repo_url]).await
                        });

                        clone_tasks.push(git_task);
                    } else {
                        println!(
                            "{:<width$} < {}",
                            "✔︎ Already cloned",
                            repo_url,
                            width = max_name_width
                        );

                        let actual_origin =
                            run_git(&repo_path, &["remote", "get-url", "origin"]).await?;
                        anyhow::ensure!(
                            actual_origin == repo_url,
                            "remote origin {actual_origin} does not match expected origin {repo_url}"
                        );
                    }
                }
            }

            future::join_all(clone_tasks).await;

            for example_instance in examples.iter_mut() {
                example_instance.fetch().await?;
            }

            let examples = Rc::new(RefCell::new(VecDeque::from(examples)));
            let results_by_example_name = Rc::new(RefCell::new(HashMap::default()));

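            // Run a fixed pool of workers; each pulls the next example off the shared queue, sets
            // it up, runs the agent, judges the output, and records the result.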
            future::join_all((0..args.concurrency).map(|_| {
                let app_state = app_state.clone();
                let model = agent_model.model.clone();
                let judge_model = judge_model.model.clone();
                let zed_commit_sha = zed_commit_sha.clone();
                let zed_branch_name = zed_branch_name.clone();
                let run_id = run_id.clone();
                let examples = examples.clone();
                let results = results_by_example_name.clone();
                cx.spawn(async move |cx| {
                    loop {
                        let Some(mut example) = examples.borrow_mut().pop_front() else {
                            break;
                        };
                        let result = async {
                            example.setup().await?;
                            let run_output = cx
                                .update(|cx| example.run(model.clone(), app_state.clone(), cx))?
                                .await?;
                            let judge_output = judge_example(
                                example.clone(),
                                judge_model.clone(),
                                &zed_commit_sha,
                                &zed_branch_name,
                                &run_id,
                                &run_output,
                                enable_telemetry,
                                cx,
                            )
                            .await;
                            anyhow::Ok((run_output, judge_output))
                        }
                        .await;
                        results
                            .borrow_mut()
                            .entry(example.name.clone())
                            .or_insert(Vec::new())
                            .push((example.clone(), result));
                    }
                })
            }))
            .await;

            print_report(
                &mut results_by_example_name.borrow_mut(),
                &mut cumulative_tool_metrics,
                &run_dir,
            )?;

            app_state.client.telemetry().flush_events().await;

            cx.update(|cx| cx.quit())
        })
        .detach_and_log_err(cx);
    });
}

/// Subset of `workspace::AppState` needed by `HeadlessAssistant`, with additional fields.
pub struct AgentAppState {
    pub languages: Arc<LanguageRegistry>,
    pub client: Arc<Client>,
    pub user_store: Entity<UserStore>,
    pub fs: Arc<dyn fs::Fs>,
    pub node_runtime: NodeRuntime,

    // Additional fields not present in `workspace::AppState`.
    pub prompt_builder: Arc<PromptBuilder>,
}

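/// Builds the headless [`AgentAppState`]: settings, HTTP/proxy clients, the language registry,
/// the node runtime, language model providers, and the agent prompt builder.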
pub fn init(cx: &mut App) -> Arc<AgentAppState> {
    let app_version = AppVersion::load(env!("ZED_PKG_VERSION"));
    release_channel::init(app_version, cx);
    gpui_tokio::init(cx);

    let settings_store = SettingsStore::new(cx, &settings::default_settings());
    cx.set_global(settings_store);
    client::init_settings(cx);

    // Set User-Agent so we can download language servers from GitHub
    let user_agent = format!(
        "Zed Agent Eval/{} ({}; {})",
        app_version,
        std::env::consts::OS,
        std::env::consts::ARCH
    );
    let proxy_str = ProxySettings::get_global(cx).proxy.to_owned();
    let proxy_url = proxy_str
        .as_ref()
        .and_then(|input| input.parse().ok())
        .or_else(read_proxy_from_env);
    let http = {
        let _guard = Tokio::handle(cx).enter();

        ReqwestClient::proxy_and_user_agent(proxy_url, &user_agent)
            .expect("could not start HTTP client")
    };
    cx.set_http_client(Arc::new(http));

    Project::init_settings(cx);

    let client = Client::production(cx);
    cx.set_http_client(client.http_client());

    let git_binary_path = None;
    let fs = Arc::new(RealFs::new(
        git_binary_path,
        cx.background_executor().clone(),
    ));

    let mut languages = LanguageRegistry::new(cx.background_executor().clone());
    languages.set_language_server_download_dir(paths::languages_dir().clone());
    let languages = Arc::new(languages);

    let user_store = cx.new(|cx| UserStore::new(client.clone(), cx));

    extension::init(cx);

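    // Recompute node binary options whenever project settings change and feed them to the node
    // runtime through a watch channel.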
    let (mut tx, rx) = watch::channel(None);
    cx.observe_global::<SettingsStore>(move |cx| {
        let settings = &ProjectSettings::get_global(cx).node;
        let options = NodeBinaryOptions {
            allow_path_lookup: !settings.ignore_system_version,
            allow_binary_download: true,
            use_paths: settings.path.as_ref().map(|node_path| {
                let node_path = PathBuf::from(shellexpand::tilde(node_path).as_ref());
                let npm_path = settings
                    .npm_path
                    .as_ref()
                    .map(|path| PathBuf::from(shellexpand::tilde(&path).as_ref()));
                (
                    node_path.clone(),
                    npm_path.unwrap_or_else(|| {
                        let base_path = PathBuf::new();
                        node_path.parent().unwrap_or(&base_path).join("npm")
                    }),
                )
            }),
        };
        tx.send(Some(options)).log_err();
    })
    .detach();
    let node_runtime = NodeRuntime::new(client.http_client(), None, rx);

    let extension_host_proxy = ExtensionHostProxy::global(cx);

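    // Wire up the language, extension, model, prompt, terminal, and agent subsystems.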
    language::init(cx);
    debug_adapter_extension::init(extension_host_proxy.clone(), cx);
    language_extension::init(LspAccess::Noop, extension_host_proxy, languages.clone());
    language_model::init(client.clone(), cx);
    language_models::init(user_store.clone(), client.clone(), cx);
    languages::init(languages.clone(), fs.clone(), node_runtime.clone(), cx);
    prompt_store::init(cx);
    terminal_view::init(cx);
    let stdout_is_a_pty = false;
    let prompt_builder = PromptBuilder::load(fs.clone(), stdout_is_a_pty, cx);
    agent_ui::init(
        fs.clone(),
        client.clone(),
        prompt_builder.clone(),
        languages.clone(),
        true,
        cx,
    );

    SettingsStore::update_global(cx, |store, cx| {
        store.set_user_settings(include_str!("../runner_settings.json"), cx)
    })
    .unwrap();

    Arc::new(AgentAppState {
        languages,
        client,
        user_store,
        fs,
        node_runtime,
        prompt_builder,
    })
}

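/// Finds a model in the registry by its `provider/model` identifier.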
pub fn find_model(
    model_name: &str,
    model_registry: &LanguageModelRegistry,
    cx: &App,
) -> anyhow::Result<Arc<dyn LanguageModel>> {
    let selected = SelectedModel::from_str(model_name).map_err(|e| anyhow::anyhow!(e))?;
    model_registry
        .available_models(cx)
        .find(|model| model.id() == selected.model && model.provider_id() == selected.provider)
        .ok_or_else(|| {
            anyhow::anyhow!(
                "No language model with ID {}/{} was available. Available models: {}",
                selected.provider.0,
                selected.model.0,
                model_registry
                    .available_models(cx)
                    .map(|model| format!("{}/{}", model.provider_id().0, model.id().0))
                    .collect::<Vec<_>>()
                    .join(", ")
            )
        })
}

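/// Resolves a `provider/model` identifier into a [`ConfiguredModel`] holding both the provider
/// and the model.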
pub fn load_model(model_name: &str, cx: &mut App) -> anyhow::Result<ConfiguredModel> {
    let model = {
        let model_registry = LanguageModelRegistry::read_global(cx);
        find_model(model_name, model_registry, cx)?
    };

    let provider = {
        let model_registry = LanguageModelRegistry::read_global(cx);
        model_registry
            .provider(&model.provider_id())
            .ok_or_else(|| anyhow::anyhow!("Provider not found: {}", model.provider_id()))?
    };

    Ok(ConfiguredModel {
        provider: provider.clone(),
        model: model.clone(),
    })
}

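/// Returns the commit SHA of `HEAD` for the repository at `repo_path`.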
pub fn commit_sha_for_path(repo_path: &Path) -> String {
    futures::executor::block_on(run_git(repo_path, &["rev-parse", "HEAD"])).unwrap()
}

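/// Returns the current branch name, preferring `GITHUB_REF_NAME` when set (CI) and falling back
/// to `git rev-parse --abbrev-ref HEAD`.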
pub fn git_branch_for_path(repo_path: &Path) -> String {
    match std::env::var("GITHUB_REF_NAME") {
        Ok(branch) => branch,
        Err(_) => {
            futures::executor::block_on(run_git(repo_path, &["rev-parse", "--abbrev-ref", "HEAD"]))
                .unwrap_or_else(|_| "unknown".to_string())
        }
    }
}

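/// Judges an example's run output with the judge model and, when telemetry is enabled, emits an
/// "Agent Example Evaluated" event with the scores and metrics.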
async fn judge_example(
    example: ExampleInstance,
    model: Arc<dyn LanguageModel>,
    zed_commit_sha: &str,
    zed_branch_name: &str,
    run_id: &str,
    run_output: &RunOutput,
    enable_telemetry: bool,
    cx: &AsyncApp,
) -> JudgeOutput {
    let judge_output = example.judge(model.clone(), run_output, cx).await;

    if enable_telemetry {
        telemetry::event!(
            "Agent Example Evaluated",
            zed_commit_sha = zed_commit_sha,
            zed_branch_name = zed_branch_name,
            run_id = run_id,
            example_name = example.name.clone(),
            example_repetition = example.repetition,
            diff_evaluation = judge_output.diff.clone(),
            thread_evaluation = judge_output.thread.clone(),
            tool_metrics = run_output.tool_metrics,
            response_count = run_output.response_count,
            token_usage = run_output.token_usage,
            model = model.telemetry_id(),
            model_provider = model.provider_id().to_string(),
            repository_url = example.repo_url(),
            repository_revision = example.revision(),
            diagnostic_summary_before = run_output.diagnostic_summary_before,
            diagnostic_summary_after = run_output.diagnostic_summary_after,
            diagnostics_before = run_output.diagnostics_before,
            diagnostics_after = run_output.diagnostics_after,
        );
    }

    judge_output
}

const HEADER_WIDTH: usize = 65;

fn print_h1(header: &str) {
    println!("\n\n{:=^HEADER_WIDTH$}", "");
    println!("{:^HEADER_WIDTH$}", header);
    println!("{:=^HEADER_WIDTH$}\n", "");
}

fn print_h2(header: &str) {
    println!("\n{:-^HEADER_WIDTH$}", "");
    println!("{:^HEADER_WIDTH$}", header);
    println!("{:-^HEADER_WIDTH$}\n", "");
}

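/// Prints per-example assertion tables and aggregate scores, and generates an HTML overview of
/// the message logs under `run_dir`.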
fn print_report(
    results_by_example_name: &mut HashMap<
        String,
        Vec<(ExampleInstance, anyhow::Result<(RunOutput, JudgeOutput)>)>,
    >,
    cumulative_tool_metrics: &mut ToolMetrics,
    run_dir: &Path,
) -> anyhow::Result<()> {
    print_h1("EVAL RESULTS");

    let mut diff_scores = Vec::new();
    let mut thread_scores = Vec::new();
    let mut programmatic_scores = Vec::new();
    let mut error_count = 0;

    for (example_name, results) in results_by_example_name.iter_mut() {
        print_h2(example_name);

        results.sort_unstable_by_key(|(example, _)| example.repetition);
        let mut example_cumulative_tool_metrics = ToolMetrics::default();

        let mut table_rows = String::new();

        for (example, result) in results.iter() {
            match result {
                Err(err) => {
                    display_error_row(&mut table_rows, example.repetition, err.to_string())?;
                    error_count += 1;
                    programmatic_scores.push(0.0);
                    diff_scores.push(0.0);
                    thread_scores.push(0.0);
                }
                Ok((run_output, judge_output)) => {
                    cumulative_tool_metrics.merge(&run_output.tool_metrics);
                    example_cumulative_tool_metrics.merge(&run_output.tool_metrics);

                    if run_output.programmatic_assertions.total_count() > 0 {
                        for assertion in &run_output.programmatic_assertions.ran {
                            assertions::display_table_row(
                                &mut table_rows,
                                example.repetition,
                                assertion,
                            )?;
                        }

                        programmatic_scores
                            .push(run_output.programmatic_assertions.passed_percentage())
                    }

                    if !judge_output.diff.is_empty() {
                        diff_scores.push(judge_output.diff.passed_percentage());

                        for assertion in &judge_output.diff.ran {
                            assertions::display_table_row(
                                &mut table_rows,
                                example.repetition,
                                assertion,
                            )?;
                        }
                    }

                    if !judge_output.thread.is_empty() {
                        thread_scores.push(judge_output.thread.passed_percentage());

                        for assertion in &judge_output.thread.ran {
                            assertions::display_table_row(
                                &mut table_rows,
                                example.repetition,
                                assertion,
                            )?;
                        }
                    }
                }
            }
        }

        let mut all_asserts = Vec::new();

        if !table_rows.is_empty() {
            assertions::print_table_header();
            print!("{}", table_rows);

            assertions::print_table_divider();

            for (example, result) in results.iter() {
                if let Ok((run_output, judge_output)) = result {
                    let asserts = [
                        run_output.programmatic_assertions.clone(),
                        judge_output.diff.clone(),
                        judge_output.thread.clone(),
                    ];
                    all_asserts.extend_from_slice(&asserts);
                    assertions::print_table_round_summary(
                        &example.repetition.to_string(),
                        asserts.iter(),
                    )
                } else if let Err(err) = result {
                    let assert = AssertionsReport::error(err.to_string());
                    all_asserts.push(assert.clone());
                    assertions::print_table_round_summary(
                        &example.repetition.to_string(),
                        [assert].iter(),
                    )
                }
            }

            assertions::print_table_divider();

            assertions::print_table_round_summary("avg", all_asserts.iter());

            assertions::print_table_footer();
        }

        if !example_cumulative_tool_metrics.is_empty() {
            println!("{}", &example_cumulative_tool_metrics);
        }
    }

    if results_by_example_name.len() > 1 {
        print_h1("AGGREGATE");

        if error_count > 0 {
            println!("\n{error_count} examples failed to run!");
        }

        let programmatic_score_count = programmatic_scores.len();
        if programmatic_score_count > 0 {
            let average_programmatic_score = (programmatic_scores.into_iter().sum::<f32>()
                / (programmatic_score_count as f32))
                .floor();
            println!("Average programmatic score: {average_programmatic_score}%");
        }

        let diff_score_count = diff_scores.len();
        if diff_score_count > 0 {
            let average_diff_score =
                (diff_scores.into_iter().sum::<f32>() / (diff_score_count as f32)).floor();
            println!("Average diff score: {average_diff_score}%");
        }

        let thread_score_count = thread_scores.len();

        if thread_score_count > 0 {
            let average_thread_score =
                (thread_scores.into_iter().sum::<f32>() / (thread_score_count as f32)).floor();
            println!("Average thread score: {average_thread_score}%");
        }

        println!();

        print_h2("CUMULATIVE TOOL METRICS");
        println!("{}", cumulative_tool_metrics);
    }

    let explorer_output_path = run_dir.join("overview.html");
    let mut json_paths: Vec<PathBuf> = results_by_example_name
        .values()
        .flat_map(|results| {
            results.iter().map(|(example, _)| {
                let absolute_path = run_dir.join(example.run_directory.join("last.messages.json"));
                let cwd = std::env::current_dir().expect("Can't get current dir");
                pathdiff::diff_paths(&absolute_path, cwd).unwrap_or_else(|| absolute_path.clone())
            })
        })
        .collect::<Vec<_>>();
    json_paths.sort();
    if let Err(err) = explorer::generate_explorer_html(&json_paths, &explorer_output_path) {
        eprintln!("Failed to generate explorer HTML: {}", err);
    }

    Ok(())
}