diff --git a/crates/cloud_llm_client/src/cloud_llm_client.rs b/crates/cloud_llm_client/src/cloud_llm_client.rs
index 4ae72ce0a4c41c9279dd18ca215c0dc0a7839f33..bb77c3a5b7f8009093cbf7bc427160ed535e6c62 100644
--- a/crates/cloud_llm_client/src/cloud_llm_client.rs
+++ b/crates/cloud_llm_client/src/cloud_llm_client.rs
@@ -322,6 +322,9 @@ pub struct LanguageModel {
     pub supports_images: bool,
     pub supports_thinking: bool,
     pub supports_max_mode: bool,
+    // only used by OpenAI and xAI
+    #[serde(default)]
+    pub supports_parallel_tool_calls: bool,
 }
 
 #[derive(Debug, Serialize, Deserialize)]
diff --git a/crates/language_models/src/provider/cloud.rs b/crates/language_models/src/provider/cloud.rs
index 1c21e566d68df3191932a860b6ed06ea603083a6..d85533ecce63441fe5aaa7a382bf04af79992f63 100644
--- a/crates/language_models/src/provider/cloud.rs
+++ b/crates/language_models/src/provider/cloud.rs
@@ -810,15 +810,11 @@ impl LanguageModel for CloudLanguageModel {
             }
             cloud_llm_client::LanguageModelProvider::OpenAi => {
                 let client = self.client.clone();
-                let model = match open_ai::Model::from_id(&self.model.id.0) {
-                    Ok(model) => model,
-                    Err(err) => return async move { Err(anyhow!(err).into()) }.boxed(),
-                };
                 let request = into_open_ai(
                     request,
-                    model.id(),
-                    model.supports_parallel_tool_calls(),
-                    model.supports_prompt_cache_key(),
+                    &self.model.id.0,
+                    self.model.supports_parallel_tool_calls,
+                    true,
                     None,
                     None,
                 );
@@ -860,15 +856,11 @@ impl LanguageModel for CloudLanguageModel {
             }
             cloud_llm_client::LanguageModelProvider::XAi => {
                 let client = self.client.clone();
-                let model = match x_ai::Model::from_id(&self.model.id.0) {
-                    Ok(model) => model,
-                    Err(err) => return async move { Err(anyhow!(err).into()) }.boxed(),
-                };
                 let request = into_open_ai(
                     request,
-                    model.id(),
-                    model.supports_parallel_tool_calls(),
-                    model.supports_prompt_cache_key(),
+                    &self.model.id.0,
+                    self.model.supports_parallel_tool_calls,
+                    false,
                     None,
                     None,
                 );