language_model: Remove `use_any_tool` method from `LanguageModel` (#27930)

Created by Marshall Bowers

This PR removes the `use_any_tool` method from the `LanguageModel`
trait, along with the `use_tool` and `use_tool_stream` helpers on
`dyn LanguageModel` that were built on top of it.

The method was unused, and it doesn't fit our new tool use story.
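
For context, the replacement path is the ordinary completion API: tools are declared on the request, and tool calls come back as events on the completion stream. A minimal sketch of that shape, assuming a `LanguageModelRequestTool` type and a `LanguageModelCompletionEvent::ToolUse` variant as in the surrounding codebase (exact names and fields here are assumptions, not part of this diff):

```rust
use anyhow::Result;
use futures::StreamExt;
use gpui::AsyncApp;
use language_model::{
    LanguageModel, LanguageModelCompletionEvent, LanguageModelRequest, LanguageModelRequestTool,
};

async fn run_with_tool(
    model: &dyn LanguageModel,
    mut request: LanguageModelRequest,
    cx: &AsyncApp,
) -> Result<()> {
    // Instead of a dedicated `use_any_tool` entry point, the tool is
    // declared on the request itself (field names assumed for illustration).
    request.tools.push(LanguageModelRequestTool {
        name: "search".into(),
        description: "Search the project".into(),
        input_schema: serde_json::json!({ "type": "object" }),
    });

    // ...and tool calls arrive as ordinary completion-stream events,
    // interleaved with text, rather than via a separate method.
    let mut events = model.stream_completion(request, cx).await?;
    while let Some(event) = events.next().await {
        if let Ok(LanguageModelCompletionEvent::ToolUse(tool_use)) = event {
            println!("model called {} with {}", tool_use.name, tool_use.input);
        }
    }
    Ok(())
}
```

With that shape, a per-provider `use_any_tool` override has nothing left to do, which is why each provider's implementation below is deleted wholesale.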

Release Notes:

- N/A

Change summary

crates/language_model/src/fake_provider.rs          |  28 ----
crates/language_model/src/language_model.rs         |  40 -----
crates/language_models/src/provider/anthropic.rs    |  40 -----
crates/language_models/src/provider/bedrock.rs      |  50 -------
crates/language_models/src/provider/cloud.rs        | 104 ---------------
crates/language_models/src/provider/copilot_chat.rs |  12 -
crates/language_models/src/provider/deepseek.rs     |  55 -------
crates/language_models/src/provider/google.rs       |  55 -------
crates/language_models/src/provider/lmstudio.rs     |  11 -
crates/language_models/src/provider/mistral.rs      |  49 -------
crates/language_models/src/provider/ollama.rs       |  64 --------
crates/language_models/src/provider/open_ai.rs      |  41 -----
12 files changed, 8 insertions(+), 541 deletions(-)

Detailed changes

crates/language_model/src/fake_provider.rs

@@ -7,7 +7,6 @@ use futures::{FutureExt, StreamExt, channel::mpsc, future::BoxFuture, stream::Bo
 use gpui::{AnyView, App, AsyncApp, Entity, Task, Window};
 use http_client::Result;
 use parking_lot::Mutex;
-use serde::Serialize;
 use std::sync::Arc;
 
 pub fn language_model_id() -> LanguageModelId {
@@ -88,7 +87,6 @@ pub struct ToolUseRequest {
 #[derive(Default)]
 pub struct FakeLanguageModel {
     current_completion_txs: Mutex<Vec<(LanguageModelRequest, mpsc::UnboundedSender<String>)>>,
-    current_tool_use_txs: Mutex<Vec<(ToolUseRequest, mpsc::UnboundedSender<String>)>>,
 }
 
 impl FakeLanguageModel {
@@ -127,13 +125,6 @@ impl FakeLanguageModel {
     pub fn end_last_completion_stream(&self) {
         self.end_completion_stream(self.pending_completions().last().unwrap());
     }
-
-    pub fn respond_to_last_tool_use<T: Serialize>(&self, response: T) {
-        let response = serde_json::to_string(&response).unwrap();
-        let mut current_tool_call_txs = self.current_tool_use_txs.lock();
-        let (_, tx) = current_tool_call_txs.pop().unwrap();
-        tx.unbounded_send(response).unwrap();
-    }
 }
 
 impl LanguageModel for FakeLanguageModel {
@@ -184,25 +175,6 @@ impl LanguageModel for FakeLanguageModel {
         .boxed()
     }
 
-    fn use_any_tool(
-        &self,
-        request: LanguageModelRequest,
-        name: String,
-        description: String,
-        schema: serde_json::Value,
-        _cx: &AsyncApp,
-    ) -> BoxFuture<'static, Result<BoxStream<'static, Result<String>>>> {
-        let (tx, rx) = mpsc::unbounded();
-        let tool_call = ToolUseRequest {
-            request,
-            name,
-            description,
-            schema,
-        };
-        self.current_tool_use_txs.lock().push((tool_call, tx));
-        async move { Ok(rx.map(Ok).boxed()) }.boxed()
-    }
-
     fn as_fake(&self) -> &Self {
         self
     }

crates/language_model/src/language_model.rs

@@ -11,7 +11,7 @@ pub mod fake_provider;
 use anyhow::Result;
 use client::Client;
 use futures::FutureExt;
-use futures::{StreamExt, TryStreamExt as _, future::BoxFuture, stream::BoxStream};
+use futures::{StreamExt, future::BoxFuture, stream::BoxStream};
 use gpui::{AnyElement, AnyView, App, AsyncApp, SharedString, Task, Window};
 use icons::IconName;
 use parking_lot::Mutex;
@@ -20,7 +20,7 @@ use schemars::JsonSchema;
 use serde::{Deserialize, Serialize, de::DeserializeOwned};
 use std::fmt;
 use std::ops::{Add, Sub};
-use std::{future::Future, sync::Arc};
+use std::sync::Arc;
 use thiserror::Error;
 use util::serde::is_default;
 
@@ -266,15 +266,6 @@ pub trait LanguageModel: Send + Sync {
         .boxed()
     }
 
-    fn use_any_tool(
-        &self,
-        request: LanguageModelRequest,
-        name: String,
-        description: String,
-        schema: serde_json::Value,
-        cx: &AsyncApp,
-    ) -> BoxFuture<'static, Result<BoxStream<'static, Result<String>>>>;
-
     fn cache_configuration(&self) -> Option<LanguageModelCacheConfiguration> {
         None
     }
@@ -285,33 +276,6 @@ pub trait LanguageModel: Send + Sync {
     }
 }
 
-impl dyn LanguageModel {
-    pub fn use_tool<T: LanguageModelTool>(
-        &self,
-        request: LanguageModelRequest,
-        cx: &AsyncApp,
-    ) -> impl 'static + Future<Output = Result<T>> {
-        let schema = schemars::schema_for!(T);
-        let schema_json = serde_json::to_value(&schema).unwrap();
-        let stream = self.use_any_tool(request, T::name(), T::description(), schema_json, cx);
-        async move {
-            let stream = stream.await?;
-            let response = stream.try_collect::<String>().await?;
-            Ok(serde_json::from_str(&response)?)
-        }
-    }
-
-    pub fn use_tool_stream<T: LanguageModelTool>(
-        &self,
-        request: LanguageModelRequest,
-        cx: &AsyncApp,
-    ) -> BoxFuture<'static, Result<BoxStream<'static, Result<String>>>> {
-        let schema = schemars::schema_for!(T);
-        let schema_json = serde_json::to_value(&schema).unwrap();
-        self.use_any_tool(request, T::name(), T::description(), schema_json, cx)
-    }
-}
-
 pub trait LanguageModelTool: 'static + DeserializeOwned + JsonSchema {
     fn name() -> String;
     fn description() -> String;

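One detail worth noting from this hunk: the `LanguageModelTool` trait itself survives, even though the `use_tool` and `use_tool_stream` helpers that consumed it are deleted. For reference, a minimal implementor of the remaining trait looks like this (`FileSearch` is a hypothetical example, not from the codebase):

```rust
use language_model::LanguageModelTool;
use schemars::JsonSchema;
use serde::Deserialize;

// Hypothetical tool type for illustration; any `DeserializeOwned + JsonSchema`
// type can implement the trait that remains after this PR.
#[derive(Deserialize, JsonSchema)]
struct FileSearch {
    /// Glob pattern to match against project paths.
    pattern: String,
}

impl LanguageModelTool for FileSearch {
    fn name() -> String {
        "file_search".into()
    }

    fn description() -> String {
        "Find files in the project matching a glob pattern.".into()
    }
}
```
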
crates/language_models/src/provider/anthropic.rs

@@ -6,7 +6,7 @@ use collections::{BTreeMap, HashMap};
 use credentials_provider::CredentialsProvider;
 use editor::{Editor, EditorElement, EditorStyle};
 use futures::Stream;
-use futures::{FutureExt, StreamExt, TryStreamExt as _, future::BoxFuture, stream::BoxStream};
+use futures::{FutureExt, StreamExt, future::BoxFuture, stream::BoxStream};
 use gpui::{
     AnyView, App, AsyncApp, Context, Entity, FontStyle, Subscription, Task, TextStyle, WhiteSpace,
 };
@@ -457,44 +457,6 @@ impl LanguageModel for AnthropicModel {
                 min_total_token: config.min_total_token,
             })
     }
-
-    fn use_any_tool(
-        &self,
-        request: LanguageModelRequest,
-        tool_name: String,
-        tool_description: String,
-        input_schema: serde_json::Value,
-        cx: &AsyncApp,
-    ) -> BoxFuture<'static, Result<BoxStream<'static, Result<String>>>> {
-        let mut request = into_anthropic(
-            request,
-            self.model.tool_model_id().into(),
-            self.model.default_temperature(),
-            self.model.max_output_tokens(),
-            self.model.mode(),
-        );
-        request.tool_choice = Some(anthropic::ToolChoice::Tool {
-            name: tool_name.clone(),
-        });
-        request.tools = vec![anthropic::Tool {
-            name: tool_name.clone(),
-            description: tool_description,
-            input_schema,
-        }];
-
-        let response = self.stream_completion(request, cx);
-        self.request_limiter
-            .run(async move {
-                let response = response.await?;
-                Ok(anthropic::extract_tool_args_from_events(
-                    tool_name,
-                    Box::pin(response.map_err(|e| anyhow!(e))),
-                )
-                .await?
-                .boxed())
-            })
-            .boxed()
-    }
 }
 
 pub fn into_anthropic(

crates/language_models/src/provider/bedrock.rs

@@ -12,11 +12,7 @@ use bedrock::bedrock_client::types::{
     ContentBlockDelta, ContentBlockStart, ContentBlockStartEvent, ConverseStreamOutput,
 };
 use bedrock::bedrock_client::{self, Config};
-use bedrock::{
-    BedrockError, BedrockInnerContent, BedrockMessage, BedrockSpecificTool,
-    BedrockStreamingResponse, BedrockTool, BedrockToolChoice, BedrockToolInputSchema, Model,
-    value_to_aws_document,
-};
+use bedrock::{BedrockError, BedrockInnerContent, BedrockMessage, BedrockStreamingResponse, Model};
 use collections::{BTreeMap, HashMap};
 use credentials_provider::CredentialsProvider;
 use editor::{Editor, EditorElement, EditorStyle};
@@ -414,50 +410,6 @@ impl LanguageModel for BedrockModel {
         async move { Ok(future.await?.boxed()) }.boxed()
     }
 
-    fn use_any_tool(
-        &self,
-        request: LanguageModelRequest,
-        name: String,
-        description: String,
-        schema: Value,
-        _cx: &AsyncApp,
-    ) -> BoxFuture<'static, Result<BoxStream<'static, Result<String>>>> {
-        let mut request = into_bedrock(
-            request,
-            self.model.id().into(),
-            self.model.default_temperature(),
-            self.model.max_output_tokens(),
-        );
-
-        request.tool_choice = BedrockSpecificTool::builder()
-            .name(name.clone())
-            .build()
-            .log_err()
-            .map(BedrockToolChoice::Tool);
-
-        if let Some(tool) = BedrockTool::builder()
-            .name(name.clone())
-            .description(description.clone())
-            .input_schema(BedrockToolInputSchema::Json(value_to_aws_document(&schema)))
-            .build()
-            .log_err()
-        {
-            request.tools.push(tool);
-        }
-
-        let handle = self.handler.clone();
-
-        let request = self.stream_completion(request, _cx);
-        self.request_limiter
-            .run(async move {
-                let response = request.map_err(|err| anyhow!(err))?.await;
-                Ok(extract_tool_args_from_events(name, response, handle)
-                    .await?
-                    .boxed())
-            })
-            .boxed()
-    }
-
     fn cache_configuration(&self) -> Option<LanguageModelCacheConfiguration> {
         None
     }

crates/language_models/src/provider/cloud.rs

@@ -29,7 +29,6 @@ use settings::{Settings, SettingsStore};
 use smol::Timer;
 use smol::io::{AsyncReadExt, BufReader};
 use std::{
-    future,
     sync::{Arc, LazyLock},
     time::Duration,
 };
@@ -743,109 +742,6 @@ impl LanguageModel for CloudLanguageModel {
             }
         }
     }
-
-    fn use_any_tool(
-        &self,
-        request: LanguageModelRequest,
-        tool_name: String,
-        tool_description: String,
-        input_schema: serde_json::Value,
-        _cx: &AsyncApp,
-    ) -> BoxFuture<'static, Result<BoxStream<'static, Result<String>>>> {
-        let client = self.client.clone();
-        let llm_api_token = self.llm_api_token.clone();
-
-        match &self.model {
-            CloudModel::Anthropic(model) => {
-                let mut request = into_anthropic(
-                    request,
-                    model.tool_model_id().into(),
-                    model.default_temperature(),
-                    model.max_output_tokens(),
-                    model.mode(),
-                );
-                request.tool_choice = Some(anthropic::ToolChoice::Tool {
-                    name: tool_name.clone(),
-                });
-                request.tools = vec![anthropic::Tool {
-                    name: tool_name.clone(),
-                    description: tool_description,
-                    input_schema,
-                }];
-
-                self.request_limiter
-                    .run(async move {
-                        let response = Self::perform_llm_completion(
-                            client.clone(),
-                            llm_api_token,
-                            PerformCompletionParams {
-                                provider: client::LanguageModelProvider::Anthropic,
-                                model: request.model.clone(),
-                                provider_request: RawValue::from_string(serde_json::to_string(
-                                    &request,
-                                )?)?,
-                            },
-                        )
-                        .await?;
-
-                        Ok(anthropic::extract_tool_args_from_events(
-                            tool_name,
-                            Box::pin(response_lines(response)),
-                        )
-                        .await?
-                        .boxed())
-                    })
-                    .boxed()
-            }
-            CloudModel::OpenAi(model) => {
-                let mut request =
-                    into_open_ai(request, model.id().into(), model.max_output_tokens());
-                request.tool_choice = Some(open_ai::ToolChoice::Other(
-                    open_ai::ToolDefinition::Function {
-                        function: open_ai::FunctionDefinition {
-                            name: tool_name.clone(),
-                            description: None,
-                            parameters: None,
-                        },
-                    },
-                ));
-                request.tools = vec![open_ai::ToolDefinition::Function {
-                    function: open_ai::FunctionDefinition {
-                        name: tool_name.clone(),
-                        description: Some(tool_description),
-                        parameters: Some(input_schema),
-                    },
-                }];
-
-                self.request_limiter
-                    .run(async move {
-                        let response = Self::perform_llm_completion(
-                            client.clone(),
-                            llm_api_token,
-                            PerformCompletionParams {
-                                provider: client::LanguageModelProvider::OpenAi,
-                                model: request.model.clone(),
-                                provider_request: RawValue::from_string(serde_json::to_string(
-                                    &request,
-                                )?)?,
-                            },
-                        )
-                        .await?;
-
-                        Ok(open_ai::extract_tool_args_from_events(
-                            tool_name,
-                            Box::pin(response_lines(response)),
-                        )
-                        .await?
-                        .boxed())
-                    })
-                    .boxed()
-            }
-            CloudModel::Google(_) => {
-                future::ready(Err(anyhow!("tool use not implemented for Google AI"))).boxed()
-            }
-        }
-    }
 }
 
 fn response_lines<T: DeserializeOwned>(

crates/language_models/src/provider/copilot_chat.rs

@@ -1,4 +1,3 @@
-use std::future;
 use std::sync::Arc;
 
 use anyhow::{Result, anyhow};
@@ -293,17 +292,6 @@ impl LanguageModel for CopilotChatLanguageModel {
         }
         .boxed()
     }
-
-    fn use_any_tool(
-        &self,
-        _request: LanguageModelRequest,
-        _name: String,
-        _description: String,
-        _schema: serde_json::Value,
-        _cx: &AsyncApp,
-    ) -> BoxFuture<'static, Result<BoxStream<'static, Result<String>>>> {
-        future::ready(Err(anyhow!("not implemented"))).boxed()
-    }
 }
 
 impl CopilotChatLanguageModel {

crates/language_models/src/provider/deepseek.rs

@@ -356,61 +356,6 @@ impl LanguageModel for DeepSeekLanguageModel {
         }
         .boxed()
     }
-
-    fn use_any_tool(
-        &self,
-        request: LanguageModelRequest,
-        name: String,
-        description: String,
-        schema: serde_json::Value,
-        cx: &AsyncApp,
-    ) -> BoxFuture<'static, Result<futures::stream::BoxStream<'static, Result<String>>>> {
-        let mut deepseek_request = into_deepseek(
-            request,
-            self.model.id().to_string(),
-            self.max_output_tokens(),
-        );
-
-        deepseek_request.tools = vec![deepseek::ToolDefinition::Function {
-            function: deepseek::FunctionDefinition {
-                name: name.clone(),
-                description: Some(description),
-                parameters: Some(schema),
-            },
-        }];
-
-        let response_stream = self.stream_completion(deepseek_request, cx);
-
-        self.request_limiter
-            .run(async move {
-                let stream = response_stream.await?;
-
-                let tool_args_stream = stream
-                    .filter_map(move |response| async move {
-                        match response {
-                            Ok(response) => {
-                                for choice in response.choices {
-                                    if let Some(tool_calls) = choice.delta.tool_calls {
-                                        for tool_call in tool_calls {
-                                            if let Some(function) = tool_call.function {
-                                                if let Some(args) = function.arguments {
-                                                    return Some(Ok(args));
-                                                }
-                                            }
-                                        }
-                                    }
-                                }
-                                None
-                            }
-                            Err(e) => Some(Err(e)),
-                        }
-                    })
-                    .boxed();
-
-                Ok(tool_args_stream)
-            })
-            .boxed()
-    }
 }
 
 pub fn into_deepseek(

crates/language_models/src/provider/google.rs

@@ -356,61 +356,6 @@ impl LanguageModel for GoogleLanguageModel {
         });
         async move { Ok(future.await?.boxed()) }.boxed()
     }
-
-    fn use_any_tool(
-        &self,
-        request: LanguageModelRequest,
-        name: String,
-        description: String,
-        schema: serde_json::Value,
-        cx: &AsyncApp,
-    ) -> BoxFuture<'static, Result<futures::stream::BoxStream<'static, Result<String>>>> {
-        let mut request = into_google(request, self.model.id().to_string());
-        request.tools = Some(vec![google_ai::Tool {
-            function_declarations: vec![google_ai::FunctionDeclaration {
-                name: name.clone(),
-                description,
-                parameters: schema,
-            }],
-        }]);
-        request.tool_config = Some(google_ai::ToolConfig {
-            function_calling_config: google_ai::FunctionCallingConfig {
-                mode: google_ai::FunctionCallingMode::Any,
-                allowed_function_names: Some(vec![name]),
-            },
-        });
-        let response = self.stream_completion(request, cx);
-        self.request_limiter
-            .run(async move {
-                let response = response.await?;
-                Ok(response
-                    .filter_map(|event| async move {
-                        match event {
-                            Ok(response) => {
-                                if let Some(candidates) = &response.candidates {
-                                    for candidate in candidates {
-                                        for part in &candidate.content.parts {
-                                            if let google_ai::Part::FunctionCallPart(
-                                                function_call_part,
-                                            ) = part
-                                            {
-                                                return Some(Ok(serde_json::to_string(
-                                                    &function_call_part.function_call.args,
-                                                )
-                                                .unwrap_or_default()));
-                                            }
-                                        }
-                                    }
-                                }
-                                None
-                            }
-                            Err(e) => Some(Err(e)),
-                        }
-                    })
-                    .boxed())
-            })
-            .boxed()
-    }
 }
 
 pub fn into_google(

crates/language_models/src/provider/lmstudio.rs

@@ -364,17 +364,6 @@ impl LanguageModel for LmStudioLanguageModel {
         }
         .boxed()
     }
-
-    fn use_any_tool(
-        &self,
-        _request: LanguageModelRequest,
-        _tool_name: String,
-        _tool_description: String,
-        _schema: serde_json::Value,
-        _cx: &AsyncApp,
-    ) -> BoxFuture<'static, Result<BoxStream<'static, Result<String>>>> {
-        async move { Ok(futures::stream::empty().boxed()) }.boxed()
-    }
 }
 
 struct ConfigurationView {

crates/language_models/src/provider/mistral.rs

@@ -368,55 +368,6 @@ impl LanguageModel for MistralLanguageModel {
         }
         .boxed()
     }
-
-    fn use_any_tool(
-        &self,
-        request: LanguageModelRequest,
-        tool_name: String,
-        tool_description: String,
-        schema: serde_json::Value,
-        cx: &AsyncApp,
-    ) -> BoxFuture<'static, Result<futures::stream::BoxStream<'static, Result<String>>>> {
-        let mut request = into_mistral(request, self.model.id().into(), self.max_output_tokens());
-        request.tools = vec![mistral::ToolDefinition::Function {
-            function: mistral::FunctionDefinition {
-                name: tool_name.clone(),
-                description: Some(tool_description),
-                parameters: Some(schema),
-            },
-        }];
-
-        let response = self.stream_completion(request, cx);
-        self.request_limiter
-            .run(async move {
-                let stream = response.await?;
-
-                let tool_args_stream = stream
-                    .filter_map(move |response| async move {
-                        match response {
-                            Ok(response) => {
-                                for choice in response.choices {
-                                    if let Some(tool_calls) = choice.delta.tool_calls {
-                                        for tool_call in tool_calls {
-                                            if let Some(function) = tool_call.function {
-                                                if let Some(args) = function.arguments {
-                                                    return Some(Ok(args));
-                                                }
-                                            }
-                                        }
-                                    }
-                                }
-                                None
-                            }
-                            Err(e) => Some(Err(e)),
-                        }
-                    })
-                    .boxed();
-
-                Ok(tool_args_stream)
-            })
-            .boxed()
-    }
 }
 
 pub fn into_mistral(

crates/language_models/src/provider/ollama.rs

@@ -1,4 +1,4 @@
-use anyhow::{Result, anyhow, bail};
+use anyhow::{Result, anyhow};
 use futures::{FutureExt, StreamExt, future::BoxFuture, stream::BoxStream};
 use gpui::{AnyView, App, AsyncApp, Context, Subscription, Task};
 use http_client::HttpClient;
@@ -9,8 +9,8 @@ use language_model::{
     LanguageModelRequest, RateLimiter, Role,
 };
 use ollama::{
-    ChatMessage, ChatOptions, ChatRequest, ChatResponseDelta, KeepAlive, OllamaToolCall,
-    get_models, preload_model, stream_chat_completion,
+    ChatMessage, ChatOptions, ChatRequest, KeepAlive, get_models, preload_model,
+    stream_chat_completion,
 };
 use schemars::JsonSchema;
 use serde::{Deserialize, Serialize};
@@ -265,22 +265,6 @@ impl OllamaLanguageModel {
             tools: vec![],
         }
     }
-    fn request_completion(
-        &self,
-        request: ChatRequest,
-        cx: &AsyncApp,
-    ) -> BoxFuture<'static, Result<ChatResponseDelta>> {
-        let http_client = self.http_client.clone();
-
-        let Ok(api_url) = cx.update(|cx| {
-            let settings = &AllLanguageModelSettings::get_global(cx).ollama;
-            settings.api_url.clone()
-        }) else {
-            return futures::future::ready(Err(anyhow!("App state dropped"))).boxed();
-        };
-
-        async move { ollama::complete(http_client.as_ref(), &api_url, request).await }.boxed()
-    }
 }
 
 impl LanguageModel for OllamaLanguageModel {
@@ -372,48 +356,6 @@ impl LanguageModel for OllamaLanguageModel {
         }
         .boxed()
     }
-
-    fn use_any_tool(
-        &self,
-        request: LanguageModelRequest,
-        tool_name: String,
-        tool_description: String,
-        schema: serde_json::Value,
-        cx: &AsyncApp,
-    ) -> BoxFuture<'static, Result<BoxStream<'static, Result<String>>>> {
-        use ollama::{OllamaFunctionTool, OllamaTool};
-        let function = OllamaFunctionTool {
-            name: tool_name.clone(),
-            description: Some(tool_description),
-            parameters: Some(schema),
-        };
-        let tools = vec![OllamaTool::Function { function }];
-        let request = self.to_ollama_request(request).with_tools(tools);
-        let response = self.request_completion(request, cx);
-        self.request_limiter
-            .run(async move {
-                let response = response.await?;
-                let ChatMessage::Assistant { tool_calls, .. } = response.message else {
-                    bail!("message does not have an assistant role");
-                };
-                if let Some(tool_calls) = tool_calls.filter(|calls| !calls.is_empty()) {
-                    for call in tool_calls {
-                        let OllamaToolCall::Function(function) = call;
-                        if function.name == tool_name {
-                            return Ok(futures::stream::once(async move {
-                                Ok(function.arguments.to_string())
-                            })
-                            .boxed());
-                        }
-                    }
-                } else {
-                    bail!("assistant message does not have any tool calls");
-                };
-
-                bail!("tool not used")
-            })
-            .boxed()
-    }
 }
 
 struct ConfigurationView {

crates/language_models/src/provider/open_ai.rs

@@ -12,9 +12,7 @@ use language_model::{
     LanguageModelName, LanguageModelProvider, LanguageModelProviderId, LanguageModelProviderName,
     LanguageModelProviderState, LanguageModelRequest, RateLimiter, Role,
 };
-use open_ai::{
-    FunctionDefinition, ResponseStreamEvent, ToolChoice, ToolDefinition, stream_completion,
-};
+use open_ai::{ResponseStreamEvent, stream_completion};
 use schemars::JsonSchema;
 use serde::{Deserialize, Serialize};
 use settings::{Settings, SettingsStore};
@@ -331,43 +329,6 @@ impl LanguageModel for OpenAiLanguageModel {
         }
         .boxed()
     }
-
-    fn use_any_tool(
-        &self,
-        request: LanguageModelRequest,
-        tool_name: String,
-        tool_description: String,
-        schema: serde_json::Value,
-        cx: &AsyncApp,
-    ) -> BoxFuture<'static, Result<futures::stream::BoxStream<'static, Result<String>>>> {
-        let mut request = into_open_ai(request, self.model.id().into(), self.max_output_tokens());
-        request.tool_choice = Some(ToolChoice::Other(ToolDefinition::Function {
-            function: FunctionDefinition {
-                name: tool_name.clone(),
-                description: None,
-                parameters: None,
-            },
-        }));
-        request.tools = vec![ToolDefinition::Function {
-            function: FunctionDefinition {
-                name: tool_name.clone(),
-                description: Some(tool_description),
-                parameters: Some(schema),
-            },
-        }];
-
-        let response = self.stream_completion(request, cx);
-        self.request_limiter
-            .run(async move {
-                let response = response.await?;
-                Ok(
-                    open_ai::extract_tool_args_from_events(tool_name, Box::pin(response))
-                        .await?
-                        .boxed(),
-                )
-            })
-            .boxed()
-    }
 }
 
 pub fn into_open_ai(