Cargo.lock 🔗
@@ -9334,7 +9334,6 @@ dependencies = [
"schemars",
"serde",
"serde_json",
- "settings",
"smol",
"thiserror 2.0.17",
"util",
Jakub Konka created
A couple of things that this PR wants to accomplish:
* remove dependency on `settings` crate from `language_model`
* refactor provider-specific code into submodules - to be honest, I
would go one step further and put all provider-specific bits in
`language_models` instead, but I realise we have cloud logic in
`language_model` which uses those too, making it tricky
* move anthropic-specific telemetry into `language_models` crate - I
think it makes more sense for it to be there
Anyhow, I would very much appreciate it if you could have a look
@mikayla-maki and @maxdeviant and let me know what you think, whether
you would tweak something, etc.
Release Notes:
- N/A
Cargo.lock | 1
crates/agent_ui/src/buffer_codegen.rs | 11
crates/agent_ui/src/inline_assistant.rs | 17
crates/agent_ui/src/terminal_codegen.rs | 11
crates/agent_ui/src/terminal_inline_assistant.rs | 13
crates/language_model/Cargo.toml | 1
crates/language_model/src/language_model.rs | 193 -------
crates/language_model/src/provider.rs | 12
crates/language_model/src/provider/anthropic.rs | 80 +++
crates/language_model/src/provider/google.rs | 5
crates/language_model/src/provider/open_ai.rs | 28 +
crates/language_model/src/provider/open_router.rs | 69 ++
crates/language_model/src/provider/x_ai.rs | 4
crates/language_model/src/provider/zed.rs | 5
crates/language_model/src/registry.rs | 2
crates/language_models/src/provider/anthropic.rs | 9
crates/language_models/src/provider/anthropic/telemetry.rs | 6
crates/language_models/src/provider/cloud.rs | 37
crates/language_models/src/provider/google.rs | 10
crates/language_models/src/provider/open_ai.rs | 7
20 files changed, 281 insertions(+), 240 deletions(-)
@@ -9334,7 +9334,6 @@ dependencies = [
"schemars",
"serde",
"serde_json",
- "settings",
"smol",
"thiserror 2.0.17",
"util",
@@ -18,6 +18,9 @@ use language_model::{
LanguageModelRequestTool, LanguageModelTextStream, LanguageModelToolChoice,
LanguageModelToolUse, Role, TokenUsage,
};
+use language_models::provider::anthropic::telemetry::{
+ AnthropicCompletionType, AnthropicEventData, AnthropicEventReporter, AnthropicEventType,
+};
use multi_buffer::MultiBufferRow;
use parking_lot::Mutex;
use prompt_store::PromptBuilder;
@@ -637,7 +640,7 @@ impl CodegenAlternative {
stream: impl 'static + Future<Output = Result<LanguageModelTextStream>>,
cx: &mut Context<Self>,
) -> Task<()> {
- let anthropic_reporter = language_model::AnthropicEventReporter::new(&model, cx);
+ let anthropic_reporter = AnthropicEventReporter::new(&model, cx);
let session_id = self.session_id;
let model_telemetry_id = model.telemetry_id();
let model_provider_id = model.provider_id().to_string();
@@ -830,9 +833,9 @@ impl CodegenAlternative {
error_message = error_message.as_deref(),
);
- anthropic_reporter.report(language_model::AnthropicEventData {
- completion_type: language_model::AnthropicCompletionType::Editor,
- event: language_model::AnthropicEventType::Response,
+ anthropic_reporter.report(AnthropicEventData {
+ completion_type: AnthropicCompletionType::Editor,
+ event: AnthropicEventType::Response,
language_name: language_name.map(|n| n.to_string()),
message_id,
});
@@ -1,5 +1,6 @@
-use language_model::AnthropicEventData;
-use language_model::report_anthropic_event;
+use language_models::provider::anthropic::telemetry::{
+ AnthropicCompletionType, AnthropicEventData, AnthropicEventType, report_anthropic_event,
+};
use std::cmp;
use std::mem;
use std::ops::Range;
@@ -467,8 +468,8 @@ impl InlineAssistant {
report_anthropic_event(
&model.model,
AnthropicEventData {
- completion_type: language_model::AnthropicCompletionType::Editor,
- event: language_model::AnthropicEventType::Invoked,
+ completion_type: AnthropicCompletionType::Editor,
+ event: AnthropicEventType::Invoked,
language_name: buffer.language().map(|language| language.name().to_proto()),
message_id: None,
},
@@ -1105,13 +1106,13 @@ impl InlineAssistant {
(
"rejected",
"Assistant Response Rejected",
- language_model::AnthropicEventType::Reject,
+ AnthropicEventType::Reject,
)
} else {
(
"accepted",
"Assistant Response Accepted",
- language_model::AnthropicEventType::Accept,
+ AnthropicEventType::Accept,
)
};
@@ -1128,8 +1129,8 @@ impl InlineAssistant {
report_anthropic_event(
&model.model,
- language_model::AnthropicEventData {
- completion_type: language_model::AnthropicCompletionType::Editor,
+ AnthropicEventData {
+ completion_type: AnthropicCompletionType::Editor,
event: anthropic_event_type,
language_name,
message_id,
@@ -2,6 +2,9 @@ use crate::inline_prompt_editor::CodegenStatus;
use futures::{SinkExt, StreamExt, channel::mpsc};
use gpui::{App, AppContext as _, Context, Entity, EventEmitter, Task};
use language_model::{ConfiguredModel, LanguageModelRegistry, LanguageModelRequest};
+use language_models::provider::anthropic::telemetry::{
+ AnthropicCompletionType, AnthropicEventData, AnthropicEventReporter, AnthropicEventType,
+};
use std::time::Instant;
use terminal::Terminal;
use uuid::Uuid;
@@ -40,7 +43,7 @@ impl TerminalCodegen {
return;
};
- let anthropic_reporter = language_model::AnthropicEventReporter::new(&model, cx);
+ let anthropic_reporter = AnthropicEventReporter::new(&model, cx);
let session_id = self.session_id;
let model_telemetry_id = model.telemetry_id();
let model_provider_id = model.provider_id().to_string();
@@ -94,9 +97,9 @@ impl TerminalCodegen {
error_message = error_message,
);
- anthropic_reporter.report(language_model::AnthropicEventData {
- completion_type: language_model::AnthropicCompletionType::Terminal,
- event: language_model::AnthropicEventType::Response,
+ anthropic_reporter.report(AnthropicEventData {
+ completion_type: AnthropicCompletionType::Terminal,
+ event: AnthropicEventType::Response,
language_name: None,
message_id,
});
@@ -17,7 +17,10 @@ use gpui::{App, Entity, Focusable, Global, Subscription, Task, UpdateGlobal, Wea
use language::Buffer;
use language_model::{
CompletionIntent, ConfiguredModel, LanguageModelRegistry, LanguageModelRequest,
- LanguageModelRequestMessage, Role, report_anthropic_event,
+ LanguageModelRequestMessage, Role,
+};
+use language_models::provider::anthropic::telemetry::{
+ AnthropicCompletionType, AnthropicEventData, AnthropicEventType, report_anthropic_event,
};
use project::Project;
use prompt_store::{PromptBuilder, PromptStore};
@@ -312,13 +315,13 @@ impl TerminalInlineAssistant {
(
"rejected",
"Assistant Response Rejected",
- language_model::AnthropicEventType::Reject,
+ AnthropicEventType::Reject,
)
} else {
(
"accepted",
"Assistant Response Accepted",
- language_model::AnthropicEventType::Accept,
+ AnthropicEventType::Accept,
)
};
@@ -335,8 +338,8 @@ impl TerminalInlineAssistant {
report_anthropic_event(
&model,
- language_model::AnthropicEventData {
- completion_type: language_model::AnthropicCompletionType::Terminal,
+ AnthropicEventData {
+ completion_type: AnthropicCompletionType::Terminal,
event: anthropic_event_type,
language_name: None,
message_id,
@@ -37,7 +37,6 @@ parking_lot.workspace = true
schemars.workspace = true
serde.workspace = true
serde_json.workspace = true
-settings.workspace = true
smol.workspace = true
thiserror.workspace = true
util.workspace = true
@@ -1,16 +1,15 @@
mod api_key;
mod model;
+mod provider;
mod rate_limiter;
mod registry;
mod request;
mod role;
-mod telemetry;
pub mod tool_schema;
#[cfg(any(test, feature = "test-support"))]
pub mod fake_provider;
-use anthropic::{AnthropicError, parse_prompt_too_long};
use anyhow::{Result, anyhow};
use client::Client;
use client::UserStore;
@@ -20,10 +19,8 @@ use futures::{StreamExt, future::BoxFuture, stream::BoxStream};
use gpui::{AnyView, App, AsyncApp, Entity, SharedString, Task, Window};
use http_client::{StatusCode, http};
use icons::IconName;
-use open_router::OpenRouterError;
use parking_lot::Mutex;
use serde::{Deserialize, Serialize};
-pub use settings::LanguageModelCacheConfiguration;
use std::ops::{Add, Sub};
use std::str::FromStr;
use std::sync::Arc;
@@ -38,30 +35,10 @@ pub use crate::rate_limiter::*;
pub use crate::registry::*;
pub use crate::request::*;
pub use crate::role::*;
-pub use crate::telemetry::*;
pub use crate::tool_schema::LanguageModelToolSchemaFormat;
+pub use provider::*;
pub use zed_env_vars::{EnvVar, env_var};
-pub const ANTHROPIC_PROVIDER_ID: LanguageModelProviderId =
- LanguageModelProviderId::new("anthropic");
-pub const ANTHROPIC_PROVIDER_NAME: LanguageModelProviderName =
- LanguageModelProviderName::new("Anthropic");
-
-pub const GOOGLE_PROVIDER_ID: LanguageModelProviderId = LanguageModelProviderId::new("google");
-pub const GOOGLE_PROVIDER_NAME: LanguageModelProviderName =
- LanguageModelProviderName::new("Google AI");
-
-pub const OPEN_AI_PROVIDER_ID: LanguageModelProviderId = LanguageModelProviderId::new("openai");
-pub const OPEN_AI_PROVIDER_NAME: LanguageModelProviderName =
- LanguageModelProviderName::new("OpenAI");
-
-pub const X_AI_PROVIDER_ID: LanguageModelProviderId = LanguageModelProviderId::new("x_ai");
-pub const X_AI_PROVIDER_NAME: LanguageModelProviderName = LanguageModelProviderName::new("xAI");
-
-pub const ZED_CLOUD_PROVIDER_ID: LanguageModelProviderId = LanguageModelProviderId::new("zed.dev");
-pub const ZED_CLOUD_PROVIDER_NAME: LanguageModelProviderName =
- LanguageModelProviderName::new("Zed");
-
pub fn init(user_store: Entity<UserStore>, client: Arc<Client>, cx: &mut App) {
init_settings(cx);
RefreshLlmTokenListener::register(client, user_store, cx);
@@ -71,6 +48,13 @@ pub fn init_settings(cx: &mut App) {
registry::init(cx);
}
+#[derive(Clone, Debug)]
+pub struct LanguageModelCacheConfiguration {
+ pub max_cache_anchors: usize,
+ pub should_speculate: bool,
+ pub min_total_token: u64,
+}
+
/// A completion event from a language model.
#[derive(Debug, PartialEq, Clone, Serialize, Deserialize)]
pub enum LanguageModelCompletionEvent {
@@ -310,165 +294,6 @@ impl LanguageModelCompletionError {
}
}
-impl From<AnthropicError> for LanguageModelCompletionError {
- fn from(error: AnthropicError) -> Self {
- let provider = ANTHROPIC_PROVIDER_NAME;
- match error {
- AnthropicError::SerializeRequest(error) => Self::SerializeRequest { provider, error },
- AnthropicError::BuildRequestBody(error) => Self::BuildRequestBody { provider, error },
- AnthropicError::HttpSend(error) => Self::HttpSend { provider, error },
- AnthropicError::DeserializeResponse(error) => {
- Self::DeserializeResponse { provider, error }
- }
- AnthropicError::ReadResponse(error) => Self::ApiReadResponseError { provider, error },
- AnthropicError::HttpResponseError {
- status_code,
- message,
- } => Self::HttpResponseError {
- provider,
- status_code,
- message,
- },
- AnthropicError::RateLimit { retry_after } => Self::RateLimitExceeded {
- provider,
- retry_after: Some(retry_after),
- },
- AnthropicError::ServerOverloaded { retry_after } => Self::ServerOverloaded {
- provider,
- retry_after,
- },
- AnthropicError::ApiError(api_error) => api_error.into(),
- }
- }
-}
-
-impl From<anthropic::ApiError> for LanguageModelCompletionError {
- fn from(error: anthropic::ApiError) -> Self {
- use anthropic::ApiErrorCode::*;
- let provider = ANTHROPIC_PROVIDER_NAME;
- match error.code() {
- Some(code) => match code {
- InvalidRequestError => Self::BadRequestFormat {
- provider,
- message: error.message,
- },
- AuthenticationError => Self::AuthenticationError {
- provider,
- message: error.message,
- },
- PermissionError => Self::PermissionError {
- provider,
- message: error.message,
- },
- NotFoundError => Self::ApiEndpointNotFound { provider },
- RequestTooLarge => Self::PromptTooLarge {
- tokens: parse_prompt_too_long(&error.message),
- },
- RateLimitError => Self::RateLimitExceeded {
- provider,
- retry_after: None,
- },
- ApiError => Self::ApiInternalServerError {
- provider,
- message: error.message,
- },
- OverloadedError => Self::ServerOverloaded {
- provider,
- retry_after: None,
- },
- },
- None => Self::Other(error.into()),
- }
- }
-}
-
-impl From<open_ai::RequestError> for LanguageModelCompletionError {
- fn from(error: open_ai::RequestError) -> Self {
- match error {
- open_ai::RequestError::HttpResponseError {
- provider,
- status_code,
- body,
- headers,
- } => {
- let retry_after = headers
- .get(http::header::RETRY_AFTER)
- .and_then(|val| val.to_str().ok()?.parse::<u64>().ok())
- .map(Duration::from_secs);
-
- Self::from_http_status(provider.into(), status_code, body, retry_after)
- }
- open_ai::RequestError::Other(e) => Self::Other(e),
- }
- }
-}
-
-impl From<OpenRouterError> for LanguageModelCompletionError {
- fn from(error: OpenRouterError) -> Self {
- let provider = LanguageModelProviderName::new("OpenRouter");
- match error {
- OpenRouterError::SerializeRequest(error) => Self::SerializeRequest { provider, error },
- OpenRouterError::BuildRequestBody(error) => Self::BuildRequestBody { provider, error },
- OpenRouterError::HttpSend(error) => Self::HttpSend { provider, error },
- OpenRouterError::DeserializeResponse(error) => {
- Self::DeserializeResponse { provider, error }
- }
- OpenRouterError::ReadResponse(error) => Self::ApiReadResponseError { provider, error },
- OpenRouterError::RateLimit { retry_after } => Self::RateLimitExceeded {
- provider,
- retry_after: Some(retry_after),
- },
- OpenRouterError::ServerOverloaded { retry_after } => Self::ServerOverloaded {
- provider,
- retry_after,
- },
- OpenRouterError::ApiError(api_error) => api_error.into(),
- }
- }
-}
-
-impl From<open_router::ApiError> for LanguageModelCompletionError {
- fn from(error: open_router::ApiError) -> Self {
- use open_router::ApiErrorCode::*;
- let provider = LanguageModelProviderName::new("OpenRouter");
- match error.code {
- InvalidRequestError => Self::BadRequestFormat {
- provider,
- message: error.message,
- },
- AuthenticationError => Self::AuthenticationError {
- provider,
- message: error.message,
- },
- PaymentRequiredError => Self::AuthenticationError {
- provider,
- message: format!("Payment required: {}", error.message),
- },
- PermissionError => Self::PermissionError {
- provider,
- message: error.message,
- },
- RequestTimedOut => Self::HttpResponseError {
- provider,
- status_code: StatusCode::REQUEST_TIMEOUT,
- message: error.message,
- },
- RateLimitError => Self::RateLimitExceeded {
- provider,
- retry_after: None,
- },
- ApiError => Self::ApiInternalServerError {
- provider,
- message: error.message,
- },
- OverloadedError => Self::ServerOverloaded {
- provider,
- retry_after: None,
- },
- }
- }
-}
-
#[derive(Debug, PartialEq, Clone, Copy, Serialize, Deserialize)]
#[serde(rename_all = "snake_case")]
pub enum StopReason {
@@ -0,0 +1,12 @@
+pub mod anthropic;
+pub mod google;
+pub mod open_ai;
+pub mod open_router;
+pub mod x_ai;
+pub mod zed;
+
+pub use anthropic::*;
+pub use google::*;
+pub use open_ai::*;
+pub use x_ai::*;
+pub use zed::*;
@@ -0,0 +1,80 @@
+use crate::{LanguageModelCompletionError, LanguageModelProviderId, LanguageModelProviderName};
+use anthropic::AnthropicError;
+pub use anthropic::parse_prompt_too_long;
+
+pub const ANTHROPIC_PROVIDER_ID: LanguageModelProviderId =
+ LanguageModelProviderId::new("anthropic");
+pub const ANTHROPIC_PROVIDER_NAME: LanguageModelProviderName =
+ LanguageModelProviderName::new("Anthropic");
+
+impl From<AnthropicError> for LanguageModelCompletionError {
+ fn from(error: AnthropicError) -> Self {
+ let provider = ANTHROPIC_PROVIDER_NAME;
+ match error {
+ AnthropicError::SerializeRequest(error) => Self::SerializeRequest { provider, error },
+ AnthropicError::BuildRequestBody(error) => Self::BuildRequestBody { provider, error },
+ AnthropicError::HttpSend(error) => Self::HttpSend { provider, error },
+ AnthropicError::DeserializeResponse(error) => {
+ Self::DeserializeResponse { provider, error }
+ }
+ AnthropicError::ReadResponse(error) => Self::ApiReadResponseError { provider, error },
+ AnthropicError::HttpResponseError {
+ status_code,
+ message,
+ } => Self::HttpResponseError {
+ provider,
+ status_code,
+ message,
+ },
+ AnthropicError::RateLimit { retry_after } => Self::RateLimitExceeded {
+ provider,
+ retry_after: Some(retry_after),
+ },
+ AnthropicError::ServerOverloaded { retry_after } => Self::ServerOverloaded {
+ provider,
+ retry_after,
+ },
+ AnthropicError::ApiError(api_error) => api_error.into(),
+ }
+ }
+}
+
+impl From<anthropic::ApiError> for LanguageModelCompletionError {
+ fn from(error: anthropic::ApiError) -> Self {
+ use anthropic::ApiErrorCode::*;
+ let provider = ANTHROPIC_PROVIDER_NAME;
+ match error.code() {
+ Some(code) => match code {
+ InvalidRequestError => Self::BadRequestFormat {
+ provider,
+ message: error.message,
+ },
+ AuthenticationError => Self::AuthenticationError {
+ provider,
+ message: error.message,
+ },
+ PermissionError => Self::PermissionError {
+ provider,
+ message: error.message,
+ },
+ NotFoundError => Self::ApiEndpointNotFound { provider },
+ RequestTooLarge => Self::PromptTooLarge {
+ tokens: parse_prompt_too_long(&error.message),
+ },
+ RateLimitError => Self::RateLimitExceeded {
+ provider,
+ retry_after: None,
+ },
+ ApiError => Self::ApiInternalServerError {
+ provider,
+ message: error.message,
+ },
+ OverloadedError => Self::ServerOverloaded {
+ provider,
+ retry_after: None,
+ },
+ },
+ None => Self::Other(error.into()),
+ }
+ }
+}
@@ -0,0 +1,5 @@
+use crate::{LanguageModelProviderId, LanguageModelProviderName};
+
+pub const GOOGLE_PROVIDER_ID: LanguageModelProviderId = LanguageModelProviderId::new("google");
+pub const GOOGLE_PROVIDER_NAME: LanguageModelProviderName =
+ LanguageModelProviderName::new("Google AI");
@@ -0,0 +1,28 @@
+use crate::{LanguageModelCompletionError, LanguageModelProviderId, LanguageModelProviderName};
+use http_client::http;
+use std::time::Duration;
+
+pub const OPEN_AI_PROVIDER_ID: LanguageModelProviderId = LanguageModelProviderId::new("openai");
+pub const OPEN_AI_PROVIDER_NAME: LanguageModelProviderName =
+ LanguageModelProviderName::new("OpenAI");
+
+impl From<open_ai::RequestError> for LanguageModelCompletionError {
+ fn from(error: open_ai::RequestError) -> Self {
+ match error {
+ open_ai::RequestError::HttpResponseError {
+ provider,
+ status_code,
+ body,
+ headers,
+ } => {
+ let retry_after = headers
+ .get(http::header::RETRY_AFTER)
+ .and_then(|val| val.to_str().ok()?.parse::<u64>().ok())
+ .map(Duration::from_secs);
+
+ Self::from_http_status(provider.into(), status_code, body, retry_after)
+ }
+ open_ai::RequestError::Other(e) => Self::Other(e),
+ }
+ }
+}
@@ -0,0 +1,69 @@
+use crate::{LanguageModelCompletionError, LanguageModelProviderName};
+use http_client::StatusCode;
+use open_router::OpenRouterError;
+
+impl From<OpenRouterError> for LanguageModelCompletionError {
+ fn from(error: OpenRouterError) -> Self {
+ let provider = LanguageModelProviderName::new("OpenRouter");
+ match error {
+ OpenRouterError::SerializeRequest(error) => Self::SerializeRequest { provider, error },
+ OpenRouterError::BuildRequestBody(error) => Self::BuildRequestBody { provider, error },
+ OpenRouterError::HttpSend(error) => Self::HttpSend { provider, error },
+ OpenRouterError::DeserializeResponse(error) => {
+ Self::DeserializeResponse { provider, error }
+ }
+ OpenRouterError::ReadResponse(error) => Self::ApiReadResponseError { provider, error },
+ OpenRouterError::RateLimit { retry_after } => Self::RateLimitExceeded {
+ provider,
+ retry_after: Some(retry_after),
+ },
+ OpenRouterError::ServerOverloaded { retry_after } => Self::ServerOverloaded {
+ provider,
+ retry_after,
+ },
+ OpenRouterError::ApiError(api_error) => api_error.into(),
+ }
+ }
+}
+
+impl From<open_router::ApiError> for LanguageModelCompletionError {
+ fn from(error: open_router::ApiError) -> Self {
+ use open_router::ApiErrorCode::*;
+ let provider = LanguageModelProviderName::new("OpenRouter");
+ match error.code {
+ InvalidRequestError => Self::BadRequestFormat {
+ provider,
+ message: error.message,
+ },
+ AuthenticationError => Self::AuthenticationError {
+ provider,
+ message: error.message,
+ },
+ PaymentRequiredError => Self::AuthenticationError {
+ provider,
+ message: format!("Payment required: {}", error.message),
+ },
+ PermissionError => Self::PermissionError {
+ provider,
+ message: error.message,
+ },
+ RequestTimedOut => Self::HttpResponseError {
+ provider,
+ status_code: StatusCode::REQUEST_TIMEOUT,
+ message: error.message,
+ },
+ RateLimitError => Self::RateLimitExceeded {
+ provider,
+ retry_after: None,
+ },
+ ApiError => Self::ApiInternalServerError {
+ provider,
+ message: error.message,
+ },
+ OverloadedError => Self::ServerOverloaded {
+ provider,
+ retry_after: None,
+ },
+ }
+ }
+}
@@ -0,0 +1,4 @@
+use crate::{LanguageModelProviderId, LanguageModelProviderName};
+
+pub const X_AI_PROVIDER_ID: LanguageModelProviderId = LanguageModelProviderId::new("x_ai");
+pub const X_AI_PROVIDER_NAME: LanguageModelProviderName = LanguageModelProviderName::new("xAI");
@@ -0,0 +1,5 @@
+use crate::{LanguageModelProviderId, LanguageModelProviderName};
+
+pub const ZED_CLOUD_PROVIDER_ID: LanguageModelProviderId = LanguageModelProviderId::new("zed.dev");
+pub const ZED_CLOUD_PROVIDER_NAME: LanguageModelProviderName =
+ LanguageModelProviderName::new("Zed");
@@ -101,7 +101,7 @@ impl ConfiguredModel {
}
pub fn is_provided_by_zed(&self) -> bool {
- self.provider.id() == crate::ZED_CLOUD_PROVIDER_ID
+ self.provider.id() == crate::provider::ZED_CLOUD_PROVIDER_ID
}
}
@@ -1,3 +1,5 @@
+pub mod telemetry;
+
use anthropic::{
ANTHROPIC_API_URL, AnthropicError, AnthropicModelMode, ContentDelta, CountTokensRequest, Event,
ResponseContent, ToolResultContent, ToolResultPart, Usage,
@@ -8,7 +10,8 @@ use futures::{FutureExt, Stream, StreamExt, future::BoxFuture, stream::BoxStream
use gpui::{AnyView, App, AsyncApp, Context, Entity, Task};
use http_client::HttpClient;
use language_model::{
- ApiKeyState, AuthenticateError, ConfigurationViewTargetAgent, EnvVar, IconOrSvg, LanguageModel,
+ ANTHROPIC_PROVIDER_ID, ANTHROPIC_PROVIDER_NAME, ApiKeyState, AuthenticateError,
+ ConfigurationViewTargetAgent, EnvVar, IconOrSvg, LanguageModel,
LanguageModelCacheConfiguration, LanguageModelCompletionError, LanguageModelCompletionEvent,
LanguageModelId, LanguageModelName, LanguageModelProvider, LanguageModelProviderId,
LanguageModelProviderName, LanguageModelProviderState, LanguageModelRequest,
@@ -28,8 +31,8 @@ use crate::provider::util::{fix_streamed_json, parse_tool_arguments};
pub use settings::AnthropicAvailableModel as AvailableModel;
-const PROVIDER_ID: LanguageModelProviderId = language_model::ANTHROPIC_PROVIDER_ID;
-const PROVIDER_NAME: LanguageModelProviderName = language_model::ANTHROPIC_PROVIDER_NAME;
+const PROVIDER_ID: LanguageModelProviderId = ANTHROPIC_PROVIDER_ID;
+const PROVIDER_NAME: LanguageModelProviderName = ANTHROPIC_PROVIDER_NAME;
#[derive(Default, Clone, Debug, PartialEq)]
pub struct AnthropicSettings {
@@ -1,8 +1,8 @@
-use crate::ANTHROPIC_PROVIDER_ID;
use anthropic::ANTHROPIC_API_URL;
use anyhow::{Context as _, anyhow};
use gpui::BackgroundExecutor;
use http_client::{AsyncBody, HttpClient, Method, Request as HttpRequest};
+use language_model::{ANTHROPIC_PROVIDER_ID, LanguageModel};
use std::env;
use std::sync::Arc;
use util::ResultExt;
@@ -52,7 +52,7 @@ impl AnthropicEventType {
}
pub fn report_anthropic_event(
- model: &Arc<dyn crate::LanguageModel>,
+ model: &Arc<dyn LanguageModel>,
event: AnthropicEventData,
cx: &gpui::App,
) {
@@ -69,7 +69,7 @@ pub struct AnthropicEventReporter {
}
impl AnthropicEventReporter {
- pub fn new(model: &Arc<dyn crate::LanguageModel>, cx: &gpui::App) -> Self {
+ pub fn new(model: &Arc<dyn LanguageModel>, cx: &gpui::App) -> Self {
Self {
http_client: cx.http_client(),
executor: cx.background_executor().clone(),
@@ -19,12 +19,15 @@ use gpui::{AnyElement, AnyView, App, AsyncApp, Context, Entity, Subscription, Ta
use http_client::http::{HeaderMap, HeaderValue};
use http_client::{AsyncBody, HttpClient, HttpRequestExt, Method, Response, StatusCode};
use language_model::{
- AuthenticateError, IconOrSvg, LanguageModel, LanguageModelCacheConfiguration,
+ ANTHROPIC_PROVIDER_ID, ANTHROPIC_PROVIDER_NAME, AuthenticateError, GOOGLE_PROVIDER_ID,
+ GOOGLE_PROVIDER_NAME, IconOrSvg, LanguageModel, LanguageModelCacheConfiguration,
LanguageModelCompletionError, LanguageModelCompletionEvent, LanguageModelEffortLevel,
LanguageModelId, LanguageModelName, LanguageModelProvider, LanguageModelProviderId,
LanguageModelProviderName, LanguageModelProviderState, LanguageModelRequest,
LanguageModelToolChoice, LanguageModelToolSchemaFormat, LlmApiToken, NeedsLlmTokenRefresh,
- PaymentRequiredError, RateLimiter, RefreshLlmTokenListener,
+ OPEN_AI_PROVIDER_ID, OPEN_AI_PROVIDER_NAME, PaymentRequiredError, RateLimiter,
+ RefreshLlmTokenListener, X_AI_PROVIDER_ID, X_AI_PROVIDER_NAME, ZED_CLOUD_PROVIDER_ID,
+ ZED_CLOUD_PROVIDER_NAME,
};
use release_channel::AppVersion;
use schemars::JsonSchema;
@@ -53,8 +56,8 @@ use crate::provider::open_ai::{
};
use crate::provider::x_ai::count_xai_tokens;
-const PROVIDER_ID: LanguageModelProviderId = language_model::ZED_CLOUD_PROVIDER_ID;
-const PROVIDER_NAME: LanguageModelProviderName = language_model::ZED_CLOUD_PROVIDER_NAME;
+const PROVIDER_ID: LanguageModelProviderId = ZED_CLOUD_PROVIDER_ID;
+const PROVIDER_NAME: LanguageModelProviderName = ZED_CLOUD_PROVIDER_NAME;
#[derive(Default, Clone, Debug, PartialEq)]
pub struct ZedDotDevSettings {
@@ -568,20 +571,20 @@ impl LanguageModel for CloudLanguageModel {
fn upstream_provider_id(&self) -> LanguageModelProviderId {
use cloud_llm_client::LanguageModelProvider::*;
match self.model.provider {
- Anthropic => language_model::ANTHROPIC_PROVIDER_ID,
- OpenAi => language_model::OPEN_AI_PROVIDER_ID,
- Google => language_model::GOOGLE_PROVIDER_ID,
- XAi => language_model::X_AI_PROVIDER_ID,
+ Anthropic => ANTHROPIC_PROVIDER_ID,
+ OpenAi => OPEN_AI_PROVIDER_ID,
+ Google => GOOGLE_PROVIDER_ID,
+ XAi => X_AI_PROVIDER_ID,
}
}
fn upstream_provider_name(&self) -> LanguageModelProviderName {
use cloud_llm_client::LanguageModelProvider::*;
match self.model.provider {
- Anthropic => language_model::ANTHROPIC_PROVIDER_NAME,
- OpenAi => language_model::OPEN_AI_PROVIDER_NAME,
- Google => language_model::GOOGLE_PROVIDER_NAME,
- XAi => language_model::X_AI_PROVIDER_NAME,
+ Anthropic => ANTHROPIC_PROVIDER_NAME,
+ OpenAi => OPEN_AI_PROVIDER_NAME,
+ Google => GOOGLE_PROVIDER_NAME,
+ XAi => X_AI_PROVIDER_NAME,
}
}
@@ -1047,12 +1050,10 @@ where
fn provider_name(provider: &cloud_llm_client::LanguageModelProvider) -> LanguageModelProviderName {
match provider {
- cloud_llm_client::LanguageModelProvider::Anthropic => {
- language_model::ANTHROPIC_PROVIDER_NAME
- }
- cloud_llm_client::LanguageModelProvider::OpenAi => language_model::OPEN_AI_PROVIDER_NAME,
- cloud_llm_client::LanguageModelProvider::Google => language_model::GOOGLE_PROVIDER_NAME,
- cloud_llm_client::LanguageModelProvider::XAi => language_model::X_AI_PROVIDER_NAME,
+ cloud_llm_client::LanguageModelProvider::Anthropic => ANTHROPIC_PROVIDER_NAME,
+ cloud_llm_client::LanguageModelProvider::OpenAi => OPEN_AI_PROVIDER_NAME,
+ cloud_llm_client::LanguageModelProvider::Google => GOOGLE_PROVIDER_NAME,
+ cloud_llm_client::LanguageModelProvider::XAi => X_AI_PROVIDER_NAME,
}
}
@@ -13,9 +13,9 @@ use language_model::{
LanguageModelToolUse, LanguageModelToolUseId, MessageContent, StopReason,
};
use language_model::{
- IconOrSvg, LanguageModel, LanguageModelId, LanguageModelName, LanguageModelProvider,
- LanguageModelProviderId, LanguageModelProviderName, LanguageModelProviderState,
- LanguageModelRequest, RateLimiter, Role,
+ GOOGLE_PROVIDER_ID, GOOGLE_PROVIDER_NAME, IconOrSvg, LanguageModel, LanguageModelId,
+ LanguageModelName, LanguageModelProvider, LanguageModelProviderId, LanguageModelProviderName,
+ LanguageModelProviderState, LanguageModelRequest, RateLimiter, Role,
};
use schemars::JsonSchema;
use serde::{Deserialize, Serialize};
@@ -33,8 +33,8 @@ use util::ResultExt;
use language_model::ApiKeyState;
-const PROVIDER_ID: LanguageModelProviderId = language_model::GOOGLE_PROVIDER_ID;
-const PROVIDER_NAME: LanguageModelProviderName = language_model::GOOGLE_PROVIDER_NAME;
+const PROVIDER_ID: LanguageModelProviderId = GOOGLE_PROVIDER_ID;
+const PROVIDER_NAME: LanguageModelProviderName = GOOGLE_PROVIDER_NAME;
#[derive(Default, Clone, Debug, PartialEq)]
pub struct GoogleSettings {
@@ -10,7 +10,8 @@ use language_model::{
LanguageModelProvider, LanguageModelProviderId, LanguageModelProviderName,
LanguageModelProviderState, LanguageModelRequest, LanguageModelRequestMessage,
LanguageModelToolChoice, LanguageModelToolResultContent, LanguageModelToolUse,
- LanguageModelToolUseId, MessageContent, RateLimiter, Role, StopReason, TokenUsage, env_var,
+ LanguageModelToolUseId, MessageContent, OPEN_AI_PROVIDER_ID, OPEN_AI_PROVIDER_NAME,
+ RateLimiter, Role, StopReason, TokenUsage, env_var,
};
use menu;
use open_ai::responses::{
@@ -35,8 +36,8 @@ use util::ResultExt;
use crate::provider::util::{fix_streamed_json, parse_tool_arguments};
-const PROVIDER_ID: LanguageModelProviderId = language_model::OPEN_AI_PROVIDER_ID;
-const PROVIDER_NAME: LanguageModelProviderName = language_model::OPEN_AI_PROVIDER_NAME;
+const PROVIDER_ID: LanguageModelProviderId = OPEN_AI_PROVIDER_ID;
+const PROVIDER_NAME: LanguageModelProviderName = OPEN_AI_PROVIDER_NAME;
const API_KEY_ENV_VAR_NAME: &str = "OPENAI_API_KEY";
static API_KEY_ENV_VAR: LazyLock<EnvVar> = env_var!(API_KEY_ENV_VAR_NAME);