Detailed changes
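In brief: these hunks move `report_assistant_event` out of the `language_models` crate and into a new `telemetry` module in the lower-level `language_model` crate, which re-exports it from the crate root; the `ANTHROPIC_PROVIDER_ID` constant the helper depends on moves with it. Every call site then swaps its `use language_models::report_assistant_event;` import for the re-export, and the Cargo changes mirror the move: one consumer drops its `language_models` dependency entirely, while `language_model` picks up `client` and `telemetry_events`. The recurring source edit is a one-line import swap; schematically:

```rust
// Before: the telemetry helper lived in the higher-level provider crate.
use language_models::report_assistant_event;

// After: it comes in through language_model's crate-root re-export,
// usually folded into the braced import each call site already has.
use language_model::{report_assistant_event, LanguageModelRegistry};
```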
@@ -395,7 +395,6 @@ dependencies = [
"language",
"language_model",
"language_model_selector",
- "language_models",
"languages",
"log",
"lsp",
@@ -7014,6 +7013,7 @@ dependencies = [
"anthropic",
"anyhow",
"base64 0.22.1",
+ "client",
"collections",
"futures 0.3.31",
"google_ai",
@@ -7029,6 +7029,7 @@ dependencies = [
"serde_json",
"smol",
"strum",
+ "telemetry_events",
"thiserror 1.0.69",
"ui",
"util",
@@ -7081,7 +7082,6 @@ dependencies = [
"settings",
"smol",
"strum",
- "telemetry_events",
"theme",
"thiserror 1.0.69",
"tiktoken-rs",
@@ -43,7 +43,6 @@ indoc.workspace = true
language.workspace = true
language_model.workspace = true
language_model_selector.workspace = true
-language_models.workspace = true
log.workspace = true
lsp.workspace = true
menu.workspace = true
@@ -32,11 +32,10 @@ use gpui::{
};
use language::{line_diff, Buffer, IndentKind, Point, Selection, TransactionId};
use language_model::{
- LanguageModel, LanguageModelRegistry, LanguageModelRequest, LanguageModelRequestMessage,
- LanguageModelTextStream, Role,
+ report_assistant_event, LanguageModel, LanguageModelRegistry, LanguageModelRequest,
+ LanguageModelRequestMessage, LanguageModelTextStream, Role,
};
use language_model_selector::{LanguageModelSelector, LanguageModelSelectorPopoverMenu};
-use language_models::report_assistant_event;
use multi_buffer::MultiBufferRow;
use parking_lot::Mutex;
use project::{CodeAction, ProjectTransaction};
@@ -16,10 +16,10 @@ use gpui::{
};
use language::Buffer;
use language_model::{
- LanguageModelRegistry, LanguageModelRequest, LanguageModelRequestMessage, Role,
+ report_assistant_event, LanguageModelRegistry, LanguageModelRequest,
+ LanguageModelRequestMessage, Role,
};
use language_model_selector::{LanguageModelSelector, LanguageModelSelectorPopoverMenu};
-use language_models::report_assistant_event;
use prompt_library::PromptBuilder;
use settings::{update_settings_file, Settings};
use std::{
@@ -9,10 +9,9 @@ use futures::{channel::mpsc, future::LocalBoxFuture, join, SinkExt, Stream, Stre
use gpui::{App, AppContext as _, Context, Entity, EventEmitter, Subscription, Task};
use language::{line_diff, Buffer, IndentKind, Point, TransactionId};
use language_model::{
- LanguageModel, LanguageModelRegistry, LanguageModelRequest, LanguageModelRequestMessage,
- LanguageModelTextStream, Role,
+ report_assistant_event, LanguageModel, LanguageModelRegistry, LanguageModelRequest,
+ LanguageModelRequestMessage, LanguageModelTextStream, Role,
};
-use language_models::report_assistant_event;
use multi_buffer::MultiBufferRow;
use parking_lot::Mutex;
use prompt_library::PromptBuilder;
@@ -24,8 +24,7 @@ use gpui::{
UpdateGlobal, WeakEntity, Window,
};
use language::{Buffer, Point, Selection, TransactionId};
-use language_model::LanguageModelRegistry;
-use language_models::report_assistant_event;
+use language_model::{report_assistant_event, LanguageModelRegistry};
use multi_buffer::MultiBufferRow;
use parking_lot::Mutex;
use project::{CodeAction, ProjectTransaction};
@@ -2,8 +2,7 @@ use crate::inline_prompt_editor::CodegenStatus;
use client::telemetry::Telemetry;
use futures::{channel::mpsc, SinkExt, StreamExt};
use gpui::{App, AppContext as _, Context, Entity, EventEmitter, Task};
-use language_model::{LanguageModelRegistry, LanguageModelRequest};
-use language_models::report_assistant_event;
+use language_model::{report_assistant_event, LanguageModelRegistry, LanguageModelRequest};
use std::{sync::Arc, time::Instant};
use telemetry_events::{AssistantEvent, AssistantKind, AssistantPhase};
use terminal::Terminal;
@@ -13,9 +13,9 @@ use fs::Fs;
use gpui::{App, Entity, Focusable, Global, Subscription, UpdateGlobal, WeakEntity};
use language::Buffer;
use language_model::{
- LanguageModelRegistry, LanguageModelRequest, LanguageModelRequestMessage, Role,
+ report_assistant_event, LanguageModelRegistry, LanguageModelRequest,
+ LanguageModelRequestMessage, Role,
};
-use language_models::report_assistant_event;
use prompt_library::PromptBuilder;
use std::sync::Arc;
use telemetry_events::{AssistantEvent, AssistantKind, AssistantPhase};
@@ -19,14 +19,11 @@ use gpui::{
};
use language::{AnchorRangeExt, Bias, Buffer, LanguageRegistry, OffsetRangeExt, Point, ToOffset};
use language_model::{
- LanguageModel, LanguageModelCacheConfiguration, LanguageModelCompletionEvent,
- LanguageModelImage, LanguageModelRegistry, LanguageModelRequest, LanguageModelRequestMessage,
- LanguageModelToolUseId, MessageContent, Role, StopReason,
-};
-use language_models::{
- provider::cloud::{MaxMonthlySpendReachedError, PaymentRequiredError},
- report_assistant_event,
+ report_assistant_event, LanguageModel, LanguageModelCacheConfiguration,
+ LanguageModelCompletionEvent, LanguageModelImage, LanguageModelRegistry, LanguageModelRequest,
+ LanguageModelRequestMessage, LanguageModelToolUseId, MessageContent, Role, StopReason,
};
+use language_models::provider::cloud::{MaxMonthlySpendReachedError, PaymentRequiredError};
use open_ai::Model as OpenAiModel;
use paths::contexts_dir;
use project::Project;
@@ -19,6 +19,7 @@ test-support = []
anthropic = { workspace = true, features = ["schemars"] }
anyhow.workspace = true
base64.workspace = true
+client.workspace = true
collections.workspace = true
futures.workspace = true
google_ai = { workspace = true, features = ["schemars"] }
@@ -34,6 +35,7 @@ serde.workspace = true
serde_json.workspace = true
smol.workspace = true
strum.workspace = true
+telemetry_events.workspace = true
thiserror.workspace = true
ui.workspace = true
util.workspace = true
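The two additions line up with what the moved module needs: `telemetry_events` supplies the `AssistantEvent`, `AssistantKind`, and `AssistantPhase` types that remain as context lines in the telemetry hunk below, and `client` presumably provides the telemetry handle the events are reported through (the terminal codegen hunk above imports `client::telemetry::Telemetry` for exactly that).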
@@ -3,6 +3,7 @@ mod rate_limiter;
mod registry;
mod request;
mod role;
+mod telemetry;

#[cfg(any(test, feature = "test-support"))]
pub mod fake_provider;
@@ -11,12 +12,7 @@ use anyhow::Result;
use futures::FutureExt;
use futures::{future::BoxFuture, stream::BoxStream, StreamExt, TryStreamExt as _};
use gpui::{AnyElement, AnyView, App, AsyncApp, SharedString, Task, Window};
-pub use model::*;
use proto::Plan;
-pub use rate_limiter::*;
-pub use registry::*;
-pub use request::*;
-pub use role::*;
use schemars::JsonSchema;
use serde::{de::DeserializeOwned, Deserialize, Serialize};
use std::fmt;
@@ -24,6 +20,13 @@ use std::{future::Future, sync::Arc};
use thiserror::Error;
use ui::IconName;

+pub use crate::model::*;
+pub use crate::rate_limiter::*;
+pub use crate::registry::*;
+pub use crate::request::*;
+pub use crate::role::*;
+pub use crate::telemetry::*;
+
pub const ZED_CLOUD_PROVIDER_ID: &str = "zed.dev";

pub fn init(cx: &mut App) {
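The regrouped `pub use crate::…::*;` block is what keeps the move invisible downstream: `language_model::report_assistant_event` now resolves through the `telemetry` glob re-export. A self-contained sketch of the pattern, with illustrative names rather than the crate's real API:

```rust
// main.rs — minimal sketch of the private-module + glob re-export layout.
mod telemetry {
    // Stand-in for report_assistant_event; the real signature is only
    // partially visible in this diff.
    pub fn report_event(kind: &str) {
        println!("telemetry event: {kind}");
    }
}

// Re-exported at the crate root, so callers never name the module.
pub use crate::telemetry::*;

fn main() {
    report_event("assistant.response"); // resolves via the re-export
}
```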
@@ -8,7 +8,7 @@ use std::sync::Arc;
use telemetry_events::{AssistantEvent, AssistantKind, AssistantPhase};
use util::ResultExt;

-use crate::provider::anthropic::PROVIDER_ID as ANTHROPIC_PROVIDER_ID;
+pub const ANTHROPIC_PROVIDER_ID: &str = "anthropic";

pub fn report_assistant_event(
event: AssistantEvent,
@@ -40,7 +40,6 @@ serde_json.workspace = true
settings.workspace = true
smol.workspace = true
strum.workspace = true
-telemetry_events.workspace = true
theme.workspace = true
thiserror.workspace = true
tiktoken-rs.workspace = true
@@ -6,7 +6,6 @@ use gpui::{App, Context, Entity};
use language_model::{LanguageModelProviderId, LanguageModelRegistry, ZED_CLOUD_PROVIDER_ID};
use provider::deepseek::DeepSeekLanguageModelProvider;

-mod logging;
pub mod provider;
mod settings;

@@ -21,7 +20,6 @@ use crate::provider::mistral::MistralLanguageModelProvider;
use crate::provider::ollama::OllamaLanguageModelProvider;
use crate::provider::open_ai::OpenAiLanguageModelProvider;
pub use crate::settings::*;
-pub use logging::report_assistant_event;

pub fn init(user_store: Entity<UserStore>, client: Arc<Client>, fs: Arc<dyn Fs>, cx: &mut App) {
crate::settings::init(fs, cx);
@@ -27,7 +27,7 @@ use theme::ThemeSettings;
use ui::{prelude::*, Icon, IconName, Tooltip};
use util::{maybe, ResultExt};

-pub const PROVIDER_ID: &str = "anthropic";
+const PROVIDER_ID: &str = language_model::ANTHROPIC_PROVIDER_ID;
const PROVIDER_NAME: &str = "Anthropic";

#[derive(Default, Clone, Debug, PartialEq)]
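Together with the telemetry hunk above, these last two hunks invert ownership of the provider id: `language_model` sits below `language_models` in the dependency graph and cannot import from it, so the canonical `"anthropic"` string now lives next to `report_assistant_event`, and the Anthropic provider aliases it rather than keeping a second copy that could drift. A minimal sketch of that shape, with stand-in module names:

```rust
// Stand-in for the language_model crate: owns the canonical id.
mod language_model_like {
    pub const ANTHROPIC_PROVIDER_ID: &str = "anthropic";
}

// Stand-in for language_models::provider::anthropic: aliases the shared
// constant instead of redefining the literal.
mod provider_like {
    pub const PROVIDER_ID: &str = crate::language_model_like::ANTHROPIC_PROVIDER_ID;
}

fn main() {
    assert_eq!(provider_like::PROVIDER_ID, "anthropic");
}
```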