1use std::fmt;
2use std::sync::Arc;
3
4use anyhow::Result;
5use client::Client;
6use gpui::{
7 App, AppContext as _, AsyncApp, Context, Entity, EventEmitter, Global, ReadGlobal as _,
8};
9use proto::{Plan, TypedEnvelope};
10use schemars::JsonSchema;
11use serde::{Deserialize, Serialize};
12use smol::lock::{RwLock, RwLockUpgradableReadGuard, RwLockWriteGuard};
13use strum::EnumIter;
14use thiserror::Error;
15use ui::IconName;
16
17use crate::LanguageModelAvailability;
18
/// A language model hosted by Zed's cloud service, tagged by its upstream
/// provider. Serialized with an adjacent `"provider"` tag in lowercase
/// (e.g. `{"provider": "anthropic", ...}`).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema)]
#[serde(tag = "provider", rename_all = "lowercase")]
pub enum CloudModel {
    Anthropic(anthropic::Model),
    OpenAi(open_ai::Model),
    Google(google_ai::Model),
}
26
/// Models hosted directly by Zed (as opposed to a third-party provider).
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema, EnumIter)]
pub enum ZedModel {
    // Serialized name matches the upstream model path (Hugging Face style).
    #[serde(rename = "Qwen/Qwen2-7B-Instruct")]
    Qwen2_7bInstruct,
}
32
33impl Default for CloudModel {
34 fn default() -> Self {
35 Self::Anthropic(anthropic::Model::default())
36 }
37}
38
impl CloudModel {
    /// Returns the provider-specific identifier of the underlying model.
    pub fn id(&self) -> &str {
        match self {
            Self::Anthropic(model) => model.id(),
            Self::OpenAi(model) => model.id(),
            Self::Google(model) => model.id(),
        }
    }

    /// Returns the human-readable name of the underlying model.
    pub fn display_name(&self) -> &str {
        match self {
            Self::Anthropic(model) => model.display_name(),
            Self::OpenAi(model) => model.display_name(),
            Self::Google(model) => model.display_name(),
        }
    }

    /// Returns the icon to display for this model, if any.
    ///
    /// Only Anthropic-hosted models currently have a dedicated icon.
    pub fn icon(&self) -> Option<IconName> {
        match self {
            Self::Anthropic(_) => Some(IconName::AiAnthropicHosted),
            _ => None,
        }
    }

    /// Returns the maximum token count supported by the underlying model.
    pub fn max_token_count(&self) -> usize {
        match self {
            Self::Anthropic(model) => model.max_token_count(),
            Self::OpenAi(model) => model.max_token_count(),
            Self::Google(model) => model.max_token_count(),
        }
    }

    /// Returns the availability of this model.
    ///
    /// Claude 3.5 Sonnet is available on the Free plan; every other model
    /// requires Zed Pro. The inner matches are deliberately exhaustive (no
    /// `_` arm) so that adding a provider model variant forces this mapping
    /// to be revisited.
    pub fn availability(&self) -> LanguageModelAvailability {
        match self {
            Self::Anthropic(model) => match model {
                anthropic::Model::Claude3_5Sonnet => {
                    LanguageModelAvailability::RequiresPlan(Plan::Free)
                }
                anthropic::Model::Claude3Opus
                | anthropic::Model::Claude3Sonnet
                | anthropic::Model::Claude3Haiku
                | anthropic::Model::Claude3_5Haiku
                | anthropic::Model::Claude3_7Sonnet
                | anthropic::Model::Custom { .. } => {
                    LanguageModelAvailability::RequiresPlan(Plan::ZedPro)
                }
            },
            Self::OpenAi(model) => match model {
                open_ai::Model::ThreePointFiveTurbo
                | open_ai::Model::Four
                | open_ai::Model::FourTurbo
                | open_ai::Model::FourOmni
                | open_ai::Model::FourOmniMini
                | open_ai::Model::O1Mini
                | open_ai::Model::O1Preview
                | open_ai::Model::O1
                | open_ai::Model::O3Mini
                | open_ai::Model::Custom { .. } => {
                    LanguageModelAvailability::RequiresPlan(Plan::ZedPro)
                }
            },
            Self::Google(model) => match model {
                google_ai::Model::Gemini15Pro
                | google_ai::Model::Gemini15Flash
                | google_ai::Model::Gemini20Pro
                | google_ai::Model::Gemini20Flash
                | google_ai::Model::Gemini20FlashThinking
                | google_ai::Model::Gemini20FlashLite
                | google_ai::Model::Custom { .. } => {
                    LanguageModelAvailability::RequiresPlan(Plan::ZedPro)
                }
            },
        }
    }
}
115
/// Error indicating the request was rejected because payment is required
/// (the user-facing message suggests upgrading the account).
#[derive(Error, Debug)]
pub struct PaymentRequiredError;
118
119impl fmt::Display for PaymentRequiredError {
120 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
121 write!(
122 f,
123 "Payment required to use this language model. Please upgrade your account."
124 )
125 }
126}
127
/// Error indicating the account's monthly spending limit has been reached
/// (the user-facing message suggests raising the limit).
#[derive(Error, Debug)]
pub struct MaxMonthlySpendReachedError;
130
131impl fmt::Display for MaxMonthlySpendReachedError {
132 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
133 write!(
134 f,
135 "Maximum spending limit reached for this month. For more usage, increase your spending limit."
136 )
137 }
138}
139
/// Shared, lazily-populated cache of the token used to authenticate with the
/// LLM service. Clones are cheap and all share the same `Arc`-wrapped slot.
#[derive(Clone, Default)]
pub struct LlmApiToken(Arc<RwLock<Option<String>>>);
142
143impl LlmApiToken {
144 pub async fn acquire(&self, client: &Arc<Client>) -> Result<String> {
145 let lock = self.0.upgradable_read().await;
146 if let Some(token) = lock.as_ref() {
147 Ok(token.to_string())
148 } else {
149 Self::fetch(RwLockUpgradableReadGuard::upgrade(lock).await, client).await
150 }
151 }
152
153 pub async fn refresh(&self, client: &Arc<Client>) -> Result<String> {
154 Self::fetch(self.0.write().await, client).await
155 }
156
157 async fn fetch<'a>(
158 mut lock: RwLockWriteGuard<'a, Option<String>>,
159 client: &Arc<Client>,
160 ) -> Result<String> {
161 let response = client.request(proto::GetLlmToken {}).await?;
162 *lock = Some(response.token.clone());
163 Ok(response.token.clone())
164 }
165}
166
/// Wrapper that lets the single `RefreshLlmTokenListener` entity be stored
/// in, and read back from, the GPUI global registry.
struct GlobalRefreshLlmTokenListener(Entity<RefreshLlmTokenListener>);

impl Global for GlobalRefreshLlmTokenListener {}
170
171pub struct RefreshLlmTokenEvent;
172
/// Listens for `RefreshLlmToken` RPC messages from the server and re-emits
/// them as `RefreshLlmTokenEvent`s for in-app subscribers.
pub struct RefreshLlmTokenListener {
    // Held (never read) so the RPC message handler stays registered for this
    // entity's lifetime; dropping it would unregister the handler.
    _llm_token_subscription: client::Subscription,
}

impl EventEmitter<RefreshLlmTokenEvent> for RefreshLlmTokenListener {}
178
impl RefreshLlmTokenListener {
    /// Creates the listener entity and installs it as the app-wide global.
    pub fn register(client: Arc<Client>, cx: &mut App) {
        let listener = cx.new(|cx| RefreshLlmTokenListener::new(client, cx));
        cx.set_global(GlobalRefreshLlmTokenListener(listener));
    }

    /// Returns the globally registered listener.
    ///
    /// NOTE(review): this reads the global unchecked — presumably it panics
    /// if `register` was never called; confirm callers guarantee registration
    /// happens first.
    pub fn global(cx: &App) -> Entity<Self> {
        GlobalRefreshLlmTokenListener::global(cx).0.clone()
    }

    fn new(client: Arc<Client>, cx: &mut Context<Self>) -> Self {
        Self {
            // Register the RPC handler against a weak handle so the
            // subscription does not keep the entity alive on its own.
            _llm_token_subscription: client
                .add_message_handler(cx.weak_entity(), Self::handle_refresh_llm_token),
        }
    }

    /// RPC handler: re-broadcasts the server's `RefreshLlmToken` message as a
    /// `RefreshLlmTokenEvent` to this entity's observers.
    async fn handle_refresh_llm_token(
        this: Entity<Self>,
        _: TypedEnvelope<proto::RefreshLlmToken>,
        mut cx: AsyncApp,
    ) -> Result<()> {
        this.update(&mut cx, |_this, cx| cx.emit(RefreshLlmTokenEvent))
    }
}