1use std::fmt;
2use std::sync::Arc;
3
4use anyhow::Result;
5use client::Client;
6use gpui::{
7 App, AppContext as _, AsyncApp, Context, Entity, EventEmitter, Global, ReadGlobal as _,
8};
9use icons::IconName;
10use proto::{Plan, TypedEnvelope};
11use schemars::JsonSchema;
12use serde::{Deserialize, Serialize};
13use smol::lock::{RwLock, RwLockUpgradableReadGuard, RwLockWriteGuard};
14use strum::EnumIter;
15use thiserror::Error;
16
17use crate::LanguageModelAvailability;
18
/// A cloud-hosted language model, tagged by its upstream provider.
///
/// Serialized with an external `"provider"` tag (lowercased variant name) so a
/// single configuration value selects both the provider and the model.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema)]
#[serde(tag = "provider", rename_all = "lowercase")]
pub enum CloudModel {
    /// A model hosted by Anthropic (Claude family).
    Anthropic(anthropic::Model),
    /// A model hosted by OpenAI (GPT / o-series family).
    OpenAi(open_ai::Model),
    /// A model hosted by Google (Gemini family).
    Google(google_ai::Model),
}
26
/// Models served under the "Zed" provider.
///
/// NOTE(review): presumably these are models Zed hosts directly — the variant
/// set is iterable via `EnumIter`; confirm against the consuming provider code.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema, EnumIter)]
pub enum ZedModel {
    /// Qwen2 7B Instruct; serialized under its Hugging Face model id.
    #[serde(rename = "Qwen/Qwen2-7B-Instruct")]
    Qwen2_7bInstruct,
}
32
33impl Default for CloudModel {
34 fn default() -> Self {
35 Self::Anthropic(anthropic::Model::default())
36 }
37}
38
39impl CloudModel {
40 pub fn id(&self) -> &str {
41 match self {
42 Self::Anthropic(model) => model.id(),
43 Self::OpenAi(model) => model.id(),
44 Self::Google(model) => model.id(),
45 }
46 }
47
48 pub fn display_name(&self) -> &str {
49 match self {
50 Self::Anthropic(model) => model.display_name(),
51 Self::OpenAi(model) => model.display_name(),
52 Self::Google(model) => model.display_name(),
53 }
54 }
55
56 pub fn icon(&self) -> Option<IconName> {
57 match self {
58 Self::Anthropic(_) => Some(IconName::AiAnthropicHosted),
59 _ => None,
60 }
61 }
62
63 pub fn max_token_count(&self) -> usize {
64 match self {
65 Self::Anthropic(model) => model.max_token_count(),
66 Self::OpenAi(model) => model.max_token_count(),
67 Self::Google(model) => model.max_token_count(),
68 }
69 }
70
71 /// Returns the availability of this model.
72 pub fn availability(&self) -> LanguageModelAvailability {
73 match self {
74 Self::Anthropic(model) => match model {
75 anthropic::Model::Claude3_5Sonnet
76 | anthropic::Model::Claude3_7Sonnet
77 | anthropic::Model::Claude3_7SonnetThinking => {
78 LanguageModelAvailability::RequiresPlan(Plan::Free)
79 }
80 anthropic::Model::Claude3Opus
81 | anthropic::Model::Claude3Sonnet
82 | anthropic::Model::Claude3Haiku
83 | anthropic::Model::Claude3_5Haiku
84 | anthropic::Model::Custom { .. } => {
85 LanguageModelAvailability::RequiresPlan(Plan::ZedPro)
86 }
87 },
88 Self::OpenAi(model) => match model {
89 open_ai::Model::ThreePointFiveTurbo
90 | open_ai::Model::Four
91 | open_ai::Model::FourTurbo
92 | open_ai::Model::FourOmni
93 | open_ai::Model::FourOmniMini
94 | open_ai::Model::O1Mini
95 | open_ai::Model::O1Preview
96 | open_ai::Model::O1
97 | open_ai::Model::O3Mini
98 | open_ai::Model::Custom { .. } => {
99 LanguageModelAvailability::RequiresPlan(Plan::ZedPro)
100 }
101 },
102 Self::Google(model) => match model {
103 google_ai::Model::Gemini15Pro
104 | google_ai::Model::Gemini15Flash
105 | google_ai::Model::Gemini20Pro
106 | google_ai::Model::Gemini20Flash
107 | google_ai::Model::Gemini20FlashThinking
108 | google_ai::Model::Gemini20FlashLite
109 | google_ai::Model::Gemini25ProExp0325
110 | google_ai::Model::Custom { .. } => {
111 LanguageModelAvailability::RequiresPlan(Plan::ZedPro)
112 }
113 },
114 }
115 }
116}
117
118#[derive(Error, Debug)]
119pub struct PaymentRequiredError;
120
121impl fmt::Display for PaymentRequiredError {
122 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
123 write!(
124 f,
125 "Payment required to use this language model. Please upgrade your account."
126 )
127 }
128}
129
130#[derive(Error, Debug)]
131pub struct MaxMonthlySpendReachedError;
132
133impl fmt::Display for MaxMonthlySpendReachedError {
134 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
135 write!(
136 f,
137 "Maximum spending limit reached for this month. For more usage, increase your spending limit."
138 )
139 }
140}
141
/// A cached LLM API token shared across clones, guarded by an async
/// read-write lock. Holds `None` until the first successful fetch.
#[derive(Clone, Default)]
pub struct LlmApiToken(Arc<RwLock<Option<String>>>);
144
145impl LlmApiToken {
146 pub async fn acquire(&self, client: &Arc<Client>) -> Result<String> {
147 let lock = self.0.upgradable_read().await;
148 if let Some(token) = lock.as_ref() {
149 Ok(token.to_string())
150 } else {
151 Self::fetch(RwLockUpgradableReadGuard::upgrade(lock).await, client).await
152 }
153 }
154
155 pub async fn refresh(&self, client: &Arc<Client>) -> Result<String> {
156 Self::fetch(self.0.write().await, client).await
157 }
158
159 async fn fetch(
160 mut lock: RwLockWriteGuard<'_, Option<String>>,
161 client: &Arc<Client>,
162 ) -> Result<String> {
163 let response = client.request(proto::GetLlmToken {}).await?;
164 *lock = Some(response.token.clone());
165 Ok(response.token.clone())
166 }
167}
168
/// Global handle to the singleton [`RefreshLlmTokenListener`] entity.
struct GlobalRefreshLlmTokenListener(Entity<RefreshLlmTokenListener>);

impl Global for GlobalRefreshLlmTokenListener {}

/// Event emitted when the server asks clients to refresh their LLM token.
pub struct RefreshLlmTokenEvent;

/// Listens for `RefreshLlmToken` messages from the server and re-emits them
/// as [`RefreshLlmTokenEvent`]s for in-app subscribers.
pub struct RefreshLlmTokenListener {
    // Held only to keep the message-handler subscription alive for the
    // lifetime of the listener.
    _llm_token_subscription: client::Subscription,
}

impl EventEmitter<RefreshLlmTokenEvent> for RefreshLlmTokenListener {}
180
181impl RefreshLlmTokenListener {
182 pub fn register(client: Arc<Client>, cx: &mut App) {
183 let listener = cx.new(|cx| RefreshLlmTokenListener::new(client, cx));
184 cx.set_global(GlobalRefreshLlmTokenListener(listener));
185 }
186
187 pub fn global(cx: &App) -> Entity<Self> {
188 GlobalRefreshLlmTokenListener::global(cx).0.clone()
189 }
190
191 fn new(client: Arc<Client>, cx: &mut Context<Self>) -> Self {
192 Self {
193 _llm_token_subscription: client
194 .add_message_handler(cx.weak_entity(), Self::handle_refresh_llm_token),
195 }
196 }
197
198 async fn handle_refresh_llm_token(
199 this: Entity<Self>,
200 _: TypedEnvelope<proto::RefreshLlmToken>,
201 mut cx: AsyncApp,
202 ) -> Result<()> {
203 this.update(&mut cx, |_this, cx| cx.emit(RefreshLlmTokenEvent))
204 }
205}