mod authorization;
pub mod db;
mod telemetry;
mod token;

use crate::{
    api::CloudflareIpCountryHeader, build_clickhouse_client, db::UserId, executor::Executor, Cents,
    Config, Error, Result,
};
use anyhow::{anyhow, Context as _};
use authorization::authorize_access_to_language_model;
use axum::routing::get;
use axum::{
    body::Body,
    http::{self, HeaderName, HeaderValue, Request, StatusCode},
    middleware::{self, Next},
    response::{IntoResponse, Response},
    routing::post,
    Extension, Json, Router, TypedHeader,
};
use chrono::{DateTime, Duration, Utc};
use collections::HashMap;
use db::TokenUsage;
use db::{usage_measure::UsageMeasure, ActiveUserCount, LlmDatabase};
use futures::{Stream, StreamExt as _};
use reqwest_client::ReqwestClient;
use rpc::{
    proto::Plan, LanguageModelProvider, PerformCompletionParams, EXPIRED_LLM_TOKEN_HEADER_NAME,
};
use rpc::{ListModelsResponse, MAX_LLM_MONTHLY_SPEND_REACHED_HEADER_NAME};
use std::{
    pin::Pin,
    sync::Arc,
    task::{Context, Poll},
};
use strum::IntoEnumIterator;
use telemetry::{report_llm_rate_limit, report_llm_usage, LlmRateLimitEventRow, LlmUsageEventRow};
use tokio::sync::RwLock;
use util::ResultExt;

pub use token::*;

pub struct LlmState {
    pub config: Config,
    pub executor: Executor,
    pub db: Arc<LlmDatabase>,
    pub http_client: ReqwestClient,
    pub clickhouse_client: Option<clickhouse::Client>,
    active_user_count_by_model:
        RwLock<HashMap<(LanguageModelProvider, String), (DateTime<Utc>, ActiveUserCount)>>,
}

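/// How long a cached per-model active user count remains valid before it is
/// refreshed from the database.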
const ACTIVE_USER_COUNT_CACHE_DURATION: Duration = Duration::seconds(30);

impl LlmState {
    pub async fn new(config: Config, executor: Executor) -> Result<Arc<Self>> {
        let database_url = config
            .llm_database_url
            .as_ref()
            .ok_or_else(|| anyhow!("missing LLM_DATABASE_URL"))?;
        let max_connections = config
            .llm_database_max_connections
            .ok_or_else(|| anyhow!("missing LLM_DATABASE_MAX_CONNECTIONS"))?;

        let mut db_options = db::ConnectOptions::new(database_url);
        db_options.max_connections(max_connections);
        let mut db = LlmDatabase::new(db_options, executor.clone()).await?;
        db.initialize().await?;

        let db = Arc::new(db);

        let user_agent = format!("Zed Server/{}", env!("CARGO_PKG_VERSION"));
        let http_client =
            ReqwestClient::user_agent(&user_agent).context("failed to construct http client")?;

        let this = Self {
            executor,
            db,
            http_client,
            clickhouse_client: config
                .clickhouse_url
                .as_ref()
                .and_then(|_| build_clickhouse_client(&config).log_err()),
            active_user_count_by_model: RwLock::new(HashMap::default()),
            config,
        };

        Ok(Arc::new(this))
    }

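    /// Returns the number of recently active users for the given model,
    /// cached for [`ACTIVE_USER_COUNT_CACHE_DURATION`] to avoid querying the
    /// database on every request.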
    pub async fn get_active_user_count(
        &self,
        provider: LanguageModelProvider,
        model: &str,
    ) -> Result<ActiveUserCount> {
        let now = Utc::now();

        {
            let active_user_count_by_model = self.active_user_count_by_model.read().await;
            if let Some((last_updated, count)) =
                active_user_count_by_model.get(&(provider, model.to_string()))
            {
                if now - *last_updated < ACTIVE_USER_COUNT_CACHE_DURATION {
                    return Ok(*count);
                }
            }
        }

        let mut cache = self.active_user_count_by_model.write().await;
        let new_count = self.db.get_active_user_count(provider, model, now).await?;
        cache.insert((provider, model.to_string()), (now, new_count));
        Ok(new_count)
    }
}

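/// Builds the router for the LLM service. Every route is wrapped in
/// [`validate_api_token`], so handlers can assume a valid token's claims are
/// present in the request extensions.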
pub fn routes() -> Router<(), Body> {
    Router::new()
        .route("/models", get(list_models))
        .route("/completion", post(perform_completion))
        .layer(middleware::from_fn(validate_api_token))
}

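// A minimal sketch of how this router might be mounted, assuming an
// `llm_state: Arc<LlmState>` built via `LlmState::new` (the actual server
// wiring lives elsewhere in the crate):
//
//     let app = routes().layer(Extension(llm_state));
//     axum::Server::bind(&addr).serve(app.into_make_service()).await?;

/// Middleware that authenticates LLM API requests: extracts the `Bearer`
/// token from the `Authorization` header, validates it, rejects revoked or
/// expired tokens, and attaches the decoded claims to the request extensions.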
async fn validate_api_token<B>(mut req: Request<B>, next: Next<B>) -> impl IntoResponse {
    let token = req
        .headers()
        .get(http::header::AUTHORIZATION)
        .and_then(|header| header.to_str().ok())
        .ok_or_else(|| {
            Error::http(
                StatusCode::BAD_REQUEST,
                "missing authorization header".to_string(),
            )
        })?
        .strip_prefix("Bearer ")
        .ok_or_else(|| {
            Error::http(
                StatusCode::BAD_REQUEST,
                "invalid authorization header".to_string(),
            )
        })?;

    let state = req.extensions().get::<Arc<LlmState>>().unwrap();
    match LlmTokenClaims::validate(token, &state.config) {
        Ok(claims) => {
            if state.db.is_access_token_revoked(&claims.jti).await? {
                return Err(Error::http(
                    StatusCode::UNAUTHORIZED,
                    "unauthorized".to_string(),
                ));
            }

            tracing::Span::current()
                .record("user_id", claims.user_id)
                .record("login", claims.github_user_login.clone())
                .record("authn.jti", &claims.jti)
                .record("is_staff", claims.is_staff);

            req.extensions_mut().insert(claims);
            Ok::<_, Error>(next.run(req).await.into_response())
        }
        Err(ValidateLlmTokenError::Expired) => Err(Error::Http(
            StatusCode::UNAUTHORIZED,
            "unauthorized".to_string(),
            [(
                HeaderName::from_static(EXPIRED_LLM_TOKEN_HEADER_NAME),
                HeaderValue::from_static("true"),
            )]
            .into_iter()
            .collect(),
        )),
        Err(_err) => Err(Error::http(
            StatusCode::UNAUTHORIZED,
            "unauthorized".to_string(),
        )),
    }
}

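/// Lists the language models the authenticated user is authorized to access.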
async fn list_models(
    Extension(state): Extension<Arc<LlmState>>,
    Extension(claims): Extension<LlmTokenClaims>,
    country_code_header: Option<TypedHeader<CloudflareIpCountryHeader>>,
) -> Result<Json<ListModelsResponse>> {
    let country_code = country_code_header.map(|header| header.to_string());

    let mut accessible_models = Vec::new();

    for (provider, model) in state.db.all_models() {
        let authorize_result = authorize_access_to_language_model(
            &state.config,
            &claims,
            country_code.as_deref(),
            provider,
            &model.name,
        );

        if authorize_result.is_ok() {
            accessible_models.push(rpc::LanguageModel {
                provider,
                name: model.name,
            });
        }
    }

    Ok(Json(ListModelsResponse {
        models: accessible_models,
    }))
}

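/// Proxies a completion request to the configured upstream provider,
/// enforcing authorization and usage limits, and streams the response back
/// to the client while counting tokens.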
async fn perform_completion(
    Extension(state): Extension<Arc<LlmState>>,
    Extension(claims): Extension<LlmTokenClaims>,
    country_code_header: Option<TypedHeader<CloudflareIpCountryHeader>>,
    Json(params): Json<PerformCompletionParams>,
) -> Result<impl IntoResponse> {
    let model = normalize_model_name(
        state.db.model_names_for_provider(params.provider),
        params.model,
    );

    authorize_access_to_language_model(
        &state.config,
        &claims,
        country_code_header
            .map(|header| header.to_string())
            .as_deref(),
        params.provider,
        &model,
    )?;

    check_usage_limit(&state, params.provider, &model, &claims).await?;

    let stream = match params.provider {
        LanguageModelProvider::Anthropic => {
            let api_key = if claims.is_staff {
                state
                    .config
                    .anthropic_staff_api_key
                    .as_ref()
                    .context("no Anthropic AI staff API key configured on the server")?
            } else {
                state
                    .config
                    .anthropic_api_key
                    .as_ref()
                    .context("no Anthropic AI API key configured on the server")?
            };

            let mut request: anthropic::Request =
                serde_json::from_str(params.provider_request.get())?;

            // Override the model on the request with the latest version of the model that is
            // known to the server.
            //
            // Right now, we use the version that's defined in `model.id()`, but we will likely
            // want to change this code once a new version of an Anthropic model is released,
            // so that users can use the new version without having to update Zed.
            request.model = match model.as_str() {
                "claude-3-5-sonnet" => anthropic::Model::Claude3_5Sonnet.id().to_string(),
                "claude-3-opus" => anthropic::Model::Claude3Opus.id().to_string(),
                "claude-3-haiku" => anthropic::Model::Claude3Haiku.id().to_string(),
                "claude-3-sonnet" => anthropic::Model::Claude3Sonnet.id().to_string(),
                _ => request.model,
            };

            let (chunks, rate_limit_info) = anthropic::stream_completion_with_rate_limit_info(
                &state.http_client,
                anthropic::ANTHROPIC_API_URL,
                api_key,
                request,
                None,
            )
            .await
            .map_err(|err| match err {
                anthropic::AnthropicError::ApiError(ref api_error) => match api_error.code() {
                    Some(anthropic::ApiErrorCode::RateLimitError) => {
                        tracing::info!(
                            target: "upstream rate limit exceeded",
                            user_id = claims.user_id,
                            login = claims.github_user_login,
                            authn.jti = claims.jti,
                            is_staff = claims.is_staff,
                            provider = params.provider.to_string(),
                            model = model
                        );

                        Error::http(
                            StatusCode::TOO_MANY_REQUESTS,
                            "Upstream Anthropic rate limit exceeded.".to_string(),
                        )
                    }
                    Some(anthropic::ApiErrorCode::InvalidRequestError) => {
                        Error::http(StatusCode::BAD_REQUEST, api_error.message.clone())
                    }
                    Some(anthropic::ApiErrorCode::OverloadedError) => {
                        Error::http(StatusCode::SERVICE_UNAVAILABLE, api_error.message.clone())
                    }
                    Some(_) => {
                        Error::http(StatusCode::INTERNAL_SERVER_ERROR, api_error.message.clone())
                    }
                    None => Error::Internal(anyhow!(err)),
                },
                anthropic::AnthropicError::Other(err) => Error::Internal(err),
            })?;

            if let Some(rate_limit_info) = rate_limit_info {
                tracing::info!(
                    target: "upstream rate limit",
                    is_staff = claims.is_staff,
                    provider = params.provider.to_string(),
                    model = model,
                    tokens_remaining = rate_limit_info.tokens_remaining,
                    requests_remaining = rate_limit_info.requests_remaining,
                    requests_reset = ?rate_limit_info.requests_reset,
                    tokens_reset = ?rate_limit_info.tokens_reset,
                );
            }

            chunks
                .map(move |event| {
                    let chunk = event?;
                    let (
                        input_tokens,
                        output_tokens,
                        cache_creation_input_tokens,
                        cache_read_input_tokens,
                    ) = match &chunk {
                        anthropic::Event::MessageStart {
                            message: anthropic::Response { usage, .. },
                        }
                        | anthropic::Event::MessageDelta { usage, .. } => (
                            usage.input_tokens.unwrap_or(0) as usize,
                            usage.output_tokens.unwrap_or(0) as usize,
                            usage.cache_creation_input_tokens.unwrap_or(0) as usize,
                            usage.cache_read_input_tokens.unwrap_or(0) as usize,
                        ),
                        _ => (0, 0, 0, 0),
                    };

                    anyhow::Ok(CompletionChunk {
                        bytes: serde_json::to_vec(&chunk).unwrap(),
                        input_tokens,
                        output_tokens,
                        cache_creation_input_tokens,
                        cache_read_input_tokens,
                    })
                })
                .boxed()
        }
        LanguageModelProvider::OpenAi => {
            let api_key = state
                .config
                .openai_api_key
                .as_ref()
                .context("no OpenAI API key configured on the server")?;
            let chunks = open_ai::stream_completion(
                &state.http_client,
                open_ai::OPEN_AI_API_URL,
                api_key,
                serde_json::from_str(params.provider_request.get())?,
                None,
            )
            .await?;

            chunks
                .map(|event| {
                    event.map(|chunk| {
                        let input_tokens =
                            chunk.usage.as_ref().map_or(0, |u| u.prompt_tokens) as usize;
                        let output_tokens =
                            chunk.usage.as_ref().map_or(0, |u| u.completion_tokens) as usize;
                        CompletionChunk {
                            bytes: serde_json::to_vec(&chunk).unwrap(),
                            input_tokens,
                            output_tokens,
                            cache_creation_input_tokens: 0,
                            cache_read_input_tokens: 0,
                        }
                    })
                })
                .boxed()
        }
        LanguageModelProvider::Google => {
            let api_key = state
                .config
                .google_ai_api_key
                .as_ref()
                .context("no Google AI API key configured on the server")?;
            let chunks = google_ai::stream_generate_content(
                &state.http_client,
                google_ai::API_URL,
                api_key,
                serde_json::from_str(params.provider_request.get())?,
                None,
            )
            .await?;

            chunks
                .map(|event| {
                    event.map(|chunk| {
                        // TODO - implement token counting for Google AI
                        CompletionChunk {
                            bytes: serde_json::to_vec(&chunk).unwrap(),
                            input_tokens: 0,
                            output_tokens: 0,
                            cache_creation_input_tokens: 0,
                            cache_read_input_tokens: 0,
                        }
                    })
                })
                .boxed()
        }
    };

    Ok(Response::new(Body::wrap_stream(TokenCountingStream {
        state,
        claims,
        provider: params.provider,
        model,
        tokens: TokenUsage::default(),
        inner_stream: stream,
    })))
}

fn normalize_model_name(known_models: Vec<String>, name: String) -> String {
    if let Some(known_model_name) = known_models
        .iter()
        .filter(|known_model_name| name.starts_with(known_model_name.as_str()))
        .max_by_key(|known_model_name| known_model_name.len())
    {
        known_model_name.to_string()
    } else {
        name
    }
}
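
// A small sketch of the intended behavior of `normalize_model_name`: a dated
// model identifier normalizes to the longest matching known prefix, and
// unknown names pass through unchanged. The model names below are
// illustrative.
#[cfg(test)]
mod normalize_model_name_tests {
    use super::*;

    #[test]
    fn longest_known_prefix_wins() {
        let known = vec![
            "claude-3-5-sonnet".to_string(),
            "claude-3-sonnet".to_string(),
        ];
        assert_eq!(
            normalize_model_name(known, "claude-3-5-sonnet-20240620".to_string()),
            "claude-3-5-sonnet"
        );
    }

    #[test]
    fn unknown_names_pass_through() {
        assert_eq!(
            normalize_model_name(Vec::new(), "some-unknown-model".to_string()),
            "some-unknown-model"
        );
    }
}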

/// The maximum monthly spending an individual user can reach on the free tier
/// before they have to pay.
pub const FREE_TIER_MONTHLY_SPENDING_LIMIT: Cents = Cents::from_dollars(10);

/// The default value to use for maximum spend per month if the user did not
/// explicitly set a maximum spend.
///
/// Used to prevent surprise bills.
pub const DEFAULT_MAX_MONTHLY_SPEND: Cents = Cents::from_dollars(10);

/// The maximum lifetime spending an individual user can reach before being cut off.
const LIFETIME_SPENDING_LIMIT: Cents = Cents::from_dollars(1_000);

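/// Checks that the user is within their spending and rate limits before a
/// completion request is forwarded to the upstream provider.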
async fn check_usage_limit(
    state: &Arc<LlmState>,
    provider: LanguageModelProvider,
    model_name: &str,
    claims: &LlmTokenClaims,
) -> Result<()> {
    let model = state.db.model(provider, model_name)?;
    let usage = state
        .db
        .get_usage(
            UserId::from_proto(claims.user_id),
            provider,
            model_name,
            Utc::now(),
        )
        .await?;

    if state.config.is_llm_billing_enabled() {
        if usage.spending_this_month >= FREE_TIER_MONTHLY_SPENDING_LIMIT {
            if !claims.has_llm_subscription {
                return Err(Error::http(
                    StatusCode::PAYMENT_REQUIRED,
                    "Maximum spending limit reached for this month.".to_string(),
                ));
            }

            if usage.spending_this_month >= Cents(claims.max_monthly_spend_in_cents) {
                return Err(Error::Http(
                    StatusCode::FORBIDDEN,
                    "Maximum spending limit reached for this month.".to_string(),
                    [(
                        HeaderName::from_static(MAX_LLM_MONTHLY_SPEND_REACHED_HEADER_NAME),
                        HeaderValue::from_static("true"),
                    )]
                    .into_iter()
                    .collect(),
                ));
            }
        }
    }

    // TODO: Remove this once we've rolled out monthly spending limits.
    if usage.lifetime_spending >= LIFETIME_SPENDING_LIMIT {
        return Err(Error::http(
            StatusCode::FORBIDDEN,
            "Maximum spending limit reached.".to_string(),
        ));
    }

    let active_users = state.get_active_user_count(provider, model_name).await?;

    let users_in_recent_minutes = active_users.users_in_recent_minutes.max(1);
    let users_in_recent_days = active_users.users_in_recent_days.max(1);

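    // Distribute each model's global limits evenly across recently active
    // users so that no single user can exhaust the shared quota. For example,
    // a model allowing 1,000 requests per minute with 50 recently active
    // users yields a per-user limit of 20 requests per minute.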
    let per_user_max_requests_per_minute =
        model.max_requests_per_minute as usize / users_in_recent_minutes;
    let per_user_max_tokens_per_minute =
        model.max_tokens_per_minute as usize / users_in_recent_minutes;
    let per_user_max_tokens_per_day = model.max_tokens_per_day as usize / users_in_recent_days;

    let checks = [
        (
            usage.requests_this_minute,
            per_user_max_requests_per_minute,
            UsageMeasure::RequestsPerMinute,
        ),
        (
            usage.tokens_this_minute,
            per_user_max_tokens_per_minute,
            UsageMeasure::TokensPerMinute,
        ),
        (
            usage.tokens_this_day,
            per_user_max_tokens_per_day,
            UsageMeasure::TokensPerDay,
        ),
    ];

    for (used, limit, usage_measure) in checks {
        // Temporarily bypass rate-limiting for staff members.
        if claims.is_staff {
            continue;
        }

        if used > limit {
            let resource = match usage_measure {
                UsageMeasure::RequestsPerMinute => "requests_per_minute",
                UsageMeasure::TokensPerMinute => "tokens_per_minute",
                UsageMeasure::TokensPerDay => "tokens_per_day",
            };

            if let Some(client) = state.clickhouse_client.as_ref() {
                tracing::info!(
                    target: "user rate limit",
                    user_id = claims.user_id,
                    login = claims.github_user_login,
                    authn.jti = claims.jti,
                    is_staff = claims.is_staff,
                    provider = provider.to_string(),
                    model = model.name,
                    requests_this_minute = usage.requests_this_minute,
                    tokens_this_minute = usage.tokens_this_minute,
                    tokens_this_day = usage.tokens_this_day,
                    users_in_recent_minutes = users_in_recent_minutes,
                    users_in_recent_days = users_in_recent_days,
                    max_requests_per_minute = per_user_max_requests_per_minute,
                    max_tokens_per_minute = per_user_max_tokens_per_minute,
                    max_tokens_per_day = per_user_max_tokens_per_day,
                );

                report_llm_rate_limit(
                    client,
                    LlmRateLimitEventRow {
                        time: Utc::now().timestamp_millis(),
                        user_id: claims.user_id as i32,
                        is_staff: claims.is_staff,
                        plan: match claims.plan {
                            Plan::Free => "free".to_string(),
                            Plan::ZedPro => "zed_pro".to_string(),
                        },
                        model: model.name.clone(),
                        provider: provider.to_string(),
                        usage_measure: resource.to_string(),
                        requests_this_minute: usage.requests_this_minute as u64,
                        tokens_this_minute: usage.tokens_this_minute as u64,
                        tokens_this_day: usage.tokens_this_day as u64,
                        users_in_recent_minutes: users_in_recent_minutes as u64,
                        users_in_recent_days: users_in_recent_days as u64,
                        max_requests_per_minute: per_user_max_requests_per_minute as u64,
                        max_tokens_per_minute: per_user_max_tokens_per_minute as u64,
                        max_tokens_per_day: per_user_max_tokens_per_day as u64,
                    },
                )
                .await
                .log_err();
            }

            return Err(Error::http(
                StatusCode::TOO_MANY_REQUESTS,
                format!("Rate limit exceeded. Maximum {} reached.", resource),
            ));
        }
    }

    Ok(())
}

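/// A single chunk of a streamed completion, paired with the token counts the
/// upstream provider reported for it.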
struct CompletionChunk {
    bytes: Vec<u8>,
    input_tokens: usize,
    output_tokens: usize,
    cache_creation_input_tokens: usize,
    cache_read_input_tokens: usize,
}

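/// Wraps a provider's completion stream, forwarding the raw bytes to the
/// client while accumulating token usage. The accumulated usage is recorded
/// when the stream is dropped.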
struct TokenCountingStream<S> {
    state: Arc<LlmState>,
    claims: LlmTokenClaims,
    provider: LanguageModelProvider,
    model: String,
    tokens: TokenUsage,
    inner_stream: S,
}

impl<S> Stream for TokenCountingStream<S>
where
    S: Stream<Item = Result<CompletionChunk, anyhow::Error>> + Unpin,
{
    type Item = Result<Vec<u8>, anyhow::Error>;

    fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
        match Pin::new(&mut self.inner_stream).poll_next(cx) {
            Poll::Ready(Some(Ok(mut chunk))) => {
                chunk.bytes.push(b'\n');
                self.tokens.input += chunk.input_tokens;
                self.tokens.output += chunk.output_tokens;
                self.tokens.input_cache_creation += chunk.cache_creation_input_tokens;
                self.tokens.input_cache_read += chunk.cache_read_input_tokens;
                Poll::Ready(Some(Ok(chunk.bytes)))
            }
            Poll::Ready(Some(Err(e))) => Poll::Ready(Some(Err(e))),
            Poll::Ready(None) => Poll::Ready(None),
            Poll::Pending => Poll::Pending,
        }
    }
}

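// Usage is recorded in `Drop` so that totals are captured even when the
// client disconnects before the stream is fully consumed.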
impl<S> Drop for TokenCountingStream<S> {
    fn drop(&mut self) {
        let state = self.state.clone();
        let is_llm_billing_enabled = state.config.is_llm_billing_enabled();
        let claims = self.claims.clone();
        let provider = self.provider;
        let model = std::mem::take(&mut self.model);
        let tokens = self.tokens;
        self.state.executor.spawn_detached(async move {
            let usage = state
                .db
                .record_usage(
                    UserId::from_proto(claims.user_id),
                    claims.is_staff,
                    provider,
                    &model,
                    tokens,
                    // We're passing `false` here if LLM billing is not enabled
                    // so that we don't write any records to the
                    // `billing_events` table until we're ready to bill users.
                    if is_llm_billing_enabled {
                        claims.has_llm_subscription
                    } else {
                        false
                    },
                    Cents(claims.max_monthly_spend_in_cents),
                    Utc::now(),
                )
                .await
                .log_err();

            if let Some(usage) = usage {
                tracing::info!(
                    target: "user usage",
                    user_id = claims.user_id,
                    login = claims.github_user_login,
                    authn.jti = claims.jti,
                    is_staff = claims.is_staff,
                    requests_this_minute = usage.requests_this_minute,
                    tokens_this_minute = usage.tokens_this_minute,
                );

                if let Some(clickhouse_client) = state.clickhouse_client.as_ref() {
                    report_llm_usage(
                        clickhouse_client,
                        LlmUsageEventRow {
                            time: Utc::now().timestamp_millis(),
                            user_id: claims.user_id as i32,
                            is_staff: claims.is_staff,
                            plan: match claims.plan {
                                Plan::Free => "free".to_string(),
                                Plan::ZedPro => "zed_pro".to_string(),
                            },
                            model,
                            provider: provider.to_string(),
                            input_token_count: tokens.input as u64,
                            cache_creation_input_token_count: tokens.input_cache_creation as u64,
                            cache_read_input_token_count: tokens.input_cache_read as u64,
                            output_token_count: tokens.output as u64,
                            requests_this_minute: usage.requests_this_minute as u64,
                            tokens_this_minute: usage.tokens_this_minute as u64,
                            tokens_this_day: usage.tokens_this_day as u64,
                            input_tokens_this_month: usage.tokens_this_month.input as u64,
                            cache_creation_input_tokens_this_month: usage
                                .tokens_this_month
                                .input_cache_creation
                                as u64,
                            cache_read_input_tokens_this_month: usage
                                .tokens_this_month
                                .input_cache_read
                                as u64,
                            output_tokens_this_month: usage.tokens_this_month.output as u64,
                            spending_this_month: usage.spending_this_month.0 as u64,
                            lifetime_spending: usage.lifetime_spending.0 as u64,
                        },
                    )
                    .await
                    .log_err();
                }
            }
        })
    }
}

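/// Spawns a background task that logs per-model active user counts and
/// application-wide usage every 30 seconds.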
pub fn log_usage_periodically(state: Arc<LlmState>) {
    state.executor.clone().spawn_detached(async move {
        loop {
            state
                .executor
                .sleep(std::time::Duration::from_secs(30))
                .await;

            for provider in LanguageModelProvider::iter() {
                for model in state.db.model_names_for_provider(provider) {
                    if let Some(active_user_count) = state
                        .get_active_user_count(provider, &model)
                        .await
                        .log_err()
                    {
                        tracing::info!(
                            target: "active user counts",
                            provider = provider.to_string(),
                            model = model,
                            users_in_recent_minutes = active_user_count.users_in_recent_minutes,
                            users_in_recent_days = active_user_count.users_in_recent_days,
                        );
                    }
                }
            }

            if let Some(usages) = state
                .db
                .get_application_wide_usages_by_model(Utc::now())
                .await
                .log_err()
            {
                for usage in usages {
                    tracing::info!(
                        target: "computed usage",
                        provider = usage.provider.to_string(),
                        model = usage.model,
                        requests_this_minute = usage.requests_this_minute,
                        tokens_this_minute = usage.tokens_this_minute,
                    );
                }
            }
        }
    })
}