mod authorization;
pub mod db;
mod token;

use crate::api::events::SnowflakeRow;
use crate::api::CloudflareIpCountryHeader;
use crate::build_kinesis_client;
use crate::{db::UserId, executor::Executor, Cents, Config, Error, Result};
use anyhow::{anyhow, Context as _};
use authorization::authorize_access_to_language_model;
use axum::routing::get;
use axum::{
    body::Body,
    http::{self, HeaderName, HeaderValue, Request, StatusCode},
    middleware::{self, Next},
    response::{IntoResponse, Response},
    routing::post,
    Extension, Json, Router, TypedHeader,
};
use chrono::{DateTime, Duration, Utc};
use collections::HashMap;
use db::TokenUsage;
use db::{usage_measure::UsageMeasure, ActiveUserCount, LlmDatabase};
use futures::{Stream, StreamExt as _};
use reqwest_client::ReqwestClient;
use rpc::{
    proto::Plan, LanguageModelProvider, PerformCompletionParams, EXPIRED_LLM_TOKEN_HEADER_NAME,
};
use rpc::{
    ListModelsResponse, PredictEditsParams, PredictEditsResponse,
    MAX_LLM_MONTHLY_SPEND_REACHED_HEADER_NAME,
};
use serde_json::json;
use std::{
    pin::Pin,
    sync::Arc,
    task::{Context, Poll},
};
use strum::IntoEnumIterator;
use tokio::sync::RwLock;
use util::ResultExt;

pub use token::*;

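/// Shared state for the LLM service: configuration, database handle, HTTP
/// client for upstream providers, an optional Kinesis client for telemetry,
/// and a short-lived cache of active user counts per (provider, model) pair.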
pub struct LlmState {
    pub config: Config,
    pub executor: Executor,
    pub db: Arc<LlmDatabase>,
    pub http_client: ReqwestClient,
    pub kinesis_client: Option<aws_sdk_kinesis::Client>,
    active_user_count_by_model:
        RwLock<HashMap<(LanguageModelProvider, String), (DateTime<Utc>, ActiveUserCount)>>,
}

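/// How long a cached active-user count stays fresh before it is refetched
/// from the database.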
const ACTIVE_USER_COUNT_CACHE_DURATION: Duration = Duration::seconds(30);

impl LlmState {
    pub async fn new(config: Config, executor: Executor) -> Result<Arc<Self>> {
        let database_url = config
            .llm_database_url
            .as_ref()
            .ok_or_else(|| anyhow!("missing LLM_DATABASE_URL"))?;
        let max_connections = config
            .llm_database_max_connections
            .ok_or_else(|| anyhow!("missing LLM_DATABASE_MAX_CONNECTIONS"))?;

        let mut db_options = db::ConnectOptions::new(database_url);
        db_options.max_connections(max_connections);
        let mut db = LlmDatabase::new(db_options, executor.clone()).await?;
        db.initialize().await?;

        let db = Arc::new(db);

        let user_agent = format!("Zed Server/{}", env!("CARGO_PKG_VERSION"));
        let http_client =
            ReqwestClient::user_agent(&user_agent).context("failed to construct http client")?;

        let this = Self {
            executor,
            db,
            http_client,
            kinesis_client: if config.kinesis_access_key.is_some() {
                build_kinesis_client(&config).await.log_err()
            } else {
                None
            },
            active_user_count_by_model: RwLock::new(HashMap::default()),
            config,
        };

        Ok(Arc::new(this))
    }

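    /// Returns the number of users who have recently used the given model.
    ///
    /// A cached count is served while it is younger than
    /// [`ACTIVE_USER_COUNT_CACHE_DURATION`]; otherwise the database is
    /// queried and the cache refreshed. Concurrent callers that both find a
    /// stale entry may each refetch the count; the last write wins, which is
    /// harmless here.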
    pub async fn get_active_user_count(
        &self,
        provider: LanguageModelProvider,
        model: &str,
    ) -> Result<ActiveUserCount> {
        let now = Utc::now();

        {
            let active_user_count_by_model = self.active_user_count_by_model.read().await;
            if let Some((last_updated, count)) =
                active_user_count_by_model.get(&(provider, model.to_string()))
            {
                if now - *last_updated < ACTIVE_USER_COUNT_CACHE_DURATION {
                    return Ok(*count);
                }
            }
        }

        let mut cache = self.active_user_count_by_model.write().await;
        let new_count = self.db.get_active_user_count(provider, model, now).await?;
        cache.insert((provider, model.to_string()), (now, new_count));
        Ok(new_count)
    }
}

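/// Builds the router for the LLM service.
///
/// All routes require a valid LLM token in the `Authorization: Bearer`
/// header, enforced by the [`validate_api_token`] middleware.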
pub fn routes() -> Router<(), Body> {
    Router::new()
        .route("/models", get(list_models))
        .route("/completion", post(perform_completion))
        .route("/predict_edits", post(predict_edits))
        .layer(middleware::from_fn(validate_api_token))
}

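/// Middleware that authenticates every request to this service: it extracts
/// the bearer token from the `Authorization` header, validates its claims,
/// rejects revoked or expired tokens (signalling expiry to the client via the
/// `EXPIRED_LLM_TOKEN_HEADER_NAME` header), records identifying fields on the
/// current tracing span, and makes the [`LlmTokenClaims`] available to
/// handlers as a request extension.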
async fn validate_api_token<B>(mut req: Request<B>, next: Next<B>) -> impl IntoResponse {
    let token = req
        .headers()
        .get(http::header::AUTHORIZATION)
        .and_then(|header| header.to_str().ok())
        .ok_or_else(|| {
            Error::http(
                StatusCode::BAD_REQUEST,
                "missing authorization header".to_string(),
            )
        })?
        .strip_prefix("Bearer ")
        .ok_or_else(|| {
            Error::http(
                StatusCode::BAD_REQUEST,
                "invalid authorization header".to_string(),
            )
        })?;

    let state = req.extensions().get::<Arc<LlmState>>().unwrap();
    match LlmTokenClaims::validate(token, &state.config) {
        Ok(claims) => {
            if state.db.is_access_token_revoked(&claims.jti).await? {
                return Err(Error::http(
                    StatusCode::UNAUTHORIZED,
                    "unauthorized".to_string(),
                ));
            }

            tracing::Span::current()
                .record("user_id", claims.user_id)
                .record("login", claims.github_user_login.clone())
                .record("authn.jti", &claims.jti)
                .record("is_staff", claims.is_staff);

            req.extensions_mut().insert(claims);
            Ok::<_, Error>(next.run(req).await.into_response())
        }
        Err(ValidateLlmTokenError::Expired) => Err(Error::Http(
            StatusCode::UNAUTHORIZED,
            "unauthorized".to_string(),
            [(
                HeaderName::from_static(EXPIRED_LLM_TOKEN_HEADER_NAME),
                HeaderValue::from_static("true"),
            )]
            .into_iter()
            .collect(),
        )),
        Err(_err) => Err(Error::http(
            StatusCode::UNAUTHORIZED,
            "unauthorized".to_string(),
        )),
    }
}

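/// `GET /models`: returns the subset of known models that the authenticated
/// user is authorized to access, given their claims and country of origin.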
async fn list_models(
    Extension(state): Extension<Arc<LlmState>>,
    Extension(claims): Extension<LlmTokenClaims>,
    country_code_header: Option<TypedHeader<CloudflareIpCountryHeader>>,
) -> Result<Json<ListModelsResponse>> {
    let country_code = country_code_header.map(|header| header.to_string());

    let mut accessible_models = Vec::new();

    for (provider, model) in state.db.all_models() {
        let authorize_result = authorize_access_to_language_model(
            &state.config,
            &claims,
            country_code.as_deref(),
            provider,
            &model.name,
        );

        if authorize_result.is_ok() {
            accessible_models.push(rpc::LanguageModel {
                provider,
                name: model.name,
            });
        }
    }

    Ok(Json(ListModelsResponse {
        models: accessible_models,
    }))
}

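/// `POST /completion`: proxies a completion request to the configured
/// upstream provider (Anthropic, OpenAI, or Google).
///
/// The handler normalizes the requested model name, authorizes access,
/// enforces spending and rate limits, then streams the upstream response
/// back to the client while counting tokens via [`TokenCountingStream`].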
async fn perform_completion(
    Extension(state): Extension<Arc<LlmState>>,
    Extension(claims): Extension<LlmTokenClaims>,
    country_code_header: Option<TypedHeader<CloudflareIpCountryHeader>>,
    Json(params): Json<PerformCompletionParams>,
) -> Result<impl IntoResponse> {
    let model = normalize_model_name(
        state.db.model_names_for_provider(params.provider),
        params.model,
    );

    authorize_access_to_language_model(
        &state.config,
        &claims,
        country_code_header
            .map(|header| header.to_string())
            .as_deref(),
        params.provider,
        &model,
    )?;

    check_usage_limit(&state, params.provider, &model, &claims).await?;

    let stream = match params.provider {
        LanguageModelProvider::Anthropic => {
            let api_key = if claims.is_staff {
                state
                    .config
                    .anthropic_staff_api_key
                    .as_ref()
                    .context("no Anthropic staff API key configured on the server")?
            } else {
                state
                    .config
                    .anthropic_api_key
                    .as_ref()
                    .context("no Anthropic API key configured on the server")?
            };

            let mut request: anthropic::Request =
                serde_json::from_str(params.provider_request.get())?;

            // Override the model on the request with the latest version of
            // the model that is known to the server.
            //
            // Right now, we use the version that's defined in `model.id()`,
            // but we will likely want to change this code once a new version
            // of an Anthropic model is released, so that users can use the
            // new version without having to update Zed.
            request.model = match model.as_str() {
                "claude-3-5-sonnet" => anthropic::Model::Claude3_5Sonnet.id().to_string(),
                "claude-3-opus" => anthropic::Model::Claude3Opus.id().to_string(),
                "claude-3-haiku" => anthropic::Model::Claude3Haiku.id().to_string(),
                "claude-3-sonnet" => anthropic::Model::Claude3Sonnet.id().to_string(),
                _ => request.model,
            };

            let (chunks, rate_limit_info) = anthropic::stream_completion_with_rate_limit_info(
                &state.http_client,
                anthropic::ANTHROPIC_API_URL,
                api_key,
                request,
            )
            .await
            .map_err(|err| match err {
                anthropic::AnthropicError::ApiError(ref api_error) => match api_error.code() {
                    Some(anthropic::ApiErrorCode::RateLimitError) => {
                        tracing::info!(
                            target: "upstream rate limit exceeded",
                            user_id = claims.user_id,
                            login = claims.github_user_login,
                            authn.jti = claims.jti,
                            is_staff = claims.is_staff,
                            provider = params.provider.to_string(),
                            model = model
                        );

                        Error::http(
                            StatusCode::TOO_MANY_REQUESTS,
                            "Upstream Anthropic rate limit exceeded.".to_string(),
                        )
                    }
                    Some(anthropic::ApiErrorCode::InvalidRequestError) => {
                        Error::http(StatusCode::BAD_REQUEST, api_error.message.clone())
                    }
                    Some(anthropic::ApiErrorCode::OverloadedError) => {
                        Error::http(StatusCode::SERVICE_UNAVAILABLE, api_error.message.clone())
                    }
                    Some(_) => {
                        Error::http(StatusCode::INTERNAL_SERVER_ERROR, api_error.message.clone())
                    }
                    None => Error::Internal(anyhow!(err)),
                },
                anthropic::AnthropicError::Other(err) => Error::Internal(err),
            })?;

            if let Some(rate_limit_info) = rate_limit_info {
                tracing::info!(
                    target: "upstream rate limit",
                    is_staff = claims.is_staff,
                    provider = params.provider.to_string(),
                    model = model,
                    tokens_remaining = rate_limit_info.tokens_remaining,
                    requests_remaining = rate_limit_info.requests_remaining,
                    requests_reset = ?rate_limit_info.requests_reset,
                    tokens_reset = ?rate_limit_info.tokens_reset,
                );
            }

            chunks
                .map(move |event| {
                    let chunk = event?;
                    let (
                        input_tokens,
                        output_tokens,
                        cache_creation_input_tokens,
                        cache_read_input_tokens,
                    ) = match &chunk {
                        anthropic::Event::MessageStart {
                            message: anthropic::Response { usage, .. },
                        }
                        | anthropic::Event::MessageDelta { usage, .. } => (
                            usage.input_tokens.unwrap_or(0) as usize,
                            usage.output_tokens.unwrap_or(0) as usize,
                            usage.cache_creation_input_tokens.unwrap_or(0) as usize,
                            usage.cache_read_input_tokens.unwrap_or(0) as usize,
                        ),
                        _ => (0, 0, 0, 0),
                    };

                    anyhow::Ok(CompletionChunk {
                        bytes: serde_json::to_vec(&chunk).unwrap(),
                        input_tokens,
                        output_tokens,
                        cache_creation_input_tokens,
                        cache_read_input_tokens,
                    })
                })
                .boxed()
        }
        LanguageModelProvider::OpenAi => {
            let api_key = state
                .config
                .openai_api_key
                .as_ref()
                .context("no OpenAI API key configured on the server")?;
            let chunks = open_ai::stream_completion(
                &state.http_client,
                open_ai::OPEN_AI_API_URL,
                api_key,
                serde_json::from_str(params.provider_request.get())?,
            )
            .await?;

            chunks
                .map(|event| {
                    event.map(|chunk| {
                        let input_tokens =
                            chunk.usage.as_ref().map_or(0, |u| u.prompt_tokens) as usize;
                        let output_tokens =
                            chunk.usage.as_ref().map_or(0, |u| u.completion_tokens) as usize;
                        CompletionChunk {
                            bytes: serde_json::to_vec(&chunk).unwrap(),
                            input_tokens,
                            output_tokens,
                            cache_creation_input_tokens: 0,
                            cache_read_input_tokens: 0,
                        }
                    })
                })
                .boxed()
        }
        LanguageModelProvider::Google => {
            let api_key = state
                .config
                .google_ai_api_key
                .as_ref()
                .context("no Google AI API key configured on the server")?;
            let chunks = google_ai::stream_generate_content(
                &state.http_client,
                google_ai::API_URL,
                api_key,
                serde_json::from_str(params.provider_request.get())?,
            )
            .await?;

            chunks
                .map(|event| {
                    event.map(|chunk| {
                        // TODO: implement token counting for Google AI.
                        CompletionChunk {
                            bytes: serde_json::to_vec(&chunk).unwrap(),
                            input_tokens: 0,
                            output_tokens: 0,
                            cache_creation_input_tokens: 0,
                            cache_read_input_tokens: 0,
                        }
                    })
                })
                .boxed()
        }
    };

    Ok(Response::new(Body::wrap_stream(TokenCountingStream {
        state,
        claims,
        provider: params.provider,
        model,
        tokens: TokenUsage::default(),
        inner_stream: stream,
    })))
}

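/// Normalizes a requested model name to the longest known model name that is
/// a prefix of it, or returns the name unchanged when no known model matches.
///
/// For example (hypothetical names): with known models `["gpt-4", "gpt-4o"]`,
/// a request for `"gpt-4o-mini"` normalizes to `"gpt-4o"` because the longest
/// matching prefix wins, while an unknown `"o3"` passes through unchanged.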
fn normalize_model_name(known_models: Vec<String>, name: String) -> String {
    if let Some(known_model_name) = known_models
        .iter()
        .filter(|known_model_name| name.starts_with(known_model_name.as_str()))
        .max_by_key(|known_model_name| known_model_name.len())
    {
        known_model_name.to_string()
    } else {
        name
    }
}

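/// `POST /predict_edits`: staff-only endpoint that predicts an edit for an
/// excerpt of code by interpolating the caller's recent input events and the
/// excerpt into a prompt template and sending it to the configured prediction
/// model through an OpenAI-compatible completion API.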
async fn predict_edits(
    Extension(state): Extension<Arc<LlmState>>,
    Extension(claims): Extension<LlmTokenClaims>,
    _country_code_header: Option<TypedHeader<CloudflareIpCountryHeader>>,
    Json(params): Json<PredictEditsParams>,
) -> Result<impl IntoResponse> {
    if !claims.is_staff {
        return Err(anyhow!("not found"))?;
    }

    let api_url = state
        .config
        .prediction_api_url
        .as_ref()
        .context("no PREDICTION_API_URL configured on the server")?;
    let api_key = state
        .config
        .prediction_api_key
        .as_ref()
        .context("no PREDICTION_API_KEY configured on the server")?;
    let model = state
        .config
        .prediction_model
        .as_ref()
        .context("no PREDICTION_MODEL configured on the server")?;
    let prompt = include_str!("./llm/prediction_prompt.md")
        .replace("<events>", &params.input_events)
        .replace("<excerpt>", &params.input_excerpt);
    let mut response = open_ai::complete_text(
        &state.http_client,
        api_url,
        api_key,
        open_ai::CompletionRequest {
            model: model.to_string(),
            prompt: prompt.clone(),
            max_tokens: 1024,
            temperature: 0.,
            prediction: Some(open_ai::Prediction::Content {
                content: params.input_excerpt,
            }),
            rewrite_speculation: Some(true),
        },
    )
    .await?;
    let choice = response
        .choices
        .pop()
        .context("no output from completion response")?;
    Ok(Json(PredictEditsResponse {
        output_excerpt: choice.text,
    }))
}

/// The maximum monthly spending an individual user can reach on the free tier
/// before they have to pay.
pub const FREE_TIER_MONTHLY_SPENDING_LIMIT: Cents = Cents::from_dollars(10);

/// The default value to use for maximum spend per month if the user did not
/// explicitly set a maximum spend.
///
/// Used to prevent surprise bills.
pub const DEFAULT_MAX_MONTHLY_SPEND: Cents = Cents::from_dollars(10);

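/// Enforces per-user spending and rate limits before a completion is served.
///
/// Staff users bypass all checks. Other users are first checked against the
/// monthly spending limits, then against per-user rate limits obtained by
/// dividing each model-wide limit evenly among recently active users. As an
/// illustration with made-up numbers: if a model allows 1,000,000 tokens per
/// minute and 50 users were active in recent minutes, each user may spend up
/// to 1,000,000 / 50 = 20,000 tokens that minute.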
async fn check_usage_limit(
    state: &Arc<LlmState>,
    provider: LanguageModelProvider,
    model_name: &str,
    claims: &LlmTokenClaims,
) -> Result<()> {
    if claims.is_staff {
        return Ok(());
    }

    let model = state.db.model(provider, model_name)?;
    let usage = state
        .db
        .get_usage(
            UserId::from_proto(claims.user_id),
            provider,
            model_name,
            Utc::now(),
        )
        .await?;
    let free_tier = claims.free_tier_monthly_spending_limit();

    if usage.spending_this_month >= free_tier {
        if !claims.has_llm_subscription {
            return Err(Error::http(
                StatusCode::PAYMENT_REQUIRED,
                "Maximum spending limit reached for this month.".to_string(),
            ));
        }

        if (usage.spending_this_month - free_tier) >= Cents(claims.max_monthly_spend_in_cents) {
            return Err(Error::Http(
                StatusCode::FORBIDDEN,
                "Maximum spending limit reached for this month.".to_string(),
                [(
                    HeaderName::from_static(MAX_LLM_MONTHLY_SPEND_REACHED_HEADER_NAME),
                    HeaderValue::from_static("true"),
                )]
                .into_iter()
                .collect(),
            ));
        }
    }

    let active_users = state.get_active_user_count(provider, model_name).await?;

    let users_in_recent_minutes = active_users.users_in_recent_minutes.max(1);
    let users_in_recent_days = active_users.users_in_recent_days.max(1);

    let per_user_max_requests_per_minute =
        model.max_requests_per_minute as usize / users_in_recent_minutes;
    let per_user_max_tokens_per_minute =
        model.max_tokens_per_minute as usize / users_in_recent_minutes;
    let per_user_max_tokens_per_day = model.max_tokens_per_day as usize / users_in_recent_days;

    let checks = [
        (
            usage.requests_this_minute,
            per_user_max_requests_per_minute,
            UsageMeasure::RequestsPerMinute,
        ),
        (
            usage.tokens_this_minute,
            per_user_max_tokens_per_minute,
            UsageMeasure::TokensPerMinute,
        ),
        (
            usage.tokens_this_day,
            per_user_max_tokens_per_day,
            UsageMeasure::TokensPerDay,
        ),
    ];

    for (used, limit, usage_measure) in checks {
        if used > limit {
            let resource = match usage_measure {
                UsageMeasure::RequestsPerMinute => "requests_per_minute",
                UsageMeasure::TokensPerMinute => "tokens_per_minute",
                UsageMeasure::TokensPerDay => "tokens_per_day",
            };

            tracing::info!(
                target: "user rate limit",
                user_id = claims.user_id,
                login = claims.github_user_login,
                authn.jti = claims.jti,
                is_staff = claims.is_staff,
                provider = provider.to_string(),
                model = model.name,
                requests_this_minute = usage.requests_this_minute,
                tokens_this_minute = usage.tokens_this_minute,
                tokens_this_day = usage.tokens_this_day,
                users_in_recent_minutes = users_in_recent_minutes,
                users_in_recent_days = users_in_recent_days,
                max_requests_per_minute = per_user_max_requests_per_minute,
                max_tokens_per_minute = per_user_max_tokens_per_minute,
                max_tokens_per_day = per_user_max_tokens_per_day,
            );

            SnowflakeRow::new(
                "Language Model Rate Limited",
                claims.metrics_id,
                claims.is_staff,
                claims.system_id.clone(),
                json!({
                    "usage": usage,
                    "users_in_recent_minutes": users_in_recent_minutes,
                    "users_in_recent_days": users_in_recent_days,
                    "max_requests_per_minute": per_user_max_requests_per_minute,
                    "max_tokens_per_minute": per_user_max_tokens_per_minute,
                    "max_tokens_per_day": per_user_max_tokens_per_day,
                    "plan": match claims.plan {
                        Plan::Free => "free".to_string(),
                        Plan::ZedPro => "zed_pro".to_string(),
                    },
                    "model": model.name.clone(),
                    "provider": provider.to_string(),
                    "usage_measure": resource.to_string(),
                }),
            )
            .write(&state.kinesis_client, &state.config.kinesis_stream)
            .await
            .log_err();

            return Err(Error::http(
                StatusCode::TOO_MANY_REQUESTS,
                format!("Rate limit exceeded. Maximum {} reached.", resource),
            ));
        }
    }

    Ok(())
}

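/// A single chunk of a streamed completion, together with the token counts
/// parsed out of it.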
struct CompletionChunk {
    bytes: Vec<u8>,
    input_tokens: usize,
    output_tokens: usize,
    cache_creation_input_tokens: usize,
    cache_read_input_tokens: usize,
}

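/// Wraps a provider's completion stream, forwarding each chunk to the client
/// as newline-delimited JSON while accumulating its token counts. The
/// accumulated usage is recorded and telemetry emitted when the stream is
/// dropped, so usage is captured even if the client disconnects mid-stream.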
struct TokenCountingStream<S> {
    state: Arc<LlmState>,
    claims: LlmTokenClaims,
    provider: LanguageModelProvider,
    model: String,
    tokens: TokenUsage,
    inner_stream: S,
}

impl<S> Stream for TokenCountingStream<S>
where
    S: Stream<Item = Result<CompletionChunk, anyhow::Error>> + Unpin,
{
    type Item = Result<Vec<u8>, anyhow::Error>;

    fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
        match Pin::new(&mut self.inner_stream).poll_next(cx) {
            Poll::Ready(Some(Ok(mut chunk))) => {
                chunk.bytes.push(b'\n');
                self.tokens.input += chunk.input_tokens;
                self.tokens.output += chunk.output_tokens;
                self.tokens.input_cache_creation += chunk.cache_creation_input_tokens;
                self.tokens.input_cache_read += chunk.cache_read_input_tokens;
                Poll::Ready(Some(Ok(chunk.bytes)))
            }
            Poll::Ready(Some(Err(e))) => Poll::Ready(Some(Err(e))),
            Poll::Ready(None) => Poll::Ready(None),
            Poll::Pending => Poll::Pending,
        }
    }
}

impl<S> Drop for TokenCountingStream<S> {
    fn drop(&mut self) {
        let state = self.state.clone();
        let claims = self.claims.clone();
        let provider = self.provider;
        let model = std::mem::take(&mut self.model);
        let tokens = self.tokens;
        self.state.executor.spawn_detached(async move {
            let usage = state
                .db
                .record_usage(
                    UserId::from_proto(claims.user_id),
                    claims.is_staff,
                    provider,
                    &model,
                    tokens,
                    claims.has_llm_subscription,
                    Cents(claims.max_monthly_spend_in_cents),
                    claims.free_tier_monthly_spending_limit(),
                    Utc::now(),
                )
                .await
                .log_err();

            if let Some(usage) = usage {
                tracing::info!(
                    target: "user usage",
                    user_id = claims.user_id,
                    login = claims.github_user_login,
                    authn.jti = claims.jti,
                    is_staff = claims.is_staff,
                    requests_this_minute = usage.requests_this_minute,
                    tokens_this_minute = usage.tokens_this_minute,
                );

                let properties = json!({
                    "has_llm_subscription": claims.has_llm_subscription,
                    "max_monthly_spend_in_cents": claims.max_monthly_spend_in_cents,
                    "plan": match claims.plan {
                        Plan::Free => "free".to_string(),
                        Plan::ZedPro => "zed_pro".to_string(),
                    },
                    "model": model,
                    "provider": provider,
                    "usage": usage,
                    "tokens": tokens
                });
                SnowflakeRow::new(
                    "Language Model Used",
                    claims.metrics_id,
                    claims.is_staff,
                    claims.system_id.clone(),
                    properties,
                )
                .write(&state.kinesis_client, &state.config.kinesis_stream)
                .await
                .log_err();
            }
        })
    }
}

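/// Spawns a detached background task that logs active user counts and
/// application-wide usage for every known model every 30 seconds.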
pub fn log_usage_periodically(state: Arc<LlmState>) {
    state.executor.clone().spawn_detached(async move {
        loop {
            state
                .executor
                .sleep(std::time::Duration::from_secs(30))
                .await;

            for provider in LanguageModelProvider::iter() {
                for model in state.db.model_names_for_provider(provider) {
                    if let Some(active_user_count) = state
                        .get_active_user_count(provider, &model)
                        .await
                        .log_err()
                    {
                        tracing::info!(
                            target: "active user counts",
                            provider = provider.to_string(),
                            model = model,
                            users_in_recent_minutes = active_user_count.users_in_recent_minutes,
                            users_in_recent_days = active_user_count.users_in_recent_days,
                        );
                    }
                }
            }

            if let Some(usages) = state
                .db
                .get_application_wide_usages_by_model(Utc::now())
                .await
                .log_err()
            {
                for usage in usages {
                    tracing::info!(
                        target: "computed usage",
                        provider = usage.provider.to_string(),
                        model = usage.model,
                        requests_this_minute = usage.requests_this_minute,
                        tokens_this_minute = usage.tokens_this_minute,
                    );
                }
            }
        }
    })
}
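
// A minimal sketch of a unit test for `normalize_model_name`, added for
// illustration and not present in the original source; the model names used
// here are hypothetical.
#[cfg(test)]
mod tests {
    use super::normalize_model_name;

    #[test]
    fn normalize_model_name_picks_longest_known_prefix() {
        let known = vec!["gpt-4".to_string(), "gpt-4o".to_string()];

        // Both "gpt-4" and "gpt-4o" are prefixes of "gpt-4o-mini"; the longer
        // one wins.
        assert_eq!(
            normalize_model_name(known.clone(), "gpt-4o-mini".to_string()),
            "gpt-4o"
        );

        // Names that match no known model pass through unchanged.
        assert_eq!(normalize_model_name(known, "o3".to_string()), "o3");
    }
}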