llm.rs

mod authorization;
pub mod db;
mod telemetry;
mod token;

use crate::{
    api::CloudflareIpCountryHeader, build_clickhouse_client, db::UserId, executor::Executor,
    Config, Error, Result,
};
use anyhow::{anyhow, Context as _};
use authorization::authorize_access_to_language_model;
use axum::{
    body::Body,
    http::{self, HeaderName, HeaderValue, Request, StatusCode},
    middleware::{self, Next},
    response::{IntoResponse, Response},
    routing::{get, post},
    Extension, Json, Router, TypedHeader,
};
use chrono::{DateTime, Duration, Utc};
use collections::HashMap;
use db::{usage_measure::UsageMeasure, ActiveUserCount, LlmDatabase};
use futures::{Stream, StreamExt as _};
use isahc_http_client::IsahcHttpClient;
use rpc::{
    proto::Plan, LanguageModelProvider, ListModelsResponse, PerformCompletionParams,
    EXPIRED_LLM_TOKEN_HEADER_NAME,
};
use std::{
    pin::Pin,
    sync::Arc,
    task::{Context, Poll},
};
use strum::IntoEnumIterator;
use telemetry::{report_llm_rate_limit, report_llm_usage, LlmRateLimitEventRow, LlmUsageEventRow};
use tokio::sync::RwLock;
use util::ResultExt;

pub use token::*;

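/// Shared state for the LLM service: configuration, database handle, HTTP
/// client, optional ClickHouse client, and a per-model cache of active user
/// counts.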
pub struct LlmState {
    pub config: Config,
    pub executor: Executor,
    pub db: Arc<LlmDatabase>,
    pub http_client: IsahcHttpClient,
    pub clickhouse_client: Option<clickhouse::Client>,
    active_user_count_by_model:
        RwLock<HashMap<(LanguageModelProvider, String), (DateTime<Utc>, ActiveUserCount)>>,
}

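/// How long a cached active user count remains valid before it is refreshed
/// from the database.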
const ACTIVE_USER_COUNT_CACHE_DURATION: Duration = Duration::seconds(30);

impl LlmState {
    pub async fn new(config: Config, executor: Executor) -> Result<Arc<Self>> {
        let database_url = config
            .llm_database_url
            .as_ref()
            .ok_or_else(|| anyhow!("missing LLM_DATABASE_URL"))?;
        let max_connections = config
            .llm_database_max_connections
            .ok_or_else(|| anyhow!("missing LLM_DATABASE_MAX_CONNECTIONS"))?;

        let mut db_options = db::ConnectOptions::new(database_url);
        db_options.max_connections(max_connections);
        let mut db = LlmDatabase::new(db_options, executor.clone()).await?;
        db.initialize().await?;

        let db = Arc::new(db);

        let user_agent = format!("Zed Server/{}", env!("CARGO_PKG_VERSION"));
        let http_client = IsahcHttpClient::builder()
            .default_header("User-Agent", user_agent)
            .build()
            .map(IsahcHttpClient::from)
            .context("failed to construct http client")?;

        let this = Self {
            executor,
            db,
            http_client,
            clickhouse_client: config
                .clickhouse_url
                .as_ref()
                .and_then(|_| build_clickhouse_client(&config).log_err()),
            active_user_count_by_model: RwLock::new(HashMap::default()),
            config,
        };

        Ok(Arc::new(this))
    }

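    /// Returns the number of users recently active on the given model,
    /// serving a cached value when it is newer than
    /// `ACTIVE_USER_COUNT_CACHE_DURATION` and querying the database otherwise.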
    pub async fn get_active_user_count(
        &self,
        provider: LanguageModelProvider,
        model: &str,
    ) -> Result<ActiveUserCount> {
        let now = Utc::now();

        {
            let active_user_count_by_model = self.active_user_count_by_model.read().await;
            if let Some((last_updated, count)) =
                active_user_count_by_model.get(&(provider, model.to_string()))
            {
                if now - *last_updated < ACTIVE_USER_COUNT_CACHE_DURATION {
                    return Ok(*count);
                }
            }
        }

        let mut cache = self.active_user_count_by_model.write().await;
        let new_count = self.db.get_active_user_count(provider, model, now).await?;
        cache.insert((provider, model.to_string()), (now, new_count));
        Ok(new_count)
    }
}

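/// Builds the router for the LLM service: `GET /models` and
/// `POST /completion`, both behind the `validate_api_token` middleware.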
pub fn routes() -> Router<(), Body> {
    Router::new()
        .route("/models", get(list_models))
        .route("/completion", post(perform_completion))
        .layer(middleware::from_fn(validate_api_token))
}

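/// Middleware that validates the `Authorization: Bearer` token on every
/// request, rejecting missing, malformed, revoked, or expired tokens. Expired
/// tokens are flagged with the `EXPIRED_LLM_TOKEN_HEADER_NAME` header so
/// clients know to refresh.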
async fn validate_api_token<B>(mut req: Request<B>, next: Next<B>) -> impl IntoResponse {
    let token = req
        .headers()
        .get(http::header::AUTHORIZATION)
        .and_then(|header| header.to_str().ok())
        .ok_or_else(|| {
            Error::http(
                StatusCode::BAD_REQUEST,
                "missing authorization header".to_string(),
            )
        })?
        .strip_prefix("Bearer ")
        .ok_or_else(|| {
            Error::http(
                StatusCode::BAD_REQUEST,
                "invalid authorization header".to_string(),
            )
        })?;

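    // The `Arc<LlmState>` extension is installed when the app is assembled,
    // so this lookup is expected to always succeed.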
    let state = req.extensions().get::<Arc<LlmState>>().unwrap();
    match LlmTokenClaims::validate(token, &state.config) {
        Ok(claims) => {
            if state.db.is_access_token_revoked(&claims.jti).await? {
                return Err(Error::http(
                    StatusCode::UNAUTHORIZED,
                    "unauthorized".to_string(),
                ));
            }

            tracing::Span::current()
                .record("user_id", claims.user_id)
                .record("login", claims.github_user_login.clone())
                .record("authn.jti", &claims.jti)
                .record("is_staff", claims.is_staff);

            req.extensions_mut().insert(claims);
            Ok::<_, Error>(next.run(req).await.into_response())
        }
        Err(ValidateLlmTokenError::Expired) => Err(Error::Http(
            StatusCode::UNAUTHORIZED,
            "unauthorized".to_string(),
            [(
                HeaderName::from_static(EXPIRED_LLM_TOKEN_HEADER_NAME),
                HeaderValue::from_static("true"),
            )]
            .into_iter()
            .collect(),
        )),
        Err(_err) => Err(Error::http(
            StatusCode::UNAUTHORIZED,
            "unauthorized".to_string(),
        )),
    }
}

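/// `GET /models`: returns the subset of known models that the requesting user
/// is authorized to access, given their claims and country of origin.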
async fn list_models(
    Extension(state): Extension<Arc<LlmState>>,
    Extension(claims): Extension<LlmTokenClaims>,
    country_code_header: Option<TypedHeader<CloudflareIpCountryHeader>>,
) -> Result<Json<ListModelsResponse>> {
    let country_code = country_code_header.map(|header| header.to_string());

    let mut accessible_models = Vec::new();

    for (provider, model) in state.db.all_models() {
        let authorize_result = authorize_access_to_language_model(
            &state.config,
            &claims,
            country_code.as_deref(),
            provider,
            &model.name,
        );

        if authorize_result.is_ok() {
            accessible_models.push(rpc::LanguageModel {
                provider,
                name: model.name,
            });
        }
    }

    Ok(Json(ListModelsResponse {
        models: accessible_models,
    }))
}

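/// `POST /completion`: authorizes the request, enforces usage limits, streams
/// the completion from the upstream provider, and records token usage.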
async fn perform_completion(
    Extension(state): Extension<Arc<LlmState>>,
    Extension(claims): Extension<LlmTokenClaims>,
    country_code_header: Option<TypedHeader<CloudflareIpCountryHeader>>,
    Json(params): Json<PerformCompletionParams>,
) -> Result<impl IntoResponse> {
    let model = normalize_model_name(
        state.db.model_names_for_provider(params.provider),
        params.model,
    );

    authorize_access_to_language_model(
        &state.config,
        &claims,
        country_code_header
            .map(|header| header.to_string())
            .as_deref(),
        params.provider,
        &model,
    )?;

    check_usage_limit(&state, params.provider, &model, &claims).await?;

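    // Dispatch to the upstream provider, normalizing each provider's event
    // stream into `CompletionChunk`s so usage can be counted uniformly.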
    let stream = match params.provider {
        LanguageModelProvider::Anthropic => {
            let api_key = if claims.is_staff {
                state
                    .config
                    .anthropic_staff_api_key
                    .as_ref()
                    .context("no Anthropic AI staff API key configured on the server")?
            } else {
                state
                    .config
                    .anthropic_api_key
                    .as_ref()
                    .context("no Anthropic AI API key configured on the server")?
            };

            let mut request: anthropic::Request =
                serde_json::from_str(params.provider_request.get())?;

            // Override the model on the request with the latest version of the model that is
            // known to the server.
            //
            // Right now, we use the version that's defined in `model.id()`, but we will likely
            // want to change this code once a new version of an Anthropic model is released,
            // so that users can use the new version, without having to update Zed.
            request.model = match model.as_str() {
                "claude-3-5-sonnet" => anthropic::Model::Claude3_5Sonnet.id().to_string(),
                "claude-3-opus" => anthropic::Model::Claude3Opus.id().to_string(),
                "claude-3-haiku" => anthropic::Model::Claude3Haiku.id().to_string(),
                "claude-3-sonnet" => anthropic::Model::Claude3Sonnet.id().to_string(),
                _ => request.model,
            };

            let (chunks, rate_limit_info) = anthropic::stream_completion_with_rate_limit_info(
                &state.http_client,
                anthropic::ANTHROPIC_API_URL,
                api_key,
                request,
                None,
            )
            .await
            .map_err(|err| match err {
                anthropic::AnthropicError::ApiError(ref api_error) => match api_error.code() {
                    Some(anthropic::ApiErrorCode::RateLimitError) => {
                        tracing::info!(
                            target: "upstream rate limit exceeded",
                            user_id = claims.user_id,
                            login = claims.github_user_login,
                            authn.jti = claims.jti,
                            is_staff = claims.is_staff,
                            provider = params.provider.to_string(),
                            model = model
                        );

                        Error::http(
                            StatusCode::TOO_MANY_REQUESTS,
                            "Upstream Anthropic rate limit exceeded.".to_string(),
                        )
                    }
                    Some(anthropic::ApiErrorCode::InvalidRequestError) => {
                        Error::http(StatusCode::BAD_REQUEST, api_error.message.clone())
                    }
                    Some(anthropic::ApiErrorCode::OverloadedError) => {
                        Error::http(StatusCode::SERVICE_UNAVAILABLE, api_error.message.clone())
                    }
                    Some(_) => {
                        Error::http(StatusCode::INTERNAL_SERVER_ERROR, api_error.message.clone())
                    }
                    None => Error::Internal(anyhow!(err)),
                },
                anthropic::AnthropicError::Other(err) => Error::Internal(err),
            })?;

            if let Some(rate_limit_info) = rate_limit_info {
                tracing::info!(
                    target: "upstream rate limit",
                    is_staff = claims.is_staff,
                    provider = params.provider.to_string(),
                    model = model,
                    tokens_remaining = rate_limit_info.tokens_remaining,
                    requests_remaining = rate_limit_info.requests_remaining,
                    requests_reset = ?rate_limit_info.requests_reset,
                    tokens_reset = ?rate_limit_info.tokens_reset,
                );
            }

            chunks
                .map(move |event| {
                    let chunk = event?;
                    let (
                        input_tokens,
                        output_tokens,
                        cache_creation_input_tokens,
                        cache_read_input_tokens,
                    ) = match &chunk {
                        anthropic::Event::MessageStart {
                            message: anthropic::Response { usage, .. },
                        }
                        | anthropic::Event::MessageDelta { usage, .. } => (
                            usage.input_tokens.unwrap_or(0) as usize,
                            usage.output_tokens.unwrap_or(0) as usize,
                            usage.cache_creation_input_tokens.unwrap_or(0) as usize,
                            usage.cache_read_input_tokens.unwrap_or(0) as usize,
                        ),
                        _ => (0, 0, 0, 0),
                    };

                    anyhow::Ok(CompletionChunk {
                        bytes: serde_json::to_vec(&chunk).unwrap(),
                        input_tokens,
                        output_tokens,
                        cache_creation_input_tokens,
                        cache_read_input_tokens,
                    })
                })
                .boxed()
        }
        LanguageModelProvider::OpenAi => {
            let api_key = state
                .config
                .openai_api_key
                .as_ref()
                .context("no OpenAI API key configured on the server")?;
            let chunks = open_ai::stream_completion(
                &state.http_client,
                open_ai::OPEN_AI_API_URL,
                api_key,
                serde_json::from_str(params.provider_request.get())?,
                None,
            )
            .await?;

            chunks
                .map(|event| {
                    event.map(|chunk| {
                        let input_tokens =
                            chunk.usage.as_ref().map_or(0, |u| u.prompt_tokens) as usize;
                        let output_tokens =
                            chunk.usage.as_ref().map_or(0, |u| u.completion_tokens) as usize;
                        CompletionChunk {
                            bytes: serde_json::to_vec(&chunk).unwrap(),
                            input_tokens,
                            output_tokens,
                            cache_creation_input_tokens: 0,
                            cache_read_input_tokens: 0,
                        }
                    })
                })
                .boxed()
        }
        LanguageModelProvider::Google => {
            let api_key = state
                .config
                .google_ai_api_key
                .as_ref()
                .context("no Google AI API key configured on the server")?;
            let chunks = google_ai::stream_generate_content(
                &state.http_client,
                google_ai::API_URL,
                api_key,
                serde_json::from_str(params.provider_request.get())?,
                None,
            )
            .await?;

            chunks
                .map(|event| {
                    event.map(|chunk| {
                        // TODO - implement token counting for Google AI
                        CompletionChunk {
                            bytes: serde_json::to_vec(&chunk).unwrap(),
                            input_tokens: 0,
                            output_tokens: 0,
                            cache_creation_input_tokens: 0,
                            cache_read_input_tokens: 0,
                        }
                    })
                })
                .boxed()
        }
    };

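    // Wrap the provider stream so token counts are accumulated as chunks flow
    // through and recorded when the stream is dropped.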
    Ok(Response::new(Body::wrap_stream(TokenCountingStream {
        state,
        claims,
        provider: params.provider,
        model,
        input_tokens: 0,
        output_tokens: 0,
        cache_creation_input_tokens: 0,
        cache_read_input_tokens: 0,
        inner_stream: stream,
    })))
}

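/// Resolves a requested model name to a known model name by choosing the
/// longest known name that is a prefix of the request, so a dated variant
/// like `claude-3-5-sonnet-20240620` normalizes to `claude-3-5-sonnet`.
/// Names with no known prefix are returned unchanged.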
fn normalize_model_name(known_models: Vec<String>, name: String) -> String {
    if let Some(known_model_name) = known_models
        .iter()
        .filter(|known_model_name| name.starts_with(known_model_name.as_str()))
        .max_by_key(|known_model_name| known_model_name.len())
    {
        known_model_name.to_string()
    } else {
        name
    }
}

/// The maximum monthly spending an individual user can reach before they have to pay.
///
/// Represented in cents.
pub const MONTHLY_SPENDING_LIMIT_IN_CENTS: usize = 5 * 100;

/// The maximum lifetime spending an individual user can reach before being cut off.
///
/// Represented in cents.
const LIFETIME_SPENDING_LIMIT_IN_CENTS: usize = 1_000 * 100;

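/// Rejects the request if the user has exceeded their spending limits or
/// their per-user rate limits for the given model.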
async fn check_usage_limit(
    state: &Arc<LlmState>,
    provider: LanguageModelProvider,
    model_name: &str,
    claims: &LlmTokenClaims,
) -> Result<()> {
    let model = state.db.model(provider, model_name)?;
    let usage = state
        .db
        .get_usage(
            UserId::from_proto(claims.user_id),
            provider,
            model_name,
            Utc::now(),
        )
        .await?;

    if state.config.is_llm_billing_enabled()
        && usage.spending_this_month >= MONTHLY_SPENDING_LIMIT_IN_CENTS
        && !claims.has_llm_subscription.unwrap_or(false)
    {
        return Err(Error::http(
            StatusCode::PAYMENT_REQUIRED,
            "Maximum spending limit reached for this month.".to_string(),
        ));
    }

    // TODO: Remove this once we've rolled out monthly spending limits.
    if usage.lifetime_spending >= LIFETIME_SPENDING_LIMIT_IN_CENTS {
        return Err(Error::http(
            StatusCode::FORBIDDEN,
            "Maximum spending limit reached.".to_string(),
        ));
    }

    let active_users = state.get_active_user_count(provider, model_name).await?;

    let users_in_recent_minutes = active_users.users_in_recent_minutes.max(1);
    let users_in_recent_days = active_users.users_in_recent_days.max(1);

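    // Spread each model-wide limit evenly across the users who were recently
    // active on that model to arrive at per-user limits.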
    let per_user_max_requests_per_minute =
        model.max_requests_per_minute as usize / users_in_recent_minutes;
    let per_user_max_tokens_per_minute =
        model.max_tokens_per_minute as usize / users_in_recent_minutes;
    let per_user_max_tokens_per_day = model.max_tokens_per_day as usize / users_in_recent_days;

    let checks = [
        (
            usage.requests_this_minute,
            per_user_max_requests_per_minute,
            UsageMeasure::RequestsPerMinute,
        ),
        (
            usage.tokens_this_minute,
            per_user_max_tokens_per_minute,
            UsageMeasure::TokensPerMinute,
        ),
        (
            usage.tokens_this_day,
            per_user_max_tokens_per_day,
            UsageMeasure::TokensPerDay,
        ),
    ];

    for (used, limit, usage_measure) in checks {
        // Temporarily bypass rate-limiting for staff members.
        if claims.is_staff {
            continue;
        }

        if used > limit {
            let resource = match usage_measure {
                UsageMeasure::RequestsPerMinute => "requests_per_minute",
                UsageMeasure::TokensPerMinute => "tokens_per_minute",
                UsageMeasure::TokensPerDay => "tokens_per_day",
            };

            if let Some(client) = state.clickhouse_client.as_ref() {
                tracing::info!(
                    target: "user rate limit",
                    user_id = claims.user_id,
                    login = claims.github_user_login,
                    authn.jti = claims.jti,
                    is_staff = claims.is_staff,
                    provider = provider.to_string(),
                    model = model.name,
                    requests_this_minute = usage.requests_this_minute,
                    tokens_this_minute = usage.tokens_this_minute,
                    tokens_this_day = usage.tokens_this_day,
                    users_in_recent_minutes = users_in_recent_minutes,
                    users_in_recent_days = users_in_recent_days,
                    max_requests_per_minute = per_user_max_requests_per_minute,
                    max_tokens_per_minute = per_user_max_tokens_per_minute,
                    max_tokens_per_day = per_user_max_tokens_per_day,
                );

                report_llm_rate_limit(
                    client,
                    LlmRateLimitEventRow {
                        time: Utc::now().timestamp_millis(),
                        user_id: claims.user_id as i32,
                        is_staff: claims.is_staff,
                        plan: match claims.plan {
                            Plan::Free => "free".to_string(),
                            Plan::ZedPro => "zed_pro".to_string(),
                        },
                        model: model.name.clone(),
                        provider: provider.to_string(),
                        usage_measure: resource.to_string(),
                        requests_this_minute: usage.requests_this_minute as u64,
                        tokens_this_minute: usage.tokens_this_minute as u64,
                        tokens_this_day: usage.tokens_this_day as u64,
                        users_in_recent_minutes: users_in_recent_minutes as u64,
                        users_in_recent_days: users_in_recent_days as u64,
                        max_requests_per_minute: per_user_max_requests_per_minute as u64,
                        max_tokens_per_minute: per_user_max_tokens_per_minute as u64,
                        max_tokens_per_day: per_user_max_tokens_per_day as u64,
                    },
                )
                .await
                .log_err();
            }

            return Err(Error::http(
                StatusCode::TOO_MANY_REQUESTS,
                format!("Rate limit exceeded. Maximum {} reached.", resource),
            ));
        }
    }

    Ok(())
}

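/// A single chunk of a streamed completion, paired with the token counts
/// reported in that chunk.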
struct CompletionChunk {
    bytes: Vec<u8>,
    input_tokens: usize,
    output_tokens: usize,
    cache_creation_input_tokens: usize,
    cache_read_input_tokens: usize,
}

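/// Wraps a provider's completion stream, forwarding each chunk's bytes
/// (newline-delimited) while accumulating token counts. The totals are
/// recorded to the database and ClickHouse when the stream is dropped.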
struct TokenCountingStream<S> {
    state: Arc<LlmState>,
    claims: LlmTokenClaims,
    provider: LanguageModelProvider,
    model: String,
    input_tokens: usize,
    output_tokens: usize,
    cache_creation_input_tokens: usize,
    cache_read_input_tokens: usize,
    inner_stream: S,
}

impl<S> Stream for TokenCountingStream<S>
where
    S: Stream<Item = Result<CompletionChunk, anyhow::Error>> + Unpin,
{
    type Item = Result<Vec<u8>, anyhow::Error>;

    fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
        match Pin::new(&mut self.inner_stream).poll_next(cx) {
            Poll::Ready(Some(Ok(mut chunk))) => {
                chunk.bytes.push(b'\n');
                self.input_tokens += chunk.input_tokens;
                self.output_tokens += chunk.output_tokens;
                self.cache_creation_input_tokens += chunk.cache_creation_input_tokens;
                self.cache_read_input_tokens += chunk.cache_read_input_tokens;
                Poll::Ready(Some(Ok(chunk.bytes)))
            }
            Poll::Ready(Some(Err(e))) => Poll::Ready(Some(Err(e))),
            Poll::Ready(None) => Poll::Ready(None),
            Poll::Pending => Poll::Pending,
        }
    }
}

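// When the stream is dropped (the response finished or the client went away),
// spawn a detached task to record the accumulated usage and report it to
// ClickHouse.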
impl<S> Drop for TokenCountingStream<S> {
    fn drop(&mut self) {
        let state = self.state.clone();
        let claims = self.claims.clone();
        let provider = self.provider;
        let model = std::mem::take(&mut self.model);
        let input_token_count = self.input_tokens;
        let output_token_count = self.output_tokens;
        let cache_creation_input_token_count = self.cache_creation_input_tokens;
        let cache_read_input_token_count = self.cache_read_input_tokens;
        self.state.executor.spawn_detached(async move {
            let usage = state
                .db
                .record_usage(
                    UserId::from_proto(claims.user_id),
                    claims.is_staff,
                    provider,
                    &model,
                    input_token_count,
                    cache_creation_input_token_count,
                    cache_read_input_token_count,
                    output_token_count,
                    Utc::now(),
                )
                .await
                .log_err();

            if let Some(usage) = usage {
                tracing::info!(
                    target: "user usage",
                    user_id = claims.user_id,
                    login = claims.github_user_login,
                    authn.jti = claims.jti,
                    is_staff = claims.is_staff,
                    requests_this_minute = usage.requests_this_minute,
                    tokens_this_minute = usage.tokens_this_minute,
                );

                if let Some(clickhouse_client) = state.clickhouse_client.as_ref() {
                    report_llm_usage(
                        clickhouse_client,
                        LlmUsageEventRow {
                            time: Utc::now().timestamp_millis(),
                            user_id: claims.user_id as i32,
                            is_staff: claims.is_staff,
                            plan: match claims.plan {
                                Plan::Free => "free".to_string(),
                                Plan::ZedPro => "zed_pro".to_string(),
                            },
                            model,
                            provider: provider.to_string(),
                            input_token_count: input_token_count as u64,
                            cache_creation_input_token_count: cache_creation_input_token_count
                                as u64,
                            cache_read_input_token_count: cache_read_input_token_count as u64,
                            output_token_count: output_token_count as u64,
                            requests_this_minute: usage.requests_this_minute as u64,
                            tokens_this_minute: usage.tokens_this_minute as u64,
                            tokens_this_day: usage.tokens_this_day as u64,
                            input_tokens_this_month: usage.input_tokens_this_month as u64,
                            cache_creation_input_tokens_this_month: usage
                                .cache_creation_input_tokens_this_month
                                as u64,
                            cache_read_input_tokens_this_month: usage
                                .cache_read_input_tokens_this_month
                                as u64,
                            output_tokens_this_month: usage.output_tokens_this_month as u64,
                            spending_this_month: usage.spending_this_month as u64,
                            lifetime_spending: usage.lifetime_spending as u64,
                        },
                    )
                    .await
                    .log_err();
                }
            }
        })
    }
}

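/// Spawns a detached background task that, every 30 seconds, logs active user
/// counts per model and application-wide usage.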
pub fn log_usage_periodically(state: Arc<LlmState>) {
    state.executor.clone().spawn_detached(async move {
        loop {
            state
                .executor
                .sleep(std::time::Duration::from_secs(30))
                .await;

            for provider in LanguageModelProvider::iter() {
                for model in state.db.model_names_for_provider(provider) {
                    if let Some(active_user_count) = state
                        .get_active_user_count(provider, &model)
                        .await
                        .log_err()
                    {
                        tracing::info!(
                            target: "active user counts",
                            provider = provider.to_string(),
                            model = model,
                            users_in_recent_minutes = active_user_count.users_in_recent_minutes,
                            users_in_recent_days = active_user_count.users_in_recent_days,
                        );
                    }
                }
            }

            if let Some(usages) = state
                .db
                .get_application_wide_usages_by_model(Utc::now())
                .await
                .log_err()
            {
                for usage in usages {
                    tracing::info!(
                        target: "computed usage",
                        provider = usage.provider.to_string(),
                        model = usage.model,
                        requests_this_minute = usage.requests_this_minute,
                        tokens_this_minute = usage.tokens_this_minute,
                    );
                }
            }
        }
    })
}