llm.rs

mod authorization;
pub mod db;
mod telemetry;
mod token;

use crate::{
    api::CloudflareIpCountryHeader, build_clickhouse_client, db::UserId, executor::Executor,
    Config, Error, Result,
};
use anyhow::{anyhow, Context as _};
use authorization::authorize_access_to_language_model;
use axum::{
    body::Body,
    http::{self, HeaderName, HeaderValue, Request, StatusCode},
    middleware::{self, Next},
    response::{IntoResponse, Response},
    routing::{get, post},
    Extension, Json, Router, TypedHeader,
};
use chrono::{DateTime, Duration, Utc};
use collections::HashMap;
use db::{usage_measure::UsageMeasure, ActiveUserCount, LlmDatabase};
use futures::{Stream, StreamExt as _};
use reqwest_client::ReqwestClient;
use rpc::{
    proto::Plan, LanguageModelProvider, ListModelsResponse, PerformCompletionParams,
    EXPIRED_LLM_TOKEN_HEADER_NAME,
};
use std::{
    pin::Pin,
    sync::Arc,
    task::{Context, Poll},
};
use strum::IntoEnumIterator;
use telemetry::{report_llm_rate_limit, report_llm_usage, LlmRateLimitEventRow, LlmUsageEventRow};
use tokio::sync::RwLock;
use util::ResultExt;

pub use token::*;

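/// Shared state for the LLM service: configuration, the LLM database, an
/// HTTP client for upstream providers, an optional ClickHouse client for
/// telemetry, and a short-lived cache of active-user counts keyed by
/// (provider, model).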
pub struct LlmState {
    pub config: Config,
    pub executor: Executor,
    pub db: Arc<LlmDatabase>,
    pub http_client: ReqwestClient,
    pub clickhouse_client: Option<clickhouse::Client>,
    active_user_count_by_model:
        RwLock<HashMap<(LanguageModelProvider, String), (DateTime<Utc>, ActiveUserCount)>>,
}

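/// How long a cached active-user count stays fresh before it is refetched
/// from the database.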
const ACTIVE_USER_COUNT_CACHE_DURATION: Duration = Duration::seconds(30);

impl LlmState {
    pub async fn new(config: Config, executor: Executor) -> Result<Arc<Self>> {
        let database_url = config
            .llm_database_url
            .as_ref()
            .ok_or_else(|| anyhow!("missing LLM_DATABASE_URL"))?;
        let max_connections = config
            .llm_database_max_connections
            .ok_or_else(|| anyhow!("missing LLM_DATABASE_MAX_CONNECTIONS"))?;

        let mut db_options = db::ConnectOptions::new(database_url);
        db_options.max_connections(max_connections);
        let mut db = LlmDatabase::new(db_options, executor.clone()).await?;
        db.initialize().await?;

        let db = Arc::new(db);

        let user_agent = format!("Zed Server/{}", env!("CARGO_PKG_VERSION"));
        let http_client =
            ReqwestClient::user_agent(&user_agent).context("failed to construct http client")?;

        let this = Self {
            executor,
            db,
            http_client,
            clickhouse_client: config
                .clickhouse_url
                .as_ref()
                .and_then(|_| build_clickhouse_client(&config).log_err()),
            active_user_count_by_model: RwLock::new(HashMap::default()),
            config,
        };

        Ok(Arc::new(this))
    }

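    /// Returns the recent active-user count for the given model, serving a
    /// cached value while it is younger than `ACTIVE_USER_COUNT_CACHE_DURATION`
    /// and refreshing it from the database otherwise.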
    pub async fn get_active_user_count(
        &self,
        provider: LanguageModelProvider,
        model: &str,
    ) -> Result<ActiveUserCount> {
        let now = Utc::now();

        {
            let active_user_count_by_model = self.active_user_count_by_model.read().await;
            if let Some((last_updated, count)) =
                active_user_count_by_model.get(&(provider, model.to_string()))
            {
                if now - *last_updated < ACTIVE_USER_COUNT_CACHE_DURATION {
                    return Ok(*count);
                }
            }
        }

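        // Cache miss or stale entry: take the write lock and refresh from the
        // database. The cache is not re-checked after acquiring the write
        // lock, so concurrent callers may each refresh the same entry once.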
        let mut cache = self.active_user_count_by_model.write().await;
        let new_count = self.db.get_active_user_count(provider, model, now).await?;
        cache.insert((provider, model.to_string()), (now, new_count));
        Ok(new_count)
    }
}

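/// Builds the public router for the LLM service: `GET /models` lists the
/// models the caller may access, and `POST /completion` proxies a completion
/// request to an upstream provider. Both routes are gated by
/// `validate_api_token`.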
pub fn routes() -> Router<(), Body> {
    Router::new()
        .route("/models", get(list_models))
        .route("/completion", post(perform_completion))
        .layer(middleware::from_fn(validate_api_token))
}

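/// Middleware that authenticates every request: it extracts the bearer token
/// from the `Authorization` header, validates its claims, rejects revoked
/// tokens, records identifying fields on the current tracing span, and makes
/// the claims available to handlers via request extensions. An expired token
/// yields a 401 with `EXPIRED_LLM_TOKEN_HEADER_NAME` set so that clients can
/// detect expiry and refresh the token.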
async fn validate_api_token<B>(mut req: Request<B>, next: Next<B>) -> impl IntoResponse {
    let token = req
        .headers()
        .get(http::header::AUTHORIZATION)
        .and_then(|header| header.to_str().ok())
        .ok_or_else(|| {
            Error::http(
                StatusCode::BAD_REQUEST,
                "missing authorization header".to_string(),
            )
        })?
        .strip_prefix("Bearer ")
        .ok_or_else(|| {
            Error::http(
                StatusCode::BAD_REQUEST,
                "invalid authorization header".to_string(),
            )
        })?;

    let state = req.extensions().get::<Arc<LlmState>>().unwrap();
    match LlmTokenClaims::validate(token, &state.config) {
        Ok(claims) => {
            if state.db.is_access_token_revoked(&claims.jti).await? {
                return Err(Error::http(
                    StatusCode::UNAUTHORIZED,
                    "unauthorized".to_string(),
                ));
            }

            tracing::Span::current()
                .record("user_id", claims.user_id)
                .record("login", claims.github_user_login.clone())
                .record("authn.jti", &claims.jti)
                .record("is_staff", claims.is_staff);

            req.extensions_mut().insert(claims);
            Ok::<_, Error>(next.run(req).await.into_response())
        }
        Err(ValidateLlmTokenError::Expired) => Err(Error::Http(
            StatusCode::UNAUTHORIZED,
            "unauthorized".to_string(),
            [(
                HeaderName::from_static(EXPIRED_LLM_TOKEN_HEADER_NAME),
                HeaderValue::from_static("true"),
            )]
            .into_iter()
            .collect(),
        )),
        Err(_err) => Err(Error::http(
            StatusCode::UNAUTHORIZED,
            "unauthorized".to_string(),
        )),
    }
}

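/// `GET /models`: returns the subset of known models that the authenticated
/// user is authorized to access, as determined by
/// `authorize_access_to_language_model` from the user's claims and the
/// caller's country as reported by Cloudflare.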
async fn list_models(
    Extension(state): Extension<Arc<LlmState>>,
    Extension(claims): Extension<LlmTokenClaims>,
    country_code_header: Option<TypedHeader<CloudflareIpCountryHeader>>,
) -> Result<Json<ListModelsResponse>> {
    let country_code = country_code_header.map(|header| header.to_string());

    let mut accessible_models = Vec::new();

    for (provider, model) in state.db.all_models() {
        let authorize_result = authorize_access_to_language_model(
            &state.config,
            &claims,
            country_code.as_deref(),
            provider,
            &model.name,
        );

        if authorize_result.is_ok() {
            accessible_models.push(rpc::LanguageModel {
                provider,
                name: model.name,
            });
        }
    }

    Ok(Json(ListModelsResponse {
        models: accessible_models,
    }))
}

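/// `POST /completion`: authorizes and rate-limits the request, forwards it to
/// the requested provider (Anthropic, OpenAI, or Google AI), and streams the
/// provider's response back to the client while counting tokens for usage
/// tracking and telemetry.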
async fn perform_completion(
    Extension(state): Extension<Arc<LlmState>>,
    Extension(claims): Extension<LlmTokenClaims>,
    country_code_header: Option<TypedHeader<CloudflareIpCountryHeader>>,
    Json(params): Json<PerformCompletionParams>,
) -> Result<impl IntoResponse> {
    let model = normalize_model_name(
        state.db.model_names_for_provider(params.provider),
        params.model,
    );

    authorize_access_to_language_model(
        &state.config,
        &claims,
        country_code_header
            .map(|header| header.to_string())
            .as_deref(),
        params.provider,
        &model,
    )?;

    check_usage_limit(&state, params.provider, &model, &claims).await?;

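    // Dispatch to the upstream provider. Each arm yields a boxed stream of
    // `CompletionChunk`s: the raw bytes to forward plus any token counts
    // parsed from that chunk.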
    let stream = match params.provider {
        LanguageModelProvider::Anthropic => {
            let api_key = if claims.is_staff {
                state
                    .config
                    .anthropic_staff_api_key
                    .as_ref()
                    .context("no Anthropic AI staff API key configured on the server")?
            } else {
                state
                    .config
                    .anthropic_api_key
                    .as_ref()
                    .context("no Anthropic AI API key configured on the server")?
            };

            let mut request: anthropic::Request =
                serde_json::from_str(params.provider_request.get())?;

            // Override the model on the request with the latest version of the model that is
            // known to the server.
            //
            // Right now, we use the version that's defined in `model.id()`, but we will likely
            // want to change this code once a new version of an Anthropic model is released,
            // so that users can use the new version, without having to update Zed.
            request.model = match model.as_str() {
                "claude-3-5-sonnet" => anthropic::Model::Claude3_5Sonnet.id().to_string(),
                "claude-3-opus" => anthropic::Model::Claude3Opus.id().to_string(),
                "claude-3-haiku" => anthropic::Model::Claude3Haiku.id().to_string(),
                "claude-3-sonnet" => anthropic::Model::Claude3Sonnet.id().to_string(),
                _ => request.model,
            };

            let (chunks, rate_limit_info) = anthropic::stream_completion_with_rate_limit_info(
                &state.http_client,
                anthropic::ANTHROPIC_API_URL,
                api_key,
                request,
                None,
            )
            .await
            .map_err(|err| match err {
                anthropic::AnthropicError::ApiError(ref api_error) => match api_error.code() {
                    Some(anthropic::ApiErrorCode::RateLimitError) => {
                        tracing::info!(
                            target: "upstream rate limit exceeded",
                            user_id = claims.user_id,
                            login = claims.github_user_login,
                            authn.jti = claims.jti,
                            is_staff = claims.is_staff,
                            provider = params.provider.to_string(),
                            model = model
                        );

                        Error::http(
                            StatusCode::TOO_MANY_REQUESTS,
                            "Upstream Anthropic rate limit exceeded.".to_string(),
                        )
                    }
                    Some(anthropic::ApiErrorCode::InvalidRequestError) => {
                        Error::http(StatusCode::BAD_REQUEST, api_error.message.clone())
                    }
                    Some(anthropic::ApiErrorCode::OverloadedError) => {
                        Error::http(StatusCode::SERVICE_UNAVAILABLE, api_error.message.clone())
                    }
                    Some(_) => {
                        Error::http(StatusCode::INTERNAL_SERVER_ERROR, api_error.message.clone())
                    }
                    None => Error::Internal(anyhow!(err)),
                },
                anthropic::AnthropicError::Other(err) => Error::Internal(err),
            })?;

            if let Some(rate_limit_info) = rate_limit_info {
                tracing::info!(
                    target: "upstream rate limit",
                    is_staff = claims.is_staff,
                    provider = params.provider.to_string(),
                    model = model,
                    tokens_remaining = rate_limit_info.tokens_remaining,
                    requests_remaining = rate_limit_info.requests_remaining,
                    requests_reset = ?rate_limit_info.requests_reset,
                    tokens_reset = ?rate_limit_info.tokens_reset,
                );
            }

            chunks
                .map(move |event| {
                    let chunk = event?;
                    let (
                        input_tokens,
                        output_tokens,
                        cache_creation_input_tokens,
                        cache_read_input_tokens,
                    ) = match &chunk {
                        anthropic::Event::MessageStart {
                            message: anthropic::Response { usage, .. },
                        }
                        | anthropic::Event::MessageDelta { usage, .. } => (
                            usage.input_tokens.unwrap_or(0) as usize,
                            usage.output_tokens.unwrap_or(0) as usize,
                            usage.cache_creation_input_tokens.unwrap_or(0) as usize,
                            usage.cache_read_input_tokens.unwrap_or(0) as usize,
                        ),
                        _ => (0, 0, 0, 0),
                    };

                    anyhow::Ok(CompletionChunk {
                        bytes: serde_json::to_vec(&chunk).unwrap(),
                        input_tokens,
                        output_tokens,
                        cache_creation_input_tokens,
                        cache_read_input_tokens,
                    })
                })
                .boxed()
        }
        LanguageModelProvider::OpenAi => {
            let api_key = state
                .config
                .openai_api_key
                .as_ref()
                .context("no OpenAI API key configured on the server")?;
            let chunks = open_ai::stream_completion(
                &state.http_client,
                open_ai::OPEN_AI_API_URL,
                api_key,
                serde_json::from_str(params.provider_request.get())?,
                None,
            )
            .await?;

            chunks
                .map(|event| {
                    event.map(|chunk| {
                        let input_tokens =
                            chunk.usage.as_ref().map_or(0, |u| u.prompt_tokens) as usize;
                        let output_tokens =
                            chunk.usage.as_ref().map_or(0, |u| u.completion_tokens) as usize;
                        CompletionChunk {
                            bytes: serde_json::to_vec(&chunk).unwrap(),
                            input_tokens,
                            output_tokens,
                            cache_creation_input_tokens: 0,
                            cache_read_input_tokens: 0,
                        }
                    })
                })
                .boxed()
        }
        LanguageModelProvider::Google => {
            let api_key = state
                .config
                .google_ai_api_key
                .as_ref()
                .context("no Google AI API key configured on the server")?;
            let chunks = google_ai::stream_generate_content(
                &state.http_client,
                google_ai::API_URL,
                api_key,
                serde_json::from_str(params.provider_request.get())?,
                None,
            )
            .await?;

            chunks
                .map(|event| {
                    event.map(|chunk| {
                        // TODO - implement token counting for Google AI
                        CompletionChunk {
                            bytes: serde_json::to_vec(&chunk).unwrap(),
                            input_tokens: 0,
                            output_tokens: 0,
                            cache_creation_input_tokens: 0,
                            cache_read_input_tokens: 0,
                        }
                    })
                })
                .boxed()
        }
    };

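    // Wrap the provider stream so that token counts accumulate as chunks flow
    // to the client and are recorded when the stream is dropped.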
    Ok(Response::new(Body::wrap_stream(TokenCountingStream {
        state,
        claims,
        provider: params.provider,
        model,
        input_tokens: 0,
        output_tokens: 0,
        cache_creation_input_tokens: 0,
        cache_read_input_tokens: 0,
        inner_stream: stream,
    })))
}

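/// Normalizes a client-supplied model name to the longest known model name
/// that is a prefix of it, so that a dated variant such as
/// "claude-3-5-sonnet-20240620" resolves to the server's "claude-3-5-sonnet"
/// entry. Names that match no known model are passed through unchanged.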
fn normalize_model_name(known_models: Vec<String>, name: String) -> String {
    if let Some(known_model_name) = known_models
        .iter()
        .filter(|known_model_name| name.starts_with(known_model_name.as_str()))
        .max_by_key(|known_model_name| known_model_name.len())
    {
        known_model_name.to_string()
    } else {
        name
    }
}

/// The maximum lifetime spending an individual user can reach before being cut off.
///
/// Represented in cents (i.e., $1,000).
const LIFETIME_SPENDING_LIMIT_IN_CENTS: usize = 1_000 * 100;

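/// Enforces spending and rate limits before a completion request is forwarded
/// upstream. A user whose lifetime spending has reached
/// `LIFETIME_SPENDING_LIMIT_IN_CENTS` is rejected outright; otherwise each
/// model's aggregate requests-per-minute, tokens-per-minute, and
/// tokens-per-day limits are divided evenly among recently active users, and
/// exceeding any per-user share yields a 429. Staff members currently bypass
/// these rate limits, and rate-limit events are reported to ClickHouse when a
/// client is configured.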
async fn check_usage_limit(
    state: &Arc<LlmState>,
    provider: LanguageModelProvider,
    model_name: &str,
    claims: &LlmTokenClaims,
) -> Result<()> {
    let model = state.db.model(provider, model_name)?;
    let usage = state
        .db
        .get_usage(
            UserId::from_proto(claims.user_id),
            provider,
            model_name,
            Utc::now(),
        )
        .await?;

    if usage.lifetime_spending >= LIFETIME_SPENDING_LIMIT_IN_CENTS {
        return Err(Error::http(
            StatusCode::FORBIDDEN,
            "Maximum spending limit reached.".to_string(),
        ));
    }

    let active_users = state.get_active_user_count(provider, model_name).await?;

    let users_in_recent_minutes = active_users.users_in_recent_minutes.max(1);
    let users_in_recent_days = active_users.users_in_recent_days.max(1);

    let per_user_max_requests_per_minute =
        model.max_requests_per_minute as usize / users_in_recent_minutes;
    let per_user_max_tokens_per_minute =
        model.max_tokens_per_minute as usize / users_in_recent_minutes;
    let per_user_max_tokens_per_day = model.max_tokens_per_day as usize / users_in_recent_days;

    let checks = [
        (
            usage.requests_this_minute,
            per_user_max_requests_per_minute,
            UsageMeasure::RequestsPerMinute,
        ),
        (
            usage.tokens_this_minute,
            per_user_max_tokens_per_minute,
            UsageMeasure::TokensPerMinute,
        ),
        (
            usage.tokens_this_day,
            per_user_max_tokens_per_day,
            UsageMeasure::TokensPerDay,
        ),
    ];

    for (used, limit, usage_measure) in checks {
        // Temporarily bypass rate-limiting for staff members.
        if claims.is_staff {
            continue;
        }

        if used > limit {
            let resource = match usage_measure {
                UsageMeasure::RequestsPerMinute => "requests_per_minute",
                UsageMeasure::TokensPerMinute => "tokens_per_minute",
                UsageMeasure::TokensPerDay => "tokens_per_day",
                _ => "",
            };

            if let Some(client) = state.clickhouse_client.as_ref() {
                tracing::info!(
                    target: "user rate limit",
                    user_id = claims.user_id,
                    login = claims.github_user_login,
                    authn.jti = claims.jti,
                    is_staff = claims.is_staff,
                    provider = provider.to_string(),
                    model = model.name,
                    requests_this_minute = usage.requests_this_minute,
                    tokens_this_minute = usage.tokens_this_minute,
                    tokens_this_day = usage.tokens_this_day,
                    users_in_recent_minutes = users_in_recent_minutes,
                    users_in_recent_days = users_in_recent_days,
                    max_requests_per_minute = per_user_max_requests_per_minute,
                    max_tokens_per_minute = per_user_max_tokens_per_minute,
                    max_tokens_per_day = per_user_max_tokens_per_day,
                );

                report_llm_rate_limit(
                    client,
                    LlmRateLimitEventRow {
                        time: Utc::now().timestamp_millis(),
                        user_id: claims.user_id as i32,
                        is_staff: claims.is_staff,
                        plan: match claims.plan {
                            Plan::Free => "free".to_string(),
                            Plan::ZedPro => "zed_pro".to_string(),
                        },
                        model: model.name.clone(),
                        provider: provider.to_string(),
                        usage_measure: resource.to_string(),
                        requests_this_minute: usage.requests_this_minute as u64,
                        tokens_this_minute: usage.tokens_this_minute as u64,
                        tokens_this_day: usage.tokens_this_day as u64,
                        users_in_recent_minutes: users_in_recent_minutes as u64,
                        users_in_recent_days: users_in_recent_days as u64,
                        max_requests_per_minute: per_user_max_requests_per_minute as u64,
                        max_tokens_per_minute: per_user_max_tokens_per_minute as u64,
                        max_tokens_per_day: per_user_max_tokens_per_day as u64,
                    },
                )
                .await
                .log_err();
            }

            return Err(Error::http(
                StatusCode::TOO_MANY_REQUESTS,
                format!("Rate limit exceeded. Maximum {} reached.", resource),
            ));
        }
    }

    Ok(())
}

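/// A single chunk of a provider response: the serialized bytes to forward to
/// the client, plus whatever token counts could be parsed out of the chunk.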
struct CompletionChunk {
    bytes: Vec<u8>,
    input_tokens: usize,
    output_tokens: usize,
    cache_creation_input_tokens: usize,
    cache_read_input_tokens: usize,
}

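/// Wraps a provider's completion stream, accumulating token counts as chunks
/// pass through to the client. The totals are recorded in the `Drop`
/// implementation below, so usage is captured even if the client disconnects
/// mid-stream.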
struct TokenCountingStream<S> {
    state: Arc<LlmState>,
    claims: LlmTokenClaims,
    provider: LanguageModelProvider,
    model: String,
    input_tokens: usize,
    output_tokens: usize,
    cache_creation_input_tokens: usize,
    cache_read_input_tokens: usize,
    inner_stream: S,
}

impl<S> Stream for TokenCountingStream<S>
where
    S: Stream<Item = Result<CompletionChunk, anyhow::Error>> + Unpin,
{
    type Item = Result<Vec<u8>, anyhow::Error>;

    fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
        match Pin::new(&mut self.inner_stream).poll_next(cx) {
            Poll::Ready(Some(Ok(mut chunk))) => {
                // Emit newline-delimited JSON and add this chunk's token
                // counts to the running totals.
                chunk.bytes.push(b'\n');
                self.input_tokens += chunk.input_tokens;
                self.output_tokens += chunk.output_tokens;
                self.cache_creation_input_tokens += chunk.cache_creation_input_tokens;
                self.cache_read_input_tokens += chunk.cache_read_input_tokens;
                Poll::Ready(Some(Ok(chunk.bytes)))
            }
            Poll::Ready(Some(Err(e))) => Poll::Ready(Some(Err(e))),
            Poll::Ready(None) => Poll::Ready(None),
            Poll::Pending => Poll::Pending,
        }
    }
}

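/// Records the accumulated token usage when the stream is dropped, whether it
/// ran to completion or the client disconnected early. `Drop` cannot be
/// async, so the database write and telemetry reporting are spawned as a
/// detached task on the executor.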
impl<S> Drop for TokenCountingStream<S> {
    fn drop(&mut self) {
        let state = self.state.clone();
        let claims = self.claims.clone();
        let provider = self.provider;
        let model = std::mem::take(&mut self.model);
        let input_token_count = self.input_tokens;
        let output_token_count = self.output_tokens;
        let cache_creation_input_token_count = self.cache_creation_input_tokens;
        let cache_read_input_token_count = self.cache_read_input_tokens;
        self.state.executor.spawn_detached(async move {
            let usage = state
                .db
                .record_usage(
                    UserId::from_proto(claims.user_id),
                    claims.is_staff,
                    provider,
                    &model,
                    input_token_count,
                    cache_creation_input_token_count,
                    cache_read_input_token_count,
                    output_token_count,
                    Utc::now(),
                )
                .await
                .log_err();

            if let Some(usage) = usage {
                tracing::info!(
                    target: "user usage",
                    user_id = claims.user_id,
                    login = claims.github_user_login,
                    authn.jti = claims.jti,
                    is_staff = claims.is_staff,
                    requests_this_minute = usage.requests_this_minute,
                    tokens_this_minute = usage.tokens_this_minute,
                );

                if let Some(clickhouse_client) = state.clickhouse_client.as_ref() {
                    report_llm_usage(
                        clickhouse_client,
                        LlmUsageEventRow {
                            time: Utc::now().timestamp_millis(),
                            user_id: claims.user_id as i32,
                            is_staff: claims.is_staff,
                            plan: match claims.plan {
                                Plan::Free => "free".to_string(),
                                Plan::ZedPro => "zed_pro".to_string(),
                            },
                            model,
                            provider: provider.to_string(),
                            input_token_count: input_token_count as u64,
                            cache_creation_input_token_count: cache_creation_input_token_count
                                as u64,
                            cache_read_input_token_count: cache_read_input_token_count as u64,
                            output_token_count: output_token_count as u64,
                            requests_this_minute: usage.requests_this_minute as u64,
                            tokens_this_minute: usage.tokens_this_minute as u64,
                            tokens_this_day: usage.tokens_this_day as u64,
                            input_tokens_this_month: usage.input_tokens_this_month as u64,
                            cache_creation_input_tokens_this_month: usage
                                .cache_creation_input_tokens_this_month
                                as u64,
                            cache_read_input_tokens_this_month: usage
                                .cache_read_input_tokens_this_month
                                as u64,
                            output_tokens_this_month: usage.output_tokens_this_month as u64,
                            spending_this_month: usage.spending_this_month as u64,
                            lifetime_spending: usage.lifetime_spending as u64,
                        },
                    )
                    .await
                    .log_err();
                }
            }
        })
    }
}

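/// Spawns a detached background task that logs per-model active-user counts
/// and application-wide usage every 30 seconds.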
pub fn log_usage_periodically(state: Arc<LlmState>) {
    state.executor.clone().spawn_detached(async move {
        loop {
            state
                .executor
                .sleep(std::time::Duration::from_secs(30))
                .await;

            for provider in LanguageModelProvider::iter() {
                for model in state.db.model_names_for_provider(provider) {
                    if let Some(active_user_count) = state
                        .get_active_user_count(provider, &model)
                        .await
                        .log_err()
                    {
                        tracing::info!(
                            target: "active user counts",
                            provider = provider.to_string(),
                            model = model,
                            users_in_recent_minutes = active_user_count.users_in_recent_minutes,
                            users_in_recent_days = active_user_count.users_in_recent_days,
                        );
                    }
                }
            }

            if let Some(usages) = state
                .db
                .get_application_wide_usages_by_model(Utc::now())
                .await
                .log_err()
            {
                for usage in usages {
                    tracing::info!(
                        target: "computed usage",
                        provider = usage.provider.to_string(),
                        model = usage.model,
                        requests_this_minute = usage.requests_this_minute,
                        tokens_this_minute = usage.tokens_this_minute,
                    );
                }
            }
        }
    })
}