llm.rs

mod authorization;
pub mod db;
mod telemetry;
mod token;

use crate::{
    api::CloudflareIpCountryHeader, build_clickhouse_client, db::UserId, executor::Executor,
    Config, Error, Result,
};
use anyhow::{anyhow, Context as _};
use authorization::authorize_access_to_language_model;
use axum::{
    body::Body,
    http::{self, HeaderName, HeaderValue, Request, StatusCode},
    middleware::{self, Next},
    response::{IntoResponse, Response},
    routing::{get, post},
    Extension, Json, Router, TypedHeader,
};
use chrono::{DateTime, Duration, Utc};
use collections::HashMap;
use db::{usage_measure::UsageMeasure, ActiveUserCount, LlmDatabase};
use futures::{Stream, StreamExt as _};
use reqwest_client::ReqwestClient;
use rpc::{
    proto::Plan, LanguageModelProvider, ListModelsResponse, PerformCompletionParams,
    EXPIRED_LLM_TOKEN_HEADER_NAME,
};
use std::{
    pin::Pin,
    sync::Arc,
    task::{Context, Poll},
};
use strum::IntoEnumIterator;
use telemetry::{report_llm_rate_limit, report_llm_usage, LlmRateLimitEventRow, LlmUsageEventRow};
use tokio::sync::RwLock;
use util::ResultExt;

pub use token::*;

pub struct LlmState {
    pub config: Config,
    pub executor: Executor,
    pub db: Arc<LlmDatabase>,
    pub http_client: ReqwestClient,
    pub clickhouse_client: Option<clickhouse::Client>,
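    /// Cached active-user counts keyed by (provider, model name), refreshed
    /// once per `ACTIVE_USER_COUNT_CACHE_DURATION`.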
    active_user_count_by_model:
        RwLock<HashMap<(LanguageModelProvider, String), (DateTime<Utc>, ActiveUserCount)>>,
}

const ACTIVE_USER_COUNT_CACHE_DURATION: Duration = Duration::seconds(30);

impl LlmState {
    pub async fn new(config: Config, executor: Executor) -> Result<Arc<Self>> {
        let database_url = config
            .llm_database_url
            .as_ref()
            .ok_or_else(|| anyhow!("missing LLM_DATABASE_URL"))?;
        let max_connections = config
            .llm_database_max_connections
            .ok_or_else(|| anyhow!("missing LLM_DATABASE_MAX_CONNECTIONS"))?;

        let mut db_options = db::ConnectOptions::new(database_url);
        db_options.max_connections(max_connections);
        let mut db = LlmDatabase::new(db_options, executor.clone()).await?;
        db.initialize().await?;

        let db = Arc::new(db);

        let user_agent = format!("Zed Server/{}", env!("CARGO_PKG_VERSION"));
        let http_client =
            ReqwestClient::user_agent(&user_agent).context("failed to construct http client")?;

        let this = Self {
            executor,
            db,
            http_client,
            clickhouse_client: config
                .clickhouse_url
                .as_ref()
                .and_then(|_| build_clickhouse_client(&config).log_err()),
            active_user_count_by_model: RwLock::new(HashMap::default()),
            config,
        };

        Ok(Arc::new(this))
    }

    pub async fn get_active_user_count(
        &self,
        provider: LanguageModelProvider,
        model: &str,
    ) -> Result<ActiveUserCount> {
        let now = Utc::now();

        {
            let active_user_count_by_model = self.active_user_count_by_model.read().await;
            if let Some((last_updated, count)) =
                active_user_count_by_model.get(&(provider, model.to_string()))
            {
                if now - *last_updated < ACTIVE_USER_COUNT_CACHE_DURATION {
                    return Ok(*count);
                }
            }
        }
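
        // The cached count is missing or stale: take the write lock and
        // refresh from the database. If another task refreshed concurrently,
        // this simply overwrites the entry with an equally fresh value.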
        let mut cache = self.active_user_count_by_model.write().await;
        let new_count = self.db.get_active_user_count(provider, model, now).await?;
        cache.insert((provider, model.to_string()), (now, new_count));
        Ok(new_count)
    }
}

pub fn routes() -> Router<(), Body> {
    Router::new()
        .route("/models", get(list_models))
        .route("/completion", post(perform_completion))
        .layer(middleware::from_fn(validate_api_token))
}
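
// A minimal sketch of how `routes()` might be mounted on the server, assuming
// an axum 0.6-style entry point elsewhere (the `llm_state` and `addr` bindings
// here are hypothetical). The `Extension` layer is what `validate_api_token`
// relies on to fetch the `Arc<LlmState>` from request extensions:
//
//     let app = routes().layer(Extension(llm_state));
//     axum::Server::bind(&addr).serve(app.into_make_service()).await?;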

async fn validate_api_token<B>(mut req: Request<B>, next: Next<B>) -> impl IntoResponse {
    let token = req
        .headers()
        .get(http::header::AUTHORIZATION)
        .and_then(|header| header.to_str().ok())
        .ok_or_else(|| {
            Error::http(
                StatusCode::BAD_REQUEST,
                "missing authorization header".to_string(),
            )
        })?
        .strip_prefix("Bearer ")
        .ok_or_else(|| {
            Error::http(
                StatusCode::BAD_REQUEST,
                "invalid authorization header".to_string(),
            )
        })?;

    let state = req.extensions().get::<Arc<LlmState>>().unwrap();
    match LlmTokenClaims::validate(token, &state.config) {
        Ok(claims) => {
            if state.db.is_access_token_revoked(&claims.jti).await? {
                return Err(Error::http(
                    StatusCode::UNAUTHORIZED,
                    "unauthorized".to_string(),
                ));
            }

            tracing::Span::current()
                .record("user_id", claims.user_id)
                .record("login", claims.github_user_login.clone())
                .record("authn.jti", &claims.jti)
                .record("is_staff", claims.is_staff);

            req.extensions_mut().insert(claims);
            Ok::<_, Error>(next.run(req).await.into_response())
        }
        Err(ValidateLlmTokenError::Expired) => Err(Error::Http(
            StatusCode::UNAUTHORIZED,
            "unauthorized".to_string(),
            [(
                HeaderName::from_static(EXPIRED_LLM_TOKEN_HEADER_NAME),
                HeaderValue::from_static("true"),
            )]
            .into_iter()
            .collect(),
        )),
        Err(_err) => Err(Error::http(
            StatusCode::UNAUTHORIZED,
            "unauthorized".to_string(),
        )),
    }
}
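
// Every route above requires a bearer token minted by the `token` module,
// e.g. (hypothetical host and token value):
//
//     curl -H "Authorization: Bearer <llm-token>" https://<host>/models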

async fn list_models(
    Extension(state): Extension<Arc<LlmState>>,
    Extension(claims): Extension<LlmTokenClaims>,
    country_code_header: Option<TypedHeader<CloudflareIpCountryHeader>>,
) -> Result<Json<ListModelsResponse>> {
    let country_code = country_code_header.map(|header| header.to_string());

    let mut accessible_models = Vec::new();

    for (provider, model) in state.db.all_models() {
        let authorize_result = authorize_access_to_language_model(
            &state.config,
            &claims,
            country_code.as_deref(),
            provider,
            &model.name,
        );

        if authorize_result.is_ok() {
            accessible_models.push(rpc::LanguageModel {
                provider,
                name: model.name,
            });
        }
    }

    Ok(Json(ListModelsResponse {
        models: accessible_models,
    }))
}
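
// On success, `list_models` serializes to a JSON body along the lines of
// (field names assumed from the default serde representation of the rpc types):
//
//     {"models": [{"provider": "anthropic", "name": "claude-3-5-sonnet"}]}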

async fn perform_completion(
    Extension(state): Extension<Arc<LlmState>>,
    Extension(claims): Extension<LlmTokenClaims>,
    country_code_header: Option<TypedHeader<CloudflareIpCountryHeader>>,
    Json(params): Json<PerformCompletionParams>,
) -> Result<impl IntoResponse> {
    let model = normalize_model_name(
        state.db.model_names_for_provider(params.provider),
        params.model,
    );

    authorize_access_to_language_model(
        &state.config,
        &claims,
        country_code_header
            .map(|header| header.to_string())
            .as_deref(),
        params.provider,
        &model,
    )?;

    check_usage_limit(&state, params.provider, &model, &claims).await?;

    let stream = match params.provider {
        LanguageModelProvider::Anthropic => {
            let api_key = if claims.is_staff {
                state
                    .config
                    .anthropic_staff_api_key
                    .as_ref()
                    .context("no Anthropic staff API key configured on the server")?
            } else {
                state
                    .config
                    .anthropic_api_key
                    .as_ref()
                    .context("no Anthropic API key configured on the server")?
            };

            let mut request: anthropic::Request =
                serde_json::from_str(params.provider_request.get())?;

            // Override the model on the request with the latest version of the model that is
            // known to the server.
            //
            // Right now, we use the version defined in `model.id()`, but we will likely want
            // to change this code once a new version of an Anthropic model is released, so
            // that users can use the new version without having to update Zed.
            request.model = match model.as_str() {
                "claude-3-5-sonnet" => anthropic::Model::Claude3_5Sonnet.id().to_string(),
                "claude-3-opus" => anthropic::Model::Claude3Opus.id().to_string(),
                "claude-3-haiku" => anthropic::Model::Claude3Haiku.id().to_string(),
                "claude-3-sonnet" => anthropic::Model::Claude3Sonnet.id().to_string(),
                _ => request.model,
            };

            let (chunks, rate_limit_info) = anthropic::stream_completion_with_rate_limit_info(
                &state.http_client,
                anthropic::ANTHROPIC_API_URL,
                api_key,
                request,
                None,
            )
            .await
            .map_err(|err| match err {
                anthropic::AnthropicError::ApiError(ref api_error) => match api_error.code() {
                    Some(anthropic::ApiErrorCode::RateLimitError) => {
                        tracing::info!(
                            target: "upstream rate limit exceeded",
                            user_id = claims.user_id,
                            login = claims.github_user_login,
                            authn.jti = claims.jti,
                            is_staff = claims.is_staff,
                            provider = params.provider.to_string(),
                            model = model
                        );

                        Error::http(
                            StatusCode::TOO_MANY_REQUESTS,
                            "Upstream Anthropic rate limit exceeded.".to_string(),
                        )
                    }
                    Some(anthropic::ApiErrorCode::InvalidRequestError) => {
                        Error::http(StatusCode::BAD_REQUEST, api_error.message.clone())
                    }
                    Some(anthropic::ApiErrorCode::OverloadedError) => {
                        Error::http(StatusCode::SERVICE_UNAVAILABLE, api_error.message.clone())
                    }
                    Some(_) => {
                        Error::http(StatusCode::INTERNAL_SERVER_ERROR, api_error.message.clone())
                    }
                    None => Error::Internal(anyhow!(err)),
                },
                anthropic::AnthropicError::Other(err) => Error::Internal(err),
            })?;

            if let Some(rate_limit_info) = rate_limit_info {
                tracing::info!(
                    target: "upstream rate limit",
                    is_staff = claims.is_staff,
                    provider = params.provider.to_string(),
                    model = model,
                    tokens_remaining = rate_limit_info.tokens_remaining,
                    requests_remaining = rate_limit_info.requests_remaining,
                    requests_reset = ?rate_limit_info.requests_reset,
                    tokens_reset = ?rate_limit_info.tokens_reset,
                );
            }

            chunks
                .map(move |event| {
                    let chunk = event?;
                    let (input_tokens, output_tokens) = match &chunk {
                        anthropic::Event::MessageStart {
                            message: anthropic::Response { usage, .. },
                        }
                        | anthropic::Event::MessageDelta { usage, .. } => (
                            usage.input_tokens.unwrap_or(0) as usize,
                            usage.output_tokens.unwrap_or(0) as usize,
                        ),
                        _ => (0, 0),
                    };

                    anyhow::Ok((
                        serde_json::to_vec(&chunk).unwrap(),
                        input_tokens,
                        output_tokens,
                    ))
                })
                .boxed()
        }
        LanguageModelProvider::OpenAi => {
            let api_key = state
                .config
                .openai_api_key
                .as_ref()
                .context("no OpenAI API key configured on the server")?;
            let chunks = open_ai::stream_completion(
                &state.http_client,
                open_ai::OPEN_AI_API_URL,
                api_key,
                serde_json::from_str(params.provider_request.get())?,
                None,
            )
            .await?;

            chunks
                .map(|event| {
                    event.map(|chunk| {
                        let input_tokens =
                            chunk.usage.as_ref().map_or(0, |u| u.prompt_tokens) as usize;
                        let output_tokens =
                            chunk.usage.as_ref().map_or(0, |u| u.completion_tokens) as usize;
                        (
                            serde_json::to_vec(&chunk).unwrap(),
                            input_tokens,
                            output_tokens,
                        )
                    })
                })
                .boxed()
        }
        LanguageModelProvider::Google => {
            let api_key = state
                .config
                .google_ai_api_key
                .as_ref()
                .context("no Google AI API key configured on the server")?;
            let chunks = google_ai::stream_generate_content(
                &state.http_client,
                google_ai::API_URL,
                api_key,
                serde_json::from_str(params.provider_request.get())?,
                None,
            )
            .await?;

            chunks
                .map(|event| {
                    event.map(|chunk| {
                        // TODO: implement token counting for Google AI.
                        let input_tokens = 0;
                        let output_tokens = 0;
                        (
                            serde_json::to_vec(&chunk).unwrap(),
                            input_tokens,
                            output_tokens,
                        )
                    })
                })
                .boxed()
        }
    };

    Ok(Response::new(Body::wrap_stream(TokenCountingStream {
        state,
        claims,
        provider: params.provider,
        model,
        input_tokens: 0,
        output_tokens: 0,
        inner_stream: stream,
    })))
}

fn normalize_model_name(known_models: Vec<String>, name: String) -> String {
    if let Some(known_model_name) = known_models
        .iter()
        .filter(|known_model_name| name.starts_with(known_model_name.as_str()))
        .max_by_key(|known_model_name| known_model_name.len())
    {
        known_model_name.to_string()
    } else {
        name
    }
}
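
// An illustrative sanity test of the longest-known-prefix behavior: a dated
// model ID collapses to its canonical family name, while a name matching no
// known model passes through unchanged. (The model names here are examples.)
#[cfg(test)]
mod normalize_model_name_tests {
    use super::normalize_model_name;

    #[test]
    fn longest_known_prefix_wins() {
        let known = vec![
            "claude-3-5-sonnet".to_string(),
            "claude-3-sonnet".to_string(),
        ];
        assert_eq!(
            normalize_model_name(known.clone(), "claude-3-5-sonnet-20240620".to_string()),
            "claude-3-5-sonnet"
        );
        // A name with no known prefix is returned as-is.
        assert_eq!(normalize_model_name(known, "gpt-4o".to_string()), "gpt-4o");
    }
}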

/// The maximum lifetime spending an individual user can reach before being cut off.
///
/// Represented in cents: 1_000 * 100 cents works out to $1,000.
const LIFETIME_SPENDING_LIMIT_IN_CENTS: usize = 1_000 * 100;

async fn check_usage_limit(
    state: &Arc<LlmState>,
    provider: LanguageModelProvider,
    model_name: &str,
    claims: &LlmTokenClaims,
) -> Result<()> {
    let model = state.db.model(provider, model_name)?;
    let usage = state
        .db
        .get_usage(
            UserId::from_proto(claims.user_id),
            provider,
            model_name,
            Utc::now(),
        )
        .await?;

    if usage.lifetime_spending >= LIFETIME_SPENDING_LIMIT_IN_CENTS {
        return Err(Error::http(
            StatusCode::FORBIDDEN,
            "Maximum spending limit reached.".to_string(),
        ));
    }

    let active_users = state.get_active_user_count(provider, model_name).await?;

    let users_in_recent_minutes = active_users.users_in_recent_minutes.max(1);
    let users_in_recent_days = active_users.users_in_recent_days.max(1);

    let per_user_max_requests_per_minute =
        model.max_requests_per_minute as usize / users_in_recent_minutes;
    let per_user_max_tokens_per_minute =
        model.max_tokens_per_minute as usize / users_in_recent_minutes;
    let per_user_max_tokens_per_day = model.max_tokens_per_day as usize / users_in_recent_days;
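
    // Worked example: if a model allows 60 requests per minute in total and 6
    // users were active in recent minutes, each user may make 60 / 6 = 10
    // requests per minute. The `.max(1)` above guards against dividing by zero
    // when no users are active.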

    // Temporarily bypass rate-limiting for staff members.
    if claims.is_staff {
        return Ok(());
    }

    let checks = [
        (
            usage.requests_this_minute,
            per_user_max_requests_per_minute,
            UsageMeasure::RequestsPerMinute,
        ),
        (
            usage.tokens_this_minute,
            per_user_max_tokens_per_minute,
            UsageMeasure::TokensPerMinute,
        ),
        (
            usage.tokens_this_day,
            per_user_max_tokens_per_day,
            UsageMeasure::TokensPerDay,
        ),
    ];

    for (used, limit, usage_measure) in checks {
        if used > limit {
            let resource = match usage_measure {
                UsageMeasure::RequestsPerMinute => "requests_per_minute",
                UsageMeasure::TokensPerMinute => "tokens_per_minute",
                UsageMeasure::TokensPerDay => "tokens_per_day",
                _ => "",
            };

            if let Some(client) = state.clickhouse_client.as_ref() {
                tracing::info!(
                    target: "user rate limit",
                    user_id = claims.user_id,
                    login = claims.github_user_login,
                    authn.jti = claims.jti,
                    is_staff = claims.is_staff,
                    provider = provider.to_string(),
                    model = model.name,
                    requests_this_minute = usage.requests_this_minute,
                    tokens_this_minute = usage.tokens_this_minute,
                    tokens_this_day = usage.tokens_this_day,
                    users_in_recent_minutes = users_in_recent_minutes,
                    users_in_recent_days = users_in_recent_days,
                    max_requests_per_minute = per_user_max_requests_per_minute,
                    max_tokens_per_minute = per_user_max_tokens_per_minute,
                    max_tokens_per_day = per_user_max_tokens_per_day,
                );

                report_llm_rate_limit(
                    client,
                    LlmRateLimitEventRow {
                        time: Utc::now().timestamp_millis(),
                        user_id: claims.user_id as i32,
                        is_staff: claims.is_staff,
                        plan: match claims.plan {
                            Plan::Free => "free".to_string(),
                            Plan::ZedPro => "zed_pro".to_string(),
                        },
                        model: model.name.clone(),
                        provider: provider.to_string(),
                        usage_measure: resource.to_string(),
                        requests_this_minute: usage.requests_this_minute as u64,
                        tokens_this_minute: usage.tokens_this_minute as u64,
                        tokens_this_day: usage.tokens_this_day as u64,
                        users_in_recent_minutes: users_in_recent_minutes as u64,
                        users_in_recent_days: users_in_recent_days as u64,
                        max_requests_per_minute: per_user_max_requests_per_minute as u64,
                        max_tokens_per_minute: per_user_max_tokens_per_minute as u64,
                        max_tokens_per_day: per_user_max_tokens_per_day as u64,
                    },
                )
                .await
                .log_err();
            }

            return Err(Error::http(
                StatusCode::TOO_MANY_REQUESTS,
                format!("Rate limit exceeded. Maximum {} reached.", resource),
            ));
        }
    }

    Ok(())
}
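
/// Wraps a provider's completion stream, counting input and output tokens as
/// chunks pass through. Each chunk is re-emitted as a newline-delimited JSON
/// line, and the accumulated counts are recorded when the stream is dropped.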
struct TokenCountingStream<S> {
    state: Arc<LlmState>,
    claims: LlmTokenClaims,
    provider: LanguageModelProvider,
    model: String,
    input_tokens: usize,
    output_tokens: usize,
    inner_stream: S,
}

impl<S> Stream for TokenCountingStream<S>
where
    S: Stream<Item = Result<(Vec<u8>, usize, usize), anyhow::Error>> + Unpin,
{
    type Item = Result<Vec<u8>, anyhow::Error>;

    fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
        match Pin::new(&mut self.inner_stream).poll_next(cx) {
            Poll::Ready(Some(Ok((mut bytes, input_tokens, output_tokens)))) => {
                bytes.push(b'\n');
                self.input_tokens += input_tokens;
                self.output_tokens += output_tokens;
                Poll::Ready(Some(Ok(bytes)))
            }
            Poll::Ready(Some(Err(e))) => Poll::Ready(Some(Err(e))),
            Poll::Ready(None) => Poll::Ready(None),
            Poll::Pending => Poll::Pending,
        }
    }
}
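
// Usage is recorded in `Drop` so that tokens are counted even when the client
// disconnects before the stream is fully consumed.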
impl<S> Drop for TokenCountingStream<S> {
    fn drop(&mut self) {
        let state = self.state.clone();
        let claims = self.claims.clone();
        let provider = self.provider;
        let model = std::mem::take(&mut self.model);
        let input_token_count = self.input_tokens;
        let output_token_count = self.output_tokens;
        self.state.executor.spawn_detached(async move {
            let usage = state
                .db
                .record_usage(
                    UserId::from_proto(claims.user_id),
                    claims.is_staff,
                    provider,
                    &model,
                    input_token_count,
                    output_token_count,
                    Utc::now(),
                )
                .await
                .log_err();

            if let Some(usage) = usage {
                tracing::info!(
                    target: "user usage",
                    user_id = claims.user_id,
                    login = claims.github_user_login,
                    authn.jti = claims.jti,
                    is_staff = claims.is_staff,
                    requests_this_minute = usage.requests_this_minute,
                    tokens_this_minute = usage.tokens_this_minute,
                );

                if let Some(clickhouse_client) = state.clickhouse_client.as_ref() {
                    report_llm_usage(
                        clickhouse_client,
                        LlmUsageEventRow {
                            time: Utc::now().timestamp_millis(),
                            user_id: claims.user_id as i32,
                            is_staff: claims.is_staff,
                            plan: match claims.plan {
                                Plan::Free => "free".to_string(),
                                Plan::ZedPro => "zed_pro".to_string(),
                            },
                            model,
                            provider: provider.to_string(),
                            input_token_count: input_token_count as u64,
                            output_token_count: output_token_count as u64,
                            requests_this_minute: usage.requests_this_minute as u64,
                            tokens_this_minute: usage.tokens_this_minute as u64,
                            tokens_this_day: usage.tokens_this_day as u64,
                            input_tokens_this_month: usage.input_tokens_this_month as u64,
                            output_tokens_this_month: usage.output_tokens_this_month as u64,
                            spending_this_month: usage.spending_this_month as u64,
                            lifetime_spending: usage.lifetime_spending as u64,
                        },
                    )
                    .await
                    .log_err();
                }
            }
        })
    }
}

pub fn log_usage_periodically(state: Arc<LlmState>) {
    state.executor.clone().spawn_detached(async move {
        loop {
            state
                .executor
                .sleep(std::time::Duration::from_secs(30))
                .await;

            for provider in LanguageModelProvider::iter() {
                for model in state.db.model_names_for_provider(provider) {
                    if let Some(active_user_count) = state
                        .get_active_user_count(provider, &model)
                        .await
                        .log_err()
                    {
                        tracing::info!(
                            target: "active user counts",
                            provider = provider.to_string(),
                            model = model,
                            users_in_recent_minutes = active_user_count.users_in_recent_minutes,
                            users_in_recent_days = active_user_count.users_in_recent_days,
                        );
                    }
                }
            }

            if let Some(usages) = state
                .db
                .get_application_wide_usages_by_model(Utc::now())
                .await
                .log_err()
            {
                for usage in usages {
                    tracing::info!(
                        target: "computed usage",
                        provider = usage.provider.to_string(),
                        model = usage.model,
                        requests_this_minute = usage.requests_this_minute,
                        tokens_this_minute = usage.tokens_this_minute,
                    );
                }
            }
        }
    })
}
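
// Hypothetical call site, run once at server startup after `LlmState::new`:
//
//     let state = LlmState::new(config, executor).await?;
//     log_usage_periodically(state.clone());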