language_model.rs

mod model;
mod rate_limiter;
mod registry;
mod request;
mod role;
mod telemetry;

#[cfg(any(test, feature = "test-support"))]
pub mod fake_provider;

use anthropic::{AnthropicError, parse_prompt_too_long};
use anyhow::{Result, anyhow};
use client::Client;
use futures::FutureExt;
use futures::{StreamExt, future::BoxFuture, stream::BoxStream};
use gpui::{AnyElement, AnyView, App, AsyncApp, SharedString, Task, Window};
use http_client::{StatusCode, http};
use icons::IconName;
use parking_lot::Mutex;
use schemars::JsonSchema;
use serde::{Deserialize, Serialize, de::DeserializeOwned};
use std::ops::{Add, Sub};
use std::str::FromStr;
use std::sync::Arc;
use std::time::Duration;
use std::{fmt, io};
use thiserror::Error;
use util::serde::is_default;
use zed_llm_client::{CompletionMode, CompletionRequestStatus};

pub use crate::model::*;
pub use crate::rate_limiter::*;
pub use crate::registry::*;
pub use crate::request::*;
pub use crate::role::*;
pub use crate::telemetry::*;

pub const ANTHROPIC_PROVIDER_ID: LanguageModelProviderId =
    LanguageModelProviderId::new("anthropic");
pub const ANTHROPIC_PROVIDER_NAME: LanguageModelProviderName =
    LanguageModelProviderName::new("Anthropic");

pub const GOOGLE_PROVIDER_ID: LanguageModelProviderId = LanguageModelProviderId::new("google");
pub const GOOGLE_PROVIDER_NAME: LanguageModelProviderName =
    LanguageModelProviderName::new("Google AI");

pub const OPEN_AI_PROVIDER_ID: LanguageModelProviderId = LanguageModelProviderId::new("openai");
pub const OPEN_AI_PROVIDER_NAME: LanguageModelProviderName =
    LanguageModelProviderName::new("OpenAI");

pub const ZED_CLOUD_PROVIDER_ID: LanguageModelProviderId = LanguageModelProviderId::new("zed.dev");
pub const ZED_CLOUD_PROVIDER_NAME: LanguageModelProviderName =
    LanguageModelProviderName::new("Zed");

pub fn init(client: Arc<Client>, cx: &mut App) {
    init_settings(cx);
    RefreshLlmTokenListener::register(client.clone(), cx);
}

pub fn init_settings(cx: &mut App) {
    registry::init(cx);
}

/// Configuration for caching language model messages.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, JsonSchema)]
pub struct LanguageModelCacheConfiguration {
    pub max_cache_anchors: usize,
    pub should_speculate: bool,
    pub min_total_token: u64,
}
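
// A minimal sketch of how this configuration round-trips through serde; the
// field values below are illustrative, not provider defaults.
#[cfg(test)]
mod cache_configuration_example {
    use super::*;

    #[test]
    fn round_trips_through_json() {
        let config = LanguageModelCacheConfiguration {
            max_cache_anchors: 4,
            should_speculate: true,
            min_total_token: 10_000,
        };
        let json = serde_json::to_string(&config).unwrap();
        let parsed: LanguageModelCacheConfiguration = serde_json::from_str(&json).unwrap();
        assert_eq!(parsed, config);
    }
}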

/// A completion event from a language model.
#[derive(Debug, PartialEq, Clone, Serialize, Deserialize)]
pub enum LanguageModelCompletionEvent {
    StatusUpdate(CompletionRequestStatus),
    Stop(StopReason),
    Text(String),
    Thinking {
        text: String,
        signature: Option<String>,
    },
    RedactedThinking {
        data: String,
    },
    ToolUse(LanguageModelToolUse),
    ToolUseJsonParseError {
        id: LanguageModelToolUseId,
        tool_name: Arc<str>,
        raw_input: Arc<str>,
        json_parse_error: String,
    },
    StartMessage {
        message_id: String,
    },
    UsageUpdate(TokenUsage),
}
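
// A minimal sketch of folding a stream of completion events into user-visible
// text; real consumers also handle tool use, thinking, and usage updates.
#[cfg(test)]
#[allow(dead_code)]
fn collect_text_example(events: Vec<LanguageModelCompletionEvent>) -> String {
    let mut text = String::new();
    for event in events {
        match event {
            LanguageModelCompletionEvent::Text(chunk) => text.push_str(&chunk),
            // The remaining variants carry no user-visible text.
            _ => {}
        }
    }
    text
}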

#[derive(Error, Debug)]
pub enum LanguageModelCompletionError {
    #[error("prompt too large for context window")]
    PromptTooLarge { tokens: Option<u64> },
    #[error("missing {provider} API key")]
    NoApiKey { provider: LanguageModelProviderName },
    #[error("{provider}'s API rate limit exceeded")]
    RateLimitExceeded {
        provider: LanguageModelProviderName,
        retry_after: Option<Duration>,
    },
    #[error("{provider}'s API servers are overloaded right now")]
    ServerOverloaded {
        provider: LanguageModelProviderName,
        retry_after: Option<Duration>,
    },
    #[error("{provider}'s API server reported an internal server error: {message}")]
    ApiInternalServerError {
        provider: LanguageModelProviderName,
        message: String,
    },
    #[error("{message}")]
    UpstreamProviderError {
        message: String,
        status: StatusCode,
        retry_after: Option<Duration>,
    },
    #[error("HTTP response error from {provider}'s API: status {status_code} - {message:?}")]
    HttpResponseError {
        provider: LanguageModelProviderName,
        status_code: StatusCode,
        message: String,
    },

    // Client errors
    #[error("invalid request format to {provider}'s API: {message}")]
    BadRequestFormat {
        provider: LanguageModelProviderName,
        message: String,
    },
    #[error("authentication error with {provider}'s API: {message}")]
    AuthenticationError {
        provider: LanguageModelProviderName,
        message: String,
    },
    #[error("permission error with {provider}'s API: {message}")]
    PermissionError {
        provider: LanguageModelProviderName,
        message: String,
    },
    #[error("language model provider API endpoint not found")]
    ApiEndpointNotFound { provider: LanguageModelProviderName },
    #[error("I/O error reading response from {provider}'s API")]
    ApiReadResponseError {
        provider: LanguageModelProviderName,
        #[source]
        error: io::Error,
    },
    #[error("error serializing request to {provider} API")]
    SerializeRequest {
        provider: LanguageModelProviderName,
        #[source]
        error: serde_json::Error,
    },
    #[error("error building request body to {provider} API")]
    BuildRequestBody {
        provider: LanguageModelProviderName,
        #[source]
        error: http::Error,
    },
    #[error("error sending HTTP request to {provider} API")]
    HttpSend {
        provider: LanguageModelProviderName,
        #[source]
        error: anyhow::Error,
    },
    #[error("error deserializing {provider} API response")]
    DeserializeResponse {
        provider: LanguageModelProviderName,
        #[source]
        error: serde_json::Error,
    },

    // TODO: Ideally this would be removed in favor of having a comprehensive list of errors.
    #[error(transparent)]
    Other(#[from] anyhow::Error),
}

impl LanguageModelCompletionError {
    pub fn from_cloud_failure(
        upstream_provider: LanguageModelProviderName,
        code: String,
        message: String,
        retry_after: Option<Duration>,
    ) -> Self {
        if let Some(tokens) = parse_prompt_too_long(&message) {
            // TODO: Anthropic's PAYLOAD_TOO_LARGE response can currently be reported as an
            // INTERNAL_SERVER_ERROR. This is a temporary workaround that detects the case
            // where the token limit has been exceeded.
            Self::PromptTooLarge {
                tokens: Some(tokens),
            }
        } else if let Some(status_code) = code
            .strip_prefix("upstream_http_")
            .and_then(|code| StatusCode::from_str(code).ok())
        {
            Self::from_http_status(upstream_provider, status_code, message, retry_after)
        } else if let Some(status_code) = code
            .strip_prefix("http_")
            .and_then(|code| StatusCode::from_str(code).ok())
        {
            Self::from_http_status(ZED_CLOUD_PROVIDER_NAME, status_code, message, retry_after)
        } else {
            anyhow!("completion request failed, code: {code}, message: {message}").into()
        }
    }

    pub fn from_http_status(
        provider: LanguageModelProviderName,
        status_code: StatusCode,
        message: String,
        retry_after: Option<Duration>,
    ) -> Self {
        match status_code {
            StatusCode::BAD_REQUEST => Self::BadRequestFormat { provider, message },
            StatusCode::UNAUTHORIZED => Self::AuthenticationError { provider, message },
            StatusCode::FORBIDDEN => Self::PermissionError { provider, message },
            StatusCode::NOT_FOUND => Self::ApiEndpointNotFound { provider },
            StatusCode::PAYLOAD_TOO_LARGE => Self::PromptTooLarge {
                tokens: parse_prompt_too_long(&message),
            },
            StatusCode::TOO_MANY_REQUESTS => Self::RateLimitExceeded {
                provider,
                retry_after,
            },
            StatusCode::INTERNAL_SERVER_ERROR => Self::ApiInternalServerError { provider, message },
            StatusCode::SERVICE_UNAVAILABLE => Self::ServerOverloaded {
                provider,
                retry_after,
            },
            // Some providers (e.g. Anthropic) use the nonstandard 529 status to signal overload.
            _ if status_code.as_u16() == 529 => Self::ServerOverloaded {
                provider,
                retry_after,
            },
            _ => Self::HttpResponseError {
                provider,
                status_code,
                message,
            },
        }
    }
}
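
// Sketches of the mappings above: an "upstream_http_*" code is attributed to
// the upstream provider, and the nonstandard 529 status maps to
// `ServerOverloaded`. All literal values here are illustrative.
#[cfg(test)]
mod error_mapping_examples {
    use super::*;

    #[test]
    fn upstream_rate_limit_is_attributed_to_the_upstream_provider() {
        let error = LanguageModelCompletionError::from_cloud_failure(
            ANTHROPIC_PROVIDER_NAME,
            "upstream_http_429".to_string(),
            "too many requests".to_string(),
            Some(Duration::from_secs(30)),
        );
        assert!(matches!(
            error,
            LanguageModelCompletionError::RateLimitExceeded {
                retry_after: Some(_),
                ..
            }
        ));
    }

    #[test]
    fn status_529_maps_to_server_overloaded() {
        let error = LanguageModelCompletionError::from_http_status(
            ZED_CLOUD_PROVIDER_NAME,
            StatusCode::from_u16(529).unwrap(),
            "overloaded".to_string(),
            None,
        );
        assert!(matches!(
            error,
            LanguageModelCompletionError::ServerOverloaded { .. }
        ));
    }
}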

impl From<AnthropicError> for LanguageModelCompletionError {
    fn from(error: AnthropicError) -> Self {
        let provider = ANTHROPIC_PROVIDER_NAME;
        match error {
            AnthropicError::SerializeRequest(error) => Self::SerializeRequest { provider, error },
            AnthropicError::BuildRequestBody(error) => Self::BuildRequestBody { provider, error },
            AnthropicError::HttpSend(error) => Self::HttpSend { provider, error },
            AnthropicError::DeserializeResponse(error) => {
                Self::DeserializeResponse { provider, error }
            }
            AnthropicError::ReadResponse(error) => Self::ApiReadResponseError { provider, error },
            AnthropicError::HttpResponseError {
                status_code,
                message,
            } => Self::HttpResponseError {
                provider,
                status_code,
                message,
            },
            AnthropicError::RateLimit { retry_after } => Self::RateLimitExceeded {
                provider,
                retry_after: Some(retry_after),
            },
            AnthropicError::ServerOverloaded { retry_after } => Self::ServerOverloaded {
                provider,
                retry_after,
            },
            AnthropicError::ApiError(api_error) => api_error.into(),
        }
    }
}

impl From<anthropic::ApiError> for LanguageModelCompletionError {
    fn from(error: anthropic::ApiError) -> Self {
        use anthropic::ApiErrorCode::*;
        let provider = ANTHROPIC_PROVIDER_NAME;
        match error.code() {
            Some(code) => match code {
                InvalidRequestError => Self::BadRequestFormat {
                    provider,
                    message: error.message,
                },
                AuthenticationError => Self::AuthenticationError {
                    provider,
                    message: error.message,
                },
                PermissionError => Self::PermissionError {
                    provider,
                    message: error.message,
                },
                NotFoundError => Self::ApiEndpointNotFound { provider },
                RequestTooLarge => Self::PromptTooLarge {
                    tokens: parse_prompt_too_long(&error.message),
                },
                RateLimitError => Self::RateLimitExceeded {
                    provider,
                    retry_after: None,
                },
                ApiError => Self::ApiInternalServerError {
                    provider,
                    message: error.message,
                },
                OverloadedError => Self::ServerOverloaded {
                    provider,
                    retry_after: None,
                },
            },
            None => Self::Other(error.into()),
        }
    }
}

/// Indicates the format used to define the input schema for a language model tool.
#[derive(Debug, PartialEq, Eq, Clone, Copy, Hash)]
pub enum LanguageModelToolSchemaFormat {
    /// A JSON schema, see https://json-schema.org
    JsonSchema,
    /// A subset of an OpenAPI 3.0 schema object supported by Google AI, see https://ai.google.dev/api/caching#Schema
    JsonSchemaSubset,
}

#[derive(Debug, PartialEq, Clone, Copy, Serialize, Deserialize)]
#[serde(rename_all = "snake_case")]
pub enum StopReason {
    EndTurn,
    MaxTokens,
    ToolUse,
    Refusal,
}

#[derive(Debug, PartialEq, Clone, Copy, Serialize, Deserialize, Default)]
pub struct TokenUsage {
    #[serde(default, skip_serializing_if = "is_default")]
    pub input_tokens: u64,
    #[serde(default, skip_serializing_if = "is_default")]
    pub output_tokens: u64,
    #[serde(default, skip_serializing_if = "is_default")]
    pub cache_creation_input_tokens: u64,
    #[serde(default, skip_serializing_if = "is_default")]
    pub cache_read_input_tokens: u64,
}

impl TokenUsage {
    pub fn total_tokens(&self) -> u64 {
        self.input_tokens
            + self.output_tokens
            + self.cache_read_input_tokens
            + self.cache_creation_input_tokens
    }
}

impl Add<TokenUsage> for TokenUsage {
    type Output = Self;

    fn add(self, other: Self) -> Self {
        Self {
            input_tokens: self.input_tokens + other.input_tokens,
            output_tokens: self.output_tokens + other.output_tokens,
            cache_creation_input_tokens: self.cache_creation_input_tokens
                + other.cache_creation_input_tokens,
            cache_read_input_tokens: self.cache_read_input_tokens + other.cache_read_input_tokens,
        }
    }
}

impl Sub<TokenUsage> for TokenUsage {
    type Output = Self;

    fn sub(self, other: Self) -> Self {
        Self {
            input_tokens: self.input_tokens - other.input_tokens,
            output_tokens: self.output_tokens - other.output_tokens,
            cache_creation_input_tokens: self.cache_creation_input_tokens
                - other.cache_creation_input_tokens,
            cache_read_input_tokens: self.cache_read_input_tokens - other.cache_read_input_tokens,
        }
    }
}
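
// A small sketch of the arithmetic above: addition and subtraction are
// fieldwise, and subtraction underflows (panicking in debug builds) if the
// right-hand side is larger, so only subtract an earlier snapshot from a
// later one. The numbers are illustrative.
#[cfg(test)]
mod token_usage_examples {
    use super::*;

    #[test]
    fn totals_and_fieldwise_arithmetic() {
        let earlier = TokenUsage {
            input_tokens: 100,
            output_tokens: 20,
            ..Default::default()
        };
        let later = TokenUsage {
            input_tokens: 150,
            output_tokens: 75,
            cache_read_input_tokens: 30,
            ..Default::default()
        };
        assert_eq!(later.total_tokens(), 255);

        let delta = later - earlier;
        assert_eq!(delta.input_tokens, 50);
        assert_eq!(delta.output_tokens, 55);
        assert_eq!((earlier + delta).total_tokens(), later.total_tokens());
    }
}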

#[derive(Debug, PartialEq, Eq, Hash, Clone, Serialize, Deserialize)]
pub struct LanguageModelToolUseId(Arc<str>);

impl fmt::Display for LanguageModelToolUseId {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "{}", self.0)
    }
}

impl<T> From<T> for LanguageModelToolUseId
where
    T: Into<Arc<str>>,
{
    fn from(value: T) -> Self {
        Self(value.into())
    }
}

#[derive(Debug, PartialEq, Eq, Hash, Clone, Serialize, Deserialize)]
pub struct LanguageModelToolUse {
    pub id: LanguageModelToolUseId,
    pub name: Arc<str>,
    pub raw_input: String,
    pub input: serde_json::Value,
    pub is_input_complete: bool,
}

pub struct LanguageModelTextStream {
    pub message_id: Option<String>,
    pub stream: BoxStream<'static, Result<String, LanguageModelCompletionError>>,
    // Holds the complete token usage once the stream has finished.
    pub last_token_usage: Arc<Mutex<TokenUsage>>,
}

impl Default for LanguageModelTextStream {
    fn default() -> Self {
        Self {
            message_id: None,
            stream: Box::pin(futures::stream::empty()),
            last_token_usage: Arc::new(Mutex::new(TokenUsage::default())),
        }
    }
}
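
// Sketch: the default text stream is empty, so draining it yields no chunks
// and leaves the recorded token usage at zero. `block_on` assumes the
// `futures` crate's default `executor` feature.
#[cfg(test)]
mod text_stream_examples {
    use super::*;

    #[test]
    fn default_stream_is_empty() {
        let mut text_stream = LanguageModelTextStream::default();
        assert!(futures::executor::block_on(text_stream.stream.next()).is_none());
        assert_eq!(text_stream.last_token_usage.lock().total_tokens(), 0);
    }
}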

pub trait LanguageModel: Send + Sync {
    fn id(&self) -> LanguageModelId;
    fn name(&self) -> LanguageModelName;
    fn provider_id(&self) -> LanguageModelProviderId;
    fn provider_name(&self) -> LanguageModelProviderName;
    fn upstream_provider_id(&self) -> LanguageModelProviderId {
        self.provider_id()
    }
    fn upstream_provider_name(&self) -> LanguageModelProviderName {
        self.provider_name()
    }

    fn telemetry_id(&self) -> String;

    fn api_key(&self, _cx: &App) -> Option<String> {
        None
    }

    /// Whether this model supports images.
    fn supports_images(&self) -> bool;

    /// Whether this model supports tools.
    fn supports_tools(&self) -> bool;

    /// Whether this model supports choosing which tool to use.
    fn supports_tool_choice(&self, choice: LanguageModelToolChoice) -> bool;

    /// Whether this model supports "burn mode".
    fn supports_burn_mode(&self) -> bool {
        false
    }

    fn tool_input_format(&self) -> LanguageModelToolSchemaFormat {
        LanguageModelToolSchemaFormat::JsonSchema
    }

    fn max_token_count(&self) -> u64;
    /// Returns the maximum token count for this model in burn mode.
    /// Returns `None` if `supports_burn_mode` is `false`.
    fn max_token_count_in_burn_mode(&self) -> Option<u64> {
        None
    }
    fn max_output_tokens(&self) -> Option<u64> {
        None
    }

    fn count_tokens(
        &self,
        request: LanguageModelRequest,
        cx: &App,
    ) -> BoxFuture<'static, Result<u64>>;

    fn stream_completion(
        &self,
        request: LanguageModelRequest,
        cx: &AsyncApp,
    ) -> BoxFuture<
        'static,
        Result<
            BoxStream<'static, Result<LanguageModelCompletionEvent, LanguageModelCompletionError>>,
            LanguageModelCompletionError,
        >,
    >;

    fn stream_completion_text(
        &self,
        request: LanguageModelRequest,
        cx: &AsyncApp,
    ) -> BoxFuture<'static, Result<LanguageModelTextStream, LanguageModelCompletionError>> {
        let future = self.stream_completion(request, cx);

        async move {
            let events = future.await?;
            let mut events = events.fuse();
            let mut message_id = None;
            let mut first_item_text = None;
            let last_token_usage = Arc::new(Mutex::new(TokenUsage::default()));

            if let Some(first_event) = events.next().await {
                match first_event {
                    Ok(LanguageModelCompletionEvent::StartMessage { message_id: id }) => {
                        message_id = Some(id);
                    }
                    Ok(LanguageModelCompletionEvent::Text(text)) => {
                        first_item_text = Some(text);
                    }
                    _ => (),
                }
            }

            let stream = futures::stream::iter(first_item_text.map(Ok))
                .chain(events.filter_map({
                    let last_token_usage = last_token_usage.clone();
                    move |result| {
                        let last_token_usage = last_token_usage.clone();
                        async move {
                            match result {
                                Ok(LanguageModelCompletionEvent::StatusUpdate { .. }) => None,
                                Ok(LanguageModelCompletionEvent::StartMessage { .. }) => None,
                                Ok(LanguageModelCompletionEvent::Text(text)) => Some(Ok(text)),
                                Ok(LanguageModelCompletionEvent::Thinking { .. }) => None,
                                Ok(LanguageModelCompletionEvent::RedactedThinking { .. }) => None,
                                Ok(LanguageModelCompletionEvent::Stop(_)) => None,
                                Ok(LanguageModelCompletionEvent::ToolUse(_)) => None,
                                Ok(LanguageModelCompletionEvent::ToolUseJsonParseError {
                                    ..
                                }) => None,
                                Ok(LanguageModelCompletionEvent::UsageUpdate(token_usage)) => {
                                    *last_token_usage.lock() = token_usage;
                                    None
                                }
                                Err(err) => Some(Err(err)),
                            }
                        }
                    }
                }))
                .boxed();

            Ok(LanguageModelTextStream {
                message_id,
                stream,
                last_token_usage,
            })
        }
        .boxed()
    }

    fn cache_configuration(&self) -> Option<LanguageModelCacheConfiguration> {
        None
    }

    #[cfg(any(test, feature = "test-support"))]
    fn as_fake(&self) -> &fake_provider::FakeLanguageModel {
        unimplemented!()
    }
}
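
// A minimal sketch of consuming `stream_completion_text`; the model, request,
// and async context are assumed to be supplied by the surrounding application.
#[cfg(test)]
#[allow(dead_code)]
async fn collect_completion_text(
    model: &dyn LanguageModel,
    request: LanguageModelRequest,
    cx: &AsyncApp,
) -> Result<String, LanguageModelCompletionError> {
    let mut text_stream = model.stream_completion_text(request, cx).await?;
    let mut output = String::new();
    while let Some(chunk) = text_stream.stream.next().await {
        output.push_str(&chunk?);
    }
    Ok(output)
}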

pub trait LanguageModelExt: LanguageModel {
    fn max_token_count_for_mode(&self, mode: CompletionMode) -> u64 {
        match mode {
            CompletionMode::Normal => self.max_token_count(),
            CompletionMode::Max => self
                .max_token_count_in_burn_mode()
                .unwrap_or_else(|| self.max_token_count()),
        }
    }
}
impl LanguageModelExt for dyn LanguageModel {}

pub trait LanguageModelTool: 'static + DeserializeOwned + JsonSchema {
    fn name() -> String;
    fn description() -> String;
}
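
// A sketch of a tool type wired through `LanguageModelTool`; the struct, its
// field, and both strings are purely illustrative.
#[cfg(test)]
mod tool_example {
    use super::*;

    #[derive(Deserialize, JsonSchema)]
    #[allow(dead_code)]
    struct ReadFileTool {
        /// The path of the file to read.
        path: String,
    }

    impl LanguageModelTool for ReadFileTool {
        fn name() -> String {
            "read_file".into()
        }

        fn description() -> String {
            "Reads the file at the given path.".into()
        }
    }
}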

/// An error that occurred when trying to authenticate the language model provider.
#[derive(Debug, Error)]
pub enum AuthenticateError {
    #[error("credentials not found")]
    CredentialsNotFound,
    #[error(transparent)]
    Other(#[from] anyhow::Error),
}

pub trait LanguageModelProvider: 'static {
    fn id(&self) -> LanguageModelProviderId;
    fn name(&self) -> LanguageModelProviderName;
    fn icon(&self) -> IconName {
        IconName::ZedAssistant
    }
    fn default_model(&self, cx: &App) -> Option<Arc<dyn LanguageModel>>;
    fn default_fast_model(&self, cx: &App) -> Option<Arc<dyn LanguageModel>>;
    fn provided_models(&self, cx: &App) -> Vec<Arc<dyn LanguageModel>>;
    fn recommended_models(&self, _cx: &App) -> Vec<Arc<dyn LanguageModel>> {
        Vec::new()
    }
    fn is_authenticated(&self, cx: &App) -> bool;
    fn authenticate(&self, cx: &mut App) -> Task<Result<(), AuthenticateError>>;
    fn configuration_view(&self, window: &mut Window, cx: &mut App) -> AnyView;
    fn must_accept_terms(&self, _cx: &App) -> bool {
        false
    }
    fn render_accept_terms(
        &self,
        _view: LanguageModelProviderTosView,
        _cx: &mut App,
    ) -> Option<AnyElement> {
        None
    }
    fn reset_credentials(&self, cx: &mut App) -> Task<Result<()>>;
}

#[derive(PartialEq, Eq)]
pub enum LanguageModelProviderTosView {
    /// When there are some past interactions in the Agent Panel.
    ThreadEmptyState,
    /// When there are no past interactions in the Agent Panel.
    ThreadFreshStart,
    PromptEditorPopup,
    Configuration,
}

pub trait LanguageModelProviderState: 'static {
    type ObservableEntity;

    fn observable_entity(&self) -> Option<gpui::Entity<Self::ObservableEntity>>;

    fn subscribe<T: 'static>(
        &self,
        cx: &mut gpui::Context<T>,
        callback: impl Fn(&mut T, &mut gpui::Context<T>) + 'static,
    ) -> Option<gpui::Subscription> {
        let entity = self.observable_entity()?;
        Some(cx.observe(&entity, move |this, _, cx| {
            callback(this, cx);
        }))
    }
}

#[derive(Clone, Eq, PartialEq, Hash, Debug, Ord, PartialOrd, Serialize, Deserialize)]
pub struct LanguageModelId(pub SharedString);

#[derive(Clone, Eq, PartialEq, Hash, Debug, Ord, PartialOrd)]
pub struct LanguageModelName(pub SharedString);

#[derive(Clone, Eq, PartialEq, Hash, Debug, Ord, PartialOrd)]
pub struct LanguageModelProviderId(pub SharedString);

#[derive(Clone, Eq, PartialEq, Hash, Debug, Ord, PartialOrd)]
pub struct LanguageModelProviderName(pub SharedString);

impl LanguageModelProviderId {
    pub const fn new(id: &'static str) -> Self {
        Self(SharedString::new_static(id))
    }
}

impl LanguageModelProviderName {
    pub const fn new(name: &'static str) -> Self {
        Self(SharedString::new_static(name))
    }
}

impl fmt::Display for LanguageModelProviderId {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "{}", self.0)
    }
}

impl fmt::Display for LanguageModelProviderName {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "{}", self.0)
    }
}

impl From<String> for LanguageModelId {
    fn from(value: String) -> Self {
        Self(SharedString::from(value))
    }
}

impl From<String> for LanguageModelName {
    fn from(value: String) -> Self {
        Self(SharedString::from(value))
    }
}

impl From<String> for LanguageModelProviderId {
    fn from(value: String) -> Self {
        Self(SharedString::from(value))
    }
}

impl From<String> for LanguageModelProviderName {
    fn from(value: String) -> Self {
        Self(SharedString::from(value))
    }
}
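
// Sketches of the identifier plumbing above: the `const` constructors and the
// `From<String>`/`Display` round trip.
#[cfg(test)]
mod id_examples {
    use super::*;

    #[test]
    fn ids_and_names_display_their_inner_string() {
        assert_eq!(ANTHROPIC_PROVIDER_ID.to_string(), "anthropic");
        assert_eq!(ZED_CLOUD_PROVIDER_NAME.to_string(), "Zed");

        let name = LanguageModelProviderName::from("Acme AI".to_string());
        assert_eq!(name.to_string(), "Acme AI");

        let tool_use_id = LanguageModelToolUseId::from("toolu_123");
        assert_eq!(tool_use_id.to_string(), "toolu_123");
    }
}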