open_ai.rs

use anyhow::{Result, anyhow};
use collections::{BTreeMap, HashMap};
use futures::Stream;
use futures::{FutureExt, StreamExt, future::BoxFuture};
use gpui::{AnyView, App, AsyncApp, Context, Entity, SharedString, Task, Window};
use http_client::HttpClient;
use language_model::{
    ApiKeyState, AuthenticateError, EnvVar, IconOrSvg, LanguageModel, LanguageModelCompletionError,
    LanguageModelCompletionEvent, LanguageModelId, LanguageModelImage, LanguageModelName,
    LanguageModelProvider, LanguageModelProviderId, LanguageModelProviderName,
    LanguageModelProviderState, LanguageModelRequest, LanguageModelRequestMessage,
    LanguageModelToolChoice, LanguageModelToolResult, LanguageModelToolResultContent,
    LanguageModelToolUse, LanguageModelToolUseId, MessageContent, RateLimiter, Role, StopReason,
    TokenUsage, env_var,
};
use menu;
use open_ai::responses::{
    ResponseFunctionCallItem, ResponseFunctionCallOutputItem, ResponseInputContent,
    ResponseInputItem, ResponseMessageItem,
};
use open_ai::{
    ImageUrl, Model, OPEN_AI_API_URL, ReasoningEffort, ResponseStreamEvent,
    responses::{
        Request as ResponseRequest, ResponseOutputItem, ResponseSummary as ResponsesSummary,
        ResponseUsage as ResponsesUsage, StreamEvent as ResponsesStreamEvent, stream_response,
    },
    stream_completion,
};
use settings::{OpenAiAvailableModel as AvailableModel, Settings, SettingsStore};
use std::pin::Pin;
use std::str::FromStr as _;
use std::sync::{Arc, LazyLock};
use strum::IntoEnumIterator;
use ui::{ButtonLink, ConfiguredApiCard, List, ListBulletItem, prelude::*};
use ui_input::InputField;
use util::ResultExt;

const PROVIDER_ID: LanguageModelProviderId = language_model::OPEN_AI_PROVIDER_ID;
const PROVIDER_NAME: LanguageModelProviderName = language_model::OPEN_AI_PROVIDER_NAME;

const API_KEY_ENV_VAR_NAME: &str = "OPENAI_API_KEY";
static API_KEY_ENV_VAR: LazyLock<EnvVar> = env_var!(API_KEY_ENV_VAR_NAME);

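/// Settings for the OpenAI provider: the endpoint to talk to plus any extra
/// models configured by the user. An empty `api_url` falls back to
/// [`open_ai::OPEN_AI_API_URL`] (see [`OpenAiLanguageModelProvider::api_url`]).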
#[derive(Default, Clone, Debug, PartialEq)]
pub struct OpenAiSettings {
    pub api_url: String,
    pub available_models: Vec<AvailableModel>,
}

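/// The OpenAI implementation of [`LanguageModelProvider`]. It owns the shared
/// HTTP client and a [`State`] entity tracking the API key for the currently
/// configured endpoint.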
pub struct OpenAiLanguageModelProvider {
    http_client: Arc<dyn HttpClient>,
    state: Entity<State>,
}

pub struct State {
    api_key_state: ApiKeyState,
}

impl State {
    fn is_authenticated(&self) -> bool {
        self.api_key_state.has_key()
    }

    fn set_api_key(&mut self, api_key: Option<String>, cx: &mut Context<Self>) -> Task<Result<()>> {
        let api_url = OpenAiLanguageModelProvider::api_url(cx);
        self.api_key_state
            .store(api_url, api_key, |this| &mut this.api_key_state, cx)
    }

    fn authenticate(&mut self, cx: &mut Context<Self>) -> Task<Result<(), AuthenticateError>> {
        let api_url = OpenAiLanguageModelProvider::api_url(cx);
        self.api_key_state
            .load_if_needed(api_url, |this| &mut this.api_key_state, cx)
    }
}

impl OpenAiLanguageModelProvider {
    pub fn new(http_client: Arc<dyn HttpClient>, cx: &mut App) -> Self {
        let state = cx.new(|cx| {
            cx.observe_global::<SettingsStore>(|this: &mut State, cx| {
                let api_url = Self::api_url(cx);
                this.api_key_state
                    .handle_url_change(api_url, |this| &mut this.api_key_state, cx);
                cx.notify();
            })
            .detach();
            State {
                api_key_state: ApiKeyState::new(Self::api_url(cx), (*API_KEY_ENV_VAR).clone()),
            }
        });

        Self { http_client, state }
    }

    fn create_language_model(&self, model: open_ai::Model) -> Arc<dyn LanguageModel> {
        Arc::new(OpenAiLanguageModel {
            id: LanguageModelId::from(model.id().to_string()),
            model,
            state: self.state.clone(),
            http_client: self.http_client.clone(),
            request_limiter: RateLimiter::new(4),
        })
    }

    fn settings(cx: &App) -> &OpenAiSettings {
        &crate::AllLanguageModelSettings::get_global(cx).openai
    }

    fn api_url(cx: &App) -> SharedString {
        let api_url = &Self::settings(cx).api_url;
        if api_url.is_empty() {
            open_ai::OPEN_AI_API_URL.into()
        } else {
            SharedString::new(api_url.as_str())
        }
    }
}

impl LanguageModelProviderState for OpenAiLanguageModelProvider {
    type ObservableEntity = State;

    fn observable_entity(&self) -> Option<Entity<Self::ObservableEntity>> {
        Some(self.state.clone())
    }
}

impl LanguageModelProvider for OpenAiLanguageModelProvider {
    fn id(&self) -> LanguageModelProviderId {
        PROVIDER_ID
    }

    fn name(&self) -> LanguageModelProviderName {
        PROVIDER_NAME
    }

    fn icon(&self) -> IconOrSvg {
        IconOrSvg::Icon(IconName::AiOpenAi)
    }

    fn default_model(&self, _cx: &App) -> Option<Arc<dyn LanguageModel>> {
        Some(self.create_language_model(open_ai::Model::default()))
    }

    fn default_fast_model(&self, _cx: &App) -> Option<Arc<dyn LanguageModel>> {
        Some(self.create_language_model(open_ai::Model::default_fast()))
    }

    fn provided_models(&self, cx: &App) -> Vec<Arc<dyn LanguageModel>> {
        let mut models = BTreeMap::default();

        // Add base models from open_ai::Model::iter()
        for model in open_ai::Model::iter() {
            if !matches!(model, open_ai::Model::Custom { .. }) {
                models.insert(model.id().to_string(), model);
            }
        }

        // Override with available models from settings
        for model in &OpenAiLanguageModelProvider::settings(cx).available_models {
            models.insert(
                model.name.clone(),
                open_ai::Model::Custom {
                    name: model.name.clone(),
                    display_name: model.display_name.clone(),
                    max_tokens: model.max_tokens,
                    max_output_tokens: model.max_output_tokens,
                    max_completion_tokens: model.max_completion_tokens,
                    reasoning_effort: model.reasoning_effort.clone(),
                    supports_chat_completions: model.capabilities.chat_completions,
                },
            );
        }

        models
            .into_values()
            .map(|model| self.create_language_model(model))
            .collect()
    }

    fn is_authenticated(&self, cx: &App) -> bool {
        self.state.read(cx).is_authenticated()
    }

    fn authenticate(&self, cx: &mut App) -> Task<Result<(), AuthenticateError>> {
        self.state.update(cx, |state, cx| state.authenticate(cx))
    }

    fn configuration_view(
        &self,
        _target_agent: language_model::ConfigurationViewTargetAgent,
        window: &mut Window,
        cx: &mut App,
    ) -> AnyView {
        cx.new(|cx| ConfigurationView::new(self.state.clone(), window, cx))
            .into()
    }

    fn reset_credentials(&self, cx: &mut App) -> Task<Result<()>> {
        self.state
            .update(cx, |state, cx| state.set_api_key(None, cx))
    }
}

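/// A single OpenAI model exposed through the [`LanguageModel`] trait. Requests
/// go through a [`RateLimiter`] (created with a limit of 4) and are
/// authenticated with the key held in the shared [`State`].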
pub struct OpenAiLanguageModel {
    id: LanguageModelId,
    model: open_ai::Model,
    state: Entity<State>,
    http_client: Arc<dyn HttpClient>,
    request_limiter: RateLimiter,
}

impl OpenAiLanguageModel {
    fn stream_completion(
        &self,
        request: open_ai::Request,
        bypass_rate_limit: bool,
        cx: &AsyncApp,
    ) -> BoxFuture<'static, Result<futures::stream::BoxStream<'static, Result<ResponseStreamEvent>>>>
    {
        let http_client = self.http_client.clone();

        let (api_key, api_url) = self.state.read_with(cx, |state, cx| {
            let api_url = OpenAiLanguageModelProvider::api_url(cx);
            (state.api_key_state.key(&api_url), api_url)
        });

        let future = self.request_limiter.stream_with_bypass(
            async move {
                let provider = PROVIDER_NAME;
                let Some(api_key) = api_key else {
                    return Err(LanguageModelCompletionError::NoApiKey { provider });
                };
                let request = stream_completion(
                    http_client.as_ref(),
                    provider.0.as_str(),
                    &api_url,
                    &api_key,
                    request,
                );
                let response = request.await?;
                Ok(response)
            },
            bypass_rate_limit,
        );

        async move { Ok(future.await?.boxed()) }.boxed()
    }

    fn stream_response(
        &self,
        request: ResponseRequest,
        bypass_rate_limit: bool,
        cx: &AsyncApp,
    ) -> BoxFuture<'static, Result<futures::stream::BoxStream<'static, Result<ResponsesStreamEvent>>>>
    {
        let http_client = self.http_client.clone();

        let (api_key, api_url) = self.state.read_with(cx, |state, cx| {
            let api_url = OpenAiLanguageModelProvider::api_url(cx);
            (state.api_key_state.key(&api_url), api_url)
        });

        let provider = PROVIDER_NAME;
        let future = self.request_limiter.stream_with_bypass(
            async move {
                let Some(api_key) = api_key else {
                    return Err(LanguageModelCompletionError::NoApiKey { provider });
                };
                let request = stream_response(
                    http_client.as_ref(),
                    provider.0.as_str(),
                    &api_url,
                    &api_key,
                    request,
                );
                let response = request.await?;
                Ok(response)
            },
            bypass_rate_limit,
        );

        async move { Ok(future.await?.boxed()) }.boxed()
    }
}

impl LanguageModel for OpenAiLanguageModel {
    fn id(&self) -> LanguageModelId {
        self.id.clone()
    }

    fn name(&self) -> LanguageModelName {
        LanguageModelName::from(self.model.display_name().to_string())
    }

    fn provider_id(&self) -> LanguageModelProviderId {
        PROVIDER_ID
    }

    fn provider_name(&self) -> LanguageModelProviderName {
        PROVIDER_NAME
    }

    fn supports_tools(&self) -> bool {
        true
    }

    fn supports_images(&self) -> bool {
        use open_ai::Model;
        match &self.model {
            Model::FourOmni
            | Model::FourOmniMini
            | Model::FourPointOne
            | Model::FourPointOneMini
            | Model::FourPointOneNano
            | Model::Five
            | Model::FiveCodex
            | Model::FiveMini
            | Model::FiveNano
            | Model::FivePointOne
            | Model::FivePointTwo
            | Model::FivePointTwoCodex
            | Model::O1
            | Model::O3
            | Model::O4Mini => true,
            Model::ThreePointFiveTurbo
            | Model::Four
            | Model::FourTurbo
            | Model::O3Mini
            | Model::Custom { .. } => false,
        }
    }

    fn supports_tool_choice(&self, choice: LanguageModelToolChoice) -> bool {
        match choice {
            LanguageModelToolChoice::Auto => true,
            LanguageModelToolChoice::Any => true,
            LanguageModelToolChoice::None => true,
        }
    }

    fn supports_split_token_display(&self) -> bool {
        true
    }

    fn telemetry_id(&self) -> String {
        format!("openai/{}", self.model.id())
    }

    fn max_token_count(&self) -> u64 {
        self.model.max_token_count()
    }

    fn max_output_tokens(&self) -> Option<u64> {
        self.model.max_output_tokens()
    }

    fn count_tokens(
        &self,
        request: LanguageModelRequest,
        cx: &App,
    ) -> BoxFuture<'static, Result<u64>> {
        count_open_ai_tokens(request, self.model.clone(), cx)
    }

    fn stream_completion(
        &self,
        request: LanguageModelRequest,
        cx: &AsyncApp,
    ) -> BoxFuture<
        'static,
        Result<
            futures::stream::BoxStream<
                'static,
                Result<LanguageModelCompletionEvent, LanguageModelCompletionError>,
            >,
            LanguageModelCompletionError,
        >,
    > {
        let bypass_rate_limit = request.bypass_rate_limit;
        if self.model.supports_chat_completions() {
            let request = into_open_ai(
                request,
                self.model.id(),
                self.model.supports_parallel_tool_calls(),
                self.model.supports_prompt_cache_key(),
                self.max_output_tokens(),
                self.model.reasoning_effort(),
            );
            let completions = self.stream_completion(request, bypass_rate_limit, cx);
            async move {
                let mapper = OpenAiEventMapper::new();
                Ok(mapper.map_stream(completions.await?).boxed())
            }
            .boxed()
        } else {
            let request = into_open_ai_response(
                request,
                self.model.id(),
                self.model.supports_parallel_tool_calls(),
                self.model.supports_prompt_cache_key(),
                self.max_output_tokens(),
                self.model.reasoning_effort(),
            );
            let completions = self.stream_response(request, bypass_rate_limit, cx);
            async move {
                let mapper = OpenAiResponseEventMapper::new();
                Ok(mapper.map_stream(completions.await?).boxed())
            }
            .boxed()
        }
    }
}

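/// Converts a [`LanguageModelRequest`] into a Chat Completions
/// [`open_ai::Request`].
///
/// Consecutive text and image parts with the same role are merged into a
/// single request message; empty text parts are dropped (whitespace-only
/// ones too, for user messages). Tool uses become `tool_calls` on the
/// trailing assistant message and tool results become `Tool` messages.
/// Streaming is disabled for model ids starting with `o1-`, and when the
/// model supports parallel tool calls and tools are present, the flag is
/// pinned to `false` (the Agent expects at most one tool call per turn).
///
/// ```ignore
/// // Minimal sketch: `request` is assumed to hold one user text message.
/// let req = into_open_ai(request, "gpt-4o", true, true, None, None);
/// assert_eq!(req.model, "gpt-4o");
/// assert!(req.stream);
/// ```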
pub fn into_open_ai(
    request: LanguageModelRequest,
    model_id: &str,
    supports_parallel_tool_calls: bool,
    supports_prompt_cache_key: bool,
    max_output_tokens: Option<u64>,
    reasoning_effort: Option<ReasoningEffort>,
) -> open_ai::Request {
    let stream = !model_id.starts_with("o1-");

    let mut messages = Vec::new();
    for message in request.messages {
        for content in message.content {
            match content {
                MessageContent::Text(text) | MessageContent::Thinking { text, .. } => {
                    let should_add = if message.role == Role::User {
                        // Including whitespace-only user messages can cause errors with
                        // OpenAI-compatible APIs. See https://github.com/zed-industries/zed/issues/40097
                        !text.trim().is_empty()
                    } else {
                        !text.is_empty()
                    };
                    if should_add {
                        add_message_content_part(
                            open_ai::MessagePart::Text { text },
                            message.role,
                            &mut messages,
                        );
                    }
                }
                MessageContent::RedactedThinking(_) => {}
                MessageContent::Image(image) => {
                    add_message_content_part(
                        open_ai::MessagePart::Image {
                            image_url: ImageUrl {
                                url: image.to_base64_url(),
                                detail: None,
                            },
                        },
                        message.role,
                        &mut messages,
                    );
                }
                MessageContent::ToolUse(tool_use) => {
                    let tool_call = open_ai::ToolCall {
                        id: tool_use.id.to_string(),
                        content: open_ai::ToolCallContent::Function {
                            function: open_ai::FunctionContent {
                                name: tool_use.name.to_string(),
                                arguments: serde_json::to_string(&tool_use.input)
                                    .unwrap_or_default(),
                            },
                        },
                    };

                    if let Some(open_ai::RequestMessage::Assistant { tool_calls, .. }) =
                        messages.last_mut()
                    {
                        tool_calls.push(tool_call);
                    } else {
                        messages.push(open_ai::RequestMessage::Assistant {
                            content: None,
                            tool_calls: vec![tool_call],
                        });
                    }
                }
                MessageContent::ToolResult(tool_result) => {
                    let content = match &tool_result.content {
                        LanguageModelToolResultContent::Text(text) => {
                            vec![open_ai::MessagePart::Text {
                                text: text.to_string(),
                            }]
                        }
                        LanguageModelToolResultContent::Image(image) => {
                            vec![open_ai::MessagePart::Image {
                                image_url: ImageUrl {
                                    url: image.to_base64_url(),
                                    detail: None,
                                },
                            }]
                        }
                    };

                    messages.push(open_ai::RequestMessage::Tool {
                        content: content.into(),
                        tool_call_id: tool_result.tool_use_id.to_string(),
                    });
                }
            }
        }
    }

    open_ai::Request {
        model: model_id.into(),
        messages,
        stream,
        stop: request.stop,
        temperature: request.temperature.or(Some(1.0)),
        max_completion_tokens: max_output_tokens,
        parallel_tool_calls: if supports_parallel_tool_calls && !request.tools.is_empty() {
            // Disable parallel tool calls, as the Agent currently expects a maximum of one per turn.
            Some(false)
        } else {
            None
        },
        prompt_cache_key: if supports_prompt_cache_key {
            request.thread_id
        } else {
            None
        },
        tools: request
            .tools
            .into_iter()
            .map(|tool| open_ai::ToolDefinition::Function {
                function: open_ai::FunctionDefinition {
                    name: tool.name,
                    description: Some(tool.description),
                    parameters: Some(tool.input_schema),
                },
            })
            .collect(),
        tool_choice: request.tool_choice.map(|choice| match choice {
            LanguageModelToolChoice::Auto => open_ai::ToolChoice::Auto,
            LanguageModelToolChoice::Any => open_ai::ToolChoice::Required,
            LanguageModelToolChoice::None => open_ai::ToolChoice::None,
        }),
        reasoning_effort,
    }
}

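/// Converts a [`LanguageModelRequest`] into a Responses API
/// [`ResponseRequest`]. Message content is flattened into a sequence of
/// input items: text and images accumulate into message items, while tool
/// uses and tool results flush the accumulated parts and emit dedicated
/// function-call items (see [`append_message_to_response_items`]).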
pub fn into_open_ai_response(
    request: LanguageModelRequest,
    model_id: &str,
    supports_parallel_tool_calls: bool,
    supports_prompt_cache_key: bool,
    max_output_tokens: Option<u64>,
    reasoning_effort: Option<ReasoningEffort>,
) -> ResponseRequest {
    let stream = !model_id.starts_with("o1-");

    let LanguageModelRequest {
        thread_id,
        prompt_id: _,
        intent: _,
        messages,
        tools,
        tool_choice,
        stop: _,
        temperature,
        thinking_allowed: _,
        bypass_rate_limit: _,
    } = request;

    let mut input_items = Vec::new();
    for (index, message) in messages.into_iter().enumerate() {
        append_message_to_response_items(message, index, &mut input_items);
    }

    let tools: Vec<_> = tools
        .into_iter()
        .map(|tool| open_ai::responses::ToolDefinition::Function {
            name: tool.name,
            description: Some(tool.description),
            parameters: Some(tool.input_schema),
            strict: None,
        })
        .collect();

    ResponseRequest {
        model: model_id.into(),
        input: input_items,
        stream,
        temperature,
        top_p: None,
        max_output_tokens,
        parallel_tool_calls: if tools.is_empty() {
            None
        } else {
            Some(supports_parallel_tool_calls)
        },
        tool_choice: tool_choice.map(|choice| match choice {
            LanguageModelToolChoice::Auto => open_ai::ToolChoice::Auto,
            LanguageModelToolChoice::Any => open_ai::ToolChoice::Required,
            LanguageModelToolChoice::None => open_ai::ToolChoice::None,
        }),
        tools,
        prompt_cache_key: if supports_prompt_cache_key {
            thread_id
        } else {
            None
        },
        reasoning: reasoning_effort.map(|effort| open_ai::responses::ReasoningConfig { effort }),
    }
}

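/// Splits one request message into Responses API input items. Text and image
/// parts accumulate in `content_parts`; each tool use or tool result first
/// flushes the accumulated parts into a message item and then emits its own
/// item, preserving the relative ordering within the message.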
fn append_message_to_response_items(
    message: LanguageModelRequestMessage,
    index: usize,
    input_items: &mut Vec<ResponseInputItem>,
) {
    let mut content_parts: Vec<ResponseInputContent> = Vec::new();

    for content in message.content {
        match content {
            MessageContent::Text(text) => {
                push_response_text_part(&message.role, text, &mut content_parts);
            }
            MessageContent::Thinking { text, .. } => {
                push_response_text_part(&message.role, text, &mut content_parts);
            }
            MessageContent::RedactedThinking(_) => {}
            MessageContent::Image(image) => {
                push_response_image_part(&message.role, image, &mut content_parts);
            }
            MessageContent::ToolUse(tool_use) => {
                flush_response_parts(&message.role, index, &mut content_parts, input_items);
                let call_id = tool_use.id.to_string();
                input_items.push(ResponseInputItem::FunctionCall(ResponseFunctionCallItem {
                    call_id,
                    name: tool_use.name.to_string(),
                    arguments: tool_use.raw_input,
                }));
            }
            MessageContent::ToolResult(tool_result) => {
                flush_response_parts(&message.role, index, &mut content_parts, input_items);
                input_items.push(ResponseInputItem::FunctionCallOutput(
                    ResponseFunctionCallOutputItem {
                        call_id: tool_result.tool_use_id.to_string(),
                        output: tool_result_output(&tool_result),
                    },
                ));
            }
        }
    }

    flush_response_parts(&message.role, index, &mut content_parts, input_items);
}

fn push_response_text_part(
    role: &Role,
    text: impl Into<String>,
    parts: &mut Vec<ResponseInputContent>,
) {
    let text = text.into();
    if text.trim().is_empty() {
        return;
    }

    match role {
        Role::Assistant => parts.push(ResponseInputContent::OutputText {
            text,
            annotations: Vec::new(),
        }),
        _ => parts.push(ResponseInputContent::Text { text }),
    }
}

fn push_response_image_part(
    role: &Role,
    image: LanguageModelImage,
    parts: &mut Vec<ResponseInputContent>,
) {
    match role {
        Role::Assistant => parts.push(ResponseInputContent::OutputText {
            text: "[image omitted]".to_string(),
            annotations: Vec::new(),
        }),
        _ => parts.push(ResponseInputContent::Image {
            image_url: image.to_base64_url(),
        }),
    }
}

fn flush_response_parts(
    role: &Role,
    _index: usize,
    parts: &mut Vec<ResponseInputContent>,
    input_items: &mut Vec<ResponseInputItem>,
) {
    if parts.is_empty() {
        return;
    }

    let item = ResponseInputItem::Message(ResponseMessageItem {
        role: match role {
            Role::User => open_ai::Role::User,
            Role::Assistant => open_ai::Role::Assistant,
            Role::System => open_ai::Role::System,
        },
        content: parts.clone(),
    });

    input_items.push(item);
    parts.clear();
}

fn tool_result_output(result: &LanguageModelToolResult) -> String {
    if let Some(output) = &result.output {
        match output {
            serde_json::Value::String(text) => text.clone(),
            serde_json::Value::Null => String::new(),
            _ => output.to_string(),
        }
    } else {
        match &result.content {
            LanguageModelToolResultContent::Text(text) => text.to_string(),
            LanguageModelToolResultContent::Image(image) => image.to_base64_url(),
        }
    }
}

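/// Appends `new_part` to the message list, merging it into the trailing
/// message when that message has the same role (and, for assistants, already
/// carries content). Otherwise a fresh message of the matching variant is
/// pushed.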
fn add_message_content_part(
    new_part: open_ai::MessagePart,
    role: Role,
    messages: &mut Vec<open_ai::RequestMessage>,
) {
    match (role, messages.last_mut()) {
        (Role::User, Some(open_ai::RequestMessage::User { content }))
        | (
            Role::Assistant,
            Some(open_ai::RequestMessage::Assistant {
                content: Some(content),
                ..
            }),
        )
        | (Role::System, Some(open_ai::RequestMessage::System { content, .. })) => {
            content.push_part(new_part);
        }
        _ => {
            messages.push(match role {
                Role::User => open_ai::RequestMessage::User {
                    content: open_ai::MessageContent::from(vec![new_part]),
                },
                Role::Assistant => open_ai::RequestMessage::Assistant {
                    content: Some(open_ai::MessageContent::from(vec![new_part])),
                    tool_calls: Vec::new(),
                },
                Role::System => open_ai::RequestMessage::System {
                    content: open_ai::MessageContent::from(vec![new_part]),
                },
            });
        }
    }
}

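/// Maps a Chat Completions event stream onto [`LanguageModelCompletionEvent`]s.
/// Tool-call argument deltas are accumulated per tool-call index and only
/// emitted once the stream reports a `tool_calls` finish reason.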
pub struct OpenAiEventMapper {
    tool_calls_by_index: HashMap<usize, RawToolCall>,
}

impl OpenAiEventMapper {
    pub fn new() -> Self {
        Self {
            tool_calls_by_index: HashMap::default(),
        }
    }

    pub fn map_stream(
        mut self,
        events: Pin<Box<dyn Send + Stream<Item = Result<ResponseStreamEvent>>>>,
    ) -> impl Stream<Item = Result<LanguageModelCompletionEvent, LanguageModelCompletionError>>
    {
        events.flat_map(move |event| {
            futures::stream::iter(match event {
                Ok(event) => self.map_event(event),
                Err(error) => vec![Err(LanguageModelCompletionError::from(anyhow!(error)))],
            })
        })
    }

    pub fn map_event(
        &mut self,
        event: ResponseStreamEvent,
    ) -> Vec<Result<LanguageModelCompletionEvent, LanguageModelCompletionError>> {
        let mut events = Vec::new();
        if let Some(usage) = event.usage {
            events.push(Ok(LanguageModelCompletionEvent::UsageUpdate(TokenUsage {
                input_tokens: usage.prompt_tokens,
                output_tokens: usage.completion_tokens,
                cache_creation_input_tokens: 0,
                cache_read_input_tokens: 0,
            })));
        }

        let Some(choice) = event.choices.first() else {
            return events;
        };

        if let Some(delta) = choice.delta.as_ref() {
            if let Some(reasoning_content) = delta.reasoning_content.clone() {
                if !reasoning_content.is_empty() {
                    events.push(Ok(LanguageModelCompletionEvent::Thinking {
                        text: reasoning_content,
                        signature: None,
                    }));
                }
            }
            if let Some(content) = delta.content.clone() {
                if !content.is_empty() {
                    events.push(Ok(LanguageModelCompletionEvent::Text(content)));
                }
            }

            if let Some(tool_calls) = delta.tool_calls.as_ref() {
                for tool_call in tool_calls {
                    let entry = self.tool_calls_by_index.entry(tool_call.index).or_default();

                    if let Some(tool_id) = tool_call.id.clone() {
                        entry.id = tool_id;
                    }

                    if let Some(function) = tool_call.function.as_ref() {
                        if let Some(name) = function.name.clone() {
                            entry.name = name;
                        }

                        if let Some(arguments) = function.arguments.clone() {
                            entry.arguments.push_str(&arguments);
                        }
                    }
                }
            }
        }

        match choice.finish_reason.as_deref() {
            Some("stop") => {
                events.push(Ok(LanguageModelCompletionEvent::Stop(StopReason::EndTurn)));
            }
            Some("tool_calls") => {
                events.extend(self.tool_calls_by_index.drain().map(|(_, tool_call)| {
                    match serde_json::Value::from_str(&tool_call.arguments) {
                        Ok(input) => Ok(LanguageModelCompletionEvent::ToolUse(
                            LanguageModelToolUse {
                                id: tool_call.id.clone().into(),
                                name: tool_call.name.as_str().into(),
                                is_input_complete: true,
                                input,
                                raw_input: tool_call.arguments.clone(),
                                thought_signature: None,
                            },
                        )),
                        Err(error) => Ok(LanguageModelCompletionEvent::ToolUseJsonParseError {
                            id: tool_call.id.into(),
                            tool_name: tool_call.name.into(),
                            raw_input: tool_call.arguments.clone().into(),
                            json_parse_error: error.to_string(),
                        }),
                    }
                }));

                events.push(Ok(LanguageModelCompletionEvent::Stop(StopReason::ToolUse)));
            }
            Some(stop_reason) => {
                log::error!("Unexpected OpenAI stop_reason: {stop_reason:?}");
                events.push(Ok(LanguageModelCompletionEvent::Stop(StopReason::EndTurn)));
            }
            None => {}
        }

        events
    }
}

#[derive(Default)]
struct RawToolCall {
    id: String,
    name: String,
    arguments: String,
}

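/// Maps a Responses API event stream onto [`LanguageModelCompletionEvent`]s.
/// Function-call arguments stream in keyed by item id and are emitted as a
/// single tool-use event once complete; `pending_stop_reason` records that the
/// turn should stop with [`StopReason::ToolUse`] rather than `EndTurn`.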
pub struct OpenAiResponseEventMapper {
    function_calls_by_item: HashMap<String, PendingResponseFunctionCall>,
    pending_stop_reason: Option<StopReason>,
}

#[derive(Default)]
struct PendingResponseFunctionCall {
    call_id: String,
    name: Arc<str>,
    arguments: String,
}

impl OpenAiResponseEventMapper {
    pub fn new() -> Self {
        Self {
            function_calls_by_item: HashMap::default(),
            pending_stop_reason: None,
        }
    }

    pub fn map_stream(
        mut self,
        events: Pin<Box<dyn Send + Stream<Item = Result<ResponsesStreamEvent>>>>,
    ) -> impl Stream<Item = Result<LanguageModelCompletionEvent, LanguageModelCompletionError>>
    {
        events.flat_map(move |event| {
            futures::stream::iter(match event {
                Ok(event) => self.map_event(event),
                Err(error) => vec![Err(LanguageModelCompletionError::from(anyhow!(error)))],
            })
        })
    }

    pub fn map_event(
        &mut self,
        event: ResponsesStreamEvent,
    ) -> Vec<Result<LanguageModelCompletionEvent, LanguageModelCompletionError>> {
        match event {
            ResponsesStreamEvent::OutputItemAdded { item, .. } => {
                let mut events = Vec::new();

                match &item {
                    ResponseOutputItem::Message(message) => {
                        if let Some(id) = &message.id {
                            events.push(Ok(LanguageModelCompletionEvent::StartMessage {
                                message_id: id.clone(),
                            }));
                        }
                    }
                    ResponseOutputItem::FunctionCall(function_call) => {
                        if let Some(item_id) = function_call.id.clone() {
                            let call_id = function_call
                                .call_id
                                .clone()
                                .or_else(|| function_call.id.clone())
                                .unwrap_or_else(|| item_id.clone());
                            let entry = PendingResponseFunctionCall {
                                call_id,
                                name: Arc::<str>::from(
                                    function_call.name.clone().unwrap_or_default(),
                                ),
                                arguments: function_call.arguments.clone(),
                            };
                            self.function_calls_by_item.insert(item_id, entry);
                        }
                    }
                    ResponseOutputItem::Unknown => {}
                }
                events
            }
            ResponsesStreamEvent::OutputTextDelta { delta, .. } => {
                if delta.is_empty() {
                    Vec::new()
                } else {
                    vec![Ok(LanguageModelCompletionEvent::Text(delta))]
                }
            }
            ResponsesStreamEvent::FunctionCallArgumentsDelta { item_id, delta, .. } => {
                if let Some(entry) = self.function_calls_by_item.get_mut(&item_id) {
                    entry.arguments.push_str(&delta);
                }
                Vec::new()
            }
            ResponsesStreamEvent::FunctionCallArgumentsDone {
                item_id, arguments, ..
            } => {
                if let Some(mut entry) = self.function_calls_by_item.remove(&item_id) {
                    if !arguments.is_empty() {
                        entry.arguments = arguments;
                    }
                    let raw_input = entry.arguments.clone();
                    self.pending_stop_reason = Some(StopReason::ToolUse);
                    match serde_json::from_str::<serde_json::Value>(&entry.arguments) {
                        Ok(input) => {
                            vec![Ok(LanguageModelCompletionEvent::ToolUse(
                                LanguageModelToolUse {
                                    id: LanguageModelToolUseId::from(entry.call_id.clone()),
                                    name: entry.name.clone(),
                                    is_input_complete: true,
                                    input,
                                    raw_input,
                                    thought_signature: None,
                                },
                            ))]
                        }
                        Err(error) => {
                            vec![Ok(LanguageModelCompletionEvent::ToolUseJsonParseError {
                                id: LanguageModelToolUseId::from(entry.call_id.clone()),
                                tool_name: entry.name.clone(),
                                raw_input: Arc::<str>::from(raw_input),
                                json_parse_error: error.to_string(),
                            })]
                        }
                    }
                } else {
                    Vec::new()
                }
            }
            ResponsesStreamEvent::Completed { response } => {
                self.handle_completion(response, StopReason::EndTurn)
            }
            ResponsesStreamEvent::Incomplete { response } => {
                let reason = response
                    .status_details
                    .as_ref()
                    .and_then(|details| details.reason.as_deref());
                let stop_reason = match reason {
                    Some("max_output_tokens") => StopReason::MaxTokens,
                    Some("content_filter") => {
                        self.pending_stop_reason = Some(StopReason::Refusal);
                        StopReason::Refusal
                    }
                    _ => self
                        .pending_stop_reason
                        .take()
                        .unwrap_or(StopReason::EndTurn),
                };

                let mut events = Vec::new();
                if self.pending_stop_reason.is_none() {
                    events.extend(self.emit_tool_calls_from_output(&response.output));
                }
                if let Some(usage) = response.usage.as_ref() {
                    events.push(Ok(LanguageModelCompletionEvent::UsageUpdate(
                        token_usage_from_response_usage(usage),
                    )));
                }
                events.push(Ok(LanguageModelCompletionEvent::Stop(stop_reason)));
                events
            }
            ResponsesStreamEvent::Failed { response } => {
                let message = response
                    .status_details
                    .and_then(|details| details.error)
                    .map(|error| error.to_string())
                    .unwrap_or_else(|| "response failed".to_string());
                vec![Err(LanguageModelCompletionError::Other(anyhow!(message)))]
            }
            ResponsesStreamEvent::Error { error }
            | ResponsesStreamEvent::GenericError { error } => {
                vec![Err(LanguageModelCompletionError::Other(anyhow!(
                    "{error:?}"
                )))]
            }
            ResponsesStreamEvent::OutputTextDone { .. } => Vec::new(),
            ResponsesStreamEvent::OutputItemDone { .. }
            | ResponsesStreamEvent::ContentPartAdded { .. }
            | ResponsesStreamEvent::ContentPartDone { .. }
            | ResponsesStreamEvent::Created { .. }
            | ResponsesStreamEvent::InProgress { .. }
            | ResponsesStreamEvent::Unknown => Vec::new(),
        }
    }

    fn handle_completion(
        &mut self,
        response: ResponsesSummary,
        default_reason: StopReason,
    ) -> Vec<Result<LanguageModelCompletionEvent, LanguageModelCompletionError>> {
        let mut events = Vec::new();

        if self.pending_stop_reason.is_none() {
            events.extend(self.emit_tool_calls_from_output(&response.output));
        }

        if let Some(usage) = response.usage.as_ref() {
            events.push(Ok(LanguageModelCompletionEvent::UsageUpdate(
                token_usage_from_response_usage(usage),
            )));
        }

        let stop_reason = self.pending_stop_reason.take().unwrap_or(default_reason);
        events.push(Ok(LanguageModelCompletionEvent::Stop(stop_reason)));
        events
    }

    fn emit_tool_calls_from_output(
        &mut self,
        output: &[ResponseOutputItem],
    ) -> Vec<Result<LanguageModelCompletionEvent, LanguageModelCompletionError>> {
        let mut events = Vec::new();
        for item in output {
            if let ResponseOutputItem::FunctionCall(function_call) = item {
                let Some(call_id) = function_call
                    .call_id
                    .clone()
                    .or_else(|| function_call.id.clone())
                else {
                    log::error!(
                        "Function call item missing both call_id and id: {:?}",
                        function_call
                    );
                    continue;
                };
                let name: Arc<str> = Arc::from(function_call.name.clone().unwrap_or_default());
                let arguments = &function_call.arguments;
                if !arguments.is_empty() {
                    self.pending_stop_reason = Some(StopReason::ToolUse);
                    match serde_json::from_str::<serde_json::Value>(arguments) {
                        Ok(input) => {
                            events.push(Ok(LanguageModelCompletionEvent::ToolUse(
                                LanguageModelToolUse {
                                    id: LanguageModelToolUseId::from(call_id.clone()),
                                    name: name.clone(),
                                    is_input_complete: true,
                                    input,
                                    raw_input: arguments.clone(),
                                    thought_signature: None,
                                },
                            )));
                        }
                        Err(error) => {
                            events.push(Ok(LanguageModelCompletionEvent::ToolUseJsonParseError {
                                id: LanguageModelToolUseId::from(call_id.clone()),
                                tool_name: name.clone(),
                                raw_input: Arc::<str>::from(arguments.clone()),
                                json_parse_error: error.to_string(),
                            }));
                        }
                    }
                }
            }
        }
        events
    }
}

fn token_usage_from_response_usage(usage: &ResponsesUsage) -> TokenUsage {
    TokenUsage {
        input_tokens: usage.input_tokens.unwrap_or_default(),
        output_tokens: usage.output_tokens.unwrap_or_default(),
        cache_creation_input_tokens: 0,
        cache_read_input_tokens: 0,
    }
}

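/// Flattens request messages into the shape `tiktoken_rs` expects for token
/// counting: the role rendered as a string plus the message's
/// `string_contents()`.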
pub(crate) fn collect_tiktoken_messages(
    request: LanguageModelRequest,
) -> Vec<tiktoken_rs::ChatCompletionRequestMessage> {
    request
        .messages
        .into_iter()
        .map(|message| tiktoken_rs::ChatCompletionRequestMessage {
            role: match message.role {
                Role::User => "user".into(),
                Role::Assistant => "assistant".into(),
                Role::System => "system".into(),
            },
            content: Some(message.string_contents()),
            name: None,
            function_call: None,
        })
        .collect::<Vec<_>>()
}

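/// Counts tokens for `request` on a background thread using `tiktoken_rs`.
/// Models without dedicated tokenizer support are mapped onto a close
/// substitute: large-context custom models (>= 100k tokens) onto `gpt-4o`
/// (o200k_base), other custom models onto `gpt-4` (cl100k_base), and the
/// GPT-5.x variants onto `gpt-5`.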
pub fn count_open_ai_tokens(
    request: LanguageModelRequest,
    model: Model,
    cx: &App,
) -> BoxFuture<'static, Result<u64>> {
    cx.background_spawn(async move {
        let messages = collect_tiktoken_messages(request);
        match model {
            Model::Custom { max_tokens, .. } => {
                let model = if max_tokens >= 100_000 {
                    // A context of 100k tokens or more suggests the o200k_base tokenizer
                    // introduced with gpt-4o
                    "gpt-4o"
                } else {
                    // Otherwise fall back to gpt-4, since only cl100k_base and o200k_base are
                    // supported with this tiktoken method
                    "gpt-4"
                };
                tiktoken_rs::num_tokens_from_messages(model, &messages)
            }
            // Models currently supported by tiktoken_rs.
            // Sometimes tiktoken-rs is behind on model support. If that is the case, add a new
            // match arm with an override. We enumerate all supported models here so that we can
            // check whether new models are supported yet or not.
            Model::ThreePointFiveTurbo
            | Model::Four
            | Model::FourTurbo
            | Model::FourOmni
            | Model::FourOmniMini
            | Model::FourPointOne
            | Model::FourPointOneMini
            | Model::FourPointOneNano
            | Model::O1
            | Model::O3
            | Model::O3Mini
            | Model::O4Mini
            | Model::Five
            | Model::FiveCodex
            | Model::FiveMini
            | Model::FiveNano => tiktoken_rs::num_tokens_from_messages(model.id(), &messages),
            // GPT-5.1, 5.2, and 5.2-codex don't have dedicated tiktoken support; use the gpt-5
            // tokenizer for them.
            Model::FivePointOne | Model::FivePointTwo | Model::FivePointTwoCodex => {
                tiktoken_rs::num_tokens_from_messages("gpt-5", &messages)
            }
        }
        .map(|tokens| tokens as u64)
    })
    .boxed()
}

struct ConfigurationView {
    api_key_editor: Entity<InputField>,
    state: Entity<State>,
    load_credentials_task: Option<Task<()>>,
}

impl ConfigurationView {
    fn new(state: Entity<State>, window: &mut Window, cx: &mut Context<Self>) -> Self {
        let api_key_editor = cx.new(|cx| {
            InputField::new(
                window,
                cx,
                "sk-000000000000000000000000000000000000000000000000",
            )
        });

        cx.observe(&state, |_, _, cx| {
            cx.notify();
        })
        .detach();

        let load_credentials_task = Some(cx.spawn_in(window, {
            let state = state.clone();
            async move |this, cx| {
                let task = state.update(cx, |state, cx| state.authenticate(cx));
                // We don't log an error, because "not signed in" is also an error.
                let _ = task.await;
                this.update(cx, |this, cx| {
                    this.load_credentials_task = None;
                    cx.notify();
                })
                .log_err();
            }
        }));

        Self {
            api_key_editor,
            state,
            load_credentials_task,
        }
    }

    fn save_api_key(&mut self, _: &menu::Confirm, window: &mut Window, cx: &mut Context<Self>) {
        let api_key = self.api_key_editor.read(cx).text(cx).trim().to_string();
        if api_key.is_empty() {
            return;
        }

        // URL changes can cause the editor to be displayed again
        self.api_key_editor
            .update(cx, |editor, cx| editor.set_text("", window, cx));

        let state = self.state.clone();
        cx.spawn_in(window, async move |_, cx| {
            state
                .update(cx, |state, cx| state.set_api_key(Some(api_key), cx))
                .await
        })
        .detach_and_log_err(cx);
    }

    fn reset_api_key(&mut self, window: &mut Window, cx: &mut Context<Self>) {
        self.api_key_editor
            .update(cx, |input, cx| input.set_text("", window, cx));

        let state = self.state.clone();
        cx.spawn_in(window, async move |_, cx| {
            state
                .update(cx, |state, cx| state.set_api_key(None, cx))
                .await
        })
        .detach_and_log_err(cx);
    }

    fn should_render_editor(&self, cx: &mut Context<Self>) -> bool {
        !self.state.read(cx).is_authenticated()
    }
}

impl Render for ConfigurationView {
    fn render(&mut self, _: &mut Window, cx: &mut Context<Self>) -> impl IntoElement {
        let env_var_set = self.state.read(cx).api_key_state.is_from_env_var();
        let configured_card_label = if env_var_set {
            format!("API key set in {API_KEY_ENV_VAR_NAME} environment variable")
        } else {
            let api_url = OpenAiLanguageModelProvider::api_url(cx);
            if api_url == OPEN_AI_API_URL {
                "API key configured".to_string()
            } else {
                format!("API key configured for {}", api_url)
            }
        };

        let api_key_section = if self.should_render_editor(cx) {
            v_flex()
                .on_action(cx.listener(Self::save_api_key))
                .child(Label::new("To use Zed's agent with OpenAI, you need to add an API key. Follow these steps:"))
                .child(
                    List::new()
                        .child(
                            ListBulletItem::new("")
                                .child(Label::new("Create one by visiting"))
                                .child(ButtonLink::new("OpenAI's console", "https://platform.openai.com/api-keys"))
                        )
                        .child(
                            ListBulletItem::new("Ensure your OpenAI account has credits")
                        )
                        .child(
                            ListBulletItem::new("Paste your API key below and hit enter to start using the agent")
                        ),
                )
                .child(self.api_key_editor.clone())
                .child(
                    Label::new(format!(
                        "You can also set the {API_KEY_ENV_VAR_NAME} environment variable and restart Zed."
                    ))
                    .size(LabelSize::Small)
                    .color(Color::Muted),
                )
                .child(
                    Label::new(
                        "Note that having a subscription for another service like GitHub Copilot won't work.",
                    )
                    .size(LabelSize::Small).color(Color::Muted),
                )
                .into_any_element()
        } else {
            ConfiguredApiCard::new(configured_card_label)
                .disabled(env_var_set)
                .on_click(cx.listener(|this, _, window, cx| this.reset_api_key(window, cx)))
                .when(env_var_set, |this| {
                    this.tooltip_label(format!("To reset your API key, unset the {API_KEY_ENV_VAR_NAME} environment variable."))
                })
                .into_any_element()
        };

        let compatible_api_section = h_flex()
            .mt_1p5()
            .gap_0p5()
            .flex_wrap()
            .when(self.should_render_editor(cx), |this| {
                this.pt_1p5()
                    .border_t_1()
                    .border_color(cx.theme().colors().border_variant)
            })
            .child(
                h_flex()
                    .gap_2()
                    .child(
                        Icon::new(IconName::Info)
                            .size(IconSize::XSmall)
                            .color(Color::Muted),
                    )
                    .child(Label::new("Zed also supports OpenAI-compatible models.")),
            )
            .child(
                Button::new("docs", "Learn More")
                    .icon(IconName::ArrowUpRight)
                    .icon_size(IconSize::Small)
                    .icon_color(Color::Muted)
                    .on_click(move |_, _window, cx| {
                        cx.open_url("https://zed.dev/docs/ai/llm-providers#openai-api-compatible")
                    }),
            );

        if self.load_credentials_task.is_some() {
            div().child(Label::new("Loading credentials…")).into_any()
        } else {
            v_flex()
                .size_full()
                .child(api_key_section)
                .child(compatible_api_section)
                .into_any()
        }
    }
}

1385#[cfg(test)]
1386mod tests {
1387    use futures::{StreamExt, executor::block_on};
1388    use gpui::TestAppContext;
1389    use language_model::{LanguageModelRequestMessage, LanguageModelRequestTool};
1390    use open_ai::responses::{
1391        ResponseFunctionToolCall, ResponseOutputItem, ResponseOutputMessage, ResponseStatusDetails,
1392        ResponseSummary, ResponseUsage, StreamEvent as ResponsesStreamEvent,
1393    };
1394    use pretty_assertions::assert_eq;
1395    use serde_json::json;
1396
1397    use super::*;
1398
1399    fn map_response_events(events: Vec<ResponsesStreamEvent>) -> Vec<LanguageModelCompletionEvent> {
1400        block_on(async {
1401            OpenAiResponseEventMapper::new()
1402                .map_stream(Box::pin(futures::stream::iter(events.into_iter().map(Ok))))
1403                .collect::<Vec<_>>()
1404                .await
1405                .into_iter()
1406                .map(Result::unwrap)
1407                .collect()
1408        })
1409    }
1410
    fn response_item_message(id: &str) -> ResponseOutputItem {
        ResponseOutputItem::Message(ResponseOutputMessage {
            id: Some(id.to_string()),
            role: Some("assistant".to_string()),
            status: Some("in_progress".to_string()),
            content: vec![],
        })
    }

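    /// An in-progress `get_weather` function call (wire-level id `call_123`)
    /// with the given argument string; `None` produces empty arguments.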
    fn response_item_function_call(id: &str, args: Option<&str>) -> ResponseOutputItem {
        ResponseOutputItem::FunctionCall(ResponseFunctionToolCall {
            id: Some(id.to_string()),
            status: Some("in_progress".to_string()),
            name: Some("get_weather".to_string()),
            call_id: Some("call_123".to_string()),
            arguments: args.map(|s| s.to_string()).unwrap_or_default(),
        })
    }

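    // Every `Model` variant must have a tiktoken-rs tokenizer mapping; token
    // counting is async, so each count is blocked on via the foreground
    // executor.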
    #[gpui::test]
    fn tiktoken_rs_support(cx: &TestAppContext) {
        let request = LanguageModelRequest {
            thread_id: None,
            prompt_id: None,
            intent: None,
            messages: vec![LanguageModelRequestMessage {
                role: Role::User,
                content: vec![MessageContent::Text("message".into())],
                cache: false,
                reasoning_details: None,
            }],
            tools: vec![],
            tool_choice: None,
            stop: vec![],
            temperature: None,
            thinking_allowed: true,
            bypass_rate_limit: false,
        };

        // Validate that all models are supported by tiktoken-rs
        for model in Model::iter() {
            let count = cx
                .foreground_executor()
                .block_on(count_open_ai_tokens(
                    request.clone(),
                    model,
                    &cx.app.borrow(),
                ))
                .unwrap();
            assert!(count > 0);
        }
    }

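    // A streamed text response should surface StartMessage, the text delta,
    // a usage update, and an EndTurn stop, in that order.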
    #[test]
    fn responses_stream_maps_text_and_usage() {
        let events = vec![
            ResponsesStreamEvent::OutputItemAdded {
                output_index: 0,
                sequence_number: None,
                item: response_item_message("msg_123"),
            },
            ResponsesStreamEvent::OutputTextDelta {
                item_id: "msg_123".into(),
                output_index: 0,
                content_index: Some(0),
                delta: "Hello".into(),
            },
            ResponsesStreamEvent::Completed {
                response: ResponseSummary {
                    usage: Some(ResponseUsage {
                        input_tokens: Some(5),
                        output_tokens: Some(3),
                        total_tokens: Some(8),
                    }),
                    ..Default::default()
                },
            },
        ];

        let mapped = map_response_events(events);
        assert!(matches!(
            mapped[0],
            LanguageModelCompletionEvent::StartMessage { ref message_id } if message_id == "msg_123"
        ));
        assert!(matches!(
            mapped[1],
            LanguageModelCompletionEvent::Text(ref text) if text == "Hello"
        ));
        assert!(matches!(
            mapped[2],
            LanguageModelCompletionEvent::UsageUpdate(TokenUsage {
                input_tokens: 5,
                output_tokens: 3,
                ..
            })
        ));
        assert!(matches!(
            mapped[3],
            LanguageModelCompletionEvent::Stop(StopReason::EndTurn)
        ));
    }

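    // End-to-end check of request translation: message roles, an image part,
    // a tool call and its output, tool definitions, tool choice, the token
    // limit, reasoning effort, and the prompt cache key should all appear in
    // the serialized Responses API payload.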
    #[test]
    fn into_open_ai_response_builds_complete_payload() {
        let tool_call_id = LanguageModelToolUseId::from("call-42");
        let tool_input = json!({ "city": "Boston" });
        let tool_arguments = serde_json::to_string(&tool_input).unwrap();
        let tool_use = LanguageModelToolUse {
            id: tool_call_id.clone(),
            name: Arc::from("get_weather"),
            raw_input: tool_arguments.clone(),
            input: tool_input,
            is_input_complete: true,
            thought_signature: None,
        };
        let tool_result = LanguageModelToolResult {
            tool_use_id: tool_call_id,
            tool_name: Arc::from("get_weather"),
            is_error: false,
            content: LanguageModelToolResultContent::Text(Arc::from("Sunny")),
            output: Some(json!({ "forecast": "Sunny" })),
        };
        let user_image = LanguageModelImage {
            source: SharedString::from("aGVsbG8="),
            size: None,
        };
        let expected_image_url = user_image.to_base64_url();

        let request = LanguageModelRequest {
            thread_id: Some("thread-123".into()),
            prompt_id: None,
            intent: None,
            messages: vec![
                LanguageModelRequestMessage {
                    role: Role::System,
                    content: vec![MessageContent::Text("System context".into())],
                    cache: false,
                    reasoning_details: None,
                },
                LanguageModelRequestMessage {
                    role: Role::User,
                    content: vec![
                        MessageContent::Text("Please check the weather.".into()),
                        MessageContent::Image(user_image),
                    ],
                    cache: false,
                    reasoning_details: None,
                },
                LanguageModelRequestMessage {
                    role: Role::Assistant,
                    content: vec![
                        MessageContent::Text("Looking that up.".into()),
                        MessageContent::ToolUse(tool_use),
                    ],
                    cache: false,
                    reasoning_details: None,
                },
                LanguageModelRequestMessage {
                    role: Role::Assistant,
                    content: vec![MessageContent::ToolResult(tool_result)],
                    cache: false,
                    reasoning_details: None,
                },
            ],
            tools: vec![LanguageModelRequestTool {
                name: "get_weather".into(),
                description: "Fetches the weather".into(),
                input_schema: json!({ "type": "object" }),
            }],
            tool_choice: Some(LanguageModelToolChoice::Any),
            stop: vec!["<STOP>".into()],
            temperature: None,
            thinking_allowed: false,
            bypass_rate_limit: false,
        };

        let response = into_open_ai_response(
            request,
            "custom-model",
            true,
            true,
            Some(2048),
            Some(ReasoningEffort::Low),
        );

        let serialized = serde_json::to_value(&response).unwrap();
        let expected = json!({
            "model": "custom-model",
            "input": [
                {
                    "type": "message",
                    "role": "system",
                    "content": [
                        { "type": "input_text", "text": "System context" }
                    ]
                },
                {
                    "type": "message",
                    "role": "user",
                    "content": [
                        { "type": "input_text", "text": "Please check the weather." },
                        { "type": "input_image", "image_url": expected_image_url }
                    ]
                },
                {
                    "type": "message",
                    "role": "assistant",
                    "content": [
                        { "type": "output_text", "text": "Looking that up.", "annotations": [] }
                    ]
                },
                {
                    "type": "function_call",
                    "call_id": "call-42",
                    "name": "get_weather",
                    "arguments": tool_arguments
                },
                {
                    "type": "function_call_output",
                    "call_id": "call-42",
                    "output": "{\"forecast\":\"Sunny\"}"
                }
            ],
            "stream": true,
            "max_output_tokens": 2048,
            "parallel_tool_calls": true,
            "tool_choice": "required",
            "tools": [
                {
                    "type": "function",
                    "name": "get_weather",
                    "description": "Fetches the weather",
                    "parameters": { "type": "object" }
                }
            ],
            "prompt_cache_key": "thread-123",
            "reasoning": { "effort": "low" }
        });

        assert_eq!(serialized, expected);
    }

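    // Streamed argument fragments should be coalesced into a single ToolUse
    // event carrying the wire-level call_id and the complete JSON, followed
    // by a ToolUse stop.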
    #[test]
    fn responses_stream_maps_tool_calls() {
        let events = vec![
            ResponsesStreamEvent::OutputItemAdded {
                output_index: 0,
                sequence_number: None,
                item: response_item_function_call("item_fn", Some("{\"city\":\"Bos")),
            },
            ResponsesStreamEvent::FunctionCallArgumentsDelta {
                item_id: "item_fn".into(),
                output_index: 0,
                delta: "ton\"}".into(),
                sequence_number: None,
            },
            ResponsesStreamEvent::FunctionCallArgumentsDone {
                item_id: "item_fn".into(),
                output_index: 0,
                arguments: "{\"city\":\"Boston\"}".into(),
                sequence_number: None,
            },
            ResponsesStreamEvent::Completed {
                response: ResponseSummary::default(),
            },
        ];

        let mapped = map_response_events(events);
        assert!(matches!(
            mapped[0],
            LanguageModelCompletionEvent::ToolUse(LanguageModelToolUse {
                ref id,
                ref name,
                ref raw_input,
                ..
            }) if id.to_string() == "call_123"
                && name.as_ref() == "get_weather"
                && raw_input == "{\"city\":\"Boston\"}"
        ));
        assert!(matches!(
            mapped[1],
            LanguageModelCompletionEvent::Stop(StopReason::ToolUse)
        ));
    }

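    // An incomplete response due to max_output_tokens should still report
    // usage before stopping with StopReason::MaxTokens.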
    #[test]
    fn responses_stream_uses_max_tokens_stop_reason() {
        let events = vec![ResponsesStreamEvent::Incomplete {
            response: ResponseSummary {
                status_details: Some(ResponseStatusDetails {
                    reason: Some("max_output_tokens".into()),
                    r#type: Some("incomplete".into()),
                    error: None,
                }),
                usage: Some(ResponseUsage {
                    input_tokens: Some(10),
                    output_tokens: Some(20),
                    total_tokens: Some(30),
                }),
                ..Default::default()
            },
        }];

        let mapped = map_response_events(events);
        assert!(matches!(
            mapped[0],
            LanguageModelCompletionEvent::UsageUpdate(TokenUsage {
                input_tokens: 10,
                output_tokens: 20,
                ..
            })
        ));
        assert!(matches!(
            mapped[1],
            LanguageModelCompletionEvent::Stop(StopReason::MaxTokens)
        ));
    }

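    // Two function calls in one response map to two distinct ToolUse events
    // and a single trailing ToolUse stop.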
    #[test]
    fn responses_stream_handles_multiple_tool_calls() {
        let events = vec![
            ResponsesStreamEvent::OutputItemAdded {
                output_index: 0,
                sequence_number: None,
                item: response_item_function_call("item_fn1", Some("{\"city\":\"NYC\"}")),
            },
            ResponsesStreamEvent::FunctionCallArgumentsDone {
                item_id: "item_fn1".into(),
                output_index: 0,
                arguments: "{\"city\":\"NYC\"}".into(),
                sequence_number: None,
            },
            ResponsesStreamEvent::OutputItemAdded {
                output_index: 1,
                sequence_number: None,
                item: response_item_function_call("item_fn2", Some("{\"city\":\"LA\"}")),
            },
            ResponsesStreamEvent::FunctionCallArgumentsDone {
                item_id: "item_fn2".into(),
                output_index: 1,
                arguments: "{\"city\":\"LA\"}".into(),
                sequence_number: None,
            },
            ResponsesStreamEvent::Completed {
                response: ResponseSummary::default(),
            },
        ];

        let mapped = map_response_events(events);
        assert_eq!(mapped.len(), 3);
        assert!(matches!(
            mapped[0],
            LanguageModelCompletionEvent::ToolUse(LanguageModelToolUse { ref raw_input, .. })
            if raw_input == "{\"city\":\"NYC\"}"
        ));
        assert!(matches!(
            mapped[1],
            LanguageModelCompletionEvent::ToolUse(LanguageModelToolUse { ref raw_input, .. })
            if raw_input == "{\"city\":\"LA\"}"
        ));
        assert!(matches!(
            mapped[2],
            LanguageModelCompletionEvent::Stop(StopReason::ToolUse)
        ));
    }

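    // Text and tool-call items interleaved in one response keep their stream
    // order: StartMessage, text, tool use, then a ToolUse stop.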
    #[test]
    fn responses_stream_handles_mixed_text_and_tool_calls() {
        let events = vec![
            ResponsesStreamEvent::OutputItemAdded {
                output_index: 0,
                sequence_number: None,
                item: response_item_message("msg_123"),
            },
            ResponsesStreamEvent::OutputTextDelta {
                item_id: "msg_123".into(),
                output_index: 0,
                content_index: Some(0),
                delta: "Let me check that".into(),
            },
            ResponsesStreamEvent::OutputItemAdded {
                output_index: 1,
                sequence_number: None,
                item: response_item_function_call("item_fn", Some("{\"query\":\"test\"}")),
            },
            ResponsesStreamEvent::FunctionCallArgumentsDone {
                item_id: "item_fn".into(),
                output_index: 1,
                arguments: "{\"query\":\"test\"}".into(),
                sequence_number: None,
            },
            ResponsesStreamEvent::Completed {
                response: ResponseSummary::default(),
            },
        ];

        let mapped = map_response_events(events);
        assert!(matches!(
            mapped[0],
            LanguageModelCompletionEvent::StartMessage { .. }
        ));
        assert!(matches!(
            mapped[1],
            LanguageModelCompletionEvent::Text(ref text) if text == "Let me check that"
        ));
        assert!(matches!(
            mapped[2],
            LanguageModelCompletionEvent::ToolUse(LanguageModelToolUse { ref raw_input, .. })
            if raw_input == "{\"query\":\"test\"}"
        ));
        assert!(matches!(
            mapped[3],
            LanguageModelCompletionEvent::Stop(StopReason::ToolUse)
        ));
    }

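    // Unparseable tool-call arguments map to ToolUseJsonParseError with the
    // raw input preserved for diagnostics, rather than failing the stream.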
    #[test]
    fn responses_stream_handles_json_parse_error() {
        let events = vec![
            ResponsesStreamEvent::OutputItemAdded {
                output_index: 0,
                sequence_number: None,
                item: response_item_function_call("item_fn", Some("{invalid json")),
            },
            ResponsesStreamEvent::FunctionCallArgumentsDone {
                item_id: "item_fn".into(),
                output_index: 0,
                arguments: "{invalid json".into(),
                sequence_number: None,
            },
            ResponsesStreamEvent::Completed {
                response: ResponseSummary::default(),
            },
        ];

        let mapped = map_response_events(events);
        assert!(matches!(
            mapped[0],
            LanguageModelCompletionEvent::ToolUseJsonParseError {
                ref raw_input,
                ..
            } if raw_input.as_ref() == "{invalid json"
        ));
    }

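    // If the stream is cut off mid-call, the Incomplete summary carries the
    // final arguments, letting the mapper emit a complete ToolUse before the
    // MaxTokens stop.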
    #[test]
    fn responses_stream_handles_incomplete_function_call() {
        let events = vec![
            ResponsesStreamEvent::OutputItemAdded {
                output_index: 0,
                sequence_number: None,
                item: response_item_function_call("item_fn", Some("{\"city\":")),
            },
            ResponsesStreamEvent::FunctionCallArgumentsDelta {
                item_id: "item_fn".into(),
                output_index: 0,
                delta: "\"Boston\"".into(),
                sequence_number: None,
            },
            ResponsesStreamEvent::Incomplete {
                response: ResponseSummary {
                    status_details: Some(ResponseStatusDetails {
                        reason: Some("max_output_tokens".into()),
                        r#type: Some("incomplete".into()),
                        error: None,
                    }),
                    output: vec![response_item_function_call(
                        "item_fn",
                        Some("{\"city\":\"Boston\"}"),
                    )],
                    ..Default::default()
                },
            },
        ];

        let mapped = map_response_events(events);
        assert!(matches!(
            mapped[0],
            LanguageModelCompletionEvent::ToolUse(LanguageModelToolUse { ref raw_input, .. })
            if raw_input == "{\"city\":\"Boston\"}"
        ));
        assert!(matches!(
            mapped[1],
            LanguageModelCompletionEvent::Stop(StopReason::MaxTokens)
        ));
    }

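    // A call already emitted via FunctionCallArgumentsDone must not be
    // re-emitted when the same item reappears in the Incomplete summary.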
    #[test]
    fn responses_stream_incomplete_does_not_duplicate_tool_calls() {
        let events = vec![
            ResponsesStreamEvent::OutputItemAdded {
                output_index: 0,
                sequence_number: None,
                item: response_item_function_call("item_fn", Some("{\"city\":\"Boston\"}")),
            },
            ResponsesStreamEvent::FunctionCallArgumentsDone {
                item_id: "item_fn".into(),
                output_index: 0,
                arguments: "{\"city\":\"Boston\"}".into(),
                sequence_number: None,
            },
            ResponsesStreamEvent::Incomplete {
                response: ResponseSummary {
                    status_details: Some(ResponseStatusDetails {
                        reason: Some("max_output_tokens".into()),
                        r#type: Some("incomplete".into()),
                        error: None,
                    }),
                    output: vec![response_item_function_call(
                        "item_fn",
                        Some("{\"city\":\"Boston\"}"),
                    )],
                    ..Default::default()
                },
            },
        ];

        let mapped = map_response_events(events);
        assert_eq!(mapped.len(), 2);
        assert!(matches!(
            mapped[0],
            LanguageModelCompletionEvent::ToolUse(LanguageModelToolUse { ref raw_input, .. })
            if raw_input == "{\"city\":\"Boston\"}"
        ));
        assert!(matches!(
            mapped[1],
            LanguageModelCompletionEvent::Stop(StopReason::MaxTokens)
        ));
    }
}