open_ai.rs

   1use anyhow::{Result, anyhow};
   2use collections::{BTreeMap, HashMap};
   3use futures::Stream;
   4use futures::{FutureExt, StreamExt, future::BoxFuture};
   5use gpui::{AnyView, App, AsyncApp, Context, Entity, SharedString, Task, Window};
   6use http_client::HttpClient;
   7use language_model::{
   8    ApiKeyState, AuthenticateError, EnvVar, IconOrSvg, LanguageModel, LanguageModelCompletionError,
   9    LanguageModelCompletionEvent, LanguageModelId, LanguageModelImage, LanguageModelName,
  10    LanguageModelProvider, LanguageModelProviderId, LanguageModelProviderName,
  11    LanguageModelProviderState, LanguageModelRequest, LanguageModelRequestMessage,
  12    LanguageModelToolChoice, LanguageModelToolResult, LanguageModelToolResultContent,
  13    LanguageModelToolUse, LanguageModelToolUseId, MessageContent, RateLimiter, Role, StopReason,
  14    TokenUsage, env_var,
  15};
  16use menu;
  17use open_ai::responses::{
  18    ResponseFunctionCallItem, ResponseFunctionCallOutputItem, ResponseInputContent,
  19    ResponseInputItem, ResponseMessageItem,
  20};
  21use open_ai::{
  22    ImageUrl, Model, OPEN_AI_API_URL, ReasoningEffort, ResponseStreamEvent,
  23    responses::{
  24        Request as ResponseRequest, ResponseOutputItem, ResponseSummary as ResponsesSummary,
  25        ResponseUsage as ResponsesUsage, StreamEvent as ResponsesStreamEvent, stream_response,
  26    },
  27    stream_completion,
  28};
  29use settings::{OpenAiAvailableModel as AvailableModel, Settings, SettingsStore};
  30use std::pin::Pin;
  31use std::str::FromStr as _;
  32use std::sync::{Arc, LazyLock};
  33use strum::IntoEnumIterator;
  34use ui::{ButtonLink, ConfiguredApiCard, List, ListBulletItem, prelude::*};
  35use ui_input::InputField;
  36use util::ResultExt;
  37
  38const PROVIDER_ID: LanguageModelProviderId = language_model::OPEN_AI_PROVIDER_ID;
  39const PROVIDER_NAME: LanguageModelProviderName = language_model::OPEN_AI_PROVIDER_NAME;
  40
  41const API_KEY_ENV_VAR_NAME: &str = "OPENAI_API_KEY";
  42static API_KEY_ENV_VAR: LazyLock<EnvVar> = env_var!(API_KEY_ENV_VAR_NAME);
  43
  44#[derive(Default, Clone, Debug, PartialEq)]
  45pub struct OpenAiSettings {
  46    pub api_url: String,
  47    pub available_models: Vec<AvailableModel>,
  48}
  49
  50pub struct OpenAiLanguageModelProvider {
  51    http_client: Arc<dyn HttpClient>,
  52    state: Entity<State>,
  53}
  54
  55pub struct State {
  56    api_key_state: ApiKeyState,
  57}
  58
  59impl State {
  60    fn is_authenticated(&self) -> bool {
  61        self.api_key_state.has_key()
  62    }
  63
  64    fn set_api_key(&mut self, api_key: Option<String>, cx: &mut Context<Self>) -> Task<Result<()>> {
  65        let api_url = OpenAiLanguageModelProvider::api_url(cx);
  66        self.api_key_state
  67            .store(api_url, api_key, |this| &mut this.api_key_state, cx)
  68    }
  69
  70    fn authenticate(&mut self, cx: &mut Context<Self>) -> Task<Result<(), AuthenticateError>> {
  71        let api_url = OpenAiLanguageModelProvider::api_url(cx);
  72        self.api_key_state
  73            .load_if_needed(api_url, |this| &mut this.api_key_state, cx)
  74    }
  75}
  76
  77impl OpenAiLanguageModelProvider {
  78    pub fn new(http_client: Arc<dyn HttpClient>, cx: &mut App) -> Self {
  79        let state = cx.new(|cx| {
  80            cx.observe_global::<SettingsStore>(|this: &mut State, cx| {
  81                let api_url = Self::api_url(cx);
  82                this.api_key_state
  83                    .handle_url_change(api_url, |this| &mut this.api_key_state, cx);
  84                cx.notify();
  85            })
  86            .detach();
  87            State {
  88                api_key_state: ApiKeyState::new(Self::api_url(cx), (*API_KEY_ENV_VAR).clone()),
  89            }
  90        });
  91
  92        Self { http_client, state }
  93    }
  94
  95    fn create_language_model(&self, model: open_ai::Model) -> Arc<dyn LanguageModel> {
  96        Arc::new(OpenAiLanguageModel {
  97            id: LanguageModelId::from(model.id().to_string()),
  98            model,
  99            state: self.state.clone(),
 100            http_client: self.http_client.clone(),
 101            request_limiter: RateLimiter::new(4),
 102        })
 103    }
 104
 105    fn settings(cx: &App) -> &OpenAiSettings {
 106        &crate::AllLanguageModelSettings::get_global(cx).openai
 107    }
 108
 109    fn api_url(cx: &App) -> SharedString {
 110        let api_url = &Self::settings(cx).api_url;
 111        if api_url.is_empty() {
 112            open_ai::OPEN_AI_API_URL.into()
 113        } else {
 114            SharedString::new(api_url.as_str())
 115        }
 116    }
 117}
 118
 119impl LanguageModelProviderState for OpenAiLanguageModelProvider {
 120    type ObservableEntity = State;
 121
 122    fn observable_entity(&self) -> Option<Entity<Self::ObservableEntity>> {
 123        Some(self.state.clone())
 124    }
 125}
 126
 127impl LanguageModelProvider for OpenAiLanguageModelProvider {
 128    fn id(&self) -> LanguageModelProviderId {
 129        PROVIDER_ID
 130    }
 131
 132    fn name(&self) -> LanguageModelProviderName {
 133        PROVIDER_NAME
 134    }
 135
 136    fn icon(&self) -> IconOrSvg {
 137        IconOrSvg::Icon(IconName::AiOpenAi)
 138    }
 139
 140    fn default_model(&self, _cx: &App) -> Option<Arc<dyn LanguageModel>> {
 141        Some(self.create_language_model(open_ai::Model::default()))
 142    }
 143
 144    fn default_fast_model(&self, _cx: &App) -> Option<Arc<dyn LanguageModel>> {
 145        Some(self.create_language_model(open_ai::Model::default_fast()))
 146    }
 147
 148    fn provided_models(&self, cx: &App) -> Vec<Arc<dyn LanguageModel>> {
 149        let mut models = BTreeMap::default();
 150
 151        // Add base models from open_ai::Model::iter()
 152        for model in open_ai::Model::iter() {
 153            if !matches!(model, open_ai::Model::Custom { .. }) {
 154                models.insert(model.id().to_string(), model);
 155            }
 156        }
 157
 158        // Override with available models from settings
 159        for model in &OpenAiLanguageModelProvider::settings(cx).available_models {
 160            models.insert(
 161                model.name.clone(),
 162                open_ai::Model::Custom {
 163                    name: model.name.clone(),
 164                    display_name: model.display_name.clone(),
 165                    max_tokens: model.max_tokens,
 166                    max_output_tokens: model.max_output_tokens,
 167                    max_completion_tokens: model.max_completion_tokens,
 168                    reasoning_effort: model.reasoning_effort.clone(),
 169                    supports_chat_completions: model.capabilities.chat_completions,
 170                },
 171            );
 172        }
 173
 174        models
 175            .into_values()
 176            .map(|model| self.create_language_model(model))
 177            .collect()
 178    }
 179
 180    fn is_authenticated(&self, cx: &App) -> bool {
 181        self.state.read(cx).is_authenticated()
 182    }
 183
 184    fn authenticate(&self, cx: &mut App) -> Task<Result<(), AuthenticateError>> {
 185        self.state.update(cx, |state, cx| state.authenticate(cx))
 186    }
 187
 188    fn configuration_view(
 189        &self,
 190        _target_agent: language_model::ConfigurationViewTargetAgent,
 191        window: &mut Window,
 192        cx: &mut App,
 193    ) -> AnyView {
 194        cx.new(|cx| ConfigurationView::new(self.state.clone(), window, cx))
 195            .into()
 196    }
 197
 198    fn reset_credentials(&self, cx: &mut App) -> Task<Result<()>> {
 199        self.state
 200            .update(cx, |state, cx| state.set_api_key(None, cx))
 201    }
 202}
 203
 204pub struct OpenAiLanguageModel {
 205    id: LanguageModelId,
 206    model: open_ai::Model,
 207    state: Entity<State>,
 208    http_client: Arc<dyn HttpClient>,
 209    request_limiter: RateLimiter,
 210}
 211
 212impl OpenAiLanguageModel {
 213    fn stream_completion(
 214        &self,
 215        request: open_ai::Request,
 216        cx: &AsyncApp,
 217    ) -> BoxFuture<'static, Result<futures::stream::BoxStream<'static, Result<ResponseStreamEvent>>>>
 218    {
 219        let http_client = self.http_client.clone();
 220
 221        let (api_key, api_url) = self.state.read_with(cx, |state, cx| {
 222            let api_url = OpenAiLanguageModelProvider::api_url(cx);
 223            (state.api_key_state.key(&api_url), api_url)
 224        });
 225
 226        let future = self.request_limiter.stream(async move {
 227            let provider = PROVIDER_NAME;
 228            let Some(api_key) = api_key else {
 229                return Err(LanguageModelCompletionError::NoApiKey { provider });
 230            };
 231            let request = stream_completion(
 232                http_client.as_ref(),
 233                provider.0.as_str(),
 234                &api_url,
 235                &api_key,
 236                request,
 237            );
 238            let response = request.await?;
 239            Ok(response)
 240        });
 241
 242        async move { Ok(future.await?.boxed()) }.boxed()
 243    }
 244
 245    fn stream_response(
 246        &self,
 247        request: ResponseRequest,
 248        cx: &AsyncApp,
 249    ) -> BoxFuture<'static, Result<futures::stream::BoxStream<'static, Result<ResponsesStreamEvent>>>>
 250    {
 251        let http_client = self.http_client.clone();
 252
 253        let (api_key, api_url) = self.state.read_with(cx, |state, cx| {
 254            let api_url = OpenAiLanguageModelProvider::api_url(cx);
 255            (state.api_key_state.key(&api_url), api_url)
 256        });
 257
 258        let provider = PROVIDER_NAME;
 259        let future = self.request_limiter.stream(async move {
 260            let Some(api_key) = api_key else {
 261                return Err(LanguageModelCompletionError::NoApiKey { provider });
 262            };
 263            let request = stream_response(
 264                http_client.as_ref(),
 265                provider.0.as_str(),
 266                &api_url,
 267                &api_key,
 268                request,
 269            );
 270            let response = request.await?;
 271            Ok(response)
 272        });
 273
 274        async move { Ok(future.await?.boxed()) }.boxed()
 275    }
 276}
 277
 278impl LanguageModel for OpenAiLanguageModel {
 279    fn id(&self) -> LanguageModelId {
 280        self.id.clone()
 281    }
 282
 283    fn name(&self) -> LanguageModelName {
 284        LanguageModelName::from(self.model.display_name().to_string())
 285    }
 286
 287    fn provider_id(&self) -> LanguageModelProviderId {
 288        PROVIDER_ID
 289    }
 290
 291    fn provider_name(&self) -> LanguageModelProviderName {
 292        PROVIDER_NAME
 293    }
 294
 295    fn supports_tools(&self) -> bool {
 296        true
 297    }
 298
 299    fn supports_images(&self) -> bool {
 300        use open_ai::Model;
 301        match &self.model {
 302            Model::FourOmni
 303            | Model::FourOmniMini
 304            | Model::FourPointOne
 305            | Model::FourPointOneMini
 306            | Model::FourPointOneNano
 307            | Model::Five
 308            | Model::FiveCodex
 309            | Model::FiveMini
 310            | Model::FiveNano
 311            | Model::FivePointOne
 312            | Model::FivePointTwo
 313            | Model::FivePointTwoCodex
 314            | Model::O1
 315            | Model::O3
 316            | Model::O4Mini => true,
 317            Model::ThreePointFiveTurbo
 318            | Model::Four
 319            | Model::FourTurbo
 320            | Model::O3Mini
 321            | Model::Custom { .. } => false,
 322        }
 323    }
 324
 325    fn supports_tool_choice(&self, choice: LanguageModelToolChoice) -> bool {
 326        match choice {
 327            LanguageModelToolChoice::Auto => true,
 328            LanguageModelToolChoice::Any => true,
 329            LanguageModelToolChoice::None => true,
 330        }
 331    }
 332
 333    fn supports_split_token_display(&self) -> bool {
 334        true
 335    }
 336
 337    fn telemetry_id(&self) -> String {
 338        format!("openai/{}", self.model.id())
 339    }
 340
 341    fn max_token_count(&self) -> u64 {
 342        self.model.max_token_count()
 343    }
 344
 345    fn max_output_tokens(&self) -> Option<u64> {
 346        self.model.max_output_tokens()
 347    }
 348
 349    fn count_tokens(
 350        &self,
 351        request: LanguageModelRequest,
 352        cx: &App,
 353    ) -> BoxFuture<'static, Result<u64>> {
 354        count_open_ai_tokens(request, self.model.clone(), cx)
 355    }
 356
 357    fn stream_completion(
 358        &self,
 359        request: LanguageModelRequest,
 360        cx: &AsyncApp,
 361    ) -> BoxFuture<
 362        'static,
 363        Result<
 364            futures::stream::BoxStream<
 365                'static,
 366                Result<LanguageModelCompletionEvent, LanguageModelCompletionError>,
 367            >,
 368            LanguageModelCompletionError,
 369        >,
 370    > {
 371        if self.model.supports_chat_completions() {
 372            let request = into_open_ai(
 373                request,
 374                self.model.id(),
 375                self.model.supports_parallel_tool_calls(),
 376                self.model.supports_prompt_cache_key(),
 377                self.max_output_tokens(),
 378                self.model.reasoning_effort(),
 379            );
 380            let completions = self.stream_completion(request, cx);
 381            async move {
 382                let mapper = OpenAiEventMapper::new();
 383                Ok(mapper.map_stream(completions.await?).boxed())
 384            }
 385            .boxed()
 386        } else {
 387            let request = into_open_ai_response(
 388                request,
 389                self.model.id(),
 390                self.model.supports_parallel_tool_calls(),
 391                self.model.supports_prompt_cache_key(),
 392                self.max_output_tokens(),
 393                self.model.reasoning_effort(),
 394            );
 395            let completions = self.stream_response(request, cx);
 396            async move {
 397                let mapper = OpenAiResponseEventMapper::new();
 398                Ok(mapper.map_stream(completions.await?).boxed())
 399            }
 400            .boxed()
 401        }
 402    }
 403}
 404
 405pub fn into_open_ai(
 406    request: LanguageModelRequest,
 407    model_id: &str,
 408    supports_parallel_tool_calls: bool,
 409    supports_prompt_cache_key: bool,
 410    max_output_tokens: Option<u64>,
 411    reasoning_effort: Option<ReasoningEffort>,
 412) -> open_ai::Request {
 413    let stream = !model_id.starts_with("o1-");
 414
 415    let mut messages = Vec::new();
 416    for message in request.messages {
 417        for content in message.content {
 418            match content {
 419                MessageContent::Text(text) | MessageContent::Thinking { text, .. } => {
 420                    let should_add = if message.role == Role::User {
 421                        // Including whitespace-only user messages can cause error with OpenAI compatible APIs
 422                        // See https://github.com/zed-industries/zed/issues/40097
 423                        !text.trim().is_empty()
 424                    } else {
 425                        !text.is_empty()
 426                    };
 427                    if should_add {
 428                        add_message_content_part(
 429                            open_ai::MessagePart::Text { text },
 430                            message.role,
 431                            &mut messages,
 432                        );
 433                    }
 434                }
 435                MessageContent::RedactedThinking(_) => {}
 436                MessageContent::Image(image) => {
 437                    add_message_content_part(
 438                        open_ai::MessagePart::Image {
 439                            image_url: ImageUrl {
 440                                url: image.to_base64_url(),
 441                                detail: None,
 442                            },
 443                        },
 444                        message.role,
 445                        &mut messages,
 446                    );
 447                }
 448                MessageContent::ToolUse(tool_use) => {
 449                    let tool_call = open_ai::ToolCall {
 450                        id: tool_use.id.to_string(),
 451                        content: open_ai::ToolCallContent::Function {
 452                            function: open_ai::FunctionContent {
 453                                name: tool_use.name.to_string(),
 454                                arguments: serde_json::to_string(&tool_use.input)
 455                                    .unwrap_or_default(),
 456                            },
 457                        },
 458                    };
 459
 460                    if let Some(open_ai::RequestMessage::Assistant { tool_calls, .. }) =
 461                        messages.last_mut()
 462                    {
 463                        tool_calls.push(tool_call);
 464                    } else {
 465                        messages.push(open_ai::RequestMessage::Assistant {
 466                            content: None,
 467                            tool_calls: vec![tool_call],
 468                        });
 469                    }
 470                }
 471                MessageContent::ToolResult(tool_result) => {
 472                    let content = match &tool_result.content {
 473                        LanguageModelToolResultContent::Text(text) => {
 474                            vec![open_ai::MessagePart::Text {
 475                                text: text.to_string(),
 476                            }]
 477                        }
 478                        LanguageModelToolResultContent::Image(image) => {
 479                            vec![open_ai::MessagePart::Image {
 480                                image_url: ImageUrl {
 481                                    url: image.to_base64_url(),
 482                                    detail: None,
 483                                },
 484                            }]
 485                        }
 486                    };
 487
 488                    messages.push(open_ai::RequestMessage::Tool {
 489                        content: content.into(),
 490                        tool_call_id: tool_result.tool_use_id.to_string(),
 491                    });
 492                }
 493            }
 494        }
 495    }
 496
 497    open_ai::Request {
 498        model: model_id.into(),
 499        messages,
 500        stream,
 501        stop: request.stop,
 502        temperature: request.temperature.or(Some(1.0)),
 503        max_completion_tokens: max_output_tokens,
 504        parallel_tool_calls: if supports_parallel_tool_calls && !request.tools.is_empty() {
 505            // Disable parallel tool calls, as the Agent currently expects a maximum of one per turn.
 506            Some(false)
 507        } else {
 508            None
 509        },
 510        prompt_cache_key: if supports_prompt_cache_key {
 511            request.thread_id
 512        } else {
 513            None
 514        },
 515        tools: request
 516            .tools
 517            .into_iter()
 518            .map(|tool| open_ai::ToolDefinition::Function {
 519                function: open_ai::FunctionDefinition {
 520                    name: tool.name,
 521                    description: Some(tool.description),
 522                    parameters: Some(tool.input_schema),
 523                },
 524            })
 525            .collect(),
 526        tool_choice: request.tool_choice.map(|choice| match choice {
 527            LanguageModelToolChoice::Auto => open_ai::ToolChoice::Auto,
 528            LanguageModelToolChoice::Any => open_ai::ToolChoice::Required,
 529            LanguageModelToolChoice::None => open_ai::ToolChoice::None,
 530        }),
 531        reasoning_effort,
 532    }
 533}
 534
 535pub fn into_open_ai_response(
 536    request: LanguageModelRequest,
 537    model_id: &str,
 538    supports_parallel_tool_calls: bool,
 539    supports_prompt_cache_key: bool,
 540    max_output_tokens: Option<u64>,
 541    reasoning_effort: Option<ReasoningEffort>,
 542) -> ResponseRequest {
 543    let stream = !model_id.starts_with("o1-");
 544
 545    let LanguageModelRequest {
 546        thread_id,
 547        prompt_id: _,
 548        intent: _,
 549        messages,
 550        tools,
 551        tool_choice,
 552        stop: _,
 553        temperature,
 554        thinking_allowed: _,
 555    } = request;
 556
 557    let mut input_items = Vec::new();
 558    for (index, message) in messages.into_iter().enumerate() {
 559        append_message_to_response_items(message, index, &mut input_items);
 560    }
 561
 562    let tools: Vec<_> = tools
 563        .into_iter()
 564        .map(|tool| open_ai::responses::ToolDefinition::Function {
 565            name: tool.name,
 566            description: Some(tool.description),
 567            parameters: Some(tool.input_schema),
 568            strict: None,
 569        })
 570        .collect();
 571
 572    ResponseRequest {
 573        model: model_id.into(),
 574        input: input_items,
 575        stream,
 576        temperature,
 577        top_p: None,
 578        max_output_tokens,
 579        parallel_tool_calls: if tools.is_empty() {
 580            None
 581        } else {
 582            Some(supports_parallel_tool_calls)
 583        },
 584        tool_choice: tool_choice.map(|choice| match choice {
 585            LanguageModelToolChoice::Auto => open_ai::ToolChoice::Auto,
 586            LanguageModelToolChoice::Any => open_ai::ToolChoice::Required,
 587            LanguageModelToolChoice::None => open_ai::ToolChoice::None,
 588        }),
 589        tools,
 590        prompt_cache_key: if supports_prompt_cache_key {
 591            thread_id
 592        } else {
 593            None
 594        },
 595        reasoning: reasoning_effort.map(|effort| open_ai::responses::ReasoningConfig { effort }),
 596    }
 597}
 598
 599fn append_message_to_response_items(
 600    message: LanguageModelRequestMessage,
 601    index: usize,
 602    input_items: &mut Vec<ResponseInputItem>,
 603) {
 604    let mut content_parts: Vec<ResponseInputContent> = Vec::new();
 605
 606    for content in message.content {
 607        match content {
 608            MessageContent::Text(text) => {
 609                push_response_text_part(&message.role, text, &mut content_parts);
 610            }
 611            MessageContent::Thinking { text, .. } => {
 612                push_response_text_part(&message.role, text, &mut content_parts);
 613            }
 614            MessageContent::RedactedThinking(_) => {}
 615            MessageContent::Image(image) => {
 616                push_response_image_part(&message.role, image, &mut content_parts);
 617            }
 618            MessageContent::ToolUse(tool_use) => {
 619                flush_response_parts(&message.role, index, &mut content_parts, input_items);
 620                let call_id = tool_use.id.to_string();
 621                input_items.push(ResponseInputItem::FunctionCall(ResponseFunctionCallItem {
 622                    call_id,
 623                    name: tool_use.name.to_string(),
 624                    arguments: tool_use.raw_input,
 625                }));
 626            }
 627            MessageContent::ToolResult(tool_result) => {
 628                flush_response_parts(&message.role, index, &mut content_parts, input_items);
 629                input_items.push(ResponseInputItem::FunctionCallOutput(
 630                    ResponseFunctionCallOutputItem {
 631                        call_id: tool_result.tool_use_id.to_string(),
 632                        output: tool_result_output(&tool_result),
 633                    },
 634                ));
 635            }
 636        }
 637    }
 638
 639    flush_response_parts(&message.role, index, &mut content_parts, input_items);
 640}
 641
 642fn push_response_text_part(
 643    role: &Role,
 644    text: impl Into<String>,
 645    parts: &mut Vec<ResponseInputContent>,
 646) {
 647    let text = text.into();
 648    if text.trim().is_empty() {
 649        return;
 650    }
 651
 652    match role {
 653        Role::Assistant => parts.push(ResponseInputContent::OutputText {
 654            text,
 655            annotations: Vec::new(),
 656        }),
 657        _ => parts.push(ResponseInputContent::Text { text }),
 658    }
 659}
 660
 661fn push_response_image_part(
 662    role: &Role,
 663    image: LanguageModelImage,
 664    parts: &mut Vec<ResponseInputContent>,
 665) {
 666    match role {
 667        Role::Assistant => parts.push(ResponseInputContent::OutputText {
 668            text: "[image omitted]".to_string(),
 669            annotations: Vec::new(),
 670        }),
 671        _ => parts.push(ResponseInputContent::Image {
 672            image_url: image.to_base64_url(),
 673        }),
 674    }
 675}
 676
 677fn flush_response_parts(
 678    role: &Role,
 679    _index: usize,
 680    parts: &mut Vec<ResponseInputContent>,
 681    input_items: &mut Vec<ResponseInputItem>,
 682) {
 683    if parts.is_empty() {
 684        return;
 685    }
 686
 687    let item = ResponseInputItem::Message(ResponseMessageItem {
 688        role: match role {
 689            Role::User => open_ai::Role::User,
 690            Role::Assistant => open_ai::Role::Assistant,
 691            Role::System => open_ai::Role::System,
 692        },
 693        content: parts.clone(),
 694    });
 695
 696    input_items.push(item);
 697    parts.clear();
 698}
 699
 700fn tool_result_output(result: &LanguageModelToolResult) -> String {
 701    if let Some(output) = &result.output {
 702        match output {
 703            serde_json::Value::String(text) => text.clone(),
 704            serde_json::Value::Null => String::new(),
 705            _ => output.to_string(),
 706        }
 707    } else {
 708        match &result.content {
 709            LanguageModelToolResultContent::Text(text) => text.to_string(),
 710            LanguageModelToolResultContent::Image(image) => image.to_base64_url(),
 711        }
 712    }
 713}
 714
 715fn add_message_content_part(
 716    new_part: open_ai::MessagePart,
 717    role: Role,
 718    messages: &mut Vec<open_ai::RequestMessage>,
 719) {
 720    match (role, messages.last_mut()) {
 721        (Role::User, Some(open_ai::RequestMessage::User { content }))
 722        | (
 723            Role::Assistant,
 724            Some(open_ai::RequestMessage::Assistant {
 725                content: Some(content),
 726                ..
 727            }),
 728        )
 729        | (Role::System, Some(open_ai::RequestMessage::System { content, .. })) => {
 730            content.push_part(new_part);
 731        }
 732        _ => {
 733            messages.push(match role {
 734                Role::User => open_ai::RequestMessage::User {
 735                    content: open_ai::MessageContent::from(vec![new_part]),
 736                },
 737                Role::Assistant => open_ai::RequestMessage::Assistant {
 738                    content: Some(open_ai::MessageContent::from(vec![new_part])),
 739                    tool_calls: Vec::new(),
 740                },
 741                Role::System => open_ai::RequestMessage::System {
 742                    content: open_ai::MessageContent::from(vec![new_part]),
 743                },
 744            });
 745        }
 746    }
 747}
 748
 749pub struct OpenAiEventMapper {
 750    tool_calls_by_index: HashMap<usize, RawToolCall>,
 751}
 752
 753impl OpenAiEventMapper {
 754    pub fn new() -> Self {
 755        Self {
 756            tool_calls_by_index: HashMap::default(),
 757        }
 758    }
 759
 760    pub fn map_stream(
 761        mut self,
 762        events: Pin<Box<dyn Send + Stream<Item = Result<ResponseStreamEvent>>>>,
 763    ) -> impl Stream<Item = Result<LanguageModelCompletionEvent, LanguageModelCompletionError>>
 764    {
 765        events.flat_map(move |event| {
 766            futures::stream::iter(match event {
 767                Ok(event) => self.map_event(event),
 768                Err(error) => vec![Err(LanguageModelCompletionError::from(anyhow!(error)))],
 769            })
 770        })
 771    }
 772
 773    pub fn map_event(
 774        &mut self,
 775        event: ResponseStreamEvent,
 776    ) -> Vec<Result<LanguageModelCompletionEvent, LanguageModelCompletionError>> {
 777        let mut events = Vec::new();
 778        if let Some(usage) = event.usage {
 779            events.push(Ok(LanguageModelCompletionEvent::UsageUpdate(TokenUsage {
 780                input_tokens: usage.prompt_tokens,
 781                output_tokens: usage.completion_tokens,
 782                cache_creation_input_tokens: 0,
 783                cache_read_input_tokens: 0,
 784            })));
 785        }
 786
 787        let Some(choice) = event.choices.first() else {
 788            return events;
 789        };
 790
 791        if let Some(delta) = choice.delta.as_ref() {
 792            if let Some(reasoning_content) = delta.reasoning_content.clone() {
 793                if !reasoning_content.is_empty() {
 794                    events.push(Ok(LanguageModelCompletionEvent::Thinking {
 795                        text: reasoning_content,
 796                        signature: None,
 797                    }));
 798                }
 799            }
 800            if let Some(content) = delta.content.clone() {
 801                if !content.is_empty() {
 802                    events.push(Ok(LanguageModelCompletionEvent::Text(content)));
 803                }
 804            }
 805
 806            if let Some(tool_calls) = delta.tool_calls.as_ref() {
 807                for tool_call in tool_calls {
 808                    let entry = self.tool_calls_by_index.entry(tool_call.index).or_default();
 809
 810                    if let Some(tool_id) = tool_call.id.clone() {
 811                        entry.id = tool_id;
 812                    }
 813
 814                    if let Some(function) = tool_call.function.as_ref() {
 815                        if let Some(name) = function.name.clone() {
 816                            entry.name = name;
 817                        }
 818
 819                        if let Some(arguments) = function.arguments.clone() {
 820                            entry.arguments.push_str(&arguments);
 821                        }
 822                    }
 823                }
 824            }
 825        }
 826
 827        match choice.finish_reason.as_deref() {
 828            Some("stop") => {
 829                events.push(Ok(LanguageModelCompletionEvent::Stop(StopReason::EndTurn)));
 830            }
 831            Some("tool_calls") => {
 832                events.extend(self.tool_calls_by_index.drain().map(|(_, tool_call)| {
 833                    match serde_json::Value::from_str(&tool_call.arguments) {
 834                        Ok(input) => Ok(LanguageModelCompletionEvent::ToolUse(
 835                            LanguageModelToolUse {
 836                                id: tool_call.id.clone().into(),
 837                                name: tool_call.name.as_str().into(),
 838                                is_input_complete: true,
 839                                input,
 840                                raw_input: tool_call.arguments.clone(),
 841                                thought_signature: None,
 842                            },
 843                        )),
 844                        Err(error) => Ok(LanguageModelCompletionEvent::ToolUseJsonParseError {
 845                            id: tool_call.id.into(),
 846                            tool_name: tool_call.name.into(),
 847                            raw_input: tool_call.arguments.clone().into(),
 848                            json_parse_error: error.to_string(),
 849                        }),
 850                    }
 851                }));
 852
 853                events.push(Ok(LanguageModelCompletionEvent::Stop(StopReason::ToolUse)));
 854            }
 855            Some(stop_reason) => {
 856                log::error!("Unexpected OpenAI stop_reason: {stop_reason:?}",);
 857                events.push(Ok(LanguageModelCompletionEvent::Stop(StopReason::EndTurn)));
 858            }
 859            None => {}
 860        }
 861
 862        events
 863    }
 864}
 865
 866#[derive(Default)]
 867struct RawToolCall {
 868    id: String,
 869    name: String,
 870    arguments: String,
 871}
 872
 873pub struct OpenAiResponseEventMapper {
 874    function_calls_by_item: HashMap<String, PendingResponseFunctionCall>,
 875    pending_stop_reason: Option<StopReason>,
 876}
 877
 878#[derive(Default)]
 879struct PendingResponseFunctionCall {
 880    call_id: String,
 881    name: Arc<str>,
 882    arguments: String,
 883}
 884
 885impl OpenAiResponseEventMapper {
 886    pub fn new() -> Self {
 887        Self {
 888            function_calls_by_item: HashMap::default(),
 889            pending_stop_reason: None,
 890        }
 891    }
 892
 893    pub fn map_stream(
 894        mut self,
 895        events: Pin<Box<dyn Send + Stream<Item = Result<ResponsesStreamEvent>>>>,
 896    ) -> impl Stream<Item = Result<LanguageModelCompletionEvent, LanguageModelCompletionError>>
 897    {
 898        events.flat_map(move |event| {
 899            futures::stream::iter(match event {
 900                Ok(event) => self.map_event(event),
 901                Err(error) => vec![Err(LanguageModelCompletionError::from(anyhow!(error)))],
 902            })
 903        })
 904    }
 905
 906    pub fn map_event(
 907        &mut self,
 908        event: ResponsesStreamEvent,
 909    ) -> Vec<Result<LanguageModelCompletionEvent, LanguageModelCompletionError>> {
 910        match event {
 911            ResponsesStreamEvent::OutputItemAdded { item, .. } => {
 912                let mut events = Vec::new();
 913
 914                match &item {
 915                    ResponseOutputItem::Message(message) => {
 916                        if let Some(id) = &message.id {
 917                            events.push(Ok(LanguageModelCompletionEvent::StartMessage {
 918                                message_id: id.clone(),
 919                            }));
 920                        }
 921                    }
 922                    ResponseOutputItem::FunctionCall(function_call) => {
 923                        if let Some(item_id) = function_call.id.clone() {
 924                            let call_id = function_call
 925                                .call_id
 926                                .clone()
 927                                .or_else(|| function_call.id.clone())
 928                                .unwrap_or_else(|| item_id.clone());
 929                            let entry = PendingResponseFunctionCall {
 930                                call_id,
 931                                name: Arc::<str>::from(
 932                                    function_call.name.clone().unwrap_or_default(),
 933                                ),
 934                                arguments: function_call.arguments.clone(),
 935                            };
 936                            self.function_calls_by_item.insert(item_id, entry);
 937                        }
 938                    }
 939                    ResponseOutputItem::Unknown => {}
 940                }
 941                events
 942            }
 943            ResponsesStreamEvent::OutputTextDelta { delta, .. } => {
 944                if delta.is_empty() {
 945                    Vec::new()
 946                } else {
 947                    vec![Ok(LanguageModelCompletionEvent::Text(delta))]
 948                }
 949            }
 950            ResponsesStreamEvent::FunctionCallArgumentsDelta { item_id, delta, .. } => {
 951                if let Some(entry) = self.function_calls_by_item.get_mut(&item_id) {
 952                    entry.arguments.push_str(&delta);
 953                }
 954                Vec::new()
 955            }
 956            ResponsesStreamEvent::FunctionCallArgumentsDone {
 957                item_id, arguments, ..
 958            } => {
 959                if let Some(mut entry) = self.function_calls_by_item.remove(&item_id) {
 960                    if !arguments.is_empty() {
 961                        entry.arguments = arguments;
 962                    }
 963                    let raw_input = entry.arguments.clone();
 964                    self.pending_stop_reason = Some(StopReason::ToolUse);
 965                    match serde_json::from_str::<serde_json::Value>(&entry.arguments) {
 966                        Ok(input) => {
 967                            vec![Ok(LanguageModelCompletionEvent::ToolUse(
 968                                LanguageModelToolUse {
 969                                    id: LanguageModelToolUseId::from(entry.call_id.clone()),
 970                                    name: entry.name.clone(),
 971                                    is_input_complete: true,
 972                                    input,
 973                                    raw_input,
 974                                    thought_signature: None,
 975                                },
 976                            ))]
 977                        }
 978                        Err(error) => {
 979                            vec![Ok(LanguageModelCompletionEvent::ToolUseJsonParseError {
 980                                id: LanguageModelToolUseId::from(entry.call_id.clone()),
 981                                tool_name: entry.name.clone(),
 982                                raw_input: Arc::<str>::from(raw_input),
 983                                json_parse_error: error.to_string(),
 984                            })]
 985                        }
 986                    }
 987                } else {
 988                    Vec::new()
 989                }
 990            }
 991            ResponsesStreamEvent::Completed { response } => {
 992                self.handle_completion(response, StopReason::EndTurn)
 993            }
 994            ResponsesStreamEvent::Incomplete { response } => {
 995                let reason = response
 996                    .status_details
 997                    .as_ref()
 998                    .and_then(|details| details.reason.as_deref());
 999                let stop_reason = match reason {
1000                    Some("max_output_tokens") => StopReason::MaxTokens,
1001                    Some("content_filter") => {
1002                        self.pending_stop_reason = Some(StopReason::Refusal);
1003                        StopReason::Refusal
1004                    }
1005                    _ => self
1006                        .pending_stop_reason
1007                        .take()
1008                        .unwrap_or(StopReason::EndTurn),
1009                };
1010
1011                let mut events = Vec::new();
1012                if self.pending_stop_reason.is_none() {
1013                    events.extend(self.emit_tool_calls_from_output(&response.output));
1014                }
1015                if let Some(usage) = response.usage.as_ref() {
1016                    events.push(Ok(LanguageModelCompletionEvent::UsageUpdate(
1017                        token_usage_from_response_usage(usage),
1018                    )));
1019                }
1020                events.push(Ok(LanguageModelCompletionEvent::Stop(stop_reason)));
1021                events
1022            }
1023            ResponsesStreamEvent::Failed { response } => {
1024                let message = response
1025                    .status_details
1026                    .and_then(|details| details.error)
1027                    .map(|error| error.to_string())
1028                    .unwrap_or_else(|| "response failed".to_string());
1029                vec![Err(LanguageModelCompletionError::Other(anyhow!(message)))]
1030            }
1031            ResponsesStreamEvent::Error { error }
1032            | ResponsesStreamEvent::GenericError { error } => {
1033                vec![Err(LanguageModelCompletionError::Other(anyhow!(format!(
1034                    "{error:?}"
1035                ))))]
1036            }
1037            ResponsesStreamEvent::OutputTextDone { .. } => Vec::new(),
1038            ResponsesStreamEvent::OutputItemDone { .. }
1039            | ResponsesStreamEvent::ContentPartAdded { .. }
1040            | ResponsesStreamEvent::ContentPartDone { .. }
1041            | ResponsesStreamEvent::Created { .. }
1042            | ResponsesStreamEvent::InProgress { .. }
1043            | ResponsesStreamEvent::Unknown => Vec::new(),
1044        }
1045    }
1046
1047    fn handle_completion(
1048        &mut self,
1049        response: ResponsesSummary,
1050        default_reason: StopReason,
1051    ) -> Vec<Result<LanguageModelCompletionEvent, LanguageModelCompletionError>> {
1052        let mut events = Vec::new();
1053
1054        if self.pending_stop_reason.is_none() {
1055            events.extend(self.emit_tool_calls_from_output(&response.output));
1056        }
1057
1058        if let Some(usage) = response.usage.as_ref() {
1059            events.push(Ok(LanguageModelCompletionEvent::UsageUpdate(
1060                token_usage_from_response_usage(usage),
1061            )));
1062        }
1063
1064        let stop_reason = self.pending_stop_reason.take().unwrap_or(default_reason);
1065        events.push(Ok(LanguageModelCompletionEvent::Stop(stop_reason)));
1066        events
1067    }
1068
1069    fn emit_tool_calls_from_output(
1070        &mut self,
1071        output: &[ResponseOutputItem],
1072    ) -> Vec<Result<LanguageModelCompletionEvent, LanguageModelCompletionError>> {
1073        let mut events = Vec::new();
1074        for item in output {
1075            if let ResponseOutputItem::FunctionCall(function_call) = item {
1076                let Some(call_id) = function_call
1077                    .call_id
1078                    .clone()
1079                    .or_else(|| function_call.id.clone())
1080                else {
1081                    log::error!(
1082                        "Function call item missing both call_id and id: {:?}",
1083                        function_call
1084                    );
1085                    continue;
1086                };
1087                let name: Arc<str> = Arc::from(function_call.name.clone().unwrap_or_default());
1088                let arguments = &function_call.arguments;
1089                if !arguments.is_empty() {
1090                    self.pending_stop_reason = Some(StopReason::ToolUse);
1091                    match serde_json::from_str::<serde_json::Value>(arguments) {
1092                        Ok(input) => {
1093                            events.push(Ok(LanguageModelCompletionEvent::ToolUse(
1094                                LanguageModelToolUse {
1095                                    id: LanguageModelToolUseId::from(call_id.clone()),
1096                                    name: name.clone(),
1097                                    is_input_complete: true,
1098                                    input,
1099                                    raw_input: arguments.clone(),
1100                                    thought_signature: None,
1101                                },
1102                            )));
1103                        }
1104                        Err(error) => {
1105                            events.push(Ok(LanguageModelCompletionEvent::ToolUseJsonParseError {
1106                                id: LanguageModelToolUseId::from(call_id.clone()),
1107                                tool_name: name.clone(),
1108                                raw_input: Arc::<str>::from(arguments.clone()),
1109                                json_parse_error: error.to_string(),
1110                            }));
1111                        }
1112                    }
1113                }
1114            }
1115        }
1116        events
1117    }
1118}
1119
1120fn token_usage_from_response_usage(usage: &ResponsesUsage) -> TokenUsage {
1121    TokenUsage {
1122        input_tokens: usage.input_tokens.unwrap_or_default(),
1123        output_tokens: usage.output_tokens.unwrap_or_default(),
1124        cache_creation_input_tokens: 0,
1125        cache_read_input_tokens: 0,
1126    }
1127}
1128
1129pub(crate) fn collect_tiktoken_messages(
1130    request: LanguageModelRequest,
1131) -> Vec<tiktoken_rs::ChatCompletionRequestMessage> {
1132    request
1133        .messages
1134        .into_iter()
1135        .map(|message| tiktoken_rs::ChatCompletionRequestMessage {
1136            role: match message.role {
1137                Role::User => "user".into(),
1138                Role::Assistant => "assistant".into(),
1139                Role::System => "system".into(),
1140            },
1141            content: Some(message.string_contents()),
1142            name: None,
1143            function_call: None,
1144        })
1145        .collect::<Vec<_>>()
1146}
1147
1148pub fn count_open_ai_tokens(
1149    request: LanguageModelRequest,
1150    model: Model,
1151    cx: &App,
1152) -> BoxFuture<'static, Result<u64>> {
1153    cx.background_spawn(async move {
1154        let messages = collect_tiktoken_messages(request);
1155        match model {
1156            Model::Custom { max_tokens, .. } => {
1157                let model = if max_tokens >= 100_000 {
1158                    // If the max tokens is 100k or more, it is likely the o200k_base tokenizer from gpt4o
1159                    "gpt-4o"
1160                } else {
1161                    // Otherwise fallback to gpt-4, since only cl100k_base and o200k_base are
1162                    // supported with this tiktoken method
1163                    "gpt-4"
1164                };
1165                tiktoken_rs::num_tokens_from_messages(model, &messages)
1166            }
1167            // Currently supported by tiktoken_rs
1168            // Sometimes tiktoken-rs is behind on model support. If that is the case, make a new branch
1169            // arm with an override. We enumerate all supported models here so that we can check if new
1170            // models are supported yet or not.
1171            Model::ThreePointFiveTurbo
1172            | Model::Four
1173            | Model::FourTurbo
1174            | Model::FourOmni
1175            | Model::FourOmniMini
1176            | Model::FourPointOne
1177            | Model::FourPointOneMini
1178            | Model::FourPointOneNano
1179            | Model::O1
1180            | Model::O3
1181            | Model::O3Mini
1182            | Model::O4Mini
1183            | Model::Five
1184            | Model::FiveCodex
1185            | Model::FiveMini
1186            | Model::FiveNano => tiktoken_rs::num_tokens_from_messages(model.id(), &messages),
1187            // GPT-5.1, 5.2, and 5.2-codex don't have dedicated tiktoken support; use gpt-5 tokenizer
1188            Model::FivePointOne | Model::FivePointTwo | Model::FivePointTwoCodex => {
1189                tiktoken_rs::num_tokens_from_messages("gpt-5", &messages)
1190            }
1191        }
1192        .map(|tokens| tokens as u64)
1193    })
1194    .boxed()
1195}
1196
1197struct ConfigurationView {
1198    api_key_editor: Entity<InputField>,
1199    state: Entity<State>,
1200    load_credentials_task: Option<Task<()>>,
1201}
1202
1203impl ConfigurationView {
1204    fn new(state: Entity<State>, window: &mut Window, cx: &mut Context<Self>) -> Self {
1205        let api_key_editor = cx.new(|cx| {
1206            InputField::new(
1207                window,
1208                cx,
1209                "sk-000000000000000000000000000000000000000000000000",
1210            )
1211        });
1212
1213        cx.observe(&state, |_, _, cx| {
1214            cx.notify();
1215        })
1216        .detach();
1217
1218        let load_credentials_task = Some(cx.spawn_in(window, {
1219            let state = state.clone();
1220            async move |this, cx| {
1221                if let Some(task) = Some(state.update(cx, |state, cx| state.authenticate(cx))) {
1222                    // We don't log an error, because "not signed in" is also an error.
1223                    let _ = task.await;
1224                }
1225                this.update(cx, |this, cx| {
1226                    this.load_credentials_task = None;
1227                    cx.notify();
1228                })
1229                .log_err();
1230            }
1231        }));
1232
1233        Self {
1234            api_key_editor,
1235            state,
1236            load_credentials_task,
1237        }
1238    }
1239
1240    fn save_api_key(&mut self, _: &menu::Confirm, window: &mut Window, cx: &mut Context<Self>) {
1241        let api_key = self.api_key_editor.read(cx).text(cx).trim().to_string();
1242        if api_key.is_empty() {
1243            return;
1244        }
1245
1246        // url changes can cause the editor to be displayed again
1247        self.api_key_editor
1248            .update(cx, |editor, cx| editor.set_text("", window, cx));
1249
1250        let state = self.state.clone();
1251        cx.spawn_in(window, async move |_, cx| {
1252            state
1253                .update(cx, |state, cx| state.set_api_key(Some(api_key), cx))
1254                .await
1255        })
1256        .detach_and_log_err(cx);
1257    }
1258
1259    fn reset_api_key(&mut self, window: &mut Window, cx: &mut Context<Self>) {
1260        self.api_key_editor
1261            .update(cx, |input, cx| input.set_text("", window, cx));
1262
1263        let state = self.state.clone();
1264        cx.spawn_in(window, async move |_, cx| {
1265            state
1266                .update(cx, |state, cx| state.set_api_key(None, cx))
1267                .await
1268        })
1269        .detach_and_log_err(cx);
1270    }
1271
1272    fn should_render_editor(&self, cx: &mut Context<Self>) -> bool {
1273        !self.state.read(cx).is_authenticated()
1274    }
1275}
1276
1277impl Render for ConfigurationView {
1278    fn render(&mut self, _: &mut Window, cx: &mut Context<Self>) -> impl IntoElement {
1279        let env_var_set = self.state.read(cx).api_key_state.is_from_env_var();
1280        let configured_card_label = if env_var_set {
1281            format!("API key set in {API_KEY_ENV_VAR_NAME} environment variable")
1282        } else {
1283            let api_url = OpenAiLanguageModelProvider::api_url(cx);
1284            if api_url == OPEN_AI_API_URL {
1285                "API key configured".to_string()
1286            } else {
1287                format!("API key configured for {}", api_url)
1288            }
1289        };
1290
1291        let api_key_section = if self.should_render_editor(cx) {
1292            v_flex()
1293                .on_action(cx.listener(Self::save_api_key))
1294                .child(Label::new("To use Zed's agent with OpenAI, you need to add an API key. Follow these steps:"))
1295                .child(
1296                    List::new()
1297                        .child(
1298                            ListBulletItem::new("")
1299                                .child(Label::new("Create one by visiting"))
1300                                .child(ButtonLink::new("OpenAI's console", "https://platform.openai.com/api-keys"))
1301                        )
1302                        .child(
1303                            ListBulletItem::new("Ensure your OpenAI account has credits")
1304                        )
1305                        .child(
1306                            ListBulletItem::new("Paste your API key below and hit enter to start using the agent")
1307                        ),
1308                )
1309                .child(self.api_key_editor.clone())
1310                .child(
1311                    Label::new(format!(
1312                        "You can also set the {API_KEY_ENV_VAR_NAME} environment variable and restart Zed."
1313                    ))
1314                    .size(LabelSize::Small)
1315                    .color(Color::Muted),
1316                )
1317                .child(
1318                    Label::new(
1319                        "Note that having a subscription for another service like GitHub Copilot won't work.",
1320                    )
1321                    .size(LabelSize::Small).color(Color::Muted),
1322                )
1323                .into_any_element()
1324        } else {
1325            ConfiguredApiCard::new(configured_card_label)
1326                .disabled(env_var_set)
1327                .on_click(cx.listener(|this, _, window, cx| this.reset_api_key(window, cx)))
1328                .when(env_var_set, |this| {
1329                    this.tooltip_label(format!("To reset your API key, unset the {API_KEY_ENV_VAR_NAME} environment variable."))
1330                })
1331                .into_any_element()
1332        };
1333
1334        let compatible_api_section = h_flex()
1335            .mt_1p5()
1336            .gap_0p5()
1337            .flex_wrap()
1338            .when(self.should_render_editor(cx), |this| {
1339                this.pt_1p5()
1340                    .border_t_1()
1341                    .border_color(cx.theme().colors().border_variant)
1342            })
1343            .child(
1344                h_flex()
1345                    .gap_2()
1346                    .child(
1347                        Icon::new(IconName::Info)
1348                            .size(IconSize::XSmall)
1349                            .color(Color::Muted),
1350                    )
1351                    .child(Label::new("Zed also supports OpenAI-compatible models.")),
1352            )
1353            .child(
1354                Button::new("docs", "Learn More")
1355                    .icon(IconName::ArrowUpRight)
1356                    .icon_size(IconSize::Small)
1357                    .icon_color(Color::Muted)
1358                    .on_click(move |_, _window, cx| {
1359                        cx.open_url("https://zed.dev/docs/ai/llm-providers#openai-api-compatible")
1360                    }),
1361            );
1362
1363        if self.load_credentials_task.is_some() {
1364            div().child(Label::new("Loading credentials…")).into_any()
1365        } else {
1366            v_flex()
1367                .size_full()
1368                .child(api_key_section)
1369                .child(compatible_api_section)
1370                .into_any()
1371        }
1372    }
1373}
1374
1375#[cfg(test)]
1376mod tests {
1377    use futures::{StreamExt, executor::block_on};
1378    use gpui::TestAppContext;
1379    use language_model::{LanguageModelRequestMessage, LanguageModelRequestTool};
1380    use open_ai::responses::{
1381        ResponseFunctionToolCall, ResponseOutputItem, ResponseOutputMessage, ResponseStatusDetails,
1382        ResponseSummary, ResponseUsage, StreamEvent as ResponsesStreamEvent,
1383    };
1384    use pretty_assertions::assert_eq;
1385    use serde_json::json;
1386
1387    use super::*;
1388
1389    fn map_response_events(events: Vec<ResponsesStreamEvent>) -> Vec<LanguageModelCompletionEvent> {
1390        block_on(async {
1391            OpenAiResponseEventMapper::new()
1392                .map_stream(Box::pin(futures::stream::iter(events.into_iter().map(Ok))))
1393                .collect::<Vec<_>>()
1394                .await
1395                .into_iter()
1396                .map(Result::unwrap)
1397                .collect()
1398        })
1399    }
1400
1401    fn response_item_message(id: &str) -> ResponseOutputItem {
1402        ResponseOutputItem::Message(ResponseOutputMessage {
1403            id: Some(id.to_string()),
1404            role: Some("assistant".to_string()),
1405            status: Some("in_progress".to_string()),
1406            content: vec![],
1407        })
1408    }
1409
1410    fn response_item_function_call(id: &str, args: Option<&str>) -> ResponseOutputItem {
1411        ResponseOutputItem::FunctionCall(ResponseFunctionToolCall {
1412            id: Some(id.to_string()),
1413            status: Some("in_progress".to_string()),
1414            name: Some("get_weather".to_string()),
1415            call_id: Some("call_123".to_string()),
1416            arguments: args.map(|s| s.to_string()).unwrap_or_default(),
1417        })
1418    }
1419
1420    #[gpui::test]
1421    fn tiktoken_rs_support(cx: &TestAppContext) {
1422        let request = LanguageModelRequest {
1423            thread_id: None,
1424            prompt_id: None,
1425            intent: None,
1426            messages: vec![LanguageModelRequestMessage {
1427                role: Role::User,
1428                content: vec![MessageContent::Text("message".into())],
1429                cache: false,
1430                reasoning_details: None,
1431            }],
1432            tools: vec![],
1433            tool_choice: None,
1434            stop: vec![],
1435            temperature: None,
1436            thinking_allowed: true,
1437        };
1438
1439        // Validate that all models are supported by tiktoken-rs
1440        for model in Model::iter() {
1441            let count = cx
1442                .foreground_executor()
1443                .block_on(count_open_ai_tokens(
1444                    request.clone(),
1445                    model,
1446                    &cx.app.borrow(),
1447                ))
1448                .unwrap();
1449            assert!(count > 0);
1450        }
1451    }
1452
1453    #[test]
1454    fn responses_stream_maps_text_and_usage() {
1455        let events = vec![
1456            ResponsesStreamEvent::OutputItemAdded {
1457                output_index: 0,
1458                sequence_number: None,
1459                item: response_item_message("msg_123"),
1460            },
1461            ResponsesStreamEvent::OutputTextDelta {
1462                item_id: "msg_123".into(),
1463                output_index: 0,
1464                content_index: Some(0),
1465                delta: "Hello".into(),
1466            },
1467            ResponsesStreamEvent::Completed {
1468                response: ResponseSummary {
1469                    usage: Some(ResponseUsage {
1470                        input_tokens: Some(5),
1471                        output_tokens: Some(3),
1472                        total_tokens: Some(8),
1473                    }),
1474                    ..Default::default()
1475                },
1476            },
1477        ];
1478
1479        let mapped = map_response_events(events);
1480        assert!(matches!(
1481            mapped[0],
1482            LanguageModelCompletionEvent::StartMessage { ref message_id } if message_id == "msg_123"
1483        ));
1484        assert!(matches!(
1485            mapped[1],
1486            LanguageModelCompletionEvent::Text(ref text) if text == "Hello"
1487        ));
1488        assert!(matches!(
1489            mapped[2],
1490            LanguageModelCompletionEvent::UsageUpdate(TokenUsage {
1491                input_tokens: 5,
1492                output_tokens: 3,
1493                ..
1494            })
1495        ));
1496        assert!(matches!(
1497            mapped[3],
1498            LanguageModelCompletionEvent::Stop(StopReason::EndTurn)
1499        ));
1500    }
1501
1502    #[test]
1503    fn into_open_ai_response_builds_complete_payload() {
1504        let tool_call_id = LanguageModelToolUseId::from("call-42");
1505        let tool_input = json!({ "city": "Boston" });
1506        let tool_arguments = serde_json::to_string(&tool_input).unwrap();
1507        let tool_use = LanguageModelToolUse {
1508            id: tool_call_id.clone(),
1509            name: Arc::from("get_weather"),
1510            raw_input: tool_arguments.clone(),
1511            input: tool_input,
1512            is_input_complete: true,
1513            thought_signature: None,
1514        };
1515        let tool_result = LanguageModelToolResult {
1516            tool_use_id: tool_call_id,
1517            tool_name: Arc::from("get_weather"),
1518            is_error: false,
1519            content: LanguageModelToolResultContent::Text(Arc::from("Sunny")),
1520            output: Some(json!({ "forecast": "Sunny" })),
1521        };
1522        let user_image = LanguageModelImage {
1523            source: SharedString::from("aGVsbG8="),
1524            size: None,
1525        };
1526        let expected_image_url = user_image.to_base64_url();
1527
1528        let request = LanguageModelRequest {
1529            thread_id: Some("thread-123".into()),
1530            prompt_id: None,
1531            intent: None,
1532            messages: vec![
1533                LanguageModelRequestMessage {
1534                    role: Role::System,
1535                    content: vec![MessageContent::Text("System context".into())],
1536                    cache: false,
1537                    reasoning_details: None,
1538                },
1539                LanguageModelRequestMessage {
1540                    role: Role::User,
1541                    content: vec![
1542                        MessageContent::Text("Please check the weather.".into()),
1543                        MessageContent::Image(user_image),
1544                    ],
1545                    cache: false,
1546                    reasoning_details: None,
1547                },
1548                LanguageModelRequestMessage {
1549                    role: Role::Assistant,
1550                    content: vec![
1551                        MessageContent::Text("Looking that up.".into()),
1552                        MessageContent::ToolUse(tool_use),
1553                    ],
1554                    cache: false,
1555                    reasoning_details: None,
1556                },
1557                LanguageModelRequestMessage {
1558                    role: Role::Assistant,
1559                    content: vec![MessageContent::ToolResult(tool_result)],
1560                    cache: false,
1561                    reasoning_details: None,
1562                },
1563            ],
1564            tools: vec![LanguageModelRequestTool {
1565                name: "get_weather".into(),
1566                description: "Fetches the weather".into(),
1567                input_schema: json!({ "type": "object" }),
1568            }],
1569            tool_choice: Some(LanguageModelToolChoice::Any),
1570            stop: vec!["<STOP>".into()],
1571            temperature: None,
1572            thinking_allowed: false,
1573        };
1574
1575        let response = into_open_ai_response(
1576            request,
1577            "custom-model",
1578            true,
1579            true,
1580            Some(2048),
1581            Some(ReasoningEffort::Low),
1582        );
1583
1584        let serialized = serde_json::to_value(&response).unwrap();
1585        let expected = json!({
1586            "model": "custom-model",
1587            "input": [
1588                {
1589                    "type": "message",
1590                    "role": "system",
1591                    "content": [
1592                        { "type": "input_text", "text": "System context" }
1593                    ]
1594                },
1595                {
1596                    "type": "message",
1597                    "role": "user",
1598                    "content": [
1599                        { "type": "input_text", "text": "Please check the weather." },
1600                        { "type": "input_image", "image_url": expected_image_url }
1601                    ]
1602                },
1603                {
1604                    "type": "message",
1605                    "role": "assistant",
1606                    "content": [
1607                        { "type": "output_text", "text": "Looking that up.", "annotations": [] }
1608                    ]
1609                },
1610                {
1611                    "type": "function_call",
1612                    "call_id": "call-42",
1613                    "name": "get_weather",
1614                    "arguments": tool_arguments
1615                },
1616                {
1617                    "type": "function_call_output",
1618                    "call_id": "call-42",
1619                    "output": "{\"forecast\":\"Sunny\"}"
1620                }
1621            ],
1622            "stream": true,
1623            "max_output_tokens": 2048,
1624            "parallel_tool_calls": true,
1625            "tool_choice": "required",
1626            "tools": [
1627                {
1628                    "type": "function",
1629                    "name": "get_weather",
1630                    "description": "Fetches the weather",
1631                    "parameters": { "type": "object" }
1632                }
1633            ],
1634            "prompt_cache_key": "thread-123",
1635            "reasoning": { "effort": "low" }
1636        });
1637
1638        assert_eq!(serialized, expected);
1639    }
1640
1641    #[test]
1642    fn responses_stream_maps_tool_calls() {
1643        let events = vec![
1644            ResponsesStreamEvent::OutputItemAdded {
1645                output_index: 0,
1646                sequence_number: None,
1647                item: response_item_function_call("item_fn", Some("{\"city\":\"Bos")),
1648            },
1649            ResponsesStreamEvent::FunctionCallArgumentsDelta {
1650                item_id: "item_fn".into(),
1651                output_index: 0,
1652                delta: "ton\"}".into(),
1653                sequence_number: None,
1654            },
1655            ResponsesStreamEvent::FunctionCallArgumentsDone {
1656                item_id: "item_fn".into(),
1657                output_index: 0,
1658                arguments: "{\"city\":\"Boston\"}".into(),
1659                sequence_number: None,
1660            },
1661            ResponsesStreamEvent::Completed {
1662                response: ResponseSummary::default(),
1663            },
1664        ];
1665
1666        let mapped = map_response_events(events);
1667        assert!(matches!(
1668            mapped[0],
1669            LanguageModelCompletionEvent::ToolUse(LanguageModelToolUse {
1670                ref id,
1671                ref name,
1672                ref raw_input,
1673                ..
1674            }) if id.to_string() == "call_123"
1675                && name.as_ref() == "get_weather"
1676                && raw_input == "{\"city\":\"Boston\"}"
1677        ));
1678        assert!(matches!(
1679            mapped[1],
1680            LanguageModelCompletionEvent::Stop(StopReason::ToolUse)
1681        ));
1682    }
1683
1684    #[test]
1685    fn responses_stream_uses_max_tokens_stop_reason() {
1686        let events = vec![ResponsesStreamEvent::Incomplete {
1687            response: ResponseSummary {
1688                status_details: Some(ResponseStatusDetails {
1689                    reason: Some("max_output_tokens".into()),
1690                    r#type: Some("incomplete".into()),
1691                    error: None,
1692                }),
1693                usage: Some(ResponseUsage {
1694                    input_tokens: Some(10),
1695                    output_tokens: Some(20),
1696                    total_tokens: Some(30),
1697                }),
1698                ..Default::default()
1699            },
1700        }];
1701
1702        let mapped = map_response_events(events);
1703        assert!(matches!(
1704            mapped[0],
1705            LanguageModelCompletionEvent::UsageUpdate(TokenUsage {
1706                input_tokens: 10,
1707                output_tokens: 20,
1708                ..
1709            })
1710        ));
1711        assert!(matches!(
1712            mapped[1],
1713            LanguageModelCompletionEvent::Stop(StopReason::MaxTokens)
1714        ));
1715    }
1716
1717    #[test]
1718    fn responses_stream_handles_multiple_tool_calls() {
1719        let events = vec![
1720            ResponsesStreamEvent::OutputItemAdded {
1721                output_index: 0,
1722                sequence_number: None,
1723                item: response_item_function_call("item_fn1", Some("{\"city\":\"NYC\"}")),
1724            },
1725            ResponsesStreamEvent::FunctionCallArgumentsDone {
1726                item_id: "item_fn1".into(),
1727                output_index: 0,
1728                arguments: "{\"city\":\"NYC\"}".into(),
1729                sequence_number: None,
1730            },
1731            ResponsesStreamEvent::OutputItemAdded {
1732                output_index: 1,
1733                sequence_number: None,
1734                item: response_item_function_call("item_fn2", Some("{\"city\":\"LA\"}")),
1735            },
1736            ResponsesStreamEvent::FunctionCallArgumentsDone {
1737                item_id: "item_fn2".into(),
1738                output_index: 1,
1739                arguments: "{\"city\":\"LA\"}".into(),
1740                sequence_number: None,
1741            },
1742            ResponsesStreamEvent::Completed {
1743                response: ResponseSummary::default(),
1744            },
1745        ];
1746
1747        let mapped = map_response_events(events);
1748        assert_eq!(mapped.len(), 3);
1749        assert!(matches!(
1750            mapped[0],
1751            LanguageModelCompletionEvent::ToolUse(LanguageModelToolUse { ref raw_input, .. })
1752            if raw_input == "{\"city\":\"NYC\"}"
1753        ));
1754        assert!(matches!(
1755            mapped[1],
1756            LanguageModelCompletionEvent::ToolUse(LanguageModelToolUse { ref raw_input, .. })
1757            if raw_input == "{\"city\":\"LA\"}"
1758        ));
1759        assert!(matches!(
1760            mapped[2],
1761            LanguageModelCompletionEvent::Stop(StopReason::ToolUse)
1762        ));
1763    }
1764
1765    #[test]
1766    fn responses_stream_handles_mixed_text_and_tool_calls() {
1767        let events = vec![
1768            ResponsesStreamEvent::OutputItemAdded {
1769                output_index: 0,
1770                sequence_number: None,
1771                item: response_item_message("msg_123"),
1772            },
1773            ResponsesStreamEvent::OutputTextDelta {
1774                item_id: "msg_123".into(),
1775                output_index: 0,
1776                content_index: Some(0),
1777                delta: "Let me check that".into(),
1778            },
1779            ResponsesStreamEvent::OutputItemAdded {
1780                output_index: 1,
1781                sequence_number: None,
1782                item: response_item_function_call("item_fn", Some("{\"query\":\"test\"}")),
1783            },
1784            ResponsesStreamEvent::FunctionCallArgumentsDone {
1785                item_id: "item_fn".into(),
1786                output_index: 1,
1787                arguments: "{\"query\":\"test\"}".into(),
1788                sequence_number: None,
1789            },
1790            ResponsesStreamEvent::Completed {
1791                response: ResponseSummary::default(),
1792            },
1793        ];
1794
1795        let mapped = map_response_events(events);
1796        assert!(matches!(
1797            mapped[0],
1798            LanguageModelCompletionEvent::StartMessage { .. }
1799        ));
1800        assert!(matches!(
1801            mapped[1],
1802            LanguageModelCompletionEvent::Text(ref text) if text == "Let me check that"
1803        ));
1804        assert!(matches!(
1805            mapped[2],
1806            LanguageModelCompletionEvent::ToolUse(LanguageModelToolUse { ref raw_input, .. })
1807            if raw_input == "{\"query\":\"test\"}"
1808        ));
1809        assert!(matches!(
1810            mapped[3],
1811            LanguageModelCompletionEvent::Stop(StopReason::ToolUse)
1812        ));
1813    }
1814
1815    #[test]
1816    fn responses_stream_handles_json_parse_error() {
1817        let events = vec![
1818            ResponsesStreamEvent::OutputItemAdded {
1819                output_index: 0,
1820                sequence_number: None,
1821                item: response_item_function_call("item_fn", Some("{invalid json")),
1822            },
1823            ResponsesStreamEvent::FunctionCallArgumentsDone {
1824                item_id: "item_fn".into(),
1825                output_index: 0,
1826                arguments: "{invalid json".into(),
1827                sequence_number: None,
1828            },
1829            ResponsesStreamEvent::Completed {
1830                response: ResponseSummary::default(),
1831            },
1832        ];
1833
1834        let mapped = map_response_events(events);
1835        assert!(matches!(
1836            mapped[0],
1837            LanguageModelCompletionEvent::ToolUseJsonParseError {
1838                ref raw_input,
1839                ..
1840            } if raw_input.as_ref() == "{invalid json"
1841        ));
1842    }
1843
1844    #[test]
1845    fn responses_stream_handles_incomplete_function_call() {
1846        let events = vec![
1847            ResponsesStreamEvent::OutputItemAdded {
1848                output_index: 0,
1849                sequence_number: None,
1850                item: response_item_function_call("item_fn", Some("{\"city\":")),
1851            },
1852            ResponsesStreamEvent::FunctionCallArgumentsDelta {
1853                item_id: "item_fn".into(),
1854                output_index: 0,
1855                delta: "\"Boston\"".into(),
1856                sequence_number: None,
1857            },
1858            ResponsesStreamEvent::Incomplete {
1859                response: ResponseSummary {
1860                    status_details: Some(ResponseStatusDetails {
1861                        reason: Some("max_output_tokens".into()),
1862                        r#type: Some("incomplete".into()),
1863                        error: None,
1864                    }),
1865                    output: vec![response_item_function_call(
1866                        "item_fn",
1867                        Some("{\"city\":\"Boston\"}"),
1868                    )],
1869                    ..Default::default()
1870                },
1871            },
1872        ];
1873
1874        let mapped = map_response_events(events);
1875        assert!(matches!(
1876            mapped[0],
1877            LanguageModelCompletionEvent::ToolUse(LanguageModelToolUse { ref raw_input, .. })
1878            if raw_input == "{\"city\":\"Boston\"}"
1879        ));
1880        assert!(matches!(
1881            mapped[1],
1882            LanguageModelCompletionEvent::Stop(StopReason::MaxTokens)
1883        ));
1884    }
1885
1886    #[test]
1887    fn responses_stream_incomplete_does_not_duplicate_tool_calls() {
1888        let events = vec![
1889            ResponsesStreamEvent::OutputItemAdded {
1890                output_index: 0,
1891                sequence_number: None,
1892                item: response_item_function_call("item_fn", Some("{\"city\":\"Boston\"}")),
1893            },
1894            ResponsesStreamEvent::FunctionCallArgumentsDone {
1895                item_id: "item_fn".into(),
1896                output_index: 0,
1897                arguments: "{\"city\":\"Boston\"}".into(),
1898                sequence_number: None,
1899            },
1900            ResponsesStreamEvent::Incomplete {
1901                response: ResponseSummary {
1902                    status_details: Some(ResponseStatusDetails {
1903                        reason: Some("max_output_tokens".into()),
1904                        r#type: Some("incomplete".into()),
1905                        error: None,
1906                    }),
1907                    output: vec![response_item_function_call(
1908                        "item_fn",
1909                        Some("{\"city\":\"Boston\"}"),
1910                    )],
1911                    ..Default::default()
1912                },
1913            },
1914        ];
1915
1916        let mapped = map_response_events(events);
1917        assert_eq!(mapped.len(), 2);
1918        assert!(matches!(
1919            mapped[0],
1920            LanguageModelCompletionEvent::ToolUse(LanguageModelToolUse { ref raw_input, .. })
1921            if raw_input == "{\"city\":\"Boston\"}"
1922        ));
1923        assert!(matches!(
1924            mapped[1],
1925            LanguageModelCompletionEvent::Stop(StopReason::MaxTokens)
1926        ));
1927    }
1928}