open_ai.rs

use anyhow::{Result, anyhow};
use collections::{BTreeMap, HashMap};
use futures::Stream;
use futures::{FutureExt, StreamExt, future::BoxFuture};
use gpui::{AnyView, App, AsyncApp, Context, Entity, SharedString, Task, Window};
use http_client::HttpClient;
use language_model::{
    ApiKeyState, AuthenticateError, EnvVar, IconOrSvg, LanguageModel, LanguageModelCompletionError,
    LanguageModelCompletionEvent, LanguageModelId, LanguageModelImage, LanguageModelName,
    LanguageModelProvider, LanguageModelProviderId, LanguageModelProviderName,
    LanguageModelProviderState, LanguageModelRequest, LanguageModelRequestMessage,
    LanguageModelToolChoice, LanguageModelToolResult, LanguageModelToolResultContent,
    LanguageModelToolUse, LanguageModelToolUseId, MessageContent, RateLimiter, Role, StopReason,
    TokenUsage, env_var,
};
use menu;
use open_ai::responses::{
    ResponseFunctionCallItem, ResponseFunctionCallOutputItem, ResponseInputContent,
    ResponseInputItem, ResponseMessageItem,
};
use open_ai::{
    ImageUrl, Model, OPEN_AI_API_URL, ReasoningEffort, ResponseStreamEvent,
    responses::{
        Request as ResponseRequest, ResponseOutputItem, ResponseSummary as ResponsesSummary,
        ResponseUsage as ResponsesUsage, StreamEvent as ResponsesStreamEvent, stream_response,
    },
    stream_completion,
};
use settings::{OpenAiAvailableModel as AvailableModel, Settings, SettingsStore};
use std::pin::Pin;
use std::str::FromStr as _;
use std::sync::{Arc, LazyLock};
use strum::IntoEnumIterator;
use ui::{ButtonLink, ConfiguredApiCard, List, ListBulletItem, prelude::*};
use ui_input::InputField;
use util::ResultExt;

const PROVIDER_ID: LanguageModelProviderId = language_model::OPEN_AI_PROVIDER_ID;
const PROVIDER_NAME: LanguageModelProviderName = language_model::OPEN_AI_PROVIDER_NAME;

const API_KEY_ENV_VAR_NAME: &str = "OPENAI_API_KEY";
static API_KEY_ENV_VAR: LazyLock<EnvVar> = env_var!(API_KEY_ENV_VAR_NAME);

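/// User-facing settings for the OpenAI provider: a custom API URL (empty
/// means the official endpoint) plus any extra models declared in settings.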
#[derive(Default, Clone, Debug, PartialEq)]
pub struct OpenAiSettings {
    pub api_url: String,
    pub available_models: Vec<AvailableModel>,
}

pub struct OpenAiLanguageModelProvider {
    http_client: Arc<dyn HttpClient>,
    state: Entity<State>,
}

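/// Shared provider state: the API key loaded for the currently configured
/// API URL, sourced from either the credential store or the
/// `OPENAI_API_KEY` environment variable.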
pub struct State {
    api_key_state: ApiKeyState,
}

impl State {
    fn is_authenticated(&self) -> bool {
        self.api_key_state.has_key()
    }

    fn set_api_key(&mut self, api_key: Option<String>, cx: &mut Context<Self>) -> Task<Result<()>> {
        let api_url = OpenAiLanguageModelProvider::api_url(cx);
        self.api_key_state
            .store(api_url, api_key, |this| &mut this.api_key_state, cx)
    }

    fn authenticate(&mut self, cx: &mut Context<Self>) -> Task<Result<(), AuthenticateError>> {
        let api_url = OpenAiLanguageModelProvider::api_url(cx);
        self.api_key_state
            .load_if_needed(api_url, |this| &mut this.api_key_state, cx)
    }
}

impl OpenAiLanguageModelProvider {
    pub fn new(http_client: Arc<dyn HttpClient>, cx: &mut App) -> Self {
        let state = cx.new(|cx| {
            cx.observe_global::<SettingsStore>(|this: &mut State, cx| {
                let api_url = Self::api_url(cx);
                this.api_key_state
                    .handle_url_change(api_url, |this| &mut this.api_key_state, cx);
                cx.notify();
            })
            .detach();
            State {
                api_key_state: ApiKeyState::new(Self::api_url(cx), (*API_KEY_ENV_VAR).clone()),
            }
        });

        Self { http_client, state }
    }

    fn create_language_model(&self, model: open_ai::Model) -> Arc<dyn LanguageModel> {
        Arc::new(OpenAiLanguageModel {
            id: LanguageModelId::from(model.id().to_string()),
            model,
            state: self.state.clone(),
            http_client: self.http_client.clone(),
            request_limiter: RateLimiter::new(4),
        })
    }

    fn settings(cx: &App) -> &OpenAiSettings {
        &crate::AllLanguageModelSettings::get_global(cx).openai
    }

    fn api_url(cx: &App) -> SharedString {
        let api_url = &Self::settings(cx).api_url;
        if api_url.is_empty() {
            open_ai::OPEN_AI_API_URL.into()
        } else {
            SharedString::new(api_url.as_str())
        }
    }
}

impl LanguageModelProviderState for OpenAiLanguageModelProvider {
    type ObservableEntity = State;

    fn observable_entity(&self) -> Option<Entity<Self::ObservableEntity>> {
        Some(self.state.clone())
    }
}

impl LanguageModelProvider for OpenAiLanguageModelProvider {
    fn id(&self) -> LanguageModelProviderId {
        PROVIDER_ID
    }

    fn name(&self) -> LanguageModelProviderName {
        PROVIDER_NAME
    }

    fn icon(&self) -> IconOrSvg {
        IconOrSvg::Icon(IconName::AiOpenAi)
    }

    fn default_model(&self, _cx: &App) -> Option<Arc<dyn LanguageModel>> {
        Some(self.create_language_model(open_ai::Model::default()))
    }

    fn default_fast_model(&self, _cx: &App) -> Option<Arc<dyn LanguageModel>> {
        Some(self.create_language_model(open_ai::Model::default_fast()))
    }

    fn provided_models(&self, cx: &App) -> Vec<Arc<dyn LanguageModel>> {
        let mut models = BTreeMap::default();

        // Add base models from open_ai::Model::iter()
        for model in open_ai::Model::iter() {
            if !matches!(model, open_ai::Model::Custom { .. }) {
                models.insert(model.id().to_string(), model);
            }
        }

        // Override with available models from settings
        for model in &OpenAiLanguageModelProvider::settings(cx).available_models {
            models.insert(
                model.name.clone(),
                open_ai::Model::Custom {
                    name: model.name.clone(),
                    display_name: model.display_name.clone(),
                    max_tokens: model.max_tokens,
                    max_output_tokens: model.max_output_tokens,
                    max_completion_tokens: model.max_completion_tokens,
                    reasoning_effort: model.reasoning_effort.clone(),
                    supports_chat_completions: model.capabilities.chat_completions,
                },
            );
        }

        models
            .into_values()
            .map(|model| self.create_language_model(model))
            .collect()
    }

    fn is_authenticated(&self, cx: &App) -> bool {
        self.state.read(cx).is_authenticated()
    }

    fn authenticate(&self, cx: &mut App) -> Task<Result<(), AuthenticateError>> {
        self.state.update(cx, |state, cx| state.authenticate(cx))
    }

    fn configuration_view(
        &self,
        _target_agent: language_model::ConfigurationViewTargetAgent,
        window: &mut Window,
        cx: &mut App,
    ) -> AnyView {
        cx.new(|cx| ConfigurationView::new(self.state.clone(), window, cx))
            .into()
    }

    fn reset_credentials(&self, cx: &mut App) -> Task<Result<()>> {
        self.state
            .update(cx, |state, cx| state.set_api_key(None, cx))
    }
}

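/// One selectable OpenAI model. Shares the provider's key state and
/// throttles concurrent requests through a `RateLimiter`.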
pub struct OpenAiLanguageModel {
    id: LanguageModelId,
    model: open_ai::Model,
    state: Entity<State>,
    http_client: Arc<dyn HttpClient>,
    request_limiter: RateLimiter,
}

impl OpenAiLanguageModel {
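    /// Issues a streaming Chat Completions request, resolving the API key
    /// and URL from provider state before going through the rate limiter.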
    fn stream_completion(
        &self,
        request: open_ai::Request,
        cx: &AsyncApp,
    ) -> BoxFuture<'static, Result<futures::stream::BoxStream<'static, Result<ResponseStreamEvent>>>>
    {
        let http_client = self.http_client.clone();

        let Ok((api_key, api_url)) = self.state.read_with(cx, |state, cx| {
            let api_url = OpenAiLanguageModelProvider::api_url(cx);
            (state.api_key_state.key(&api_url), api_url)
        }) else {
            return futures::future::ready(Err(anyhow!("app state was dropped"))).boxed();
        };

        let future = self.request_limiter.stream(async move {
            let provider = PROVIDER_NAME;
            let Some(api_key) = api_key else {
                return Err(LanguageModelCompletionError::NoApiKey { provider });
            };
            let request = stream_completion(
                http_client.as_ref(),
                provider.0.as_str(),
                &api_url,
                &api_key,
                request,
            );
            let response = request.await?;
            Ok(response)
        });

        async move { Ok(future.await?.boxed()) }.boxed()
    }

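    /// Like `stream_completion`, but targets the Responses API for models
    /// that don't support the Chat Completions endpoint.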
    fn stream_response(
        &self,
        request: ResponseRequest,
        cx: &AsyncApp,
    ) -> BoxFuture<'static, Result<futures::stream::BoxStream<'static, Result<ResponsesStreamEvent>>>>
    {
        let http_client = self.http_client.clone();

        let Ok((api_key, api_url)) = self.state.read_with(cx, |state, cx| {
            let api_url = OpenAiLanguageModelProvider::api_url(cx);
            (state.api_key_state.key(&api_url), api_url)
        }) else {
            return futures::future::ready(Err(anyhow!("app state was dropped"))).boxed();
        };

        let provider = PROVIDER_NAME;
        let future = self.request_limiter.stream(async move {
            let Some(api_key) = api_key else {
                return Err(LanguageModelCompletionError::NoApiKey { provider });
            };
            let request = stream_response(
                http_client.as_ref(),
                provider.0.as_str(),
                &api_url,
                &api_key,
                request,
            );
            let response = request.await?;
            Ok(response)
        });

        async move { Ok(future.await?.boxed()) }.boxed()
    }
}

impl LanguageModel for OpenAiLanguageModel {
    fn id(&self) -> LanguageModelId {
        self.id.clone()
    }

    fn name(&self) -> LanguageModelName {
        LanguageModelName::from(self.model.display_name().to_string())
    }

    fn provider_id(&self) -> LanguageModelProviderId {
        PROVIDER_ID
    }

    fn provider_name(&self) -> LanguageModelProviderName {
        PROVIDER_NAME
    }

    fn supports_tools(&self) -> bool {
        true
    }

    fn supports_images(&self) -> bool {
        use open_ai::Model;
        match &self.model {
            Model::FourOmni
            | Model::FourOmniMini
            | Model::FourPointOne
            | Model::FourPointOneMini
            | Model::FourPointOneNano
            | Model::Five
            | Model::FiveCodex
            | Model::FiveMini
            | Model::FiveNano
            | Model::FivePointOne
            | Model::FivePointTwo
            | Model::FivePointTwoCodex
            | Model::O1
            | Model::O3
            | Model::O4Mini => true,
            Model::ThreePointFiveTurbo
            | Model::Four
            | Model::FourTurbo
            | Model::O3Mini
            | Model::Custom { .. } => false,
        }
    }

    fn supports_tool_choice(&self, choice: LanguageModelToolChoice) -> bool {
        match choice {
            LanguageModelToolChoice::Auto => true,
            LanguageModelToolChoice::Any => true,
            LanguageModelToolChoice::None => true,
        }
    }

    fn supports_split_token_display(&self) -> bool {
        true
    }

    fn telemetry_id(&self) -> String {
        format!("openai/{}", self.model.id())
    }

    fn max_token_count(&self) -> u64 {
        self.model.max_token_count()
    }

    fn max_output_tokens(&self) -> Option<u64> {
        self.model.max_output_tokens()
    }

    fn count_tokens(
        &self,
        request: LanguageModelRequest,
        cx: &App,
    ) -> BoxFuture<'static, Result<u64>> {
        count_open_ai_tokens(request, self.model.clone(), cx)
    }

    fn stream_completion(
        &self,
        request: LanguageModelRequest,
        cx: &AsyncApp,
    ) -> BoxFuture<
        'static,
        Result<
            futures::stream::BoxStream<
                'static,
                Result<LanguageModelCompletionEvent, LanguageModelCompletionError>,
            >,
            LanguageModelCompletionError,
        >,
    > {
        if self.model.supports_chat_completions() {
            let request = into_open_ai(
                request,
                self.model.id(),
                self.model.supports_parallel_tool_calls(),
                self.model.supports_prompt_cache_key(),
                self.max_output_tokens(),
                self.model.reasoning_effort(),
            );
            let completions = self.stream_completion(request, cx);
            async move {
                let mapper = OpenAiEventMapper::new();
                Ok(mapper.map_stream(completions.await?).boxed())
            }
            .boxed()
        } else {
            let request = into_open_ai_response(
                request,
                self.model.id(),
                self.model.supports_parallel_tool_calls(),
                self.model.supports_prompt_cache_key(),
                self.max_output_tokens(),
                self.model.reasoning_effort(),
            );
            let completions = self.stream_response(request, cx);
            async move {
                let mapper = OpenAiResponseEventMapper::new();
                Ok(mapper.map_stream(completions.await?).boxed())
            }
            .boxed()
        }
    }
}

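/// Converts an agent `LanguageModelRequest` into a Chat Completions
/// request: message content is flattened into OpenAI message parts, tool
/// uses and results become tool-call messages, and streaming is disabled
/// for `o1-`-prefixed models.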
pub fn into_open_ai(
    request: LanguageModelRequest,
    model_id: &str,
    supports_parallel_tool_calls: bool,
    supports_prompt_cache_key: bool,
    max_output_tokens: Option<u64>,
    reasoning_effort: Option<ReasoningEffort>,
) -> open_ai::Request {
    let stream = !model_id.starts_with("o1-");

    let mut messages = Vec::new();
    for message in request.messages {
        for content in message.content {
            match content {
                MessageContent::Text(text) | MessageContent::Thinking { text, .. } => {
                    if !text.trim().is_empty() {
                        add_message_content_part(
                            open_ai::MessagePart::Text { text },
                            message.role,
                            &mut messages,
                        );
                    }
                }
                MessageContent::RedactedThinking(_) => {}
                MessageContent::Image(image) => {
                    add_message_content_part(
                        open_ai::MessagePart::Image {
                            image_url: ImageUrl {
                                url: image.to_base64_url(),
                                detail: None,
                            },
                        },
                        message.role,
                        &mut messages,
                    );
                }
                MessageContent::ToolUse(tool_use) => {
                    let tool_call = open_ai::ToolCall {
                        id: tool_use.id.to_string(),
                        content: open_ai::ToolCallContent::Function {
                            function: open_ai::FunctionContent {
                                name: tool_use.name.to_string(),
                                arguments: serde_json::to_string(&tool_use.input)
                                    .unwrap_or_default(),
                            },
                        },
                    };

                    if let Some(open_ai::RequestMessage::Assistant { tool_calls, .. }) =
                        messages.last_mut()
                    {
                        tool_calls.push(tool_call);
                    } else {
                        messages.push(open_ai::RequestMessage::Assistant {
                            content: None,
                            tool_calls: vec![tool_call],
                        });
                    }
                }
                MessageContent::ToolResult(tool_result) => {
                    let content = match &tool_result.content {
                        LanguageModelToolResultContent::Text(text) => {
                            vec![open_ai::MessagePart::Text {
                                text: text.to_string(),
                            }]
                        }
                        LanguageModelToolResultContent::Image(image) => {
                            vec![open_ai::MessagePart::Image {
                                image_url: ImageUrl {
                                    url: image.to_base64_url(),
                                    detail: None,
                                },
                            }]
                        }
                    };

                    messages.push(open_ai::RequestMessage::Tool {
                        content: content.into(),
                        tool_call_id: tool_result.tool_use_id.to_string(),
                    });
                }
            }
        }
    }

    open_ai::Request {
        model: model_id.into(),
        messages,
        stream,
        stop: request.stop,
        temperature: request.temperature.or(Some(1.0)),
        max_completion_tokens: max_output_tokens,
        parallel_tool_calls: if supports_parallel_tool_calls && !request.tools.is_empty() {
            // Disable parallel tool calls, as the Agent currently expects a maximum of one per turn.
            Some(false)
        } else {
            None
        },
        prompt_cache_key: if supports_prompt_cache_key {
            request.thread_id
        } else {
            None
        },
        tools: request
            .tools
            .into_iter()
            .map(|tool| open_ai::ToolDefinition::Function {
                function: open_ai::FunctionDefinition {
                    name: tool.name,
                    description: Some(tool.description),
                    parameters: Some(tool.input_schema),
                },
            })
            .collect(),
        tool_choice: request.tool_choice.map(|choice| match choice {
            LanguageModelToolChoice::Auto => open_ai::ToolChoice::Auto,
            LanguageModelToolChoice::Any => open_ai::ToolChoice::Required,
            LanguageModelToolChoice::None => open_ai::ToolChoice::None,
        }),
        reasoning_effort,
    }
}

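/// Converts an agent `LanguageModelRequest` into a Responses API request,
/// turning each message into a sequence of input items.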
pub fn into_open_ai_response(
    request: LanguageModelRequest,
    model_id: &str,
    supports_parallel_tool_calls: bool,
    supports_prompt_cache_key: bool,
    max_output_tokens: Option<u64>,
    reasoning_effort: Option<ReasoningEffort>,
) -> ResponseRequest {
    let stream = !model_id.starts_with("o1-");

    let LanguageModelRequest {
        thread_id,
        prompt_id: _,
        intent: _,
        messages,
        tools,
        tool_choice,
        stop: _,
        temperature,
        thinking_allowed: _,
    } = request;

    let mut input_items = Vec::new();
    for (index, message) in messages.into_iter().enumerate() {
        append_message_to_response_items(message, index, &mut input_items);
    }

    let tools: Vec<_> = tools
        .into_iter()
        .map(|tool| open_ai::responses::ToolDefinition::Function {
            name: tool.name,
            description: Some(tool.description),
            parameters: Some(tool.input_schema),
            strict: None,
        })
        .collect();

    ResponseRequest {
        model: model_id.into(),
        input: input_items,
        stream,
        temperature,
        top_p: None,
        max_output_tokens,
        parallel_tool_calls: if tools.is_empty() {
            None
        } else {
            Some(supports_parallel_tool_calls)
        },
        tool_choice: tool_choice.map(|choice| match choice {
            LanguageModelToolChoice::Auto => open_ai::ToolChoice::Auto,
            LanguageModelToolChoice::Any => open_ai::ToolChoice::Required,
            LanguageModelToolChoice::None => open_ai::ToolChoice::None,
        }),
        tools,
        prompt_cache_key: if supports_prompt_cache_key {
            thread_id
        } else {
            None
        },
        reasoning: reasoning_effort.map(|effort| open_ai::responses::ReasoningConfig { effort }),
    }
}

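/// Appends one message to the Responses input list: text and image parts
/// accumulate into message items, while tool uses and tool results are
/// flushed out as separate function-call items.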
fn append_message_to_response_items(
    message: LanguageModelRequestMessage,
    index: usize,
    input_items: &mut Vec<ResponseInputItem>,
) {
    let mut content_parts: Vec<ResponseInputContent> = Vec::new();

    for content in message.content {
        match content {
            MessageContent::Text(text) => {
                push_response_text_part(&message.role, text, &mut content_parts);
            }
            MessageContent::Thinking { text, .. } => {
                push_response_text_part(&message.role, text, &mut content_parts);
            }
            MessageContent::RedactedThinking(_) => {}
            MessageContent::Image(image) => {
                push_response_image_part(&message.role, image, &mut content_parts);
            }
            MessageContent::ToolUse(tool_use) => {
                flush_response_parts(&message.role, index, &mut content_parts, input_items);
                let call_id = tool_use.id.to_string();
                input_items.push(ResponseInputItem::FunctionCall(ResponseFunctionCallItem {
                    call_id,
                    name: tool_use.name.to_string(),
                    arguments: tool_use.raw_input,
                }));
            }
            MessageContent::ToolResult(tool_result) => {
                flush_response_parts(&message.role, index, &mut content_parts, input_items);
                input_items.push(ResponseInputItem::FunctionCallOutput(
                    ResponseFunctionCallOutputItem {
                        call_id: tool_result.tool_use_id.to_string(),
                        output: tool_result_output(&tool_result),
                    },
                ));
            }
        }
    }

    flush_response_parts(&message.role, index, &mut content_parts, input_items);
}

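/// Pushes a non-empty text part, using `OutputText` for assistant
/// messages and plain `Text` for user and system messages.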
fn push_response_text_part(
    role: &Role,
    text: impl Into<String>,
    parts: &mut Vec<ResponseInputContent>,
) {
    let text = text.into();
    if text.trim().is_empty() {
        return;
    }

    match role {
        Role::Assistant => parts.push(ResponseInputContent::OutputText {
            text,
            annotations: Vec::new(),
        }),
        _ => parts.push(ResponseInputContent::Text { text }),
    }
}

fn push_response_image_part(
    role: &Role,
    image: LanguageModelImage,
    parts: &mut Vec<ResponseInputContent>,
) {
    match role {
        Role::Assistant => parts.push(ResponseInputContent::OutputText {
            text: "[image omitted]".to_string(),
            annotations: Vec::new(),
        }),
        _ => parts.push(ResponseInputContent::Image {
            image_url: image.to_base64_url(),
        }),
    }
}

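/// Drains any buffered content parts into a single message item with the
/// corresponding role; a no-op when nothing is buffered.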
fn flush_response_parts(
    role: &Role,
    _index: usize,
    parts: &mut Vec<ResponseInputContent>,
    input_items: &mut Vec<ResponseInputItem>,
) {
    if parts.is_empty() {
        return;
    }

    let item = ResponseInputItem::Message(ResponseMessageItem {
        role: match role {
            Role::User => open_ai::Role::User,
            Role::Assistant => open_ai::Role::Assistant,
            Role::System => open_ai::Role::System,
        },
        content: parts.clone(),
    });

    input_items.push(item);
    parts.clear();
}

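/// Renders a tool result as a string for the Responses API, preferring
/// the structured `output` value (strings pass through verbatim) over the
/// displayable content.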
fn tool_result_output(result: &LanguageModelToolResult) -> String {
    if let Some(output) = &result.output {
        match output {
            serde_json::Value::String(text) => text.clone(),
            serde_json::Value::Null => String::new(),
            _ => output.to_string(),
        }
    } else {
        match &result.content {
            LanguageModelToolResultContent::Text(text) => text.to_string(),
            LanguageModelToolResultContent::Image(image) => image.to_base64_url(),
        }
    }
}

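/// Appends a part to the trailing message when its role matches,
/// otherwise starts a new message for the given role.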
fn add_message_content_part(
    new_part: open_ai::MessagePart,
    role: Role,
    messages: &mut Vec<open_ai::RequestMessage>,
) {
    match (role, messages.last_mut()) {
        (Role::User, Some(open_ai::RequestMessage::User { content }))
        | (
            Role::Assistant,
            Some(open_ai::RequestMessage::Assistant {
                content: Some(content),
                ..
            }),
        )
        | (Role::System, Some(open_ai::RequestMessage::System { content, .. })) => {
            content.push_part(new_part);
        }
        _ => {
            messages.push(match role {
                Role::User => open_ai::RequestMessage::User {
                    content: open_ai::MessageContent::from(vec![new_part]),
                },
                Role::Assistant => open_ai::RequestMessage::Assistant {
                    content: Some(open_ai::MessageContent::from(vec![new_part])),
                    tool_calls: Vec::new(),
                },
                Role::System => open_ai::RequestMessage::System {
                    content: open_ai::MessageContent::from(vec![new_part]),
                },
            });
        }
    }
}

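/// Translates Chat Completions stream events into
/// `LanguageModelCompletionEvent`s, accumulating tool-call fragments by
/// index until a finish reason arrives.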
pub struct OpenAiEventMapper {
    tool_calls_by_index: HashMap<usize, RawToolCall>,
}

impl OpenAiEventMapper {
    pub fn new() -> Self {
        Self {
            tool_calls_by_index: HashMap::default(),
        }
    }

    pub fn map_stream(
        mut self,
        events: Pin<Box<dyn Send + Stream<Item = Result<ResponseStreamEvent>>>>,
    ) -> impl Stream<Item = Result<LanguageModelCompletionEvent, LanguageModelCompletionError>>
    {
        events.flat_map(move |event| {
            futures::stream::iter(match event {
                Ok(event) => self.map_event(event),
                Err(error) => vec![Err(LanguageModelCompletionError::from(anyhow!(error)))],
            })
        })
    }

    pub fn map_event(
        &mut self,
        event: ResponseStreamEvent,
    ) -> Vec<Result<LanguageModelCompletionEvent, LanguageModelCompletionError>> {
        let mut events = Vec::new();
        if let Some(usage) = event.usage {
            events.push(Ok(LanguageModelCompletionEvent::UsageUpdate(TokenUsage {
                input_tokens: usage.prompt_tokens,
                output_tokens: usage.completion_tokens,
                cache_creation_input_tokens: 0,
                cache_read_input_tokens: 0,
            })));
        }

        let Some(choice) = event.choices.first() else {
            return events;
        };

        if let Some(delta) = choice.delta.as_ref() {
            if let Some(content) = delta.content.clone() {
                events.push(Ok(LanguageModelCompletionEvent::Text(content)));
            }

            if let Some(tool_calls) = delta.tool_calls.as_ref() {
                for tool_call in tool_calls {
                    let entry = self.tool_calls_by_index.entry(tool_call.index).or_default();

                    if let Some(tool_id) = tool_call.id.clone() {
                        entry.id = tool_id;
                    }

                    if let Some(function) = tool_call.function.as_ref() {
                        if let Some(name) = function.name.clone() {
                            entry.name = name;
                        }

                        if let Some(arguments) = function.arguments.clone() {
                            entry.arguments.push_str(&arguments);
                        }
                    }
                }
            }
        }

        match choice.finish_reason.as_deref() {
            Some("stop") => {
                events.push(Ok(LanguageModelCompletionEvent::Stop(StopReason::EndTurn)));
            }
            Some("tool_calls") => {
                events.extend(self.tool_calls_by_index.drain().map(|(_, tool_call)| {
                    match serde_json::Value::from_str(&tool_call.arguments) {
                        Ok(input) => Ok(LanguageModelCompletionEvent::ToolUse(
                            LanguageModelToolUse {
                                id: tool_call.id.clone().into(),
                                name: tool_call.name.as_str().into(),
                                is_input_complete: true,
                                input,
                                raw_input: tool_call.arguments.clone(),
                                thought_signature: None,
                            },
                        )),
                        Err(error) => Ok(LanguageModelCompletionEvent::ToolUseJsonParseError {
                            id: tool_call.id.into(),
                            tool_name: tool_call.name.into(),
                            raw_input: tool_call.arguments.clone().into(),
                            json_parse_error: error.to_string(),
                        }),
                    }
                }));

                events.push(Ok(LanguageModelCompletionEvent::Stop(StopReason::ToolUse)));
            }
            Some(stop_reason) => {
                log::error!("Unexpected OpenAI stop_reason: {stop_reason:?}");
                events.push(Ok(LanguageModelCompletionEvent::Stop(StopReason::EndTurn)));
            }
            None => {}
        }

        events
    }
}

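/// A tool call assembled incrementally from streamed deltas.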
#[derive(Default)]
struct RawToolCall {
    id: String,
    name: String,
    arguments: String,
}

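/// Translates Responses API stream events into
/// `LanguageModelCompletionEvent`s, tracking in-flight function calls by
/// item id and deferring the stop reason until the response ends.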
pub struct OpenAiResponseEventMapper {
    function_calls_by_item: HashMap<String, PendingResponseFunctionCall>,
    pending_stop_reason: Option<StopReason>,
}

#[derive(Default)]
struct PendingResponseFunctionCall {
    call_id: String,
    name: Arc<str>,
    arguments: String,
}

impl OpenAiResponseEventMapper {
    pub fn new() -> Self {
        Self {
            function_calls_by_item: HashMap::default(),
            pending_stop_reason: None,
        }
    }

    pub fn map_stream(
        mut self,
        events: Pin<Box<dyn Send + Stream<Item = Result<ResponsesStreamEvent>>>>,
    ) -> impl Stream<Item = Result<LanguageModelCompletionEvent, LanguageModelCompletionError>>
    {
        events.flat_map(move |event| {
            futures::stream::iter(match event {
                Ok(event) => self.map_event(event),
                Err(error) => vec![Err(LanguageModelCompletionError::from(anyhow!(error)))],
            })
        })
    }

    pub fn map_event(
        &mut self,
        event: ResponsesStreamEvent,
    ) -> Vec<Result<LanguageModelCompletionEvent, LanguageModelCompletionError>> {
        match event {
            ResponsesStreamEvent::OutputItemAdded { item, .. } => {
                let mut events = Vec::new();

                match &item {
                    ResponseOutputItem::Message(message) => {
                        if let Some(id) = &message.id {
                            events.push(Ok(LanguageModelCompletionEvent::StartMessage {
                                message_id: id.clone(),
                            }));
                        }
                    }
                    ResponseOutputItem::FunctionCall(function_call) => {
                        if let Some(item_id) = function_call.id.clone() {
                            let call_id = function_call
                                .call_id
                                .clone()
                                .or_else(|| function_call.id.clone())
                                .unwrap_or_else(|| item_id.clone());
                            let entry = PendingResponseFunctionCall {
                                call_id,
                                name: Arc::<str>::from(
                                    function_call.name.clone().unwrap_or_default(),
                                ),
                                arguments: function_call.arguments.clone(),
                            };
                            self.function_calls_by_item.insert(item_id, entry);
                        }
                    }
                    ResponseOutputItem::Unknown => {}
                }
                events
            }
            ResponsesStreamEvent::OutputTextDelta { delta, .. } => {
                if delta.is_empty() {
                    Vec::new()
                } else {
                    vec![Ok(LanguageModelCompletionEvent::Text(delta))]
                }
            }
            ResponsesStreamEvent::FunctionCallArgumentsDelta { item_id, delta, .. } => {
                if let Some(entry) = self.function_calls_by_item.get_mut(&item_id) {
                    entry.arguments.push_str(&delta);
                }
                Vec::new()
            }
            ResponsesStreamEvent::FunctionCallArgumentsDone {
                item_id, arguments, ..
            } => {
                if let Some(mut entry) = self.function_calls_by_item.remove(&item_id) {
                    if !arguments.is_empty() {
                        entry.arguments = arguments;
                    }
                    let raw_input = entry.arguments.clone();
                    self.pending_stop_reason = Some(StopReason::ToolUse);
                    match serde_json::from_str::<serde_json::Value>(&entry.arguments) {
                        Ok(input) => {
                            vec![Ok(LanguageModelCompletionEvent::ToolUse(
                                LanguageModelToolUse {
                                    id: LanguageModelToolUseId::from(entry.call_id.clone()),
                                    name: entry.name.clone(),
                                    is_input_complete: true,
                                    input,
                                    raw_input,
                                    thought_signature: None,
                                },
                            ))]
                        }
                        Err(error) => {
                            vec![Ok(LanguageModelCompletionEvent::ToolUseJsonParseError {
                                id: LanguageModelToolUseId::from(entry.call_id.clone()),
                                tool_name: entry.name.clone(),
                                raw_input: Arc::<str>::from(raw_input),
                                json_parse_error: error.to_string(),
                            })]
                        }
                    }
                } else {
                    Vec::new()
                }
            }
            ResponsesStreamEvent::Completed { response } => {
                self.handle_completion(response, StopReason::EndTurn)
            }
            ResponsesStreamEvent::Incomplete { response } => {
                let reason = response
                    .status_details
                    .as_ref()
                    .and_then(|details| details.reason.as_deref());
                let stop_reason = match reason {
                    Some("max_output_tokens") => StopReason::MaxTokens,
                    Some("content_filter") => {
                        self.pending_stop_reason = Some(StopReason::Refusal);
                        StopReason::Refusal
                    }
                    _ => self
                        .pending_stop_reason
                        .take()
                        .unwrap_or(StopReason::EndTurn),
                };

                let mut events = Vec::new();
                if self.pending_stop_reason.is_none() {
                    events.extend(self.emit_tool_calls_from_output(&response.output));
                }
                if let Some(usage) = response.usage.as_ref() {
                    events.push(Ok(LanguageModelCompletionEvent::UsageUpdate(
                        token_usage_from_response_usage(usage),
                    )));
                }
                events.push(Ok(LanguageModelCompletionEvent::Stop(stop_reason)));
                events
            }
            ResponsesStreamEvent::Failed { response } => {
                let message = response
                    .status_details
                    .and_then(|details| details.error)
                    .map(|error| error.to_string())
                    .unwrap_or_else(|| "response failed".to_string());
                vec![Err(LanguageModelCompletionError::Other(anyhow!(message)))]
            }
            ResponsesStreamEvent::Error { error }
            | ResponsesStreamEvent::GenericError { error } => {
                vec![Err(LanguageModelCompletionError::Other(anyhow!(
                    "{error:?}"
                )))]
            }
            ResponsesStreamEvent::OutputTextDone { .. } => Vec::new(),
            ResponsesStreamEvent::OutputItemDone { .. }
            | ResponsesStreamEvent::ContentPartAdded { .. }
            | ResponsesStreamEvent::ContentPartDone { .. }
            | ResponsesStreamEvent::Created { .. }
            | ResponsesStreamEvent::InProgress { .. }
            | ResponsesStreamEvent::Unknown => Vec::new(),
        }
    }

    fn handle_completion(
        &mut self,
        response: ResponsesSummary,
        default_reason: StopReason,
    ) -> Vec<Result<LanguageModelCompletionEvent, LanguageModelCompletionError>> {
        let mut events = Vec::new();

        if self.pending_stop_reason.is_none() {
            events.extend(self.emit_tool_calls_from_output(&response.output));
        }

        if let Some(usage) = response.usage.as_ref() {
            events.push(Ok(LanguageModelCompletionEvent::UsageUpdate(
                token_usage_from_response_usage(usage),
            )));
        }

        let stop_reason = self.pending_stop_reason.take().unwrap_or(default_reason);
        events.push(Ok(LanguageModelCompletionEvent::Stop(stop_reason)));
        events
    }

    fn emit_tool_calls_from_output(
        &mut self,
        output: &[ResponseOutputItem],
    ) -> Vec<Result<LanguageModelCompletionEvent, LanguageModelCompletionError>> {
        let mut events = Vec::new();
        for item in output {
            if let ResponseOutputItem::FunctionCall(function_call) = item {
                let Some(call_id) = function_call
                    .call_id
                    .clone()
                    .or_else(|| function_call.id.clone())
                else {
                    log::error!(
                        "Function call item missing both call_id and id: {:?}",
                        function_call
                    );
                    continue;
                };
                let name: Arc<str> = Arc::from(function_call.name.clone().unwrap_or_default());
                let arguments = &function_call.arguments;
                if !arguments.is_empty() {
                    self.pending_stop_reason = Some(StopReason::ToolUse);
                    match serde_json::from_str::<serde_json::Value>(arguments) {
                        Ok(input) => {
                            events.push(Ok(LanguageModelCompletionEvent::ToolUse(
                                LanguageModelToolUse {
                                    id: LanguageModelToolUseId::from(call_id.clone()),
                                    name: name.clone(),
                                    is_input_complete: true,
                                    input,
                                    raw_input: arguments.clone(),
                                    thought_signature: None,
                                },
                            )));
                        }
                        Err(error) => {
                            events.push(Ok(LanguageModelCompletionEvent::ToolUseJsonParseError {
                                id: LanguageModelToolUseId::from(call_id.clone()),
                                tool_name: name.clone(),
                                raw_input: Arc::<str>::from(arguments.clone()),
                                json_parse_error: error.to_string(),
                            }));
                        }
                    }
                }
            }
        }
        events
    }
}

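/// Converts Responses API usage counts into the agent's `TokenUsage`.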
fn token_usage_from_response_usage(usage: &ResponsesUsage) -> TokenUsage {
    TokenUsage {
        input_tokens: usage.input_tokens.unwrap_or_default(),
        output_tokens: usage.output_tokens.unwrap_or_default(),
        cache_creation_input_tokens: 0,
        cache_read_input_tokens: 0,
    }
}

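/// Flattens request messages into the shape tiktoken-rs expects for
/// token counting.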
pub(crate) fn collect_tiktoken_messages(
    request: LanguageModelRequest,
) -> Vec<tiktoken_rs::ChatCompletionRequestMessage> {
    request
        .messages
        .into_iter()
        .map(|message| tiktoken_rs::ChatCompletionRequestMessage {
            role: match message.role {
                Role::User => "user".into(),
                Role::Assistant => "assistant".into(),
                Role::System => "system".into(),
            },
            content: Some(message.string_contents()),
            name: None,
            function_call: None,
        })
        .collect::<Vec<_>>()
}

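/// Counts prompt tokens on a background thread. Custom models fall back
/// to the gpt-4o (o200k_base) or gpt-4 (cl100k_base) tokenizer depending
/// on their context size, since those are the encodings this tiktoken-rs
/// call supports.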
pub fn count_open_ai_tokens(
    request: LanguageModelRequest,
    model: Model,
    cx: &App,
) -> BoxFuture<'static, Result<u64>> {
    cx.background_spawn(async move {
        let messages = collect_tiktoken_messages(request);
        match model {
            Model::Custom { max_tokens, .. } => {
                let model = if max_tokens >= 100_000 {
                    // A context window of 100k or more suggests the o200k_base tokenizer from gpt-4o
                    "gpt-4o"
                } else {
                    // Otherwise fall back to gpt-4, since only cl100k_base and o200k_base are
                    // supported by this tiktoken method
                    "gpt-4"
                };
                tiktoken_rs::num_tokens_from_messages(model, &messages)
            }
            // Currently supported by tiktoken_rs.
            // Sometimes tiktoken-rs is behind on model support. If that is the case, make a new branch
            // arm with an override. We enumerate all supported models here so that we can check if new
            // models are supported yet or not.
            Model::ThreePointFiveTurbo
            | Model::Four
            | Model::FourTurbo
            | Model::FourOmni
            | Model::FourOmniMini
            | Model::FourPointOne
            | Model::FourPointOneMini
            | Model::FourPointOneNano
            | Model::O1
            | Model::O3
            | Model::O3Mini
            | Model::O4Mini
            | Model::Five
            | Model::FiveCodex
            | Model::FiveMini
            | Model::FiveNano => tiktoken_rs::num_tokens_from_messages(model.id(), &messages),
            // GPT-5.1, 5.2, and 5.2-codex don't have dedicated tiktoken support; use the gpt-5 tokenizer
            Model::FivePointOne | Model::FivePointTwo | Model::FivePointTwoCodex => {
                tiktoken_rs::num_tokens_from_messages("gpt-5", &messages)
            }
        }
        .map(|tokens| tokens as u64)
    })
    .boxed()
}

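/// Settings UI for entering, saving, and resetting the OpenAI API key.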
1180struct ConfigurationView {
1181    api_key_editor: Entity<InputField>,
1182    state: Entity<State>,
1183    load_credentials_task: Option<Task<()>>,
1184}
1185
1186impl ConfigurationView {
1187    fn new(state: Entity<State>, window: &mut Window, cx: &mut Context<Self>) -> Self {
1188        let api_key_editor = cx.new(|cx| {
1189            InputField::new(
1190                window,
1191                cx,
1192                "sk-000000000000000000000000000000000000000000000000",
1193            )
1194        });
1195
1196        cx.observe(&state, |_, _, cx| {
1197            cx.notify();
1198        })
1199        .detach();
1200
1201        let load_credentials_task = Some(cx.spawn_in(window, {
1202            let state = state.clone();
1203            async move |this, cx| {
1204                if let Some(task) = Some(state.update(cx, |state, cx| state.authenticate(cx))) {
1205                    // We don't log an error, because "not signed in" is also an error.
1206                    let _ = task.await;
1207                }
1208                this.update(cx, |this, cx| {
1209                    this.load_credentials_task = None;
1210                    cx.notify();
1211                })
1212                .log_err();
1213            }
1214        }));
1215
1216        Self {
1217            api_key_editor,
1218            state,
1219            load_credentials_task,
1220        }
1221    }
1222
1223    fn save_api_key(&mut self, _: &menu::Confirm, window: &mut Window, cx: &mut Context<Self>) {
1224        let api_key = self.api_key_editor.read(cx).text(cx).trim().to_string();
1225        if api_key.is_empty() {
1226            return;
1227        }
1228
1229        // url changes can cause the editor to be displayed again
1230        self.api_key_editor
1231            .update(cx, |editor, cx| editor.set_text("", window, cx));
1232
1233        let state = self.state.clone();
1234        cx.spawn_in(window, async move |_, cx| {
1235            state
1236                .update(cx, |state, cx| state.set_api_key(Some(api_key), cx))
1237                .await
1238        })
1239        .detach_and_log_err(cx);
1240    }
1241
1242    fn reset_api_key(&mut self, window: &mut Window, cx: &mut Context<Self>) {
1243        self.api_key_editor
1244            .update(cx, |input, cx| input.set_text("", window, cx));
1245
1246        let state = self.state.clone();
1247        cx.spawn_in(window, async move |_, cx| {
1248            state
1249                .update(cx, |state, cx| state.set_api_key(None, cx))
1250                .await
1251        })
1252        .detach_and_log_err(cx);
1253    }
1254
1255    fn should_render_editor(&self, cx: &mut Context<Self>) -> bool {
1256        !self.state.read(cx).is_authenticated()
1257    }
1258}
1259
1260impl Render for ConfigurationView {
1261    fn render(&mut self, _: &mut Window, cx: &mut Context<Self>) -> impl IntoElement {
1262        let env_var_set = self.state.read(cx).api_key_state.is_from_env_var();
1263        let configured_card_label = if env_var_set {
1264            format!("API key set in {API_KEY_ENV_VAR_NAME} environment variable")
1265        } else {
1266            let api_url = OpenAiLanguageModelProvider::api_url(cx);
1267            if api_url == OPEN_AI_API_URL {
1268                "API key configured".to_string()
1269            } else {
1270                format!("API key configured for {}", api_url)
1271            }
1272        };
1273
1274        let api_key_section = if self.should_render_editor(cx) {
1275            v_flex()
1276                .on_action(cx.listener(Self::save_api_key))
1277                .child(Label::new("To use Zed's agent with OpenAI, you need to add an API key. Follow these steps:"))
1278                .child(
1279                    List::new()
1280                        .child(
1281                            ListBulletItem::new("")
1282                                .child(Label::new("Create one by visiting"))
1283                                .child(ButtonLink::new("OpenAI's console", "https://platform.openai.com/api-keys"))
1284                        )
1285                        .child(
1286                            ListBulletItem::new("Ensure your OpenAI account has credits")
1287                        )
1288                        .child(
1289                            ListBulletItem::new("Paste your API key below and hit enter to start using the agent")
1290                        ),
1291                )
1292                .child(self.api_key_editor.clone())
1293                .child(
1294                    Label::new(format!(
1295                        "You can also set the {API_KEY_ENV_VAR_NAME} environment variable and restart Zed."
1296                    ))
1297                    .size(LabelSize::Small)
1298                    .color(Color::Muted),
1299                )
1300                .child(
1301                    Label::new(
1302                        "Note that having a subscription for another service like GitHub Copilot won't work.",
1303                    )
1304                    .size(LabelSize::Small).color(Color::Muted),
1305                )
1306                .into_any_element()
1307        } else {
1308            ConfiguredApiCard::new(configured_card_label)
1309                .disabled(env_var_set)
1310                .on_click(cx.listener(|this, _, window, cx| this.reset_api_key(window, cx)))
1311                .when(env_var_set, |this| {
1312                    this.tooltip_label(format!("To reset your API key, unset the {API_KEY_ENV_VAR_NAME} environment variable."))
1313                })
1314                .into_any_element()
1315        };
1316
1317        let compatible_api_section = h_flex()
1318            .mt_1p5()
1319            .gap_0p5()
1320            .flex_wrap()
1321            .when(self.should_render_editor(cx), |this| {
1322                this.pt_1p5()
1323                    .border_t_1()
1324                    .border_color(cx.theme().colors().border_variant)
1325            })
1326            .child(
1327                h_flex()
1328                    .gap_2()
1329                    .child(
1330                        Icon::new(IconName::Info)
1331                            .size(IconSize::XSmall)
1332                            .color(Color::Muted),
1333                    )
1334                    .child(Label::new("Zed also supports OpenAI-compatible models.")),
1335            )
1336            .child(
1337                Button::new("docs", "Learn More")
1338                    .icon(IconName::ArrowUpRight)
1339                    .icon_size(IconSize::Small)
1340                    .icon_color(Color::Muted)
1341                    .on_click(move |_, _window, cx| {
1342                        cx.open_url("https://zed.dev/docs/ai/llm-providers#openai-api-compatible")
1343                    }),
1344            );
1345
1346        if self.load_credentials_task.is_some() {
1347            div().child(Label::new("Loading credentials…")).into_any()
1348        } else {
1349            v_flex()
1350                .size_full()
1351                .child(api_key_section)
1352                .child(compatible_api_section)
1353                .into_any()
1354        }
1355    }
1356}
1357
1358#[cfg(test)]
1359mod tests {
1360    use futures::{StreamExt, executor::block_on};
1361    use gpui::TestAppContext;
1362    use language_model::{LanguageModelRequestMessage, LanguageModelRequestTool};
1363    use open_ai::responses::{
1364        ResponseFunctionToolCall, ResponseOutputItem, ResponseOutputMessage, ResponseStatusDetails,
1365        ResponseSummary, ResponseUsage, StreamEvent as ResponsesStreamEvent,
1366    };
1367    use pretty_assertions::assert_eq;
1368    use serde_json::json;
1369
1370    use super::*;
1371
1372    fn map_response_events(events: Vec<ResponsesStreamEvent>) -> Vec<LanguageModelCompletionEvent> {
1373        block_on(async {
1374            OpenAiResponseEventMapper::new()
1375                .map_stream(Box::pin(futures::stream::iter(events.into_iter().map(Ok))))
1376                .collect::<Vec<_>>()
1377                .await
1378                .into_iter()
1379                .map(Result::unwrap)
1380                .collect()
1381        })
1382    }
1383
1384    fn response_item_message(id: &str) -> ResponseOutputItem {
1385        ResponseOutputItem::Message(ResponseOutputMessage {
1386            id: Some(id.to_string()),
1387            role: Some("assistant".to_string()),
1388            status: Some("in_progress".to_string()),
1389            content: vec![],
1390        })
1391    }
1392
1393    fn response_item_function_call(id: &str, args: Option<&str>) -> ResponseOutputItem {
1394        ResponseOutputItem::FunctionCall(ResponseFunctionToolCall {
1395            id: Some(id.to_string()),
1396            status: Some("in_progress".to_string()),
1397            name: Some("get_weather".to_string()),
1398            call_id: Some("call_123".to_string()),
1399            arguments: args.map(|s| s.to_string()).unwrap_or_default(),
1400        })
1401    }
1402
    #[gpui::test]
    fn tiktoken_rs_support(cx: &TestAppContext) {
        let request = LanguageModelRequest {
            thread_id: None,
            prompt_id: None,
            intent: None,
            messages: vec![LanguageModelRequestMessage {
                role: Role::User,
                content: vec![MessageContent::Text("message".into())],
                cache: false,
                reasoning_details: None,
            }],
            tools: vec![],
            tool_choice: None,
            stop: vec![],
            temperature: None,
            thinking_allowed: true,
        };

        // Validate that all models are supported by tiktoken-rs
        for model in Model::iter() {
            let count = cx
                .foreground_executor()
                .block_on(count_open_ai_tokens(
                    request.clone(),
                    model,
                    &cx.app.borrow(),
                ))
                .unwrap();
            assert!(count > 0);
        }
    }

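    // A message item, a text delta, and a Completed event with usage should
    // map to StartMessage, Text, UsageUpdate, and Stop(EndTurn), in order.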
    #[test]
    fn responses_stream_maps_text_and_usage() {
        let events = vec![
            ResponsesStreamEvent::OutputItemAdded {
                output_index: 0,
                sequence_number: None,
                item: response_item_message("msg_123"),
            },
            ResponsesStreamEvent::OutputTextDelta {
                item_id: "msg_123".into(),
                output_index: 0,
                content_index: Some(0),
                delta: "Hello".into(),
            },
            ResponsesStreamEvent::Completed {
                response: ResponseSummary {
                    usage: Some(ResponseUsage {
                        input_tokens: Some(5),
                        output_tokens: Some(3),
                        total_tokens: Some(8),
                    }),
                    ..Default::default()
                },
            },
        ];

        let mapped = map_response_events(events);
        assert!(matches!(
            mapped[0],
            LanguageModelCompletionEvent::StartMessage { ref message_id } if message_id == "msg_123"
        ));
        assert!(matches!(
            mapped[1],
            LanguageModelCompletionEvent::Text(ref text) if text == "Hello"
        ));
        assert!(matches!(
            mapped[2],
            LanguageModelCompletionEvent::UsageUpdate(TokenUsage {
                input_tokens: 5,
                output_tokens: 3,
                ..
            })
        ));
        assert!(matches!(
            mapped[3],
            LanguageModelCompletionEvent::Stop(StopReason::EndTurn)
        ));
    }

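    // Serializes a request that exercises every message shape (system text,
    // user text + image, assistant text, tool use, and tool result) and
    // compares it against the exact Responses API payload we expect.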
    #[test]
    fn into_open_ai_response_builds_complete_payload() {
        let tool_call_id = LanguageModelToolUseId::from("call-42");
        let tool_input = json!({ "city": "Boston" });
        let tool_arguments = serde_json::to_string(&tool_input).unwrap();
        let tool_use = LanguageModelToolUse {
            id: tool_call_id.clone(),
            name: Arc::from("get_weather"),
            raw_input: tool_arguments.clone(),
            input: tool_input,
            is_input_complete: true,
            thought_signature: None,
        };
        let tool_result = LanguageModelToolResult {
            tool_use_id: tool_call_id,
            tool_name: Arc::from("get_weather"),
            is_error: false,
            content: LanguageModelToolResultContent::Text(Arc::from("Sunny")),
            output: Some(json!({ "forecast": "Sunny" })),
        };
        let user_image = LanguageModelImage {
            source: SharedString::from("aGVsbG8="),
            size: None,
        };
        let expected_image_url = user_image.to_base64_url();

        let request = LanguageModelRequest {
            thread_id: Some("thread-123".into()),
            prompt_id: None,
            intent: None,
            messages: vec![
                LanguageModelRequestMessage {
                    role: Role::System,
                    content: vec![MessageContent::Text("System context".into())],
                    cache: false,
                    reasoning_details: None,
                },
                LanguageModelRequestMessage {
                    role: Role::User,
                    content: vec![
                        MessageContent::Text("Please check the weather.".into()),
                        MessageContent::Image(user_image),
                    ],
                    cache: false,
                    reasoning_details: None,
                },
                LanguageModelRequestMessage {
                    role: Role::Assistant,
                    content: vec![
                        MessageContent::Text("Looking that up.".into()),
                        MessageContent::ToolUse(tool_use),
                    ],
                    cache: false,
                    reasoning_details: None,
                },
                LanguageModelRequestMessage {
                    role: Role::Assistant,
                    content: vec![MessageContent::ToolResult(tool_result)],
                    cache: false,
                    reasoning_details: None,
                },
            ],
            tools: vec![LanguageModelRequestTool {
                name: "get_weather".into(),
                description: "Fetches the weather".into(),
                input_schema: json!({ "type": "object" }),
            }],
            tool_choice: Some(LanguageModelToolChoice::Any),
            stop: vec!["<STOP>".into()],
            temperature: None,
            thinking_allowed: false,
        };

        let response = into_open_ai_response(
            request,
            "custom-model",
            true,
            true,
            Some(2048),
            Some(ReasoningEffort::Low),
        );

        let serialized = serde_json::to_value(&response).unwrap();
        let expected = json!({
            "model": "custom-model",
            "input": [
                {
                    "type": "message",
                    "role": "system",
                    "content": [
                        { "type": "input_text", "text": "System context" }
                    ]
                },
                {
                    "type": "message",
                    "role": "user",
                    "content": [
                        { "type": "input_text", "text": "Please check the weather." },
                        { "type": "input_image", "image_url": expected_image_url }
                    ]
                },
                {
                    "type": "message",
                    "role": "assistant",
                    "content": [
                        { "type": "output_text", "text": "Looking that up.", "annotations": [] }
                    ]
                },
                {
                    "type": "function_call",
                    "call_id": "call-42",
                    "name": "get_weather",
                    "arguments": tool_arguments
                },
                {
                    "type": "function_call_output",
                    "call_id": "call-42",
                    "output": "{\"forecast\":\"Sunny\"}"
                }
            ],
            "stream": true,
            "max_output_tokens": 2048,
            "parallel_tool_calls": true,
            "tool_choice": "required",
            "tools": [
                {
                    "type": "function",
                    "name": "get_weather",
                    "description": "Fetches the weather",
                    "parameters": { "type": "object" }
                }
            ],
            "prompt_cache_key": "thread-123",
            "reasoning": { "effort": "low" }
        });

        assert_eq!(serialized, expected);
    }

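    // Function-call arguments may arrive split across delta events; the final
    // ToolUse must carry the fully assembled JSON and end with Stop(ToolUse).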
    #[test]
    fn responses_stream_maps_tool_calls() {
        let events = vec![
            ResponsesStreamEvent::OutputItemAdded {
                output_index: 0,
                sequence_number: None,
                item: response_item_function_call("item_fn", Some("{\"city\":\"Bos")),
            },
            ResponsesStreamEvent::FunctionCallArgumentsDelta {
                item_id: "item_fn".into(),
                output_index: 0,
                delta: "ton\"}".into(),
                sequence_number: None,
            },
            ResponsesStreamEvent::FunctionCallArgumentsDone {
                item_id: "item_fn".into(),
                output_index: 0,
                arguments: "{\"city\":\"Boston\"}".into(),
                sequence_number: None,
            },
            ResponsesStreamEvent::Completed {
                response: ResponseSummary::default(),
            },
        ];

        let mapped = map_response_events(events);
        assert!(matches!(
            mapped[0],
            LanguageModelCompletionEvent::ToolUse(LanguageModelToolUse {
                ref id,
                ref name,
                ref raw_input,
                ..
            }) if id.to_string() == "call_123"
                && name.as_ref() == "get_weather"
                && raw_input == "{\"city\":\"Boston\"}"
        ));
        assert!(matches!(
            mapped[1],
            LanguageModelCompletionEvent::Stop(StopReason::ToolUse)
        ));
    }

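    // An Incomplete response citing `max_output_tokens` should surface its
    // usage numbers and finish with Stop(MaxTokens) rather than EndTurn.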
    #[test]
    fn responses_stream_uses_max_tokens_stop_reason() {
        let events = vec![ResponsesStreamEvent::Incomplete {
            response: ResponseSummary {
                status_details: Some(ResponseStatusDetails {
                    reason: Some("max_output_tokens".into()),
                    r#type: Some("incomplete".into()),
                    error: None,
                }),
                usage: Some(ResponseUsage {
                    input_tokens: Some(10),
                    output_tokens: Some(20),
                    total_tokens: Some(30),
                }),
                ..Default::default()
            },
        }];

        let mapped = map_response_events(events);
        assert!(matches!(
            mapped[0],
            LanguageModelCompletionEvent::UsageUpdate(TokenUsage {
                input_tokens: 10,
                output_tokens: 20,
                ..
            })
        ));
        assert!(matches!(
            mapped[1],
            LanguageModelCompletionEvent::Stop(StopReason::MaxTokens)
        ));
    }

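    // Two function calls in one stream should yield two ToolUse events, in
    // output order, followed by a single Stop(ToolUse).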
    #[test]
    fn responses_stream_handles_multiple_tool_calls() {
        let events = vec![
            ResponsesStreamEvent::OutputItemAdded {
                output_index: 0,
                sequence_number: None,
                item: response_item_function_call("item_fn1", Some("{\"city\":\"NYC\"}")),
            },
            ResponsesStreamEvent::FunctionCallArgumentsDone {
                item_id: "item_fn1".into(),
                output_index: 0,
                arguments: "{\"city\":\"NYC\"}".into(),
                sequence_number: None,
            },
            ResponsesStreamEvent::OutputItemAdded {
                output_index: 1,
                sequence_number: None,
                item: response_item_function_call("item_fn2", Some("{\"city\":\"LA\"}")),
            },
            ResponsesStreamEvent::FunctionCallArgumentsDone {
                item_id: "item_fn2".into(),
                output_index: 1,
                arguments: "{\"city\":\"LA\"}".into(),
                sequence_number: None,
            },
            ResponsesStreamEvent::Completed {
                response: ResponseSummary::default(),
            },
        ];

        let mapped = map_response_events(events);
        assert_eq!(mapped.len(), 3);
        assert!(matches!(
            mapped[0],
            LanguageModelCompletionEvent::ToolUse(LanguageModelToolUse { ref raw_input, .. })
            if raw_input == "{\"city\":\"NYC\"}"
        ));
        assert!(matches!(
            mapped[1],
            LanguageModelCompletionEvent::ToolUse(LanguageModelToolUse { ref raw_input, .. })
            if raw_input == "{\"city\":\"LA\"}"
        ));
        assert!(matches!(
            mapped[2],
            LanguageModelCompletionEvent::Stop(StopReason::ToolUse)
        ));
    }

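    // A stream that interleaves assistant text with a function call should
    // preserve ordering: StartMessage, Text, then ToolUse and Stop(ToolUse).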
    #[test]
    fn responses_stream_handles_mixed_text_and_tool_calls() {
        let events = vec![
            ResponsesStreamEvent::OutputItemAdded {
                output_index: 0,
                sequence_number: None,
                item: response_item_message("msg_123"),
            },
            ResponsesStreamEvent::OutputTextDelta {
                item_id: "msg_123".into(),
                output_index: 0,
                content_index: Some(0),
                delta: "Let me check that".into(),
            },
            ResponsesStreamEvent::OutputItemAdded {
                output_index: 1,
                sequence_number: None,
                item: response_item_function_call("item_fn", Some("{\"query\":\"test\"}")),
            },
            ResponsesStreamEvent::FunctionCallArgumentsDone {
                item_id: "item_fn".into(),
                output_index: 1,
                arguments: "{\"query\":\"test\"}".into(),
                sequence_number: None,
            },
            ResponsesStreamEvent::Completed {
                response: ResponseSummary::default(),
            },
        ];

        let mapped = map_response_events(events);
        assert!(matches!(
            mapped[0],
            LanguageModelCompletionEvent::StartMessage { .. }
        ));
        assert!(matches!(
            mapped[1],
            LanguageModelCompletionEvent::Text(ref text) if text == "Let me check that"
        ));
        assert!(matches!(
            mapped[2],
            LanguageModelCompletionEvent::ToolUse(LanguageModelToolUse { ref raw_input, .. })
            if raw_input == "{\"query\":\"test\"}"
        ));
        assert!(matches!(
            mapped[3],
            LanguageModelCompletionEvent::Stop(StopReason::ToolUse)
        ));
    }

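    // Malformed argument JSON must not panic the mapper; it should surface a
    // ToolUseJsonParseError carrying the raw input instead.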
    #[test]
    fn responses_stream_handles_json_parse_error() {
        let events = vec![
            ResponsesStreamEvent::OutputItemAdded {
                output_index: 0,
                sequence_number: None,
                item: response_item_function_call("item_fn", Some("{invalid json")),
            },
            ResponsesStreamEvent::FunctionCallArgumentsDone {
                item_id: "item_fn".into(),
                output_index: 0,
                arguments: "{invalid json".into(),
                sequence_number: None,
            },
            ResponsesStreamEvent::Completed {
                response: ResponseSummary::default(),
            },
        ];

        let mapped = map_response_events(events);
        assert!(matches!(
            mapped[0],
            LanguageModelCompletionEvent::ToolUseJsonParseError {
                ref raw_input,
                ..
            } if raw_input.as_ref() == "{invalid json"
        ));
    }

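    // When the stream is cut off mid-arguments, the mapper should recover the
    // completed call from the Incomplete summary's output and still emit it.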
    #[test]
    fn responses_stream_handles_incomplete_function_call() {
        let events = vec![
            ResponsesStreamEvent::OutputItemAdded {
                output_index: 0,
                sequence_number: None,
                item: response_item_function_call("item_fn", Some("{\"city\":")),
            },
            ResponsesStreamEvent::FunctionCallArgumentsDelta {
                item_id: "item_fn".into(),
                output_index: 0,
                delta: "\"Boston\"".into(),
                sequence_number: None,
            },
            ResponsesStreamEvent::Incomplete {
                response: ResponseSummary {
                    status_details: Some(ResponseStatusDetails {
                        reason: Some("max_output_tokens".into()),
                        r#type: Some("incomplete".into()),
                        error: None,
                    }),
                    output: vec![response_item_function_call(
                        "item_fn",
                        Some("{\"city\":\"Boston\"}"),
                    )],
                    ..Default::default()
                },
            },
        ];

        let mapped = map_response_events(events);
        assert!(matches!(
            mapped[0],
            LanguageModelCompletionEvent::ToolUse(LanguageModelToolUse { ref raw_input, .. })
            if raw_input == "{\"city\":\"Boston\"}"
        ));
        assert!(matches!(
            mapped[1],
            LanguageModelCompletionEvent::Stop(StopReason::MaxTokens)
        ));
    }

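    // If a call was already emitted via FunctionCallArgumentsDone, the same
    // item echoed in the Incomplete summary must not produce a duplicate.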
    #[test]
    fn responses_stream_incomplete_does_not_duplicate_tool_calls() {
        let events = vec![
            ResponsesStreamEvent::OutputItemAdded {
                output_index: 0,
                sequence_number: None,
                item: response_item_function_call("item_fn", Some("{\"city\":\"Boston\"}")),
            },
            ResponsesStreamEvent::FunctionCallArgumentsDone {
                item_id: "item_fn".into(),
                output_index: 0,
                arguments: "{\"city\":\"Boston\"}".into(),
                sequence_number: None,
            },
            ResponsesStreamEvent::Incomplete {
                response: ResponseSummary {
                    status_details: Some(ResponseStatusDetails {
                        reason: Some("max_output_tokens".into()),
                        r#type: Some("incomplete".into()),
                        error: None,
                    }),
                    output: vec![response_item_function_call(
                        "item_fn",
                        Some("{\"city\":\"Boston\"}"),
                    )],
                    ..Default::default()
                },
            },
        ];

        let mapped = map_response_events(events);
        assert_eq!(mapped.len(), 2);
        assert!(matches!(
            mapped[0],
            LanguageModelCompletionEvent::ToolUse(LanguageModelToolUse { ref raw_input, .. })
            if raw_input == "{\"city\":\"Boston\"}"
        ));
        assert!(matches!(
            mapped[1],
            LanguageModelCompletionEvent::Stop(StopReason::MaxTokens)
        ));
    }
}