open_ai.rs

use anyhow::{Result, anyhow};
use collections::{BTreeMap, HashMap};
use futures::Stream;
use futures::{FutureExt, StreamExt, future, future::BoxFuture};
use gpui::{AnyView, App, AsyncApp, Context, Entity, SharedString, Task, Window};
use http_client::HttpClient;
use language_model::{
    ApiKeyState, AuthenticateError, EnvVar, IconOrSvg, LanguageModel, LanguageModelCompletionError,
    LanguageModelCompletionEvent, LanguageModelId, LanguageModelImage, LanguageModelName,
    LanguageModelProvider, LanguageModelProviderId, LanguageModelProviderName,
    LanguageModelProviderState, LanguageModelRequest, LanguageModelRequestMessage,
    LanguageModelToolChoice, LanguageModelToolResult, LanguageModelToolResultContent,
    LanguageModelToolUse, LanguageModelToolUseId, MessageContent, RateLimiter, Role, StopReason,
    TokenUsage, env_var,
};
use menu;
use open_ai::{
    ImageUrl, Model, OPEN_AI_API_URL, ReasoningEffort, ResponseStreamEvent,
    responses::{
        Request as ResponseRequest, ResponseOutputItem, ResponseSummary as ResponsesSummary,
        ResponseUsage as ResponsesUsage, StreamEvent as ResponsesStreamEvent, stream_response,
    },
    stream_completion,
};
use serde_json::{Value, json};
use settings::{OpenAiAvailableModel as AvailableModel, Settings, SettingsStore};
use std::pin::Pin;
use std::str::FromStr as _;
use std::sync::{Arc, LazyLock};
use strum::IntoEnumIterator;
use ui::{ButtonLink, ConfiguredApiCard, List, ListBulletItem, prelude::*};
use ui_input::InputField;
use util::ResultExt;

const PROVIDER_ID: LanguageModelProviderId = language_model::OPEN_AI_PROVIDER_ID;
const PROVIDER_NAME: LanguageModelProviderName = language_model::OPEN_AI_PROVIDER_NAME;

const API_KEY_ENV_VAR_NAME: &str = "OPENAI_API_KEY";
static API_KEY_ENV_VAR: LazyLock<EnvVar> = env_var!(API_KEY_ENV_VAR_NAME);

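/// OpenAI provider settings: an API endpoint override and any user-defined models.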
#[derive(Default, Clone, Debug, PartialEq)]
pub struct OpenAiSettings {
    pub api_url: String,
    pub available_models: Vec<AvailableModel>,
}

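/// Language model provider backed by the OpenAI API (or an OpenAI-compatible endpoint).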
pub struct OpenAiLanguageModelProvider {
    http_client: Arc<dyn HttpClient>,
    state: Entity<State>,
}

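/// Shared provider state. The API key is scoped to the configured API URL, so
/// changing the URL in settings triggers a key reload.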
pub struct State {
    api_key_state: ApiKeyState,
}

impl State {
    fn is_authenticated(&self) -> bool {
        self.api_key_state.has_key()
    }

    fn set_api_key(&mut self, api_key: Option<String>, cx: &mut Context<Self>) -> Task<Result<()>> {
        let api_url = OpenAiLanguageModelProvider::api_url(cx);
        self.api_key_state
            .store(api_url, api_key, |this| &mut this.api_key_state, cx)
    }

    fn authenticate(&mut self, cx: &mut Context<Self>) -> Task<Result<(), AuthenticateError>> {
        let api_url = OpenAiLanguageModelProvider::api_url(cx);
        self.api_key_state
            .load_if_needed(api_url, |this| &mut this.api_key_state, cx)
    }
}

impl OpenAiLanguageModelProvider {
    pub fn new(http_client: Arc<dyn HttpClient>, cx: &mut App) -> Self {
        let state = cx.new(|cx| {
            cx.observe_global::<SettingsStore>(|this: &mut State, cx| {
                let api_url = Self::api_url(cx);
                this.api_key_state
                    .handle_url_change(api_url, |this| &mut this.api_key_state, cx);
                cx.notify();
            })
            .detach();
            State {
                api_key_state: ApiKeyState::new(Self::api_url(cx), (*API_KEY_ENV_VAR).clone()),
            }
        });

        Self { http_client, state }
    }

    fn create_language_model(&self, model: open_ai::Model) -> Arc<dyn LanguageModel> {
        Arc::new(OpenAiLanguageModel {
            id: LanguageModelId::from(model.id().to_string()),
            model,
            state: self.state.clone(),
            http_client: self.http_client.clone(),
            request_limiter: RateLimiter::new(4),
        })
    }

    fn settings(cx: &App) -> &OpenAiSettings {
        &crate::AllLanguageModelSettings::get_global(cx).openai
    }

    fn api_url(cx: &App) -> SharedString {
        let api_url = &Self::settings(cx).api_url;
        if api_url.is_empty() {
            open_ai::OPEN_AI_API_URL.into()
        } else {
            SharedString::new(api_url.as_str())
        }
    }
}

impl LanguageModelProviderState for OpenAiLanguageModelProvider {
    type ObservableEntity = State;

    fn observable_entity(&self) -> Option<Entity<Self::ObservableEntity>> {
        Some(self.state.clone())
    }
}

impl LanguageModelProvider for OpenAiLanguageModelProvider {
    fn id(&self) -> LanguageModelProviderId {
        PROVIDER_ID
    }

    fn name(&self) -> LanguageModelProviderName {
        PROVIDER_NAME
    }

    fn icon(&self) -> IconOrSvg {
        IconOrSvg::Icon(IconName::AiOpenAi)
    }

    fn default_model(&self, _cx: &App) -> Option<Arc<dyn LanguageModel>> {
        Some(self.create_language_model(open_ai::Model::default()))
    }

    fn default_fast_model(&self, _cx: &App) -> Option<Arc<dyn LanguageModel>> {
        Some(self.create_language_model(open_ai::Model::default_fast()))
    }

    fn provided_models(&self, cx: &App) -> Vec<Arc<dyn LanguageModel>> {
        let mut models = BTreeMap::default();

        // Add base models from open_ai::Model::iter()
        for model in open_ai::Model::iter() {
            if !matches!(model, open_ai::Model::Custom { .. }) {
                models.insert(model.id().to_string(), model);
            }
        }

        // Override with available models from settings
        for model in &OpenAiLanguageModelProvider::settings(cx).available_models {
            models.insert(
                model.name.clone(),
                open_ai::Model::Custom {
                    name: model.name.clone(),
                    display_name: model.display_name.clone(),
                    max_tokens: model.max_tokens,
                    max_output_tokens: model.max_output_tokens,
                    max_completion_tokens: model.max_completion_tokens,
                    reasoning_effort: model.reasoning_effort.clone(),
                    supports_chat_completions: model.capabilities.chat_completions,
                },
            );
        }

        models
            .into_values()
            .map(|model| self.create_language_model(model))
            .collect()
    }

    fn is_authenticated(&self, cx: &App) -> bool {
        self.state.read(cx).is_authenticated()
    }

    fn authenticate(&self, cx: &mut App) -> Task<Result<(), AuthenticateError>> {
        self.state.update(cx, |state, cx| state.authenticate(cx))
    }

    fn configuration_view(
        &self,
        _target_agent: language_model::ConfigurationViewTargetAgent,
        window: &mut Window,
        cx: &mut App,
    ) -> AnyView {
        cx.new(|cx| ConfigurationView::new(self.state.clone(), window, cx))
            .into()
    }

    fn reset_credentials(&self, cx: &mut App) -> Task<Result<()>> {
        self.state
            .update(cx, |state, cx| state.set_api_key(None, cx))
    }
}

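/// A single OpenAI model instance, holding a handle to the shared provider
/// state for API key lookup and a limiter that caps concurrent requests.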
pub struct OpenAiLanguageModel {
    id: LanguageModelId,
    model: open_ai::Model,
    state: Entity<State>,
    http_client: Arc<dyn HttpClient>,
    request_limiter: RateLimiter,
}

impl OpenAiLanguageModel {
    fn stream_completion(
        &self,
        request: open_ai::Request,
        cx: &AsyncApp,
    ) -> BoxFuture<'static, Result<futures::stream::BoxStream<'static, Result<ResponseStreamEvent>>>>
    {
        let http_client = self.http_client.clone();

        let Ok((api_key, api_url)) = self.state.read_with(cx, |state, cx| {
            let api_url = OpenAiLanguageModelProvider::api_url(cx);
            (state.api_key_state.key(&api_url), api_url)
        }) else {
            return future::ready(Err(anyhow!("App state dropped"))).boxed();
        };

        let future = self.request_limiter.stream(async move {
            let provider = PROVIDER_NAME;
            let Some(api_key) = api_key else {
                return Err(LanguageModelCompletionError::NoApiKey { provider });
            };
            let request = stream_completion(
                http_client.as_ref(),
                provider.0.as_str(),
                &api_url,
                &api_key,
                request,
            );
            let response = request.await?;
            Ok(response)
        });

        async move { Ok(future.await?.boxed()) }.boxed()
    }

    fn stream_response(
        &self,
        request: ResponseRequest,
        cx: &AsyncApp,
    ) -> BoxFuture<'static, Result<futures::stream::BoxStream<'static, Result<ResponsesStreamEvent>>>>
    {
        let http_client = self.http_client.clone();

        let Ok((api_key, api_url)) = self.state.read_with(cx, |state, cx| {
            let api_url = OpenAiLanguageModelProvider::api_url(cx);
            (state.api_key_state.key(&api_url), api_url)
        }) else {
            return future::ready(Err(anyhow!("App state dropped"))).boxed();
        };

        let provider = PROVIDER_NAME;
        let future = self.request_limiter.stream(async move {
            let Some(api_key) = api_key else {
                return Err(LanguageModelCompletionError::NoApiKey { provider });
            };
            let request = stream_response(
                http_client.as_ref(),
                provider.0.as_str(),
                &api_url,
                &api_key,
                request,
            );
            let response = request.await?;
            Ok(response)
        });

        async move { Ok(future.await?.boxed()) }.boxed()
    }
}

impl LanguageModel for OpenAiLanguageModel {
    fn id(&self) -> LanguageModelId {
        self.id.clone()
    }

    fn name(&self) -> LanguageModelName {
        LanguageModelName::from(self.model.display_name().to_string())
    }

    fn provider_id(&self) -> LanguageModelProviderId {
        PROVIDER_ID
    }

    fn provider_name(&self) -> LanguageModelProviderName {
        PROVIDER_NAME
    }

    fn supports_tools(&self) -> bool {
        true
    }

    fn supports_images(&self) -> bool {
        use open_ai::Model;
        match &self.model {
            Model::FourOmni
            | Model::FourOmniMini
            | Model::FourPointOne
            | Model::FourPointOneMini
            | Model::FourPointOneNano
            | Model::Five
            | Model::FiveCodex
            | Model::FiveMini
            | Model::FiveNano
            | Model::FivePointOne
            | Model::FivePointTwo
            | Model::FivePointTwoCodex
            | Model::O1
            | Model::O3
            | Model::O4Mini => true,
            Model::ThreePointFiveTurbo
            | Model::Four
            | Model::FourTurbo
            | Model::O3Mini
            | Model::Custom { .. } => false,
        }
    }

    fn supports_tool_choice(&self, choice: LanguageModelToolChoice) -> bool {
        match choice {
            LanguageModelToolChoice::Auto => true,
            LanguageModelToolChoice::Any => true,
            LanguageModelToolChoice::None => true,
        }
    }

    fn telemetry_id(&self) -> String {
        format!("openai/{}", self.model.id())
    }

    fn max_token_count(&self) -> u64 {
        self.model.max_token_count()
    }

    fn max_output_tokens(&self) -> Option<u64> {
        self.model.max_output_tokens()
    }

    fn count_tokens(
        &self,
        request: LanguageModelRequest,
        cx: &App,
    ) -> BoxFuture<'static, Result<u64>> {
        count_open_ai_tokens(request, self.model.clone(), cx)
    }

    fn stream_completion(
        &self,
        request: LanguageModelRequest,
        cx: &AsyncApp,
    ) -> BoxFuture<
        'static,
        Result<
            futures::stream::BoxStream<
                'static,
                Result<LanguageModelCompletionEvent, LanguageModelCompletionError>,
            >,
            LanguageModelCompletionError,
        >,
    > {
        if self.model.supports_chat_completions() {
            let request = into_open_ai(
                request,
                self.model.id(),
                self.model.supports_parallel_tool_calls(),
                self.model.supports_prompt_cache_key(),
                self.max_output_tokens(),
                self.model.reasoning_effort(),
            );
            let completions = self.stream_completion(request, cx);
            async move {
                let mapper = OpenAiEventMapper::new();
                Ok(mapper.map_stream(completions.await?).boxed())
            }
            .boxed()
        } else {
            let request = into_open_ai_response(
                request,
                self.model.id(),
                self.model.supports_parallel_tool_calls(),
                self.model.supports_prompt_cache_key(),
                self.max_output_tokens(),
                self.model.reasoning_effort(),
            );
            let completions = self.stream_response(request, cx);
            async move {
                let mapper = OpenAiResponseEventMapper::new();
                Ok(mapper.map_stream(completions.await?).boxed())
            }
            .boxed()
        }
    }
}

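/// Converts an agent request into a Chat Completions API request, flattening
/// message content into OpenAI's message, tool-call, and tool-result shapes.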
pub fn into_open_ai(
    request: LanguageModelRequest,
    model_id: &str,
    supports_parallel_tool_calls: bool,
    supports_prompt_cache_key: bool,
    max_output_tokens: Option<u64>,
    reasoning_effort: Option<ReasoningEffort>,
) -> open_ai::Request {
    let stream = !model_id.starts_with("o1-");

    let mut messages = Vec::new();
    for message in request.messages {
        for content in message.content {
            match content {
                MessageContent::Text(text) | MessageContent::Thinking { text, .. } => {
                    if !text.trim().is_empty() {
                        add_message_content_part(
                            open_ai::MessagePart::Text { text },
                            message.role,
                            &mut messages,
                        );
                    }
                }
                MessageContent::RedactedThinking(_) => {}
                MessageContent::Image(image) => {
                    add_message_content_part(
                        open_ai::MessagePart::Image {
                            image_url: ImageUrl {
                                url: image.to_base64_url(),
                                detail: None,
                            },
                        },
                        message.role,
                        &mut messages,
                    );
                }
                MessageContent::ToolUse(tool_use) => {
                    let tool_call = open_ai::ToolCall {
                        id: tool_use.id.to_string(),
                        content: open_ai::ToolCallContent::Function {
                            function: open_ai::FunctionContent {
                                name: tool_use.name.to_string(),
                                arguments: serde_json::to_string(&tool_use.input)
                                    .unwrap_or_default(),
                            },
                        },
                    };

                    if let Some(open_ai::RequestMessage::Assistant { tool_calls, .. }) =
                        messages.last_mut()
                    {
                        tool_calls.push(tool_call);
                    } else {
                        messages.push(open_ai::RequestMessage::Assistant {
                            content: None,
                            tool_calls: vec![tool_call],
                        });
                    }
                }
                MessageContent::ToolResult(tool_result) => {
                    let content = match &tool_result.content {
                        LanguageModelToolResultContent::Text(text) => {
                            vec![open_ai::MessagePart::Text {
                                text: text.to_string(),
                            }]
                        }
                        LanguageModelToolResultContent::Image(image) => {
                            vec![open_ai::MessagePart::Image {
                                image_url: ImageUrl {
                                    url: image.to_base64_url(),
                                    detail: None,
                                },
                            }]
                        }
                    };

                    messages.push(open_ai::RequestMessage::Tool {
                        content: content.into(),
                        tool_call_id: tool_result.tool_use_id.to_string(),
                    });
                }
            }
        }
    }

    open_ai::Request {
        model: model_id.into(),
        messages,
        stream,
        stop: request.stop,
        temperature: request.temperature.or(Some(1.0)),
        max_completion_tokens: max_output_tokens,
        parallel_tool_calls: if supports_parallel_tool_calls && !request.tools.is_empty() {
            // Disable parallel tool calls, as the Agent currently expects a maximum of one per turn.
            Some(false)
        } else {
            None
        },
        prompt_cache_key: if supports_prompt_cache_key {
            request.thread_id
        } else {
            None
        },
        tools: request
            .tools
            .into_iter()
            .map(|tool| open_ai::ToolDefinition::Function {
                function: open_ai::FunctionDefinition {
                    name: tool.name,
                    description: Some(tool.description),
                    parameters: Some(tool.input_schema),
                },
            })
            .collect(),
        tool_choice: request.tool_choice.map(|choice| match choice {
            LanguageModelToolChoice::Auto => open_ai::ToolChoice::Auto,
            LanguageModelToolChoice::Any => open_ai::ToolChoice::Required,
            LanguageModelToolChoice::None => open_ai::ToolChoice::None,
        }),
        reasoning_effort,
    }
}

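/// Converts an agent request into a Responses API request, used for models
/// that don't support the Chat Completions endpoint.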
pub fn into_open_ai_response(
    request: LanguageModelRequest,
    model_id: &str,
    supports_parallel_tool_calls: bool,
    supports_prompt_cache_key: bool,
    max_output_tokens: Option<u64>,
    reasoning_effort: Option<ReasoningEffort>,
) -> ResponseRequest {
    let stream = !model_id.starts_with("o1-");

    let LanguageModelRequest {
        thread_id,
        prompt_id: _,
        intent: _,
        mode: _,
        messages,
        tools,
        tool_choice,
        stop: _,
        temperature,
        thinking_allowed: _,
    } = request;

    let mut input_items = Vec::new();
    for (index, message) in messages.into_iter().enumerate() {
        append_message_to_response_items(message, index, &mut input_items);
    }

    let tools: Vec<_> = tools
        .into_iter()
        .map(|tool| open_ai::responses::ToolDefinition::Function {
            name: tool.name,
            description: Some(tool.description),
            parameters: Some(tool.input_schema),
            strict: None,
        })
        .collect();

    ResponseRequest {
        model: model_id.into(),
        input: input_items,
        stream,
        temperature,
        top_p: None,
        max_output_tokens,
        parallel_tool_calls: if tools.is_empty() {
            None
        } else {
            Some(supports_parallel_tool_calls)
        },
        tool_choice: tool_choice.map(|choice| match choice {
            LanguageModelToolChoice::Auto => open_ai::ToolChoice::Auto,
            LanguageModelToolChoice::Any => open_ai::ToolChoice::Required,
            LanguageModelToolChoice::None => open_ai::ToolChoice::None,
        }),
        tools,
        prompt_cache_key: if supports_prompt_cache_key {
            thread_id
        } else {
            None
        },
        reasoning: reasoning_effort.map(|effort| open_ai::responses::ReasoningConfig { effort }),
    }
}

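/// Converts one agent message into Responses API input items: contiguous text
/// and image parts become message items, while tool uses and tool results
/// become standalone `function_call` / `function_call_output` items.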
fn append_message_to_response_items(
    message: LanguageModelRequestMessage,
    index: usize,
    input_items: &mut Vec<Value>,
) {
    let mut content_parts: Vec<Value> = Vec::new();

    for content in message.content {
        match content {
            MessageContent::Text(text) => {
                push_response_text_part(&message.role, text, &mut content_parts);
            }
            MessageContent::Thinking { text, .. } => {
                push_response_text_part(&message.role, text, &mut content_parts);
            }
            MessageContent::RedactedThinking(_) => {}
            MessageContent::Image(image) => {
                push_response_image_part(&message.role, image, &mut content_parts);
            }
            MessageContent::ToolUse(tool_use) => {
                flush_response_parts(&message.role, index, &mut content_parts, input_items);
                let call_id = tool_use.id.to_string();
                input_items.push(json!({
                    "type": "function_call",
                    "call_id": call_id,
                    "name": tool_use.name,
                    "arguments": tool_use.raw_input,
                }));
            }
            MessageContent::ToolResult(tool_result) => {
                flush_response_parts(&message.role, index, &mut content_parts, input_items);
                input_items.push(json!({
                    "type": "function_call_output",
                    "call_id": tool_result.tool_use_id.to_string(),
                    "output": tool_result_output(&tool_result),
                }));
            }
        }
    }

    flush_response_parts(&message.role, index, &mut content_parts, input_items);
}

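/// Pushes a text part, using `output_text` for assistant messages and
/// `input_text` for everything else. Whitespace-only text is dropped.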
fn push_response_text_part(role: &Role, text: impl Into<String>, parts: &mut Vec<Value>) {
    let text = text.into();
    if text.trim().is_empty() {
        return;
    }

    match role {
        Role::Assistant => parts.push(json!({
            "type": "output_text",
            "text": text,
            "annotations": [],
        })),
        _ => parts.push(json!({
            "type": "input_text",
            "text": text,
        })),
    }
}

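/// Pushes an image part. Images are only sent for non-assistant roles;
/// assistant messages get a placeholder text part instead.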
fn push_response_image_part(role: &Role, image: LanguageModelImage, parts: &mut Vec<Value>) {
    match role {
        Role::Assistant => parts.push(json!({
            "type": "output_text",
            "text": "[image omitted]",
            "annotations": [],
        })),
        _ => parts.push(json!({
            "type": "input_image",
            "image_url": image.to_base64_url(),
        })),
    }
}

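/// Drains the accumulated content parts into a single `message` input item
/// for the given role. No-op when there are no pending parts.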
fn flush_response_parts(
    role: &Role,
    _index: usize,
    parts: &mut Vec<Value>,
    input_items: &mut Vec<Value>,
) {
    if parts.is_empty() {
        return;
    }

    let item = match role {
        Role::Assistant => json!({
            "type": "message",
            "role": "assistant",
            "status": "completed",
            "content": parts.clone(),
        }),
        Role::User => json!({
            "type": "message",
            "role": "user",
            "content": parts.clone(),
        }),
        Role::System => json!({
            "type": "message",
            "role": "system",
            "content": parts.clone(),
        }),
    };

    input_items.push(item);
    parts.clear();
}

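/// Serializes a tool result for `function_call_output`, preferring the
/// structured `output` value and falling back to the displayed content.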
fn tool_result_output(result: &LanguageModelToolResult) -> String {
    if let Some(output) = &result.output {
        match output {
            serde_json::Value::String(text) => text.clone(),
            serde_json::Value::Null => String::new(),
            _ => output.to_string(),
        }
    } else {
        match &result.content {
            LanguageModelToolResultContent::Text(text) => text.to_string(),
            LanguageModelToolResultContent::Image(image) => image.to_base64_url(),
        }
    }
}

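/// Appends a content part to the preceding message when the roles match;
/// otherwise starts a new message for the given role.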
fn add_message_content_part(
    new_part: open_ai::MessagePart,
    role: Role,
    messages: &mut Vec<open_ai::RequestMessage>,
) {
    match (role, messages.last_mut()) {
        (Role::User, Some(open_ai::RequestMessage::User { content }))
        | (
            Role::Assistant,
            Some(open_ai::RequestMessage::Assistant {
                content: Some(content),
                ..
            }),
        )
        | (Role::System, Some(open_ai::RequestMessage::System { content, .. })) => {
            content.push_part(new_part);
        }
        _ => {
            messages.push(match role {
                Role::User => open_ai::RequestMessage::User {
                    content: open_ai::MessageContent::from(vec![new_part]),
                },
                Role::Assistant => open_ai::RequestMessage::Assistant {
                    content: Some(open_ai::MessageContent::from(vec![new_part])),
                    tool_calls: Vec::new(),
                },
                Role::System => open_ai::RequestMessage::System {
                    content: open_ai::MessageContent::from(vec![new_part]),
                },
            });
        }
    }
}

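/// Maps streaming Chat Completions events to completion events, accumulating
/// tool-call fragments by index until a finish reason arrives.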
pub struct OpenAiEventMapper {
    tool_calls_by_index: HashMap<usize, RawToolCall>,
}

impl OpenAiEventMapper {
    pub fn new() -> Self {
        Self {
            tool_calls_by_index: HashMap::default(),
        }
    }

    pub fn map_stream(
        mut self,
        events: Pin<Box<dyn Send + Stream<Item = Result<ResponseStreamEvent>>>>,
    ) -> impl Stream<Item = Result<LanguageModelCompletionEvent, LanguageModelCompletionError>>
    {
        events.flat_map(move |event| {
            futures::stream::iter(match event {
                Ok(event) => self.map_event(event),
                Err(error) => vec![Err(LanguageModelCompletionError::from(anyhow!(error)))],
            })
        })
    }

    pub fn map_event(
        &mut self,
        event: ResponseStreamEvent,
    ) -> Vec<Result<LanguageModelCompletionEvent, LanguageModelCompletionError>> {
        let mut events = Vec::new();
        if let Some(usage) = event.usage {
            events.push(Ok(LanguageModelCompletionEvent::UsageUpdate(TokenUsage {
                input_tokens: usage.prompt_tokens,
                output_tokens: usage.completion_tokens,
                cache_creation_input_tokens: 0,
                cache_read_input_tokens: 0,
            })));
        }

        let Some(choice) = event.choices.first() else {
            return events;
        };

        if let Some(delta) = choice.delta.as_ref() {
            if let Some(content) = delta.content.clone() {
                events.push(Ok(LanguageModelCompletionEvent::Text(content)));
            }

            if let Some(tool_calls) = delta.tool_calls.as_ref() {
                for tool_call in tool_calls {
                    let entry = self.tool_calls_by_index.entry(tool_call.index).or_default();

                    if let Some(tool_id) = tool_call.id.clone() {
                        entry.id = tool_id;
                    }

                    if let Some(function) = tool_call.function.as_ref() {
                        if let Some(name) = function.name.clone() {
                            entry.name = name;
                        }

                        if let Some(arguments) = function.arguments.clone() {
                            entry.arguments.push_str(&arguments);
                        }
                    }
                }
            }
        }

        match choice.finish_reason.as_deref() {
            Some("stop") => {
                events.push(Ok(LanguageModelCompletionEvent::Stop(StopReason::EndTurn)));
            }
            Some("tool_calls") => {
                events.extend(self.tool_calls_by_index.drain().map(|(_, tool_call)| {
                    match serde_json::Value::from_str(&tool_call.arguments) {
                        Ok(input) => Ok(LanguageModelCompletionEvent::ToolUse(
                            LanguageModelToolUse {
                                id: tool_call.id.clone().into(),
                                name: tool_call.name.as_str().into(),
                                is_input_complete: true,
                                input,
                                raw_input: tool_call.arguments.clone(),
                                thought_signature: None,
                            },
                        )),
                        Err(error) => Ok(LanguageModelCompletionEvent::ToolUseJsonParseError {
                            id: tool_call.id.into(),
                            tool_name: tool_call.name.into(),
                            raw_input: tool_call.arguments.clone().into(),
                            json_parse_error: error.to_string(),
                        }),
                    }
                }));

                events.push(Ok(LanguageModelCompletionEvent::Stop(StopReason::ToolUse)));
            }
            Some(stop_reason) => {
                log::error!("Unexpected OpenAI stop_reason: {stop_reason:?}");
                events.push(Ok(LanguageModelCompletionEvent::Stop(StopReason::EndTurn)));
            }
            None => {}
        }

        events
    }
}

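/// A tool call assembled from stream deltas; arguments accumulate as JSON text.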
#[derive(Default)]
struct RawToolCall {
    id: String,
    name: String,
    arguments: String,
}

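/// Maps streaming Responses API events to completion events, tracking pending
/// function calls by item ID and deferring the stop reason until the response
/// completes.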
pub struct OpenAiResponseEventMapper {
    function_calls_by_item: HashMap<String, PendingResponseFunctionCall>,
    pending_stop_reason: Option<StopReason>,
}

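/// A function call being streamed; argument deltas accumulate until a
/// `FunctionCallArgumentsDone` event arrives.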
#[derive(Default)]
struct PendingResponseFunctionCall {
    call_id: String,
    name: Arc<str>,
    arguments: String,
}

impl OpenAiResponseEventMapper {
    pub fn new() -> Self {
        Self {
            function_calls_by_item: HashMap::default(),
            pending_stop_reason: None,
        }
    }

    pub fn map_stream(
        mut self,
        events: Pin<Box<dyn Send + Stream<Item = Result<ResponsesStreamEvent>>>>,
    ) -> impl Stream<Item = Result<LanguageModelCompletionEvent, LanguageModelCompletionError>>
    {
        events.flat_map(move |event| {
            futures::stream::iter(match event {
                Ok(event) => self.map_event(event),
                Err(error) => vec![Err(LanguageModelCompletionError::from(anyhow!(error)))],
            })
        })
    }

    fn map_event(
        &mut self,
        event: ResponsesStreamEvent,
    ) -> Vec<Result<LanguageModelCompletionEvent, LanguageModelCompletionError>> {
        match event {
            ResponsesStreamEvent::OutputItemAdded { item, .. } => {
                let mut events = Vec::new();

                match &item {
                    ResponseOutputItem::Message(message) => {
                        if let Some(id) = &message.id {
                            events.push(Ok(LanguageModelCompletionEvent::StartMessage {
                                message_id: id.clone(),
                            }));
                        }
                    }
                    ResponseOutputItem::FunctionCall(function_call) => {
                        if let Some(item_id) = function_call.id.clone() {
                            let call_id = function_call
                                .call_id
                                .clone()
                                .or_else(|| function_call.id.clone())
                                .unwrap_or_else(|| item_id.clone());
                            let entry = PendingResponseFunctionCall {
                                call_id,
                                name: Arc::<str>::from(
                                    function_call.name.clone().unwrap_or_default(),
                                ),
                                arguments: function_call.arguments.clone(),
                            };
                            self.function_calls_by_item.insert(item_id, entry);
                        }
                    }
                    ResponseOutputItem::Unknown => {}
                }
                events
            }
            ResponsesStreamEvent::OutputTextDelta { delta, .. } => {
                if delta.is_empty() {
                    Vec::new()
                } else {
                    vec![Ok(LanguageModelCompletionEvent::Text(delta))]
                }
            }
            ResponsesStreamEvent::FunctionCallArgumentsDelta { item_id, delta, .. } => {
                if let Some(entry) = self.function_calls_by_item.get_mut(&item_id) {
                    entry.arguments.push_str(&delta);
                }
                Vec::new()
            }
            ResponsesStreamEvent::FunctionCallArgumentsDone {
                item_id, arguments, ..
            } => {
                if let Some(mut entry) = self.function_calls_by_item.remove(&item_id) {
                    if !arguments.is_empty() {
                        entry.arguments = arguments;
                    }
                    let raw_input = entry.arguments.clone();
                    self.pending_stop_reason = Some(StopReason::ToolUse);
                    match serde_json::from_str::<serde_json::Value>(&entry.arguments) {
                        Ok(input) => {
                            vec![Ok(LanguageModelCompletionEvent::ToolUse(
                                LanguageModelToolUse {
                                    id: LanguageModelToolUseId::from(entry.call_id.clone()),
                                    name: entry.name.clone(),
                                    is_input_complete: true,
                                    input,
                                    raw_input,
                                    thought_signature: None,
                                },
                            ))]
                        }
                        Err(error) => {
                            vec![Ok(LanguageModelCompletionEvent::ToolUseJsonParseError {
                                id: LanguageModelToolUseId::from(entry.call_id.clone()),
                                tool_name: entry.name.clone(),
                                raw_input: Arc::<str>::from(raw_input),
                                json_parse_error: error.to_string(),
                            })]
                        }
                    }
                } else {
                    Vec::new()
                }
            }
            ResponsesStreamEvent::Completed { response } => {
                self.handle_completion(response, StopReason::EndTurn)
            }
            ResponsesStreamEvent::Incomplete { response } => {
                let reason = response
                    .status_details
                    .as_ref()
                    .and_then(|details| details.reason.as_deref());
                let stop_reason = match reason {
                    Some("max_output_tokens") => StopReason::MaxTokens,
                    Some("content_filter") => {
                        self.pending_stop_reason = Some(StopReason::Refusal);
                        StopReason::Refusal
                    }
                    _ => self
                        .pending_stop_reason
                        .take()
                        .unwrap_or(StopReason::EndTurn),
                };

                let mut events = Vec::new();
                if self.pending_stop_reason.is_none() {
                    events.extend(self.emit_tool_calls_from_output(&response.output));
                }
                if let Some(usage) = response.usage.as_ref() {
                    events.push(Ok(LanguageModelCompletionEvent::UsageUpdate(
                        token_usage_from_response_usage(usage),
                    )));
                }
                events.push(Ok(LanguageModelCompletionEvent::Stop(stop_reason)));
                events
            }
            ResponsesStreamEvent::Failed { response } => {
                let message = response
                    .status_details
                    .and_then(|details| details.error)
                    .map(|error| error.to_string())
                    .unwrap_or_else(|| "response failed".to_string());
                vec![Err(LanguageModelCompletionError::Other(anyhow!(message)))]
            }
            ResponsesStreamEvent::Error { error }
            | ResponsesStreamEvent::GenericError { error } => {
                vec![Err(LanguageModelCompletionError::Other(anyhow!(
                    "{error:?}"
                )))]
            }
            ResponsesStreamEvent::OutputTextDone { .. } => Vec::new(),
            ResponsesStreamEvent::OutputItemDone { .. }
            | ResponsesStreamEvent::ContentPartAdded { .. }
            | ResponsesStreamEvent::ContentPartDone { .. }
            | ResponsesStreamEvent::Created { .. }
            | ResponsesStreamEvent::InProgress { .. }
            | ResponsesStreamEvent::Unknown => Vec::new(),
        }
    }

    fn handle_completion(
        &mut self,
        response: ResponsesSummary,
        default_reason: StopReason,
    ) -> Vec<Result<LanguageModelCompletionEvent, LanguageModelCompletionError>> {
        let mut events = Vec::new();

        if self.pending_stop_reason.is_none() {
            events.extend(self.emit_tool_calls_from_output(&response.output));
        }

        if let Some(usage) = response.usage.as_ref() {
            events.push(Ok(LanguageModelCompletionEvent::UsageUpdate(
                token_usage_from_response_usage(usage),
            )));
        }

        let stop_reason = self.pending_stop_reason.take().unwrap_or(default_reason);
        events.push(Ok(LanguageModelCompletionEvent::Stop(stop_reason)));
        events
    }

    fn emit_tool_calls_from_output(
        &mut self,
        output: &[ResponseOutputItem],
    ) -> Vec<Result<LanguageModelCompletionEvent, LanguageModelCompletionError>> {
        let mut events = Vec::new();
        for item in output {
            if let ResponseOutputItem::FunctionCall(function_call) = item {
                let Some(call_id) = function_call
                    .call_id
                    .clone()
                    .or_else(|| function_call.id.clone())
                else {
                    log::error!(
                        "Function call item missing both call_id and id: {:?}",
                        function_call
                    );
                    continue;
                };
                let name: Arc<str> = Arc::from(function_call.name.clone().unwrap_or_default());
                let arguments = &function_call.arguments;
                if !arguments.is_empty() {
                    self.pending_stop_reason = Some(StopReason::ToolUse);
                    match serde_json::from_str::<serde_json::Value>(arguments) {
                        Ok(input) => {
                            events.push(Ok(LanguageModelCompletionEvent::ToolUse(
                                LanguageModelToolUse {
                                    id: LanguageModelToolUseId::from(call_id.clone()),
                                    name: name.clone(),
                                    is_input_complete: true,
                                    input,
                                    raw_input: arguments.clone(),
                                    thought_signature: None,
                                },
                            )));
                        }
                        Err(error) => {
                            events.push(Ok(LanguageModelCompletionEvent::ToolUseJsonParseError {
                                id: LanguageModelToolUseId::from(call_id.clone()),
                                tool_name: name.clone(),
                                raw_input: Arc::<str>::from(arguments.clone()),
                                json_parse_error: error.to_string(),
                            }));
                        }
                    }
                }
            }
        }
        events
    }
}

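/// Converts Responses API usage into `TokenUsage`. Cache token counts aren't
/// tracked here, so they're reported as zero.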
fn token_usage_from_response_usage(usage: &ResponsesUsage) -> TokenUsage {
    TokenUsage {
        input_tokens: usage.input_tokens.unwrap_or_default(),
        output_tokens: usage.output_tokens.unwrap_or_default(),
        cache_creation_input_tokens: 0,
        cache_read_input_tokens: 0,
    }
}

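/// Flattens agent messages into the shape `tiktoken_rs` expects for token counting.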
pub(crate) fn collect_tiktoken_messages(
    request: LanguageModelRequest,
) -> Vec<tiktoken_rs::ChatCompletionRequestMessage> {
    request
        .messages
        .into_iter()
        .map(|message| tiktoken_rs::ChatCompletionRequestMessage {
            role: match message.role {
                Role::User => "user".into(),
                Role::Assistant => "assistant".into(),
                Role::System => "system".into(),
            },
            content: Some(message.string_contents()),
            name: None,
            function_call: None,
        })
        .collect::<Vec<_>>()
}

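/// Counts request tokens on a background thread, mapping each model to the
/// closest tokenizer `tiktoken_rs` supports.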
pub fn count_open_ai_tokens(
    request: LanguageModelRequest,
    model: Model,
    cx: &App,
) -> BoxFuture<'static, Result<u64>> {
    cx.background_spawn(async move {
        let messages = collect_tiktoken_messages(request);
        match model {
            Model::Custom { max_tokens, .. } => {
                let model = if max_tokens >= 100_000 {
                    // If the max tokens is 100k or more, it is likely the o200k_base tokenizer from gpt-4o
                    "gpt-4o"
                } else {
                    // Otherwise fall back to gpt-4, since only cl100k_base and o200k_base are
                    // supported with this tiktoken method
                    "gpt-4"
                };
                tiktoken_rs::num_tokens_from_messages(model, &messages)
            }
            // Currently supported by tiktoken_rs.
            // Sometimes tiktoken-rs is behind on model support. If that is the case, add a new
            // match arm with an override. We enumerate all supported models here so that we can
            // check whether new models are supported yet or not.
            Model::ThreePointFiveTurbo
            | Model::Four
            | Model::FourTurbo
            | Model::FourOmni
            | Model::FourOmniMini
            | Model::FourPointOne
            | Model::FourPointOneMini
            | Model::FourPointOneNano
            | Model::O1
            | Model::O3
            | Model::O3Mini
            | Model::O4Mini
            | Model::Five
            | Model::FiveCodex
            | Model::FiveMini
            | Model::FiveNano => tiktoken_rs::num_tokens_from_messages(model.id(), &messages),
            // GPT-5.1, 5.2, and 5.2-codex don't have dedicated tiktoken support; use the gpt-5 tokenizer
            Model::FivePointOne | Model::FivePointTwo | Model::FivePointTwoCodex => {
                tiktoken_rs::num_tokens_from_messages("gpt-5", &messages)
            }
        }
        .map(|tokens| tokens as u64)
    })
    .boxed()
}

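/// Settings UI for entering, resetting, and loading the OpenAI API key.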
1186struct ConfigurationView {
1187    api_key_editor: Entity<InputField>,
1188    state: Entity<State>,
1189    load_credentials_task: Option<Task<()>>,
1190}
1191
1192impl ConfigurationView {
1193    fn new(state: Entity<State>, window: &mut Window, cx: &mut Context<Self>) -> Self {
1194        let api_key_editor = cx.new(|cx| {
1195            InputField::new(
1196                window,
1197                cx,
1198                "sk-000000000000000000000000000000000000000000000000",
1199            )
1200        });
1201
1202        cx.observe(&state, |_, _, cx| {
1203            cx.notify();
1204        })
1205        .detach();
1206
1207        let load_credentials_task = Some(cx.spawn_in(window, {
1208            let state = state.clone();
1209            async move |this, cx| {
1210                if let Some(task) = state
1211                    .update(cx, |state, cx| state.authenticate(cx))
1212                    .log_err()
1213                {
1214                    // We don't log an error, because "not signed in" is also an error.
1215                    let _ = task.await;
1216                }
1217                this.update(cx, |this, cx| {
1218                    this.load_credentials_task = None;
1219                    cx.notify();
1220                })
1221                .log_err();
1222            }
1223        }));
1224
1225        Self {
1226            api_key_editor,
1227            state,
1228            load_credentials_task,
1229        }
1230    }
1231
1232    fn save_api_key(&mut self, _: &menu::Confirm, window: &mut Window, cx: &mut Context<Self>) {
1233        let api_key = self.api_key_editor.read(cx).text(cx).trim().to_string();
1234        if api_key.is_empty() {
1235            return;
1236        }
1237
1238        // url changes can cause the editor to be displayed again
1239        self.api_key_editor
1240            .update(cx, |editor, cx| editor.set_text("", window, cx));
1241
1242        let state = self.state.clone();
1243        cx.spawn_in(window, async move |_, cx| {
1244            state
1245                .update(cx, |state, cx| state.set_api_key(Some(api_key), cx))?
1246                .await
1247        })
1248        .detach_and_log_err(cx);
1249    }
1250
1251    fn reset_api_key(&mut self, window: &mut Window, cx: &mut Context<Self>) {
1252        self.api_key_editor
1253            .update(cx, |input, cx| input.set_text("", window, cx));
1254
1255        let state = self.state.clone();
1256        cx.spawn_in(window, async move |_, cx| {
1257            state
1258                .update(cx, |state, cx| state.set_api_key(None, cx))?
1259                .await
1260        })
1261        .detach_and_log_err(cx);
1262    }
1263
1264    fn should_render_editor(&self, cx: &mut Context<Self>) -> bool {
1265        !self.state.read(cx).is_authenticated()
1266    }
1267}
1268
1269impl Render for ConfigurationView {
1270    fn render(&mut self, _: &mut Window, cx: &mut Context<Self>) -> impl IntoElement {
1271        let env_var_set = self.state.read(cx).api_key_state.is_from_env_var();
1272        let configured_card_label = if env_var_set {
1273            format!("API key set in {API_KEY_ENV_VAR_NAME} environment variable")
1274        } else {
1275            let api_url = OpenAiLanguageModelProvider::api_url(cx);
1276            if api_url == OPEN_AI_API_URL {
1277                "API key configured".to_string()
1278            } else {
1279                format!("API key configured for {}", api_url)
1280            }
1281        };
1282
1283        let api_key_section = if self.should_render_editor(cx) {
1284            v_flex()
1285                .on_action(cx.listener(Self::save_api_key))
1286                .child(Label::new("To use Zed's agent with OpenAI, you need to add an API key. Follow these steps:"))
1287                .child(
1288                    List::new()
1289                        .child(
1290                            ListBulletItem::new("")
1291                                .child(Label::new("Create one by visiting"))
1292                                .child(ButtonLink::new("OpenAI's console", "https://platform.openai.com/api-keys"))
1293                        )
1294                        .child(
1295                            ListBulletItem::new("Ensure your OpenAI account has credits")
1296                        )
1297                        .child(
1298                            ListBulletItem::new("Paste your API key below and hit enter to start using the agent")
1299                        ),
1300                )
1301                .child(self.api_key_editor.clone())
1302                .child(
1303                    Label::new(format!(
1304                        "You can also set the {API_KEY_ENV_VAR_NAME} environment variable and restart Zed."
1305                    ))
1306                    .size(LabelSize::Small)
1307                    .color(Color::Muted),
1308                )
1309                .child(
1310                    Label::new(
1311                        "Note that having a subscription for another service like GitHub Copilot won't work.",
1312                    )
1313                    .size(LabelSize::Small).color(Color::Muted),
1314                )
1315                .into_any_element()
1316        } else {
1317            ConfiguredApiCard::new(configured_card_label)
1318                .disabled(env_var_set)
1319                .on_click(cx.listener(|this, _, window, cx| this.reset_api_key(window, cx)))
1320                .when(env_var_set, |this| {
1321                    this.tooltip_label(format!("To reset your API key, unset the {API_KEY_ENV_VAR_NAME} environment variable."))
1322                })
1323                .into_any_element()
1324        };
1325
1326        let compatible_api_section = h_flex()
1327            .mt_1p5()
1328            .gap_0p5()
1329            .flex_wrap()
1330            .when(self.should_render_editor(cx), |this| {
1331                this.pt_1p5()
1332                    .border_t_1()
1333                    .border_color(cx.theme().colors().border_variant)
1334            })
1335            .child(
1336                h_flex()
1337                    .gap_2()
1338                    .child(
1339                        Icon::new(IconName::Info)
1340                            .size(IconSize::XSmall)
1341                            .color(Color::Muted),
1342                    )
1343                    .child(Label::new("Zed also supports OpenAI-compatible models.")),
1344            )
1345            .child(
1346                Button::new("docs", "Learn More")
1347                    .icon(IconName::ArrowUpRight)
1348                    .icon_size(IconSize::Small)
1349                    .icon_color(Color::Muted)
1350                    .on_click(move |_, _window, cx| {
1351                        cx.open_url("https://zed.dev/docs/ai/llm-providers#openai-api-compatible")
1352                    }),
1353            );
1354
1355        if self.load_credentials_task.is_some() {
1356            div().child(Label::new("Loading credentials…")).into_any()
1357        } else {
1358            v_flex()
1359                .size_full()
1360                .child(api_key_section)
1361                .child(compatible_api_section)
1362                .into_any()
1363        }
1364    }
1365}
1366
1367#[cfg(test)]
1368mod tests {
1369    use super::*;
1370    use futures::{StreamExt, executor::block_on};
1371    use gpui::TestAppContext;
1372    use language_model::{LanguageModelRequestMessage, LanguageModelRequestTool};
1373    use open_ai::responses::{
1374        ResponseFunctionToolCall, ResponseOutputItem, ResponseOutputMessage, ResponseStatusDetails,
1375        ResponseSummary, ResponseUsage, StreamEvent as ResponsesStreamEvent,
1376    };
1377    use pretty_assertions::assert_eq;
1378
1379    fn map_response_events(events: Vec<ResponsesStreamEvent>) -> Vec<LanguageModelCompletionEvent> {
1380        block_on(async {
1381            OpenAiResponseEventMapper::new()
1382                .map_stream(Box::pin(futures::stream::iter(events.into_iter().map(Ok))))
1383                .collect::<Vec<_>>()
1384                .await
1385                .into_iter()
1386                .map(Result::unwrap)
1387                .collect()
1388        })
1389    }
1390
1391    fn response_item_message(id: &str) -> ResponseOutputItem {
1392        ResponseOutputItem::Message(ResponseOutputMessage {
1393            id: Some(id.to_string()),
1394            role: Some("assistant".to_string()),
1395            status: Some("in_progress".to_string()),
1396            content: vec![],
1397        })
1398    }
1399
1400    fn response_item_function_call(id: &str, args: Option<&str>) -> ResponseOutputItem {
1401        ResponseOutputItem::FunctionCall(ResponseFunctionToolCall {
1402            id: Some(id.to_string()),
1403            status: Some("in_progress".to_string()),
1404            name: Some("get_weather".to_string()),
1405            call_id: Some("call_123".to_string()),
1406            arguments: args.map(|s| s.to_string()).unwrap_or_default(),
1407        })
1408    }
1409
    #[gpui::test]
    fn tiktoken_rs_support(cx: &TestAppContext) {
        let request = LanguageModelRequest {
            thread_id: None,
            prompt_id: None,
            intent: None,
            mode: None,
            messages: vec![LanguageModelRequestMessage {
                role: Role::User,
                content: vec![MessageContent::Text("message".into())],
                cache: false,
                reasoning_details: None,
            }],
            tools: vec![],
            tool_choice: None,
            stop: vec![],
            temperature: None,
            thinking_allowed: true,
        };

        // Validate that all models are supported by tiktoken-rs
        for model in Model::iter() {
            let count = cx
                .executor()
                .block(count_open_ai_tokens(
                    request.clone(),
                    model,
                    &cx.app.borrow(),
                ))
                .unwrap();
            assert!(count > 0);
        }
    }

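    // Happy path: a message item plus a text delta should map to
    // StartMessage -> Text, and the final Completed summary should surface
    // token usage before the EndTurn stop event.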
    #[test]
    fn responses_stream_maps_text_and_usage() {
        let events = vec![
            ResponsesStreamEvent::OutputItemAdded {
                output_index: 0,
                sequence_number: None,
                item: response_item_message("msg_123"),
            },
            ResponsesStreamEvent::OutputTextDelta {
                item_id: "msg_123".into(),
                output_index: 0,
                content_index: Some(0),
                delta: "Hello".into(),
            },
            ResponsesStreamEvent::Completed {
                response: ResponseSummary {
                    usage: Some(ResponseUsage {
                        input_tokens: Some(5),
                        output_tokens: Some(3),
                        total_tokens: Some(8),
                    }),
                    ..Default::default()
                },
            },
        ];

        let mapped = map_response_events(events);
        assert!(matches!(
            mapped[0],
            LanguageModelCompletionEvent::StartMessage { ref message_id } if message_id == "msg_123"
        ));
        assert!(matches!(
            mapped[1],
            LanguageModelCompletionEvent::Text(ref text) if text == "Hello"
        ));
        assert!(matches!(
            mapped[2],
            LanguageModelCompletionEvent::UsageUpdate(TokenUsage {
                input_tokens: 5,
                output_tokens: 3,
                ..
            })
        ));
        assert!(matches!(
            mapped[3],
            LanguageModelCompletionEvent::Stop(StopReason::EndTurn)
        ));
    }

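    // Exercises the full request translation: system/user/assistant messages,
    // images, tool definitions, tool calls and their outputs should all land
    // in the exact JSON shape the Responses API expects, with
    // `LanguageModelToolChoice::Any` serialized as "required" and the thread
    // id reused as the prompt cache key.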
    #[test]
    fn into_open_ai_response_builds_complete_payload() {
        let tool_call_id = LanguageModelToolUseId::from("call-42");
        let tool_input = json!({ "city": "Boston" });
        let tool_arguments = serde_json::to_string(&tool_input).unwrap();
        let tool_use = LanguageModelToolUse {
            id: tool_call_id.clone(),
            name: Arc::from("get_weather"),
            raw_input: tool_arguments.clone(),
            input: tool_input,
            is_input_complete: true,
            thought_signature: None,
        };
        let tool_result = LanguageModelToolResult {
            tool_use_id: tool_call_id,
            tool_name: Arc::from("get_weather"),
            is_error: false,
            content: LanguageModelToolResultContent::Text(Arc::from("Sunny")),
            output: Some(json!({ "forecast": "Sunny" })),
        };
        let user_image = LanguageModelImage {
            source: SharedString::from("aGVsbG8="),
            size: None,
        };
        let expected_image_url = user_image.to_base64_url();

        let request = LanguageModelRequest {
            thread_id: Some("thread-123".into()),
            prompt_id: None,
            intent: None,
            mode: None,
            messages: vec![
                LanguageModelRequestMessage {
                    role: Role::System,
                    content: vec![MessageContent::Text("System context".into())],
                    cache: false,
                    reasoning_details: None,
                },
                LanguageModelRequestMessage {
                    role: Role::User,
                    content: vec![
                        MessageContent::Text("Please check the weather.".into()),
                        MessageContent::Image(user_image),
                    ],
                    cache: false,
                    reasoning_details: None,
                },
                LanguageModelRequestMessage {
                    role: Role::Assistant,
                    content: vec![
                        MessageContent::Text("Looking that up.".into()),
                        MessageContent::ToolUse(tool_use),
                    ],
                    cache: false,
                    reasoning_details: None,
                },
                LanguageModelRequestMessage {
                    role: Role::Assistant,
                    content: vec![MessageContent::ToolResult(tool_result)],
                    cache: false,
                    reasoning_details: None,
                },
            ],
            tools: vec![LanguageModelRequestTool {
                name: "get_weather".into(),
                description: "Fetches the weather".into(),
                input_schema: json!({ "type": "object" }),
            }],
            tool_choice: Some(LanguageModelToolChoice::Any),
            stop: vec!["<STOP>".into()],
            temperature: None,
            thinking_allowed: false,
        };

        let response = into_open_ai_response(
            request,
            "custom-model",
            true,
            true,
            Some(2048),
            Some(ReasoningEffort::Low),
        );

        let serialized = serde_json::to_value(&response).unwrap();
        let expected = json!({
            "model": "custom-model",
            "input": [
                {
                    "type": "message",
                    "role": "system",
                    "content": [
                        { "type": "input_text", "text": "System context" }
                    ]
                },
                {
                    "type": "message",
                    "role": "user",
                    "content": [
                        { "type": "input_text", "text": "Please check the weather." },
                        { "type": "input_image", "image_url": expected_image_url }
                    ]
                },
                {
                    "type": "message",
                    "role": "assistant",
                    "status": "completed",
                    "content": [
                        { "type": "output_text", "text": "Looking that up.", "annotations": [] }
                    ]
                },
                {
                    "type": "function_call",
                    "call_id": "call-42",
                    "name": "get_weather",
                    "arguments": tool_arguments
                },
                {
                    "type": "function_call_output",
                    "call_id": "call-42",
                    "output": "{\"forecast\":\"Sunny\"}"
                }
            ],
            "stream": true,
            "max_output_tokens": 2048,
            "parallel_tool_calls": true,
            "tool_choice": "required",
            "tools": [
                {
                    "type": "function",
                    "name": "get_weather",
                    "description": "Fetches the weather",
                    "parameters": { "type": "object" }
                }
            ],
            "prompt_cache_key": "thread-123",
            "reasoning": { "effort": "low" }
        });

        assert_eq!(serialized, expected);
    }

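    // Function-call arguments arrive incrementally; the mapper should buffer
    // the deltas and emit a single complete ToolUse (keyed by `call_id`, not
    // the item id) once FunctionCallArgumentsDone arrives, then stop with
    // StopReason::ToolUse.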
    #[test]
    fn responses_stream_maps_tool_calls() {
        let events = vec![
            ResponsesStreamEvent::OutputItemAdded {
                output_index: 0,
                sequence_number: None,
                item: response_item_function_call("item_fn", Some("{\"city\":\"Bos")),
            },
            ResponsesStreamEvent::FunctionCallArgumentsDelta {
                item_id: "item_fn".into(),
                output_index: 0,
                delta: "ton\"}".into(),
                sequence_number: None,
            },
            ResponsesStreamEvent::FunctionCallArgumentsDone {
                item_id: "item_fn".into(),
                output_index: 0,
                arguments: "{\"city\":\"Boston\"}".into(),
                sequence_number: None,
            },
            ResponsesStreamEvent::Completed {
                response: ResponseSummary::default(),
            },
        ];

        let mapped = map_response_events(events);
        assert!(matches!(
            mapped[0],
            LanguageModelCompletionEvent::ToolUse(LanguageModelToolUse {
                ref id,
                ref name,
                ref raw_input,
                ..
            }) if id.to_string() == "call_123"
                && name.as_ref() == "get_weather"
                && raw_input == "{\"city\":\"Boston\"}"
        ));
        assert!(matches!(
            mapped[1],
            LanguageModelCompletionEvent::Stop(StopReason::ToolUse)
        ));
    }

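    // An Incomplete response whose status reason is "max_output_tokens"
    // should still report usage and map to StopReason::MaxTokens rather than
    // EndTurn.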
    #[test]
    fn responses_stream_uses_max_tokens_stop_reason() {
        let events = vec![ResponsesStreamEvent::Incomplete {
            response: ResponseSummary {
                status_details: Some(ResponseStatusDetails {
                    reason: Some("max_output_tokens".into()),
                    r#type: Some("incomplete".into()),
                    error: None,
                }),
                usage: Some(ResponseUsage {
                    input_tokens: Some(10),
                    output_tokens: Some(20),
                    total_tokens: Some(30),
                }),
                ..Default::default()
            },
        }];

        let mapped = map_response_events(events);
        assert!(matches!(
            mapped[0],
            LanguageModelCompletionEvent::UsageUpdate(TokenUsage {
                input_tokens: 10,
                output_tokens: 20,
                ..
            })
        ));
        assert!(matches!(
            mapped[1],
            LanguageModelCompletionEvent::Stop(StopReason::MaxTokens)
        ));
    }

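    // Two function calls in one turn should produce two ToolUse events in
    // stream order, followed by a single ToolUse stop event.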
    #[test]
    fn responses_stream_handles_multiple_tool_calls() {
        let events = vec![
            ResponsesStreamEvent::OutputItemAdded {
                output_index: 0,
                sequence_number: None,
                item: response_item_function_call("item_fn1", Some("{\"city\":\"NYC\"}")),
            },
            ResponsesStreamEvent::FunctionCallArgumentsDone {
                item_id: "item_fn1".into(),
                output_index: 0,
                arguments: "{\"city\":\"NYC\"}".into(),
                sequence_number: None,
            },
            ResponsesStreamEvent::OutputItemAdded {
                output_index: 1,
                sequence_number: None,
                item: response_item_function_call("item_fn2", Some("{\"city\":\"LA\"}")),
            },
            ResponsesStreamEvent::FunctionCallArgumentsDone {
                item_id: "item_fn2".into(),
                output_index: 1,
                arguments: "{\"city\":\"LA\"}".into(),
                sequence_number: None,
            },
            ResponsesStreamEvent::Completed {
                response: ResponseSummary::default(),
            },
        ];

        let mapped = map_response_events(events);
        assert_eq!(mapped.len(), 3);
        assert!(matches!(
            mapped[0],
            LanguageModelCompletionEvent::ToolUse(LanguageModelToolUse { ref raw_input, .. })
            if raw_input == "{\"city\":\"NYC\"}"
        ));
        assert!(matches!(
            mapped[1],
            LanguageModelCompletionEvent::ToolUse(LanguageModelToolUse { ref raw_input, .. })
            if raw_input == "{\"city\":\"LA\"}"
        ));
        assert!(matches!(
            mapped[2],
            LanguageModelCompletionEvent::Stop(StopReason::ToolUse)
        ));
    }

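    // A turn that mixes assistant text with a tool call should interleave
    // cleanly: StartMessage and Text first, then the ToolUse, with the
    // pending call turning the final stop into StopReason::ToolUse.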
    #[test]
    fn responses_stream_handles_mixed_text_and_tool_calls() {
        let events = vec![
            ResponsesStreamEvent::OutputItemAdded {
                output_index: 0,
                sequence_number: None,
                item: response_item_message("msg_123"),
            },
            ResponsesStreamEvent::OutputTextDelta {
                item_id: "msg_123".into(),
                output_index: 0,
                content_index: Some(0),
                delta: "Let me check that".into(),
            },
            ResponsesStreamEvent::OutputItemAdded {
                output_index: 1,
                sequence_number: None,
                item: response_item_function_call("item_fn", Some("{\"query\":\"test\"}")),
            },
            ResponsesStreamEvent::FunctionCallArgumentsDone {
                item_id: "item_fn".into(),
                output_index: 1,
                arguments: "{\"query\":\"test\"}".into(),
                sequence_number: None,
            },
            ResponsesStreamEvent::Completed {
                response: ResponseSummary::default(),
            },
        ];

        let mapped = map_response_events(events);
        assert!(matches!(
            mapped[0],
            LanguageModelCompletionEvent::StartMessage { .. }
        ));
        assert!(matches!(
            mapped[1],
            LanguageModelCompletionEvent::Text(ref text) if text == "Let me check that"
        ));
        assert!(matches!(
            mapped[2],
            LanguageModelCompletionEvent::ToolUse(LanguageModelToolUse { ref raw_input, .. })
            if raw_input == "{\"query\":\"test\"}"
        ));
        assert!(matches!(
            mapped[3],
            LanguageModelCompletionEvent::Stop(StopReason::ToolUse)
        ));
    }

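    // Malformed argument JSON should map to a ToolUseJsonParseError event
    // (not a stream error), preserving the raw input for diagnostics.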
    #[test]
    fn responses_stream_handles_json_parse_error() {
        let events = vec![
            ResponsesStreamEvent::OutputItemAdded {
                output_index: 0,
                sequence_number: None,
                item: response_item_function_call("item_fn", Some("{invalid json")),
            },
            ResponsesStreamEvent::FunctionCallArgumentsDone {
                item_id: "item_fn".into(),
                output_index: 0,
                arguments: "{invalid json".into(),
                sequence_number: None,
            },
            ResponsesStreamEvent::Completed {
                response: ResponseSummary::default(),
            },
        ];

        let mapped = map_response_events(events);
        assert!(matches!(
            mapped[0],
            LanguageModelCompletionEvent::ToolUseJsonParseError {
                ref raw_input,
                ..
            } if raw_input.as_ref() == "{invalid json"
        ));
    }

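    // If the stream is cut off by max_output_tokens before
    // FunctionCallArgumentsDone, the Incomplete summary's `output` carries the
    // finished arguments; the mapper should recover the tool call from there
    // and stop with MaxTokens.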
    #[test]
    fn responses_stream_handles_incomplete_function_call() {
        let events = vec![
            ResponsesStreamEvent::OutputItemAdded {
                output_index: 0,
                sequence_number: None,
                item: response_item_function_call("item_fn", Some("{\"city\":")),
            },
            ResponsesStreamEvent::FunctionCallArgumentsDelta {
                item_id: "item_fn".into(),
                output_index: 0,
                delta: "\"Boston\"".into(),
                sequence_number: None,
            },
            ResponsesStreamEvent::Incomplete {
                response: ResponseSummary {
                    status_details: Some(ResponseStatusDetails {
                        reason: Some("max_output_tokens".into()),
                        r#type: Some("incomplete".into()),
                        error: None,
                    }),
                    output: vec![response_item_function_call(
                        "item_fn",
                        Some("{\"city\":\"Boston\"}"),
                    )],
                    ..Default::default()
                },
            },
        ];

        let mapped = map_response_events(events);
        assert!(matches!(
            mapped[0],
            LanguageModelCompletionEvent::ToolUse(LanguageModelToolUse { ref raw_input, .. })
            if raw_input == "{\"city\":\"Boston\"}"
        ));
        assert!(matches!(
            mapped[1],
            LanguageModelCompletionEvent::Stop(StopReason::MaxTokens)
        ));
    }

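    // Conversely, when FunctionCallArgumentsDone has already produced the
    // ToolUse, the copy repeated in the Incomplete summary's `output` must not
    // be emitted a second time: exactly one ToolUse plus the MaxTokens stop.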
    #[test]
    fn responses_stream_incomplete_does_not_duplicate_tool_calls() {
        let events = vec![
            ResponsesStreamEvent::OutputItemAdded {
                output_index: 0,
                sequence_number: None,
                item: response_item_function_call("item_fn", Some("{\"city\":\"Boston\"}")),
            },
            ResponsesStreamEvent::FunctionCallArgumentsDone {
                item_id: "item_fn".into(),
                output_index: 0,
                arguments: "{\"city\":\"Boston\"}".into(),
                sequence_number: None,
            },
            ResponsesStreamEvent::Incomplete {
                response: ResponseSummary {
                    status_details: Some(ResponseStatusDetails {
                        reason: Some("max_output_tokens".into()),
                        r#type: Some("incomplete".into()),
                        error: None,
                    }),
                    output: vec![response_item_function_call(
                        "item_fn",
                        Some("{\"city\":\"Boston\"}"),
                    )],
                    ..Default::default()
                },
            },
        ];

        let mapped = map_response_events(events);
        assert_eq!(mapped.len(), 2);
        assert!(matches!(
            mapped[0],
            LanguageModelCompletionEvent::ToolUse(LanguageModelToolUse { ref raw_input, .. })
            if raw_input == "{\"city\":\"Boston\"}"
        ));
        assert!(matches!(
            mapped[1],
            LanguageModelCompletionEvent::Stop(StopReason::MaxTokens)
        ));
    }
}