open_ai.rs

   1use anyhow::{Result, anyhow};
   2use collections::{BTreeMap, HashMap};
   3use futures::Stream;
   4use futures::{FutureExt, StreamExt, future::BoxFuture};
   5use gpui::{AnyView, App, AsyncApp, Context, Entity, SharedString, Task, Window};
   6use http_client::HttpClient;
   7use language_model::{
   8    ApiKeyState, AuthenticateError, EnvVar, IconOrSvg, LanguageModel, LanguageModelCompletionError,
   9    LanguageModelCompletionEvent, LanguageModelId, LanguageModelImage, LanguageModelName,
  10    LanguageModelProvider, LanguageModelProviderId, LanguageModelProviderName,
  11    LanguageModelProviderState, LanguageModelRequest, LanguageModelRequestMessage,
  12    LanguageModelToolChoice, LanguageModelToolResult, LanguageModelToolResultContent,
  13    LanguageModelToolUse, LanguageModelToolUseId, MessageContent, RateLimiter, Role, StopReason,
  14    TokenUsage, env_var,
  15};
  16use menu;
  17use open_ai::responses::{
  18    ResponseFunctionCallItem, ResponseFunctionCallOutputItem, ResponseInputContent,
  19    ResponseInputItem, ResponseMessageItem,
  20};
  21use open_ai::{
  22    ImageUrl, Model, OPEN_AI_API_URL, ReasoningEffort, ResponseStreamEvent,
  23    responses::{
  24        Request as ResponseRequest, ResponseOutputItem, ResponseSummary as ResponsesSummary,
  25        ResponseUsage as ResponsesUsage, StreamEvent as ResponsesStreamEvent, stream_response,
  26    },
  27    stream_completion,
  28};
  29use settings::{OpenAiAvailableModel as AvailableModel, Settings, SettingsStore};
  30use std::pin::Pin;
  31use std::str::FromStr as _;
  32use std::sync::{Arc, LazyLock};
  33use strum::IntoEnumIterator;
  34use ui::{ButtonLink, ConfiguredApiCard, List, ListBulletItem, prelude::*};
  35use ui_input::InputField;
  36use util::ResultExt;
  37
  38const PROVIDER_ID: LanguageModelProviderId = language_model::OPEN_AI_PROVIDER_ID;
  39const PROVIDER_NAME: LanguageModelProviderName = language_model::OPEN_AI_PROVIDER_NAME;
  40
  41const API_KEY_ENV_VAR_NAME: &str = "OPENAI_API_KEY";
  42static API_KEY_ENV_VAR: LazyLock<EnvVar> = env_var!(API_KEY_ENV_VAR_NAME);
  43
  44#[derive(Default, Clone, Debug, PartialEq)]
  45pub struct OpenAiSettings {
  46    pub api_url: String,
  47    pub available_models: Vec<AvailableModel>,
  48}
  49
  50pub struct OpenAiLanguageModelProvider {
  51    http_client: Arc<dyn HttpClient>,
  52    state: Entity<State>,
  53}
  54
  55pub struct State {
  56    api_key_state: ApiKeyState,
  57}
  58
  59impl State {
  60    fn is_authenticated(&self) -> bool {
  61        self.api_key_state.has_key()
  62    }
  63
  64    fn set_api_key(&mut self, api_key: Option<String>, cx: &mut Context<Self>) -> Task<Result<()>> {
  65        let api_url = OpenAiLanguageModelProvider::api_url(cx);
  66        self.api_key_state
  67            .store(api_url, api_key, |this| &mut this.api_key_state, cx)
  68    }
  69
  70    fn authenticate(&mut self, cx: &mut Context<Self>) -> Task<Result<(), AuthenticateError>> {
  71        let api_url = OpenAiLanguageModelProvider::api_url(cx);
  72        self.api_key_state
  73            .load_if_needed(api_url, |this| &mut this.api_key_state, cx)
  74    }
  75}
  76
  77impl OpenAiLanguageModelProvider {
  78    pub fn new(http_client: Arc<dyn HttpClient>, cx: &mut App) -> Self {
  79        let state = cx.new(|cx| {
  80            cx.observe_global::<SettingsStore>(|this: &mut State, cx| {
  81                let api_url = Self::api_url(cx);
  82                this.api_key_state
  83                    .handle_url_change(api_url, |this| &mut this.api_key_state, cx);
  84                cx.notify();
  85            })
  86            .detach();
  87            State {
  88                api_key_state: ApiKeyState::new(Self::api_url(cx), (*API_KEY_ENV_VAR).clone()),
  89            }
  90        });
  91
  92        Self { http_client, state }
  93    }
  94
  95    fn create_language_model(&self, model: open_ai::Model) -> Arc<dyn LanguageModel> {
  96        Arc::new(OpenAiLanguageModel {
  97            id: LanguageModelId::from(model.id().to_string()),
  98            model,
  99            state: self.state.clone(),
 100            http_client: self.http_client.clone(),
 101            request_limiter: RateLimiter::new(4),
 102        })
 103    }
 104
 105    fn settings(cx: &App) -> &OpenAiSettings {
 106        &crate::AllLanguageModelSettings::get_global(cx).openai
 107    }
 108
 109    fn api_url(cx: &App) -> SharedString {
 110        let api_url = &Self::settings(cx).api_url;
 111        if api_url.is_empty() {
 112            open_ai::OPEN_AI_API_URL.into()
 113        } else {
 114            SharedString::new(api_url.as_str())
 115        }
 116    }
 117}
 118
 119impl LanguageModelProviderState for OpenAiLanguageModelProvider {
 120    type ObservableEntity = State;
 121
 122    fn observable_entity(&self) -> Option<Entity<Self::ObservableEntity>> {
 123        Some(self.state.clone())
 124    }
 125}
 126
 127impl LanguageModelProvider for OpenAiLanguageModelProvider {
 128    fn id(&self) -> LanguageModelProviderId {
 129        PROVIDER_ID
 130    }
 131
 132    fn name(&self) -> LanguageModelProviderName {
 133        PROVIDER_NAME
 134    }
 135
 136    fn icon(&self) -> IconOrSvg {
 137        IconOrSvg::Icon(IconName::AiOpenAi)
 138    }
 139
 140    fn default_model(&self, _cx: &App) -> Option<Arc<dyn LanguageModel>> {
 141        Some(self.create_language_model(open_ai::Model::default()))
 142    }
 143
 144    fn default_fast_model(&self, _cx: &App) -> Option<Arc<dyn LanguageModel>> {
 145        Some(self.create_language_model(open_ai::Model::default_fast()))
 146    }
 147
 148    fn provided_models(&self, cx: &App) -> Vec<Arc<dyn LanguageModel>> {
 149        let mut models = BTreeMap::default();
 150
 151        // Add base models from open_ai::Model::iter()
 152        for model in open_ai::Model::iter() {
 153            if !matches!(model, open_ai::Model::Custom { .. }) {
 154                models.insert(model.id().to_string(), model);
 155            }
 156        }
 157
 158        // Override with available models from settings
 159        for model in &OpenAiLanguageModelProvider::settings(cx).available_models {
 160            models.insert(
 161                model.name.clone(),
 162                open_ai::Model::Custom {
 163                    name: model.name.clone(),
 164                    display_name: model.display_name.clone(),
 165                    max_tokens: model.max_tokens,
 166                    max_output_tokens: model.max_output_tokens,
 167                    max_completion_tokens: model.max_completion_tokens,
 168                    reasoning_effort: model.reasoning_effort.clone(),
 169                    supports_chat_completions: model.capabilities.chat_completions,
 170                },
 171            );
 172        }
 173
 174        models
 175            .into_values()
 176            .map(|model| self.create_language_model(model))
 177            .collect()
 178    }
 179
 180    fn is_authenticated(&self, cx: &App) -> bool {
 181        self.state.read(cx).is_authenticated()
 182    }
 183
 184    fn authenticate(&self, cx: &mut App) -> Task<Result<(), AuthenticateError>> {
 185        self.state.update(cx, |state, cx| state.authenticate(cx))
 186    }
 187
 188    fn configuration_view(
 189        &self,
 190        _target_agent: language_model::ConfigurationViewTargetAgent,
 191        window: &mut Window,
 192        cx: &mut App,
 193    ) -> AnyView {
 194        cx.new(|cx| ConfigurationView::new(self.state.clone(), window, cx))
 195            .into()
 196    }
 197
 198    fn reset_credentials(&self, cx: &mut App) -> Task<Result<()>> {
 199        self.state
 200            .update(cx, |state, cx| state.set_api_key(None, cx))
 201    }
 202}
 203
 204pub struct OpenAiLanguageModel {
 205    id: LanguageModelId,
 206    model: open_ai::Model,
 207    state: Entity<State>,
 208    http_client: Arc<dyn HttpClient>,
 209    request_limiter: RateLimiter,
 210}
 211
 212impl OpenAiLanguageModel {
 213    fn stream_completion(
 214        &self,
 215        request: open_ai::Request,
 216        cx: &AsyncApp,
 217    ) -> BoxFuture<'static, Result<futures::stream::BoxStream<'static, Result<ResponseStreamEvent>>>>
 218    {
 219        let http_client = self.http_client.clone();
 220
 221        let (api_key, api_url) = self.state.read_with(cx, |state, cx| {
 222            let api_url = OpenAiLanguageModelProvider::api_url(cx);
 223            (state.api_key_state.key(&api_url), api_url)
 224        });
 225
 226        let future = self.request_limiter.stream(async move {
 227            let provider = PROVIDER_NAME;
 228            let Some(api_key) = api_key else {
 229                return Err(LanguageModelCompletionError::NoApiKey { provider });
 230            };
 231            let request = stream_completion(
 232                http_client.as_ref(),
 233                provider.0.as_str(),
 234                &api_url,
 235                &api_key,
 236                request,
 237            );
 238            let response = request.await?;
 239            Ok(response)
 240        });
 241
 242        async move { Ok(future.await?.boxed()) }.boxed()
 243    }
 244
 245    fn stream_response(
 246        &self,
 247        request: ResponseRequest,
 248        cx: &AsyncApp,
 249    ) -> BoxFuture<'static, Result<futures::stream::BoxStream<'static, Result<ResponsesStreamEvent>>>>
 250    {
 251        let http_client = self.http_client.clone();
 252
 253        let (api_key, api_url) = self.state.read_with(cx, |state, cx| {
 254            let api_url = OpenAiLanguageModelProvider::api_url(cx);
 255            (state.api_key_state.key(&api_url), api_url)
 256        });
 257
 258        let provider = PROVIDER_NAME;
 259        let future = self.request_limiter.stream(async move {
 260            let Some(api_key) = api_key else {
 261                return Err(LanguageModelCompletionError::NoApiKey { provider });
 262            };
 263            let request = stream_response(
 264                http_client.as_ref(),
 265                provider.0.as_str(),
 266                &api_url,
 267                &api_key,
 268                request,
 269            );
 270            let response = request.await?;
 271            Ok(response)
 272        });
 273
 274        async move { Ok(future.await?.boxed()) }.boxed()
 275    }
 276}
 277
 278impl LanguageModel for OpenAiLanguageModel {
 279    fn id(&self) -> LanguageModelId {
 280        self.id.clone()
 281    }
 282
 283    fn name(&self) -> LanguageModelName {
 284        LanguageModelName::from(self.model.display_name().to_string())
 285    }
 286
 287    fn provider_id(&self) -> LanguageModelProviderId {
 288        PROVIDER_ID
 289    }
 290
 291    fn provider_name(&self) -> LanguageModelProviderName {
 292        PROVIDER_NAME
 293    }
 294
 295    fn supports_tools(&self) -> bool {
 296        true
 297    }
 298
 299    fn supports_images(&self) -> bool {
 300        use open_ai::Model;
 301        match &self.model {
 302            Model::FourOmni
 303            | Model::FourOmniMini
 304            | Model::FourPointOne
 305            | Model::FourPointOneMini
 306            | Model::FourPointOneNano
 307            | Model::Five
 308            | Model::FiveCodex
 309            | Model::FiveMini
 310            | Model::FiveNano
 311            | Model::FivePointOne
 312            | Model::FivePointTwo
 313            | Model::FivePointTwoCodex
 314            | Model::O1
 315            | Model::O3
 316            | Model::O4Mini => true,
 317            Model::ThreePointFiveTurbo
 318            | Model::Four
 319            | Model::FourTurbo
 320            | Model::O3Mini
 321            | Model::Custom { .. } => false,
 322        }
 323    }
 324
 325    fn supports_tool_choice(&self, choice: LanguageModelToolChoice) -> bool {
 326        match choice {
 327            LanguageModelToolChoice::Auto => true,
 328            LanguageModelToolChoice::Any => true,
 329            LanguageModelToolChoice::None => true,
 330        }
 331    }
 332
 333    fn telemetry_id(&self) -> String {
 334        format!("openai/{}", self.model.id())
 335    }
 336
 337    fn max_token_count(&self) -> u64 {
 338        self.model.max_token_count()
 339    }
 340
 341    fn max_output_tokens(&self) -> Option<u64> {
 342        self.model.max_output_tokens()
 343    }
 344
 345    fn count_tokens(
 346        &self,
 347        request: LanguageModelRequest,
 348        cx: &App,
 349    ) -> BoxFuture<'static, Result<u64>> {
 350        count_open_ai_tokens(request, self.model.clone(), cx)
 351    }
 352
 353    fn stream_completion(
 354        &self,
 355        request: LanguageModelRequest,
 356        cx: &AsyncApp,
 357    ) -> BoxFuture<
 358        'static,
 359        Result<
 360            futures::stream::BoxStream<
 361                'static,
 362                Result<LanguageModelCompletionEvent, LanguageModelCompletionError>,
 363            >,
 364            LanguageModelCompletionError,
 365        >,
 366    > {
 367        if self.model.supports_chat_completions() {
 368            let request = into_open_ai(
 369                request,
 370                self.model.id(),
 371                self.model.supports_parallel_tool_calls(),
 372                self.model.supports_prompt_cache_key(),
 373                self.max_output_tokens(),
 374                self.model.reasoning_effort(),
 375            );
 376            let completions = self.stream_completion(request, cx);
 377            async move {
 378                let mapper = OpenAiEventMapper::new();
 379                Ok(mapper.map_stream(completions.await?).boxed())
 380            }
 381            .boxed()
 382        } else {
 383            let request = into_open_ai_response(
 384                request,
 385                self.model.id(),
 386                self.model.supports_parallel_tool_calls(),
 387                self.model.supports_prompt_cache_key(),
 388                self.max_output_tokens(),
 389                self.model.reasoning_effort(),
 390            );
 391            let completions = self.stream_response(request, cx);
 392            async move {
 393                let mapper = OpenAiResponseEventMapper::new();
 394                Ok(mapper.map_stream(completions.await?).boxed())
 395            }
 396            .boxed()
 397        }
 398    }
 399}
 400
 401pub fn into_open_ai(
 402    request: LanguageModelRequest,
 403    model_id: &str,
 404    supports_parallel_tool_calls: bool,
 405    supports_prompt_cache_key: bool,
 406    max_output_tokens: Option<u64>,
 407    reasoning_effort: Option<ReasoningEffort>,
 408) -> open_ai::Request {
 409    let stream = !model_id.starts_with("o1-");
 410
 411    let mut messages = Vec::new();
 412    for message in request.messages {
 413        for content in message.content {
 414            match content {
 415                MessageContent::Text(text) | MessageContent::Thinking { text, .. } => {
 416                    if !text.trim().is_empty() {
 417                        add_message_content_part(
 418                            open_ai::MessagePart::Text { text },
 419                            message.role,
 420                            &mut messages,
 421                        );
 422                    }
 423                }
 424                MessageContent::RedactedThinking(_) => {}
 425                MessageContent::Image(image) => {
 426                    add_message_content_part(
 427                        open_ai::MessagePart::Image {
 428                            image_url: ImageUrl {
 429                                url: image.to_base64_url(),
 430                                detail: None,
 431                            },
 432                        },
 433                        message.role,
 434                        &mut messages,
 435                    );
 436                }
 437                MessageContent::ToolUse(tool_use) => {
 438                    let tool_call = open_ai::ToolCall {
 439                        id: tool_use.id.to_string(),
 440                        content: open_ai::ToolCallContent::Function {
 441                            function: open_ai::FunctionContent {
 442                                name: tool_use.name.to_string(),
 443                                arguments: serde_json::to_string(&tool_use.input)
 444                                    .unwrap_or_default(),
 445                            },
 446                        },
 447                    };
 448
 449                    if let Some(open_ai::RequestMessage::Assistant { tool_calls, .. }) =
 450                        messages.last_mut()
 451                    {
 452                        tool_calls.push(tool_call);
 453                    } else {
 454                        messages.push(open_ai::RequestMessage::Assistant {
 455                            content: None,
 456                            tool_calls: vec![tool_call],
 457                        });
 458                    }
 459                }
 460                MessageContent::ToolResult(tool_result) => {
 461                    let content = match &tool_result.content {
 462                        LanguageModelToolResultContent::Text(text) => {
 463                            vec![open_ai::MessagePart::Text {
 464                                text: text.to_string(),
 465                            }]
 466                        }
 467                        LanguageModelToolResultContent::Image(image) => {
 468                            vec![open_ai::MessagePart::Image {
 469                                image_url: ImageUrl {
 470                                    url: image.to_base64_url(),
 471                                    detail: None,
 472                                },
 473                            }]
 474                        }
 475                    };
 476
 477                    messages.push(open_ai::RequestMessage::Tool {
 478                        content: content.into(),
 479                        tool_call_id: tool_result.tool_use_id.to_string(),
 480                    });
 481                }
 482            }
 483        }
 484    }
 485
 486    open_ai::Request {
 487        model: model_id.into(),
 488        messages,
 489        stream,
 490        stop: request.stop,
 491        temperature: request.temperature.or(Some(1.0)),
 492        max_completion_tokens: max_output_tokens,
 493        parallel_tool_calls: if supports_parallel_tool_calls && !request.tools.is_empty() {
 494            // Disable parallel tool calls, as the Agent currently expects a maximum of one per turn.
 495            Some(false)
 496        } else {
 497            None
 498        },
 499        prompt_cache_key: if supports_prompt_cache_key {
 500            request.thread_id
 501        } else {
 502            None
 503        },
 504        tools: request
 505            .tools
 506            .into_iter()
 507            .map(|tool| open_ai::ToolDefinition::Function {
 508                function: open_ai::FunctionDefinition {
 509                    name: tool.name,
 510                    description: Some(tool.description),
 511                    parameters: Some(tool.input_schema),
 512                },
 513            })
 514            .collect(),
 515        tool_choice: request.tool_choice.map(|choice| match choice {
 516            LanguageModelToolChoice::Auto => open_ai::ToolChoice::Auto,
 517            LanguageModelToolChoice::Any => open_ai::ToolChoice::Required,
 518            LanguageModelToolChoice::None => open_ai::ToolChoice::None,
 519        }),
 520        reasoning_effort,
 521    }
 522}
 523
 524pub fn into_open_ai_response(
 525    request: LanguageModelRequest,
 526    model_id: &str,
 527    supports_parallel_tool_calls: bool,
 528    supports_prompt_cache_key: bool,
 529    max_output_tokens: Option<u64>,
 530    reasoning_effort: Option<ReasoningEffort>,
 531) -> ResponseRequest {
 532    let stream = !model_id.starts_with("o1-");
 533
 534    let LanguageModelRequest {
 535        thread_id,
 536        prompt_id: _,
 537        intent: _,
 538        mode: _,
 539        messages,
 540        tools,
 541        tool_choice,
 542        stop: _,
 543        temperature,
 544        thinking_allowed: _,
 545    } = request;
 546
 547    let mut input_items = Vec::new();
 548    for (index, message) in messages.into_iter().enumerate() {
 549        append_message_to_response_items(message, index, &mut input_items);
 550    }
 551
 552    let tools: Vec<_> = tools
 553        .into_iter()
 554        .map(|tool| open_ai::responses::ToolDefinition::Function {
 555            name: tool.name,
 556            description: Some(tool.description),
 557            parameters: Some(tool.input_schema),
 558            strict: None,
 559        })
 560        .collect();
 561
 562    ResponseRequest {
 563        model: model_id.into(),
 564        input: input_items,
 565        stream,
 566        temperature,
 567        top_p: None,
 568        max_output_tokens,
 569        parallel_tool_calls: if tools.is_empty() {
 570            None
 571        } else {
 572            Some(supports_parallel_tool_calls)
 573        },
 574        tool_choice: tool_choice.map(|choice| match choice {
 575            LanguageModelToolChoice::Auto => open_ai::ToolChoice::Auto,
 576            LanguageModelToolChoice::Any => open_ai::ToolChoice::Required,
 577            LanguageModelToolChoice::None => open_ai::ToolChoice::None,
 578        }),
 579        tools,
 580        prompt_cache_key: if supports_prompt_cache_key {
 581            thread_id
 582        } else {
 583            None
 584        },
 585        reasoning: reasoning_effort.map(|effort| open_ai::responses::ReasoningConfig { effort }),
 586    }
 587}
 588
 589fn append_message_to_response_items(
 590    message: LanguageModelRequestMessage,
 591    index: usize,
 592    input_items: &mut Vec<ResponseInputItem>,
 593) {
 594    let mut content_parts: Vec<ResponseInputContent> = Vec::new();
 595
 596    for content in message.content {
 597        match content {
 598            MessageContent::Text(text) => {
 599                push_response_text_part(&message.role, text, &mut content_parts);
 600            }
 601            MessageContent::Thinking { text, .. } => {
 602                push_response_text_part(&message.role, text, &mut content_parts);
 603            }
 604            MessageContent::RedactedThinking(_) => {}
 605            MessageContent::Image(image) => {
 606                push_response_image_part(&message.role, image, &mut content_parts);
 607            }
 608            MessageContent::ToolUse(tool_use) => {
 609                flush_response_parts(&message.role, index, &mut content_parts, input_items);
 610                let call_id = tool_use.id.to_string();
 611                input_items.push(ResponseInputItem::FunctionCall(ResponseFunctionCallItem {
 612                    call_id,
 613                    name: tool_use.name.to_string(),
 614                    arguments: tool_use.raw_input,
 615                }));
 616            }
 617            MessageContent::ToolResult(tool_result) => {
 618                flush_response_parts(&message.role, index, &mut content_parts, input_items);
 619                input_items.push(ResponseInputItem::FunctionCallOutput(
 620                    ResponseFunctionCallOutputItem {
 621                        call_id: tool_result.tool_use_id.to_string(),
 622                        output: tool_result_output(&tool_result),
 623                    },
 624                ));
 625            }
 626        }
 627    }
 628
 629    flush_response_parts(&message.role, index, &mut content_parts, input_items);
 630}
 631
 632fn push_response_text_part(
 633    role: &Role,
 634    text: impl Into<String>,
 635    parts: &mut Vec<ResponseInputContent>,
 636) {
 637    let text = text.into();
 638    if text.trim().is_empty() {
 639        return;
 640    }
 641
 642    match role {
 643        Role::Assistant => parts.push(ResponseInputContent::OutputText {
 644            text,
 645            annotations: Vec::new(),
 646        }),
 647        _ => parts.push(ResponseInputContent::Text { text }),
 648    }
 649}
 650
 651fn push_response_image_part(
 652    role: &Role,
 653    image: LanguageModelImage,
 654    parts: &mut Vec<ResponseInputContent>,
 655) {
 656    match role {
 657        Role::Assistant => parts.push(ResponseInputContent::OutputText {
 658            text: "[image omitted]".to_string(),
 659            annotations: Vec::new(),
 660        }),
 661        _ => parts.push(ResponseInputContent::Image {
 662            image_url: image.to_base64_url(),
 663        }),
 664    }
 665}
 666
 667fn flush_response_parts(
 668    role: &Role,
 669    _index: usize,
 670    parts: &mut Vec<ResponseInputContent>,
 671    input_items: &mut Vec<ResponseInputItem>,
 672) {
 673    if parts.is_empty() {
 674        return;
 675    }
 676
 677    let item = ResponseInputItem::Message(ResponseMessageItem {
 678        role: match role {
 679            Role::User => open_ai::Role::User,
 680            Role::Assistant => open_ai::Role::Assistant,
 681            Role::System => open_ai::Role::System,
 682        },
 683        content: parts.clone(),
 684    });
 685
 686    input_items.push(item);
 687    parts.clear();
 688}
 689
 690fn tool_result_output(result: &LanguageModelToolResult) -> String {
 691    if let Some(output) = &result.output {
 692        match output {
 693            serde_json::Value::String(text) => text.clone(),
 694            serde_json::Value::Null => String::new(),
 695            _ => output.to_string(),
 696        }
 697    } else {
 698        match &result.content {
 699            LanguageModelToolResultContent::Text(text) => text.to_string(),
 700            LanguageModelToolResultContent::Image(image) => image.to_base64_url(),
 701        }
 702    }
 703}
 704
 705fn add_message_content_part(
 706    new_part: open_ai::MessagePart,
 707    role: Role,
 708    messages: &mut Vec<open_ai::RequestMessage>,
 709) {
 710    match (role, messages.last_mut()) {
 711        (Role::User, Some(open_ai::RequestMessage::User { content }))
 712        | (
 713            Role::Assistant,
 714            Some(open_ai::RequestMessage::Assistant {
 715                content: Some(content),
 716                ..
 717            }),
 718        )
 719        | (Role::System, Some(open_ai::RequestMessage::System { content, .. })) => {
 720            content.push_part(new_part);
 721        }
 722        _ => {
 723            messages.push(match role {
 724                Role::User => open_ai::RequestMessage::User {
 725                    content: open_ai::MessageContent::from(vec![new_part]),
 726                },
 727                Role::Assistant => open_ai::RequestMessage::Assistant {
 728                    content: Some(open_ai::MessageContent::from(vec![new_part])),
 729                    tool_calls: Vec::new(),
 730                },
 731                Role::System => open_ai::RequestMessage::System {
 732                    content: open_ai::MessageContent::from(vec![new_part]),
 733                },
 734            });
 735        }
 736    }
 737}
 738
 739pub struct OpenAiEventMapper {
 740    tool_calls_by_index: HashMap<usize, RawToolCall>,
 741}
 742
 743impl OpenAiEventMapper {
 744    pub fn new() -> Self {
 745        Self {
 746            tool_calls_by_index: HashMap::default(),
 747        }
 748    }
 749
 750    pub fn map_stream(
 751        mut self,
 752        events: Pin<Box<dyn Send + Stream<Item = Result<ResponseStreamEvent>>>>,
 753    ) -> impl Stream<Item = Result<LanguageModelCompletionEvent, LanguageModelCompletionError>>
 754    {
 755        events.flat_map(move |event| {
 756            futures::stream::iter(match event {
 757                Ok(event) => self.map_event(event),
 758                Err(error) => vec![Err(LanguageModelCompletionError::from(anyhow!(error)))],
 759            })
 760        })
 761    }
 762
 763    pub fn map_event(
 764        &mut self,
 765        event: ResponseStreamEvent,
 766    ) -> Vec<Result<LanguageModelCompletionEvent, LanguageModelCompletionError>> {
 767        let mut events = Vec::new();
 768        if let Some(usage) = event.usage {
 769            events.push(Ok(LanguageModelCompletionEvent::UsageUpdate(TokenUsage {
 770                input_tokens: usage.prompt_tokens,
 771                output_tokens: usage.completion_tokens,
 772                cache_creation_input_tokens: 0,
 773                cache_read_input_tokens: 0,
 774            })));
 775        }
 776
 777        let Some(choice) = event.choices.first() else {
 778            return events;
 779        };
 780
 781        if let Some(delta) = choice.delta.as_ref() {
 782            if let Some(content) = delta.content.clone() {
 783                events.push(Ok(LanguageModelCompletionEvent::Text(content)));
 784            }
 785
 786            if let Some(tool_calls) = delta.tool_calls.as_ref() {
 787                for tool_call in tool_calls {
 788                    let entry = self.tool_calls_by_index.entry(tool_call.index).or_default();
 789
 790                    if let Some(tool_id) = tool_call.id.clone() {
 791                        entry.id = tool_id;
 792                    }
 793
 794                    if let Some(function) = tool_call.function.as_ref() {
 795                        if let Some(name) = function.name.clone() {
 796                            entry.name = name;
 797                        }
 798
 799                        if let Some(arguments) = function.arguments.clone() {
 800                            entry.arguments.push_str(&arguments);
 801                        }
 802                    }
 803                }
 804            }
 805        }
 806
 807        match choice.finish_reason.as_deref() {
 808            Some("stop") => {
 809                events.push(Ok(LanguageModelCompletionEvent::Stop(StopReason::EndTurn)));
 810            }
 811            Some("tool_calls") => {
 812                events.extend(self.tool_calls_by_index.drain().map(|(_, tool_call)| {
 813                    match serde_json::Value::from_str(&tool_call.arguments) {
 814                        Ok(input) => Ok(LanguageModelCompletionEvent::ToolUse(
 815                            LanguageModelToolUse {
 816                                id: tool_call.id.clone().into(),
 817                                name: tool_call.name.as_str().into(),
 818                                is_input_complete: true,
 819                                input,
 820                                raw_input: tool_call.arguments.clone(),
 821                                thought_signature: None,
 822                            },
 823                        )),
 824                        Err(error) => Ok(LanguageModelCompletionEvent::ToolUseJsonParseError {
 825                            id: tool_call.id.into(),
 826                            tool_name: tool_call.name.into(),
 827                            raw_input: tool_call.arguments.clone().into(),
 828                            json_parse_error: error.to_string(),
 829                        }),
 830                    }
 831                }));
 832
 833                events.push(Ok(LanguageModelCompletionEvent::Stop(StopReason::ToolUse)));
 834            }
 835            Some(stop_reason) => {
 836                log::error!("Unexpected OpenAI stop_reason: {stop_reason:?}",);
 837                events.push(Ok(LanguageModelCompletionEvent::Stop(StopReason::EndTurn)));
 838            }
 839            None => {}
 840        }
 841
 842        events
 843    }
 844}
 845
 846#[derive(Default)]
 847struct RawToolCall {
 848    id: String,
 849    name: String,
 850    arguments: String,
 851}
 852
 853pub struct OpenAiResponseEventMapper {
 854    function_calls_by_item: HashMap<String, PendingResponseFunctionCall>,
 855    pending_stop_reason: Option<StopReason>,
 856}
 857
 858#[derive(Default)]
 859struct PendingResponseFunctionCall {
 860    call_id: String,
 861    name: Arc<str>,
 862    arguments: String,
 863}
 864
 865impl OpenAiResponseEventMapper {
 866    pub fn new() -> Self {
 867        Self {
 868            function_calls_by_item: HashMap::default(),
 869            pending_stop_reason: None,
 870        }
 871    }
 872
 873    pub fn map_stream(
 874        mut self,
 875        events: Pin<Box<dyn Send + Stream<Item = Result<ResponsesStreamEvent>>>>,
 876    ) -> impl Stream<Item = Result<LanguageModelCompletionEvent, LanguageModelCompletionError>>
 877    {
 878        events.flat_map(move |event| {
 879            futures::stream::iter(match event {
 880                Ok(event) => self.map_event(event),
 881                Err(error) => vec![Err(LanguageModelCompletionError::from(anyhow!(error)))],
 882            })
 883        })
 884    }
 885
 886    pub fn map_event(
 887        &mut self,
 888        event: ResponsesStreamEvent,
 889    ) -> Vec<Result<LanguageModelCompletionEvent, LanguageModelCompletionError>> {
 890        match event {
 891            ResponsesStreamEvent::OutputItemAdded { item, .. } => {
 892                let mut events = Vec::new();
 893
 894                match &item {
 895                    ResponseOutputItem::Message(message) => {
 896                        if let Some(id) = &message.id {
 897                            events.push(Ok(LanguageModelCompletionEvent::StartMessage {
 898                                message_id: id.clone(),
 899                            }));
 900                        }
 901                    }
 902                    ResponseOutputItem::FunctionCall(function_call) => {
 903                        if let Some(item_id) = function_call.id.clone() {
 904                            let call_id = function_call
 905                                .call_id
 906                                .clone()
 907                                .or_else(|| function_call.id.clone())
 908                                .unwrap_or_else(|| item_id.clone());
 909                            let entry = PendingResponseFunctionCall {
 910                                call_id,
 911                                name: Arc::<str>::from(
 912                                    function_call.name.clone().unwrap_or_default(),
 913                                ),
 914                                arguments: function_call.arguments.clone(),
 915                            };
 916                            self.function_calls_by_item.insert(item_id, entry);
 917                        }
 918                    }
 919                    ResponseOutputItem::Unknown => {}
 920                }
 921                events
 922            }
 923            ResponsesStreamEvent::OutputTextDelta { delta, .. } => {
 924                if delta.is_empty() {
 925                    Vec::new()
 926                } else {
 927                    vec![Ok(LanguageModelCompletionEvent::Text(delta))]
 928                }
 929            }
 930            ResponsesStreamEvent::FunctionCallArgumentsDelta { item_id, delta, .. } => {
 931                if let Some(entry) = self.function_calls_by_item.get_mut(&item_id) {
 932                    entry.arguments.push_str(&delta);
 933                }
 934                Vec::new()
 935            }
 936            ResponsesStreamEvent::FunctionCallArgumentsDone {
 937                item_id, arguments, ..
 938            } => {
 939                if let Some(mut entry) = self.function_calls_by_item.remove(&item_id) {
 940                    if !arguments.is_empty() {
 941                        entry.arguments = arguments;
 942                    }
 943                    let raw_input = entry.arguments.clone();
 944                    self.pending_stop_reason = Some(StopReason::ToolUse);
 945                    match serde_json::from_str::<serde_json::Value>(&entry.arguments) {
 946                        Ok(input) => {
 947                            vec![Ok(LanguageModelCompletionEvent::ToolUse(
 948                                LanguageModelToolUse {
 949                                    id: LanguageModelToolUseId::from(entry.call_id.clone()),
 950                                    name: entry.name.clone(),
 951                                    is_input_complete: true,
 952                                    input,
 953                                    raw_input,
 954                                    thought_signature: None,
 955                                },
 956                            ))]
 957                        }
 958                        Err(error) => {
 959                            vec![Ok(LanguageModelCompletionEvent::ToolUseJsonParseError {
 960                                id: LanguageModelToolUseId::from(entry.call_id.clone()),
 961                                tool_name: entry.name.clone(),
 962                                raw_input: Arc::<str>::from(raw_input),
 963                                json_parse_error: error.to_string(),
 964                            })]
 965                        }
 966                    }
 967                } else {
 968                    Vec::new()
 969                }
 970            }
 971            ResponsesStreamEvent::Completed { response } => {
 972                self.handle_completion(response, StopReason::EndTurn)
 973            }
 974            ResponsesStreamEvent::Incomplete { response } => {
 975                let reason = response
 976                    .status_details
 977                    .as_ref()
 978                    .and_then(|details| details.reason.as_deref());
 979                let stop_reason = match reason {
 980                    Some("max_output_tokens") => StopReason::MaxTokens,
 981                    Some("content_filter") => {
 982                        self.pending_stop_reason = Some(StopReason::Refusal);
 983                        StopReason::Refusal
 984                    }
 985                    _ => self
 986                        .pending_stop_reason
 987                        .take()
 988                        .unwrap_or(StopReason::EndTurn),
 989                };
 990
 991                let mut events = Vec::new();
 992                if self.pending_stop_reason.is_none() {
 993                    events.extend(self.emit_tool_calls_from_output(&response.output));
 994                }
 995                if let Some(usage) = response.usage.as_ref() {
 996                    events.push(Ok(LanguageModelCompletionEvent::UsageUpdate(
 997                        token_usage_from_response_usage(usage),
 998                    )));
 999                }
1000                events.push(Ok(LanguageModelCompletionEvent::Stop(stop_reason)));
1001                events
1002            }
1003            ResponsesStreamEvent::Failed { response } => {
1004                let message = response
1005                    .status_details
1006                    .and_then(|details| details.error)
1007                    .map(|error| error.to_string())
1008                    .unwrap_or_else(|| "response failed".to_string());
1009                vec![Err(LanguageModelCompletionError::Other(anyhow!(message)))]
1010            }
1011            ResponsesStreamEvent::Error { error }
1012            | ResponsesStreamEvent::GenericError { error } => {
1013                vec![Err(LanguageModelCompletionError::Other(anyhow!(format!(
1014                    "{error:?}"
1015                ))))]
1016            }
1017            ResponsesStreamEvent::OutputTextDone { .. } => Vec::new(),
1018            ResponsesStreamEvent::OutputItemDone { .. }
1019            | ResponsesStreamEvent::ContentPartAdded { .. }
1020            | ResponsesStreamEvent::ContentPartDone { .. }
1021            | ResponsesStreamEvent::Created { .. }
1022            | ResponsesStreamEvent::InProgress { .. }
1023            | ResponsesStreamEvent::Unknown => Vec::new(),
1024        }
1025    }
1026
1027    fn handle_completion(
1028        &mut self,
1029        response: ResponsesSummary,
1030        default_reason: StopReason,
1031    ) -> Vec<Result<LanguageModelCompletionEvent, LanguageModelCompletionError>> {
1032        let mut events = Vec::new();
1033
1034        if self.pending_stop_reason.is_none() {
1035            events.extend(self.emit_tool_calls_from_output(&response.output));
1036        }
1037
1038        if let Some(usage) = response.usage.as_ref() {
1039            events.push(Ok(LanguageModelCompletionEvent::UsageUpdate(
1040                token_usage_from_response_usage(usage),
1041            )));
1042        }
1043
1044        let stop_reason = self.pending_stop_reason.take().unwrap_or(default_reason);
1045        events.push(Ok(LanguageModelCompletionEvent::Stop(stop_reason)));
1046        events
1047    }
1048
1049    fn emit_tool_calls_from_output(
1050        &mut self,
1051        output: &[ResponseOutputItem],
1052    ) -> Vec<Result<LanguageModelCompletionEvent, LanguageModelCompletionError>> {
1053        let mut events = Vec::new();
1054        for item in output {
1055            if let ResponseOutputItem::FunctionCall(function_call) = item {
1056                let Some(call_id) = function_call
1057                    .call_id
1058                    .clone()
1059                    .or_else(|| function_call.id.clone())
1060                else {
1061                    log::error!(
1062                        "Function call item missing both call_id and id: {:?}",
1063                        function_call
1064                    );
1065                    continue;
1066                };
1067                let name: Arc<str> = Arc::from(function_call.name.clone().unwrap_or_default());
1068                let arguments = &function_call.arguments;
1069                if !arguments.is_empty() {
1070                    self.pending_stop_reason = Some(StopReason::ToolUse);
1071                    match serde_json::from_str::<serde_json::Value>(arguments) {
1072                        Ok(input) => {
1073                            events.push(Ok(LanguageModelCompletionEvent::ToolUse(
1074                                LanguageModelToolUse {
1075                                    id: LanguageModelToolUseId::from(call_id.clone()),
1076                                    name: name.clone(),
1077                                    is_input_complete: true,
1078                                    input,
1079                                    raw_input: arguments.clone(),
1080                                    thought_signature: None,
1081                                },
1082                            )));
1083                        }
1084                        Err(error) => {
1085                            events.push(Ok(LanguageModelCompletionEvent::ToolUseJsonParseError {
1086                                id: LanguageModelToolUseId::from(call_id.clone()),
1087                                tool_name: name.clone(),
1088                                raw_input: Arc::<str>::from(arguments.clone()),
1089                                json_parse_error: error.to_string(),
1090                            }));
1091                        }
1092                    }
1093                }
1094            }
1095        }
1096        events
1097    }
1098}
1099
1100fn token_usage_from_response_usage(usage: &ResponsesUsage) -> TokenUsage {
1101    TokenUsage {
1102        input_tokens: usage.input_tokens.unwrap_or_default(),
1103        output_tokens: usage.output_tokens.unwrap_or_default(),
1104        cache_creation_input_tokens: 0,
1105        cache_read_input_tokens: 0,
1106    }
1107}
1108
1109pub(crate) fn collect_tiktoken_messages(
1110    request: LanguageModelRequest,
1111) -> Vec<tiktoken_rs::ChatCompletionRequestMessage> {
1112    request
1113        .messages
1114        .into_iter()
1115        .map(|message| tiktoken_rs::ChatCompletionRequestMessage {
1116            role: match message.role {
1117                Role::User => "user".into(),
1118                Role::Assistant => "assistant".into(),
1119                Role::System => "system".into(),
1120            },
1121            content: Some(message.string_contents()),
1122            name: None,
1123            function_call: None,
1124        })
1125        .collect::<Vec<_>>()
1126}
1127
1128pub fn count_open_ai_tokens(
1129    request: LanguageModelRequest,
1130    model: Model,
1131    cx: &App,
1132) -> BoxFuture<'static, Result<u64>> {
1133    cx.background_spawn(async move {
1134        let messages = collect_tiktoken_messages(request);
1135        match model {
1136            Model::Custom { max_tokens, .. } => {
1137                let model = if max_tokens >= 100_000 {
1138                    // If the max tokens is 100k or more, it is likely the o200k_base tokenizer from gpt4o
1139                    "gpt-4o"
1140                } else {
1141                    // Otherwise fallback to gpt-4, since only cl100k_base and o200k_base are
1142                    // supported with this tiktoken method
1143                    "gpt-4"
1144                };
1145                tiktoken_rs::num_tokens_from_messages(model, &messages)
1146            }
1147            // Currently supported by tiktoken_rs
1148            // Sometimes tiktoken-rs is behind on model support. If that is the case, make a new branch
1149            // arm with an override. We enumerate all supported models here so that we can check if new
1150            // models are supported yet or not.
1151            Model::ThreePointFiveTurbo
1152            | Model::Four
1153            | Model::FourTurbo
1154            | Model::FourOmni
1155            | Model::FourOmniMini
1156            | Model::FourPointOne
1157            | Model::FourPointOneMini
1158            | Model::FourPointOneNano
1159            | Model::O1
1160            | Model::O3
1161            | Model::O3Mini
1162            | Model::O4Mini
1163            | Model::Five
1164            | Model::FiveCodex
1165            | Model::FiveMini
1166            | Model::FiveNano => tiktoken_rs::num_tokens_from_messages(model.id(), &messages),
1167            // GPT-5.1, 5.2, and 5.2-codex don't have dedicated tiktoken support; use gpt-5 tokenizer
1168            Model::FivePointOne | Model::FivePointTwo | Model::FivePointTwoCodex => {
1169                tiktoken_rs::num_tokens_from_messages("gpt-5", &messages)
1170            }
1171        }
1172        .map(|tokens| tokens as u64)
1173    })
1174    .boxed()
1175}
1176
1177struct ConfigurationView {
1178    api_key_editor: Entity<InputField>,
1179    state: Entity<State>,
1180    load_credentials_task: Option<Task<()>>,
1181}
1182
1183impl ConfigurationView {
1184    fn new(state: Entity<State>, window: &mut Window, cx: &mut Context<Self>) -> Self {
1185        let api_key_editor = cx.new(|cx| {
1186            InputField::new(
1187                window,
1188                cx,
1189                "sk-000000000000000000000000000000000000000000000000",
1190            )
1191        });
1192
1193        cx.observe(&state, |_, _, cx| {
1194            cx.notify();
1195        })
1196        .detach();
1197
1198        let load_credentials_task = Some(cx.spawn_in(window, {
1199            let state = state.clone();
1200            async move |this, cx| {
1201                if let Some(task) = Some(state.update(cx, |state, cx| state.authenticate(cx))) {
1202                    // We don't log an error, because "not signed in" is also an error.
1203                    let _ = task.await;
1204                }
1205                this.update(cx, |this, cx| {
1206                    this.load_credentials_task = None;
1207                    cx.notify();
1208                })
1209                .log_err();
1210            }
1211        }));
1212
1213        Self {
1214            api_key_editor,
1215            state,
1216            load_credentials_task,
1217        }
1218    }
1219
1220    fn save_api_key(&mut self, _: &menu::Confirm, window: &mut Window, cx: &mut Context<Self>) {
1221        let api_key = self.api_key_editor.read(cx).text(cx).trim().to_string();
1222        if api_key.is_empty() {
1223            return;
1224        }
1225
1226        // url changes can cause the editor to be displayed again
1227        self.api_key_editor
1228            .update(cx, |editor, cx| editor.set_text("", window, cx));
1229
1230        let state = self.state.clone();
1231        cx.spawn_in(window, async move |_, cx| {
1232            state
1233                .update(cx, |state, cx| state.set_api_key(Some(api_key), cx))
1234                .await
1235        })
1236        .detach_and_log_err(cx);
1237    }
1238
1239    fn reset_api_key(&mut self, window: &mut Window, cx: &mut Context<Self>) {
1240        self.api_key_editor
1241            .update(cx, |input, cx| input.set_text("", window, cx));
1242
1243        let state = self.state.clone();
1244        cx.spawn_in(window, async move |_, cx| {
1245            state
1246                .update(cx, |state, cx| state.set_api_key(None, cx))
1247                .await
1248        })
1249        .detach_and_log_err(cx);
1250    }
1251
1252    fn should_render_editor(&self, cx: &mut Context<Self>) -> bool {
1253        !self.state.read(cx).is_authenticated()
1254    }
1255}
1256
1257impl Render for ConfigurationView {
1258    fn render(&mut self, _: &mut Window, cx: &mut Context<Self>) -> impl IntoElement {
1259        let env_var_set = self.state.read(cx).api_key_state.is_from_env_var();
1260        let configured_card_label = if env_var_set {
1261            format!("API key set in {API_KEY_ENV_VAR_NAME} environment variable")
1262        } else {
1263            let api_url = OpenAiLanguageModelProvider::api_url(cx);
1264            if api_url == OPEN_AI_API_URL {
1265                "API key configured".to_string()
1266            } else {
1267                format!("API key configured for {}", api_url)
1268            }
1269        };
1270
1271        let api_key_section = if self.should_render_editor(cx) {
1272            v_flex()
1273                .on_action(cx.listener(Self::save_api_key))
1274                .child(Label::new("To use Zed's agent with OpenAI, you need to add an API key. Follow these steps:"))
1275                .child(
1276                    List::new()
1277                        .child(
1278                            ListBulletItem::new("")
1279                                .child(Label::new("Create one by visiting"))
1280                                .child(ButtonLink::new("OpenAI's console", "https://platform.openai.com/api-keys"))
1281                        )
1282                        .child(
1283                            ListBulletItem::new("Ensure your OpenAI account has credits")
1284                        )
1285                        .child(
1286                            ListBulletItem::new("Paste your API key below and hit enter to start using the agent")
1287                        ),
1288                )
1289                .child(self.api_key_editor.clone())
1290                .child(
1291                    Label::new(format!(
1292                        "You can also set the {API_KEY_ENV_VAR_NAME} environment variable and restart Zed."
1293                    ))
1294                    .size(LabelSize::Small)
1295                    .color(Color::Muted),
1296                )
1297                .child(
1298                    Label::new(
1299                        "Note that having a subscription for another service like GitHub Copilot won't work.",
1300                    )
1301                    .size(LabelSize::Small).color(Color::Muted),
1302                )
1303                .into_any_element()
1304        } else {
1305            ConfiguredApiCard::new(configured_card_label)
1306                .disabled(env_var_set)
1307                .on_click(cx.listener(|this, _, window, cx| this.reset_api_key(window, cx)))
1308                .when(env_var_set, |this| {
1309                    this.tooltip_label(format!("To reset your API key, unset the {API_KEY_ENV_VAR_NAME} environment variable."))
1310                })
1311                .into_any_element()
1312        };
1313
1314        let compatible_api_section = h_flex()
1315            .mt_1p5()
1316            .gap_0p5()
1317            .flex_wrap()
1318            .when(self.should_render_editor(cx), |this| {
1319                this.pt_1p5()
1320                    .border_t_1()
1321                    .border_color(cx.theme().colors().border_variant)
1322            })
1323            .child(
1324                h_flex()
1325                    .gap_2()
1326                    .child(
1327                        Icon::new(IconName::Info)
1328                            .size(IconSize::XSmall)
1329                            .color(Color::Muted),
1330                    )
1331                    .child(Label::new("Zed also supports OpenAI-compatible models.")),
1332            )
1333            .child(
1334                Button::new("docs", "Learn More")
1335                    .icon(IconName::ArrowUpRight)
1336                    .icon_size(IconSize::Small)
1337                    .icon_color(Color::Muted)
1338                    .on_click(move |_, _window, cx| {
1339                        cx.open_url("https://zed.dev/docs/ai/llm-providers#openai-api-compatible")
1340                    }),
1341            );
1342
1343        if self.load_credentials_task.is_some() {
1344            div().child(Label::new("Loading credentials…")).into_any()
1345        } else {
1346            v_flex()
1347                .size_full()
1348                .child(api_key_section)
1349                .child(compatible_api_section)
1350                .into_any()
1351        }
1352    }
1353}
1354
1355#[cfg(test)]
1356mod tests {
1357    use futures::{StreamExt, executor::block_on};
1358    use gpui::TestAppContext;
1359    use language_model::{LanguageModelRequestMessage, LanguageModelRequestTool};
1360    use open_ai::responses::{
1361        ResponseFunctionToolCall, ResponseOutputItem, ResponseOutputMessage, ResponseStatusDetails,
1362        ResponseSummary, ResponseUsage, StreamEvent as ResponsesStreamEvent,
1363    };
1364    use pretty_assertions::assert_eq;
1365    use serde_json::json;
1366
1367    use super::*;
1368
1369    fn map_response_events(events: Vec<ResponsesStreamEvent>) -> Vec<LanguageModelCompletionEvent> {
1370        block_on(async {
1371            OpenAiResponseEventMapper::new()
1372                .map_stream(Box::pin(futures::stream::iter(events.into_iter().map(Ok))))
1373                .collect::<Vec<_>>()
1374                .await
1375                .into_iter()
1376                .map(Result::unwrap)
1377                .collect()
1378        })
1379    }
1380
1381    fn response_item_message(id: &str) -> ResponseOutputItem {
1382        ResponseOutputItem::Message(ResponseOutputMessage {
1383            id: Some(id.to_string()),
1384            role: Some("assistant".to_string()),
1385            status: Some("in_progress".to_string()),
1386            content: vec![],
1387        })
1388    }
1389
1390    fn response_item_function_call(id: &str, args: Option<&str>) -> ResponseOutputItem {
1391        ResponseOutputItem::FunctionCall(ResponseFunctionToolCall {
1392            id: Some(id.to_string()),
1393            status: Some("in_progress".to_string()),
1394            name: Some("get_weather".to_string()),
1395            call_id: Some("call_123".to_string()),
1396            arguments: args.map(|s| s.to_string()).unwrap_or_default(),
1397        })
1398    }
1399
1400    #[gpui::test]
1401    fn tiktoken_rs_support(cx: &TestAppContext) {
1402        let request = LanguageModelRequest {
1403            thread_id: None,
1404            prompt_id: None,
1405            intent: None,
1406            mode: None,
1407            messages: vec![LanguageModelRequestMessage {
1408                role: Role::User,
1409                content: vec![MessageContent::Text("message".into())],
1410                cache: false,
1411                reasoning_details: None,
1412            }],
1413            tools: vec![],
1414            tool_choice: None,
1415            stop: vec![],
1416            temperature: None,
1417            thinking_allowed: true,
1418        };
1419
1420        // Validate that all models are supported by tiktoken-rs
1421        for model in Model::iter() {
1422            let count = cx
1423                .foreground_executor()
1424                .block_on(count_open_ai_tokens(
1425                    request.clone(),
1426                    model,
1427                    &cx.app.borrow(),
1428                ))
1429                .unwrap();
1430            assert!(count > 0);
1431        }
1432    }
1433
1434    #[test]
1435    fn responses_stream_maps_text_and_usage() {
1436        let events = vec![
1437            ResponsesStreamEvent::OutputItemAdded {
1438                output_index: 0,
1439                sequence_number: None,
1440                item: response_item_message("msg_123"),
1441            },
1442            ResponsesStreamEvent::OutputTextDelta {
1443                item_id: "msg_123".into(),
1444                output_index: 0,
1445                content_index: Some(0),
1446                delta: "Hello".into(),
1447            },
1448            ResponsesStreamEvent::Completed {
1449                response: ResponseSummary {
1450                    usage: Some(ResponseUsage {
1451                        input_tokens: Some(5),
1452                        output_tokens: Some(3),
1453                        total_tokens: Some(8),
1454                    }),
1455                    ..Default::default()
1456                },
1457            },
1458        ];
1459
1460        let mapped = map_response_events(events);
1461        assert!(matches!(
1462            mapped[0],
1463            LanguageModelCompletionEvent::StartMessage { ref message_id } if message_id == "msg_123"
1464        ));
1465        assert!(matches!(
1466            mapped[1],
1467            LanguageModelCompletionEvent::Text(ref text) if text == "Hello"
1468        ));
1469        assert!(matches!(
1470            mapped[2],
1471            LanguageModelCompletionEvent::UsageUpdate(TokenUsage {
1472                input_tokens: 5,
1473                output_tokens: 3,
1474                ..
1475            })
1476        ));
1477        assert!(matches!(
1478            mapped[3],
1479            LanguageModelCompletionEvent::Stop(StopReason::EndTurn)
1480        ));
1481    }
1482
1483    #[test]
1484    fn into_open_ai_response_builds_complete_payload() {
1485        let tool_call_id = LanguageModelToolUseId::from("call-42");
1486        let tool_input = json!({ "city": "Boston" });
1487        let tool_arguments = serde_json::to_string(&tool_input).unwrap();
1488        let tool_use = LanguageModelToolUse {
1489            id: tool_call_id.clone(),
1490            name: Arc::from("get_weather"),
1491            raw_input: tool_arguments.clone(),
1492            input: tool_input,
1493            is_input_complete: true,
1494            thought_signature: None,
1495        };
1496        let tool_result = LanguageModelToolResult {
1497            tool_use_id: tool_call_id,
1498            tool_name: Arc::from("get_weather"),
1499            is_error: false,
1500            content: LanguageModelToolResultContent::Text(Arc::from("Sunny")),
1501            output: Some(json!({ "forecast": "Sunny" })),
1502        };
1503        let user_image = LanguageModelImage {
1504            source: SharedString::from("aGVsbG8="),
1505            size: None,
1506        };
1507        let expected_image_url = user_image.to_base64_url();
1508
1509        let request = LanguageModelRequest {
1510            thread_id: Some("thread-123".into()),
1511            prompt_id: None,
1512            intent: None,
1513            mode: None,
1514            messages: vec![
1515                LanguageModelRequestMessage {
1516                    role: Role::System,
1517                    content: vec![MessageContent::Text("System context".into())],
1518                    cache: false,
1519                    reasoning_details: None,
1520                },
1521                LanguageModelRequestMessage {
1522                    role: Role::User,
1523                    content: vec![
1524                        MessageContent::Text("Please check the weather.".into()),
1525                        MessageContent::Image(user_image),
1526                    ],
1527                    cache: false,
1528                    reasoning_details: None,
1529                },
1530                LanguageModelRequestMessage {
1531                    role: Role::Assistant,
1532                    content: vec![
1533                        MessageContent::Text("Looking that up.".into()),
1534                        MessageContent::ToolUse(tool_use),
1535                    ],
1536                    cache: false,
1537                    reasoning_details: None,
1538                },
1539                LanguageModelRequestMessage {
1540                    role: Role::Assistant,
1541                    content: vec![MessageContent::ToolResult(tool_result)],
1542                    cache: false,
1543                    reasoning_details: None,
1544                },
1545            ],
1546            tools: vec![LanguageModelRequestTool {
1547                name: "get_weather".into(),
1548                description: "Fetches the weather".into(),
1549                input_schema: json!({ "type": "object" }),
1550            }],
1551            tool_choice: Some(LanguageModelToolChoice::Any),
1552            stop: vec!["<STOP>".into()],
1553            temperature: None,
1554            thinking_allowed: false,
1555        };
1556
1557        let response = into_open_ai_response(
1558            request,
1559            "custom-model",
1560            true,
1561            true,
1562            Some(2048),
1563            Some(ReasoningEffort::Low),
1564        );
1565
1566        let serialized = serde_json::to_value(&response).unwrap();
1567        let expected = json!({
1568            "model": "custom-model",
1569            "input": [
1570                {
1571                    "type": "message",
1572                    "role": "system",
1573                    "content": [
1574                        { "type": "input_text", "text": "System context" }
1575                    ]
1576                },
1577                {
1578                    "type": "message",
1579                    "role": "user",
1580                    "content": [
1581                        { "type": "input_text", "text": "Please check the weather." },
1582                        { "type": "input_image", "image_url": expected_image_url }
1583                    ]
1584                },
1585                {
1586                    "type": "message",
1587                    "role": "assistant",
1588                    "content": [
1589                        { "type": "output_text", "text": "Looking that up.", "annotations": [] }
1590                    ]
1591                },
1592                {
1593                    "type": "function_call",
1594                    "call_id": "call-42",
1595                    "name": "get_weather",
1596                    "arguments": tool_arguments
1597                },
1598                {
1599                    "type": "function_call_output",
1600                    "call_id": "call-42",
1601                    "output": "{\"forecast\":\"Sunny\"}"
1602                }
1603            ],
1604            "stream": true,
1605            "max_output_tokens": 2048,
1606            "parallel_tool_calls": true,
1607            "tool_choice": "required",
1608            "tools": [
1609                {
1610                    "type": "function",
1611                    "name": "get_weather",
1612                    "description": "Fetches the weather",
1613                    "parameters": { "type": "object" }
1614                }
1615            ],
1616            "prompt_cache_key": "thread-123",
1617            "reasoning": { "effort": "low" }
1618        });
1619
1620        assert_eq!(serialized, expected);
1621    }
1622
1623    #[test]
1624    fn responses_stream_maps_tool_calls() {
1625        let events = vec![
1626            ResponsesStreamEvent::OutputItemAdded {
1627                output_index: 0,
1628                sequence_number: None,
1629                item: response_item_function_call("item_fn", Some("{\"city\":\"Bos")),
1630            },
1631            ResponsesStreamEvent::FunctionCallArgumentsDelta {
1632                item_id: "item_fn".into(),
1633                output_index: 0,
1634                delta: "ton\"}".into(),
1635                sequence_number: None,
1636            },
1637            ResponsesStreamEvent::FunctionCallArgumentsDone {
1638                item_id: "item_fn".into(),
1639                output_index: 0,
1640                arguments: "{\"city\":\"Boston\"}".into(),
1641                sequence_number: None,
1642            },
1643            ResponsesStreamEvent::Completed {
1644                response: ResponseSummary::default(),
1645            },
1646        ];
1647
1648        let mapped = map_response_events(events);
1649        assert!(matches!(
1650            mapped[0],
1651            LanguageModelCompletionEvent::ToolUse(LanguageModelToolUse {
1652                ref id,
1653                ref name,
1654                ref raw_input,
1655                ..
1656            }) if id.to_string() == "call_123"
1657                && name.as_ref() == "get_weather"
1658                && raw_input == "{\"city\":\"Boston\"}"
1659        ));
1660        assert!(matches!(
1661            mapped[1],
1662            LanguageModelCompletionEvent::Stop(StopReason::ToolUse)
1663        ));
1664    }
1665
1666    #[test]
1667    fn responses_stream_uses_max_tokens_stop_reason() {
1668        let events = vec![ResponsesStreamEvent::Incomplete {
1669            response: ResponseSummary {
1670                status_details: Some(ResponseStatusDetails {
1671                    reason: Some("max_output_tokens".into()),
1672                    r#type: Some("incomplete".into()),
1673                    error: None,
1674                }),
1675                usage: Some(ResponseUsage {
1676                    input_tokens: Some(10),
1677                    output_tokens: Some(20),
1678                    total_tokens: Some(30),
1679                }),
1680                ..Default::default()
1681            },
1682        }];
1683
1684        let mapped = map_response_events(events);
1685        assert!(matches!(
1686            mapped[0],
1687            LanguageModelCompletionEvent::UsageUpdate(TokenUsage {
1688                input_tokens: 10,
1689                output_tokens: 20,
1690                ..
1691            })
1692        ));
1693        assert!(matches!(
1694            mapped[1],
1695            LanguageModelCompletionEvent::Stop(StopReason::MaxTokens)
1696        ));
1697    }
1698
1699    #[test]
1700    fn responses_stream_handles_multiple_tool_calls() {
1701        let events = vec![
1702            ResponsesStreamEvent::OutputItemAdded {
1703                output_index: 0,
1704                sequence_number: None,
1705                item: response_item_function_call("item_fn1", Some("{\"city\":\"NYC\"}")),
1706            },
1707            ResponsesStreamEvent::FunctionCallArgumentsDone {
1708                item_id: "item_fn1".into(),
1709                output_index: 0,
1710                arguments: "{\"city\":\"NYC\"}".into(),
1711                sequence_number: None,
1712            },
1713            ResponsesStreamEvent::OutputItemAdded {
1714                output_index: 1,
1715                sequence_number: None,
1716                item: response_item_function_call("item_fn2", Some("{\"city\":\"LA\"}")),
1717            },
1718            ResponsesStreamEvent::FunctionCallArgumentsDone {
1719                item_id: "item_fn2".into(),
1720                output_index: 1,
1721                arguments: "{\"city\":\"LA\"}".into(),
1722                sequence_number: None,
1723            },
1724            ResponsesStreamEvent::Completed {
1725                response: ResponseSummary::default(),
1726            },
1727        ];
1728
1729        let mapped = map_response_events(events);
1730        assert_eq!(mapped.len(), 3);
1731        assert!(matches!(
1732            mapped[0],
1733            LanguageModelCompletionEvent::ToolUse(LanguageModelToolUse { ref raw_input, .. })
1734            if raw_input == "{\"city\":\"NYC\"}"
1735        ));
1736        assert!(matches!(
1737            mapped[1],
1738            LanguageModelCompletionEvent::ToolUse(LanguageModelToolUse { ref raw_input, .. })
1739            if raw_input == "{\"city\":\"LA\"}"
1740        ));
1741        assert!(matches!(
1742            mapped[2],
1743            LanguageModelCompletionEvent::Stop(StopReason::ToolUse)
1744        ));
1745    }
1746
1747    #[test]
1748    fn responses_stream_handles_mixed_text_and_tool_calls() {
1749        let events = vec![
1750            ResponsesStreamEvent::OutputItemAdded {
1751                output_index: 0,
1752                sequence_number: None,
1753                item: response_item_message("msg_123"),
1754            },
1755            ResponsesStreamEvent::OutputTextDelta {
1756                item_id: "msg_123".into(),
1757                output_index: 0,
1758                content_index: Some(0),
1759                delta: "Let me check that".into(),
1760            },
1761            ResponsesStreamEvent::OutputItemAdded {
1762                output_index: 1,
1763                sequence_number: None,
1764                item: response_item_function_call("item_fn", Some("{\"query\":\"test\"}")),
1765            },
1766            ResponsesStreamEvent::FunctionCallArgumentsDone {
1767                item_id: "item_fn".into(),
1768                output_index: 1,
1769                arguments: "{\"query\":\"test\"}".into(),
1770                sequence_number: None,
1771            },
1772            ResponsesStreamEvent::Completed {
1773                response: ResponseSummary::default(),
1774            },
1775        ];
1776
1777        let mapped = map_response_events(events);
1778        assert!(matches!(
1779            mapped[0],
1780            LanguageModelCompletionEvent::StartMessage { .. }
1781        ));
1782        assert!(matches!(
1783            mapped[1],
1784            LanguageModelCompletionEvent::Text(ref text) if text == "Let me check that"
1785        ));
1786        assert!(matches!(
1787            mapped[2],
1788            LanguageModelCompletionEvent::ToolUse(LanguageModelToolUse { ref raw_input, .. })
1789            if raw_input == "{\"query\":\"test\"}"
1790        ));
1791        assert!(matches!(
1792            mapped[3],
1793            LanguageModelCompletionEvent::Stop(StopReason::ToolUse)
1794        ));
1795    }
1796
1797    #[test]
1798    fn responses_stream_handles_json_parse_error() {
1799        let events = vec![
1800            ResponsesStreamEvent::OutputItemAdded {
1801                output_index: 0,
1802                sequence_number: None,
1803                item: response_item_function_call("item_fn", Some("{invalid json")),
1804            },
1805            ResponsesStreamEvent::FunctionCallArgumentsDone {
1806                item_id: "item_fn".into(),
1807                output_index: 0,
1808                arguments: "{invalid json".into(),
1809                sequence_number: None,
1810            },
1811            ResponsesStreamEvent::Completed {
1812                response: ResponseSummary::default(),
1813            },
1814        ];
1815
1816        let mapped = map_response_events(events);
1817        assert!(matches!(
1818            mapped[0],
1819            LanguageModelCompletionEvent::ToolUseJsonParseError {
1820                ref raw_input,
1821                ..
1822            } if raw_input.as_ref() == "{invalid json"
1823        ));
1824    }
1825
1826    #[test]
1827    fn responses_stream_handles_incomplete_function_call() {
1828        let events = vec![
1829            ResponsesStreamEvent::OutputItemAdded {
1830                output_index: 0,
1831                sequence_number: None,
1832                item: response_item_function_call("item_fn", Some("{\"city\":")),
1833            },
1834            ResponsesStreamEvent::FunctionCallArgumentsDelta {
1835                item_id: "item_fn".into(),
1836                output_index: 0,
1837                delta: "\"Boston\"".into(),
1838                sequence_number: None,
1839            },
1840            ResponsesStreamEvent::Incomplete {
1841                response: ResponseSummary {
1842                    status_details: Some(ResponseStatusDetails {
1843                        reason: Some("max_output_tokens".into()),
1844                        r#type: Some("incomplete".into()),
1845                        error: None,
1846                    }),
1847                    output: vec![response_item_function_call(
1848                        "item_fn",
1849                        Some("{\"city\":\"Boston\"}"),
1850                    )],
1851                    ..Default::default()
1852                },
1853            },
1854        ];
1855
1856        let mapped = map_response_events(events);
1857        assert!(matches!(
1858            mapped[0],
1859            LanguageModelCompletionEvent::ToolUse(LanguageModelToolUse { ref raw_input, .. })
1860            if raw_input == "{\"city\":\"Boston\"}"
1861        ));
1862        assert!(matches!(
1863            mapped[1],
1864            LanguageModelCompletionEvent::Stop(StopReason::MaxTokens)
1865        ));
1866    }
1867
1868    #[test]
1869    fn responses_stream_incomplete_does_not_duplicate_tool_calls() {
1870        let events = vec![
1871            ResponsesStreamEvent::OutputItemAdded {
1872                output_index: 0,
1873                sequence_number: None,
1874                item: response_item_function_call("item_fn", Some("{\"city\":\"Boston\"}")),
1875            },
1876            ResponsesStreamEvent::FunctionCallArgumentsDone {
1877                item_id: "item_fn".into(),
1878                output_index: 0,
1879                arguments: "{\"city\":\"Boston\"}".into(),
1880                sequence_number: None,
1881            },
1882            ResponsesStreamEvent::Incomplete {
1883                response: ResponseSummary {
1884                    status_details: Some(ResponseStatusDetails {
1885                        reason: Some("max_output_tokens".into()),
1886                        r#type: Some("incomplete".into()),
1887                        error: None,
1888                    }),
1889                    output: vec![response_item_function_call(
1890                        "item_fn",
1891                        Some("{\"city\":\"Boston\"}"),
1892                    )],
1893                    ..Default::default()
1894                },
1895            },
1896        ];
1897
1898        let mapped = map_response_events(events);
1899        assert_eq!(mapped.len(), 2);
1900        assert!(matches!(
1901            mapped[0],
1902            LanguageModelCompletionEvent::ToolUse(LanguageModelToolUse { ref raw_input, .. })
1903            if raw_input == "{\"city\":\"Boston\"}"
1904        ));
1905        assert!(matches!(
1906            mapped[1],
1907            LanguageModelCompletionEvent::Stop(StopReason::MaxTokens)
1908        ));
1909    }
1910}