open_ai.rs

use anyhow::{Result, anyhow};
use collections::{BTreeMap, HashMap};
use futures::Stream;
use futures::{FutureExt, StreamExt, future, future::BoxFuture};
use gpui::{AnyView, App, AsyncApp, Context, Entity, SharedString, Task, Window};
use http_client::HttpClient;
use language_model::{
    ApiKeyState, AuthenticateError, EnvVar, IconOrSvg, LanguageModel, LanguageModelCompletionError,
    LanguageModelCompletionEvent, LanguageModelId, LanguageModelImage, LanguageModelName,
    LanguageModelProvider, LanguageModelProviderId, LanguageModelProviderName,
    LanguageModelProviderState, LanguageModelRequest, LanguageModelRequestMessage,
    LanguageModelToolChoice, LanguageModelToolResult, LanguageModelToolResultContent,
    LanguageModelToolUse, LanguageModelToolUseId, MessageContent, RateLimiter, Role, StopReason,
    TokenUsage, env_var,
};
use menu;
use open_ai::{
    ImageUrl, Model, OPEN_AI_API_URL, ReasoningEffort, ResponseStreamEvent,
    responses::{
        Request as ResponseRequest, ResponseOutputItem, ResponseSummary as ResponsesSummary,
        ResponseUsage as ResponsesUsage, StreamEvent as ResponsesStreamEvent, stream_response,
    },
    stream_completion,
};
use serde_json::{Value, json};
use settings::{OpenAiAvailableModel as AvailableModel, Settings, SettingsStore};
use std::pin::Pin;
use std::str::FromStr as _;
use std::sync::{Arc, LazyLock};
use strum::IntoEnumIterator;
use ui::{ButtonLink, ConfiguredApiCard, List, ListBulletItem, prelude::*};
use ui_input::InputField;
use util::ResultExt;

const PROVIDER_ID: LanguageModelProviderId = language_model::OPEN_AI_PROVIDER_ID;
const PROVIDER_NAME: LanguageModelProviderName = language_model::OPEN_AI_PROVIDER_NAME;

const API_KEY_ENV_VAR_NAME: &str = "OPENAI_API_KEY";
static API_KEY_ENV_VAR: LazyLock<EnvVar> = env_var!(API_KEY_ENV_VAR_NAME);

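/// Settings for the OpenAI provider: the API endpoint to use and any
/// extra models configured by the user beyond the built-in set.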
#[derive(Default, Clone, Debug, PartialEq)]
pub struct OpenAiSettings {
    pub api_url: String,
    pub available_models: Vec<AvailableModel>,
}

pub struct OpenAiLanguageModelProvider {
    http_client: Arc<dyn HttpClient>,
    state: Entity<State>,
}

pub struct State {
    api_key_state: ApiKeyState,
}

impl State {
    fn is_authenticated(&self) -> bool {
        self.api_key_state.has_key()
    }

    fn set_api_key(&mut self, api_key: Option<String>, cx: &mut Context<Self>) -> Task<Result<()>> {
        let api_url = OpenAiLanguageModelProvider::api_url(cx);
        self.api_key_state
            .store(api_url, api_key, |this| &mut this.api_key_state, cx)
    }

    fn authenticate(&mut self, cx: &mut Context<Self>) -> Task<Result<(), AuthenticateError>> {
        let api_url = OpenAiLanguageModelProvider::api_url(cx);
        self.api_key_state
            .load_if_needed(api_url, |this| &mut this.api_key_state, cx)
    }
}

impl OpenAiLanguageModelProvider {
    pub fn new(http_client: Arc<dyn HttpClient>, cx: &mut App) -> Self {
        let state = cx.new(|cx| {
            cx.observe_global::<SettingsStore>(|this: &mut State, cx| {
                let api_url = Self::api_url(cx);
                this.api_key_state
                    .handle_url_change(api_url, |this| &mut this.api_key_state, cx);
                cx.notify();
            })
            .detach();
            State {
                api_key_state: ApiKeyState::new(Self::api_url(cx), (*API_KEY_ENV_VAR).clone()),
            }
        });

        Self { http_client, state }
    }

    fn create_language_model(&self, model: open_ai::Model) -> Arc<dyn LanguageModel> {
        Arc::new(OpenAiLanguageModel {
            id: LanguageModelId::from(model.id().to_string()),
            model,
            state: self.state.clone(),
            http_client: self.http_client.clone(),
            request_limiter: RateLimiter::new(4),
        })
    }

    fn settings(cx: &App) -> &OpenAiSettings {
        &crate::AllLanguageModelSettings::get_global(cx).openai
    }

    fn api_url(cx: &App) -> SharedString {
        let api_url = &Self::settings(cx).api_url;
        if api_url.is_empty() {
            open_ai::OPEN_AI_API_URL.into()
        } else {
            SharedString::new(api_url.as_str())
        }
    }
}

impl LanguageModelProviderState for OpenAiLanguageModelProvider {
    type ObservableEntity = State;

    fn observable_entity(&self) -> Option<Entity<Self::ObservableEntity>> {
        Some(self.state.clone())
    }
}

impl LanguageModelProvider for OpenAiLanguageModelProvider {
    fn id(&self) -> LanguageModelProviderId {
        PROVIDER_ID
    }

    fn name(&self) -> LanguageModelProviderName {
        PROVIDER_NAME
    }

    fn icon(&self) -> IconOrSvg {
        IconOrSvg::Icon(IconName::AiOpenAi)
    }

    fn default_model(&self, _cx: &App) -> Option<Arc<dyn LanguageModel>> {
        Some(self.create_language_model(open_ai::Model::default()))
    }

    fn default_fast_model(&self, _cx: &App) -> Option<Arc<dyn LanguageModel>> {
        Some(self.create_language_model(open_ai::Model::default_fast()))
    }

    fn provided_models(&self, cx: &App) -> Vec<Arc<dyn LanguageModel>> {
        let mut models = BTreeMap::default();

        // Add base models from open_ai::Model::iter()
        for model in open_ai::Model::iter() {
            if !matches!(model, open_ai::Model::Custom { .. }) {
                models.insert(model.id().to_string(), model);
            }
        }

        // Add models from settings, overriding base models that share an ID
        for model in &OpenAiLanguageModelProvider::settings(cx).available_models {
            models.insert(
                model.name.clone(),
                open_ai::Model::Custom {
                    name: model.name.clone(),
                    display_name: model.display_name.clone(),
                    max_tokens: model.max_tokens,
                    max_output_tokens: model.max_output_tokens,
                    max_completion_tokens: model.max_completion_tokens,
                    reasoning_effort: model.reasoning_effort.clone(),
                    supports_chat_completions: model.capabilities.chat_completions,
                },
            );
        }

        models
            .into_values()
            .map(|model| self.create_language_model(model))
            .collect()
    }

    fn is_authenticated(&self, cx: &App) -> bool {
        self.state.read(cx).is_authenticated()
    }

    fn authenticate(&self, cx: &mut App) -> Task<Result<(), AuthenticateError>> {
        self.state.update(cx, |state, cx| state.authenticate(cx))
    }

    fn configuration_view(
        &self,
        _target_agent: language_model::ConfigurationViewTargetAgent,
        window: &mut Window,
        cx: &mut App,
    ) -> AnyView {
        cx.new(|cx| ConfigurationView::new(self.state.clone(), window, cx))
            .into()
    }

    fn reset_credentials(&self, cx: &mut App) -> Task<Result<()>> {
        self.state
            .update(cx, |state, cx| state.set_api_key(None, cx))
    }
}

pub struct OpenAiLanguageModel {
    id: LanguageModelId,
    model: open_ai::Model,
    state: Entity<State>,
    http_client: Arc<dyn HttpClient>,
    request_limiter: RateLimiter,
}

impl OpenAiLanguageModel {
    fn stream_completion(
        &self,
        request: open_ai::Request,
        cx: &AsyncApp,
    ) -> BoxFuture<'static, Result<futures::stream::BoxStream<'static, Result<ResponseStreamEvent>>>>
    {
        let http_client = self.http_client.clone();

        let Ok((api_key, api_url)) = self.state.read_with(cx, |state, cx| {
            let api_url = OpenAiLanguageModelProvider::api_url(cx);
            (state.api_key_state.key(&api_url), api_url)
        }) else {
            return future::ready(Err(anyhow!("App state dropped"))).boxed();
        };

        let future = self.request_limiter.stream(async move {
            let provider = PROVIDER_NAME;
            let Some(api_key) = api_key else {
                return Err(LanguageModelCompletionError::NoApiKey { provider });
            };
            let request = stream_completion(
                http_client.as_ref(),
                provider.0.as_str(),
                &api_url,
                &api_key,
                request,
            );
            let response = request.await?;
            Ok(response)
        });

        async move { Ok(future.await?.boxed()) }.boxed()
    }

    fn stream_response(
        &self,
        request: ResponseRequest,
        cx: &AsyncApp,
    ) -> BoxFuture<'static, Result<futures::stream::BoxStream<'static, Result<ResponsesStreamEvent>>>>
    {
        let http_client = self.http_client.clone();

        let Ok((api_key, api_url)) = self.state.read_with(cx, |state, cx| {
            let api_url = OpenAiLanguageModelProvider::api_url(cx);
            (state.api_key_state.key(&api_url), api_url)
        }) else {
            return future::ready(Err(anyhow!("App state dropped"))).boxed();
        };

        let provider = PROVIDER_NAME;
        let future = self.request_limiter.stream(async move {
            let Some(api_key) = api_key else {
                return Err(LanguageModelCompletionError::NoApiKey { provider });
            };
            let request = stream_response(
                http_client.as_ref(),
                provider.0.as_str(),
                &api_url,
                &api_key,
                request,
            );
            let response = request.await?;
            Ok(response)
        });

        async move { Ok(future.await?.boxed()) }.boxed()
    }
}

impl LanguageModel for OpenAiLanguageModel {
    fn id(&self) -> LanguageModelId {
        self.id.clone()
    }

    fn name(&self) -> LanguageModelName {
        LanguageModelName::from(self.model.display_name().to_string())
    }

    fn provider_id(&self) -> LanguageModelProviderId {
        PROVIDER_ID
    }

    fn provider_name(&self) -> LanguageModelProviderName {
        PROVIDER_NAME
    }

    fn supports_tools(&self) -> bool {
        true
    }

    fn supports_images(&self) -> bool {
        use open_ai::Model;
        match &self.model {
            Model::FourOmni
            | Model::FourOmniMini
            | Model::FourPointOne
            | Model::FourPointOneMini
            | Model::FourPointOneNano
            | Model::Five
            | Model::FiveCodex
            | Model::FiveMini
            | Model::FiveNano
            | Model::FivePointOne
            | Model::FivePointTwo
            | Model::O1
            | Model::O3
            | Model::O4Mini => true,
            Model::ThreePointFiveTurbo
            | Model::Four
            | Model::FourTurbo
            | Model::O3Mini
            | Model::Custom { .. } => false,
        }
    }

    fn supports_tool_choice(&self, choice: LanguageModelToolChoice) -> bool {
        match choice {
            LanguageModelToolChoice::Auto => true,
            LanguageModelToolChoice::Any => true,
            LanguageModelToolChoice::None => true,
        }
    }

    fn telemetry_id(&self) -> String {
        format!("openai/{}", self.model.id())
    }

    fn max_token_count(&self) -> u64 {
        self.model.max_token_count()
    }

    fn max_output_tokens(&self) -> Option<u64> {
        self.model.max_output_tokens()
    }

    fn count_tokens(
        &self,
        request: LanguageModelRequest,
        cx: &App,
    ) -> BoxFuture<'static, Result<u64>> {
        count_open_ai_tokens(request, self.model.clone(), cx)
    }

    fn stream_completion(
        &self,
        request: LanguageModelRequest,
        cx: &AsyncApp,
    ) -> BoxFuture<
        'static,
        Result<
            futures::stream::BoxStream<
                'static,
                Result<LanguageModelCompletionEvent, LanguageModelCompletionError>,
            >,
            LanguageModelCompletionError,
        >,
    > {
        if self.model.supports_chat_completions() {
            let request = into_open_ai(
                request,
                self.model.id(),
                self.model.supports_parallel_tool_calls(),
                self.model.supports_prompt_cache_key(),
                self.max_output_tokens(),
                self.model.reasoning_effort(),
            );
            let completions = self.stream_completion(request, cx);
            async move {
                let mapper = OpenAiEventMapper::new();
                Ok(mapper.map_stream(completions.await?).boxed())
            }
            .boxed()
        } else {
            let request = into_open_ai_response(
                request,
                self.model.id(),
                self.model.supports_parallel_tool_calls(),
                self.model.supports_prompt_cache_key(),
                self.max_output_tokens(),
                self.model.reasoning_effort(),
            );
            let completions = self.stream_response(request, cx);
            async move {
                let mapper = OpenAiResponseEventMapper::new();
                Ok(mapper.map_stream(completions.await?).boxed())
            }
            .boxed()
        }
    }
}

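/// Converts a provider-agnostic [`LanguageModelRequest`] into an OpenAI
/// Chat Completions request, flattening message content into OpenAI's
/// message and tool-call shapes.
///
/// A minimal usage sketch (illustrative only; `language_model_request`
/// stands in for a request built by the caller, and the argument values
/// are assumptions, not defaults):
///
/// ```ignore
/// let request = into_open_ai(
///     language_model_request,
///     "gpt-4o",
///     true,       // supports_parallel_tool_calls
///     true,       // supports_prompt_cache_key
///     Some(4096), // max_output_tokens
///     None,       // reasoning_effort
/// );
/// ```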
pub fn into_open_ai(
    request: LanguageModelRequest,
    model_id: &str,
    supports_parallel_tool_calls: bool,
    supports_prompt_cache_key: bool,
    max_output_tokens: Option<u64>,
    reasoning_effort: Option<ReasoningEffort>,
) -> open_ai::Request {
    let stream = !model_id.starts_with("o1-");

    let mut messages = Vec::new();
    for message in request.messages {
        for content in message.content {
            match content {
                MessageContent::Text(text) | MessageContent::Thinking { text, .. } => {
                    if !text.trim().is_empty() {
                        add_message_content_part(
                            open_ai::MessagePart::Text { text },
                            message.role,
                            &mut messages,
                        );
                    }
                }
                MessageContent::RedactedThinking(_) => {}
                MessageContent::Image(image) => {
                    add_message_content_part(
                        open_ai::MessagePart::Image {
                            image_url: ImageUrl {
                                url: image.to_base64_url(),
                                detail: None,
                            },
                        },
                        message.role,
                        &mut messages,
                    );
                }
                MessageContent::ToolUse(tool_use) => {
                    let tool_call = open_ai::ToolCall {
                        id: tool_use.id.to_string(),
                        content: open_ai::ToolCallContent::Function {
                            function: open_ai::FunctionContent {
                                name: tool_use.name.to_string(),
                                arguments: serde_json::to_string(&tool_use.input)
                                    .unwrap_or_default(),
                            },
                        },
                    };

                    if let Some(open_ai::RequestMessage::Assistant { tool_calls, .. }) =
                        messages.last_mut()
                    {
                        tool_calls.push(tool_call);
                    } else {
                        messages.push(open_ai::RequestMessage::Assistant {
                            content: None,
                            tool_calls: vec![tool_call],
                        });
                    }
                }
                MessageContent::ToolResult(tool_result) => {
                    let content = match &tool_result.content {
                        LanguageModelToolResultContent::Text(text) => {
                            vec![open_ai::MessagePart::Text {
                                text: text.to_string(),
                            }]
                        }
                        LanguageModelToolResultContent::Image(image) => {
                            vec![open_ai::MessagePart::Image {
                                image_url: ImageUrl {
                                    url: image.to_base64_url(),
                                    detail: None,
                                },
                            }]
                        }
                    };

                    messages.push(open_ai::RequestMessage::Tool {
                        content: content.into(),
                        tool_call_id: tool_result.tool_use_id.to_string(),
                    });
                }
            }
        }
    }

    open_ai::Request {
        model: model_id.into(),
        messages,
        stream,
        stop: request.stop,
        temperature: request.temperature.or(Some(1.0)),
        max_completion_tokens: max_output_tokens,
        parallel_tool_calls: if supports_parallel_tool_calls && !request.tools.is_empty() {
            // Disable parallel tool calls, as the Agent currently expects a maximum of one per turn.
            Some(false)
        } else {
            None
        },
        prompt_cache_key: if supports_prompt_cache_key {
            request.thread_id
        } else {
            None
        },
        tools: request
            .tools
            .into_iter()
            .map(|tool| open_ai::ToolDefinition::Function {
                function: open_ai::FunctionDefinition {
                    name: tool.name,
                    description: Some(tool.description),
                    parameters: Some(tool.input_schema),
                },
            })
            .collect(),
        tool_choice: request.tool_choice.map(|choice| match choice {
            LanguageModelToolChoice::Auto => open_ai::ToolChoice::Auto,
            LanguageModelToolChoice::Any => open_ai::ToolChoice::Required,
            LanguageModelToolChoice::None => open_ai::ToolChoice::None,
        }),
        reasoning_effort,
    }
}

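/// Converts a [`LanguageModelRequest`] into an OpenAI Responses API
/// request. Unlike [`into_open_ai`], message content is flattened into
/// Responses "input items" (JSON values); note that stop sequences are
/// destructured away, since this request shape does not carry them.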
pub fn into_open_ai_response(
    request: LanguageModelRequest,
    model_id: &str,
    supports_parallel_tool_calls: bool,
    supports_prompt_cache_key: bool,
    max_output_tokens: Option<u64>,
    reasoning_effort: Option<ReasoningEffort>,
) -> ResponseRequest {
    let stream = !model_id.starts_with("o1-");

    let LanguageModelRequest {
        thread_id,
        prompt_id: _,
        intent: _,
        mode: _,
        messages,
        tools,
        tool_choice,
        stop: _,
        temperature,
        thinking_allowed: _,
    } = request;

    let mut input_items = Vec::new();
    for (index, message) in messages.into_iter().enumerate() {
        append_message_to_response_items(message, index, &mut input_items);
    }

    let tools: Vec<_> = tools
        .into_iter()
        .map(|tool| open_ai::responses::ToolDefinition::Function {
            name: tool.name,
            description: Some(tool.description),
            parameters: Some(tool.input_schema),
            strict: None,
        })
        .collect();

    ResponseRequest {
        model: model_id.into(),
        input: input_items,
        stream,
        temperature,
        top_p: None,
        max_output_tokens,
        parallel_tool_calls: if tools.is_empty() {
            None
        } else {
            Some(supports_parallel_tool_calls)
        },
        tool_choice: tool_choice.map(|choice| match choice {
            LanguageModelToolChoice::Auto => open_ai::ToolChoice::Auto,
            LanguageModelToolChoice::Any => open_ai::ToolChoice::Required,
            LanguageModelToolChoice::None => open_ai::ToolChoice::None,
        }),
        tools,
        prompt_cache_key: if supports_prompt_cache_key {
            thread_id
        } else {
            None
        },
        reasoning: reasoning_effort.map(|effort| open_ai::responses::ReasoningConfig { effort }),
    }
}

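/// Appends one request message to the Responses input list. Contiguous
/// text and image parts are buffered and flushed as a single "message"
/// item, while tool uses and tool results force a flush so they appear
/// as standalone "function_call" / "function_call_output" items in order.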
fn append_message_to_response_items(
    message: LanguageModelRequestMessage,
    index: usize,
    input_items: &mut Vec<Value>,
) {
    let mut content_parts: Vec<Value> = Vec::new();

    for content in message.content {
        match content {
            MessageContent::Text(text) => {
                push_response_text_part(&message.role, text, &mut content_parts);
            }
            MessageContent::Thinking { text, .. } => {
                push_response_text_part(&message.role, text, &mut content_parts);
            }
            MessageContent::RedactedThinking(_) => {}
            MessageContent::Image(image) => {
                push_response_image_part(&message.role, image, &mut content_parts);
            }
            MessageContent::ToolUse(tool_use) => {
                flush_response_parts(&message.role, index, &mut content_parts, input_items);
                let call_id = tool_use.id.to_string();
                input_items.push(json!({
                    "type": "function_call",
                    "call_id": call_id,
                    "name": tool_use.name,
                    "arguments": tool_use.raw_input,
                }));
            }
            MessageContent::ToolResult(tool_result) => {
                flush_response_parts(&message.role, index, &mut content_parts, input_items);
                input_items.push(json!({
                    "type": "function_call_output",
                    "call_id": tool_result.tool_use_id.to_string(),
                    "output": tool_result_output(&tool_result),
                }));
            }
        }
    }

    flush_response_parts(&message.role, index, &mut content_parts, input_items);
}

fn push_response_text_part(role: &Role, text: impl Into<String>, parts: &mut Vec<Value>) {
    let text = text.into();
    if text.trim().is_empty() {
        return;
    }

    match role {
        Role::Assistant => parts.push(json!({
            "type": "output_text",
            "text": text,
            "annotations": [],
        })),
        _ => parts.push(json!({
            "type": "input_text",
            "text": text,
        })),
    }
}

fn push_response_image_part(role: &Role, image: LanguageModelImage, parts: &mut Vec<Value>) {
    match role {
        Role::Assistant => parts.push(json!({
            "type": "output_text",
            "text": "[image omitted]",
            "annotations": [],
        })),
        _ => parts.push(json!({
            "type": "input_image",
            "image_url": image.to_base64_url(),
        })),
    }
}

fn flush_response_parts(
    role: &Role,
    _index: usize,
    parts: &mut Vec<Value>,
    input_items: &mut Vec<Value>,
) {
    if parts.is_empty() {
        return;
    }

    let item = match role {
        Role::Assistant => json!({
            "type": "message",
            "role": "assistant",
            "status": "completed",
            "content": parts.clone(),
        }),
        Role::User => json!({
            "type": "message",
            "role": "user",
            "content": parts.clone(),
        }),
        Role::System => json!({
            "type": "message",
            "role": "system",
            "content": parts.clone(),
        }),
    };

    input_items.push(item);
    parts.clear();
}

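/// Renders a tool result as the string payload of a
/// "function_call_output" item, preferring the structured `output` value
/// over the displayed content when one is present.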
fn tool_result_output(result: &LanguageModelToolResult) -> String {
    if let Some(output) = &result.output {
        match output {
            serde_json::Value::String(text) => text.clone(),
            serde_json::Value::Null => String::new(),
            _ => output.to_string(),
        }
    } else {
        match &result.content {
            LanguageModelToolResultContent::Text(text) => text.to_string(),
            LanguageModelToolResultContent::Image(image) => image.to_base64_url(),
        }
    }
}

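/// Appends a message part to the Chat Completions message list, merging
/// it into the previous message when the role matches so that
/// consecutive parts don't produce redundant messages.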
fn add_message_content_part(
    new_part: open_ai::MessagePart,
    role: Role,
    messages: &mut Vec<open_ai::RequestMessage>,
) {
    match (role, messages.last_mut()) {
        (Role::User, Some(open_ai::RequestMessage::User { content }))
        | (
            Role::Assistant,
            Some(open_ai::RequestMessage::Assistant {
                content: Some(content),
                ..
            }),
        )
        | (Role::System, Some(open_ai::RequestMessage::System { content, .. })) => {
            content.push_part(new_part);
        }
        _ => {
            messages.push(match role {
                Role::User => open_ai::RequestMessage::User {
                    content: open_ai::MessageContent::from(vec![new_part]),
                },
                Role::Assistant => open_ai::RequestMessage::Assistant {
                    content: Some(open_ai::MessageContent::from(vec![new_part])),
                    tool_calls: Vec::new(),
                },
                Role::System => open_ai::RequestMessage::System {
                    content: open_ai::MessageContent::from(vec![new_part]),
                },
            });
        }
    }
}

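/// Maps OpenAI Chat Completions stream events to
/// [`LanguageModelCompletionEvent`]s, accumulating partial tool calls by
/// index until a `tool_calls` finish reason arrives.
///
/// A minimal sketch of how it is wired up (mirroring `stream_completion`
/// above; `completions` is assumed to be the pending event stream):
///
/// ```ignore
/// let mapper = OpenAiEventMapper::new();
/// let events = mapper.map_stream(completions.await?).boxed();
/// ```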
pub struct OpenAiEventMapper {
    tool_calls_by_index: HashMap<usize, RawToolCall>,
}

impl OpenAiEventMapper {
    pub fn new() -> Self {
        Self {
            tool_calls_by_index: HashMap::default(),
        }
    }

    pub fn map_stream(
        mut self,
        events: Pin<Box<dyn Send + Stream<Item = Result<ResponseStreamEvent>>>>,
    ) -> impl Stream<Item = Result<LanguageModelCompletionEvent, LanguageModelCompletionError>>
    {
        events.flat_map(move |event| {
            futures::stream::iter(match event {
                Ok(event) => self.map_event(event),
                Err(error) => vec![Err(LanguageModelCompletionError::from(anyhow!(error)))],
            })
        })
    }

    pub fn map_event(
        &mut self,
        event: ResponseStreamEvent,
    ) -> Vec<Result<LanguageModelCompletionEvent, LanguageModelCompletionError>> {
        let mut events = Vec::new();
        if let Some(usage) = event.usage {
            events.push(Ok(LanguageModelCompletionEvent::UsageUpdate(TokenUsage {
                input_tokens: usage.prompt_tokens,
                output_tokens: usage.completion_tokens,
                cache_creation_input_tokens: 0,
                cache_read_input_tokens: 0,
            })));
        }

        let Some(choice) = event.choices.first() else {
            return events;
        };

        if let Some(delta) = choice.delta.as_ref() {
            if let Some(content) = delta.content.clone() {
                events.push(Ok(LanguageModelCompletionEvent::Text(content)));
            }

            if let Some(tool_calls) = delta.tool_calls.as_ref() {
                for tool_call in tool_calls {
                    let entry = self.tool_calls_by_index.entry(tool_call.index).or_default();

                    if let Some(tool_id) = tool_call.id.clone() {
                        entry.id = tool_id;
                    }

                    if let Some(function) = tool_call.function.as_ref() {
                        if let Some(name) = function.name.clone() {
                            entry.name = name;
                        }

                        if let Some(arguments) = function.arguments.clone() {
                            entry.arguments.push_str(&arguments);
                        }
                    }
                }
            }
        }

        match choice.finish_reason.as_deref() {
            Some("stop") => {
                events.push(Ok(LanguageModelCompletionEvent::Stop(StopReason::EndTurn)));
            }
            Some("tool_calls") => {
                events.extend(self.tool_calls_by_index.drain().map(|(_, tool_call)| {
                    match serde_json::Value::from_str(&tool_call.arguments) {
                        Ok(input) => Ok(LanguageModelCompletionEvent::ToolUse(
                            LanguageModelToolUse {
                                id: tool_call.id.clone().into(),
                                name: tool_call.name.as_str().into(),
                                is_input_complete: true,
                                input,
                                raw_input: tool_call.arguments.clone(),
                                thought_signature: None,
                            },
                        )),
                        Err(error) => Ok(LanguageModelCompletionEvent::ToolUseJsonParseError {
                            id: tool_call.id.into(),
                            tool_name: tool_call.name.into(),
                            raw_input: tool_call.arguments.clone().into(),
                            json_parse_error: error.to_string(),
                        }),
                    }
                }));

                events.push(Ok(LanguageModelCompletionEvent::Stop(StopReason::ToolUse)));
            }
            Some(stop_reason) => {
                log::error!("Unexpected OpenAI stop_reason: {stop_reason:?}");
                events.push(Ok(LanguageModelCompletionEvent::Stop(StopReason::EndTurn)));
            }
            None => {}
        }

        events
    }
}

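/// A tool call accumulated across stream deltas: `arguments` grows as
/// fragments arrive and is parsed as JSON once the call is complete.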
#[derive(Default)]
struct RawToolCall {
    id: String,
    name: String,
    arguments: String,
}

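/// Maps OpenAI Responses API stream events to
/// [`LanguageModelCompletionEvent`]s. Function calls are tracked per
/// output item so that argument deltas can be stitched together, and the
/// stop reason is deferred until the response completes.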
pub struct OpenAiResponseEventMapper {
    function_calls_by_item: HashMap<String, PendingResponseFunctionCall>,
    pending_stop_reason: Option<StopReason>,
}

#[derive(Default)]
struct PendingResponseFunctionCall {
    call_id: String,
    name: Arc<str>,
    arguments: String,
}

impl OpenAiResponseEventMapper {
    pub fn new() -> Self {
        Self {
            function_calls_by_item: HashMap::default(),
            pending_stop_reason: None,
        }
    }

    pub fn map_stream(
        mut self,
        events: Pin<Box<dyn Send + Stream<Item = Result<ResponsesStreamEvent>>>>,
    ) -> impl Stream<Item = Result<LanguageModelCompletionEvent, LanguageModelCompletionError>>
    {
        events.flat_map(move |event| {
            futures::stream::iter(match event {
                Ok(event) => self.map_event(event),
                Err(error) => vec![Err(LanguageModelCompletionError::from(anyhow!(error)))],
            })
        })
    }

    fn map_event(
        &mut self,
        event: ResponsesStreamEvent,
    ) -> Vec<Result<LanguageModelCompletionEvent, LanguageModelCompletionError>> {
        match event {
            ResponsesStreamEvent::OutputItemAdded { item, .. } => {
                let mut events = Vec::new();

                match &item {
                    ResponseOutputItem::Message(message) => {
                        if let Some(id) = &message.id {
                            events.push(Ok(LanguageModelCompletionEvent::StartMessage {
                                message_id: id.clone(),
                            }));
                        }
                    }
                    ResponseOutputItem::FunctionCall(function_call) => {
                        if let Some(item_id) = function_call.id.clone() {
                            let call_id = function_call
                                .call_id
                                .clone()
                                .or_else(|| function_call.id.clone())
                                .unwrap_or_else(|| item_id.clone());
                            let entry = PendingResponseFunctionCall {
                                call_id,
                                name: Arc::<str>::from(
                                    function_call.name.clone().unwrap_or_default(),
                                ),
                                arguments: function_call.arguments.clone(),
                            };
                            self.function_calls_by_item.insert(item_id, entry);
                        }
                    }
                    ResponseOutputItem::Unknown => {}
                }
                events
            }
            ResponsesStreamEvent::OutputTextDelta { delta, .. } => {
                if delta.is_empty() {
                    Vec::new()
                } else {
                    vec![Ok(LanguageModelCompletionEvent::Text(delta))]
                }
            }
            ResponsesStreamEvent::FunctionCallArgumentsDelta { item_id, delta, .. } => {
                if let Some(entry) = self.function_calls_by_item.get_mut(&item_id) {
                    entry.arguments.push_str(&delta);
                }
                Vec::new()
            }
            ResponsesStreamEvent::FunctionCallArgumentsDone {
                item_id, arguments, ..
            } => {
                if let Some(mut entry) = self.function_calls_by_item.remove(&item_id) {
                    if !arguments.is_empty() {
                        entry.arguments = arguments;
                    }
                    let raw_input = entry.arguments.clone();
                    self.pending_stop_reason = Some(StopReason::ToolUse);
                    match serde_json::from_str::<serde_json::Value>(&entry.arguments) {
                        Ok(input) => {
                            vec![Ok(LanguageModelCompletionEvent::ToolUse(
                                LanguageModelToolUse {
                                    id: LanguageModelToolUseId::from(entry.call_id.clone()),
                                    name: entry.name.clone(),
                                    is_input_complete: true,
                                    input,
                                    raw_input,
                                    thought_signature: None,
                                },
                            ))]
                        }
                        Err(error) => {
                            vec![Ok(LanguageModelCompletionEvent::ToolUseJsonParseError {
                                id: LanguageModelToolUseId::from(entry.call_id.clone()),
                                tool_name: entry.name.clone(),
                                raw_input: Arc::<str>::from(raw_input),
                                json_parse_error: error.to_string(),
                            })]
                        }
                    }
                } else {
                    Vec::new()
                }
            }
            ResponsesStreamEvent::Completed { response } => {
                self.handle_completion(response, StopReason::EndTurn)
            }
            ResponsesStreamEvent::Incomplete { response } => {
                let reason = response
                    .status_details
                    .as_ref()
                    .and_then(|details| details.reason.as_deref());
                let stop_reason = match reason {
                    Some("max_output_tokens") => StopReason::MaxTokens,
                    Some("content_filter") => {
                        self.pending_stop_reason = Some(StopReason::Refusal);
                        StopReason::Refusal
                    }
                    _ => self
                        .pending_stop_reason
                        .take()
                        .unwrap_or(StopReason::EndTurn),
                };

                let mut events = Vec::new();
                if self.pending_stop_reason.is_none() {
                    events.extend(self.emit_tool_calls_from_output(&response.output));
                }
                if let Some(usage) = response.usage.as_ref() {
                    events.push(Ok(LanguageModelCompletionEvent::UsageUpdate(
                        token_usage_from_response_usage(usage),
                    )));
                }
                events.push(Ok(LanguageModelCompletionEvent::Stop(stop_reason)));
                events
            }
            ResponsesStreamEvent::Failed { response } => {
                let message = response
                    .status_details
                    .and_then(|details| details.error)
                    .map(|error| error.to_string())
                    .unwrap_or_else(|| "response failed".to_string());
                vec![Err(LanguageModelCompletionError::Other(anyhow!(message)))]
            }
            ResponsesStreamEvent::Error { error }
            | ResponsesStreamEvent::GenericError { error } => {
                vec![Err(LanguageModelCompletionError::Other(anyhow!(
                    "{error:?}"
                )))]
            }
            ResponsesStreamEvent::OutputTextDone { .. } => Vec::new(),
            ResponsesStreamEvent::OutputItemDone { .. }
            | ResponsesStreamEvent::ContentPartAdded { .. }
            | ResponsesStreamEvent::ContentPartDone { .. }
            | ResponsesStreamEvent::Created { .. }
            | ResponsesStreamEvent::InProgress { .. }
            | ResponsesStreamEvent::Unknown => Vec::new(),
        }
    }

    fn handle_completion(
        &mut self,
        response: ResponsesSummary,
        default_reason: StopReason,
    ) -> Vec<Result<LanguageModelCompletionEvent, LanguageModelCompletionError>> {
        let mut events = Vec::new();

        if self.pending_stop_reason.is_none() {
            events.extend(self.emit_tool_calls_from_output(&response.output));
        }

        if let Some(usage) = response.usage.as_ref() {
            events.push(Ok(LanguageModelCompletionEvent::UsageUpdate(
                token_usage_from_response_usage(usage),
            )));
        }

        let stop_reason = self.pending_stop_reason.take().unwrap_or(default_reason);
        events.push(Ok(LanguageModelCompletionEvent::Stop(stop_reason)));
        events
    }

    fn emit_tool_calls_from_output(
        &mut self,
        output: &[ResponseOutputItem],
    ) -> Vec<Result<LanguageModelCompletionEvent, LanguageModelCompletionError>> {
        let mut events = Vec::new();
        for item in output {
            if let ResponseOutputItem::FunctionCall(function_call) = item {
                let Some(call_id) = function_call
                    .call_id
                    .clone()
                    .or_else(|| function_call.id.clone())
                else {
                    log::error!(
                        "Function call item missing both call_id and id: {:?}",
                        function_call
                    );
                    continue;
                };
                let name: Arc<str> = Arc::from(function_call.name.clone().unwrap_or_default());
                let arguments = &function_call.arguments;
                if !arguments.is_empty() {
                    self.pending_stop_reason = Some(StopReason::ToolUse);
                    match serde_json::from_str::<serde_json::Value>(arguments) {
                        Ok(input) => {
                            events.push(Ok(LanguageModelCompletionEvent::ToolUse(
                                LanguageModelToolUse {
                                    id: LanguageModelToolUseId::from(call_id.clone()),
                                    name: name.clone(),
                                    is_input_complete: true,
                                    input,
                                    raw_input: arguments.clone(),
                                    thought_signature: None,
                                },
                            )));
                        }
                        Err(error) => {
                            events.push(Ok(LanguageModelCompletionEvent::ToolUseJsonParseError {
                                id: LanguageModelToolUseId::from(call_id.clone()),
                                tool_name: name.clone(),
                                raw_input: Arc::<str>::from(arguments.clone()),
                                json_parse_error: error.to_string(),
                            }));
                        }
                    }
                }
            }
        }
        events
    }
}

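/// Converts Responses API usage numbers into [`TokenUsage`]. The
/// Responses payload carries no cache-token breakdown, so those fields
/// are reported as zero.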
fn token_usage_from_response_usage(usage: &ResponsesUsage) -> TokenUsage {
    TokenUsage {
        input_tokens: usage.input_tokens.unwrap_or_default(),
        output_tokens: usage.output_tokens.unwrap_or_default(),
        cache_creation_input_tokens: 0,
        cache_read_input_tokens: 0,
    }
}

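/// Flattens request messages into the shape tiktoken-rs expects for
/// token counting, reducing each message to its string contents.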
pub(crate) fn collect_tiktoken_messages(
    request: LanguageModelRequest,
) -> Vec<tiktoken_rs::ChatCompletionRequestMessage> {
    request
        .messages
        .into_iter()
        .map(|message| tiktoken_rs::ChatCompletionRequestMessage {
            role: match message.role {
                Role::User => "user".into(),
                Role::Assistant => "assistant".into(),
                Role::System => "system".into(),
            },
            content: Some(message.string_contents()),
            name: None,
            function_call: None,
        })
        .collect::<Vec<_>>()
}

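/// Counts tokens for a request on a background thread using tiktoken-rs,
/// picking a tokenizer appropriate for the model, with fallbacks for
/// custom models and for models tiktoken-rs doesn't know about yet.
///
/// A minimal sketch of a call site (assumes an async context with a
/// request and an `&App` handle in scope; the model choice here is
/// illustrative):
///
/// ```ignore
/// let token_count = count_open_ai_tokens(request, Model::FourOmni, cx).await?;
/// ```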
pub fn count_open_ai_tokens(
    request: LanguageModelRequest,
    model: Model,
    cx: &App,
) -> BoxFuture<'static, Result<u64>> {
    cx.background_spawn(async move {
        let messages = collect_tiktoken_messages(request);
        match model {
            Model::Custom { max_tokens, .. } => {
                let model = if max_tokens >= 100_000 {
                    // If the context window is 100k tokens or more, the model most likely
                    // uses the o200k_base tokenizer from gpt-4o
                    "gpt-4o"
                } else {
                    // Otherwise, fall back to gpt-4, since this tiktoken method only
                    // supports the cl100k_base and o200k_base tokenizers
                    "gpt-4"
                };
                tiktoken_rs::num_tokens_from_messages(model, &messages)
            }
            // Models currently supported by tiktoken_rs.
            // tiktoken-rs is sometimes behind on model support. If that is the case, add a
            // new match arm with an override. We enumerate all supported models here so
            // that it is easy to check whether new models are supported yet or not.
            Model::ThreePointFiveTurbo
            | Model::Four
            | Model::FourTurbo
            | Model::FourOmni
            | Model::FourOmniMini
            | Model::FourPointOne
            | Model::FourPointOneMini
            | Model::FourPointOneNano
            | Model::O1
            | Model::O3
            | Model::O3Mini
            | Model::O4Mini
            | Model::Five
            | Model::FiveCodex
            | Model::FiveMini
            | Model::FiveNano => tiktoken_rs::num_tokens_from_messages(model.id(), &messages),
            // GPT-5.1 and GPT-5.2 don't have dedicated tiktoken support yet; use the gpt-5 tokenizer
            Model::FivePointOne | Model::FivePointTwo => {
                tiktoken_rs::num_tokens_from_messages("gpt-5", &messages)
            }
        }
        .map(|tokens| tokens as u64)
    })
    .boxed()
}

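/// The provider's configuration UI: shows an API key input while
/// unauthenticated, and a "configured" card (with a reset affordance)
/// once a key is present or supplied via the environment variable.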
struct ConfigurationView {
    api_key_editor: Entity<InputField>,
    state: Entity<State>,
    load_credentials_task: Option<Task<()>>,
}

impl ConfigurationView {
    fn new(state: Entity<State>, window: &mut Window, cx: &mut Context<Self>) -> Self {
        let api_key_editor = cx.new(|cx| {
            InputField::new(
                window,
                cx,
                "sk-000000000000000000000000000000000000000000000000",
            )
        });

        cx.observe(&state, |_, _, cx| {
            cx.notify();
        })
        .detach();

        let load_credentials_task = Some(cx.spawn_in(window, {
            let state = state.clone();
            async move |this, cx| {
                if let Some(task) = state
                    .update(cx, |state, cx| state.authenticate(cx))
                    .log_err()
                {
                    // We don't log errors here, because "not signed in" is also reported as an error.
                    let _ = task.await;
                }
                this.update(cx, |this, cx| {
                    this.load_credentials_task = None;
                    cx.notify();
                })
                .log_err();
            }
        }));

        Self {
            api_key_editor,
            state,
            load_credentials_task,
        }
    }

    fn save_api_key(&mut self, _: &menu::Confirm, window: &mut Window, cx: &mut Context<Self>) {
        let api_key = self.api_key_editor.read(cx).text(cx).trim().to_string();
        if api_key.is_empty() {
            return;
        }

        // Clear the input now; URL changes can cause the editor to be displayed again
1238        self.api_key_editor
1239            .update(cx, |editor, cx| editor.set_text("", window, cx));
1240
1241        let state = self.state.clone();
1242        cx.spawn_in(window, async move |_, cx| {
1243            state
1244                .update(cx, |state, cx| state.set_api_key(Some(api_key), cx))?
1245                .await
1246        })
1247        .detach_and_log_err(cx);
1248    }
1249
1250    fn reset_api_key(&mut self, window: &mut Window, cx: &mut Context<Self>) {
1251        self.api_key_editor
1252            .update(cx, |input, cx| input.set_text("", window, cx));
1253
1254        let state = self.state.clone();
1255        cx.spawn_in(window, async move |_, cx| {
1256            state
1257                .update(cx, |state, cx| state.set_api_key(None, cx))?
1258                .await
1259        })
1260        .detach_and_log_err(cx);
1261    }
1262
1263    fn should_render_editor(&self, cx: &mut Context<Self>) -> bool {
1264        !self.state.read(cx).is_authenticated()
1265    }
1266}
1267
1268impl Render for ConfigurationView {
1269    fn render(&mut self, _: &mut Window, cx: &mut Context<Self>) -> impl IntoElement {
1270        let env_var_set = self.state.read(cx).api_key_state.is_from_env_var();
1271        let configured_card_label = if env_var_set {
1272            format!("API key set in {API_KEY_ENV_VAR_NAME} environment variable")
1273        } else {
1274            let api_url = OpenAiLanguageModelProvider::api_url(cx);
1275            if api_url == OPEN_AI_API_URL {
1276                "API key configured".to_string()
1277            } else {
1278                format!("API key configured for {}", api_url)
1279            }
1280        };
1281
1282        let api_key_section = if self.should_render_editor(cx) {
1283            v_flex()
1284                .on_action(cx.listener(Self::save_api_key))
1285                .child(Label::new("To use Zed's agent with OpenAI, you need to add an API key. Follow these steps:"))
1286                .child(
1287                    List::new()
1288                        .child(
1289                            ListBulletItem::new("")
1290                                .child(Label::new("Create one by visiting"))
1291                                .child(ButtonLink::new("OpenAI's console", "https://platform.openai.com/api-keys"))
1292                        )
1293                        .child(
1294                            ListBulletItem::new("Ensure your OpenAI account has credits")
1295                        )
1296                        .child(
1297                            ListBulletItem::new("Paste your API key below and hit enter to start using the agent")
1298                        ),
1299                )
1300                .child(self.api_key_editor.clone())
1301                .child(
1302                    Label::new(format!(
1303                        "You can also set the {API_KEY_ENV_VAR_NAME} environment variable and restart Zed."
1304                    ))
1305                    .size(LabelSize::Small)
1306                    .color(Color::Muted),
1307                )
1308                .child(
1309                    Label::new(
1310                        "Note that having a subscription for another service like GitHub Copilot won't work.",
1311                    )
1312                    .size(LabelSize::Small).color(Color::Muted),
1313                )
1314                .into_any_element()
1315        } else {
1316            ConfiguredApiCard::new(configured_card_label)
1317                .disabled(env_var_set)
1318                .on_click(cx.listener(|this, _, window, cx| this.reset_api_key(window, cx)))
1319                .when(env_var_set, |this| {
1320                    this.tooltip_label(format!("To reset your API key, unset the {API_KEY_ENV_VAR_NAME} environment variable."))
1321                })
1322                .into_any_element()
1323        };
1324
1325        let compatible_api_section = h_flex()
1326            .mt_1p5()
1327            .gap_0p5()
1328            .flex_wrap()
1329            .when(self.should_render_editor(cx), |this| {
1330                this.pt_1p5()
1331                    .border_t_1()
1332                    .border_color(cx.theme().colors().border_variant)
1333            })
1334            .child(
1335                h_flex()
1336                    .gap_2()
1337                    .child(
1338                        Icon::new(IconName::Info)
1339                            .size(IconSize::XSmall)
1340                            .color(Color::Muted),
1341                    )
1342                    .child(Label::new("Zed also supports OpenAI-compatible models.")),
1343            )
1344            .child(
1345                Button::new("docs", "Learn More")
1346                    .icon(IconName::ArrowUpRight)
1347                    .icon_size(IconSize::Small)
1348                    .icon_color(Color::Muted)
1349                    .on_click(move |_, _window, cx| {
1350                        cx.open_url("https://zed.dev/docs/ai/llm-providers#openai-api-compatible")
1351                    }),
1352            );
1353
1354        if self.load_credentials_task.is_some() {
1355            div().child(Label::new("Loading credentials…")).into_any()
1356        } else {
1357            v_flex()
1358                .size_full()
1359                .child(api_key_section)
1360                .child(compatible_api_section)
1361                .into_any()
1362        }
1363    }
1364}
1365
1366#[cfg(test)]
1367mod tests {
1368    use super::*;
1369    use futures::{StreamExt, executor::block_on};
1370    use gpui::TestAppContext;
1371    use language_model::{LanguageModelRequestMessage, LanguageModelRequestTool};
1372    use open_ai::responses::{
1373        ResponseFunctionToolCall, ResponseOutputItem, ResponseOutputMessage, ResponseStatusDetails,
1374        ResponseSummary, ResponseUsage, StreamEvent as ResponsesStreamEvent,
1375    };
1376    use pretty_assertions::assert_eq;
1377
1378    fn map_response_events(events: Vec<ResponsesStreamEvent>) -> Vec<LanguageModelCompletionEvent> {
1379        block_on(async {
1380            OpenAiResponseEventMapper::new()
1381                .map_stream(Box::pin(futures::stream::iter(events.into_iter().map(Ok))))
1382                .collect::<Vec<_>>()
1383                .await
1384                .into_iter()
1385                .map(Result::unwrap)
1386                .collect()
1387        })
1388    }
1389
1390    fn response_item_message(id: &str) -> ResponseOutputItem {
1391        ResponseOutputItem::Message(ResponseOutputMessage {
1392            id: Some(id.to_string()),
1393            role: Some("assistant".to_string()),
1394            status: Some("in_progress".to_string()),
1395            content: vec![],
1396        })
1397    }
1398
    fn response_item_function_call(id: &str, args: Option<&str>) -> ResponseOutputItem {
        ResponseOutputItem::FunctionCall(ResponseFunctionToolCall {
            id: Some(id.to_string()),
            status: Some("in_progress".to_string()),
            name: Some("get_weather".to_string()),
            call_id: Some("call_123".to_string()),
            arguments: args.map(|s| s.to_string()).unwrap_or_default(),
        })
    }

    #[gpui::test]
    fn tiktoken_rs_support(cx: &TestAppContext) {
        let request = LanguageModelRequest {
            thread_id: None,
            prompt_id: None,
            intent: None,
            mode: None,
            messages: vec![LanguageModelRequestMessage {
                role: Role::User,
                content: vec![MessageContent::Text("message".into())],
                cache: false,
                reasoning_details: None,
            }],
            tools: vec![],
            tool_choice: None,
            stop: vec![],
            temperature: None,
            thinking_allowed: true,
        };

        // Validate that every model variant is supported by tiktoken-rs:
        // token counting must succeed and return a nonzero count.
        for model in Model::iter() {
            let count = cx
                .executor()
                .block(count_open_ai_tokens(
                    request.clone(),
                    model,
                    &cx.app.borrow(),
                ))
                .unwrap();
            assert!(count > 0);
        }
    }

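    // A message item, a text delta, and a completed response carrying usage
    // should map to StartMessage, Text, UsageUpdate, and Stop(EndTurn), in
    // that order.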
    #[test]
    fn responses_stream_maps_text_and_usage() {
        let events = vec![
            ResponsesStreamEvent::OutputItemAdded {
                output_index: 0,
                sequence_number: None,
                item: response_item_message("msg_123"),
            },
            ResponsesStreamEvent::OutputTextDelta {
                item_id: "msg_123".into(),
                output_index: 0,
                content_index: Some(0),
                delta: "Hello".into(),
            },
            ResponsesStreamEvent::Completed {
                response: ResponseSummary {
                    usage: Some(ResponseUsage {
                        input_tokens: Some(5),
                        output_tokens: Some(3),
                        total_tokens: Some(8),
                    }),
                    ..Default::default()
                },
            },
        ];

        let mapped = map_response_events(events);
        assert!(matches!(
            mapped[0],
            LanguageModelCompletionEvent::StartMessage { ref message_id } if message_id == "msg_123"
        ));
        assert!(matches!(
            mapped[1],
            LanguageModelCompletionEvent::Text(ref text) if text == "Hello"
        ));
        assert!(matches!(
            mapped[2],
            LanguageModelCompletionEvent::UsageUpdate(TokenUsage {
                input_tokens: 5,
                output_tokens: 3,
                ..
            })
        ));
        assert!(matches!(
            mapped[3],
            LanguageModelCompletionEvent::Stop(StopReason::EndTurn)
        ));
    }

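    // End-to-end check of request serialization: a conversation with system,
    // user (text plus image), and assistant (text, tool call, tool result)
    // messages should produce exactly this Responses API payload.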
    #[test]
    fn into_open_ai_response_builds_complete_payload() {
        let tool_call_id = LanguageModelToolUseId::from("call-42");
        let tool_input = json!({ "city": "Boston" });
        let tool_arguments = serde_json::to_string(&tool_input).unwrap();
        let tool_use = LanguageModelToolUse {
            id: tool_call_id.clone(),
            name: Arc::from("get_weather"),
            raw_input: tool_arguments.clone(),
            input: tool_input,
            is_input_complete: true,
            thought_signature: None,
        };
        let tool_result = LanguageModelToolResult {
            tool_use_id: tool_call_id,
            tool_name: Arc::from("get_weather"),
            is_error: false,
            content: LanguageModelToolResultContent::Text(Arc::from("Sunny")),
            output: Some(json!({ "forecast": "Sunny" })),
        };
        let user_image = LanguageModelImage {
            source: SharedString::from("aGVsbG8="),
            size: None,
        };
        let expected_image_url = user_image.to_base64_url();

        let request = LanguageModelRequest {
            thread_id: Some("thread-123".into()),
            prompt_id: None,
            intent: None,
            mode: None,
            messages: vec![
                LanguageModelRequestMessage {
                    role: Role::System,
                    content: vec![MessageContent::Text("System context".into())],
                    cache: false,
                    reasoning_details: None,
                },
                LanguageModelRequestMessage {
                    role: Role::User,
                    content: vec![
                        MessageContent::Text("Please check the weather.".into()),
                        MessageContent::Image(user_image),
                    ],
                    cache: false,
                    reasoning_details: None,
                },
                LanguageModelRequestMessage {
                    role: Role::Assistant,
                    content: vec![
                        MessageContent::Text("Looking that up.".into()),
                        MessageContent::ToolUse(tool_use),
                    ],
                    cache: false,
                    reasoning_details: None,
                },
                LanguageModelRequestMessage {
                    role: Role::Assistant,
                    content: vec![MessageContent::ToolResult(tool_result)],
                    cache: false,
                    reasoning_details: None,
                },
            ],
            tools: vec![LanguageModelRequestTool {
                name: "get_weather".into(),
                description: "Fetches the weather".into(),
                input_schema: json!({ "type": "object" }),
            }],
            tool_choice: Some(LanguageModelToolChoice::Any),
            stop: vec!["<STOP>".into()],
            temperature: None,
            thinking_allowed: false,
        };

        let response = into_open_ai_response(
            request,
            "custom-model",
            true,
            true,
            Some(2048),
            Some(ReasoningEffort::Low),
        );

        let serialized = serde_json::to_value(&response).unwrap();
        let expected = json!({
            "model": "custom-model",
            "input": [
                {
                    "type": "message",
                    "role": "system",
                    "content": [
                        { "type": "input_text", "text": "System context" }
                    ]
                },
                {
                    "type": "message",
                    "role": "user",
                    "content": [
                        { "type": "input_text", "text": "Please check the weather." },
                        { "type": "input_image", "image_url": expected_image_url }
                    ]
                },
                {
                    "type": "message",
                    "role": "assistant",
                    "status": "completed",
                    "content": [
                        { "type": "output_text", "text": "Looking that up.", "annotations": [] }
                    ]
                },
                {
                    "type": "function_call",
                    "call_id": "call-42",
                    "name": "get_weather",
                    "arguments": tool_arguments
                },
                {
                    "type": "function_call_output",
                    "call_id": "call-42",
                    "output": "{\"forecast\":\"Sunny\"}"
                }
            ],
            "stream": true,
            "max_output_tokens": 2048,
            "parallel_tool_calls": true,
            "tool_choice": "required",
            "tools": [
                {
                    "type": "function",
                    "name": "get_weather",
                    "description": "Fetches the weather",
                    "parameters": { "type": "object" }
                }
            ],
            "prompt_cache_key": "thread-123",
            "reasoning": { "effort": "low" }
        });

        assert_eq!(serialized, expected);
    }

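    // Function-call arguments may arrive split across delta events; the
    // ToolUse event should only be emitted once the arguments are complete,
    // followed by Stop(ToolUse).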
    #[test]
    fn responses_stream_maps_tool_calls() {
        let events = vec![
            ResponsesStreamEvent::OutputItemAdded {
                output_index: 0,
                sequence_number: None,
                item: response_item_function_call("item_fn", Some("{\"city\":\"Bos")),
            },
            ResponsesStreamEvent::FunctionCallArgumentsDelta {
                item_id: "item_fn".into(),
                output_index: 0,
                delta: "ton\"}".into(),
                sequence_number: None,
            },
            ResponsesStreamEvent::FunctionCallArgumentsDone {
                item_id: "item_fn".into(),
                output_index: 0,
                arguments: "{\"city\":\"Boston\"}".into(),
                sequence_number: None,
            },
            ResponsesStreamEvent::Completed {
                response: ResponseSummary::default(),
            },
        ];

        let mapped = map_response_events(events);
        assert!(matches!(
            mapped[0],
            LanguageModelCompletionEvent::ToolUse(LanguageModelToolUse {
                ref id,
                ref name,
                ref raw_input,
                ..
            }) if id.to_string() == "call_123"
                && name.as_ref() == "get_weather"
                && raw_input == "{\"city\":\"Boston\"}"
        ));
        assert!(matches!(
            mapped[1],
            LanguageModelCompletionEvent::Stop(StopReason::ToolUse)
        ));
    }

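    // An incomplete response whose status reason is "max_output_tokens"
    // should surface the final usage and map to Stop(MaxTokens).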
    #[test]
    fn responses_stream_uses_max_tokens_stop_reason() {
        let events = vec![ResponsesStreamEvent::Incomplete {
            response: ResponseSummary {
                status_details: Some(ResponseStatusDetails {
                    reason: Some("max_output_tokens".into()),
                    r#type: Some("incomplete".into()),
                    error: None,
                }),
                usage: Some(ResponseUsage {
                    input_tokens: Some(10),
                    output_tokens: Some(20),
                    total_tokens: Some(30),
                }),
                ..Default::default()
            },
        }];

        let mapped = map_response_events(events);
        assert!(matches!(
            mapped[0],
            LanguageModelCompletionEvent::UsageUpdate(TokenUsage {
                input_tokens: 10,
                output_tokens: 20,
                ..
            })
        ));
        assert!(matches!(
            mapped[1],
            LanguageModelCompletionEvent::Stop(StopReason::MaxTokens)
        ));
    }

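    // Parallel tool calls: two function-call items in one response should
    // yield one ToolUse event each, then a single Stop(ToolUse).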
    #[test]
    fn responses_stream_handles_multiple_tool_calls() {
        let events = vec![
            ResponsesStreamEvent::OutputItemAdded {
                output_index: 0,
                sequence_number: None,
                item: response_item_function_call("item_fn1", Some("{\"city\":\"NYC\"}")),
            },
            ResponsesStreamEvent::FunctionCallArgumentsDone {
                item_id: "item_fn1".into(),
                output_index: 0,
                arguments: "{\"city\":\"NYC\"}".into(),
                sequence_number: None,
            },
            ResponsesStreamEvent::OutputItemAdded {
                output_index: 1,
                sequence_number: None,
                item: response_item_function_call("item_fn2", Some("{\"city\":\"LA\"}")),
            },
            ResponsesStreamEvent::FunctionCallArgumentsDone {
                item_id: "item_fn2".into(),
                output_index: 1,
                arguments: "{\"city\":\"LA\"}".into(),
                sequence_number: None,
            },
            ResponsesStreamEvent::Completed {
                response: ResponseSummary::default(),
            },
        ];

        let mapped = map_response_events(events);
        assert_eq!(mapped.len(), 3);
        assert!(matches!(
            mapped[0],
            LanguageModelCompletionEvent::ToolUse(LanguageModelToolUse { ref raw_input, .. })
            if raw_input == "{\"city\":\"NYC\"}"
        ));
        assert!(matches!(
            mapped[1],
            LanguageModelCompletionEvent::ToolUse(LanguageModelToolUse { ref raw_input, .. })
            if raw_input == "{\"city\":\"LA\"}"
        ));
        assert!(matches!(
            mapped[2],
            LanguageModelCompletionEvent::Stop(StopReason::ToolUse)
        ));
    }

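    // Interleaved output: a text message followed by a function call should
    // preserve ordering, and the tool call should determine the stop reason.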
    #[test]
    fn responses_stream_handles_mixed_text_and_tool_calls() {
        let events = vec![
            ResponsesStreamEvent::OutputItemAdded {
                output_index: 0,
                sequence_number: None,
                item: response_item_message("msg_123"),
            },
            ResponsesStreamEvent::OutputTextDelta {
                item_id: "msg_123".into(),
                output_index: 0,
                content_index: Some(0),
                delta: "Let me check that".into(),
            },
            ResponsesStreamEvent::OutputItemAdded {
                output_index: 1,
                sequence_number: None,
                item: response_item_function_call("item_fn", Some("{\"query\":\"test\"}")),
            },
            ResponsesStreamEvent::FunctionCallArgumentsDone {
                item_id: "item_fn".into(),
                output_index: 1,
                arguments: "{\"query\":\"test\"}".into(),
                sequence_number: None,
            },
            ResponsesStreamEvent::Completed {
                response: ResponseSummary::default(),
            },
        ];

        let mapped = map_response_events(events);
        assert!(matches!(
            mapped[0],
            LanguageModelCompletionEvent::StartMessage { .. }
        ));
        assert!(matches!(
            mapped[1],
            LanguageModelCompletionEvent::Text(ref text) if text == "Let me check that"
        ));
        assert!(matches!(
            mapped[2],
            LanguageModelCompletionEvent::ToolUse(LanguageModelToolUse { ref raw_input, .. })
            if raw_input == "{\"query\":\"test\"}"
        ));
        assert!(matches!(
            mapped[3],
            LanguageModelCompletionEvent::Stop(StopReason::ToolUse)
        ));
    }

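    // Malformed tool-call arguments should not be silently dropped: the
    // mapper reports them via ToolUseJsonParseError, preserving the raw input.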
    #[test]
    fn responses_stream_handles_json_parse_error() {
        let events = vec![
            ResponsesStreamEvent::OutputItemAdded {
                output_index: 0,
                sequence_number: None,
                item: response_item_function_call("item_fn", Some("{invalid json")),
            },
            ResponsesStreamEvent::FunctionCallArgumentsDone {
                item_id: "item_fn".into(),
                output_index: 0,
                arguments: "{invalid json".into(),
                sequence_number: None,
            },
            ResponsesStreamEvent::Completed {
                response: ResponseSummary::default(),
            },
        ];

        let mapped = map_response_events(events);
        assert!(matches!(
            mapped[0],
            LanguageModelCompletionEvent::ToolUseJsonParseError {
                ref raw_input,
                ..
            } if raw_input.as_ref() == "{invalid json"
        ));
    }

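    // If the stream is cut off before FunctionCallArgumentsDone, the final
    // Incomplete summary carries the full call; the mapper should recover the
    // tool use from it and stop with MaxTokens.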
    #[test]
    fn responses_stream_handles_incomplete_function_call() {
        let events = vec![
            ResponsesStreamEvent::OutputItemAdded {
                output_index: 0,
                sequence_number: None,
                item: response_item_function_call("item_fn", Some("{\"city\":")),
            },
            ResponsesStreamEvent::FunctionCallArgumentsDelta {
                item_id: "item_fn".into(),
                output_index: 0,
                delta: "\"Boston\"".into(),
                sequence_number: None,
            },
            ResponsesStreamEvent::Incomplete {
                response: ResponseSummary {
                    status_details: Some(ResponseStatusDetails {
                        reason: Some("max_output_tokens".into()),
                        r#type: Some("incomplete".into()),
                        error: None,
                    }),
                    output: vec![response_item_function_call(
                        "item_fn",
                        Some("{\"city\":\"Boston\"}"),
                    )],
                    ..Default::default()
                },
            },
        ];

        let mapped = map_response_events(events);
        assert!(matches!(
            mapped[0],
            LanguageModelCompletionEvent::ToolUse(LanguageModelToolUse { ref raw_input, .. })
            if raw_input == "{\"city\":\"Boston\"}"
        ));
        assert!(matches!(
            mapped[1],
            LanguageModelCompletionEvent::Stop(StopReason::MaxTokens)
        ));
    }

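    // Conversely, when FunctionCallArgumentsDone has already emitted the tool
    // use, the copy of the call echoed in the Incomplete summary must not
    // produce a duplicate ToolUse event.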
    #[test]
    fn responses_stream_incomplete_does_not_duplicate_tool_calls() {
        let events = vec![
            ResponsesStreamEvent::OutputItemAdded {
                output_index: 0,
                sequence_number: None,
                item: response_item_function_call("item_fn", Some("{\"city\":\"Boston\"}")),
            },
            ResponsesStreamEvent::FunctionCallArgumentsDone {
                item_id: "item_fn".into(),
                output_index: 0,
                arguments: "{\"city\":\"Boston\"}".into(),
                sequence_number: None,
            },
            ResponsesStreamEvent::Incomplete {
                response: ResponseSummary {
                    status_details: Some(ResponseStatusDetails {
                        reason: Some("max_output_tokens".into()),
                        r#type: Some("incomplete".into()),
                        error: None,
                    }),
                    output: vec![response_item_function_call(
                        "item_fn",
                        Some("{\"city\":\"Boston\"}"),
                    )],
                    ..Default::default()
                },
            },
        ];

        let mapped = map_response_events(events);
        assert_eq!(mapped.len(), 2);
        assert!(matches!(
            mapped[0],
            LanguageModelCompletionEvent::ToolUse(LanguageModelToolUse { ref raw_input, .. })
            if raw_input == "{\"city\":\"Boston\"}"
        ));
        assert!(matches!(
            mapped[1],
            LanguageModelCompletionEvent::Stop(StopReason::MaxTokens)
        ));
    }
}