use anyhow::{Result, anyhow};
use collections::{BTreeMap, HashMap};
use futures::{FutureExt, Stream, StreamExt, future::BoxFuture};
use gpui::{AnyView, App, AsyncApp, Context, Entity, SharedString, Task, Window};
use http_client::HttpClient;
use language_model::{
    ApiKeyState, AuthenticateError, EnvVar, IconOrSvg, LanguageModel, LanguageModelCompletionError,
    LanguageModelCompletionEvent, LanguageModelId, LanguageModelImage, LanguageModelName,
    LanguageModelProvider, LanguageModelProviderId, LanguageModelProviderName,
    LanguageModelProviderState, LanguageModelRequest, LanguageModelRequestMessage,
    LanguageModelToolChoice, LanguageModelToolResult, LanguageModelToolResultContent,
    LanguageModelToolUse, LanguageModelToolUseId, MessageContent, RateLimiter, Role, StopReason,
    TokenUsage, env_var,
};
use menu;
use open_ai::{
    ImageUrl, Model, OPEN_AI_API_URL, ReasoningEffort, ResponseStreamEvent,
    responses::{
        Request as ResponseRequest, ResponseOutputItem, ResponseSummary as ResponsesSummary,
        ResponseUsage as ResponsesUsage, StreamEvent as ResponsesStreamEvent, stream_response,
    },
    stream_completion,
};
use serde_json::{Value, json};
use settings::{OpenAiAvailableModel as AvailableModel, Settings, SettingsStore};
use std::pin::Pin;
use std::str::FromStr as _;
use std::sync::{Arc, LazyLock};
use strum::IntoEnumIterator;
use ui::{ButtonLink, ConfiguredApiCard, List, ListBulletItem, prelude::*};
use ui_input::InputField;
use util::ResultExt;

const PROVIDER_ID: LanguageModelProviderId = language_model::OPEN_AI_PROVIDER_ID;
const PROVIDER_NAME: LanguageModelProviderName = language_model::OPEN_AI_PROVIDER_NAME;

const API_KEY_ENV_VAR_NAME: &str = "OPENAI_API_KEY";
static API_KEY_ENV_VAR: LazyLock<EnvVar> = env_var!(API_KEY_ENV_VAR_NAME);

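/// User-configurable settings for the OpenAI provider: the API endpoint and
/// any extra models made available on top of the built-in ones.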
#[derive(Default, Clone, Debug, PartialEq)]
pub struct OpenAiSettings {
    pub api_url: String,
    pub available_models: Vec<AvailableModel>,
}

pub struct OpenAiLanguageModelProvider {
    http_client: Arc<dyn HttpClient>,
    state: Entity<State>,
}

pub struct State {
    api_key_state: ApiKeyState,
}

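// API keys are tracked per API URL, so a key stored for one endpoint isn't
// reused for another OpenAI-compatible endpoint.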
impl State {
    fn is_authenticated(&self) -> bool {
        self.api_key_state.has_key()
    }

    fn set_api_key(&mut self, api_key: Option<String>, cx: &mut Context<Self>) -> Task<Result<()>> {
        let api_url = OpenAiLanguageModelProvider::api_url(cx);
        self.api_key_state
            .store(api_url, api_key, |this| &mut this.api_key_state, cx)
    }

    fn authenticate(&mut self, cx: &mut Context<Self>) -> Task<Result<(), AuthenticateError>> {
        let api_url = OpenAiLanguageModelProvider::api_url(cx);
        self.api_key_state
            .load_if_needed(api_url, |this| &mut this.api_key_state, cx)
    }
}

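// A settings change can point the provider at a different API URL, so the
// observer below re-keys the stored credential whenever global settings change.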
impl OpenAiLanguageModelProvider {
    pub fn new(http_client: Arc<dyn HttpClient>, cx: &mut App) -> Self {
        let state = cx.new(|cx| {
            cx.observe_global::<SettingsStore>(|this: &mut State, cx| {
                let api_url = Self::api_url(cx);
                this.api_key_state
                    .handle_url_change(api_url, |this| &mut this.api_key_state, cx);
                cx.notify();
            })
            .detach();
            State {
                api_key_state: ApiKeyState::new(Self::api_url(cx), (*API_KEY_ENV_VAR).clone()),
            }
        });

        Self { http_client, state }
    }

    fn create_language_model(&self, model: open_ai::Model) -> Arc<dyn LanguageModel> {
        Arc::new(OpenAiLanguageModel {
            id: LanguageModelId::from(model.id().to_string()),
            model,
            state: self.state.clone(),
            http_client: self.http_client.clone(),
            request_limiter: RateLimiter::new(4),
        })
    }

    fn settings(cx: &App) -> &OpenAiSettings {
        &crate::AllLanguageModelSettings::get_global(cx).openai
    }

    fn api_url(cx: &App) -> SharedString {
        let api_url = &Self::settings(cx).api_url;
        if api_url.is_empty() {
            open_ai::OPEN_AI_API_URL.into()
        } else {
            SharedString::new(api_url.as_str())
        }
    }
}

impl LanguageModelProviderState for OpenAiLanguageModelProvider {
    type ObservableEntity = State;

    fn observable_entity(&self) -> Option<Entity<Self::ObservableEntity>> {
        Some(self.state.clone())
    }
}

impl LanguageModelProvider for OpenAiLanguageModelProvider {
    fn id(&self) -> LanguageModelProviderId {
        PROVIDER_ID
    }

    fn name(&self) -> LanguageModelProviderName {
        PROVIDER_NAME
    }

    fn icon(&self) -> IconOrSvg {
        IconOrSvg::Icon(IconName::AiOpenAi)
    }

    fn default_model(&self, _cx: &App) -> Option<Arc<dyn LanguageModel>> {
        Some(self.create_language_model(open_ai::Model::default()))
    }

    fn default_fast_model(&self, _cx: &App) -> Option<Arc<dyn LanguageModel>> {
        Some(self.create_language_model(open_ai::Model::default_fast()))
    }

    fn provided_models(&self, cx: &App) -> Vec<Arc<dyn LanguageModel>> {
        let mut models = BTreeMap::default();

        // Add base models from open_ai::Model::iter().
        for model in open_ai::Model::iter() {
            if !matches!(model, open_ai::Model::Custom { .. }) {
                models.insert(model.id().to_string(), model);
            }
        }

        // Override with available models from settings.
        for model in &OpenAiLanguageModelProvider::settings(cx).available_models {
            models.insert(
                model.name.clone(),
                open_ai::Model::Custom {
                    name: model.name.clone(),
                    display_name: model.display_name.clone(),
                    max_tokens: model.max_tokens,
                    max_output_tokens: model.max_output_tokens,
                    max_completion_tokens: model.max_completion_tokens,
                    reasoning_effort: model.reasoning_effort.clone(),
                    supports_chat_completions: model.capabilities.chat_completions,
                },
            );
        }

        models
            .into_values()
            .map(|model| self.create_language_model(model))
            .collect()
    }

    fn is_authenticated(&self, cx: &App) -> bool {
        self.state.read(cx).is_authenticated()
    }

    fn authenticate(&self, cx: &mut App) -> Task<Result<(), AuthenticateError>> {
        self.state.update(cx, |state, cx| state.authenticate(cx))
    }

    fn configuration_view(
        &self,
        _target_agent: language_model::ConfigurationViewTargetAgent,
        window: &mut Window,
        cx: &mut App,
    ) -> AnyView {
        cx.new(|cx| ConfigurationView::new(self.state.clone(), window, cx))
            .into()
    }

    fn reset_credentials(&self, cx: &mut App) -> Task<Result<()>> {
        self.state
            .update(cx, |state, cx| state.set_api_key(None, cx))
    }
}

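// A single model exposed by the provider. All requests go through
// `request_limiter`, which bounds the number of concurrent API calls.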
pub struct OpenAiLanguageModel {
    id: LanguageModelId,
    model: open_ai::Model,
    state: Entity<State>,
    http_client: Arc<dyn HttpClient>,
    request_limiter: RateLimiter,
}

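// Two transport paths: `stream_completion` talks to the Chat Completions API,
// while `stream_response` talks to the Responses API. The `LanguageModel`
// impl below picks one based on the model's capabilities.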
impl OpenAiLanguageModel {
    fn stream_completion(
        &self,
        request: open_ai::Request,
        cx: &AsyncApp,
    ) -> BoxFuture<'static, Result<futures::stream::BoxStream<'static, Result<ResponseStreamEvent>>>>
    {
        let http_client = self.http_client.clone();

        // `read_with` returns an error if the app has shut down; surface that
        // instead of panicking.
        let Ok((api_key, api_url)) = self.state.read_with(cx, |state, cx| {
            let api_url = OpenAiLanguageModelProvider::api_url(cx);
            (state.api_key_state.key(&api_url), api_url)
        }) else {
            return futures::future::ready(Err(anyhow!("App state dropped"))).boxed();
        };

        let future = self.request_limiter.stream(async move {
            let provider = PROVIDER_NAME;
            let Some(api_key) = api_key else {
                return Err(LanguageModelCompletionError::NoApiKey { provider });
            };
            let request = stream_completion(
                http_client.as_ref(),
                provider.0.as_str(),
                &api_url,
                &api_key,
                request,
            );
            let response = request.await?;
            Ok(response)
        });

        async move { Ok(future.await?.boxed()) }.boxed()
    }

    fn stream_response(
        &self,
        request: ResponseRequest,
        cx: &AsyncApp,
    ) -> BoxFuture<'static, Result<futures::stream::BoxStream<'static, Result<ResponsesStreamEvent>>>>
    {
        let http_client = self.http_client.clone();

        let Ok((api_key, api_url)) = self.state.read_with(cx, |state, cx| {
            let api_url = OpenAiLanguageModelProvider::api_url(cx);
            (state.api_key_state.key(&api_url), api_url)
        }) else {
            return futures::future::ready(Err(anyhow!("App state dropped"))).boxed();
        };

        let provider = PROVIDER_NAME;
        let future = self.request_limiter.stream(async move {
            let Some(api_key) = api_key else {
                return Err(LanguageModelCompletionError::NoApiKey { provider });
            };
            let request = stream_response(
                http_client.as_ref(),
                provider.0.as_str(),
                &api_url,
                &api_key,
                request,
            );
            let response = request.await?;
            Ok(response)
        });

        async move { Ok(future.await?.boxed()) }.boxed()
    }
}

impl LanguageModel for OpenAiLanguageModel {
    fn id(&self) -> LanguageModelId {
        self.id.clone()
    }

    fn name(&self) -> LanguageModelName {
        LanguageModelName::from(self.model.display_name().to_string())
    }

    fn provider_id(&self) -> LanguageModelProviderId {
        PROVIDER_ID
    }

    fn provider_name(&self) -> LanguageModelProviderName {
        PROVIDER_NAME
    }

    fn supports_tools(&self) -> bool {
        true
    }

    fn supports_images(&self) -> bool {
        use open_ai::Model;
        match &self.model {
            Model::FourOmni
            | Model::FourOmniMini
            | Model::FourPointOne
            | Model::FourPointOneMini
            | Model::FourPointOneNano
            | Model::Five
            | Model::FiveCodex
            | Model::FiveMini
            | Model::FiveNano
            | Model::FivePointOne
            | Model::FivePointTwo
            | Model::O1
            | Model::O3
            | Model::O4Mini => true,
            Model::ThreePointFiveTurbo
            | Model::Four
            | Model::FourTurbo
            | Model::O3Mini
            | Model::Custom { .. } => false,
        }
    }

    fn supports_tool_choice(&self, choice: LanguageModelToolChoice) -> bool {
        match choice {
            LanguageModelToolChoice::Auto => true,
            LanguageModelToolChoice::Any => true,
            LanguageModelToolChoice::None => true,
        }
    }

    fn telemetry_id(&self) -> String {
        format!("openai/{}", self.model.id())
    }

    fn max_token_count(&self) -> u64 {
        self.model.max_token_count()
    }

    fn max_output_tokens(&self) -> Option<u64> {
        self.model.max_output_tokens()
    }

    fn count_tokens(
        &self,
        request: LanguageModelRequest,
        cx: &App,
    ) -> BoxFuture<'static, Result<u64>> {
        count_open_ai_tokens(request, self.model.clone(), cx)
    }

    fn stream_completion(
        &self,
        request: LanguageModelRequest,
        cx: &AsyncApp,
    ) -> BoxFuture<
        'static,
        Result<
            futures::stream::BoxStream<
                'static,
                Result<LanguageModelCompletionEvent, LanguageModelCompletionError>,
            >,
            LanguageModelCompletionError,
        >,
    > {
        if self.model.supports_chat_completions() {
            let request = into_open_ai(
                request,
                self.model.id(),
                self.model.supports_parallel_tool_calls(),
                self.model.supports_prompt_cache_key(),
                self.max_output_tokens(),
                self.model.reasoning_effort(),
            );
            let completions = self.stream_completion(request, cx);
            async move {
                let mapper = OpenAiEventMapper::new();
                Ok(mapper.map_stream(completions.await?).boxed())
            }
            .boxed()
        } else {
            let request = into_open_ai_response(
                request,
                self.model.id(),
                self.model.supports_parallel_tool_calls(),
                self.model.supports_prompt_cache_key(),
                self.max_output_tokens(),
                self.model.reasoning_effort(),
            );
            let completions = self.stream_response(request, cx);
            async move {
                let mapper = OpenAiResponseEventMapper::new();
                Ok(mapper.map_stream(completions.await?).boxed())
            }
            .boxed()
        }
    }
}

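/// Converts an agent request into a Chat Completions request, flattening each
/// message's content parts into OpenAI's message/tool-call shape.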
pub fn into_open_ai(
    request: LanguageModelRequest,
    model_id: &str,
    supports_parallel_tool_calls: bool,
    supports_prompt_cache_key: bool,
    max_output_tokens: Option<u64>,
    reasoning_effort: Option<ReasoningEffort>,
) -> open_ai::Request {
    // The older o1-preview and o1-mini models don't support streaming.
    let stream = !model_id.starts_with("o1-");

    let mut messages = Vec::new();
    for message in request.messages {
        for content in message.content {
            match content {
                MessageContent::Text(text) | MessageContent::Thinking { text, .. } => {
                    if !text.trim().is_empty() {
                        add_message_content_part(
                            open_ai::MessagePart::Text { text },
                            message.role,
                            &mut messages,
                        );
                    }
                }
                MessageContent::RedactedThinking(_) => {}
                MessageContent::Image(image) => {
                    add_message_content_part(
                        open_ai::MessagePart::Image {
                            image_url: ImageUrl {
                                url: image.to_base64_url(),
                                detail: None,
                            },
                        },
                        message.role,
                        &mut messages,
                    );
                }
                MessageContent::ToolUse(tool_use) => {
                    let tool_call = open_ai::ToolCall {
                        id: tool_use.id.to_string(),
                        content: open_ai::ToolCallContent::Function {
                            function: open_ai::FunctionContent {
                                name: tool_use.name.to_string(),
                                arguments: serde_json::to_string(&tool_use.input)
                                    .unwrap_or_default(),
                            },
                        },
                    };

                    if let Some(open_ai::RequestMessage::Assistant { tool_calls, .. }) =
                        messages.last_mut()
                    {
                        tool_calls.push(tool_call);
                    } else {
                        messages.push(open_ai::RequestMessage::Assistant {
                            content: None,
                            tool_calls: vec![tool_call],
                        });
                    }
                }
                MessageContent::ToolResult(tool_result) => {
                    let content = match &tool_result.content {
                        LanguageModelToolResultContent::Text(text) => {
                            vec![open_ai::MessagePart::Text {
                                text: text.to_string(),
                            }]
                        }
                        LanguageModelToolResultContent::Image(image) => {
                            vec![open_ai::MessagePart::Image {
                                image_url: ImageUrl {
                                    url: image.to_base64_url(),
                                    detail: None,
                                },
                            }]
                        }
                    };

                    messages.push(open_ai::RequestMessage::Tool {
                        content: content.into(),
                        tool_call_id: tool_result.tool_use_id.to_string(),
                    });
                }
            }
        }
    }

    open_ai::Request {
        model: model_id.into(),
        messages,
        stream,
        stop: request.stop,
        temperature: request.temperature.or(Some(1.0)),
        max_completion_tokens: max_output_tokens,
        parallel_tool_calls: if supports_parallel_tool_calls && !request.tools.is_empty() {
            // Disable parallel tool calls, as the Agent currently expects a maximum of one per turn.
            Some(false)
        } else {
            None
        },
        prompt_cache_key: if supports_prompt_cache_key {
            request.thread_id
        } else {
            None
        },
        tools: request
            .tools
            .into_iter()
            .map(|tool| open_ai::ToolDefinition::Function {
                function: open_ai::FunctionDefinition {
                    name: tool.name,
                    description: Some(tool.description),
                    parameters: Some(tool.input_schema),
                },
            })
            .collect(),
        tool_choice: request.tool_choice.map(|choice| match choice {
            LanguageModelToolChoice::Auto => open_ai::ToolChoice::Auto,
            LanguageModelToolChoice::Any => open_ai::ToolChoice::Required,
            LanguageModelToolChoice::None => open_ai::ToolChoice::None,
        }),
        reasoning_effort,
    }
}

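/// Converts an agent request into a Responses API request. Input items are
/// built as raw JSON values in the shape the Responses API expects.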
pub fn into_open_ai_response(
    request: LanguageModelRequest,
    model_id: &str,
    supports_parallel_tool_calls: bool,
    supports_prompt_cache_key: bool,
    max_output_tokens: Option<u64>,
    reasoning_effort: Option<ReasoningEffort>,
) -> ResponseRequest {
    let stream = !model_id.starts_with("o1-");

    let LanguageModelRequest {
        thread_id,
        prompt_id: _,
        intent: _,
        mode: _,
        messages,
        tools,
        tool_choice,
        stop: _,
        temperature,
        thinking_allowed: _,
    } = request;

    let mut input_items = Vec::new();
    for (index, message) in messages.into_iter().enumerate() {
        append_message_to_response_items(message, index, &mut input_items);
    }

    let tools: Vec<_> = tools
        .into_iter()
        .map(|tool| open_ai::responses::ToolDefinition::Function {
            name: tool.name,
            description: Some(tool.description),
            parameters: Some(tool.input_schema),
            strict: None,
        })
        .collect();

    ResponseRequest {
        model: model_id.into(),
        input: input_items,
        stream,
        temperature,
        top_p: None,
        max_output_tokens,
        parallel_tool_calls: if tools.is_empty() {
            None
        } else {
            Some(supports_parallel_tool_calls)
        },
        tool_choice: tool_choice.map(|choice| match choice {
            LanguageModelToolChoice::Auto => open_ai::ToolChoice::Auto,
            LanguageModelToolChoice::Any => open_ai::ToolChoice::Required,
            LanguageModelToolChoice::None => open_ai::ToolChoice::None,
        }),
        tools,
        prompt_cache_key: if supports_prompt_cache_key {
            thread_id
        } else {
            None
        },
        reasoning: reasoning_effort.map(|effort| open_ai::responses::ReasoningConfig { effort }),
    }
}

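// Text and image parts are buffered per message and flushed into a single
// "message" item; tool calls and tool results become standalone input items.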
fn append_message_to_response_items(
    message: LanguageModelRequestMessage,
    index: usize,
    input_items: &mut Vec<Value>,
) {
    let mut content_parts: Vec<Value> = Vec::new();

    for content in message.content {
        match content {
            MessageContent::Text(text) => {
                push_response_text_part(&message.role, text, &mut content_parts);
            }
            MessageContent::Thinking { text, .. } => {
                push_response_text_part(&message.role, text, &mut content_parts);
            }
            MessageContent::RedactedThinking(_) => {}
            MessageContent::Image(image) => {
                push_response_image_part(&message.role, image, &mut content_parts);
            }
            MessageContent::ToolUse(tool_use) => {
                flush_response_parts(&message.role, index, &mut content_parts, input_items);
                let call_id = tool_use.id.to_string();
                input_items.push(json!({
                    "type": "function_call",
                    "call_id": call_id,
                    "name": tool_use.name,
                    "arguments": tool_use.raw_input,
                }));
            }
            MessageContent::ToolResult(tool_result) => {
                flush_response_parts(&message.role, index, &mut content_parts, input_items);
                input_items.push(json!({
                    "type": "function_call_output",
                    "call_id": tool_result.tool_use_id.to_string(),
                    "output": tool_result_output(&tool_result),
                }));
            }
        }
    }

    flush_response_parts(&message.role, index, &mut content_parts, input_items);
}

fn push_response_text_part(role: &Role, text: impl Into<String>, parts: &mut Vec<Value>) {
    let text = text.into();
    if text.trim().is_empty() {
        return;
    }

    match role {
        Role::Assistant => parts.push(json!({
            "type": "output_text",
            "text": text,
            "annotations": [],
        })),
        _ => parts.push(json!({
            "type": "input_text",
            "text": text,
        })),
    }
}

fn push_response_image_part(role: &Role, image: LanguageModelImage, parts: &mut Vec<Value>) {
    match role {
        Role::Assistant => parts.push(json!({
            "type": "output_text",
            "text": "[image omitted]",
            "annotations": [],
        })),
        _ => parts.push(json!({
            "type": "input_image",
            "image_url": image.to_base64_url(),
        })),
    }
}

fn flush_response_parts(
    role: &Role,
    _index: usize,
    parts: &mut Vec<Value>,
    input_items: &mut Vec<Value>,
) {
    if parts.is_empty() {
        return;
    }

    let item = match role {
        Role::Assistant => json!({
            "type": "message",
            "role": "assistant",
            "status": "completed",
            "content": parts.clone(),
        }),
        Role::User => json!({
            "type": "message",
            "role": "user",
            "content": parts.clone(),
        }),
        Role::System => json!({
            "type": "message",
            "role": "system",
            "content": parts.clone(),
        }),
    };

    input_items.push(item);
    parts.clear();
}

fn tool_result_output(result: &LanguageModelToolResult) -> String {
    if let Some(output) = &result.output {
        match output {
            serde_json::Value::String(text) => text.clone(),
            serde_json::Value::Null => String::new(),
            _ => output.to_string(),
        }
    } else {
        match &result.content {
            LanguageModelToolResultContent::Text(text) => text.to_string(),
            LanguageModelToolResultContent::Image(image) => image.to_base64_url(),
        }
    }
}

fn add_message_content_part(
    new_part: open_ai::MessagePart,
    role: Role,
    messages: &mut Vec<open_ai::RequestMessage>,
) {
    match (role, messages.last_mut()) {
        (Role::User, Some(open_ai::RequestMessage::User { content }))
        | (
            Role::Assistant,
            Some(open_ai::RequestMessage::Assistant {
                content: Some(content),
                ..
            }),
        )
        | (Role::System, Some(open_ai::RequestMessage::System { content, .. })) => {
            content.push_part(new_part);
        }
        _ => {
            messages.push(match role {
                Role::User => open_ai::RequestMessage::User {
                    content: open_ai::MessageContent::from(vec![new_part]),
                },
                Role::Assistant => open_ai::RequestMessage::Assistant {
                    content: Some(open_ai::MessageContent::from(vec![new_part])),
                    tool_calls: Vec::new(),
                },
                Role::System => open_ai::RequestMessage::System {
                    content: open_ai::MessageContent::from(vec![new_part]),
                },
            });
        }
    }
}

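/// Maps Chat Completions stream events to completion events. Tool-call
/// arguments arrive as string fragments keyed by tool-call index, so they are
/// accumulated and only parsed once a finish reason is reported.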
pub struct OpenAiEventMapper {
    tool_calls_by_index: HashMap<usize, RawToolCall>,
}

impl OpenAiEventMapper {
    pub fn new() -> Self {
        Self {
            tool_calls_by_index: HashMap::default(),
        }
    }

    pub fn map_stream(
        mut self,
        events: Pin<Box<dyn Send + Stream<Item = Result<ResponseStreamEvent>>>>,
    ) -> impl Stream<Item = Result<LanguageModelCompletionEvent, LanguageModelCompletionError>>
    {
        events.flat_map(move |event| {
            futures::stream::iter(match event {
                Ok(event) => self.map_event(event),
                Err(error) => vec![Err(LanguageModelCompletionError::from(anyhow!(error)))],
            })
        })
    }

    pub fn map_event(
        &mut self,
        event: ResponseStreamEvent,
    ) -> Vec<Result<LanguageModelCompletionEvent, LanguageModelCompletionError>> {
        let mut events = Vec::new();
        if let Some(usage) = event.usage {
            events.push(Ok(LanguageModelCompletionEvent::UsageUpdate(TokenUsage {
                input_tokens: usage.prompt_tokens,
                output_tokens: usage.completion_tokens,
                cache_creation_input_tokens: 0,
                cache_read_input_tokens: 0,
            })));
        }

        let Some(choice) = event.choices.first() else {
            return events;
        };

        if let Some(delta) = choice.delta.as_ref() {
            if let Some(content) = delta.content.clone() {
                events.push(Ok(LanguageModelCompletionEvent::Text(content)));
            }

            if let Some(tool_calls) = delta.tool_calls.as_ref() {
                for tool_call in tool_calls {
                    let entry = self.tool_calls_by_index.entry(tool_call.index).or_default();

                    if let Some(tool_id) = tool_call.id.clone() {
                        entry.id = tool_id;
                    }

                    if let Some(function) = tool_call.function.as_ref() {
                        if let Some(name) = function.name.clone() {
                            entry.name = name;
                        }

                        if let Some(arguments) = function.arguments.clone() {
                            entry.arguments.push_str(&arguments);
                        }
                    }
                }
            }
        }

        match choice.finish_reason.as_deref() {
            Some("stop") => {
                events.push(Ok(LanguageModelCompletionEvent::Stop(StopReason::EndTurn)));
            }
            Some("tool_calls") => {
                events.extend(self.tool_calls_by_index.drain().map(|(_, tool_call)| {
                    match serde_json::Value::from_str(&tool_call.arguments) {
                        Ok(input) => Ok(LanguageModelCompletionEvent::ToolUse(
                            LanguageModelToolUse {
                                id: tool_call.id.clone().into(),
                                name: tool_call.name.as_str().into(),
                                is_input_complete: true,
                                input,
                                raw_input: tool_call.arguments.clone(),
                                thought_signature: None,
                            },
                        )),
                        Err(error) => Ok(LanguageModelCompletionEvent::ToolUseJsonParseError {
                            id: tool_call.id.into(),
                            tool_name: tool_call.name.into(),
                            raw_input: tool_call.arguments.clone().into(),
                            json_parse_error: error.to_string(),
                        }),
                    }
                }));

                events.push(Ok(LanguageModelCompletionEvent::Stop(StopReason::ToolUse)));
            }
            Some(stop_reason) => {
                log::error!("Unexpected OpenAI stop_reason: {stop_reason:?}");
                events.push(Ok(LanguageModelCompletionEvent::Stop(StopReason::EndTurn)));
            }
            None => {}
        }

        events
    }
}

#[derive(Default)]
struct RawToolCall {
    id: String,
    name: String,
    arguments: String,
}

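/// Maps Responses API stream events to completion events. Pending function
/// calls are keyed by output item id until their arguments are complete;
/// `pending_stop_reason` lets an emitted tool call override the final stop
/// reason and prevents duplicate emission from the response summary.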
pub struct OpenAiResponseEventMapper {
    function_calls_by_item: HashMap<String, PendingResponseFunctionCall>,
    pending_stop_reason: Option<StopReason>,
}

#[derive(Default)]
struct PendingResponseFunctionCall {
    call_id: String,
    name: Arc<str>,
    arguments: String,
}

impl OpenAiResponseEventMapper {
    pub fn new() -> Self {
        Self {
            function_calls_by_item: HashMap::default(),
            pending_stop_reason: None,
        }
    }

    pub fn map_stream(
        mut self,
        events: Pin<Box<dyn Send + Stream<Item = Result<ResponsesStreamEvent>>>>,
    ) -> impl Stream<Item = Result<LanguageModelCompletionEvent, LanguageModelCompletionError>>
    {
        events.flat_map(move |event| {
            futures::stream::iter(match event {
                Ok(event) => self.map_event(event),
                Err(error) => vec![Err(LanguageModelCompletionError::from(anyhow!(error)))],
            })
        })
    }

    fn map_event(
        &mut self,
        event: ResponsesStreamEvent,
    ) -> Vec<Result<LanguageModelCompletionEvent, LanguageModelCompletionError>> {
        match event {
            ResponsesStreamEvent::OutputItemAdded { item, .. } => {
                let mut events = Vec::new();

                match &item {
                    ResponseOutputItem::Message(message) => {
                        if let Some(id) = &message.id {
                            events.push(Ok(LanguageModelCompletionEvent::StartMessage {
                                message_id: id.clone(),
                            }));
                        }
                    }
                    ResponseOutputItem::FunctionCall(function_call) => {
                        if let Some(item_id) = function_call.id.clone() {
                            let call_id = function_call
                                .call_id
                                .clone()
                                .or_else(|| function_call.id.clone())
                                .unwrap_or_else(|| item_id.clone());
                            let entry = PendingResponseFunctionCall {
                                call_id,
                                name: Arc::<str>::from(
                                    function_call.name.clone().unwrap_or_default(),
                                ),
                                arguments: function_call.arguments.clone(),
                            };
                            self.function_calls_by_item.insert(item_id, entry);
                        }
                    }
                    ResponseOutputItem::Unknown => {}
                }
                events
            }
            ResponsesStreamEvent::OutputTextDelta { delta, .. } => {
                if delta.is_empty() {
                    Vec::new()
                } else {
                    vec![Ok(LanguageModelCompletionEvent::Text(delta))]
                }
            }
            ResponsesStreamEvent::FunctionCallArgumentsDelta { item_id, delta, .. } => {
                if let Some(entry) = self.function_calls_by_item.get_mut(&item_id) {
                    entry.arguments.push_str(&delta);
                }
                Vec::new()
            }
            ResponsesStreamEvent::FunctionCallArgumentsDone {
                item_id, arguments, ..
            } => {
                if let Some(mut entry) = self.function_calls_by_item.remove(&item_id) {
                    if !arguments.is_empty() {
                        entry.arguments = arguments;
                    }
                    let raw_input = entry.arguments.clone();
                    self.pending_stop_reason = Some(StopReason::ToolUse);
                    match serde_json::from_str::<serde_json::Value>(&entry.arguments) {
                        Ok(input) => {
                            vec![Ok(LanguageModelCompletionEvent::ToolUse(
                                LanguageModelToolUse {
                                    id: LanguageModelToolUseId::from(entry.call_id.clone()),
                                    name: entry.name.clone(),
                                    is_input_complete: true,
                                    input,
                                    raw_input,
                                    thought_signature: None,
                                },
                            ))]
                        }
                        Err(error) => {
                            vec![Ok(LanguageModelCompletionEvent::ToolUseJsonParseError {
                                id: LanguageModelToolUseId::from(entry.call_id.clone()),
                                tool_name: entry.name.clone(),
                                raw_input: Arc::<str>::from(raw_input),
                                json_parse_error: error.to_string(),
                            })]
                        }
                    }
                } else {
                    Vec::new()
                }
            }
            ResponsesStreamEvent::Completed { response } => {
                self.handle_completion(response, StopReason::EndTurn)
            }
            ResponsesStreamEvent::Incomplete { response } => {
                let reason = response
                    .status_details
                    .as_ref()
                    .and_then(|details| details.reason.as_deref());
                let stop_reason = match reason {
                    Some("max_output_tokens") => StopReason::MaxTokens,
                    Some("content_filter") => {
                        self.pending_stop_reason = Some(StopReason::Refusal);
                        StopReason::Refusal
                    }
                    _ => self
                        .pending_stop_reason
                        .take()
                        .unwrap_or(StopReason::EndTurn),
                };

                let mut events = Vec::new();
                if self.pending_stop_reason.is_none() {
                    events.extend(self.emit_tool_calls_from_output(&response.output));
                }
                if let Some(usage) = response.usage.as_ref() {
                    events.push(Ok(LanguageModelCompletionEvent::UsageUpdate(
                        token_usage_from_response_usage(usage),
                    )));
                }
                events.push(Ok(LanguageModelCompletionEvent::Stop(stop_reason)));
                events
            }
            ResponsesStreamEvent::Failed { response } => {
                let message = response
                    .status_details
                    .and_then(|details| details.error)
                    .map(|error| error.to_string())
                    .unwrap_or_else(|| "response failed".to_string());
                vec![Err(LanguageModelCompletionError::Other(anyhow!(message)))]
            }
            ResponsesStreamEvent::Error { error }
            | ResponsesStreamEvent::GenericError { error } => {
                vec![Err(LanguageModelCompletionError::Other(anyhow!(
                    "{error:?}"
                )))]
            }
            ResponsesStreamEvent::OutputTextDone { .. } => Vec::new(),
            ResponsesStreamEvent::OutputItemDone { .. }
            | ResponsesStreamEvent::ContentPartAdded { .. }
            | ResponsesStreamEvent::ContentPartDone { .. }
            | ResponsesStreamEvent::Created { .. }
            | ResponsesStreamEvent::InProgress { .. }
            | ResponsesStreamEvent::Unknown => Vec::new(),
        }
    }

    fn handle_completion(
        &mut self,
        response: ResponsesSummary,
        default_reason: StopReason,
    ) -> Vec<Result<LanguageModelCompletionEvent, LanguageModelCompletionError>> {
        let mut events = Vec::new();

        if self.pending_stop_reason.is_none() {
            events.extend(self.emit_tool_calls_from_output(&response.output));
        }

        if let Some(usage) = response.usage.as_ref() {
            events.push(Ok(LanguageModelCompletionEvent::UsageUpdate(
                token_usage_from_response_usage(usage),
            )));
        }

        let stop_reason = self.pending_stop_reason.take().unwrap_or(default_reason);
        events.push(Ok(LanguageModelCompletionEvent::Stop(stop_reason)));
        events
    }

    fn emit_tool_calls_from_output(
        &mut self,
        output: &[ResponseOutputItem],
    ) -> Vec<Result<LanguageModelCompletionEvent, LanguageModelCompletionError>> {
        let mut events = Vec::new();
        for item in output {
            if let ResponseOutputItem::FunctionCall(function_call) = item {
                let Some(call_id) = function_call
                    .call_id
                    .clone()
                    .or_else(|| function_call.id.clone())
                else {
                    log::error!(
                        "Function call item missing both call_id and id: {:?}",
                        function_call
                    );
                    continue;
                };
                let name: Arc<str> = Arc::from(function_call.name.clone().unwrap_or_default());
                let arguments = &function_call.arguments;
                if !arguments.is_empty() {
                    self.pending_stop_reason = Some(StopReason::ToolUse);
                    match serde_json::from_str::<serde_json::Value>(arguments) {
                        Ok(input) => {
                            events.push(Ok(LanguageModelCompletionEvent::ToolUse(
                                LanguageModelToolUse {
                                    id: LanguageModelToolUseId::from(call_id.clone()),
                                    name: name.clone(),
                                    is_input_complete: true,
                                    input,
                                    raw_input: arguments.clone(),
                                    thought_signature: None,
                                },
                            )));
                        }
                        Err(error) => {
                            events.push(Ok(LanguageModelCompletionEvent::ToolUseJsonParseError {
                                id: LanguageModelToolUseId::from(call_id.clone()),
                                tool_name: name.clone(),
                                raw_input: Arc::<str>::from(arguments.clone()),
                                json_parse_error: error.to_string(),
                            }));
                        }
                    }
                }
            }
        }
        events
    }
}

fn token_usage_from_response_usage(usage: &ResponsesUsage) -> TokenUsage {
    TokenUsage {
        input_tokens: usage.input_tokens.unwrap_or_default(),
        output_tokens: usage.output_tokens.unwrap_or_default(),
        cache_creation_input_tokens: 0,
        cache_read_input_tokens: 0,
    }
}

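// Token counting flattens each message to plain text, since tiktoken only
// needs roles and string content to estimate prompt size.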
pub(crate) fn collect_tiktoken_messages(
    request: LanguageModelRequest,
) -> Vec<tiktoken_rs::ChatCompletionRequestMessage> {
    request
        .messages
        .into_iter()
        .map(|message| tiktoken_rs::ChatCompletionRequestMessage {
            role: match message.role {
                Role::User => "user".into(),
                Role::Assistant => "assistant".into(),
                Role::System => "system".into(),
            },
            content: Some(message.string_contents()),
            name: None,
            function_call: None,
        })
        .collect::<Vec<_>>()
}

pub fn count_open_ai_tokens(
    request: LanguageModelRequest,
    model: Model,
    cx: &App,
) -> BoxFuture<'static, Result<u64>> {
    cx.background_spawn(async move {
        let messages = collect_tiktoken_messages(request);
        match model {
            Model::Custom { max_tokens, .. } => {
                let model = if max_tokens >= 100_000 {
                    // If the max token count is 100k or more, it is likely the o200k_base
                    // tokenizer from gpt-4o.
                    "gpt-4o"
                } else {
                    // Otherwise fall back to gpt-4, since only cl100k_base and o200k_base are
                    // supported with this tiktoken method.
                    "gpt-4"
                };
                tiktoken_rs::num_tokens_from_messages(model, &messages)
            }
            // Models currently supported by tiktoken_rs.
            // Sometimes tiktoken-rs is behind on model support. If that is the case, add a new
            // match arm with an override. We enumerate all supported models here so that we can
            // check whether new models are supported yet or not.
            Model::ThreePointFiveTurbo
            | Model::Four
            | Model::FourTurbo
            | Model::FourOmni
            | Model::FourOmniMini
            | Model::FourPointOne
            | Model::FourPointOneMini
            | Model::FourPointOneNano
            | Model::O1
            | Model::O3
            | Model::O3Mini
            | Model::O4Mini
            | Model::Five
            | Model::FiveCodex
            | Model::FiveMini
            | Model::FiveNano => tiktoken_rs::num_tokens_from_messages(model.id(), &messages),
            // GPT-5.1 and GPT-5.2 don't have dedicated tiktoken support yet; use the gpt-5 tokenizer.
            Model::FivePointOne | Model::FivePointTwo => {
                tiktoken_rs::num_tokens_from_messages("gpt-5", &messages)
            }
        }
        .map(|tokens| tokens as u64)
    })
    .boxed()
}

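// Configuration UI: shows an API key input while unauthenticated, and a
// "configured" card with a reset affordance once a key is present.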
struct ConfigurationView {
    api_key_editor: Entity<InputField>,
    state: Entity<State>,
    load_credentials_task: Option<Task<()>>,
}

impl ConfigurationView {
    fn new(state: Entity<State>, window: &mut Window, cx: &mut Context<Self>) -> Self {
        let api_key_editor = cx.new(|cx| {
            InputField::new(
                window,
                cx,
                "sk-000000000000000000000000000000000000000000000000",
            )
        });

        cx.observe(&state, |_, _, cx| {
            cx.notify();
        })
        .detach();

        let load_credentials_task = Some(cx.spawn_in(window, {
            let state = state.clone();
            async move |this, cx| {
                if let Some(task) = state
                    .update(cx, |state, cx| state.authenticate(cx))
                    .log_err()
                {
                    // We don't log an error, because "not signed in" is also an error.
                    let _ = task.await;
                }
                this.update(cx, |this, cx| {
                    this.load_credentials_task = None;
                    cx.notify();
                })
                .log_err();
            }
        }));

        Self {
            api_key_editor,
            state,
            load_credentials_task,
        }
    }

    fn save_api_key(&mut self, _: &menu::Confirm, window: &mut Window, cx: &mut Context<Self>) {
        let api_key = self.api_key_editor.read(cx).text(cx).trim().to_string();
        if api_key.is_empty() {
            return;
        }

        // URL changes can cause the editor to be displayed again.
        self.api_key_editor
            .update(cx, |editor, cx| editor.set_text("", window, cx));

        let state = self.state.clone();
        cx.spawn_in(window, async move |_, cx| {
            state
                .update(cx, |state, cx| state.set_api_key(Some(api_key), cx))?
                .await
        })
        .detach_and_log_err(cx);
    }

    fn reset_api_key(&mut self, window: &mut Window, cx: &mut Context<Self>) {
        self.api_key_editor
            .update(cx, |input, cx| input.set_text("", window, cx));

        let state = self.state.clone();
        cx.spawn_in(window, async move |_, cx| {
            state
                .update(cx, |state, cx| state.set_api_key(None, cx))?
                .await
        })
        .detach_and_log_err(cx);
    }

    fn should_render_editor(&self, cx: &mut Context<Self>) -> bool {
        !self.state.read(cx).is_authenticated()
    }
}

impl Render for ConfigurationView {
    fn render(&mut self, _: &mut Window, cx: &mut Context<Self>) -> impl IntoElement {
        let env_var_set = self.state.read(cx).api_key_state.is_from_env_var();
        let configured_card_label = if env_var_set {
            format!("API key set in {API_KEY_ENV_VAR_NAME} environment variable")
        } else {
            let api_url = OpenAiLanguageModelProvider::api_url(cx);
            if api_url == OPEN_AI_API_URL {
                "API key configured".to_string()
            } else {
                format!("API key configured for {}", api_url)
            }
        };

        let api_key_section = if self.should_render_editor(cx) {
            v_flex()
                .on_action(cx.listener(Self::save_api_key))
                .child(Label::new("To use Zed's agent with OpenAI, you need to add an API key. Follow these steps:"))
                .child(
                    List::new()
                        .child(
                            ListBulletItem::new("")
                                .child(Label::new("Create one by visiting"))
                                .child(ButtonLink::new("OpenAI's console", "https://platform.openai.com/api-keys"))
                        )
                        .child(
                            ListBulletItem::new("Ensure your OpenAI account has credits")
                        )
                        .child(
                            ListBulletItem::new("Paste your API key below and hit enter to start using the agent")
                        ),
                )
                .child(self.api_key_editor.clone())
                .child(
                    Label::new(format!(
                        "You can also set the {API_KEY_ENV_VAR_NAME} environment variable and restart Zed."
                    ))
                    .size(LabelSize::Small)
                    .color(Color::Muted),
                )
                .child(
                    Label::new(
                        "Note that having a subscription for another service like GitHub Copilot won't work.",
                    )
                    .size(LabelSize::Small).color(Color::Muted),
                )
                .into_any_element()
        } else {
            ConfiguredApiCard::new(configured_card_label)
                .disabled(env_var_set)
                .on_click(cx.listener(|this, _, window, cx| this.reset_api_key(window, cx)))
                .when(env_var_set, |this| {
                    this.tooltip_label(format!("To reset your API key, unset the {API_KEY_ENV_VAR_NAME} environment variable."))
                })
                .into_any_element()
        };

        let compatible_api_section = h_flex()
            .mt_1p5()
            .gap_0p5()
            .flex_wrap()
            .when(self.should_render_editor(cx), |this| {
                this.pt_1p5()
                    .border_t_1()
                    .border_color(cx.theme().colors().border_variant)
            })
            .child(
                h_flex()
                    .gap_2()
                    .child(
                        Icon::new(IconName::Info)
                            .size(IconSize::XSmall)
                            .color(Color::Muted),
                    )
                    .child(Label::new("Zed also supports OpenAI-compatible models.")),
            )
            .child(
                Button::new("docs", "Learn More")
                    .icon(IconName::ArrowUpRight)
                    .icon_size(IconSize::Small)
                    .icon_color(Color::Muted)
                    .on_click(move |_, _window, cx| {
                        cx.open_url("https://zed.dev/docs/ai/llm-providers#openai-api-compatible")
                    }),
            );

        if self.load_credentials_task.is_some() {
            div().child(Label::new("Loading credentials…")).into_any()
        } else {
            v_flex()
                .size_full()
                .child(api_key_section)
                .child(compatible_api_section)
                .into_any()
        }
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use futures::{StreamExt, executor::block_on};
    use gpui::TestAppContext;
    use language_model::{LanguageModelRequestMessage, LanguageModelRequestTool};
    use open_ai::responses::{
        ResponseFunctionToolCall, ResponseOutputItem, ResponseOutputMessage, ResponseStatusDetails,
        ResponseSummary, ResponseUsage, StreamEvent as ResponsesStreamEvent,
    };
    use pretty_assertions::assert_eq;

    fn map_response_events(events: Vec<ResponsesStreamEvent>) -> Vec<LanguageModelCompletionEvent> {
        block_on(async {
            OpenAiResponseEventMapper::new()
                .map_stream(Box::pin(futures::stream::iter(events.into_iter().map(Ok))))
                .collect::<Vec<_>>()
                .await
                .into_iter()
                .map(Result::unwrap)
                .collect()
        })
    }

    fn response_item_message(id: &str) -> ResponseOutputItem {
        ResponseOutputItem::Message(ResponseOutputMessage {
            id: Some(id.to_string()),
            role: Some("assistant".to_string()),
            status: Some("in_progress".to_string()),
            content: vec![],
        })
    }

    fn response_item_function_call(id: &str, args: Option<&str>) -> ResponseOutputItem {
        ResponseOutputItem::FunctionCall(ResponseFunctionToolCall {
            id: Some(id.to_string()),
            status: Some("in_progress".to_string()),
            name: Some("get_weather".to_string()),
            call_id: Some("call_123".to_string()),
            arguments: args.map(|s| s.to_string()).unwrap_or_default(),
        })
    }

    #[gpui::test]
    fn tiktoken_rs_support(cx: &TestAppContext) {
        let request = LanguageModelRequest {
            thread_id: None,
            prompt_id: None,
            intent: None,
            mode: None,
            messages: vec![LanguageModelRequestMessage {
                role: Role::User,
                content: vec![MessageContent::Text("message".into())],
                cache: false,
                reasoning_details: None,
            }],
            tools: vec![],
            tool_choice: None,
            stop: vec![],
            temperature: None,
            thinking_allowed: true,
        };

        // Validate that all models are supported by tiktoken-rs.
        for model in Model::iter() {
            let count = cx
                .executor()
                .block(count_open_ai_tokens(
                    request.clone(),
                    model,
                    &cx.app.borrow(),
                ))
                .unwrap();
            assert!(count > 0);
        }
    }

    #[test]
    fn responses_stream_maps_text_and_usage() {
        let events = vec![
            ResponsesStreamEvent::OutputItemAdded {
                output_index: 0,
                sequence_number: None,
                item: response_item_message("msg_123"),
            },
            ResponsesStreamEvent::OutputTextDelta {
                item_id: "msg_123".into(),
                output_index: 0,
                content_index: Some(0),
                delta: "Hello".into(),
            },
            ResponsesStreamEvent::Completed {
                response: ResponseSummary {
                    usage: Some(ResponseUsage {
                        input_tokens: Some(5),
                        output_tokens: Some(3),
                        total_tokens: Some(8),
                    }),
                    ..Default::default()
                },
            },
        ];

        let mapped = map_response_events(events);
        assert!(matches!(
            mapped[0],
            LanguageModelCompletionEvent::StartMessage { ref message_id } if message_id == "msg_123"
        ));
        assert!(matches!(
            mapped[1],
            LanguageModelCompletionEvent::Text(ref text) if text == "Hello"
        ));
        assert!(matches!(
            mapped[2],
            LanguageModelCompletionEvent::UsageUpdate(TokenUsage {
                input_tokens: 5,
                output_tokens: 3,
                ..
            })
        ));
        assert!(matches!(
            mapped[3],
            LanguageModelCompletionEvent::Stop(StopReason::EndTurn)
        ));
    }

    #[test]
    fn into_open_ai_response_builds_complete_payload() {
        let tool_call_id = LanguageModelToolUseId::from("call-42");
        let tool_input = json!({ "city": "Boston" });
        let tool_arguments = serde_json::to_string(&tool_input).unwrap();
        let tool_use = LanguageModelToolUse {
            id: tool_call_id.clone(),
            name: Arc::from("get_weather"),
            raw_input: tool_arguments.clone(),
            input: tool_input,
            is_input_complete: true,
            thought_signature: None,
        };
        let tool_result = LanguageModelToolResult {
            tool_use_id: tool_call_id,
            tool_name: Arc::from("get_weather"),
            is_error: false,
            content: LanguageModelToolResultContent::Text(Arc::from("Sunny")),
            output: Some(json!({ "forecast": "Sunny" })),
        };
        let user_image = LanguageModelImage {
            source: SharedString::from("aGVsbG8="),
            size: None,
        };
        let expected_image_url = user_image.to_base64_url();

        let request = LanguageModelRequest {
            thread_id: Some("thread-123".into()),
            prompt_id: None,
            intent: None,
            mode: None,
            messages: vec![
                LanguageModelRequestMessage {
                    role: Role::System,
                    content: vec![MessageContent::Text("System context".into())],
                    cache: false,
                    reasoning_details: None,
                },
                LanguageModelRequestMessage {
                    role: Role::User,
                    content: vec![
                        MessageContent::Text("Please check the weather.".into()),
                        MessageContent::Image(user_image),
                    ],
                    cache: false,
                    reasoning_details: None,
                },
                LanguageModelRequestMessage {
                    role: Role::Assistant,
                    content: vec![
                        MessageContent::Text("Looking that up.".into()),
                        MessageContent::ToolUse(tool_use),
                    ],
                    cache: false,
                    reasoning_details: None,
                },
                LanguageModelRequestMessage {
                    role: Role::Assistant,
                    content: vec![MessageContent::ToolResult(tool_result)],
                    cache: false,
                    reasoning_details: None,
                },
            ],
            tools: vec![LanguageModelRequestTool {
                name: "get_weather".into(),
                description: "Fetches the weather".into(),
                input_schema: json!({ "type": "object" }),
            }],
            tool_choice: Some(LanguageModelToolChoice::Any),
            stop: vec!["<STOP>".into()],
            temperature: None,
            thinking_allowed: false,
        };

        let response = into_open_ai_response(
            request,
            "custom-model",
            true,
            true,
            Some(2048),
            Some(ReasoningEffort::Low),
        );

        let serialized = serde_json::to_value(&response).unwrap();
        let expected = json!({
            "model": "custom-model",
            "input": [
                {
                    "type": "message",
                    "role": "system",
                    "content": [
                        { "type": "input_text", "text": "System context" }
                    ]
                },
                {
                    "type": "message",
                    "role": "user",
                    "content": [
                        { "type": "input_text", "text": "Please check the weather." },
                        { "type": "input_image", "image_url": expected_image_url }
                    ]
                },
                {
                    "type": "message",
                    "role": "assistant",
                    "status": "completed",
                    "content": [
                        { "type": "output_text", "text": "Looking that up.", "annotations": [] }
                    ]
                },
                {
                    "type": "function_call",
                    "call_id": "call-42",
                    "name": "get_weather",
                    "arguments": tool_arguments
                },
                {
                    "type": "function_call_output",
                    "call_id": "call-42",
                    "output": "{\"forecast\":\"Sunny\"}"
                }
            ],
            "stream": true,
            "max_output_tokens": 2048,
            "parallel_tool_calls": true,
            "tool_choice": "required",
            "tools": [
                {
                    "type": "function",
                    "name": "get_weather",
                    "description": "Fetches the weather",
                    "parameters": { "type": "object" }
                }
            ],
            "prompt_cache_key": "thread-123",
            "reasoning": { "effort": "low" }
        });

        assert_eq!(serialized, expected);
    }

    #[test]
    fn responses_stream_maps_tool_calls() {
        let events = vec![
            ResponsesStreamEvent::OutputItemAdded {
                output_index: 0,
                sequence_number: None,
                item: response_item_function_call("item_fn", Some("{\"city\":\"Bos")),
            },
            ResponsesStreamEvent::FunctionCallArgumentsDelta {
                item_id: "item_fn".into(),
                output_index: 0,
                delta: "ton\"}".into(),
                sequence_number: None,
            },
            ResponsesStreamEvent::FunctionCallArgumentsDone {
                item_id: "item_fn".into(),
                output_index: 0,
                arguments: "{\"city\":\"Boston\"}".into(),
                sequence_number: None,
            },
            ResponsesStreamEvent::Completed {
                response: ResponseSummary::default(),
            },
        ];

        let mapped = map_response_events(events);
        assert!(matches!(
            mapped[0],
            LanguageModelCompletionEvent::ToolUse(LanguageModelToolUse {
                ref id,
                ref name,
                ref raw_input,
                ..
            }) if id.to_string() == "call_123"
                && name.as_ref() == "get_weather"
                && raw_input == "{\"city\":\"Boston\"}"
        ));
        assert!(matches!(
            mapped[1],
            LanguageModelCompletionEvent::Stop(StopReason::ToolUse)
        ));
    }

    #[test]
    fn responses_stream_uses_max_tokens_stop_reason() {
        let events = vec![ResponsesStreamEvent::Incomplete {
            response: ResponseSummary {
                status_details: Some(ResponseStatusDetails {
                    reason: Some("max_output_tokens".into()),
                    r#type: Some("incomplete".into()),
                    error: None,
                }),
                usage: Some(ResponseUsage {
                    input_tokens: Some(10),
                    output_tokens: Some(20),
                    total_tokens: Some(30),
                }),
                ..Default::default()
            },
        }];

        let mapped = map_response_events(events);
        assert!(matches!(
            mapped[0],
            LanguageModelCompletionEvent::UsageUpdate(TokenUsage {
                input_tokens: 10,
                output_tokens: 20,
                ..
            })
        ));
        assert!(matches!(
            mapped[1],
            LanguageModelCompletionEvent::Stop(StopReason::MaxTokens)
        ));
    }

    #[test]
    fn responses_stream_handles_multiple_tool_calls() {
        let events = vec![
            ResponsesStreamEvent::OutputItemAdded {
                output_index: 0,
                sequence_number: None,
                item: response_item_function_call("item_fn1", Some("{\"city\":\"NYC\"}")),
            },
            ResponsesStreamEvent::FunctionCallArgumentsDone {
                item_id: "item_fn1".into(),
                output_index: 0,
                arguments: "{\"city\":\"NYC\"}".into(),
                sequence_number: None,
            },
            ResponsesStreamEvent::OutputItemAdded {
                output_index: 1,
                sequence_number: None,
                item: response_item_function_call("item_fn2", Some("{\"city\":\"LA\"}")),
            },
            ResponsesStreamEvent::FunctionCallArgumentsDone {
                item_id: "item_fn2".into(),
                output_index: 1,
                arguments: "{\"city\":\"LA\"}".into(),
                sequence_number: None,
            },
            ResponsesStreamEvent::Completed {
                response: ResponseSummary::default(),
            },
        ];

        let mapped = map_response_events(events);
        assert_eq!(mapped.len(), 3);
        assert!(matches!(
            mapped[0],
            LanguageModelCompletionEvent::ToolUse(LanguageModelToolUse { ref raw_input, .. })
                if raw_input == "{\"city\":\"NYC\"}"
        ));
        assert!(matches!(
            mapped[1],
            LanguageModelCompletionEvent::ToolUse(LanguageModelToolUse { ref raw_input, .. })
                if raw_input == "{\"city\":\"LA\"}"
        ));
        assert!(matches!(
            mapped[2],
            LanguageModelCompletionEvent::Stop(StopReason::ToolUse)
        ));
    }

    #[test]
    fn responses_stream_handles_mixed_text_and_tool_calls() {
        let events = vec![
            ResponsesStreamEvent::OutputItemAdded {
                output_index: 0,
                sequence_number: None,
                item: response_item_message("msg_123"),
            },
            ResponsesStreamEvent::OutputTextDelta {
                item_id: "msg_123".into(),
                output_index: 0,
                content_index: Some(0),
                delta: "Let me check that".into(),
            },
            ResponsesStreamEvent::OutputItemAdded {
                output_index: 1,
                sequence_number: None,
                item: response_item_function_call("item_fn", Some("{\"query\":\"test\"}")),
            },
            ResponsesStreamEvent::FunctionCallArgumentsDone {
                item_id: "item_fn".into(),
                output_index: 1,
                arguments: "{\"query\":\"test\"}".into(),
                sequence_number: None,
            },
            ResponsesStreamEvent::Completed {
                response: ResponseSummary::default(),
            },
        ];

        let mapped = map_response_events(events);
        assert!(matches!(
            mapped[0],
            LanguageModelCompletionEvent::StartMessage { .. }
        ));
        assert!(matches!(
            mapped[1],
            LanguageModelCompletionEvent::Text(ref text) if text == "Let me check that"
        ));
        assert!(matches!(
            mapped[2],
            LanguageModelCompletionEvent::ToolUse(LanguageModelToolUse { ref raw_input, .. })
                if raw_input == "{\"query\":\"test\"}"
        ));
        assert!(matches!(
            mapped[3],
            LanguageModelCompletionEvent::Stop(StopReason::ToolUse)
        ));
    }

    #[test]
    fn responses_stream_handles_json_parse_error() {
        let events = vec![
            ResponsesStreamEvent::OutputItemAdded {
                output_index: 0,
                sequence_number: None,
                item: response_item_function_call("item_fn", Some("{invalid json")),
            },
            ResponsesStreamEvent::FunctionCallArgumentsDone {
                item_id: "item_fn".into(),
                output_index: 0,
                arguments: "{invalid json".into(),
                sequence_number: None,
            },
            ResponsesStreamEvent::Completed {
                response: ResponseSummary::default(),
            },
        ];

        let mapped = map_response_events(events);
        assert!(matches!(
            mapped[0],
            LanguageModelCompletionEvent::ToolUseJsonParseError {
                ref raw_input,
                ..
            } if raw_input.as_ref() == "{invalid json"
        ));
    }

    #[test]
    fn responses_stream_handles_incomplete_function_call() {
        let events = vec![
            ResponsesStreamEvent::OutputItemAdded {
                output_index: 0,
                sequence_number: None,
                item: response_item_function_call("item_fn", Some("{\"city\":")),
            },
            ResponsesStreamEvent::FunctionCallArgumentsDelta {
                item_id: "item_fn".into(),
                output_index: 0,
                delta: "\"Boston\"".into(),
                sequence_number: None,
            },
            ResponsesStreamEvent::Incomplete {
                response: ResponseSummary {
                    status_details: Some(ResponseStatusDetails {
                        reason: Some("max_output_tokens".into()),
                        r#type: Some("incomplete".into()),
                        error: None,
                    }),
                    output: vec![response_item_function_call(
                        "item_fn",
                        Some("{\"city\":\"Boston\"}"),
                    )],
                    ..Default::default()
                },
            },
        ];

        let mapped = map_response_events(events);
        assert!(matches!(
            mapped[0],
            LanguageModelCompletionEvent::ToolUse(LanguageModelToolUse { ref raw_input, .. })
                if raw_input == "{\"city\":\"Boston\"}"
        ));
        assert!(matches!(
            mapped[1],
            LanguageModelCompletionEvent::Stop(StopReason::MaxTokens)
        ));
    }

    #[test]
    fn responses_stream_incomplete_does_not_duplicate_tool_calls() {
        let events = vec![
            ResponsesStreamEvent::OutputItemAdded {
                output_index: 0,
                sequence_number: None,
                item: response_item_function_call("item_fn", Some("{\"city\":\"Boston\"}")),
            },
            ResponsesStreamEvent::FunctionCallArgumentsDone {
                item_id: "item_fn".into(),
                output_index: 0,
                arguments: "{\"city\":\"Boston\"}".into(),
                sequence_number: None,
            },
            ResponsesStreamEvent::Incomplete {
                response: ResponseSummary {
                    status_details: Some(ResponseStatusDetails {
                        reason: Some("max_output_tokens".into()),
                        r#type: Some("incomplete".into()),
                        error: None,
                    }),
                    output: vec![response_item_function_call(
                        "item_fn",
                        Some("{\"city\":\"Boston\"}"),
                    )],
                    ..Default::default()
                },
            },
        ];

        let mapped = map_response_events(events);
        assert_eq!(mapped.len(), 2);
        assert!(matches!(
            mapped[0],
            LanguageModelCompletionEvent::ToolUse(LanguageModelToolUse { ref raw_input, .. })
                if raw_input == "{\"city\":\"Boston\"}"
        ));
        assert!(matches!(
            mapped[1],
            LanguageModelCompletionEvent::Stop(StopReason::MaxTokens)
        ));
    }
}