use anyhow::{Result, anyhow};
use collections::{BTreeMap, HashMap};
use futures::Stream;
use futures::{FutureExt, StreamExt, future::BoxFuture};
use gpui::{AnyView, App, AsyncApp, Context, Entity, SharedString, Task, Window};
use http_client::HttpClient;
use language_model::{
    ApiKeyState, AuthenticateError, EnvVar, IconOrSvg, LanguageModel, LanguageModelCompletionError,
    LanguageModelCompletionEvent, LanguageModelId, LanguageModelImage, LanguageModelName,
    LanguageModelProvider, LanguageModelProviderId, LanguageModelProviderName,
    LanguageModelProviderState, LanguageModelRequest, LanguageModelRequestMessage,
    LanguageModelToolChoice, LanguageModelToolResult, LanguageModelToolResultContent,
    LanguageModelToolUse, LanguageModelToolUseId, MessageContent, RateLimiter, Role, StopReason,
    TokenUsage, env_var,
};
use menu;
use open_ai::responses::{
    ResponseFunctionCallItem, ResponseFunctionCallOutputItem, ResponseInputContent,
    ResponseInputItem, ResponseMessageItem,
};
use open_ai::{
    ImageUrl, Model, OPEN_AI_API_URL, ReasoningEffort, ResponseStreamEvent,
    responses::{
        Request as ResponseRequest, ResponseOutputItem, ResponseSummary as ResponsesSummary,
        ResponseUsage as ResponsesUsage, StreamEvent as ResponsesStreamEvent, stream_response,
    },
    stream_completion,
};
use settings::{OpenAiAvailableModel as AvailableModel, Settings, SettingsStore};
use std::pin::Pin;
use std::str::FromStr as _;
use std::sync::{Arc, LazyLock};
use strum::IntoEnumIterator;
use ui::{ButtonLink, ConfiguredApiCard, List, ListBulletItem, prelude::*};
use ui_input::InputField;
use util::ResultExt;

const PROVIDER_ID: LanguageModelProviderId = language_model::OPEN_AI_PROVIDER_ID;
const PROVIDER_NAME: LanguageModelProviderName = language_model::OPEN_AI_PROVIDER_NAME;

const API_KEY_ENV_VAR_NAME: &str = "OPENAI_API_KEY";
static API_KEY_ENV_VAR: LazyLock<EnvVar> = env_var!(API_KEY_ENV_VAR_NAME);

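/// Settings for the OpenAI language model provider. An empty `api_url`
/// falls back to the default OpenAI endpoint.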
#[derive(Default, Clone, Debug, PartialEq)]
pub struct OpenAiSettings {
    pub api_url: String,
    pub available_models: Vec<AvailableModel>,
}

pub struct OpenAiLanguageModelProvider {
    http_client: Arc<dyn HttpClient>,
    state: Entity<State>,
}

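/// Provider state: tracks the API key for the currently configured API URL.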
pub struct State {
    api_key_state: ApiKeyState,
}

impl State {
    fn is_authenticated(&self) -> bool {
        self.api_key_state.has_key()
    }

    fn set_api_key(&mut self, api_key: Option<String>, cx: &mut Context<Self>) -> Task<Result<()>> {
        let api_url = OpenAiLanguageModelProvider::api_url(cx);
        self.api_key_state
            .store(api_url, api_key, |this| &mut this.api_key_state, cx)
    }

    fn authenticate(&mut self, cx: &mut Context<Self>) -> Task<Result<(), AuthenticateError>> {
        let api_url = OpenAiLanguageModelProvider::api_url(cx);
        self.api_key_state
            .load_if_needed(api_url, |this| &mut this.api_key_state, cx)
    }
}

impl OpenAiLanguageModelProvider {
    pub fn new(http_client: Arc<dyn HttpClient>, cx: &mut App) -> Self {
        let state = cx.new(|cx| {
            cx.observe_global::<SettingsStore>(|this: &mut State, cx| {
                let api_url = Self::api_url(cx);
                this.api_key_state
                    .handle_url_change(api_url, |this| &mut this.api_key_state, cx);
                cx.notify();
            })
            .detach();
            State {
                api_key_state: ApiKeyState::new(Self::api_url(cx), (*API_KEY_ENV_VAR).clone()),
            }
        });

        Self { http_client, state }
    }

    fn create_language_model(&self, model: open_ai::Model) -> Arc<dyn LanguageModel> {
        Arc::new(OpenAiLanguageModel {
            id: LanguageModelId::from(model.id().to_string()),
            model,
            state: self.state.clone(),
            http_client: self.http_client.clone(),
            request_limiter: RateLimiter::new(4),
        })
    }

    fn settings(cx: &App) -> &OpenAiSettings {
        &crate::AllLanguageModelSettings::get_global(cx).openai
    }

    fn api_url(cx: &App) -> SharedString {
        let api_url = &Self::settings(cx).api_url;
        if api_url.is_empty() {
            open_ai::OPEN_AI_API_URL.into()
        } else {
            SharedString::new(api_url.as_str())
        }
    }
}

impl LanguageModelProviderState for OpenAiLanguageModelProvider {
    type ObservableEntity = State;

    fn observable_entity(&self) -> Option<Entity<Self::ObservableEntity>> {
        Some(self.state.clone())
    }
}

impl LanguageModelProvider for OpenAiLanguageModelProvider {
    fn id(&self) -> LanguageModelProviderId {
        PROVIDER_ID
    }

    fn name(&self) -> LanguageModelProviderName {
        PROVIDER_NAME
    }

    fn icon(&self) -> IconOrSvg {
        IconOrSvg::Icon(IconName::AiOpenAi)
    }

    fn default_model(&self, _cx: &App) -> Option<Arc<dyn LanguageModel>> {
        Some(self.create_language_model(open_ai::Model::default()))
    }

    fn default_fast_model(&self, _cx: &App) -> Option<Arc<dyn LanguageModel>> {
        Some(self.create_language_model(open_ai::Model::default_fast()))
    }

    fn provided_models(&self, cx: &App) -> Vec<Arc<dyn LanguageModel>> {
        let mut models = BTreeMap::default();

        // Add base models from open_ai::Model::iter()
        for model in open_ai::Model::iter() {
            if !matches!(model, open_ai::Model::Custom { .. }) {
                models.insert(model.id().to_string(), model);
            }
        }

        // Override with available models from settings
        for model in &OpenAiLanguageModelProvider::settings(cx).available_models {
            models.insert(
                model.name.clone(),
                open_ai::Model::Custom {
                    name: model.name.clone(),
                    display_name: model.display_name.clone(),
                    max_tokens: model.max_tokens,
                    max_output_tokens: model.max_output_tokens,
                    max_completion_tokens: model.max_completion_tokens,
                    reasoning_effort: model.reasoning_effort.clone(),
                    supports_chat_completions: model.capabilities.chat_completions,
                },
            );
        }

        models
            .into_values()
            .map(|model| self.create_language_model(model))
            .collect()
    }

    fn is_authenticated(&self, cx: &App) -> bool {
        self.state.read(cx).is_authenticated()
    }

    fn authenticate(&self, cx: &mut App) -> Task<Result<(), AuthenticateError>> {
        self.state.update(cx, |state, cx| state.authenticate(cx))
    }

    fn configuration_view(
        &self,
        _target_agent: language_model::ConfigurationViewTargetAgent,
        window: &mut Window,
        cx: &mut App,
    ) -> AnyView {
        cx.new(|cx| ConfigurationView::new(self.state.clone(), window, cx))
            .into()
    }

    fn reset_credentials(&self, cx: &mut App) -> Task<Result<()>> {
        self.state
            .update(cx, |state, cx| state.set_api_key(None, cx))
    }
}

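/// A single OpenAI model exposed through the provider, with a shared
/// rate limiter for outgoing requests.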
pub struct OpenAiLanguageModel {
    id: LanguageModelId,
    model: open_ai::Model,
    state: Entity<State>,
    http_client: Arc<dyn HttpClient>,
    request_limiter: RateLimiter,
}

impl OpenAiLanguageModel {
    fn stream_completion(
        &self,
        request: open_ai::Request,
        cx: &AsyncApp,
    ) -> BoxFuture<'static, Result<futures::stream::BoxStream<'static, Result<ResponseStreamEvent>>>>
    {
        let http_client = self.http_client.clone();

        // `read_with` returns a `Result` because the app state may have been
        // dropped; bail out early in that case.
        let Ok((api_key, api_url)) = self.state.read_with(cx, |state, cx| {
            let api_url = OpenAiLanguageModelProvider::api_url(cx);
            (state.api_key_state.key(&api_url), api_url)
        }) else {
            return futures::future::ready(Err(anyhow!("App state dropped"))).boxed();
        };

        let future = self.request_limiter.stream(async move {
            let provider = PROVIDER_NAME;
            let Some(api_key) = api_key else {
                return Err(LanguageModelCompletionError::NoApiKey { provider });
            };
            let request = stream_completion(
                http_client.as_ref(),
                provider.0.as_str(),
                &api_url,
                &api_key,
                request,
            );
            let response = request.await?;
            Ok(response)
        });

        async move { Ok(future.await?.boxed()) }.boxed()
    }

    fn stream_response(
        &self,
        request: ResponseRequest,
        cx: &AsyncApp,
    ) -> BoxFuture<'static, Result<futures::stream::BoxStream<'static, Result<ResponsesStreamEvent>>>>
    {
        let http_client = self.http_client.clone();

        let Ok((api_key, api_url)) = self.state.read_with(cx, |state, cx| {
            let api_url = OpenAiLanguageModelProvider::api_url(cx);
            (state.api_key_state.key(&api_url), api_url)
        }) else {
            return futures::future::ready(Err(anyhow!("App state dropped"))).boxed();
        };

        let provider = PROVIDER_NAME;
        let future = self.request_limiter.stream(async move {
            let Some(api_key) = api_key else {
                return Err(LanguageModelCompletionError::NoApiKey { provider });
            };
            let request = stream_response(
                http_client.as_ref(),
                provider.0.as_str(),
                &api_url,
                &api_key,
                request,
            );
            let response = request.await?;
            Ok(response)
        });

        async move { Ok(future.await?.boxed()) }.boxed()
    }
}

impl LanguageModel for OpenAiLanguageModel {
    fn id(&self) -> LanguageModelId {
        self.id.clone()
    }

    fn name(&self) -> LanguageModelName {
        LanguageModelName::from(self.model.display_name().to_string())
    }

    fn provider_id(&self) -> LanguageModelProviderId {
        PROVIDER_ID
    }

    fn provider_name(&self) -> LanguageModelProviderName {
        PROVIDER_NAME
    }

    fn supports_tools(&self) -> bool {
        true
    }

    fn supports_images(&self) -> bool {
        use open_ai::Model;
        match &self.model {
            Model::FourOmni
            | Model::FourOmniMini
            | Model::FourPointOne
            | Model::FourPointOneMini
            | Model::FourPointOneNano
            | Model::Five
            | Model::FiveCodex
            | Model::FiveMini
            | Model::FiveNano
            | Model::FivePointOne
            | Model::FivePointTwo
            | Model::O1
            | Model::O3
            | Model::O4Mini => true,
            Model::ThreePointFiveTurbo
            | Model::Four
            | Model::FourTurbo
            | Model::O3Mini
            | Model::Custom { .. } => false,
        }
    }

    fn supports_tool_choice(&self, choice: LanguageModelToolChoice) -> bool {
        match choice {
            LanguageModelToolChoice::Auto => true,
            LanguageModelToolChoice::Any => true,
            LanguageModelToolChoice::None => true,
        }
    }

    fn telemetry_id(&self) -> String {
        format!("openai/{}", self.model.id())
    }

    fn max_token_count(&self) -> u64 {
        self.model.max_token_count()
    }

    fn max_output_tokens(&self) -> Option<u64> {
        self.model.max_output_tokens()
    }

    fn count_tokens(
        &self,
        request: LanguageModelRequest,
        cx: &App,
    ) -> BoxFuture<'static, Result<u64>> {
        count_open_ai_tokens(request, self.model.clone(), cx)
    }

    fn stream_completion(
        &self,
        request: LanguageModelRequest,
        cx: &AsyncApp,
    ) -> BoxFuture<
        'static,
        Result<
            futures::stream::BoxStream<
                'static,
                Result<LanguageModelCompletionEvent, LanguageModelCompletionError>,
            >,
            LanguageModelCompletionError,
        >,
    > {
        if self.model.supports_chat_completions() {
            let request = into_open_ai(
                request,
                self.model.id(),
                self.model.supports_parallel_tool_calls(),
                self.model.supports_prompt_cache_key(),
                self.max_output_tokens(),
                self.model.reasoning_effort(),
            );
            let completions = self.stream_completion(request, cx);
            async move {
                let mapper = OpenAiEventMapper::new();
                Ok(mapper.map_stream(completions.await?).boxed())
            }
            .boxed()
        } else {
            let request = into_open_ai_response(
                request,
                self.model.id(),
                self.model.supports_parallel_tool_calls(),
                self.model.supports_prompt_cache_key(),
                self.max_output_tokens(),
                self.model.reasoning_effort(),
            );
            let completions = self.stream_response(request, cx);
            async move {
                let mapper = OpenAiResponseEventMapper::new();
                Ok(mapper.map_stream(completions.await?).boxed())
            }
            .boxed()
        }
    }
}

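/// Converts a [`LanguageModelRequest`] into a Chat Completions API request,
/// merging consecutive content parts of the same role into as few messages
/// as possible.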
pub fn into_open_ai(
    request: LanguageModelRequest,
    model_id: &str,
    supports_parallel_tool_calls: bool,
    supports_prompt_cache_key: bool,
    max_output_tokens: Option<u64>,
    reasoning_effort: Option<ReasoningEffort>,
) -> open_ai::Request {
    let stream = !model_id.starts_with("o1-");

    let mut messages = Vec::new();
    for message in request.messages {
        for content in message.content {
            match content {
                MessageContent::Text(text) | MessageContent::Thinking { text, .. } => {
                    if !text.trim().is_empty() {
                        add_message_content_part(
                            open_ai::MessagePart::Text { text },
                            message.role,
                            &mut messages,
                        );
                    }
                }
                MessageContent::RedactedThinking(_) => {}
                MessageContent::Image(image) => {
                    add_message_content_part(
                        open_ai::MessagePart::Image {
                            image_url: ImageUrl {
                                url: image.to_base64_url(),
                                detail: None,
                            },
                        },
                        message.role,
                        &mut messages,
                    );
                }
                MessageContent::ToolUse(tool_use) => {
                    let tool_call = open_ai::ToolCall {
                        id: tool_use.id.to_string(),
                        content: open_ai::ToolCallContent::Function {
                            function: open_ai::FunctionContent {
                                name: tool_use.name.to_string(),
                                arguments: serde_json::to_string(&tool_use.input)
                                    .unwrap_or_default(),
                            },
                        },
                    };

                    if let Some(open_ai::RequestMessage::Assistant { tool_calls, .. }) =
                        messages.last_mut()
                    {
                        tool_calls.push(tool_call);
                    } else {
                        messages.push(open_ai::RequestMessage::Assistant {
                            content: None,
                            tool_calls: vec![tool_call],
                        });
                    }
                }
                MessageContent::ToolResult(tool_result) => {
                    let content = match &tool_result.content {
                        LanguageModelToolResultContent::Text(text) => {
                            vec![open_ai::MessagePart::Text {
                                text: text.to_string(),
                            }]
                        }
                        LanguageModelToolResultContent::Image(image) => {
                            vec![open_ai::MessagePart::Image {
                                image_url: ImageUrl {
                                    url: image.to_base64_url(),
                                    detail: None,
                                },
                            }]
                        }
                    };

                    messages.push(open_ai::RequestMessage::Tool {
                        content: content.into(),
                        tool_call_id: tool_result.tool_use_id.to_string(),
                    });
                }
            }
        }
    }

    open_ai::Request {
        model: model_id.into(),
        messages,
        stream,
        stop: request.stop,
        temperature: request.temperature.or(Some(1.0)),
        max_completion_tokens: max_output_tokens,
        parallel_tool_calls: if supports_parallel_tool_calls && !request.tools.is_empty() {
            // Disable parallel tool calls, as the Agent currently expects a maximum of one per turn.
            Some(false)
        } else {
            None
        },
        prompt_cache_key: if supports_prompt_cache_key {
            request.thread_id
        } else {
            None
        },
        tools: request
            .tools
            .into_iter()
            .map(|tool| open_ai::ToolDefinition::Function {
                function: open_ai::FunctionDefinition {
                    name: tool.name,
                    description: Some(tool.description),
                    parameters: Some(tool.input_schema),
                },
            })
            .collect(),
        tool_choice: request.tool_choice.map(|choice| match choice {
            LanguageModelToolChoice::Auto => open_ai::ToolChoice::Auto,
            LanguageModelToolChoice::Any => open_ai::ToolChoice::Required,
            LanguageModelToolChoice::None => open_ai::ToolChoice::None,
        }),
        reasoning_effort,
    }
}

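/// Converts a [`LanguageModelRequest`] into a Responses API request. Tool
/// calls and tool results become dedicated input items rather than message
/// content.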
pub fn into_open_ai_response(
    request: LanguageModelRequest,
    model_id: &str,
    supports_parallel_tool_calls: bool,
    supports_prompt_cache_key: bool,
    max_output_tokens: Option<u64>,
    reasoning_effort: Option<ReasoningEffort>,
) -> ResponseRequest {
    let stream = !model_id.starts_with("o1-");

    let LanguageModelRequest {
        thread_id,
        prompt_id: _,
        intent: _,
        mode: _,
        messages,
        tools,
        tool_choice,
        stop: _,
        temperature,
        thinking_allowed: _,
    } = request;

    let mut input_items = Vec::new();
    for (index, message) in messages.into_iter().enumerate() {
        append_message_to_response_items(message, index, &mut input_items);
    }

    let tools: Vec<_> = tools
        .into_iter()
        .map(|tool| open_ai::responses::ToolDefinition::Function {
            name: tool.name,
            description: Some(tool.description),
            parameters: Some(tool.input_schema),
            strict: None,
        })
        .collect();

    ResponseRequest {
        model: model_id.into(),
        input: input_items,
        stream,
        temperature,
        top_p: None,
        max_output_tokens,
        parallel_tool_calls: if tools.is_empty() {
            None
        } else {
            Some(supports_parallel_tool_calls)
        },
        tool_choice: tool_choice.map(|choice| match choice {
            LanguageModelToolChoice::Auto => open_ai::ToolChoice::Auto,
            LanguageModelToolChoice::Any => open_ai::ToolChoice::Required,
            LanguageModelToolChoice::None => open_ai::ToolChoice::None,
        }),
        tools,
        prompt_cache_key: if supports_prompt_cache_key {
            thread_id
        } else {
            None
        },
        reasoning: reasoning_effort.map(|effort| open_ai::responses::ReasoningConfig { effort }),
    }
}

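/// Flattens a single request message into Responses API input items,
/// flushing accumulated content parts whenever a tool call or tool result
/// forces a new item.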
fn append_message_to_response_items(
    message: LanguageModelRequestMessage,
    index: usize,
    input_items: &mut Vec<ResponseInputItem>,
) {
    let mut content_parts: Vec<ResponseInputContent> = Vec::new();

    for content in message.content {
        match content {
            MessageContent::Text(text) => {
                push_response_text_part(&message.role, text, &mut content_parts);
            }
            MessageContent::Thinking { text, .. } => {
                push_response_text_part(&message.role, text, &mut content_parts);
            }
            MessageContent::RedactedThinking(_) => {}
            MessageContent::Image(image) => {
                push_response_image_part(&message.role, image, &mut content_parts);
            }
            MessageContent::ToolUse(tool_use) => {
                flush_response_parts(&message.role, index, &mut content_parts, input_items);
                let call_id = tool_use.id.to_string();
                input_items.push(ResponseInputItem::FunctionCall(ResponseFunctionCallItem {
                    call_id,
                    name: tool_use.name.to_string(),
                    arguments: tool_use.raw_input,
                }));
            }
            MessageContent::ToolResult(tool_result) => {
                flush_response_parts(&message.role, index, &mut content_parts, input_items);
                input_items.push(ResponseInputItem::FunctionCallOutput(
                    ResponseFunctionCallOutputItem {
                        call_id: tool_result.tool_use_id.to_string(),
                        output: tool_result_output(&tool_result),
                    },
                ));
            }
        }
    }

    flush_response_parts(&message.role, index, &mut content_parts, input_items);
}

fn push_response_text_part(
    role: &Role,
    text: impl Into<String>,
    parts: &mut Vec<ResponseInputContent>,
) {
    let text = text.into();
    if text.trim().is_empty() {
        return;
    }

    match role {
        Role::Assistant => parts.push(ResponseInputContent::OutputText {
            text,
            annotations: Vec::new(),
        }),
        _ => parts.push(ResponseInputContent::Text { text }),
    }
}

fn push_response_image_part(
    role: &Role,
    image: LanguageModelImage,
    parts: &mut Vec<ResponseInputContent>,
) {
    match role {
        Role::Assistant => parts.push(ResponseInputContent::OutputText {
            text: "[image omitted]".to_string(),
            annotations: Vec::new(),
        }),
        _ => parts.push(ResponseInputContent::Image {
            image_url: image.to_base64_url(),
        }),
    }
}

fn flush_response_parts(
    role: &Role,
    _index: usize,
    parts: &mut Vec<ResponseInputContent>,
    input_items: &mut Vec<ResponseInputItem>,
) {
    if parts.is_empty() {
        return;
    }

    let item = ResponseInputItem::Message(ResponseMessageItem {
        role: match role {
            Role::User => open_ai::Role::User,
            Role::Assistant => open_ai::Role::Assistant,
            Role::System => open_ai::Role::System,
        },
        content: parts.clone(),
    });

    input_items.push(item);
    parts.clear();
}

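/// Renders a tool result as the string output of a `function_call_output`
/// item, preferring structured `output` over the displayed content.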
fn tool_result_output(result: &LanguageModelToolResult) -> String {
    if let Some(output) = &result.output {
        match output {
            serde_json::Value::String(text) => text.clone(),
            serde_json::Value::Null => String::new(),
            _ => output.to_string(),
        }
    } else {
        match &result.content {
            LanguageModelToolResultContent::Text(text) => text.to_string(),
            LanguageModelToolResultContent::Image(image) => image.to_base64_url(),
        }
    }
}

fn add_message_content_part(
    new_part: open_ai::MessagePart,
    role: Role,
    messages: &mut Vec<open_ai::RequestMessage>,
) {
    match (role, messages.last_mut()) {
        (Role::User, Some(open_ai::RequestMessage::User { content }))
        | (
            Role::Assistant,
            Some(open_ai::RequestMessage::Assistant {
                content: Some(content),
                ..
            }),
        )
        | (Role::System, Some(open_ai::RequestMessage::System { content, .. })) => {
            content.push_part(new_part);
        }
        _ => {
            messages.push(match role {
                Role::User => open_ai::RequestMessage::User {
                    content: open_ai::MessageContent::from(vec![new_part]),
                },
                Role::Assistant => open_ai::RequestMessage::Assistant {
                    content: Some(open_ai::MessageContent::from(vec![new_part])),
                    tool_calls: Vec::new(),
                },
                Role::System => open_ai::RequestMessage::System {
                    content: open_ai::MessageContent::from(vec![new_part]),
                },
            });
        }
    }
}

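/// Maps Chat Completions stream events to [`LanguageModelCompletionEvent`]s,
/// accumulating streamed tool-call fragments by index until a finish reason
/// arrives.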
pub struct OpenAiEventMapper {
    tool_calls_by_index: HashMap<usize, RawToolCall>,
}

impl OpenAiEventMapper {
    pub fn new() -> Self {
        Self {
            tool_calls_by_index: HashMap::default(),
        }
    }

    pub fn map_stream(
        mut self,
        events: Pin<Box<dyn Send + Stream<Item = Result<ResponseStreamEvent>>>>,
    ) -> impl Stream<Item = Result<LanguageModelCompletionEvent, LanguageModelCompletionError>>
    {
        events.flat_map(move |event| {
            futures::stream::iter(match event {
                Ok(event) => self.map_event(event),
                Err(error) => vec![Err(LanguageModelCompletionError::from(anyhow!(error)))],
            })
        })
    }

    pub fn map_event(
        &mut self,
        event: ResponseStreamEvent,
    ) -> Vec<Result<LanguageModelCompletionEvent, LanguageModelCompletionError>> {
        let mut events = Vec::new();
        if let Some(usage) = event.usage {
            events.push(Ok(LanguageModelCompletionEvent::UsageUpdate(TokenUsage {
                input_tokens: usage.prompt_tokens,
                output_tokens: usage.completion_tokens,
                cache_creation_input_tokens: 0,
                cache_read_input_tokens: 0,
            })));
        }

        let Some(choice) = event.choices.first() else {
            return events;
        };

        if let Some(delta) = choice.delta.as_ref() {
            if let Some(content) = delta.content.clone() {
                events.push(Ok(LanguageModelCompletionEvent::Text(content)));
            }

            if let Some(tool_calls) = delta.tool_calls.as_ref() {
                for tool_call in tool_calls {
                    let entry = self.tool_calls_by_index.entry(tool_call.index).or_default();

                    if let Some(tool_id) = tool_call.id.clone() {
                        entry.id = tool_id;
                    }

                    if let Some(function) = tool_call.function.as_ref() {
                        if let Some(name) = function.name.clone() {
                            entry.name = name;
                        }

                        if let Some(arguments) = function.arguments.clone() {
                            entry.arguments.push_str(&arguments);
                        }
                    }
                }
            }
        }

        match choice.finish_reason.as_deref() {
            Some("stop") => {
                events.push(Ok(LanguageModelCompletionEvent::Stop(StopReason::EndTurn)));
            }
            Some("tool_calls") => {
                events.extend(self.tool_calls_by_index.drain().map(|(_, tool_call)| {
                    match serde_json::Value::from_str(&tool_call.arguments) {
                        Ok(input) => Ok(LanguageModelCompletionEvent::ToolUse(
                            LanguageModelToolUse {
                                id: tool_call.id.clone().into(),
                                name: tool_call.name.as_str().into(),
                                is_input_complete: true,
                                input,
                                raw_input: tool_call.arguments.clone(),
                                thought_signature: None,
                            },
                        )),
                        Err(error) => Ok(LanguageModelCompletionEvent::ToolUseJsonParseError {
                            id: tool_call.id.into(),
                            tool_name: tool_call.name.into(),
                            raw_input: tool_call.arguments.clone().into(),
                            json_parse_error: error.to_string(),
                        }),
                    }
                }));

                events.push(Ok(LanguageModelCompletionEvent::Stop(StopReason::ToolUse)));
            }
            Some(stop_reason) => {
                log::error!("Unexpected OpenAI stop_reason: {stop_reason:?}");
                events.push(Ok(LanguageModelCompletionEvent::Stop(StopReason::EndTurn)));
            }
            None => {}
        }

        events
    }
}

#[derive(Default)]
struct RawToolCall {
    id: String,
    name: String,
    arguments: String,
}

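/// Maps Responses API stream events to [`LanguageModelCompletionEvent`]s,
/// tracking in-flight function calls by item id and deferring the stop
/// reason until a terminal event arrives.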
pub struct OpenAiResponseEventMapper {
    function_calls_by_item: HashMap<String, PendingResponseFunctionCall>,
    pending_stop_reason: Option<StopReason>,
}

#[derive(Default)]
struct PendingResponseFunctionCall {
    call_id: String,
    name: Arc<str>,
    arguments: String,
}

impl OpenAiResponseEventMapper {
    pub fn new() -> Self {
        Self {
            function_calls_by_item: HashMap::default(),
            pending_stop_reason: None,
        }
    }

    pub fn map_stream(
        mut self,
        events: Pin<Box<dyn Send + Stream<Item = Result<ResponsesStreamEvent>>>>,
    ) -> impl Stream<Item = Result<LanguageModelCompletionEvent, LanguageModelCompletionError>>
    {
        events.flat_map(move |event| {
            futures::stream::iter(match event {
                Ok(event) => self.map_event(event),
                Err(error) => vec![Err(LanguageModelCompletionError::from(anyhow!(error)))],
            })
        })
    }

    pub fn map_event(
        &mut self,
        event: ResponsesStreamEvent,
    ) -> Vec<Result<LanguageModelCompletionEvent, LanguageModelCompletionError>> {
        match event {
            ResponsesStreamEvent::OutputItemAdded { item, .. } => {
                let mut events = Vec::new();

                match &item {
                    ResponseOutputItem::Message(message) => {
                        if let Some(id) = &message.id {
                            events.push(Ok(LanguageModelCompletionEvent::StartMessage {
                                message_id: id.clone(),
                            }));
                        }
                    }
                    ResponseOutputItem::FunctionCall(function_call) => {
                        if let Some(item_id) = function_call.id.clone() {
                            let call_id = function_call
                                .call_id
                                .clone()
                                .or_else(|| function_call.id.clone())
                                .unwrap_or_else(|| item_id.clone());
                            let entry = PendingResponseFunctionCall {
                                call_id,
                                name: Arc::<str>::from(
                                    function_call.name.clone().unwrap_or_default(),
                                ),
                                arguments: function_call.arguments.clone(),
                            };
                            self.function_calls_by_item.insert(item_id, entry);
                        }
                    }
                    ResponseOutputItem::Unknown => {}
                }
                events
            }
            ResponsesStreamEvent::OutputTextDelta { delta, .. } => {
                if delta.is_empty() {
                    Vec::new()
                } else {
                    vec![Ok(LanguageModelCompletionEvent::Text(delta))]
                }
            }
            ResponsesStreamEvent::FunctionCallArgumentsDelta { item_id, delta, .. } => {
                if let Some(entry) = self.function_calls_by_item.get_mut(&item_id) {
                    entry.arguments.push_str(&delta);
                }
                Vec::new()
            }
            ResponsesStreamEvent::FunctionCallArgumentsDone {
                item_id, arguments, ..
            } => {
                if let Some(mut entry) = self.function_calls_by_item.remove(&item_id) {
                    if !arguments.is_empty() {
                        entry.arguments = arguments;
                    }
                    let raw_input = entry.arguments.clone();
                    self.pending_stop_reason = Some(StopReason::ToolUse);
                    match serde_json::from_str::<serde_json::Value>(&entry.arguments) {
                        Ok(input) => {
                            vec![Ok(LanguageModelCompletionEvent::ToolUse(
                                LanguageModelToolUse {
                                    id: LanguageModelToolUseId::from(entry.call_id.clone()),
                                    name: entry.name.clone(),
                                    is_input_complete: true,
                                    input,
                                    raw_input,
                                    thought_signature: None,
                                },
                            ))]
                        }
                        Err(error) => {
                            vec![Ok(LanguageModelCompletionEvent::ToolUseJsonParseError {
                                id: LanguageModelToolUseId::from(entry.call_id.clone()),
                                tool_name: entry.name.clone(),
                                raw_input: Arc::<str>::from(raw_input),
                                json_parse_error: error.to_string(),
                            })]
                        }
                    }
                } else {
                    Vec::new()
                }
            }
            ResponsesStreamEvent::Completed { response } => {
                self.handle_completion(response, StopReason::EndTurn)
            }
            ResponsesStreamEvent::Incomplete { response } => {
                let reason = response
                    .status_details
                    .as_ref()
                    .and_then(|details| details.reason.as_deref());
                let stop_reason = match reason {
                    Some("max_output_tokens") => StopReason::MaxTokens,
                    Some("content_filter") => {
                        self.pending_stop_reason = Some(StopReason::Refusal);
                        StopReason::Refusal
                    }
                    _ => self
                        .pending_stop_reason
                        .take()
                        .unwrap_or(StopReason::EndTurn),
                };

                let mut events = Vec::new();
                if self.pending_stop_reason.is_none() {
                    events.extend(self.emit_tool_calls_from_output(&response.output));
                }
                if let Some(usage) = response.usage.as_ref() {
                    events.push(Ok(LanguageModelCompletionEvent::UsageUpdate(
                        token_usage_from_response_usage(usage),
                    )));
                }
                events.push(Ok(LanguageModelCompletionEvent::Stop(stop_reason)));
                events
            }
            ResponsesStreamEvent::Failed { response } => {
                let message = response
                    .status_details
                    .and_then(|details| details.error)
                    .map(|error| error.to_string())
                    .unwrap_or_else(|| "response failed".to_string());
                vec![Err(LanguageModelCompletionError::Other(anyhow!(message)))]
            }
            ResponsesStreamEvent::Error { error }
            | ResponsesStreamEvent::GenericError { error } => {
                vec![Err(LanguageModelCompletionError::Other(anyhow!(
                    "{error:?}"
                )))]
            }
            ResponsesStreamEvent::OutputTextDone { .. } => Vec::new(),
            ResponsesStreamEvent::OutputItemDone { .. }
            | ResponsesStreamEvent::ContentPartAdded { .. }
            | ResponsesStreamEvent::ContentPartDone { .. }
            | ResponsesStreamEvent::Created { .. }
            | ResponsesStreamEvent::InProgress { .. }
            | ResponsesStreamEvent::Unknown => Vec::new(),
        }
    }

    fn handle_completion(
        &mut self,
        response: ResponsesSummary,
        default_reason: StopReason,
    ) -> Vec<Result<LanguageModelCompletionEvent, LanguageModelCompletionError>> {
        let mut events = Vec::new();

        if self.pending_stop_reason.is_none() {
            events.extend(self.emit_tool_calls_from_output(&response.output));
        }

        if let Some(usage) = response.usage.as_ref() {
            events.push(Ok(LanguageModelCompletionEvent::UsageUpdate(
                token_usage_from_response_usage(usage),
            )));
        }

        let stop_reason = self.pending_stop_reason.take().unwrap_or(default_reason);
        events.push(Ok(LanguageModelCompletionEvent::Stop(stop_reason)));
        events
    }

    fn emit_tool_calls_from_output(
        &mut self,
        output: &[ResponseOutputItem],
    ) -> Vec<Result<LanguageModelCompletionEvent, LanguageModelCompletionError>> {
        let mut events = Vec::new();
        for item in output {
            if let ResponseOutputItem::FunctionCall(function_call) = item {
                let Some(call_id) = function_call
                    .call_id
                    .clone()
                    .or_else(|| function_call.id.clone())
                else {
                    log::error!(
                        "Function call item missing both call_id and id: {:?}",
                        function_call
                    );
                    continue;
                };
                let name: Arc<str> = Arc::from(function_call.name.clone().unwrap_or_default());
                let arguments = &function_call.arguments;
                if !arguments.is_empty() {
                    self.pending_stop_reason = Some(StopReason::ToolUse);
                    match serde_json::from_str::<serde_json::Value>(arguments) {
                        Ok(input) => {
                            events.push(Ok(LanguageModelCompletionEvent::ToolUse(
                                LanguageModelToolUse {
                                    id: LanguageModelToolUseId::from(call_id.clone()),
                                    name: name.clone(),
                                    is_input_complete: true,
                                    input,
                                    raw_input: arguments.clone(),
                                    thought_signature: None,
                                },
                            )));
                        }
                        Err(error) => {
                            events.push(Ok(LanguageModelCompletionEvent::ToolUseJsonParseError {
                                id: LanguageModelToolUseId::from(call_id.clone()),
                                tool_name: name.clone(),
                                raw_input: Arc::<str>::from(arguments.clone()),
                                json_parse_error: error.to_string(),
                            }));
                        }
                    }
                }
            }
        }
        events
    }
}

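/// Converts Responses API usage into the common [`TokenUsage`] shape. Cache
/// token counts are not carried over here, so they are reported as zero.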
fn token_usage_from_response_usage(usage: &ResponsesUsage) -> TokenUsage {
    TokenUsage {
        input_tokens: usage.input_tokens.unwrap_or_default(),
        output_tokens: usage.output_tokens.unwrap_or_default(),
        cache_creation_input_tokens: 0,
        cache_read_input_tokens: 0,
    }
}

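/// Flattens request messages into the shape expected by tiktoken-rs for
/// token counting.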
pub(crate) fn collect_tiktoken_messages(
    request: LanguageModelRequest,
) -> Vec<tiktoken_rs::ChatCompletionRequestMessage> {
    request
        .messages
        .into_iter()
        .map(|message| tiktoken_rs::ChatCompletionRequestMessage {
            role: match message.role {
                Role::User => "user".into(),
                Role::Assistant => "assistant".into(),
                Role::System => "system".into(),
            },
            content: Some(message.string_contents()),
            name: None,
            function_call: None,
        })
        .collect::<Vec<_>>()
}

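/// Counts tokens for a request on a background thread, mapping models that
/// tiktoken-rs does not know about onto a tokenizer-compatible model id.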
pub fn count_open_ai_tokens(
    request: LanguageModelRequest,
    model: Model,
    cx: &App,
) -> BoxFuture<'static, Result<u64>> {
    cx.background_spawn(async move {
        let messages = collect_tiktoken_messages(request);
        match model {
            Model::Custom { max_tokens, .. } => {
                let model = if max_tokens >= 100_000 {
                    // If the max tokens is 100k or more, it is likely the o200k_base tokenizer from gpt-4o
                    "gpt-4o"
                } else {
                    // Otherwise fall back to gpt-4, since only cl100k_base and o200k_base are
                    // supported with this tiktoken method
                    "gpt-4"
                };
                tiktoken_rs::num_tokens_from_messages(model, &messages)
            }
            // Currently supported by tiktoken_rs.
            // Sometimes tiktoken-rs is behind on model support. If that is the case, add a new
            // match arm with an override. We enumerate all supported models here so that we can
            // check whether new models are supported yet or not.
            Model::ThreePointFiveTurbo
            | Model::Four
            | Model::FourTurbo
            | Model::FourOmni
            | Model::FourOmniMini
            | Model::FourPointOne
            | Model::FourPointOneMini
            | Model::FourPointOneNano
            | Model::O1
            | Model::O3
            | Model::O3Mini
            | Model::O4Mini
            | Model::Five
            | Model::FiveCodex
            | Model::FiveMini
            | Model::FiveNano => tiktoken_rs::num_tokens_from_messages(model.id(), &messages),
            // GPT-5.1 and 5.2 don't have dedicated tiktoken support; use the gpt-5 tokenizer
            Model::FivePointOne | Model::FivePointTwo => {
                tiktoken_rs::num_tokens_from_messages("gpt-5", &messages)
            }
        }
        .map(|tokens| tokens as u64)
    })
    .boxed()
}

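/// The API-key configuration UI shown in the agent settings panel.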
struct ConfigurationView {
    api_key_editor: Entity<InputField>,
    state: Entity<State>,
    load_credentials_task: Option<Task<()>>,
}

impl ConfigurationView {
    fn new(state: Entity<State>, window: &mut Window, cx: &mut Context<Self>) -> Self {
        let api_key_editor = cx.new(|cx| {
            InputField::new(
                window,
                cx,
                "sk-000000000000000000000000000000000000000000000000",
            )
        });

        cx.observe(&state, |_, _, cx| {
            cx.notify();
        })
        .detach();

        let load_credentials_task = Some(cx.spawn_in(window, {
            let state = state.clone();
            async move |this, cx| {
                if let Some(task) = state
                    .update(cx, |state, cx| state.authenticate(cx))
                    .log_err()
                {
                    // We don't log an error, because "not signed in" is also an error.
                    let _ = task.await;
                }
                this.update(cx, |this, cx| {
                    this.load_credentials_task = None;
                    cx.notify();
                })
                .log_err();
            }
        }));

        Self {
            api_key_editor,
            state,
            load_credentials_task,
        }
    }

    fn save_api_key(&mut self, _: &menu::Confirm, window: &mut Window, cx: &mut Context<Self>) {
        let api_key = self.api_key_editor.read(cx).text(cx).trim().to_string();
        if api_key.is_empty() {
            return;
        }

        // URL changes can cause the editor to be displayed again
        self.api_key_editor
            .update(cx, |editor, cx| editor.set_text("", window, cx));

        let state = self.state.clone();
        cx.spawn_in(window, async move |_, cx| {
            state
                .update(cx, |state, cx| state.set_api_key(Some(api_key), cx))?
                .await
        })
        .detach_and_log_err(cx);
    }

    fn reset_api_key(&mut self, window: &mut Window, cx: &mut Context<Self>) {
        self.api_key_editor
            .update(cx, |input, cx| input.set_text("", window, cx));

        let state = self.state.clone();
        cx.spawn_in(window, async move |_, cx| {
            state
                .update(cx, |state, cx| state.set_api_key(None, cx))?
                .await
        })
        .detach_and_log_err(cx);
    }

    fn should_render_editor(&self, cx: &mut Context<Self>) -> bool {
        !self.state.read(cx).is_authenticated()
    }
}

impl Render for ConfigurationView {
    fn render(&mut self, _: &mut Window, cx: &mut Context<Self>) -> impl IntoElement {
        let env_var_set = self.state.read(cx).api_key_state.is_from_env_var();
        let configured_card_label = if env_var_set {
            format!("API key set in {API_KEY_ENV_VAR_NAME} environment variable")
        } else {
            let api_url = OpenAiLanguageModelProvider::api_url(cx);
            if api_url == OPEN_AI_API_URL {
                "API key configured".to_string()
            } else {
                format!("API key configured for {}", api_url)
            }
        };

        let api_key_section = if self.should_render_editor(cx) {
            v_flex()
                .on_action(cx.listener(Self::save_api_key))
                .child(Label::new("To use Zed's agent with OpenAI, you need to add an API key. Follow these steps:"))
                .child(
                    List::new()
                        .child(
                            ListBulletItem::new("")
                                .child(Label::new("Create one by visiting"))
                                .child(ButtonLink::new("OpenAI's console", "https://platform.openai.com/api-keys"))
                        )
                        .child(
                            ListBulletItem::new("Ensure your OpenAI account has credits")
                        )
                        .child(
                            ListBulletItem::new("Paste your API key below and hit enter to start using the agent")
                        ),
                )
                .child(self.api_key_editor.clone())
                .child(
                    Label::new(format!(
                        "You can also set the {API_KEY_ENV_VAR_NAME} environment variable and restart Zed."
                    ))
                    .size(LabelSize::Small)
                    .color(Color::Muted),
                )
                .child(
                    Label::new(
                        "Note that having a subscription for another service like GitHub Copilot won't work.",
                    )
                    .size(LabelSize::Small)
                    .color(Color::Muted),
                )
                .into_any_element()
        } else {
            ConfiguredApiCard::new(configured_card_label)
                .disabled(env_var_set)
                .on_click(cx.listener(|this, _, window, cx| this.reset_api_key(window, cx)))
                .when(env_var_set, |this| {
                    this.tooltip_label(format!("To reset your API key, unset the {API_KEY_ENV_VAR_NAME} environment variable."))
                })
                .into_any_element()
        };

        let compatible_api_section = h_flex()
            .mt_1p5()
            .gap_0p5()
            .flex_wrap()
            .when(self.should_render_editor(cx), |this| {
                this.pt_1p5()
                    .border_t_1()
                    .border_color(cx.theme().colors().border_variant)
            })
            .child(
                h_flex()
                    .gap_2()
                    .child(
                        Icon::new(IconName::Info)
                            .size(IconSize::XSmall)
                            .color(Color::Muted),
                    )
                    .child(Label::new("Zed also supports OpenAI-compatible models.")),
            )
            .child(
                Button::new("docs", "Learn More")
                    .icon(IconName::ArrowUpRight)
                    .icon_size(IconSize::Small)
                    .icon_color(Color::Muted)
                    .on_click(move |_, _window, cx| {
                        cx.open_url("https://zed.dev/docs/ai/llm-providers#openai-api-compatible")
                    }),
            );

        if self.load_credentials_task.is_some() {
            div().child(Label::new("Loading credentials…")).into_any()
        } else {
            v_flex()
                .size_full()
                .child(api_key_section)
                .child(compatible_api_section)
                .into_any()
        }
    }
}

#[cfg(test)]
mod tests {
    use futures::{StreamExt, executor::block_on};
    use gpui::TestAppContext;
    use language_model::{LanguageModelRequestMessage, LanguageModelRequestTool};
    use open_ai::responses::{
        ResponseFunctionToolCall, ResponseOutputItem, ResponseOutputMessage, ResponseStatusDetails,
        ResponseSummary, ResponseUsage, StreamEvent as ResponsesStreamEvent,
    };
    use pretty_assertions::assert_eq;
    use serde_json::json;

    use super::*;

    fn map_response_events(events: Vec<ResponsesStreamEvent>) -> Vec<LanguageModelCompletionEvent> {
        block_on(async {
            OpenAiResponseEventMapper::new()
                .map_stream(Box::pin(futures::stream::iter(events.into_iter().map(Ok))))
                .collect::<Vec<_>>()
                .await
                .into_iter()
                .map(Result::unwrap)
                .collect()
        })
    }

    fn response_item_message(id: &str) -> ResponseOutputItem {
        ResponseOutputItem::Message(ResponseOutputMessage {
            id: Some(id.to_string()),
            role: Some("assistant".to_string()),
            status: Some("in_progress".to_string()),
            content: vec![],
        })
    }

    fn response_item_function_call(id: &str, args: Option<&str>) -> ResponseOutputItem {
        ResponseOutputItem::FunctionCall(ResponseFunctionToolCall {
            id: Some(id.to_string()),
            status: Some("in_progress".to_string()),
            name: Some("get_weather".to_string()),
            call_id: Some("call_123".to_string()),
            arguments: args.map(|s| s.to_string()).unwrap_or_default(),
        })
    }

    #[gpui::test]
    fn tiktoken_rs_support(cx: &TestAppContext) {
        let request = LanguageModelRequest {
            thread_id: None,
            prompt_id: None,
            intent: None,
            mode: None,
            messages: vec![LanguageModelRequestMessage {
                role: Role::User,
                content: vec![MessageContent::Text("message".into())],
                cache: false,
                reasoning_details: None,
            }],
            tools: vec![],
            tool_choice: None,
            stop: vec![],
            temperature: None,
            thinking_allowed: true,
        };

        // Validate that all models are supported by tiktoken-rs
        for model in Model::iter() {
            let count = cx
                .foreground_executor()
                .block_on(count_open_ai_tokens(
                    request.clone(),
                    model,
                    &cx.app.borrow(),
                ))
                .unwrap();
            assert!(count > 0);
        }
    }

    #[test]
    fn responses_stream_maps_text_and_usage() {
        let events = vec![
            ResponsesStreamEvent::OutputItemAdded {
                output_index: 0,
                sequence_number: None,
                item: response_item_message("msg_123"),
            },
            ResponsesStreamEvent::OutputTextDelta {
                item_id: "msg_123".into(),
                output_index: 0,
                content_index: Some(0),
                delta: "Hello".into(),
            },
            ResponsesStreamEvent::Completed {
                response: ResponseSummary {
                    usage: Some(ResponseUsage {
                        input_tokens: Some(5),
                        output_tokens: Some(3),
                        total_tokens: Some(8),
                    }),
                    ..Default::default()
                },
            },
        ];

        let mapped = map_response_events(events);
        assert!(matches!(
            mapped[0],
            LanguageModelCompletionEvent::StartMessage { ref message_id } if message_id == "msg_123"
        ));
        assert!(matches!(
            mapped[1],
            LanguageModelCompletionEvent::Text(ref text) if text == "Hello"
        ));
        assert!(matches!(
            mapped[2],
            LanguageModelCompletionEvent::UsageUpdate(TokenUsage {
                input_tokens: 5,
                output_tokens: 3,
                ..
            })
        ));
        assert!(matches!(
            mapped[3],
            LanguageModelCompletionEvent::Stop(StopReason::EndTurn)
        ));
    }

    #[test]
    fn into_open_ai_response_builds_complete_payload() {
        let tool_call_id = LanguageModelToolUseId::from("call-42");
        let tool_input = json!({ "city": "Boston" });
        let tool_arguments = serde_json::to_string(&tool_input).unwrap();
        let tool_use = LanguageModelToolUse {
            id: tool_call_id.clone(),
            name: Arc::from("get_weather"),
            raw_input: tool_arguments.clone(),
            input: tool_input,
            is_input_complete: true,
            thought_signature: None,
        };
        let tool_result = LanguageModelToolResult {
            tool_use_id: tool_call_id,
            tool_name: Arc::from("get_weather"),
            is_error: false,
            content: LanguageModelToolResultContent::Text(Arc::from("Sunny")),
            output: Some(json!({ "forecast": "Sunny" })),
        };
        let user_image = LanguageModelImage {
            source: SharedString::from("aGVsbG8="),
            size: None,
        };
        let expected_image_url = user_image.to_base64_url();

        let request = LanguageModelRequest {
            thread_id: Some("thread-123".into()),
            prompt_id: None,
            intent: None,
            mode: None,
            messages: vec![
                LanguageModelRequestMessage {
                    role: Role::System,
                    content: vec![MessageContent::Text("System context".into())],
                    cache: false,
                    reasoning_details: None,
                },
                LanguageModelRequestMessage {
                    role: Role::User,
                    content: vec![
                        MessageContent::Text("Please check the weather.".into()),
                        MessageContent::Image(user_image),
                    ],
                    cache: false,
                    reasoning_details: None,
                },
                LanguageModelRequestMessage {
                    role: Role::Assistant,
                    content: vec![
                        MessageContent::Text("Looking that up.".into()),
                        MessageContent::ToolUse(tool_use),
                    ],
                    cache: false,
                    reasoning_details: None,
                },
                LanguageModelRequestMessage {
                    role: Role::Assistant,
                    content: vec![MessageContent::ToolResult(tool_result)],
                    cache: false,
                    reasoning_details: None,
                },
            ],
            tools: vec![LanguageModelRequestTool {
                name: "get_weather".into(),
                description: "Fetches the weather".into(),
                input_schema: json!({ "type": "object" }),
            }],
            tool_choice: Some(LanguageModelToolChoice::Any),
            stop: vec!["<STOP>".into()],
            temperature: None,
            thinking_allowed: false,
        };

        let response = into_open_ai_response(
            request,
            "custom-model",
            true,
            true,
            Some(2048),
            Some(ReasoningEffort::Low),
        );

        let serialized = serde_json::to_value(&response).unwrap();
        let expected = json!({
            "model": "custom-model",
            "input": [
                {
                    "type": "message",
                    "role": "system",
                    "content": [
                        { "type": "input_text", "text": "System context" }
                    ]
                },
                {
                    "type": "message",
                    "role": "user",
                    "content": [
                        { "type": "input_text", "text": "Please check the weather." },
                        { "type": "input_image", "image_url": expected_image_url }
                    ]
                },
                {
                    "type": "message",
                    "role": "assistant",
                    "content": [
                        { "type": "output_text", "text": "Looking that up.", "annotations": [] }
                    ]
                },
                {
                    "type": "function_call",
                    "call_id": "call-42",
                    "name": "get_weather",
                    "arguments": tool_arguments
                },
                {
                    "type": "function_call_output",
                    "call_id": "call-42",
                    "output": "{\"forecast\":\"Sunny\"}"
                }
            ],
            "stream": true,
            "max_output_tokens": 2048,
            "parallel_tool_calls": true,
            "tool_choice": "required",
            "tools": [
                {
                    "type": "function",
                    "name": "get_weather",
                    "description": "Fetches the weather",
                    "parameters": { "type": "object" }
                }
            ],
            "prompt_cache_key": "thread-123",
            "reasoning": { "effort": "low" }
        });

        assert_eq!(serialized, expected);
    }

    #[test]
    fn responses_stream_maps_tool_calls() {
        let events = vec![
            ResponsesStreamEvent::OutputItemAdded {
                output_index: 0,
                sequence_number: None,
                item: response_item_function_call("item_fn", Some("{\"city\":\"Bos")),
            },
            ResponsesStreamEvent::FunctionCallArgumentsDelta {
                item_id: "item_fn".into(),
                output_index: 0,
                delta: "ton\"}".into(),
                sequence_number: None,
            },
            ResponsesStreamEvent::FunctionCallArgumentsDone {
                item_id: "item_fn".into(),
                output_index: 0,
                arguments: "{\"city\":\"Boston\"}".into(),
                sequence_number: None,
            },
            ResponsesStreamEvent::Completed {
                response: ResponseSummary::default(),
            },
        ];

        let mapped = map_response_events(events);
        assert!(matches!(
            mapped[0],
            LanguageModelCompletionEvent::ToolUse(LanguageModelToolUse {
                ref id,
                ref name,
                ref raw_input,
                ..
            }) if id.to_string() == "call_123"
                && name.as_ref() == "get_weather"
                && raw_input == "{\"city\":\"Boston\"}"
        ));
        assert!(matches!(
            mapped[1],
            LanguageModelCompletionEvent::Stop(StopReason::ToolUse)
        ));
    }

    #[test]
    fn responses_stream_uses_max_tokens_stop_reason() {
        let events = vec![ResponsesStreamEvent::Incomplete {
            response: ResponseSummary {
                status_details: Some(ResponseStatusDetails {
                    reason: Some("max_output_tokens".into()),
                    r#type: Some("incomplete".into()),
                    error: None,
                }),
                usage: Some(ResponseUsage {
                    input_tokens: Some(10),
                    output_tokens: Some(20),
                    total_tokens: Some(30),
                }),
                ..Default::default()
            },
        }];

        let mapped = map_response_events(events);
        assert!(matches!(
            mapped[0],
            LanguageModelCompletionEvent::UsageUpdate(TokenUsage {
                input_tokens: 10,
                output_tokens: 20,
                ..
            })
        ));
        assert!(matches!(
            mapped[1],
            LanguageModelCompletionEvent::Stop(StopReason::MaxTokens)
        ));
    }

    #[test]
    fn responses_stream_handles_multiple_tool_calls() {
        let events = vec![
            ResponsesStreamEvent::OutputItemAdded {
                output_index: 0,
                sequence_number: None,
                item: response_item_function_call("item_fn1", Some("{\"city\":\"NYC\"}")),
            },
            ResponsesStreamEvent::FunctionCallArgumentsDone {
                item_id: "item_fn1".into(),
                output_index: 0,
                arguments: "{\"city\":\"NYC\"}".into(),
                sequence_number: None,
            },
            ResponsesStreamEvent::OutputItemAdded {
                output_index: 1,
                sequence_number: None,
                item: response_item_function_call("item_fn2", Some("{\"city\":\"LA\"}")),
            },
            ResponsesStreamEvent::FunctionCallArgumentsDone {
                item_id: "item_fn2".into(),
                output_index: 1,
                arguments: "{\"city\":\"LA\"}".into(),
                sequence_number: None,
            },
            ResponsesStreamEvent::Completed {
                response: ResponseSummary::default(),
            },
        ];

        let mapped = map_response_events(events);
        assert_eq!(mapped.len(), 3);
        assert!(matches!(
            mapped[0],
            LanguageModelCompletionEvent::ToolUse(LanguageModelToolUse { ref raw_input, .. })
                if raw_input == "{\"city\":\"NYC\"}"
        ));
        assert!(matches!(
            mapped[1],
            LanguageModelCompletionEvent::ToolUse(LanguageModelToolUse { ref raw_input, .. })
                if raw_input == "{\"city\":\"LA\"}"
        ));
        assert!(matches!(
            mapped[2],
            LanguageModelCompletionEvent::Stop(StopReason::ToolUse)
        ));
    }

    #[test]
    fn responses_stream_handles_mixed_text_and_tool_calls() {
        let events = vec![
            ResponsesStreamEvent::OutputItemAdded {
                output_index: 0,
                sequence_number: None,
                item: response_item_message("msg_123"),
            },
            ResponsesStreamEvent::OutputTextDelta {
                item_id: "msg_123".into(),
                output_index: 0,
                content_index: Some(0),
                delta: "Let me check that".into(),
            },
            ResponsesStreamEvent::OutputItemAdded {
                output_index: 1,
                sequence_number: None,
                item: response_item_function_call("item_fn", Some("{\"query\":\"test\"}")),
            },
            ResponsesStreamEvent::FunctionCallArgumentsDone {
                item_id: "item_fn".into(),
                output_index: 1,
                arguments: "{\"query\":\"test\"}".into(),
                sequence_number: None,
            },
            ResponsesStreamEvent::Completed {
                response: ResponseSummary::default(),
            },
        ];

        let mapped = map_response_events(events);
        assert!(matches!(
            mapped[0],
            LanguageModelCompletionEvent::StartMessage { .. }
        ));
        assert!(matches!(
            mapped[1],
            LanguageModelCompletionEvent::Text(ref text) if text == "Let me check that"
        ));
        assert!(matches!(
            mapped[2],
            LanguageModelCompletionEvent::ToolUse(LanguageModelToolUse { ref raw_input, .. })
                if raw_input == "{\"query\":\"test\"}"
        ));
        assert!(matches!(
            mapped[3],
            LanguageModelCompletionEvent::Stop(StopReason::ToolUse)
        ));
    }

    #[test]
    fn responses_stream_handles_json_parse_error() {
        let events = vec![
            ResponsesStreamEvent::OutputItemAdded {
                output_index: 0,
                sequence_number: None,
                item: response_item_function_call("item_fn", Some("{invalid json")),
            },
            ResponsesStreamEvent::FunctionCallArgumentsDone {
                item_id: "item_fn".into(),
                output_index: 0,
                arguments: "{invalid json".into(),
                sequence_number: None,
            },
            ResponsesStreamEvent::Completed {
                response: ResponseSummary::default(),
            },
        ];

        let mapped = map_response_events(events);
        assert!(matches!(
            mapped[0],
            LanguageModelCompletionEvent::ToolUseJsonParseError {
                ref raw_input,
                ..
            } if raw_input.as_ref() == "{invalid json"
        ));
    }

    #[test]
    fn responses_stream_handles_incomplete_function_call() {
        let events = vec![
            ResponsesStreamEvent::OutputItemAdded {
                output_index: 0,
                sequence_number: None,
                item: response_item_function_call("item_fn", Some("{\"city\":")),
            },
            ResponsesStreamEvent::FunctionCallArgumentsDelta {
                item_id: "item_fn".into(),
                output_index: 0,
                delta: "\"Boston\"".into(),
                sequence_number: None,
            },
            ResponsesStreamEvent::Incomplete {
                response: ResponseSummary {
                    status_details: Some(ResponseStatusDetails {
                        reason: Some("max_output_tokens".into()),
                        r#type: Some("incomplete".into()),
                        error: None,
                    }),
                    output: vec![response_item_function_call(
                        "item_fn",
                        Some("{\"city\":\"Boston\"}"),
                    )],
                    ..Default::default()
                },
            },
        ];

        let mapped = map_response_events(events);
        assert!(matches!(
            mapped[0],
            LanguageModelCompletionEvent::ToolUse(LanguageModelToolUse { ref raw_input, .. })
                if raw_input == "{\"city\":\"Boston\"}"
        ));
        assert!(matches!(
            mapped[1],
            LanguageModelCompletionEvent::Stop(StopReason::MaxTokens)
        ));
    }

    #[test]
    fn responses_stream_incomplete_does_not_duplicate_tool_calls() {
        let events = vec![
            ResponsesStreamEvent::OutputItemAdded {
                output_index: 0,
                sequence_number: None,
                item: response_item_function_call("item_fn", Some("{\"city\":\"Boston\"}")),
            },
            ResponsesStreamEvent::FunctionCallArgumentsDone {
                item_id: "item_fn".into(),
                output_index: 0,
                arguments: "{\"city\":\"Boston\"}".into(),
                sequence_number: None,
            },
            ResponsesStreamEvent::Incomplete {
                response: ResponseSummary {
                    status_details: Some(ResponseStatusDetails {
                        reason: Some("max_output_tokens".into()),
                        r#type: Some("incomplete".into()),
                        error: None,
                    }),
                    output: vec![response_item_function_call(
                        "item_fn",
                        Some("{\"city\":\"Boston\"}"),
                    )],
                    ..Default::default()
                },
            },
        ];

        let mapped = map_response_events(events);
        assert_eq!(mapped.len(), 2);
        assert!(matches!(
            mapped[0],
            LanguageModelCompletionEvent::ToolUse(LanguageModelToolUse { ref raw_input, .. })
                if raw_input == "{\"city\":\"Boston\"}"
        ));
        assert!(matches!(
            mapped[1],
            LanguageModelCompletionEvent::Stop(StopReason::MaxTokens)
        ));
    }
}