1use anyhow::{Result, anyhow};
2use futures::{AsyncBufReadExt, AsyncReadExt, StreamExt, io::BufReader, stream::BoxStream};
3use http_client::{AsyncBody, HttpClient, Method, Request as HttpRequest};
4use language_model_core::ReasoningEffort;
5use serde::{Deserialize, Serialize};
6use strum::EnumIter;
7
/// Default base URL for the OpenCode ("Zen") API.
pub const OPENCODE_API_URL: &str = "https://opencode.ai/zen";
9
// Wire protocol used to talk to an upstream model provider.
// Serialized in snake_case (e.g. `open_ai_responses`).
// NOTE: plain `//` comments are used here on purpose — `///` doc comments
// would be picked up by the schemars derive and change the generated schema.
#[derive(Clone, Copy, Debug, Default, PartialEq, Eq, Serialize, Deserialize)]
#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
#[serde(rename_all = "snake_case")]
pub enum ApiProtocol {
    #[default]
    Anthropic,
    OpenAiResponses,
    OpenAiChat,
    Google,
}
20
// OpenCode subscription tiers. The tier selects which models are available
// (see `Model::available_subscriptions`) and which API path suffix is used
// (see `OpenCodeSubscription::api_path_suffix`).
// `//` comments used on purpose — `///` would alter the schemars-derived schema.
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, Serialize, Deserialize)]
#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
#[serde(rename_all = "snake_case")]
pub enum OpenCodeSubscription {
    Zen,
    Go,
    Free,
}
29
30impl OpenCodeSubscription {
31 pub fn display_name(&self) -> &'static str {
32 match self {
33 Self::Zen => "Zen",
34 Self::Go => "Go",
35 Self::Free => "Free",
36 }
37 }
38
39 pub fn id_prefix(&self) -> &'static str {
40 match self {
41 Self::Zen => "zen",
42 Self::Go => "go",
43 Self::Free => "free",
44 }
45 }
46
47 pub fn api_path_suffix(&self) -> &'static str {
48 match self {
49 Self::Zen | Self::Free => "",
50 Self::Go => "/go",
51 }
52 }
53}
54
// Models available through OpenCode, grouped by the upstream API protocol
// they speak (see `Model::protocol`). The serde rename on each variant is
// the wire id and must stay in sync with `Model::id`.
// `//` comments used on purpose — `///` would alter the schemars-derived schema.
#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
#[derive(Clone, Debug, Default, Serialize, Deserialize, PartialEq, EnumIter)]
pub enum Model {
    // -- Anthropic protocol models --
    #[serde(rename = "claude-opus-4-7")]
    ClaudeOpus4_7,
    #[serde(rename = "claude-opus-4-6")]
    ClaudeOpus4_6,
    #[serde(rename = "claude-opus-4-5")]
    ClaudeOpus4_5,
    #[serde(rename = "claude-opus-4-1")]
    ClaudeOpus4_1,
    #[default]
    #[serde(rename = "claude-sonnet-4-6")]
    ClaudeSonnet4_6,
    #[serde(rename = "claude-sonnet-4-5")]
    ClaudeSonnet4_5,
    #[serde(rename = "claude-sonnet-4")]
    ClaudeSonnet4,
    #[serde(rename = "claude-haiku-4-5")]
    ClaudeHaiku4_5,

    // -- OpenAI Responses API models --
    #[serde(rename = "gpt-5.5")]
    Gpt5_5,
    #[serde(rename = "gpt-5.5-pro")]
    Gpt5_5Pro,
    #[serde(rename = "gpt-5.4")]
    Gpt5_4,
    #[serde(rename = "gpt-5.4-pro")]
    Gpt5_4Pro,
    #[serde(rename = "gpt-5.4-mini")]
    Gpt5_4Mini,
    #[serde(rename = "gpt-5.4-nano")]
    Gpt5_4Nano,
    #[serde(rename = "gpt-5.3-codex")]
    Gpt5_3Codex,
    // NOTE(review): variant name drops "Codex" while the wire id keeps it.
    #[serde(rename = "gpt-5.3-codex-spark")]
    Gpt5_3Spark,
    #[serde(rename = "gpt-5.2")]
    Gpt5_2,
    #[serde(rename = "gpt-5.2-codex")]
    Gpt5_2Codex,
    #[serde(rename = "gpt-5.1")]
    Gpt5_1,
    #[serde(rename = "gpt-5.1-codex")]
    Gpt5_1Codex,
    #[serde(rename = "gpt-5.1-codex-max")]
    Gpt5_1CodexMax,
    #[serde(rename = "gpt-5.1-codex-mini")]
    Gpt5_1CodexMini,
    #[serde(rename = "gpt-5")]
    Gpt5,
    #[serde(rename = "gpt-5-codex")]
    Gpt5Codex,
    #[serde(rename = "gpt-5-nano")]
    Gpt5Nano,

    // -- Google protocol models --
    #[serde(rename = "gemini-3.1-pro")]
    Gemini3_1Pro,
    #[serde(rename = "gemini-3-flash")]
    Gemini3Flash,

    // -- OpenAI Chat Completions protocol models --
    // (non-free MiniMax models switch protocol by subscription; see `Model::protocol`)
    #[serde(rename = "deepseek-v4-pro")]
    DeepSeekV4Pro,
    #[serde(rename = "deepseek-v4-flash")]
    DeepSeekV4Flash,
    #[serde(rename = "minimax-m2.5")]
    MiniMaxM2_5,
    #[serde(rename = "minimax-m2.5-free")]
    MiniMaxM2_5Free,
    #[serde(rename = "glm-5")]
    Glm5,
    #[serde(rename = "glm-5.1")]
    Glm5_1,
    #[serde(rename = "kimi-k2.5")]
    KimiK2_5,
    #[serde(rename = "kimi-k2.6")]
    KimiK2_6,
    #[serde(rename = "minimax-m2.7")]
    MiniMaxM2_7,
    #[serde(rename = "mimo-v2.5-pro")]
    MimoV2_5Pro,
    #[serde(rename = "mimo-v2.5")]
    MimoV2_5,
    #[serde(rename = "big-pickle")]
    BigPickle,
    #[serde(rename = "ring-2.6-1t-free")]
    Ring2_6_1TFree,
    #[serde(rename = "nemotron-3-super-free")]
    Nemotron3SuperFree,
    #[serde(rename = "qwen3.5-plus")]
    Qwen3_5Plus,
    #[serde(rename = "qwen3.6-plus")]
    Qwen3_6Plus,

    // -- Custom model --
    // User-configured model; all metadata the accessors would otherwise
    // hard-code is carried in the fields here.
    #[serde(rename = "custom")]
    Custom {
        name: String,
        display_name: Option<String>,
        max_tokens: u64,
        max_output_tokens: Option<u64>,
        protocol: ApiProtocol,
        reasoning_effort_levels: Option<Vec<ReasoningEffort>>,
        custom_model_api_url: Option<String>,
        interleaved_reasoning: bool,
    },
}
166
impl Model {
    /// Default low-latency model (Zen tier).
    pub fn default_fast() -> Self {
        Self::ClaudeHaiku4_5
    }

    /// Default model for the Go subscription tier.
    pub fn default_go() -> Self {
        Self::KimiK2_5
    }

    /// Default low-latency model for the Go subscription tier.
    pub fn default_go_fast() -> Self {
        Self::MiniMaxM2_5
    }

    /// Default model for the Free subscription tier.
    pub fn default_free() -> Self {
        Self::BigPickle
    }

    /// Default low-latency model for the Free subscription tier.
    pub fn default_free_fast() -> Self {
        Self::MiniMaxM2_5Free
    }

    /// Subscription tiers under which this built-in model is offered.
    ///
    /// Returns an empty slice for `Custom` models, whose subscription comes
    /// from settings rather than from this table.
    pub fn available_subscriptions(&self) -> &'static [OpenCodeSubscription] {
        match self {
            // Models available in both Zen and Go
            Self::Glm5
            | Self::Glm5_1
            | Self::KimiK2_6
            | Self::KimiK2_5
            | Self::MiniMaxM2_5
            | Self::Qwen3_5Plus
            | Self::Qwen3_6Plus => &[OpenCodeSubscription::Zen, OpenCodeSubscription::Go],

            // Go-only models
            Self::MiniMaxM2_7
            | Self::MimoV2_5Pro
            | Self::MimoV2_5
            | Self::DeepSeekV4Pro
            | Self::DeepSeekV4Flash => &[OpenCodeSubscription::Go],

            // Free models
            Self::MiniMaxM2_5Free
            | Self::Nemotron3SuperFree
            | Self::BigPickle
            | Self::Ring2_6_1TFree => &[OpenCodeSubscription::Free],

            // Custom models get their subscription from settings, not from here
            Self::Custom { .. } => &[],

            // All other built-in models are Zen-only
            // NOTE(review): the catch-all means a newly added variant silently
            // defaults to Zen-only instead of forcing a compile-time decision.
            _ => &[OpenCodeSubscription::Zen],
        }
    }

    /// Wire identifier for this model. Must stay in sync with the variant's
    /// serde `rename` attribute on the `Model` enum.
    pub fn id(&self) -> &str {
        match self {
            Self::ClaudeOpus4_7 => "claude-opus-4-7",
            Self::ClaudeOpus4_6 => "claude-opus-4-6",
            Self::ClaudeOpus4_5 => "claude-opus-4-5",
            Self::ClaudeOpus4_1 => "claude-opus-4-1",
            Self::ClaudeSonnet4_6 => "claude-sonnet-4-6",
            Self::ClaudeSonnet4_5 => "claude-sonnet-4-5",
            Self::ClaudeSonnet4 => "claude-sonnet-4",
            Self::ClaudeHaiku4_5 => "claude-haiku-4-5",

            Self::Gpt5_5 => "gpt-5.5",
            Self::Gpt5_5Pro => "gpt-5.5-pro",
            Self::Gpt5_4 => "gpt-5.4",
            Self::Gpt5_4Pro => "gpt-5.4-pro",
            Self::Gpt5_4Mini => "gpt-5.4-mini",
            Self::Gpt5_4Nano => "gpt-5.4-nano",
            Self::Gpt5_3Codex => "gpt-5.3-codex",
            Self::Gpt5_3Spark => "gpt-5.3-codex-spark",
            Self::Gpt5_2 => "gpt-5.2",
            Self::Gpt5_2Codex => "gpt-5.2-codex",
            Self::Gpt5_1 => "gpt-5.1",
            Self::Gpt5_1Codex => "gpt-5.1-codex",
            Self::Gpt5_1CodexMax => "gpt-5.1-codex-max",
            Self::Gpt5_1CodexMini => "gpt-5.1-codex-mini",
            Self::Gpt5 => "gpt-5",
            Self::Gpt5Codex => "gpt-5-codex",
            Self::Gpt5Nano => "gpt-5-nano",

            Self::Gemini3_1Pro => "gemini-3.1-pro",
            Self::Gemini3Flash => "gemini-3-flash",

            Self::DeepSeekV4Pro => "deepseek-v4-pro",
            Self::DeepSeekV4Flash => "deepseek-v4-flash",
            Self::MiniMaxM2_5 => "minimax-m2.5",
            Self::MiniMaxM2_5Free => "minimax-m2.5-free",
            Self::Glm5 => "glm-5",
            Self::Glm5_1 => "glm-5.1",
            Self::KimiK2_5 => "kimi-k2.5",
            Self::KimiK2_6 => "kimi-k2.6",
            Self::MiniMaxM2_7 => "minimax-m2.7",
            Self::MimoV2_5Pro => "mimo-v2.5-pro",
            Self::MimoV2_5 => "mimo-v2.5",
            Self::Qwen3_5Plus => "qwen3.5-plus",
            Self::Qwen3_6Plus => "qwen3.6-plus",
            Self::BigPickle => "big-pickle",
            Self::Ring2_6_1TFree => "ring-2.6-1t-free",
            Self::Nemotron3SuperFree => "nemotron-3-super-free",

            // Custom models are identified by their configured name.
            Self::Custom { name, .. } => name,
        }
    }

    /// Human-readable name shown in the UI. For `Custom` models, falls back
    /// to the configured `name` when no `display_name` is set.
    pub fn display_name(&self) -> &str {
        match self {
            Self::ClaudeOpus4_7 => "Claude Opus 4.7",
            Self::ClaudeOpus4_6 => "Claude Opus 4.6",
            Self::ClaudeOpus4_5 => "Claude Opus 4.5",
            Self::ClaudeOpus4_1 => "Claude Opus 4.1",
            Self::ClaudeSonnet4_6 => "Claude Sonnet 4.6",
            Self::ClaudeSonnet4_5 => "Claude Sonnet 4.5",
            Self::ClaudeSonnet4 => "Claude Sonnet 4",
            Self::ClaudeHaiku4_5 => "Claude Haiku 4.5",

            Self::Gpt5_5 => "GPT 5.5",
            Self::Gpt5_5Pro => "GPT 5.5 Pro",
            Self::Gpt5_4 => "GPT 5.4",
            Self::Gpt5_4Pro => "GPT 5.4 Pro",
            Self::Gpt5_4Mini => "GPT 5.4 Mini",
            Self::Gpt5_4Nano => "GPT 5.4 Nano",
            Self::Gpt5_3Codex => "GPT 5.3 Codex",
            Self::Gpt5_3Spark => "GPT 5.3 Codex Spark",
            Self::Gpt5_2 => "GPT 5.2",
            Self::Gpt5_2Codex => "GPT 5.2 Codex",
            Self::Gpt5_1 => "GPT 5.1",
            Self::Gpt5_1Codex => "GPT 5.1 Codex",
            Self::Gpt5_1CodexMax => "GPT 5.1 Codex Max",
            Self::Gpt5_1CodexMini => "GPT 5.1 Codex Mini",
            Self::Gpt5 => "GPT 5",
            Self::Gpt5Codex => "GPT 5 Codex",
            Self::Gpt5Nano => "GPT 5 Nano",

            Self::Gemini3_1Pro => "Gemini 3.1 Pro",
            Self::Gemini3Flash => "Gemini 3 Flash",

            Self::DeepSeekV4Pro => "DeepSeek V4 Pro",
            Self::DeepSeekV4Flash => "DeepSeek V4 Flash",
            Self::MiniMaxM2_5 => "MiniMax M2.5",
            Self::MiniMaxM2_5Free => "MiniMax M2.5 Free",
            Self::Glm5 => "GLM 5",
            Self::Glm5_1 => "GLM 5.1",
            Self::KimiK2_5 => "Kimi K2.5",
            Self::KimiK2_6 => "Kimi K2.6",
            Self::MiniMaxM2_7 => "MiniMax M2.7",
            Self::MimoV2_5Pro => "MiMo V2.5 Pro",
            Self::MimoV2_5 => "MiMo V2.5",
            Self::Qwen3_5Plus => "Qwen3.5 Plus",
            Self::Qwen3_6Plus => "Qwen3.6 Plus",
            Self::BigPickle => "Big Pickle",
            Self::Ring2_6_1TFree => "Ring 2.6 1T Free",
            Self::Nemotron3SuperFree => "Nemotron 3 Super Free",

            Self::Custom {
                name, display_name, ..
            } => display_name.as_deref().unwrap_or(name),
        }
    }

    /// API protocol to use for this model under the given subscription.
    ///
    /// The subscription matters only for the non-free MiniMax models, which
    /// speak OpenAI Chat on Zen and Anthropic elsewhere.
    pub fn protocol(&self, subscription: OpenCodeSubscription) -> ApiProtocol {
        match self {
            // Models offered by OpenCode have the same configuration across subscriptions
            // with one outlier: non-free MiniMax models
            Self::MiniMaxM2_7 | Self::MiniMaxM2_5 => {
                if subscription == OpenCodeSubscription::Zen {
                    ApiProtocol::OpenAiChat
                } else {
                    ApiProtocol::Anthropic
                }
            }

            Self::ClaudeOpus4_7
            | Self::ClaudeOpus4_6
            | Self::ClaudeOpus4_5
            | Self::ClaudeOpus4_1
            | Self::ClaudeSonnet4_6
            | Self::ClaudeSonnet4_5
            | Self::ClaudeSonnet4
            | Self::ClaudeHaiku4_5 => ApiProtocol::Anthropic,

            Self::Gpt5_5
            | Self::Gpt5_5Pro
            | Self::Gpt5_4
            | Self::Gpt5_4Pro
            | Self::Gpt5_4Mini
            | Self::Gpt5_4Nano
            | Self::Gpt5_3Codex
            | Self::Gpt5_3Spark
            | Self::Gpt5_2
            | Self::Gpt5_2Codex
            | Self::Gpt5_1
            | Self::Gpt5_1Codex
            | Self::Gpt5_1CodexMax
            | Self::Gpt5_1CodexMini
            | Self::Gpt5
            | Self::Gpt5Codex
            | Self::Gpt5Nano => ApiProtocol::OpenAiResponses,

            Self::Gemini3_1Pro | Self::Gemini3Flash => ApiProtocol::Google,

            Self::MiniMaxM2_5Free
            | Self::Glm5
            | Self::Glm5_1
            | Self::KimiK2_5
            | Self::KimiK2_6
            | Self::MimoV2_5Pro
            | Self::MimoV2_5
            | Self::Qwen3_5Plus
            | Self::Qwen3_6Plus
            | Self::DeepSeekV4Pro
            | Self::DeepSeekV4Flash
            | Self::BigPickle
            | Self::Ring2_6_1TFree
            | Self::Nemotron3SuperFree => ApiProtocol::OpenAiChat,

            Self::Custom { protocol, .. } => *protocol,
        }
    }

    /// Whether interleaved reasoning is enabled for this model.
    /// Defaults to `false` for models not listed here.
    pub fn interleaved_reasoning(&self) -> bool {
        match self {
            Self::DeepSeekV4Pro
            | Self::DeepSeekV4Flash
            | Self::KimiK2_5
            | Self::KimiK2_6
            | Self::MimoV2_5
            | Self::MimoV2_5Pro
            | Self::Glm5
            | Self::Glm5_1
            | Self::BigPickle
            | Self::Ring2_6_1TFree => true,

            Self::Custom {
                interleaved_reasoning,
                ..
            } => *interleaved_reasoning,

            _ => false,
        }
    }

    /// Maximum token count (context window) for this model.
    pub fn max_token_count(&self) -> u64 {
        match self {
            // Anthropic models
            Self::ClaudeOpus4_7 => 1_000_000,
            Self::ClaudeOpus4_6 | Self::ClaudeSonnet4_6 => 1_000_000,
            Self::ClaudeSonnet4_5 => 1_000_000,
            Self::ClaudeOpus4_5 | Self::ClaudeHaiku4_5 => 200_000,
            Self::ClaudeOpus4_1 => 200_000,
            Self::ClaudeSonnet4 => 1_000_000,

            // OpenAI models
            Self::Gpt5_5 | Self::Gpt5_5Pro => 1_050_000,
            Self::Gpt5_4 | Self::Gpt5_4Pro => 1_050_000,
            Self::Gpt5_4Mini | Self::Gpt5_4Nano => 400_000,
            Self::Gpt5_3Codex => 400_000,
            Self::Gpt5_3Spark => 128_000,
            Self::Gpt5_2 | Self::Gpt5_2Codex => 400_000,
            Self::Gpt5_1 | Self::Gpt5_1Codex | Self::Gpt5_1CodexMax | Self::Gpt5_1CodexMini => {
                400_000
            }
            Self::Gpt5 | Self::Gpt5Codex | Self::Gpt5Nano => 400_000,

            // Google models
            Self::Gemini3_1Pro => 1_048_576,
            Self::Gemini3Flash => 1_048_576,

            // OpenAI-compatible models
            Self::MiniMaxM2_7 => 204_800,
            Self::MiniMaxM2_5 | Self::MiniMaxM2_5Free => 204_800,
            Self::Glm5 | Self::Glm5_1 => 202_725,
            Self::KimiK2_6 | Self::KimiK2_5 => 262_144,
            Self::MimoV2_5Pro => 1_048_576,
            Self::MimoV2_5 => 1_000_000,
            Self::Qwen3_5Plus | Self::Qwen3_6Plus => 262_144,
            Self::BigPickle => 200_000,
            Self::Ring2_6_1TFree => 262_000,
            Self::Nemotron3SuperFree => 204_800,
            Self::DeepSeekV4Pro | Self::DeepSeekV4Flash => 1_000_000,

            Self::Custom { max_tokens, .. } => *max_tokens,
        }
    }

    /// Maximum number of output tokens for this model, when known.
    /// `Custom` models report their configured value (which may be `None`).
    pub fn max_output_tokens(&self) -> Option<u64> {
        match self {
            // Anthropic models
            Self::ClaudeOpus4_7 | Self::ClaudeOpus4_6 => Some(128_000),
            Self::ClaudeOpus4_5
            | Self::ClaudeSonnet4_6
            | Self::ClaudeSonnet4_5
            | Self::ClaudeHaiku4_5
            | Self::ClaudeSonnet4 => Some(64_000),
            Self::ClaudeOpus4_1 => Some(32_000),

            // OpenAI models
            Self::Gpt5_5
            | Self::Gpt5_5Pro
            | Self::Gpt5_4
            | Self::Gpt5_4Pro
            | Self::Gpt5_4Mini
            | Self::Gpt5_4Nano
            | Self::Gpt5_3Codex
            | Self::Gpt5_3Spark
            | Self::Gpt5_2
            | Self::Gpt5_2Codex
            | Self::Gpt5_1
            | Self::Gpt5_1Codex
            | Self::Gpt5_1CodexMax
            | Self::Gpt5_1CodexMini
            | Self::Gpt5
            | Self::Gpt5Codex
            | Self::Gpt5Nano => Some(128_000),

            // Google models
            Self::Gemini3_1Pro | Self::Gemini3Flash => Some(65_536),

            // OpenAI-compatible models
            Self::MiniMaxM2_7 => Some(131_072),
            Self::MiniMaxM2_5 | Self::MiniMaxM2_5Free => Some(131_072),
            Self::Glm5 | Self::Glm5_1 => Some(32_768),
            Self::BigPickle => Some(128_000),
            Self::Ring2_6_1TFree => Some(66_000),
            Self::KimiK2_6 | Self::KimiK2_5 => Some(65_536),
            Self::Qwen3_5Plus | Self::Qwen3_6Plus => Some(65_536),
            Self::DeepSeekV4Pro | Self::DeepSeekV4Flash => Some(384_000),
            Self::Nemotron3SuperFree => Some(128_000),
            Self::MimoV2_5Pro | Self::MimoV2_5 => Some(128_000),

            Self::Custom {
                max_output_tokens, ..
            } => *max_output_tokens,
        }
    }

    /// Whether the model supports tool use — currently `true` for every model.
    pub fn supports_tools(&self) -> bool {
        true
    }

    /// Whether the model accepts image input.
    pub fn supports_images(&self) -> bool {
        match self {
            // Anthropic models support images
            Self::ClaudeOpus4_7
            | Self::ClaudeOpus4_6
            | Self::ClaudeOpus4_5
            | Self::ClaudeOpus4_1
            | Self::ClaudeSonnet4_6
            | Self::ClaudeSonnet4_5
            | Self::ClaudeSonnet4
            | Self::ClaudeHaiku4_5 => true,

            // OpenAI models support images
            Self::Gpt5_5
            | Self::Gpt5_5Pro
            | Self::Gpt5_4
            | Self::Gpt5_4Pro
            | Self::Gpt5_4Mini
            | Self::Gpt5_4Nano
            | Self::Gpt5_3Codex
            | Self::Gpt5_3Spark
            | Self::Gpt5_2
            | Self::Gpt5_2Codex
            | Self::Gpt5_1
            | Self::Gpt5_1Codex
            | Self::Gpt5_1CodexMax
            | Self::Gpt5_1CodexMini
            | Self::Gpt5
            | Self::Gpt5Codex
            | Self::Gpt5Nano => true,

            // Google models support images
            Self::Gemini3_1Pro | Self::Gemini3Flash => true,

            // OpenAI-compatible models with image support
            Self::KimiK2_6
            | Self::KimiK2_5
            | Self::MimoV2_5
            | Self::Qwen3_5Plus
            | Self::Qwen3_6Plus => true,

            // OpenAI-compatible models without image support
            Self::MiniMaxM2_5
            | Self::MiniMaxM2_5Free
            | Self::Glm5
            | Self::Glm5_1
            | Self::MiniMaxM2_7
            | Self::MimoV2_5Pro
            | Self::DeepSeekV4Pro
            | Self::DeepSeekV4Flash
            | Self::BigPickle
            | Self::Ring2_6_1TFree
            | Self::Nemotron3SuperFree => false,

            // NOTE(review): this `matches!` lists every `ApiProtocol` variant,
            // so it is always `true` for custom models — presumably kept
            // exhaustive so a future protocol forces a decision here; confirm.
            Self::Custom { protocol, .. } => matches!(
                protocol,
                ApiProtocol::Anthropic
                    | ApiProtocol::OpenAiResponses
                    | ApiProtocol::OpenAiChat
                    | ApiProtocol::Google
            ),
        }
    }

    /// Reasoning-effort levels this model accepts, or `None` when effort is
    /// not configurable for it.
    pub fn supported_reasoning_effort_levels(&self) -> Option<Vec<ReasoningEffort>> {
        match self {
            Self::Ring2_6_1TFree | Self::MimoV2_5Pro | Self::MimoV2_5 => Some(vec![
                ReasoningEffort::Low,
                ReasoningEffort::Medium,
                ReasoningEffort::High,
            ]),

            Self::DeepSeekV4Pro | Self::DeepSeekV4Flash => Some(vec![
                ReasoningEffort::Low,
                ReasoningEffort::Medium,
                ReasoningEffort::High,
                ReasoningEffort::XHigh,
            ]),

            Self::Custom {
                reasoning_effort_levels,
                ..
            } => reasoning_effort_levels.clone(),

            _ => None,
        }
    }
}
596
597/// Stream generate content for Google models via OpenCode.
598///
599/// Unlike `google_ai::stream_generate_content()`, this uses:
600/// - `/v1/models/{model}` path (not `/v1beta/models/{model}`)
601/// - `Authorization: Bearer` header (not `key=` query param)
602pub async fn stream_generate_content(
603 client: &dyn HttpClient,
604 api_url: &str,
605 api_key: &str,
606 request: google_ai::GenerateContentRequest,
607) -> Result<BoxStream<'static, Result<google_ai::GenerateContentResponse>>> {
608 let api_key = api_key.trim();
609
610 let model_id = &request.model.model_id;
611
612 let uri = format!("{api_url}/v1/models/{model_id}:streamGenerateContent?alt=sse");
613
614 let request_builder = HttpRequest::builder()
615 .method(Method::POST)
616 .uri(uri)
617 .header("Content-Type", "application/json")
618 .header("Authorization", format!("Bearer {api_key}"));
619
620 let request = request_builder.body(AsyncBody::from(serde_json::to_string(&request)?))?;
621 let mut response = client.send(request).await?;
622 if response.status().is_success() {
623 let reader = BufReader::new(response.into_body());
624 Ok(reader
625 .lines()
626 .filter_map(|line| async move {
627 match line {
628 Ok(line) => {
629 if let Some(line) = line.strip_prefix("data: ") {
630 match serde_json::from_str(line) {
631 Ok(response) => Some(Ok(response)),
632 Err(error) => {
633 Some(Err(anyhow!("Error parsing JSON: {error:?}\n{line:?}")))
634 }
635 }
636 } else {
637 None
638 }
639 }
640 Err(error) => Some(Err(anyhow!(error))),
641 }
642 })
643 .boxed())
644 } else {
645 let mut text = String::new();
646 response.body_mut().read_to_string(&mut text).await?;
647 Err(anyhow!(
648 "error during streamGenerateContent via OpenCode, status code: {:?}, body: {}",
649 response.status(),
650 text
651 ))
652 }
653}