open_ai.rs

  1pub mod batches;
  2pub mod responses;
  3
  4use anyhow::{Context as _, Result, anyhow};
  5use futures::{AsyncBufReadExt, AsyncReadExt, StreamExt, io::BufReader, stream::BoxStream};
  6use http_client::{
  7    AsyncBody, HttpClient, Method, Request as HttpRequest, StatusCode,
  8    http::{HeaderMap, HeaderValue},
  9};
 10use serde::{Deserialize, Serialize};
 11use serde_json::Value;
 12pub use settings::OpenAiReasoningEffort as ReasoningEffort;
 13use std::{convert::TryFrom, future::Future};
 14use strum::EnumIter;
 15use thiserror::Error;
 16
 17pub const OPEN_AI_API_URL: &str = "https://api.openai.com/v1";
 18
 19fn is_none_or_empty<T: AsRef<[U]>, U>(opt: &Option<T>) -> bool {
 20    opt.as_ref().is_none_or(|v| v.as_ref().is_empty())
 21}
 22
/// The author of a chat message, serialized lowercase on the wire
/// (`"user"`, `"assistant"`, `"system"`, `"tool"`).
#[derive(Clone, Copy, Serialize, Deserialize, Debug, Eq, PartialEq)]
#[serde(rename_all = "lowercase")]
pub enum Role {
    User,
    Assistant,
    System,
    Tool,
}
 31
 32impl TryFrom<String> for Role {
 33    type Error = anyhow::Error;
 34
 35    fn try_from(value: String) -> Result<Self> {
 36        match value.as_str() {
 37            "user" => Ok(Self::User),
 38            "assistant" => Ok(Self::Assistant),
 39            "system" => Ok(Self::System),
 40            "tool" => Ok(Self::Tool),
 41            _ => anyhow::bail!("invalid role '{value}'"),
 42        }
 43    }
 44}
 45
 46impl From<Role> for String {
 47    fn from(val: Role) -> Self {
 48        match val {
 49            Role::User => "user".to_owned(),
 50            Role::Assistant => "assistant".to_owned(),
 51            Role::System => "system".to_owned(),
 52            Role::Tool => "tool".to_owned(),
 53        }
 54    }
 55}
 56
/// The set of OpenAI models known to this crate, plus a `Custom` escape hatch
/// for user-configured models. Variants serialize to their API model id
/// (e.g. `"gpt-4o"`).
#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
#[derive(Clone, Debug, Default, Serialize, Deserialize, PartialEq, EnumIter)]
pub enum Model {
    #[serde(rename = "gpt-3.5-turbo")]
    ThreePointFiveTurbo,
    #[serde(rename = "gpt-4")]
    Four,
    #[serde(rename = "gpt-4-turbo")]
    FourTurbo,
    #[serde(rename = "gpt-4o")]
    #[default]
    FourOmni,
    #[serde(rename = "gpt-4o-mini")]
    FourOmniMini,
    #[serde(rename = "gpt-4.1")]
    FourPointOne,
    #[serde(rename = "gpt-4.1-mini")]
    FourPointOneMini,
    #[serde(rename = "gpt-4.1-nano")]
    FourPointOneNano,
    #[serde(rename = "o1")]
    O1,
    #[serde(rename = "o3-mini")]
    O3Mini,
    #[serde(rename = "o3")]
    O3,
    #[serde(rename = "o4-mini")]
    O4Mini,
    #[serde(rename = "gpt-5")]
    Five,
    #[serde(rename = "gpt-5-codex")]
    FiveCodex,
    #[serde(rename = "gpt-5-mini")]
    FiveMini,
    #[serde(rename = "gpt-5-nano")]
    FiveNano,
    #[serde(rename = "gpt-5.1")]
    FivePointOne,
    #[serde(rename = "gpt-5.2")]
    FivePointTwo,
    #[serde(rename = "gpt-5.2-codex")]
    FivePointTwoCodex,
    /// A user-defined model not in the list above.
    #[serde(rename = "custom")]
    Custom {
        /// The model id sent to the API.
        name: String,
        /// The name displayed in the UI, such as in the assistant panel model dropdown menu.
        display_name: Option<String>,
        /// Context window size, in tokens.
        max_tokens: u64,
        max_output_tokens: Option<u64>,
        max_completion_tokens: Option<u64>,
        reasoning_effort: Option<ReasoningEffort>,
        /// Whether the model is usable via `/chat/completions`
        /// (defaults to true when omitted from settings).
        #[serde(default = "default_supports_chat_completions")]
        supports_chat_completions: bool,
    },
}
112
/// Serde default for `Model::Custom::supports_chat_completions`.
const fn default_supports_chat_completions() -> bool {
    true
}
116
117impl Model {
118    pub fn default_fast() -> Self {
119        // TODO: Replace with FiveMini since all other models are deprecated
120        Self::FourPointOneMini
121    }
122
123    pub fn from_id(id: &str) -> Result<Self> {
124        match id {
125            "gpt-3.5-turbo" => Ok(Self::ThreePointFiveTurbo),
126            "gpt-4" => Ok(Self::Four),
127            "gpt-4-turbo-preview" => Ok(Self::FourTurbo),
128            "gpt-4o" => Ok(Self::FourOmni),
129            "gpt-4o-mini" => Ok(Self::FourOmniMini),
130            "gpt-4.1" => Ok(Self::FourPointOne),
131            "gpt-4.1-mini" => Ok(Self::FourPointOneMini),
132            "gpt-4.1-nano" => Ok(Self::FourPointOneNano),
133            "o1" => Ok(Self::O1),
134            "o3-mini" => Ok(Self::O3Mini),
135            "o3" => Ok(Self::O3),
136            "o4-mini" => Ok(Self::O4Mini),
137            "gpt-5" => Ok(Self::Five),
138            "gpt-5-codex" => Ok(Self::FiveCodex),
139            "gpt-5-mini" => Ok(Self::FiveMini),
140            "gpt-5-nano" => Ok(Self::FiveNano),
141            "gpt-5.1" => Ok(Self::FivePointOne),
142            "gpt-5.2" => Ok(Self::FivePointTwo),
143            "gpt-5.2-codex" => Ok(Self::FivePointTwoCodex),
144            invalid_id => anyhow::bail!("invalid model id '{invalid_id}'"),
145        }
146    }
147
148    pub fn id(&self) -> &str {
149        match self {
150            Self::ThreePointFiveTurbo => "gpt-3.5-turbo",
151            Self::Four => "gpt-4",
152            Self::FourTurbo => "gpt-4-turbo",
153            Self::FourOmni => "gpt-4o",
154            Self::FourOmniMini => "gpt-4o-mini",
155            Self::FourPointOne => "gpt-4.1",
156            Self::FourPointOneMini => "gpt-4.1-mini",
157            Self::FourPointOneNano => "gpt-4.1-nano",
158            Self::O1 => "o1",
159            Self::O3Mini => "o3-mini",
160            Self::O3 => "o3",
161            Self::O4Mini => "o4-mini",
162            Self::Five => "gpt-5",
163            Self::FiveCodex => "gpt-5-codex",
164            Self::FiveMini => "gpt-5-mini",
165            Self::FiveNano => "gpt-5-nano",
166            Self::FivePointOne => "gpt-5.1",
167            Self::FivePointTwo => "gpt-5.2",
168            Self::FivePointTwoCodex => "gpt-5.2-codex",
169            Self::Custom { name, .. } => name,
170        }
171    }
172
173    pub fn display_name(&self) -> &str {
174        match self {
175            Self::ThreePointFiveTurbo => "gpt-3.5-turbo",
176            Self::Four => "gpt-4",
177            Self::FourTurbo => "gpt-4-turbo",
178            Self::FourOmni => "gpt-4o",
179            Self::FourOmniMini => "gpt-4o-mini",
180            Self::FourPointOne => "gpt-4.1",
181            Self::FourPointOneMini => "gpt-4.1-mini",
182            Self::FourPointOneNano => "gpt-4.1-nano",
183            Self::O1 => "o1",
184            Self::O3Mini => "o3-mini",
185            Self::O3 => "o3",
186            Self::O4Mini => "o4-mini",
187            Self::Five => "gpt-5",
188            Self::FiveCodex => "gpt-5-codex",
189            Self::FiveMini => "gpt-5-mini",
190            Self::FiveNano => "gpt-5-nano",
191            Self::FivePointOne => "gpt-5.1",
192            Self::FivePointTwo => "gpt-5.2",
193            Self::FivePointTwoCodex => "gpt-5.2-codex",
194            Self::Custom {
195                name, display_name, ..
196            } => display_name.as_ref().unwrap_or(name),
197        }
198    }
199
200    pub fn max_token_count(&self) -> u64 {
201        match self {
202            Self::ThreePointFiveTurbo => 16_385,
203            Self::Four => 8_192,
204            Self::FourTurbo => 128_000,
205            Self::FourOmni => 128_000,
206            Self::FourOmniMini => 128_000,
207            Self::FourPointOne => 1_047_576,
208            Self::FourPointOneMini => 1_047_576,
209            Self::FourPointOneNano => 1_047_576,
210            Self::O1 => 200_000,
211            Self::O3Mini => 200_000,
212            Self::O3 => 200_000,
213            Self::O4Mini => 200_000,
214            Self::Five => 272_000,
215            Self::FiveCodex => 272_000,
216            Self::FiveMini => 272_000,
217            Self::FiveNano => 272_000,
218            Self::FivePointOne => 400_000,
219            Self::FivePointTwo => 400_000,
220            Self::FivePointTwoCodex => 400_000,
221            Self::Custom { max_tokens, .. } => *max_tokens,
222        }
223    }
224
225    pub fn max_output_tokens(&self) -> Option<u64> {
226        match self {
227            Self::Custom {
228                max_output_tokens, ..
229            } => *max_output_tokens,
230            Self::ThreePointFiveTurbo => Some(4_096),
231            Self::Four => Some(8_192),
232            Self::FourTurbo => Some(4_096),
233            Self::FourOmni => Some(16_384),
234            Self::FourOmniMini => Some(16_384),
235            Self::FourPointOne => Some(32_768),
236            Self::FourPointOneMini => Some(32_768),
237            Self::FourPointOneNano => Some(32_768),
238            Self::O1 => Some(100_000),
239            Self::O3Mini => Some(100_000),
240            Self::O3 => Some(100_000),
241            Self::O4Mini => Some(100_000),
242            Self::Five => Some(128_000),
243            Self::FiveCodex => Some(128_000),
244            Self::FiveMini => Some(128_000),
245            Self::FiveNano => Some(128_000),
246            Self::FivePointOne => Some(128_000),
247            Self::FivePointTwo => Some(128_000),
248            Self::FivePointTwoCodex => Some(128_000),
249        }
250    }
251
252    pub fn reasoning_effort(&self) -> Option<ReasoningEffort> {
253        match self {
254            Self::Custom {
255                reasoning_effort, ..
256            } => reasoning_effort.to_owned(),
257            _ => None,
258        }
259    }
260
261    pub fn supports_chat_completions(&self) -> bool {
262        match self {
263            Self::Custom {
264                supports_chat_completions,
265                ..
266            } => *supports_chat_completions,
267            Self::FiveCodex | Self::FivePointTwoCodex => false,
268            _ => true,
269        }
270    }
271
272    /// Returns whether the given model supports the `parallel_tool_calls` parameter.
273    ///
274    /// If the model does not support the parameter, do not pass it up, or the API will return an error.
275    pub fn supports_parallel_tool_calls(&self) -> bool {
276        match self {
277            Self::ThreePointFiveTurbo
278            | Self::Four
279            | Self::FourTurbo
280            | Self::FourOmni
281            | Self::FourOmniMini
282            | Self::FourPointOne
283            | Self::FourPointOneMini
284            | Self::FourPointOneNano
285            | Self::Five
286            | Self::FiveCodex
287            | Self::FiveMini
288            | Self::FivePointOne
289            | Self::FivePointTwo
290            | Self::FivePointTwoCodex
291            | Self::FiveNano => true,
292            Self::O1 | Self::O3 | Self::O3Mini | Self::O4Mini | Model::Custom { .. } => false,
293        }
294    }
295
296    /// Returns whether the given model supports the `prompt_cache_key` parameter.
297    ///
298    /// If the model does not support the parameter, do not pass it up.
299    pub fn supports_prompt_cache_key(&self) -> bool {
300        true
301    }
302}
303
/// Request body for the `/chat/completions` endpoint.
#[derive(Debug, Serialize, Deserialize)]
pub struct Request {
    pub model: String,
    pub messages: Vec<RequestMessage>,
    /// When true, the API responds with server-sent events.
    pub stream: bool,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub max_completion_tokens: Option<u64>,
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub stop: Vec<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub temperature: Option<f32>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub tool_choice: Option<ToolChoice>,
    /// Whether to enable parallel function calling during tool use.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub parallel_tool_calls: Option<bool>,
    #[serde(default, skip_serializing_if = "Vec::is_empty")]
    pub tools: Vec<ToolDefinition>,
    /// Omit for models where `Model::supports_prompt_cache_key` is false.
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub prompt_cache_key: Option<String>,
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub reasoning_effort: Option<ReasoningEffort>,
}
327
/// The `tool_choice` request field: either one of the lowercase keywords
/// (`"auto"`, `"required"`, `"none"`) or, via the untagged variant, a
/// specific tool definition to force.
#[derive(Debug, Serialize, Deserialize)]
#[serde(rename_all = "lowercase")]
pub enum ToolChoice {
    Auto,
    Required,
    None,
    #[serde(untagged)]
    Other(ToolDefinition),
}
337
/// A tool the model may call; serialized with a `"type"` tag
/// (currently only `"function"`).
#[derive(Clone, Deserialize, Serialize, Debug)]
#[serde(tag = "type", rename_all = "snake_case")]
pub enum ToolDefinition {
    #[allow(dead_code)]
    Function { function: FunctionDefinition },
}
344
/// Schema of a callable function exposed to the model.
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct FunctionDefinition {
    pub name: String,
    pub description: Option<String>,
    /// JSON Schema for the function's arguments, as free-form JSON.
    pub parameters: Option<Value>,
}
351
/// A chat message in a request, tagged by its `role` field on the wire.
#[derive(Clone, Serialize, Deserialize, Debug, Eq, PartialEq)]
#[serde(tag = "role", rename_all = "lowercase")]
pub enum RequestMessage {
    Assistant {
        content: Option<MessageContent>,
        #[serde(default, skip_serializing_if = "Vec::is_empty")]
        tool_calls: Vec<ToolCall>,
    },
    User {
        content: MessageContent,
    },
    System {
        content: MessageContent,
    },
    /// The result of a tool call, correlated via `tool_call_id`.
    Tool {
        content: MessageContent,
        tool_call_id: String,
    },
}
371
/// Message content: either a bare string or a list of typed parts
/// (untagged — serde picks whichever shape matches).
#[derive(Serialize, Deserialize, Clone, Debug, Eq, PartialEq)]
#[serde(untagged)]
pub enum MessageContent {
    Plain(String),
    Multipart(Vec<MessagePart>),
}
378
379impl MessageContent {
380    pub fn empty() -> Self {
381        MessageContent::Multipart(vec![])
382    }
383
384    pub fn push_part(&mut self, part: MessagePart) {
385        match self {
386            MessageContent::Plain(text) => {
387                *self =
388                    MessageContent::Multipart(vec![MessagePart::Text { text: text.clone() }, part]);
389            }
390            MessageContent::Multipart(parts) if parts.is_empty() => match part {
391                MessagePart::Text { text } => *self = MessageContent::Plain(text),
392                MessagePart::Image { .. } => *self = MessageContent::Multipart(vec![part]),
393            },
394            MessageContent::Multipart(parts) => parts.push(part),
395        }
396    }
397}
398
399impl From<Vec<MessagePart>> for MessageContent {
400    fn from(mut parts: Vec<MessagePart>) -> Self {
401        if let [MessagePart::Text { text }] = parts.as_mut_slice() {
402            MessageContent::Plain(std::mem::take(text))
403        } else {
404            MessageContent::Multipart(parts)
405        }
406    }
407}
408
/// One part of multipart message content, tagged by `"type"` on the wire.
#[derive(Serialize, Deserialize, Clone, Debug, Eq, PartialEq)]
#[serde(tag = "type")]
pub enum MessagePart {
    #[serde(rename = "text")]
    Text { text: String },
    #[serde(rename = "image_url")]
    Image { image_url: ImageUrl },
}
417
/// An image reference inside a message part.
#[derive(Serialize, Deserialize, Clone, Debug, Eq, PartialEq)]
pub struct ImageUrl {
    pub url: String,
    /// Optional detail level; omitted from JSON when `None`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub detail: Option<String>,
}
424
/// A tool call made by the assistant; `content` is flattened so its fields
/// appear alongside `id` in the JSON object.
#[derive(Clone, Serialize, Deserialize, Debug, Eq, PartialEq)]
pub struct ToolCall {
    pub id: String,
    #[serde(flatten)]
    pub content: ToolCallContent,
}
431
/// Payload of a tool call, tagged by `"type"` (currently only `"function"`).
#[derive(Clone, Serialize, Deserialize, Debug, Eq, PartialEq)]
#[serde(tag = "type", rename_all = "lowercase")]
pub enum ToolCallContent {
    Function { function: FunctionContent },
}
437
/// A function invocation: the function name plus its arguments as a raw
/// JSON string (not parsed here).
#[derive(Clone, Serialize, Deserialize, Debug, Eq, PartialEq)]
pub struct FunctionContent {
    pub name: String,
    pub arguments: String,
}
443
/// A non-streaming chat completion response.
#[derive(Clone, Serialize, Deserialize, Debug)]
pub struct Response {
    pub id: String,
    pub object: String,
    pub created: u64,
    pub model: String,
    pub choices: Vec<Choice>,
    pub usage: Usage,
}
453
/// One completion choice in a non-streaming response.
#[derive(Clone, Serialize, Deserialize, Debug)]
pub struct Choice {
    pub index: u32,
    pub message: RequestMessage,
    pub finish_reason: Option<String>,
}
460
/// An incremental message update received while streaming; every field is
/// optional because each chunk carries only what changed.
#[derive(Serialize, Deserialize, Debug, Eq, PartialEq)]
pub struct ResponseMessageDelta {
    pub role: Option<Role>,
    pub content: Option<String>,
    #[serde(default, skip_serializing_if = "is_none_or_empty")]
    pub tool_calls: Option<Vec<ToolCallChunk>>,
    /// Reasoning text, emitted by some providers alongside `content`.
    #[serde(default, skip_serializing_if = "is_none_or_empty")]
    pub reasoning_content: Option<String>,
}
470
/// A streamed fragment of a tool call; fragments with the same `index`
/// belong to the same call.
#[derive(Serialize, Deserialize, Debug, Eq, PartialEq)]
pub struct ToolCallChunk {
    pub index: usize,
    pub id: Option<String>,

    // There is also an optional `type` field that would determine if a
    // function is there. Sometimes this streams in with the `function` before
    // it streams in the `type`
    pub function: Option<FunctionChunk>,
}
481
/// A streamed fragment of a function call's name and/or arguments.
#[derive(Serialize, Deserialize, Debug, Eq, PartialEq)]
pub struct FunctionChunk {
    pub name: Option<String>,
    pub arguments: Option<String>,
}
487
/// Token accounting reported by the API.
#[derive(Clone, Serialize, Deserialize, Debug)]
pub struct Usage {
    pub prompt_tokens: u64,
    pub completion_tokens: u64,
    pub total_tokens: u64,
}
494
/// One choice within a streamed event; `delta` holds the incremental update.
#[derive(Serialize, Deserialize, Debug)]
pub struct ChoiceDelta {
    pub index: u32,
    pub delta: Option<ResponseMessageDelta>,
    pub finish_reason: Option<String>,
}
501
/// Errors returned by the completion functions in this module.
#[derive(Error, Debug)]
pub enum RequestError {
    /// The API answered with a non-success HTTP status; carries the status,
    /// response body, and headers for diagnostics.
    #[error("HTTP response error from {provider}'s API: status {status_code} - {body:?}")]
    HttpResponseError {
        provider: String,
        status_code: StatusCode,
        body: String,
        headers: HeaderMap<HeaderValue>,
    },
    /// Any other failure (serialization, transport, body I/O).
    #[error(transparent)]
    Other(#[from] anyhow::Error),
}
514
/// The `error` object the API may emit inside a stream event.
#[derive(Serialize, Deserialize, Debug)]
pub struct ResponseStreamError {
    message: String,
}
519
/// A single streamed payload: either a normal event or an error object.
/// Untagged — serde tries `Ok` first, then the `{ "error": ... }` shape.
#[derive(Serialize, Deserialize, Debug)]
#[serde(untagged)]
pub enum ResponseStreamResult {
    Ok(ResponseStreamEvent),
    Err { error: ResponseStreamError },
}
526
/// One server-sent event from a streaming chat completion.
#[derive(Serialize, Deserialize, Debug)]
pub struct ResponseStreamEvent {
    pub choices: Vec<ChoiceDelta>,
    /// Present on the final event when usage reporting is enabled.
    pub usage: Option<Usage>,
}
532
533pub async fn non_streaming_completion(
534    client: &dyn HttpClient,
535    api_url: &str,
536    api_key: &str,
537    request: Request,
538) -> Result<Response, RequestError> {
539    let uri = format!("{api_url}/chat/completions");
540    let request_builder = HttpRequest::builder()
541        .method(Method::POST)
542        .uri(uri)
543        .header("Content-Type", "application/json")
544        .header("Authorization", format!("Bearer {}", api_key.trim()));
545
546    let request = request_builder
547        .body(AsyncBody::from(
548            serde_json::to_string(&request).map_err(|e| RequestError::Other(e.into()))?,
549        ))
550        .map_err(|e| RequestError::Other(e.into()))?;
551
552    let mut response = client.send(request).await?;
553    if response.status().is_success() {
554        let mut body = String::new();
555        response
556            .body_mut()
557            .read_to_string(&mut body)
558            .await
559            .map_err(|e| RequestError::Other(e.into()))?;
560
561        serde_json::from_str(&body).map_err(|e| RequestError::Other(e.into()))
562    } else {
563        let mut body = String::new();
564        response
565            .body_mut()
566            .read_to_string(&mut body)
567            .await
568            .map_err(|e| RequestError::Other(e.into()))?;
569
570        Err(RequestError::HttpResponseError {
571            provider: "openai".to_owned(),
572            status_code: response.status(),
573            body,
574            headers: response.headers().clone(),
575        })
576    }
577}
578
/// Sends a streaming request to the `/chat/completions` endpoint and returns
/// a stream of parsed server-sent events.
///
/// Each SSE `data:` line is deserialized into a [`ResponseStreamEvent`]; the
/// `[DONE]` sentinel and non-`data:` lines are skipped. Non-2xx responses
/// produce a [`RequestError::HttpResponseError`] with the body attached.
pub async fn stream_completion(
    client: &dyn HttpClient,
    provider_name: &str,
    api_url: &str,
    api_key: &str,
    request: Request,
) -> Result<BoxStream<'static, Result<ResponseStreamEvent>>, RequestError> {
    let uri = format!("{api_url}/chat/completions");
    let request_builder = HttpRequest::builder()
        .method(Method::POST)
        .uri(uri)
        .header("Content-Type", "application/json")
        .header("Authorization", format!("Bearer {}", api_key.trim()));

    let request = request_builder
        .body(AsyncBody::from(
            serde_json::to_string(&request).map_err(|e| RequestError::Other(e.into()))?,
        ))
        .map_err(|e| RequestError::Other(e.into()))?;

    let mut response = client.send(request).await?;
    if response.status().is_success() {
        let reader = BufReader::new(response.into_body());
        Ok(reader
            .lines()
            .filter_map(|line| async move {
                match line {
                    Ok(line) => {
                        // Data lines may arrive as "data: {...}" or "data:{...}";
                        // the `?` drops any line without either prefix
                        // (filter_map skips `None` items).
                        let line = line.strip_prefix("data: ").or_else(|| line.strip_prefix("data:"))?;
                        if line == "[DONE]" {
                            // End-of-stream sentinel — not an event; skip it.
                            None
                        } else {
                            match serde_json::from_str(line) {
                                Ok(ResponseStreamResult::Ok(response)) => Some(Ok(response)),
                                Ok(ResponseStreamResult::Err { error }) => {
                                    Some(Err(anyhow!(error.message)))
                                }
                                Err(error) => {
                                    // Surface unparseable payloads as stream errors,
                                    // logging the raw line for diagnosis.
                                    log::error!(
                                        "Failed to parse OpenAI response into ResponseStreamResult: `{}`\n\
                                        Response: `{}`",
                                        error,
                                        line,
                                    );
                                    Some(Err(anyhow!(error)))
                                }
                            }
                        }
                    }
                    Err(error) => Some(Err(anyhow!(error))),
                }
            })
            .boxed())
    } else {
        let mut body = String::new();
        response
            .body_mut()
            .read_to_string(&mut body)
            .await
            .map_err(|e| RequestError::Other(e.into()))?;

        Err(RequestError::HttpResponseError {
            provider: provider_name.to_owned(),
            status_code: response.status(),
            body,
            headers: response.headers().clone(),
        })
    }
}
648
/// Embedding models supported by the `/embeddings` endpoint.
#[derive(Copy, Clone, Serialize, Deserialize)]
pub enum OpenAiEmbeddingModel {
    #[serde(rename = "text-embedding-3-small")]
    TextEmbedding3Small,
    #[serde(rename = "text-embedding-3-large")]
    TextEmbedding3Large,
}
656
/// Request body for the `/embeddings` endpoint; borrows the input texts.
#[derive(Serialize)]
struct OpenAiEmbeddingRequest<'a> {
    model: OpenAiEmbeddingModel,
    input: Vec<&'a str>,
}
662
/// Response body from the `/embeddings` endpoint: one entry per input text.
#[derive(Deserialize)]
pub struct OpenAiEmbeddingResponse {
    pub data: Vec<OpenAiEmbedding>,
}
667
/// A single embedding vector.
#[derive(Deserialize)]
pub struct OpenAiEmbedding {
    pub embedding: Vec<f32>,
}
672
673pub fn embed<'a>(
674    client: &dyn HttpClient,
675    api_url: &str,
676    api_key: &str,
677    model: OpenAiEmbeddingModel,
678    texts: impl IntoIterator<Item = &'a str>,
679) -> impl 'static + Future<Output = Result<OpenAiEmbeddingResponse>> {
680    let uri = format!("{api_url}/embeddings");
681
682    let request = OpenAiEmbeddingRequest {
683        model,
684        input: texts.into_iter().collect(),
685    };
686    let body = AsyncBody::from(serde_json::to_string(&request).unwrap());
687    let request = HttpRequest::builder()
688        .method(Method::POST)
689        .uri(uri)
690        .header("Content-Type", "application/json")
691        .header("Authorization", format!("Bearer {}", api_key.trim()))
692        .body(body)
693        .map(|request| client.send(request));
694
695    async move {
696        let mut response = request?.await?;
697        let mut body = String::new();
698        response.body_mut().read_to_string(&mut body).await?;
699
700        anyhow::ensure!(
701            response.status().is_success(),
702            "error during embedding, status: {:?}, body: {:?}",
703            response.status(),
704            body
705        );
706        let response: OpenAiEmbeddingResponse =
707            serde_json::from_str(&body).context("failed to parse OpenAI embedding response")?;
708        Ok(response)
709    }
710}