embedding.rs

use anyhow::{anyhow, Result};
use async_trait::async_trait;
use futures::AsyncReadExt;
use gpui::executor::Background;
use gpui::serde_json;
use isahc::http::StatusCode;
use isahc::prelude::Configurable;
use isahc::{AsyncBody, Response};
use lazy_static::lazy_static;
use serde::{Deserialize, Serialize};
use std::env;
use std::sync::Arc;
use std::time::Duration;
use tiktoken_rs::{cl100k_base, CoreBPE};
use util::http::{HttpClient, Request};

lazy_static! {
    static ref OPENAI_API_KEY: Option<String> = env::var("OPENAI_API_KEY").ok();
    static ref OPENAI_BPE_TOKENIZER: CoreBPE = cl100k_base().unwrap();
}

/// An embedding provider backed by the OpenAI embeddings API.
#[derive(Clone)]
pub struct OpenAIEmbeddings {
    pub client: Arc<dyn HttpClient>,
    pub executor: Arc<Background>,
}

/// Request body for OpenAI's `/v1/embeddings` endpoint.
#[derive(Serialize)]
struct OpenAIEmbeddingRequest<'a> {
    model: &'static str,
    input: Vec<&'a str>,
}
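
// With `#[derive(Serialize)]`, the request above serializes to the JSON body
// expected by the OpenAI embeddings endpoint, e.g. (illustrative span values):
//
//     {"model":"text-embedding-ada-002","input":["first span","second span"]}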

#[derive(Deserialize)]
struct OpenAIEmbeddingResponse {
    data: Vec<OpenAIEmbedding>,
    usage: OpenAIEmbeddingUsage,
}

#[derive(Debug, Deserialize)]
struct OpenAIEmbedding {
    embedding: Vec<f32>,
    index: usize,
    object: String,
}

#[derive(Deserialize)]
struct OpenAIEmbeddingUsage {
    prompt_tokens: usize,
    total_tokens: usize,
}

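/// An `EmbeddingProvider` turns a batch of text spans into one embedding vector
/// per span, preserving input order. A hedged sketch of how a provider might be
/// driven (`http_client` and `cx` below are assumptions about the caller, not
/// defined in this file):
///
/// ```ignore
/// let provider = OpenAIEmbeddings {
///     client: http_client,
///     executor: cx.background().clone(),
/// };
/// let embeddings = provider.embed_batch(vec!["fn main() {}"]).await?;
/// assert_eq!(embeddings.len(), 1);
/// ```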
#[async_trait]
pub trait EmbeddingProvider: Sync + Send {
    async fn embed_batch(&self, spans: Vec<&str>) -> Result<Vec<Vec<f32>>>;
}

pub struct DummyEmbeddings {}

#[async_trait]
impl EmbeddingProvider for DummyEmbeddings {
    async fn embed_batch(&self, spans: Vec<&str>) -> Result<Vec<Vec<f32>>> {
        // 1536 is the embedding size of OpenAI's ada-002 model,
        // the model we will likely be starting with.
        let dummy_vec = vec![0.32_f32; 1536];
        Ok(vec![dummy_vec; spans.len()])
    }
}

impl OpenAIEmbeddings {
    /// Truncates `span` to at most 8190 tokens so it stays under the embedding
    /// model's input limit, using the cl100k_base tokenizer.
    async fn truncate(span: String) -> String {
        let mut tokens = OPENAI_BPE_TOKENIZER.encode_with_special_tokens(span.as_ref());
        if tokens.len() > 8190 {
            tokens.truncate(8190);
            if let Ok(truncated) = OPENAI_BPE_TOKENIZER.decode(tokens) {
                return truncated;
            }
        }

        span
    }

    /// Sends a single POST to OpenAI's `/v1/embeddings` endpoint for the given
    /// spans and returns the raw HTTP response.
    async fn send_request(&self, api_key: &str, spans: Vec<&str>) -> Result<Response<AsyncBody>> {
        let request = Request::post("https://api.openai.com/v1/embeddings")
            .redirect_policy(isahc::config::RedirectPolicy::Follow)
            .header("Content-Type", "application/json")
            .header("Authorization", format!("Bearer {}", api_key))
            .body(
                serde_json::to_string(&OpenAIEmbeddingRequest {
                    input: spans,
                    model: "text-embedding-ada-002",
                })
                .unwrap()
                .into(),
            )?;

        Ok(self.client.send(request).await?)
    }
}

#[async_trait]
impl EmbeddingProvider for OpenAIEmbeddings {
    async fn embed_batch(&self, spans: Vec<&str>) -> Result<Vec<Vec<f32>>> {
        const BACKOFF_SECONDS: [usize; 3] = [65, 180, 360];
        const MAX_RETRIES: usize = 3;

        let api_key = OPENAI_API_KEY
            .as_ref()
            .ok_or_else(|| anyhow!("no api key"))?;

        let mut request_number = 0;
        let mut spans: Vec<String> = spans.iter().map(|x| x.to_string()).collect();
        while request_number < MAX_RETRIES {
            let mut response = self
                .send_request(api_key, spans.iter().map(|x| x.as_str()).collect())
                .await?;
            request_number += 1;

            // Bail out once the final attempt has come back with a non-OK status.
            if request_number == MAX_RETRIES && response.status() != StatusCode::OK {
                return Err(anyhow!(
                    "openai max retries, error: {:?}",
                    &response.status()
                ));
            }

            match response.status() {
                StatusCode::TOO_MANY_REQUESTS => {
                    // Rate limited: back off before the next attempt.
                    let delay = Duration::from_secs(BACKOFF_SECONDS[request_number - 1] as u64);
                    self.executor.timer(delay).await;
                }
                StatusCode::BAD_REQUEST => {
                    log::info!("BAD REQUEST: {:?}", &response.status());
                    // Don't worry about delaying bad request, as we can assume
                    // we haven't been rate limited yet. Truncate the spans and retry.
                    for span in spans.iter_mut() {
                        *span = Self::truncate(span.to_string()).await;
                    }
                }
                StatusCode::OK => {
                    let mut body = String::new();
                    response.body_mut().read_to_string(&mut body).await?;
                    let response: OpenAIEmbeddingResponse = serde_json::from_str(&body)?;

                    log::info!(
                        "openai embedding completed. tokens: {:?}",
                        response.usage.total_tokens
                    );
                    return Ok(response
                        .data
                        .into_iter()
                        .map(|embedding| embedding.embedding)
                        .collect());
                }
                _ => {
                    return Err(anyhow!("openai embedding failed {}", response.status()));
                }
            }
        }

        Err(anyhow!("openai embedding failed"))
    }
}
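
// A hedged sketch (not part of the original file): a minimal test exercising the
// trait through `DummyEmbeddings`. It assumes the `futures` dependency has its
// default `executor` feature enabled so `block_on` is available.
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn dummy_embeddings_return_one_vector_per_span() {
        let provider = DummyEmbeddings {};
        let embeddings =
            futures::executor::block_on(provider.embed_batch(vec!["first span", "second span"]))
                .unwrap();

        // One 1536-dimensional vector per input span, in order.
        assert_eq!(embeddings.len(), 2);
        assert!(embeddings.iter().all(|v| v.len() == 1536));
    }
}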