//! Anthropic LLM clients: `PlainLlmClient` issues requests directly, while
//! `BatchingLlmClient` defers them to the Message Batches API behind a
//! SQLite-backed response cache.

use anthropic::{
    ANTHROPIC_API_URL, Event, Message, Request as AnthropicRequest, RequestContent,
    Response as AnthropicResponse, ResponseContent, Role, non_streaming_completion,
    stream_completion,
};
use anyhow::Result;
use futures::StreamExt as _;
use http_client::HttpClient;
use indoc::indoc;
use reqwest_client::ReqwestClient;
use sqlez::bindable::{Bind, StaticColumnCount};
use sqlez_macros::sql;
use std::hash::{Hash, Hasher};
use std::path::Path;
use std::sync::{Arc, Mutex};

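/// A direct client: every call goes straight to the Anthropic API.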
pub struct PlainLlmClient {
    pub http_client: Arc<dyn HttpClient>,
    pub api_key: String,
}

impl PlainLlmClient {
    pub fn new() -> Result<Self> {
        let http_client: Arc<dyn HttpClient> = Arc::new(ReqwestClient::new());
        let api_key = std::env::var("ANTHROPIC_API_KEY")
            .map_err(|_| anyhow::anyhow!("ANTHROPIC_API_KEY environment variable not set"))?;
        Ok(Self {
            http_client,
            api_key,
        })
    }

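    /// Sends a single non-streaming completion request and returns the full response.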
    pub async fn generate(
        &self,
        model: &str,
        max_tokens: u64,
        messages: Vec<Message>,
    ) -> Result<AnthropicResponse> {
        let request = AnthropicRequest {
            model: model.to_string(),
            max_tokens,
            messages,
            tools: Vec::new(),
            thinking: None,
            tool_choice: None,
            system: None,
            metadata: None,
            stop_sequences: Vec::new(),
            temperature: None,
            top_k: None,
            top_p: None,
        };

        let response = non_streaming_completion(
            self.http_client.as_ref(),
            ANTHROPIC_API_URL,
            &self.api_key,
            request,
            None,
        )
        .await
        .map_err(|e| anyhow::anyhow!("{:?}", e))?;

        Ok(response)
    }

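    /// Sends a streaming completion request, invoking `on_progress` with the
    /// accumulated text length and content after each text delta.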
    pub async fn generate_streaming<F>(
        &self,
        model: &str,
        max_tokens: u64,
        messages: Vec<Message>,
        mut on_progress: F,
    ) -> Result<AnthropicResponse>
    where
        F: FnMut(usize, &str),
    {
        let request = AnthropicRequest {
            model: model.to_string(),
            max_tokens,
            messages,
            tools: Vec::new(),
            thinking: None,
            tool_choice: None,
            system: None,
            metadata: None,
            stop_sequences: Vec::new(),
            temperature: None,
            top_k: None,
            top_p: None,
        };

        let mut stream = stream_completion(
            self.http_client.as_ref(),
            ANTHROPIC_API_URL,
            &self.api_key,
            request,
            None,
        )
        .await
        .map_err(|e| anyhow::anyhow!("{:?}", e))?;

        let mut response: Option<AnthropicResponse> = None;
        let mut text_content = String::new();

        while let Some(event_result) = stream.next().await {
            let event = event_result.map_err(|e| anyhow::anyhow!("{:?}", e))?;

            match event {
                // The initial event carries the response envelope (id, model, usage, ...).
                Event::MessageStart { message } => {
                    response = Some(message);
                }
                // Accumulate text deltas and report progress on each one.
                Event::ContentBlockDelta { delta, .. } => {
                    if let anthropic::ContentDelta::TextDelta { text } = delta {
                        text_content.push_str(&text);
                        on_progress(text_content.len(), &text_content);
                    }
                }
                _ => {}
            }
        }

        let mut response = response.ok_or_else(|| anyhow::anyhow!("No response received"))?;

        // The MessageStart envelope arrives before any content, so splice the
        // streamed text back in if the content list is still empty.
        if response.content.is_empty() && !text_content.is_empty() {
            response
                .content
                .push(ResponseContent::Text { text: text_content });
        }

        Ok(response)
    }
}

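/// A client that never calls the API inline: requests are recorded in a
/// SQLite cache, uploaded later as a batch, and answered from the cache once
/// the batch results have been downloaded.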
pub struct BatchingLlmClient {
    connection: Mutex<sqlez::connection::Connection>,
    http_client: Arc<dyn HttpClient>,
    api_key: String,
}

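/// One row of the `cache` table: `request_hash` is the primary key, and
/// `request`, `response`, and `batch_id` are filled in as the request moves
/// through the pipeline (marked, uploaded, downloaded).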
struct CacheRow {
    request_hash: String,
    request: Option<String>,
    response: Option<String>,
    batch_id: Option<String>,
}

impl StaticColumnCount for CacheRow {
    fn column_count() -> usize {
        4
    }
}

impl Bind for CacheRow {
    fn bind(&self, statement: &sqlez::statement::Statement, start_index: i32) -> Result<i32> {
        let next_index = statement.bind(&self.request_hash, start_index)?;
        let next_index = statement.bind(&self.request, next_index)?;
        let next_index = statement.bind(&self.response, next_index)?;
        let next_index = statement.bind(&self.batch_id, next_index)?;
        Ok(next_index)
    }
}

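/// JSON-serializable mirror of a request, stored in the `request` column so
/// pending requests can be reconstructed at upload time.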
#[derive(serde::Serialize, serde::Deserialize)]
struct SerializableRequest {
    model: String,
    max_tokens: u64,
    messages: Vec<SerializableMessage>,
}

#[derive(serde::Serialize, serde::Deserialize)]
struct SerializableMessage {
    role: String,
    content: String,
}

impl BatchingLlmClient {
    fn new(cache_path: &Path) -> Result<Self> {
        let http_client: Arc<dyn HttpClient> = Arc::new(ReqwestClient::new());
        let api_key = std::env::var("ANTHROPIC_API_KEY")
            .map_err(|_| anyhow::anyhow!("ANTHROPIC_API_KEY environment variable not set"))?;

        let cache_path = cache_path
            .to_str()
            .ok_or_else(|| anyhow::anyhow!("cache path is not valid UTF-8"))?;
        let connection = sqlez::connection::Connection::open_file(cache_path);
        let mut statement = sqlez::statement::Statement::prepare(
            &connection,
            indoc! {"
                CREATE TABLE IF NOT EXISTS cache (
                    request_hash TEXT PRIMARY KEY,
                    request TEXT,
                    response TEXT,
                    batch_id TEXT
                );
            "},
        )?;
        statement.exec()?;
        drop(statement);

        Ok(Self {
            connection: Mutex::new(connection),
            http_client,
            api_key,
        })
    }

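    /// Returns the cached response for this request, if a batch containing it
    /// has already been downloaded.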
    pub fn lookup(
        &self,
        model: &str,
        max_tokens: u64,
        messages: &[Message],
    ) -> Result<Option<AnthropicResponse>> {
        let request_hash_str = Self::request_hash(model, max_tokens, messages);
        let connection = self.connection.lock().unwrap();
        let response: Vec<String> = connection.select_bound(
            &sql!(SELECT response FROM cache WHERE request_hash = ?1 AND response IS NOT NULL;),
        )?(request_hash_str.as_str())?;
        Ok(response
            .into_iter()
            .next()
            .and_then(|text| serde_json::from_str(&text).ok()))
    }

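    /// Records the request in the cache so the next `sync_batches` call
    /// uploads it; `INSERT OR IGNORE` leaves already-marked requests untouched.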
    pub fn mark_for_batch(&self, model: &str, max_tokens: u64, messages: &[Message]) -> Result<()> {
        let request_hash = Self::request_hash(model, max_tokens, messages);

        let serializable_messages: Vec<SerializableMessage> = messages
            .iter()
            .map(|msg| SerializableMessage {
                role: match msg.role {
                    Role::User => "user".to_string(),
                    Role::Assistant => "assistant".to_string(),
                },
                content: message_content_to_string(&msg.content),
            })
            .collect();

        let serializable_request = SerializableRequest {
            model: model.to_string(),
            max_tokens,
            messages: serializable_messages,
        };

        let request = Some(serde_json::to_string(&serializable_request)?);
        let cache_row = CacheRow {
            request_hash,
            request,
            response: None,
            batch_id: None,
        };
        let connection = self.connection.lock().unwrap();
        connection.exec_bound::<CacheRow>(sql!(
            INSERT OR IGNORE INTO cache(request_hash, request, response, batch_id) VALUES (?, ?, ?, ?)
        ))?(cache_row)
    }

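    /// Cache-or-enqueue: returns the cached response if present, otherwise
    /// marks the request for the next batch upload and returns `None`.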
    async fn generate(
        &self,
        model: &str,
        max_tokens: u64,
        messages: Vec<Message>,
    ) -> Result<Option<AnthropicResponse>> {
        let response = self.lookup(model, max_tokens, &messages)?;
        if let Some(response) = response {
            return Ok(Some(response));
        }

        self.mark_for_batch(model, max_tokens, &messages)?;

        Ok(None)
    }

    /// Uploads pending requests as a new batch; downloads finished batches if any.
    async fn sync_batches(&self) -> Result<()> {
        self.upload_pending_requests().await?;
        self.download_finished_batches().await
    }

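    /// Polls every batch that still has undelivered responses; once a batch
    /// has ended, stores each result (success or error JSON) under its
    /// request hash inside a single savepoint.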
    async fn download_finished_batches(&self) -> Result<()> {
        let batch_ids: Vec<String> = {
            let connection = self.connection.lock().unwrap();
            let q = sql!(SELECT DISTINCT batch_id FROM cache WHERE batch_id IS NOT NULL AND response IS NULL);
            connection.select(q)?()?
        };

        for batch_id in &batch_ids {
            let batch_status = anthropic::batches::retrieve_batch(
                self.http_client.as_ref(),
                ANTHROPIC_API_URL,
                &self.api_key,
                batch_id,
            )
            .await
            .map_err(|e| anyhow::anyhow!("{:?}", e))?;

            log::info!(
                "Batch {} status: {}",
                batch_id,
                batch_status.processing_status
            );

            if batch_status.processing_status == "ended" {
                let results = anthropic::batches::retrieve_batch_results(
                    self.http_client.as_ref(),
                    ANTHROPIC_API_URL,
                    &self.api_key,
                    batch_id,
                )
                .await
                .map_err(|e| anyhow::anyhow!("{:?}", e))?;

                let mut updates: Vec<(String, String)> = Vec::new();
                let mut success_count = 0;
                for result in results {
                    // Recover the request hash embedded in the custom_id at upload time.
                    let request_hash = result
                        .custom_id
                        .strip_prefix("req_hash_")
                        .unwrap_or(&result.custom_id)
                        .to_string();

                    match result.result {
                        anthropic::batches::BatchResult::Succeeded { message } => {
                            let response_json = serde_json::to_string(&message)?;
                            updates.push((response_json, request_hash));
                            success_count += 1;
                        }
                        anthropic::batches::BatchResult::Errored { error } => {
                            log::error!(
                                "Batch request {} failed: {}: {}",
                                request_hash,
                                error.error.error_type,
                                error.error.message
                            );
                            let error_json = serde_json::json!({
                                "error": {
                                    "type": error.error.error_type,
                                    "message": error.error.message
                                }
                            })
                            .to_string();
                            updates.push((error_json, request_hash));
                        }
                        anthropic::batches::BatchResult::Canceled => {
                            log::warn!("Batch request {} was canceled", request_hash);
                            let error_json = serde_json::json!({
                                "error": {
                                    "type": "canceled",
                                    "message": "Batch request was canceled"
                                }
                            })
                            .to_string();
                            updates.push((error_json, request_hash));
                        }
                        anthropic::batches::BatchResult::Expired => {
                            log::warn!("Batch request {} expired", request_hash);
                            let error_json = serde_json::json!({
                                "error": {
                                    "type": "expired",
                                    "message": "Batch request expired"
                                }
                            })
                            .to_string();
                            updates.push((error_json, request_hash));
                        }
                    }
                }

                // Apply all updates atomically so a partial download can't
                // strand a batch in a half-written state.
                let connection = self.connection.lock().unwrap();
                connection.with_savepoint("batch_download", || {
                    let q = sql!(UPDATE cache SET response = ? WHERE request_hash = ?);
                    let mut exec = connection.exec_bound::<(&str, &str)>(q)?;
                    for (response_json, request_hash) in &updates {
                        exec((response_json.as_str(), request_hash.as_str()))?;
                    }
                    Ok(())
                })?;
                log::info!("Downloaded {} successful requests", success_count);
            }
        }

        Ok(())
    }

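    /// Collects every marked-but-unbatched request, submits them as one batch,
    /// and stamps the new batch id onto the uploaded rows. Returns the batch
    /// id, or an empty string when there was nothing to upload.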
    async fn upload_pending_requests(&self) -> Result<String> {
        let rows: Vec<(String, String)> = {
            let connection = self.connection.lock().unwrap();
            let q = sql!(
                SELECT request_hash, request FROM cache WHERE batch_id IS NULL AND response IS NULL
            );
            connection.select(q)?()?
        };

        if rows.is_empty() {
            return Ok(String::new());
        }

        let batch_requests = rows
            .iter()
            .map(|(hash, request_str)| {
                let serializable_request: SerializableRequest = serde_json::from_str(request_str)?;

                let messages: Vec<Message> = serializable_request
                    .messages
                    .into_iter()
                    .map(|msg| Message {
                        role: match msg.role.as_str() {
                            "user" => Role::User,
                            "assistant" => Role::Assistant,
                            _ => Role::User,
                        },
                        content: vec![RequestContent::Text {
                            text: msg.content,
                            cache_control: None,
                        }],
                    })
                    .collect();

                let params = AnthropicRequest {
                    model: serializable_request.model,
                    max_tokens: serializable_request.max_tokens,
                    messages,
                    tools: Vec::new(),
                    thinking: None,
                    tool_choice: None,
                    system: None,
                    metadata: None,
                    stop_sequences: Vec::new(),
                    temperature: None,
                    top_k: None,
                    top_p: None,
                };

                let custom_id = format!("req_hash_{}", hash);
                Ok(anthropic::batches::BatchRequest { custom_id, params })
            })
            .collect::<Result<Vec<_>>>()?;

        let batch_len = batch_requests.len();
        let batch = anthropic::batches::create_batch(
            self.http_client.as_ref(),
            ANTHROPIC_API_URL,
            &self.api_key,
            anthropic::batches::CreateBatchRequest {
                requests: batch_requests,
            },
        )
        .await
        .map_err(|e| anyhow::anyhow!("{:?}", e))?;

        {
            // Stamp the batch id onto exactly the rows that were uploaded, so
            // requests marked while the upload was in flight stay pending
            // instead of being misattributed to this batch.
            let connection = self.connection.lock().unwrap();
            connection.with_savepoint("batch_upload", || {
                let q = sql!(UPDATE cache SET batch_id = ? WHERE request_hash = ?);
                let mut exec = connection.exec_bound::<(&str, &str)>(q)?;
                for (hash, _) in &rows {
                    exec((batch.id.as_str(), hash.as_str()))?;
                }
                Ok(())
            })?;
        }

        log::info!("Uploaded batch with {} requests", batch_len);

        Ok(batch.id)
    }

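    /// Hashes the model, token limit, and message text into a cache key.
    /// Note: this relies on `DefaultHasher`, whose output is not guaranteed to
    /// be stable across Rust releases, and it ignores message roles, so it is
    /// only suitable for a local, regenerable cache.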
    fn request_hash(model: &str, max_tokens: u64, messages: &[Message]) -> String {
        let mut hasher = std::hash::DefaultHasher::new();
        model.hash(&mut hasher);
        max_tokens.hash(&mut hasher);
        for msg in messages {
            message_content_to_string(&msg.content).hash(&mut hasher);
        }
        let request_hash = hasher.finish();
        format!("{request_hash:016x}")
    }
}

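/// Flattens a message's content blocks to newline-joined text, dropping any
/// non-text blocks.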
fn message_content_to_string(content: &[RequestContent]) -> String {
    content
        .iter()
        .filter_map(|c| match c {
            RequestContent::Text { text, .. } => Some(text.clone()),
            _ => None,
        })
        .collect::<Vec<String>>()
        .join("\n")
}

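/// Front door for LLM calls. `Plain` sends each request immediately, `Batch`
/// answers from the batch cache (returning `None` until results arrive), and
/// `Dummy` panics if used.
///
/// A minimal usage sketch for the batching path (hypothetical call site; the
/// model name is illustrative):
///
/// ```ignore
/// # async fn example() -> anyhow::Result<()> {
/// use anthropic::{Message, RequestContent, Role};
/// use std::path::Path;
///
/// let client = AnthropicClient::batch(Path::new("cache.db"))?;
/// let messages = vec![Message {
///     role: Role::User,
///     content: vec![RequestContent::Text {
///         text: "Hello".into(),
///         cache_control: None,
///     }],
/// }];
/// // First call enqueues the request and returns Ok(None).
/// let pending = client.generate("claude-3-5-sonnet-latest", 1024, messages).await?;
/// // Upload pending requests and download finished batches, then retry later.
/// client.sync_batches().await?;
/// # Ok(())
/// # }
/// ```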
pub enum AnthropicClient {
    /// No batching: requests go straight to the API.
    Plain(PlainLlmClient),
    /// Batched: requests are cached and satisfied via `sync_batches`.
    Batch(BatchingLlmClient),
    /// Placeholder that panics on use.
    Dummy,
}

impl AnthropicClient {
    pub fn plain() -> Result<Self> {
        Ok(Self::Plain(PlainLlmClient::new()?))
    }

    pub fn batch(cache_path: &Path) -> Result<Self> {
        Ok(Self::Batch(BatchingLlmClient::new(cache_path)?))
    }

    #[allow(dead_code)]
    pub fn dummy() -> Self {
        Self::Dummy
    }

    pub async fn generate(
        &self,
        model: &str,
        max_tokens: u64,
        messages: Vec<Message>,
    ) -> Result<Option<AnthropicResponse>> {
        match self {
            AnthropicClient::Plain(plain_llm_client) => plain_llm_client
                .generate(model, max_tokens, messages)
                .await
                .map(Some),
            AnthropicClient::Batch(batching_llm_client) => {
                batching_llm_client
                    .generate(model, max_tokens, messages)
                    .await
            }
            AnthropicClient::Dummy => panic!("Dummy LLM client is not expected to be used"),
        }
    }

    #[allow(dead_code)]
    pub async fn generate_streaming<F>(
        &self,
        model: &str,
        max_tokens: u64,
        messages: Vec<Message>,
        on_progress: F,
    ) -> Result<Option<AnthropicResponse>>
    where
        F: FnMut(usize, &str),
    {
        match self {
            AnthropicClient::Plain(plain_llm_client) => plain_llm_client
                .generate_streaming(model, max_tokens, messages, on_progress)
                .await
                .map(Some),
            AnthropicClient::Batch(_) => {
                anyhow::bail!("Streaming not supported with batching client")
            }
            AnthropicClient::Dummy => panic!("Dummy LLM client is not expected to be used"),
        }
    }

    pub async fn sync_batches(&self) -> Result<()> {
        match self {
            AnthropicClient::Plain(_) => Ok(()),
            AnthropicClient::Batch(batching_llm_client) => batching_llm_client.sync_batches().await,
            AnthropicClient::Dummy => panic!("Dummy LLM client is not expected to be used"),
        }
    }
}