use anyhow::{Context as _, Result};
use futures::{AsyncBufReadExt, AsyncReadExt, StreamExt, io::BufReader, stream::BoxStream};
use http_client::{AsyncBody, HttpClient, Method, Request as HttpRequest, http};
use serde::{Deserialize, Serialize};
use serde_json::Value;
use std::time::Duration;

pub const OLLAMA_API_URL: &str = "http://localhost:11434";

#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
#[derive(Clone, Serialize, Deserialize, Debug, Eq, PartialEq)]
#[serde(untagged)]
pub enum KeepAlive {
    /// Keep model alive for N seconds
    Seconds(isize),
    /// Keep model alive for a fixed duration. Accepts durations like "5m", "10m", "1h", "1d", etc.
    Duration(String),
}

impl KeepAlive {
    /// Keep model alive until a new model is loaded or until Ollama shuts down
    fn indefinite() -> Self {
        Self::Seconds(-1)
    }
}

impl Default for KeepAlive {
    fn default() -> Self {
        Self::indefinite()
    }
}

#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
#[derive(Clone, Debug, Default, Serialize, Deserialize, PartialEq)]
pub struct Model {
    pub name: String,
    pub display_name: Option<String>,
    pub max_tokens: u64,
    pub keep_alive: Option<KeepAlive>,
    pub supports_tools: Option<bool>,
    pub supports_vision: Option<bool>,
    pub supports_thinking: Option<bool>,
}

fn get_max_tokens(name: &str) -> u64 {
    /// Default context length for unknown models.
    const DEFAULT_TOKENS: u64 = 4096;
    /// Magic number. Lets many Ollama models work with ~16GB of RAM.
    /// Models that support context beyond 16k, such as codestral (32k) or devstral (128k), will be clamped down to 16k.
    const MAXIMUM_TOKENS: u64 = 16384;

    match name.split(':').next().unwrap() {
        "granite-code" | "phi" | "tinyllama" => 2048,
        "llama2" | "stablelm2" | "vicuna" | "yi" => 4096,
        "aya" | "codegemma" | "gemma" | "gemma2" | "llama3" | "starcoder" => 8192,
        "codellama" | "starcoder2" => 16384,
        "codestral" | "dolphin-mixtral" | "llava" | "magistral" | "mistral" | "mixtral"
        | "qwen2" | "qwen2.5-coder" => 32768,
        "cogito" | "command-r" | "deepseek-coder-v2" | "deepseek-r1" | "deepseek-v3"
        | "devstral" | "gemma3" | "gpt-oss" | "granite3.3" | "llama3.1" | "llama3.2"
        | "llama3.3" | "mistral-nemo" | "phi3" | "phi3.5" | "phi4" | "qwen3" | "yi-coder" => 128000,
        _ => DEFAULT_TOKENS,
    }
    .clamp(1, MAXIMUM_TOKENS)
}
66
67impl Model {
68 pub fn new(
69 name: &str,
70 display_name: Option<&str>,
71 max_tokens: Option<u64>,
72 supports_tools: Option<bool>,
73 supports_vision: Option<bool>,
74 supports_thinking: Option<bool>,
75 ) -> Self {
76 Self {
77 name: name.to_owned(),
78 display_name: display_name
79 .map(ToString::to_string)
80 .or_else(|| name.strip_suffix(":latest").map(ToString::to_string)),
81 max_tokens: max_tokens.unwrap_or_else(|| get_max_tokens(name)),
82 keep_alive: Some(KeepAlive::indefinite()),
83 supports_tools,
84 supports_vision,
85 supports_thinking,
86 }
87 }
88
89 pub fn id(&self) -> &str {
90 &self.name
91 }
92
93 pub fn display_name(&self) -> &str {
94 self.display_name.as_ref().unwrap_or(&self.name)
95 }
96
97 pub fn max_token_count(&self) -> u64 {
98 self.max_tokens
99 }
100}
101
102#[derive(Serialize, Deserialize, Debug)]
103#[serde(tag = "role", rename_all = "lowercase")]
104pub enum ChatMessage {
105 Assistant {
106 content: String,
107 tool_calls: Option<Vec<OllamaToolCall>>,
108 #[serde(skip_serializing_if = "Option::is_none")]
109 images: Option<Vec<String>>,
110 thinking: Option<String>,
111 },
112 User {
113 content: String,
114 #[serde(skip_serializing_if = "Option::is_none")]
115 images: Option<Vec<String>>,
116 },
117 System {
118 content: String,
119 },
120 Tool {
121 tool_name: String,
122 content: String,
123 },
124}
125
126#[derive(Serialize, Deserialize, Debug)]
127#[serde(rename_all = "lowercase")]
128pub enum OllamaToolCall {
129 Function(OllamaFunctionCall),
130}
131
132#[derive(Serialize, Deserialize, Debug)]
133pub struct OllamaFunctionCall {
134 pub name: String,
135 pub arguments: Value,
136}
137
138#[derive(Serialize, Deserialize, Debug, Eq, PartialEq)]
139pub struct OllamaFunctionTool {
140 pub name: String,
141 pub description: Option<String>,
142 pub parameters: Option<Value>,
143}
144
145#[derive(Serialize, Deserialize, Debug, Eq, PartialEq)]
146#[serde(tag = "type", rename_all = "lowercase")]
147pub enum OllamaTool {
148 Function { function: OllamaFunctionTool },
149}
150
151#[derive(Serialize, Debug)]
152pub struct ChatRequest {
153 pub model: String,
154 pub messages: Vec<ChatMessage>,
155 pub stream: bool,
156 pub keep_alive: KeepAlive,
157 pub options: Option<ChatOptions>,
158 pub tools: Vec<OllamaTool>,
159 pub think: Option<bool>,
160}
161
162impl ChatRequest {
163 pub fn with_tools(mut self, tools: Vec<OllamaTool>) -> Self {
164 self.stream = false;
165 self.tools = tools;
166 self
167 }
168}
169
170// https://github.com/ollama/ollama/blob/main/docs/modelfile.md#valid-parameters-and-values
171#[derive(Serialize, Default, Debug)]
172pub struct ChatOptions {
173 pub num_ctx: Option<u64>,
174 pub num_predict: Option<isize>,
175 pub stop: Option<Vec<String>>,
176 pub temperature: Option<f32>,
177 pub top_p: Option<f32>,
178}
179
180#[derive(Deserialize, Debug)]
181pub struct ChatResponseDelta {
182 #[allow(unused)]
183 pub model: String,
184 #[allow(unused)]
185 pub created_at: String,
186 pub message: ChatMessage,
187 #[allow(unused)]
188 pub done_reason: Option<String>,
189 #[allow(unused)]
190 pub done: bool,
191 pub prompt_eval_count: Option<u64>,
192 pub eval_count: Option<u64>,
193}
194
195#[derive(Serialize, Deserialize)]
196pub struct LocalModelsResponse {
197 pub models: Vec<LocalModelListing>,
198}
199
200#[derive(Serialize, Deserialize)]
201pub struct LocalModelListing {
202 pub name: String,
203 pub modified_at: String,
204 pub size: u64,
205 pub digest: String,
206 pub details: ModelDetails,
207}
208
209#[derive(Serialize, Deserialize)]
210pub struct LocalModel {
211 pub modelfile: String,
212 pub parameters: String,
213 pub template: String,
214 pub details: ModelDetails,
215}
216
217#[derive(Serialize, Deserialize)]
218pub struct ModelDetails {
219 pub format: String,
220 pub family: String,
221 pub families: Option<Vec<String>>,
222 pub parameter_size: String,
223 pub quantization_level: String,
224}
225
226#[derive(Deserialize, Debug)]
227pub struct ModelShow {
228 #[serde(default)]
229 pub capabilities: Vec<String>,
230}
231
232impl ModelShow {
233 pub fn supports_tools(&self) -> bool {
234 // .contains expects &String, which would require an additional allocation
235 self.capabilities.iter().any(|v| v == "tools")
236 }
237
238 pub fn supports_vision(&self) -> bool {
239 self.capabilities.iter().any(|v| v == "vision")
240 }
241
242 pub fn supports_thinking(&self) -> bool {
243 self.capabilities.iter().any(|v| v == "thinking")
244 }
245}
246
/// Send a chat request and read back a single, non-streamed response.
pub async fn complete(
    client: &dyn HttpClient,
    api_url: &str,
    request: ChatRequest,
) -> Result<ChatResponseDelta> {
    let uri = format!("{api_url}/api/chat");
    let request_builder = HttpRequest::builder()
        .method(Method::POST)
        .uri(uri)
        .header("Content-Type", "application/json");

    let serialized_request = serde_json::to_string(&request)?;
    let request = request_builder.body(AsyncBody::from(serialized_request))?;

    let mut response = client.send(request).await?;

    let mut body = Vec::new();
    response.body_mut().read_to_end(&mut body).await?;

    if response.status().is_success() {
        let response_message: ChatResponseDelta = serde_json::from_slice(&body)?;
        Ok(response_message)
    } else {
        let body_str = std::str::from_utf8(&body)?;
        anyhow::bail!(
            "Failed to connect to Ollama API: {} {}",
            response.status(),
            body_str
        );
    }
}

/// Send a chat request and return a stream of newline-delimited response chunks.
pub async fn stream_chat_completion(
    client: &dyn HttpClient,
    api_url: &str,
    request: ChatRequest,
) -> Result<BoxStream<'static, Result<ChatResponseDelta>>> {
    let uri = format!("{api_url}/api/chat");
    let request_builder = http::Request::builder()
        .method(Method::POST)
        .uri(uri)
        .header("Content-Type", "application/json");

    let request = request_builder.body(AsyncBody::from(serde_json::to_string(&request)?))?;
    let mut response = client.send(request).await?;
    if response.status().is_success() {
        let reader = BufReader::new(response.into_body());

        Ok(reader
            .lines()
            .map(|line| match line {
                Ok(line) => serde_json::from_str(&line).context("Unable to parse chat response"),
                Err(e) => Err(e.into()),
            })
            .boxed())
    } else {
        let mut body = String::new();
        response.body_mut().read_to_string(&mut body).await?;
        anyhow::bail!(
            "Failed to connect to Ollama API: {} {}",
            response.status(),
            body,
        );
    }
}

/// List the models available locally via the `/api/tags` endpoint.
pub async fn get_models(
    client: &dyn HttpClient,
    api_url: &str,
    _: Option<Duration>,
) -> Result<Vec<LocalModelListing>> {
    let uri = format!("{api_url}/api/tags");
    let request_builder = HttpRequest::builder()
        .method(Method::GET)
        .uri(uri)
        .header("Accept", "application/json");

    let request = request_builder.body(AsyncBody::default())?;

    let mut response = client.send(request).await?;

    let mut body = String::new();
    response.body_mut().read_to_string(&mut body).await?;

    anyhow::ensure!(
        response.status().is_success(),
        "Failed to connect to Ollama API: {} {}",
        response.status(),
        body,
    );
    let response: LocalModelsResponse =
        serde_json::from_str(&body).context("Unable to parse Ollama tag listing")?;
    Ok(response.models)
}

/// Fetch details of a model, used to determine model capabilities
pub async fn show_model(client: &dyn HttpClient, api_url: &str, model: &str) -> Result<ModelShow> {
    let uri = format!("{api_url}/api/show");
    let request = HttpRequest::builder()
        .method(Method::POST)
        .uri(uri)
        .header("Content-Type", "application/json")
        .body(AsyncBody::from(
            serde_json::json!({ "model": model }).to_string(),
        ))?;

    let mut response = client.send(request).await?;
    let mut body = String::new();
    response.body_mut().read_to_string(&mut body).await?;

    anyhow::ensure!(
        response.status().is_success(),
        "Failed to connect to Ollama API: {} {}",
        response.status(),
        body,
    );
    let details: ModelShow = serde_json::from_str(body.as_str())?;
    Ok(details)
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn parse_completion() {
        let response = serde_json::json!({
            "model": "llama3.2",
            "created_at": "2023-12-12T14:13:43.416799Z",
            "message": {
                "role": "assistant",
                "content": "Hello! How are you today?"
            },
            "done": true,
            "total_duration": 5191566416u64,
            "load_duration": 2154458,
            "prompt_eval_count": 26,
            "prompt_eval_duration": 383809000,
            "eval_count": 298,
            "eval_duration": 4799921000u64
        });
        let _: ChatResponseDelta = serde_json::from_value(response).unwrap();
    }

    #[test]
    fn parse_streaming_completion() {
        let partial = serde_json::json!({
            "model": "llama3.2",
            "created_at": "2023-08-04T08:52:19.385406455-07:00",
            "message": {
                "role": "assistant",
                "content": "The",
                "images": null
            },
            "done": false
        });

        let _: ChatResponseDelta = serde_json::from_value(partial).unwrap();

        let last = serde_json::json!({
            "model": "llama3.2",
            "created_at": "2023-08-04T19:22:45.499127Z",
            "message": {
                "role": "assistant",
                "content": ""
            },
            "done": true,
            "total_duration": 4883583458u64,
            "load_duration": 1334875,
            "prompt_eval_count": 26,
            "prompt_eval_duration": 342546000,
            "eval_count": 282,
            "eval_duration": 4535599000u64
        });

        let _: ChatResponseDelta = serde_json::from_value(last).unwrap();
    }
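
    // `KeepAlive` is untagged, so `Seconds(-1)` should serialize as a bare number and
    // `Duration("5m")` as a plain string; a minimal check of both forms and of the
    // round-trip back through deserialization.
    #[test]
    fn serialize_keep_alive() {
        let indefinite = serde_json::to_value(KeepAlive::default()).unwrap();
        assert_eq!(indefinite, serde_json::json!(-1));

        let duration = serde_json::to_value(KeepAlive::Duration("5m".to_string())).unwrap();
        assert_eq!(duration, serde_json::json!("5m"));

        // A bare number deserializes to `Seconds`, a string to `Duration`.
        let parsed: KeepAlive = serde_json::from_value(serde_json::json!(300)).unwrap();
        assert_eq!(parsed, KeepAlive::Seconds(300));
    }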

    #[test]
    fn parse_tool_call() {
        let response = serde_json::json!({
            "model": "llama3.2:3b",
            "created_at": "2025-04-28T20:02:02.140489Z",
            "message": {
                "role": "assistant",
                "content": "",
                "tool_calls": [
                    {
                        "function": {
                            "name": "weather",
                            "arguments": {
                                "city": "london",
                            }
                        }
                    }
                ]
            },
            "done_reason": "stop",
            "done": true,
            "total_duration": 2758629166u64,
            "load_duration": 1770059875,
            "prompt_eval_count": 147,
            "prompt_eval_duration": 684637583,
            "eval_count": 16,
            "eval_duration": 302561917,
        });

        let result: ChatResponseDelta = serde_json::from_value(response).unwrap();
        match result.message {
            ChatMessage::Assistant {
                content,
                tool_calls,
                images: _,
                thinking,
            } => {
                assert!(content.is_empty());
                assert!(tool_calls.is_some_and(|v| !v.is_empty()));
                assert!(thinking.is_none());
            }
            _ => panic!("Deserialized wrong role"),
        }
    }
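
    // Minimal sanity checks for `get_max_tokens` and `Model::new`: known models map to
    // their listed context lengths, larger contexts are clamped to MAXIMUM_TOKENS,
    // unknown models fall back to DEFAULT_TOKENS, and a ":latest" suffix is stripped
    // from the default display name.
    #[test]
    fn model_defaults_and_context_clamping() {
        assert_eq!(get_max_tokens("phi"), 2048);
        assert_eq!(get_max_tokens("codellama:13b"), 16384);
        assert_eq!(get_max_tokens("llama3.1:8b"), 16384); // 128k, clamped down to 16k
        assert_eq!(get_max_tokens("some-unknown-model"), 4096);

        let model = Model::new("llama3.2:latest", None, None, None, None, None);
        assert_eq!(model.display_name(), "llama3.2");
        assert_eq!(model.max_token_count(), 16384);
        assert_eq!(model.keep_alive, Some(KeepAlive::Seconds(-1)));
    }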

    #[test]
    fn parse_show_model() {
        let response = serde_json::json!({
            "license": "LLAMA 3.2 COMMUNITY LICENSE AGREEMENT...",
            "details": {
                "parent_model": "",
                "format": "gguf",
                "family": "llama",
                "families": ["llama"],
                "parameter_size": "3.2B",
                "quantization_level": "Q4_K_M"
            },
            "model_info": {
                "general.architecture": "llama",
                "general.basename": "Llama-3.2",
                "general.file_type": 15,
                "general.finetune": "Instruct",
                "general.languages": ["en", "de", "fr", "it", "pt", "hi", "es", "th"],
                "general.parameter_count": 3212749888u64,
                "general.quantization_version": 2,
                "general.size_label": "3B",
                "general.tags": ["facebook", "meta", "pytorch", "llama", "llama-3", "text-generation"],
                "general.type": "model",
                "llama.attention.head_count": 24,
                "llama.attention.head_count_kv": 8,
                "llama.attention.key_length": 128,
                "llama.attention.layer_norm_rms_epsilon": 0.00001,
                "llama.attention.value_length": 128,
                "llama.block_count": 28,
                "llama.context_length": 131072,
                "llama.embedding_length": 3072,
                "llama.feed_forward_length": 8192,
                "llama.rope.dimension_count": 128,
                "llama.rope.freq_base": 500000,
                "llama.vocab_size": 128256,
                "tokenizer.ggml.bos_token_id": 128000,
                "tokenizer.ggml.eos_token_id": 128009,
                "tokenizer.ggml.merges": null,
                "tokenizer.ggml.model": "gpt2",
                "tokenizer.ggml.pre": "llama-bpe",
                "tokenizer.ggml.token_type": null,
                "tokenizer.ggml.tokens": null
            },
            "tensors": [
                { "name": "rope_freqs.weight", "type": "F32", "shape": [64] },
                { "name": "token_embd.weight", "type": "Q4_K_S", "shape": [3072, 128256] }
            ],
            "capabilities": ["completion", "tools"],
            "modified_at": "2025-04-29T21:24:41.445877632+03:00"
        });

        let result: ModelShow = serde_json::from_value(response).unwrap();
        assert!(result.supports_tools());
        assert!(result.capabilities.contains(&"tools".to_string()));
        assert!(result.capabilities.contains(&"completion".to_string()));
    }
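
    // `capabilities` is `#[serde(default)]`, so a show response without that field
    // should still deserialize and simply report no capabilities.
    #[test]
    fn parse_show_model_without_capabilities() {
        let result: ModelShow = serde_json::from_value(serde_json::json!({})).unwrap();
        assert!(result.capabilities.is_empty());
        assert!(!result.supports_tools());
        assert!(!result.supports_vision());
        assert!(!result.supports_thinking());
    }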

    #[test]
    fn serialize_chat_request_with_images() {
        let base64_image = "iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAADUlEQVR42mNk+M9QDwADhgGAWjR9awAAAABJRU5ErkJggg==";

        let request = ChatRequest {
            model: "llava".to_string(),
            messages: vec![ChatMessage::User {
                content: "What do you see in this image?".to_string(),
                images: Some(vec![base64_image.to_string()]),
            }],
            stream: false,
            keep_alive: KeepAlive::default(),
            options: None,
            think: None,
            tools: vec![],
        };

        let serialized = serde_json::to_string(&request).unwrap();
        assert!(serialized.contains("images"));
        assert!(serialized.contains(base64_image));
    }

    #[test]
    fn serialize_chat_request_without_images() {
        let request = ChatRequest {
            model: "llama3.2".to_string(),
            messages: vec![ChatMessage::User {
                content: "Hello, world!".to_string(),
                images: None,
            }],
            stream: false,
            keep_alive: KeepAlive::default(),
            options: None,
            think: None,
            tools: vec![],
        };

        let serialized = serde_json::to_string(&request).unwrap();
        assert!(!serialized.contains("images"));
    }
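
    // `with_tools` is expected to disable streaming and attach the tool definitions;
    // the serialized tool carries the `"type": "function"` tag from the enum's serde
    // attribute. The tool name and description here are purely illustrative.
    #[test]
    fn with_tools_disables_streaming() {
        let tool = OllamaTool::Function {
            function: OllamaFunctionTool {
                name: "weather".to_string(),
                description: Some("Get the current weather".to_string()),
                parameters: None,
            },
        };

        let request = ChatRequest {
            model: "llama3.2".to_string(),
            messages: vec![ChatMessage::User {
                content: "What's the weather in London?".to_string(),
                images: None,
            }],
            stream: true,
            keep_alive: KeepAlive::default(),
            options: None,
            think: None,
            tools: vec![],
        }
        .with_tools(vec![tool]);

        assert!(!request.stream);
        assert_eq!(request.tools.len(), 1);

        let serialized = serde_json::to_string(&request).unwrap();
        assert!(serialized.contains(r#""type":"function""#));
        assert!(serialized.contains(r#""name":"weather""#));
    }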

    #[test]
    fn test_json_format_with_images() {
        let base64_image = "iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAADUlEQVR42mNk+M9QDwADhgGAWjR9awAAAABJRU5ErkJggg==";

        let request = ChatRequest {
            model: "llava".to_string(),
            messages: vec![ChatMessage::User {
                content: "What do you see?".to_string(),
                images: Some(vec![base64_image.to_string()]),
            }],
            stream: false,
            keep_alive: KeepAlive::default(),
            options: None,
            think: None,
            tools: vec![],
        };

        let serialized = serde_json::to_string(&request).unwrap();

        let parsed: serde_json::Value = serde_json::from_str(&serialized).unwrap();
        let message_images = parsed["messages"][0]["images"].as_array().unwrap();
        assert_eq!(message_images.len(), 1);
        assert_eq!(message_images[0].as_str().unwrap(), base64_image);
    }
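
    // The `role` tag on `ChatMessage` is lowercased, so a tool result should serialize
    // with `"role": "tool"` alongside its `tool_name` and `content`.
    #[test]
    fn serialize_tool_message_role() {
        let message = ChatMessage::Tool {
            tool_name: "weather".to_string(),
            content: "{\"temperature\": 16}".to_string(),
        };

        let serialized = serde_json::to_value(&message).unwrap();
        assert_eq!(serialized["role"], "tool");
        assert_eq!(serialized["tool_name"], "weather");
    }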
}