use anyhow::{Context as _, Result, anyhow};
use futures::{AsyncBufReadExt, AsyncReadExt, StreamExt, io::BufReader, stream::BoxStream};
use http_client::{AsyncBody, HttpClient, Method, Request as HttpRequest, http};
use serde::{Deserialize, Serialize};
use serde_json::{Value, value::RawValue};
use std::{convert::TryFrom, sync::Arc, time::Duration};

pub const OLLAMA_API_URL: &str = "http://localhost:11434";

#[derive(Clone, Copy, Serialize, Deserialize, Debug, Eq, PartialEq)]
#[serde(rename_all = "lowercase")]
pub enum Role {
    User,
    Assistant,
    System,
}

impl TryFrom<String> for Role {
    type Error = anyhow::Error;

    fn try_from(value: String) -> Result<Self> {
        match value.as_str() {
            "user" => Ok(Self::User),
            "assistant" => Ok(Self::Assistant),
            "system" => Ok(Self::System),
            _ => Err(anyhow!("invalid role '{value}'")),
        }
    }
}

impl From<Role> for String {
    fn from(val: Role) -> Self {
        match val {
            Role::User => "user".to_owned(),
            Role::Assistant => "assistant".to_owned(),
            Role::System => "system".to_owned(),
        }
    }
}

#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
#[derive(Clone, Serialize, Deserialize, Debug, Eq, PartialEq)]
#[serde(untagged)]
pub enum KeepAlive {
    /// Keep model alive for N seconds
    Seconds(isize),
    /// Keep model alive for a fixed duration. Accepts durations like "5m", "10m", "1h", "1d", etc.
    Duration(String),
}

impl KeepAlive {
    /// Keep model alive until a new model is loaded or until Ollama shuts down
    fn indefinite() -> Self {
        Self::Seconds(-1)
    }
}

impl Default for KeepAlive {
    fn default() -> Self {
        Self::indefinite()
    }
}
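
// A minimal sanity check, not part of the upstream API: because `KeepAlive`
// is `#[serde(untagged)]`, `Seconds(-1)` serializes to the bare number Ollama
// treats as "keep loaded indefinitely", and `Duration` passes its string
// through unchanged.
#[cfg(test)]
mod keep_alive_tests {
    use super::*;

    #[test]
    fn serializes_untagged() {
        assert_eq!(serde_json::to_string(&KeepAlive::indefinite()).unwrap(), "-1");
        assert_eq!(
            serde_json::to_string(&KeepAlive::Duration("5m".to_string())).unwrap(),
            "\"5m\""
        );
    }
}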

#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
#[derive(Clone, Debug, Default, Serialize, Deserialize, PartialEq)]
pub struct Model {
    pub name: String,
    pub display_name: Option<String>,
    pub max_tokens: usize,
    pub keep_alive: Option<KeepAlive>,
}

fn get_max_tokens(name: &str) -> usize {
    /// Default context length for unknown models.
    const DEFAULT_TOKENS: usize = 2048;
    /// Magic number. Lets many Ollama models work with ~16GB of RAM.
    const MAXIMUM_TOKENS: usize = 16384;

    // `split` always yields at least one item, so `unwrap` cannot panic here.
    match name.split(':').next().unwrap() {
        "phi" | "tinyllama" | "granite-code" => 2048,
        "llama2" | "yi" | "vicuna" | "stablelm2" => 4096,
        "llama3" | "gemma2" | "gemma" | "codegemma" | "starcoder" | "aya" => 8192,
        "codellama" | "starcoder2" => 16384,
        "mistral" | "codestral" | "mixtral" | "llava" | "qwen2" | "qwen2.5-coder"
        | "dolphin-mixtral" => 32768,
        "llama3.1" | "llama3.2" | "llama3.3" | "phi3" | "phi3.5" | "phi4" | "command-r"
        | "qwen3" | "gemma3" | "deepseek-coder-v2" | "deepseek-v3" | "deepseek-r1" | "yi-coder" => {
            128000
        }
        _ => DEFAULT_TOKENS,
    }
    .clamp(1, MAXIMUM_TOKENS)
}
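
// Hedged sanity checks for the lookup above: the model family is matched
// before the `:tag` suffix, large contexts are clamped to the 16384 ceiling,
// and unknown models fall back to the 2048 default.
#[cfg(test)]
mod max_tokens_tests {
    use super::*;

    #[test]
    fn respects_prefix_and_clamp() {
        assert_eq!(get_max_tokens("llama3:latest"), 8192);
        // 128k-context models are clamped down to MAXIMUM_TOKENS.
        assert_eq!(get_max_tokens("llama3.1:70b"), 16384);
        assert_eq!(get_max_tokens("some-unknown-model"), 2048);
    }
}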

impl Model {
    pub fn new(name: &str, display_name: Option<&str>, max_tokens: Option<usize>) -> Self {
        Self {
            name: name.to_owned(),
            display_name: display_name
                .map(ToString::to_string)
                .or_else(|| name.strip_suffix(":latest").map(ToString::to_string)),
            max_tokens: max_tokens.unwrap_or_else(|| get_max_tokens(name)),
            keep_alive: Some(KeepAlive::indefinite()),
        }
    }

    pub fn id(&self) -> &str {
        &self.name
    }

    pub fn display_name(&self) -> &str {
        self.display_name.as_ref().unwrap_or(&self.name)
    }

    pub fn max_token_count(&self) -> usize {
        self.max_tokens
    }
}
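
// A small illustrative test (the model name here is an example, not a
// requirement): `Model::new` strips the implicit `:latest` tag for display
// purposes and falls back to `get_max_tokens` when no explicit limit is given.
#[cfg(test)]
mod model_tests {
    use super::*;

    #[test]
    fn new_derives_display_name_and_max_tokens() {
        let model = Model::new("llama3:latest", None, None);
        assert_eq!(model.display_name(), "llama3");
        assert_eq!(model.max_token_count(), 8192);
    }
}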

#[derive(Serialize, Deserialize, Debug)]
#[serde(tag = "role", rename_all = "lowercase")]
pub enum ChatMessage {
    Assistant {
        content: String,
        tool_calls: Option<Vec<OllamaToolCall>>,
    },
    User {
        content: String,
    },
    System {
        content: String,
    },
}
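
// Hedged example of the wire format: `#[serde(tag = "role")]` folds the
// variant name into a `role` field, which is the shape Ollama's chat API
// expects for messages.
#[cfg(test)]
mod chat_message_tests {
    use super::*;

    #[test]
    fn tags_variant_as_role() {
        let message = ChatMessage::User {
            content: "Hello!".to_string(),
        };
        assert_eq!(
            serde_json::to_string(&message).unwrap(),
            r#"{"role":"user","content":"Hello!"}"#
        );
    }
}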

#[derive(Serialize, Deserialize, Debug)]
#[serde(rename_all = "lowercase")]
pub enum OllamaToolCall {
    Function(OllamaFunctionCall),
}

#[derive(Serialize, Deserialize, Debug)]
pub struct OllamaFunctionCall {
    pub name: String,
    pub arguments: Box<RawValue>,
}

#[derive(Serialize, Deserialize, Debug, Eq, PartialEq)]
pub struct OllamaFunctionTool {
    pub name: String,
    pub description: Option<String>,
    pub parameters: Option<Value>,
}

#[derive(Serialize, Deserialize, Debug, Eq, PartialEq)]
#[serde(tag = "type", rename_all = "lowercase")]
pub enum OllamaTool {
    Function { function: OllamaFunctionTool },
}

#[derive(Serialize, Debug)]
pub struct ChatRequest {
    pub model: String,
    pub messages: Vec<ChatMessage>,
    pub stream: bool,
    pub keep_alive: KeepAlive,
    pub options: Option<ChatOptions>,
    pub tools: Vec<OllamaTool>,
}

impl ChatRequest {
    pub fn with_tools(mut self, tools: Vec<OllamaTool>) -> Self {
        // Tool calls come back in a single complete response; streaming with
        // tools wasn't supported when this was written, so disable it here.
        self.stream = false;
        self.tools = tools;
        self
    }
}
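
// Illustrative check that `with_tools` switches the request to non-streaming
// mode. The tool definition is a hypothetical example, not a real tool.
#[cfg(test)]
mod chat_request_tests {
    use super::*;

    #[test]
    fn with_tools_disables_streaming() {
        let request = ChatRequest {
            model: "llama3".to_string(),
            messages: Vec::new(),
            stream: true,
            keep_alive: KeepAlive::default(),
            options: None,
            tools: Vec::new(),
        };
        let request = request.with_tools(vec![OllamaTool::Function {
            function: OllamaFunctionTool {
                name: "get_weather".to_string(), // hypothetical tool name
                description: None,
                parameters: None,
            },
        }]);
        assert!(!request.stream);
        assert_eq!(request.tools.len(), 1);
    }
}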

// https://github.com/ollama/ollama/blob/main/docs/modelfile.md#valid-parameters-and-values
#[derive(Serialize, Default, Debug)]
pub struct ChatOptions {
    pub num_ctx: Option<usize>,
    pub num_predict: Option<isize>,
    pub stop: Option<Vec<String>>,
    pub temperature: Option<f32>,
    pub top_p: Option<f32>,
}
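
// A hedged sketch of how `ChatOptions` serializes: there is no
// `skip_serializing_if`, so unset options appear as explicit `null`s in the
// request body (which the server is expected to treat as unset).
#[cfg(test)]
mod chat_options_tests {
    use super::*;

    #[test]
    fn serializes_unset_fields_as_null() {
        let options = ChatOptions {
            num_ctx: Some(8192),
            ..Default::default()
        };
        let json = serde_json::to_value(&options).unwrap();
        assert_eq!(json["num_ctx"], 8192);
        assert!(json["temperature"].is_null());
    }
}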

#[derive(Deserialize, Debug)]
pub struct ChatResponseDelta {
    #[allow(unused)]
    pub model: String,
    #[allow(unused)]
    pub created_at: String,
    pub message: ChatMessage,
    #[allow(unused)]
    pub done_reason: Option<String>,
    #[allow(unused)]
    pub done: bool,
}

#[derive(Serialize, Deserialize)]
pub struct LocalModelsResponse {
    pub models: Vec<LocalModelListing>,
}

#[derive(Serialize, Deserialize)]
pub struct LocalModelListing {
    pub name: String,
    pub modified_at: String,
    pub size: u64,
    pub digest: String,
    pub details: ModelDetails,
}

#[derive(Serialize, Deserialize)]
pub struct LocalModel {
    pub modelfile: String,
    pub parameters: String,
    pub template: String,
    pub details: ModelDetails,
}

#[derive(Serialize, Deserialize)]
pub struct ModelDetails {
    pub format: String,
    pub family: String,
    pub families: Option<Vec<String>>,
    pub parameter_size: String,
    pub quantization_level: String,
}

pub async fn complete(
    client: &dyn HttpClient,
    api_url: &str,
    request: ChatRequest,
) -> Result<ChatResponseDelta> {
    let uri = format!("{api_url}/api/chat");
    let request_builder = HttpRequest::builder()
        .method(Method::POST)
        .uri(uri)
        .header("Content-Type", "application/json");

    let serialized_request = serde_json::to_string(&request)?;
    let request = request_builder.body(AsyncBody::from(serialized_request))?;

    let mut response = client.send(request).await?;
    if response.status().is_success() {
        let mut body = Vec::new();
        response.body_mut().read_to_end(&mut body).await?;
        let response_message: ChatResponseDelta = serde_json::from_slice(&body)?;
        Ok(response_message)
    } else {
        let mut body = Vec::new();
        response.body_mut().read_to_end(&mut body).await?;
        let body_str = std::str::from_utf8(&body)?;
        Err(anyhow!(
            "Failed to connect to Ollama API: {} {}",
            response.status(),
            body_str
        ))
    }
}

pub async fn stream_chat_completion(
    client: &dyn HttpClient,
    api_url: &str,
    request: ChatRequest,
) -> Result<BoxStream<'static, Result<ChatResponseDelta>>> {
    let uri = format!("{api_url}/api/chat");
    let request_builder = http::Request::builder()
        .method(Method::POST)
        .uri(uri)
        .header("Content-Type", "application/json");

    let request = request_builder.body(AsyncBody::from(serde_json::to_string(&request)?))?;
    let mut response = client.send(request).await?;
    if response.status().is_success() {
        let reader = BufReader::new(response.into_body());

        Ok(reader
            .lines()
            .filter_map(move |line| async move {
                match line {
                    Ok(line) => {
                        Some(serde_json::from_str(&line).context("Unable to parse chat response"))
                    }
                    Err(e) => Some(Err(e.into())),
                }
            })
            .boxed())
    } else {
        let mut body = String::new();
        response.body_mut().read_to_string(&mut body).await?;

        Err(anyhow!(
            "Failed to connect to Ollama API: {} {}",
            response.status(),
            body,
        ))
    }
}
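
// Hedged illustration of the stream format: Ollama emits one self-contained
// JSON object per line (NDJSON), parsed here the same way the stream above
// parses each line. The sample payload is hand-written, not captured from a
// server.
#[cfg(test)]
mod stream_tests {
    use super::*;

    #[test]
    fn parses_streamed_line() {
        let line = r#"{"model":"llama3","created_at":"2024-01-01T00:00:00Z","message":{"role":"assistant","content":"Hi"},"done":false}"#;
        let delta: ChatResponseDelta = serde_json::from_str(line).unwrap();
        assert!(!delta.done);
        match delta.message {
            ChatMessage::Assistant { content, .. } => assert_eq!(content, "Hi"),
            _ => panic!("expected an assistant message"),
        }
    }
}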

pub async fn get_models(
    client: &dyn HttpClient,
    api_url: &str,
    _: Option<Duration>,
) -> Result<Vec<LocalModelListing>> {
    let uri = format!("{api_url}/api/tags");
    let request_builder = HttpRequest::builder()
        .method(Method::GET)
        .uri(uri)
        .header("Accept", "application/json");

    let request = request_builder.body(AsyncBody::default())?;

    let mut response = client.send(request).await?;

    let mut body = String::new();
    response.body_mut().read_to_string(&mut body).await?;

    if response.status().is_success() {
        let response: LocalModelsResponse =
            serde_json::from_str(&body).context("Unable to parse Ollama tag listing")?;

        Ok(response.models)
    } else {
        Err(anyhow!(
            "Failed to connect to Ollama API: {} {}",
            response.status(),
            body,
        ))
    }
}

/// Sends a generate request with no prompt to Ollama to trigger loading the model.
pub async fn preload_model(client: Arc<dyn HttpClient>, api_url: &str, model: &str) -> Result<()> {
    let uri = format!("{api_url}/api/generate");
    let request = HttpRequest::builder()
        .method(Method::POST)
        .uri(uri)
        .header("Content-Type", "application/json")
        .body(AsyncBody::from(serde_json::to_string(
            &serde_json::json!({
                "model": model,
                "keep_alive": "15m",
            }),
        )?))?;

    let mut response = client.send(request).await?;

    if response.status().is_success() {
        Ok(())
    } else {
        let mut body = String::new();
        response.body_mut().read_to_string(&mut body).await?;

        Err(anyhow!(
            "Failed to connect to Ollama API: {} {}",
            response.status(),
            body,
        ))
    }
}