use anyhow::{Context as _, Result, anyhow};
use futures::{AsyncBufReadExt, AsyncReadExt, StreamExt, io::BufReader, stream::BoxStream};
use http_client::{AsyncBody, HttpClient, Method, Request as HttpRequest};
use serde::{Deserialize, Serialize};
use serde_json::{Value, value::RawValue};
use std::{convert::TryFrom, sync::Arc, time::Duration};

pub const OLLAMA_API_URL: &str = "http://localhost:11434";

#[derive(Clone, Copy, Serialize, Deserialize, Debug, Eq, PartialEq)]
#[serde(rename_all = "lowercase")]
pub enum Role {
    User,
    Assistant,
    System,
}

impl TryFrom<String> for Role {
    type Error = anyhow::Error;

    fn try_from(value: String) -> Result<Self> {
        match value.as_str() {
            "user" => Ok(Self::User),
            "assistant" => Ok(Self::Assistant),
            "system" => Ok(Self::System),
            _ => Err(anyhow!("invalid role '{value}'")),
        }
    }
}

impl From<Role> for String {
    fn from(val: Role) -> Self {
        match val {
            Role::User => "user".to_owned(),
            Role::Assistant => "assistant".to_owned(),
            Role::System => "system".to_owned(),
        }
    }
}

#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
#[derive(Clone, Serialize, Deserialize, Debug, Eq, PartialEq)]
#[serde(untagged)]
pub enum KeepAlive {
    /// Keep model alive for N seconds
    Seconds(isize),
    /// Keep model alive for a fixed duration. Accepts durations like "5m", "10m", "1h", "1d", etc.
    Duration(String),
}

impl KeepAlive {
    /// Keep model alive until a new model is loaded or until Ollama shuts down
    fn indefinite() -> Self {
        Self::Seconds(-1)
    }
}

impl Default for KeepAlive {
    fn default() -> Self {
        Self::indefinite()
    }
}
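
// With `#[serde(untagged)]`, `KeepAlive::Seconds(-1)` (the default) serializes
// to the bare number `-1` and `KeepAlive::Duration("5m".into())` to the string
// `"5m"`, the two forms Ollama accepts for its `keep_alive` field.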

#[cfg_attr(feature = "schemars", derive(schemars::JsonSchema))]
#[derive(Clone, Debug, Default, Serialize, Deserialize, PartialEq)]
pub struct Model {
    pub name: String,
    pub display_name: Option<String>,
    pub max_tokens: usize,
    pub keep_alive: Option<KeepAlive>,
}

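/// Returns a best-effort context length for a model based on its base name
/// (the part before the first `:` tag), clamped to `MAXIMUM_TOKENS`.
///
/// For example, `get_max_tokens("llama3.1:8b")` maps to 128000 tokens, which
/// is then clamped down to 16384.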
fn get_max_tokens(name: &str) -> usize {
    /// Default context length for unknown models.
    const DEFAULT_TOKENS: usize = 2048;
    /// Magic number. Lets many Ollama models work with ~16GB of RAM.
    const MAXIMUM_TOKENS: usize = 16384;

    match name.split(':').next().unwrap() {
        "phi" | "tinyllama" | "granite-code" => 2048,
        "llama2" | "yi" | "vicuna" | "stablelm2" => 4096,
        "llama3" | "gemma2" | "gemma" | "codegemma" | "starcoder" | "aya" => 8192,
        "codellama" | "starcoder2" => 16384,
        "mistral" | "codestral" | "mixtral" | "llava" | "qwen2" | "qwen2.5-coder"
        | "dolphin-mixtral" => 32768,
        "llama3.1" | "llama3.2" | "llama3.3" | "phi3" | "phi3.5" | "phi4" | "command-r"
        | "deepseek-coder-v2" | "deepseek-v3" | "deepseek-r1" | "yi-coder" => 128000,
        _ => DEFAULT_TOKENS,
    }
    .clamp(1, MAXIMUM_TOKENS)
}

impl Model {
    pub fn new(name: &str, display_name: Option<&str>, max_tokens: Option<usize>) -> Self {
        Self {
            name: name.to_owned(),
            display_name: display_name
                .map(ToString::to_string)
                .or_else(|| name.strip_suffix(":latest").map(ToString::to_string)),
            max_tokens: max_tokens.unwrap_or_else(|| get_max_tokens(name)),
            keep_alive: Some(KeepAlive::indefinite()),
        }
    }

    pub fn id(&self) -> &str {
        &self.name
    }

    pub fn display_name(&self) -> &str {
        self.display_name.as_ref().unwrap_or(&self.name)
    }

    pub fn max_token_count(&self) -> usize {
        self.max_tokens
    }
}
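
// Example: `Model::new("llama3:latest", None, None)` strips the `:latest`
// suffix for its display name ("llama3") and falls back to
// `get_max_tokens("llama3:latest")` (8192) for `max_tokens`.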

#[derive(Serialize, Deserialize, Debug)]
#[serde(tag = "role", rename_all = "lowercase")]
pub enum ChatMessage {
    Assistant {
        content: String,
        tool_calls: Option<Vec<OllamaToolCall>>,
    },
    User {
        content: String,
    },
    System {
        content: String,
    },
}
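
// `#[serde(tag = "role")]` flattens each variant into a single object with a
// `"role"` discriminant, e.g. `{"role":"user","content":"Hi"}`, matching the
// message shape of Ollama's chat API.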

#[derive(Serialize, Deserialize, Debug)]
#[serde(rename_all = "lowercase")]
pub enum OllamaToolCall {
    Function(OllamaFunctionCall),
}

#[derive(Serialize, Deserialize, Debug)]
pub struct OllamaFunctionCall {
    pub name: String,
    pub arguments: Box<RawValue>,
}
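
// `arguments` is kept as unparsed JSON (`Box<RawValue>`) so callers can
// deserialize it against the schema of whichever tool was invoked.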

#[derive(Serialize, Deserialize, Debug, Eq, PartialEq)]
pub struct OllamaFunctionTool {
    pub name: String,
    pub description: Option<String>,
    pub parameters: Option<Value>,
}

#[derive(Serialize, Deserialize, Debug, Eq, PartialEq)]
#[serde(tag = "type", rename_all = "lowercase")]
pub enum OllamaTool {
    Function { function: OllamaFunctionTool },
}

#[derive(Serialize, Debug)]
pub struct ChatRequest {
    pub model: String,
    pub messages: Vec<ChatMessage>,
    pub stream: bool,
    pub keep_alive: KeepAlive,
    pub options: Option<ChatOptions>,
    pub tools: Vec<OllamaTool>,
}

impl ChatRequest {
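    /// Attaches tools to the request and forces it to be non-streaming, since
    /// Ollama's tool calling does not support streaming responses.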
    pub fn with_tools(mut self, tools: Vec<OllamaTool>) -> Self {
        self.stream = false;
        self.tools = tools;
        self
    }
}

// https://github.com/ollama/ollama/blob/main/docs/modelfile.md#valid-parameters-and-values
#[derive(Serialize, Default, Debug)]
pub struct ChatOptions {
    pub num_ctx: Option<usize>,
    pub num_predict: Option<isize>,
    pub stop: Option<Vec<String>>,
    pub temperature: Option<f32>,
    pub top_p: Option<f32>,
}

#[derive(Deserialize, Debug)]
pub struct ChatResponseDelta {
    #[allow(unused)]
    pub model: String,
    #[allow(unused)]
    pub created_at: String,
    pub message: ChatMessage,
    #[allow(unused)]
    pub done_reason: Option<String>,
    #[allow(unused)]
    pub done: bool,
}
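
// When streaming, every chunk arrives with `done: false` except the final one,
// which sets `done: true` and a `done_reason` (typically "stop").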

#[derive(Serialize, Deserialize)]
pub struct LocalModelsResponse {
    pub models: Vec<LocalModelListing>,
}

#[derive(Serialize, Deserialize)]
pub struct LocalModelListing {
    pub name: String,
    pub modified_at: String,
    pub size: u64,
    pub digest: String,
    pub details: ModelDetails,
}

#[derive(Serialize, Deserialize)]
pub struct LocalModel {
    pub modelfile: String,
    pub parameters: String,
    pub template: String,
    pub details: ModelDetails,
}

#[derive(Serialize, Deserialize)]
pub struct ModelDetails {
    pub format: String,
    pub family: String,
    pub families: Option<Vec<String>>,
    pub parameter_size: String,
    pub quantization_level: String,
}

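/// Sends a non-streaming request to Ollama's `/api/chat` endpoint and returns
/// the fully assembled response.
///
/// A minimal usage sketch; `client` stands in for any `&dyn HttpClient`, and a
/// local Ollama server is assumed:
///
/// ```ignore
/// let request = ChatRequest {
///     model: "llama3".into(),
///     messages: vec![ChatMessage::User { content: "Hi".into() }],
///     stream: false,
///     keep_alive: KeepAlive::default(),
///     options: None,
///     tools: Vec::new(),
/// };
/// let response = complete(client, OLLAMA_API_URL, request).await?;
/// ```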
pub async fn complete(
    client: &dyn HttpClient,
    api_url: &str,
    request: ChatRequest,
) -> Result<ChatResponseDelta> {
    let uri = format!("{api_url}/api/chat");
    let request_builder = HttpRequest::builder()
        .method(Method::POST)
        .uri(uri)
        .header("Content-Type", "application/json");

    let serialized_request = serde_json::to_string(&request)?;
    let request = request_builder.body(AsyncBody::from(serialized_request))?;

    let mut response = client.send(request).await?;
    if response.status().is_success() {
        let mut body = Vec::new();
        response.body_mut().read_to_end(&mut body).await?;
        let response_message: ChatResponseDelta = serde_json::from_slice(&body)?;
        Ok(response_message)
    } else {
        let mut body = Vec::new();
        response.body_mut().read_to_end(&mut body).await?;
        let body_str = std::str::from_utf8(&body)?;
        Err(anyhow!(
            "Failed to connect to Ollama API: {} {}",
            response.status(),
            body_str
        ))
    }
}

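/// Sends a streaming request to `/api/chat` and yields one `ChatResponseDelta`
/// per newline-delimited JSON line of the response body.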
pub async fn stream_chat_completion(
    client: &dyn HttpClient,
    api_url: &str,
    request: ChatRequest,
) -> Result<BoxStream<'static, Result<ChatResponseDelta>>> {
    let uri = format!("{api_url}/api/chat");
    let request_builder = HttpRequest::builder()
        .method(Method::POST)
        .uri(uri)
        .header("Content-Type", "application/json");

    let request = request_builder.body(AsyncBody::from(serde_json::to_string(&request)?))?;
    let mut response = client.send(request).await?;
    if response.status().is_success() {
        let reader = BufReader::new(response.into_body());

        Ok(reader
            .lines()
            .filter_map(move |line| async move {
                match line {
                    Ok(line) => {
                        Some(serde_json::from_str(&line).context("Unable to parse chat response"))
                    }
                    Err(e) => Some(Err(e.into())),
                }
            })
            .boxed())
    } else {
        let mut body = String::new();
        response.body_mut().read_to_string(&mut body).await?;

        Err(anyhow!(
            "Failed to connect to Ollama API: {} {}",
            response.status(),
            body,
        ))
    }
}

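/// Fetches the list of locally installed models from Ollama's `/api/tags`
/// endpoint.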
pub async fn get_models(
    client: &dyn HttpClient,
    api_url: &str,
    _: Option<Duration>,
) -> Result<Vec<LocalModelListing>> {
    let uri = format!("{api_url}/api/tags");
    let request_builder = HttpRequest::builder()
        .method(Method::GET)
        .uri(uri)
        .header("Accept", "application/json");

    let request = request_builder.body(AsyncBody::default())?;

    let mut response = client.send(request).await?;

    let mut body = String::new();
    response.body_mut().read_to_string(&mut body).await?;

    if response.status().is_success() {
        let response: LocalModelsResponse =
            serde_json::from_str(&body).context("Unable to parse Ollama tag listing")?;

        Ok(response.models)
    } else {
        Err(anyhow!(
            "Failed to connect to Ollama API: {} {}",
            response.status(),
            body,
        ))
    }
}

/// Sends a prompt-less request to Ollama's `/api/generate` endpoint to trigger
/// loading the model into memory.
pub async fn preload_model(client: Arc<dyn HttpClient>, api_url: &str, model: &str) -> Result<()> {
    let uri = format!("{api_url}/api/generate");
    let request = HttpRequest::builder()
        .method(Method::POST)
        .uri(uri)
        .header("Content-Type", "application/json")
        .body(AsyncBody::from(serde_json::to_string(
            &serde_json::json!({
                "model": model,
                "keep_alive": "15m",
            }),
        )?))?;

    let mut response = client.send(request).await?;

    if response.status().is_success() {
        Ok(())
    } else {
        let mut body = String::new();
        response.body_mut().read_to_string(&mut body).await?;

        Err(anyhow!(
            "Failed to connect to Ollama API: {} {}",
            response.status(),
            body,
        ))
    }
}
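
// A couple of sanity checks over the pure conversion and serialization logic
// above. These exercise only local code paths; no Ollama server is required.
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn role_round_trips_through_string() {
        for role in [Role::User, Role::Assistant, Role::System] {
            let s: String = role.into();
            assert_eq!(Role::try_from(s).unwrap(), role);
        }
    }

    #[test]
    fn keep_alive_serializes_untagged() {
        // `Seconds(-1)` (the default) becomes a bare number, `Duration` a string.
        assert_eq!(serde_json::to_string(&KeepAlive::default()).unwrap(), "-1");
        assert_eq!(
            serde_json::to_string(&KeepAlive::Duration("5m".to_string())).unwrap(),
            "\"5m\""
        );
    }
}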