// clients.gleam

// SPDX-FileCopyrightText: Amolith <amolith@secluded.site>
//
// SPDX-License-Identifier: AGPL-3.0-or-later

import config
import gleam/float
import gleam/int
import gleam/option.{type Option, None, Some}
import gleam/result
import starlet
import starlet/anthropic
import starlet/gemini
import starlet/ollama
import starlet/openai

 16pub fn send_openai(
 17  api_key: String,
 18  base_url: Option(String),
 19  model: String,
 20  system_prompt: String,
 21  user_prompt: String,
 22  reasoning: Option(config.ReasoningEffort),
 23) -> Result(String, String) {
 24  let client = case base_url {
 25    Some(url) -> openai.new_with_base_url(api_key, url)
 26    None -> openai.new(api_key)
 27  }
 28
 29  let chat =
 30    starlet.chat(client, model)
 31    |> starlet.system(system_prompt)
 32    |> starlet.user(user_prompt)
 33
 34  let chat = case reasoning {
 35    Some(config.ReasoningLow) -> openai.with_reasoning(chat, openai.ReasoningLow)
 36    Some(config.ReasoningMedium) ->
 37      openai.with_reasoning(chat, openai.ReasoningMedium)
 38    Some(config.ReasoningHigh) ->
 39      openai.with_reasoning(chat, openai.ReasoningHigh)
 40    None -> chat
 41  }
 42
 43  chat
 44  |> starlet.send()
 45  |> result.map(fn(resp) { starlet.text(resp.1) })
 46  |> result.map_error(format_starlet_error)
 47}
 48
 49pub fn send_anthropic(
 50  api_key: String,
 51  base_url: Option(String),
 52  model: String,
 53  system_prompt: String,
 54  user_prompt: String,
 55  reasoning: Option(config.ReasoningEffort),
 56) -> Result(String, String) {
 57  let client = case base_url {
 58    Some(url) -> anthropic.new_with_base_url(api_key, url)
 59    None -> anthropic.new(api_key)
 60  }
 61
 62  let chat =
 63    starlet.chat(client, model)
 64    |> starlet.system(system_prompt)
 65    |> starlet.user(user_prompt)
 66
 67  let chat = case reasoning {
 68    Some(effort) -> {
 69      let #(budget, max_tokens) = reasoning_budget(effort)
 70      case anthropic.with_thinking(chat, budget) {
 71        Ok(c) -> c |> starlet.max_tokens(max_tokens)
 72        Error(_) -> chat
 73      }
 74    }
 75    None -> chat
 76  }
 77
 78  chat
 79  |> starlet.send()
 80  |> result.map(fn(resp) { starlet.text(resp.1) })
 81  |> result.map_error(format_starlet_error)
 82}
 83
 84/// Calculate reasoning budget using OpenRouter's formula:
 85/// budget_tokens = max(min(max_tokens * effort_ratio, 128000), 1024)
 86/// Returns (budget, max_tokens) where max_tokens > budget
 87pub fn reasoning_budget(effort: config.ReasoningEffort) -> #(Int, Int) {
 88  let base_max = 64_000
 89  let ratio = case effort {
 90    config.ReasoningLow -> 0.2
 91    config.ReasoningMedium -> 0.5
 92    config.ReasoningHigh -> 0.8
 93  }
 94  let budget =
 95    int.max(int.min(float.truncate(int.to_float(base_max) *. ratio), 128_000), 1024)
 96  let max_tokens = budget + 16_384
 97  #(budget, max_tokens)
 98}
 99
/// Send a single-turn chat request to the Gemini API.
///
/// When a reasoning effort is given, a fixed thinking-token budget from
/// `reasoning_budget` is applied; `max_tokens` is left untouched. If the
/// provider rejects the thinking configuration, the plain request is sent
/// unchanged.
pub fn send_gemini(
  api_key: String,
  model: String,
  system_prompt: String,
  user_prompt: String,
  reasoning: Option(config.ReasoningEffort),
) -> Result(String, String) {
  let request =
    starlet.chat(gemini.new(api_key), model)
    |> starlet.system(system_prompt)
    |> starlet.user(user_prompt)

  let request = case reasoning {
    None -> request
    Some(effort) -> {
      // Only the budget half of the tuple matters for Gemini.
      let #(budget, _ceiling) = reasoning_budget(effort)
      case gemini.with_thinking(request, gemini.ThinkingFixed(budget)) {
        Error(_) -> request
        Ok(thinking) -> thinking
      }
    }
  }

  request
  |> starlet.send()
  |> result.map(fn(pair) { starlet.text(pair.1) })
  |> result.map_error(format_starlet_error)
}
130
/// Send a single-turn chat request to a self-hosted Ollama instance.
///
/// Fails immediately with a usage message when no endpoint is configured;
/// otherwise builds the conversation, optionally attaches a thinking
/// level, and returns the response text or a human-readable error string.
pub fn send_ollama(
  endpoint: String,
  model: String,
  system_prompt: String,
  user_prompt: String,
  reasoning: Option(config.ReasoningEffort),
) -> Result(String, String) {
  case endpoint {
    // An explicit endpoint is mandatory for Ollama.
    "" -> Error("Ollama requires --endpoint (e.g. http://localhost:11434)")
    url -> {
      let request =
        starlet.chat(ollama.new(url), model)
        |> starlet.system(system_prompt)
        |> starlet.user(user_prompt)

      // Translate the config-level effort into Ollama's thinking level.
      let request = case reasoning {
        None -> request
        Some(effort) -> {
          let level = case effort {
            config.ReasoningLow -> ollama.ThinkingLow
            config.ReasoningMedium -> ollama.ThinkingMedium
            config.ReasoningHigh -> ollama.ThinkingHigh
          }
          ollama.with_thinking(request, level)
        }
      }

      request
      |> starlet.send()
      |> result.map(fn(pair) { starlet.text(pair.1) })
      |> result.map_error(format_starlet_error)
    }
  }
}
164
/// Render a starlet error as a human-readable message for CLI output.
pub fn format_starlet_error(err: starlet.StarletError) -> String {
  case err {
    starlet.Transport(detail) -> "Network error: " <> detail
    starlet.Decode(detail) -> "Parse error: " <> detail
    starlet.Tool(_) -> "Tool error"
    starlet.Http(status, body) ->
      "HTTP " <> int.to_string(status) <> ": " <> body
    starlet.Provider(provider, detail, _) -> provider <> " error: " <> detail
    // Rate-limit responses may or may not carry a retry-after hint.
    starlet.RateLimited(None) -> "Rate limited"
    starlet.RateLimited(Some(seconds)) ->
      "Rate limited, retry after " <> int.to_string(seconds) <> "s"
  }
}