garble.gleam

// SPDX-FileCopyrightText: Amolith <amolith@secluded.site>
//
// SPDX-License-Identifier: AGPL-3.0-or-later

import api_key
import argv
import clients
import config
import gleam/io
import gleam/option.{None, Some}
import gleam/result
import gleam/string
import gleam/yielder
import glint
import prompts
import providers.{type Provider}
import stdin

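// Bind Erlang's `erlang:halt/1` so errors can signal a non-zero exit status.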
@external(erlang, "erlang", "halt")
fn halt(status: Int) -> Nil

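// Entry point: build the glint CLI and run it against the command-line arguments.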
pub fn main() {
  glint.new()
  |> glint.with_name("garble")
  |> glint.pretty_help(glint.default_pretty_help())
  |> glint.add(at: [], do: garble_command())
  |> glint.run(argv.load().arguments)
}

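// The root command: read stdin, have the configured LLM transform it per the
// given directions, and print the result.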
fn garble_command() -> glint.Command(Nil) {
  use <- glint.command_help("Transform stdin with an LLM")
  use directions <- glint.flag(
    glint.string_flag("directions")
    |> glint.flag_default("")
    |> glint.flag_help("Directions for how to transform the input"),
  )
  use model <- glint.flag(
    glint.string_flag("model")
    |> glint.flag_default("")
    |> glint.flag_help("Model to use (e.g. gpt-4o, claude-3-opus)"),
  )
  use provider <- glint.flag(
    glint.string_flag("provider")
    |> glint.flag_default("")
    |> glint.flag_help("Provider (e.g. openai, anthropic)"),
  )
  use reasoning <- glint.flag(
    glint.string_flag("reasoning")
    |> glint.flag_default("")
    |> glint.flag_help(
      "Reasoning effort for thinking models (low, medium, high)",
    ),
  )
  use endpoint <- glint.flag(
    glint.string_flag("endpoint")
    |> glint.flag_default("")
    |> glint.flag_help(
      "API endpoint URL (required for ollama, e.g. http://localhost:11434)",
    ),
  )
  use _, _args, flags <- glint.command()

  // Load config file (if present) and merge with CLI flags
  let cfg = config.load()
  let assert Ok(directions_cli) = directions(flags)
  let assert Ok(model_cli) = model(flags)
  let assert Ok(provider_cli) = provider(flags)
  let assert Ok(reasoning_cli) = reasoning(flags)
  let assert Ok(endpoint_cli) = endpoint(flags)
  let merged =
    config.merge(
      cfg,
      cli_provider: provider_cli,
      cli_model: model_cli,
      cli_directions: directions_cli,
      cli_reasoning: reasoning_cli,
      cli_endpoint: endpoint_cli,
    )

  // Read all stdin into a single string
  let input =
    stdin.read_lines()
    |> yielder.to_list()
    |> string.join("")

  // Build the user message with raw input and directions
  let user_message = prompts.build_user_message(input, merged.directions)

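  // Ollama only receives a reasoning effort when one was explicitly enabled;
  // other providers fall back to per-model defaults below.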
  let ollama_reasoning = case merged.reasoning {
    config.ReasoningEnabled(effort) -> Some(effort)
    _ -> None
  }

  let result = case merged.provider {
    "ollama" ->
      clients.send_ollama(
        merged.endpoint,
        merged.model,
        prompts.system(),
        user_message,
        ollama_reasoning,
      )
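    // Every other provider is resolved through the providers registry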
    _ ->
      case providers.get_provider(merged.provider) {
        Ok(provider_info) -> {
          // Resolve reasoning: explicit setting wins, then model default, then none
          let effective_reasoning = case merged.reasoning {
            config.ReasoningEnabled(effort) -> Some(effort)
            config.ReasoningDisabled -> None
            config.ReasoningNotSet -> {
              let model_info = providers.get_model(provider_info, merged.model)
              case model_info {
                Ok(m) ->
                  case m.default_reasoning_effort {
                    Some(default) ->
                      case config.parse_reasoning(default) {
                        config.ReasoningEnabled(effort) -> Some(effort)
                        _ -> None
                      }
                    None -> None
                  }
                Error(_) -> None
              }
            }
          }
          send_request(
            provider_info,
            merged,
            effective_reasoning,
            prompts.system(),
            user_message,
          )
        }
        Error(providers.FetchError(msg)) ->
          Error("Error fetching providers: " <> msg)
        Error(providers.ProviderNotFound(id)) ->
          Error("Unknown provider: " <> id)
        Error(providers.ModelNotFound(provider, model)) ->
          Error(
            "Unknown model '" <> model <> "' for provider '" <> provider <> "'",
          )
      }
  }

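  // On success, print only the code block extracted from the response;
  // on failure, report to stderr and exit non-zero.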
  case result {
    Ok(response) -> io.print(prompts.extract_code_block(response))
    Error(msg) -> {
      io.println_error(msg)
      halt(1)
    }
  }
}

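// Resolve the provider's API key, then dispatch to the matching client
// based on the provider's type.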
fn send_request(
  provider: Provider,
  cfg: config.Config,
  reasoning: option.Option(config.ReasoningEffort),
  system: String,
  user_prompt: String,
) -> Result(String, String) {
  use key <- result.try(api_key.get(provider, cfg))

  case provider.provider_type {
    "openai" ->
      clients.send_openai(key, None, cfg.model, system, user_prompt, reasoning)
    "anthropic" ->
      clients.send_anthropic(
        key,
        None,
        cfg.model,
        system,
        user_prompt,
        reasoning,
      )
    "google" ->
      clients.send_gemini(key, cfg.model, system, user_prompt, reasoning)
    "openai-compat" -> {
      case provider.api_endpoint {
        Some(endpoint) ->
          clients.send_openai_compat(
            key,
            endpoint,
            cfg.model,
            system,
            user_prompt,
            reasoning,
            cfg.dialect,
          )
        None -> Error("No endpoint configured for " <> provider.id)
      }
    }
    other -> Error("Unsupported provider type: " <> other)
  }
}