// SPDX-FileCopyrightText: Amolith
//
// SPDX-License-Identifier: AGPL-3.0-or-later

import api_key
import argv
import clients
import config
import gleam/io
import gleam/option.{None, Some}
import gleam/result
import gleam/string
import gleam/yielder
import glint
import openai_compat
import prompts
import providers.{type Provider}
import stdin

// Erlang FFI: exit the VM with the given status code.
@external(erlang, "erlang", "halt")
fn halt(status: Int) -> Nil

pub fn main() {
  glint.new()
  |> glint.with_name("garble")
  |> glint.pretty_help(glint.default_pretty_help())
  |> glint.add(at: [], do: garble_command())
  |> glint.run(argv.load().arguments)
}

fn garble_command() -> glint.Command(Nil) {
  use <- glint.command_help("Transform stdin with an LLM")
  use directions <- glint.flag(
    glint.string_flag("directions")
    |> glint.flag_default("")
    |> glint.flag_help("Directions for how to transform the input"),
  )
  use model <- glint.flag(
    glint.string_flag("model")
    |> glint.flag_default("")
    |> glint.flag_help("Model to use (e.g. gpt-4o, claude-3-opus)"),
  )
  use provider <- glint.flag(
    glint.string_flag("provider")
    |> glint.flag_default("")
    |> glint.flag_help("Provider (e.g. openai, anthropic)"),
  )
  use reasoning <- glint.flag(
    glint.string_flag("reasoning")
    |> glint.flag_default("")
    |> glint.flag_help("Reasoning effort for thinking models (low, medium, high)"),
  )
  use endpoint <- glint.flag(
    glint.string_flag("endpoint")
    |> glint.flag_default("")
    |> glint.flag_help(
      "API endpoint URL (required for ollama, e.g. http://localhost:11434)",
    ),
  )
  use _, _args, flags <- glint.command()

  // Load config file (if present) and merge with CLI flags
  let cfg = config.load()
  let assert Ok(directions_cli) = directions(flags)
  let assert Ok(model_cli) = model(flags)
  let assert Ok(provider_cli) = provider(flags)
  let assert Ok(reasoning_cli) = reasoning(flags)
  let assert Ok(endpoint_cli) = endpoint(flags)
  let merged =
    config.merge(
      cfg,
      cli_provider: provider_cli,
      cli_model: model_cli,
      cli_directions: directions_cli,
      cli_reasoning: reasoning_cli,
      cli_endpoint: endpoint_cli,
    )

  // Read all stdin into a single string
  let input =
    stdin.read_lines()
    |> yielder.to_list()
    |> string.join("")

  // Build the user message with raw input and directions
  let user_message = prompts.build_user_message(input, merged.directions)

  // Ollama only receives a reasoning effort when it was explicitly enabled
  let ollama_reasoning = case merged.reasoning {
    config.ReasoningEnabled(effort) -> Some(effort)
    _ -> None
  }

  // Ollama bypasses the provider registry; everything else is looked up there
  let result = case merged.provider {
    "ollama" ->
      clients.send_ollama(
        merged.endpoint,
        merged.model,
        prompts.system(),
        user_message,
        ollama_reasoning,
      )
    _ ->
      case providers.get_provider(merged.provider) {
        Ok(provider_info) -> {
          // Resolve reasoning: explicit setting wins, then model default, then none
          let effective_reasoning = case merged.reasoning {
            config.ReasoningEnabled(effort) -> Some(effort)
            config.ReasoningDisabled -> None
            config.ReasoningNotSet -> {
              let model_info = providers.get_model(provider_info, merged.model)
              case model_info {
                Ok(m) ->
                  case m.default_reasoning_effort {
                    Some(default) ->
                      case config.parse_reasoning(default) {
                        config.ReasoningEnabled(effort) -> Some(effort)
                        _ -> None
                      }
                    None -> None
                  }
                Error(_) -> None
              }
            }
          }
          send_request(
            provider_info,
            merged,
            effective_reasoning,
            prompts.system(),
            user_message,
          )
        }
        Error(providers.FetchError(msg)) ->
          Error("Error fetching providers: " <> msg)
        Error(providers.ProviderNotFound(id)) ->
          Error("Unknown provider: " <> id)
        Error(providers.ModelNotFound(provider, model)) ->
          Error(
            "Unknown model '" <> model <> "' for provider '" <> provider <> "'",
          )
      }
  }

  case result {
    Ok(response) -> io.print(prompts.extract_code_block(response))
    Error(msg) -> {
      io.println_error(msg)
      halt(1)
    }
  }
}

// Resolve the provider's API key, then dispatch to the client matching the
// provider's API type.
fn send_request(
  provider: Provider,
  cfg: config.Config,
  reasoning: option.Option(config.ReasoningEffort),
  system: String,
  user_prompt: String,
) -> Result(String, String) {
  use key <- result.try(api_key.get(provider, cfg))
  case provider.provider_type {
    "openai" ->
      clients.send_openai(key, None, cfg.model, system, user_prompt, reasoning)
    "anthropic" ->
      clients.send_anthropic(key, None, cfg.model, system, user_prompt, reasoning)
    "google" ->
      clients.send_gemini(key, cfg.model, system, user_prompt, reasoning)
    "openai-compat" ->
      case provider.api_endpoint {
        Some(endpoint) ->
          openai_compat.send(endpoint, key, cfg.model, system, user_prompt, reasoning)
        None -> Error("No endpoint configured for " <> provider.id)
      }
    other -> Error("Unsupported provider type: " <> other)
  }
}