1// SPDX-FileCopyrightText: Amolith <amolith@secluded.site>
2//
3// SPDX-License-Identifier: AGPL-3.0-or-later
4
5import api_key
6import argv
7import clients
8import config
9import gleam/io
10import gleam/option.{None, Some}
11import gleam/result
12import gleam/string
13import gleam/yielder
14import glint
15import openai_compat
16import prompts
17import providers.{type Provider}
18import stdin
19
// FFI binding to erlang:halt/1 — terminates the VM immediately with the
// given exit status. Used to exit non-zero on error without a panic trace.
@external(erlang, "erlang", "halt")
fn halt(status: Int) -> Nil
22
/// Entry point: build the glint CLI app (single root command) and run it
/// against the process arguments.
pub fn main() {
  let app =
    glint.new()
    |> glint.with_name("garble")
    |> glint.pretty_help(glint.default_pretty_help())
    |> glint.add(at: [], do: garble_command())
  glint.run(app, argv.load().arguments)
}
30
// The root (and only) CLI command: reads stdin, merges CLI flags with the
// config file, resolves the effective reasoning setting, dispatches the
// request to the selected provider, and prints the transformed output.
// On any error, prints to stderr and exits with status 1.
fn garble_command() -> glint.Command(Nil) {
  use <- glint.command_help("Transform stdin with an LLM")
  use directions <- glint.flag(
    glint.string_flag("directions")
    |> glint.flag_default("")
    |> glint.flag_help("Directions for how to transform the input"),
  )
  use model <- glint.flag(
    glint.string_flag("model")
    |> glint.flag_default("")
    |> glint.flag_help("Model to use (e.g. gpt-4o, claude-3-opus)"),
  )
  use provider <- glint.flag(
    glint.string_flag("provider")
    |> glint.flag_default("")
    |> glint.flag_help("Provider (e.g. openai, anthropic)"),
  )
  use reasoning <- glint.flag(
    glint.string_flag("reasoning")
    |> glint.flag_default("")
    |> glint.flag_help("Reasoning effort for thinking models (low, medium, high)"),
  )
  use endpoint <- glint.flag(
    glint.string_flag("endpoint")
    |> glint.flag_default("")
    |> glint.flag_help("API endpoint URL (required for ollama, e.g. http://localhost:11434)"),
  )
  use _, _args, flags <- glint.command()

  // Load config file (if present) and merge with CLI flags.
  // Flags all default to "", so `config.merge` presumably treats the empty
  // string as "not given on the CLI" — TODO confirm against config.merge.
  let cfg = config.load()
  // These asserts are safe: every flag has a default, so lookup cannot fail.
  let assert Ok(directions_cli) = directions(flags)
  let assert Ok(model_cli) = model(flags)
  let assert Ok(provider_cli) = provider(flags)
  let assert Ok(reasoning_cli) = reasoning(flags)
  let assert Ok(endpoint_cli) = endpoint(flags)
  let merged =
    config.merge(
      cfg,
      cli_provider: provider_cli,
      cli_model: model_cli,
      cli_directions: directions_cli,
      cli_reasoning: reasoning_cli,
      cli_endpoint: endpoint_cli,
    )

  // Read all stdin into a single string. Lines are joined with "" because
  // stdin.read_lines is assumed to preserve trailing newlines on each
  // yielded line — TODO confirm against the stdin module.
  let input =
    stdin.read_lines()
    |> yielder.to_list()
    |> string.join("")

  // Build the user message with raw input and directions
  let user_message = prompts.build_user_message(input, merged.directions)

  // Ollama only distinguishes "effort requested" vs "not"; Disabled and
  // NotSet both collapse to None here.
  let ollama_reasoning = case merged.reasoning {
    config.ReasoningEnabled(effort) -> Some(effort)
    _ -> None
  }

  // "ollama" bypasses the provider registry entirely (user-supplied
  // endpoint, no API key); every other provider id goes through
  // providers.get_provider for metadata lookup.
  let result = case merged.provider {
    "ollama" ->
      clients.send_ollama(
        merged.endpoint,
        merged.model,
        prompts.system(),
        user_message,
        ollama_reasoning,
      )
    _ ->
      case providers.get_provider(merged.provider) {
        Ok(provider_info) -> {
          // Resolve reasoning: explicit setting wins, then model default, then none
          let effective_reasoning = case merged.reasoning {
            config.ReasoningEnabled(effort) -> Some(effort)
            config.ReasoningDisabled -> None
            config.ReasoningNotSet -> {
              // No explicit setting: fall back to the model's declared
              // default effort, if the model is known and declares one.
              let model_info = providers.get_model(provider_info, merged.model)
              case model_info {
                Ok(m) ->
                  case m.default_reasoning_effort {
                    Some(default) ->
                      // The default is stored as a string; re-parse it and
                      // drop it silently if it isn't a valid enabled effort.
                      case config.parse_reasoning(default) {
                        config.ReasoningEnabled(effort) -> Some(effort)
                        _ -> None
                      }
                    None -> None
                  }
                Error(_) -> None
              }
            }
          }
          send_request(provider_info, merged, effective_reasoning, prompts.system(), user_message)
        }
        Error(providers.FetchError(msg)) ->
          Error("Error fetching providers: " <> msg)
        Error(providers.ProviderNotFound(id)) -> Error("Unknown provider: " <> id)
        Error(providers.ModelNotFound(provider, model)) ->
          Error(
            "Unknown model '" <> model <> "' for provider '" <> provider <> "'",
          )
      }
  }

  // Success: print only the extracted code block from the response.
  // Failure: message to stderr, exit 1.
  case result {
    Ok(response) -> io.print(prompts.extract_code_block(response))
    Error(msg) -> {
      io.println_error(msg)
      halt(1)
    }
  }
}
143
/// Dispatch a chat completion request to the client matching the provider's
/// declared type. Fetches the API key first; any failure (missing key,
/// missing endpoint, unknown provider type) is returned as an Error string.
fn send_request(
  provider: Provider,
  cfg: config.Config,
  reasoning: option.Option(config.ReasoningEffort),
  system: String,
  user_prompt: String,
) -> Result(String, String) {
  // Resolve the API key before dispatching; bail out early on failure.
  use api_key_value <- result.try(api_key.get(provider, cfg))

  // Route on the provider's type string. The catch-all must stay last.
  case provider.provider_type {
    "openai" ->
      clients.send_openai(
        api_key_value,
        None,
        cfg.model,
        system,
        user_prompt,
        reasoning,
      )
    "anthropic" ->
      clients.send_anthropic(
        api_key_value,
        None,
        cfg.model,
        system,
        user_prompt,
        reasoning,
      )
    "google" ->
      clients.send_gemini(api_key_value, cfg.model, system, user_prompt, reasoning)
    "openai-compat" ->
      // OpenAI-compatible providers carry their own endpoint; without one
      // there is nowhere to send the request.
      case provider.api_endpoint {
        Some(url) ->
          openai_compat.send(url, api_key_value, cfg.model, system, user_prompt, reasoning)
        None -> Error("No endpoint configured for " <> provider.id)
      }
    other -> Error("Unsupported provider type: " <> other)
  }
}