use anyhow::{anyhow, Result};
use assistant_slash_command::{
    ArgumentCompletion, SlashCommand, SlashCommandOutput, SlashCommandOutputSection,
    SlashCommandResult,
};
use feature_flags::FeatureFlag;
use futures::StreamExt;
use gpui::{AppContext, AsyncAppContext, AsyncWindowContext, Task, WeakView, WindowContext};
use language::{CodeLabel, LspAdapterDelegate};
use language_model::{
    LanguageModelCompletionEvent, LanguageModelRegistry, LanguageModelRequest,
    LanguageModelRequestMessage, Role,
};
use semantic_index::{FileSummary, SemanticDb};
use smol::channel;
use std::sync::{atomic::AtomicBool, Arc};
use ui::{prelude::*, BorrowAppContext};
use util::ResultExt;
use workspace::Workspace;

use crate::slash_command::create_label_for_command;

pub struct AutoSlashCommandFeatureFlag;

impl FeatureFlag for AutoSlashCommandFeatureFlag {
    const NAME: &'static str = "auto-slash-command";
}

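/// The `/auto` slash command: given a free-form prompt, it asks the active language model
/// which of the supported slash commands (see SUPPORTED_SLASH_COMMANDS) would add useful
/// context, then prepends those commands to the prompt.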
pub(crate) struct AutoCommand;

impl SlashCommand for AutoCommand {
    fn name(&self) -> String {
        "auto".into()
    }

    fn description(&self) -> String {
        "Automatically infer what context to add".into()
    }

    fn icon(&self) -> IconName {
        IconName::Wand
    }

    fn menu_text(&self) -> String {
        self.description()
    }

    fn label(&self, cx: &AppContext) -> CodeLabel {
        create_label_for_command("auto", &["--prompt"], cx)
    }

    fn complete_argument(
        self: Arc<Self>,
        _arguments: &[String],
        _cancel: Arc<AtomicBool>,
        workspace: Option<WeakView<Workspace>>,
        cx: &mut WindowContext,
    ) -> Task<Result<Vec<ArgumentCompletion>>> {
        // There's no autocomplete for a prompt, since it's arbitrary text.
        // However, we can use this opportunity to kick off a drain of the backlog.
        // That way, resummarizing can hopefully be finished by the time we've actually
        // typed out our prompt. This re-runs on every keystroke during autocomplete,
        // but in the future, we could instead do it only once, when /auto is first entered.
        let Some(workspace) = workspace.and_then(|ws| ws.upgrade()) else {
            log::warn!("workspace was dropped or unavailable during /auto autocomplete");

            return Task::ready(Ok(Vec::new()));
        };

        let project = workspace.read(cx).project().clone();
        let Some(project_index) =
            cx.update_global(|index: &mut SemanticDb, cx| index.project_index(project, cx))
        else {
            return Task::ready(Err(anyhow!("No project indexer, cannot use /auto")));
        };

        let cx: &mut AppContext = cx;

        cx.spawn(|cx: gpui::AsyncAppContext| async move {
            let task = project_index.read_with(&cx, |project_index, cx| {
                project_index.flush_summary_backlogs(cx)
            })?;

            cx.background_executor().spawn(task).await;

            anyhow::Ok(Vec::new())
        })
    }

    fn requires_argument(&self) -> bool {
        true
    }

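    // Running `/auto` has three steps: collect all file summaries from the project index,
    // ask the active model which supported slash commands would add useful context, and
    // emit those commands (followed by the original prompt) as this command's output.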
    fn run(
        self: Arc<Self>,
        arguments: &[String],
        _context_slash_command_output_sections: &[SlashCommandOutputSection<language::Anchor>],
        _context_buffer: language::BufferSnapshot,
        workspace: WeakView<Workspace>,
        _delegate: Option<Arc<dyn LspAdapterDelegate>>,
        cx: &mut WindowContext,
    ) -> Task<SlashCommandResult> {
        let Some(workspace) = workspace.upgrade() else {
            return Task::ready(Err(anyhow::anyhow!("workspace was dropped")));
        };
        if arguments.is_empty() {
            return Task::ready(Err(anyhow!("missing prompt")));
        };
        let argument = arguments.join(" ");
        let original_prompt = argument.to_string();
        let project = workspace.read(cx).project().clone();
        let Some(project_index) =
            cx.update_global(|index: &mut SemanticDb, cx| index.project_index(project, cx))
        else {
            return Task::ready(Err(anyhow!("no project indexer")));
        };

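        // Gather every file summary from the project index, then hand them (plus the
        // original prompt) to the model so it can propose slash commands to run.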
        let task = cx.spawn(|cx: AsyncWindowContext| async move {
            let summaries = project_index
                .read_with(&cx, |project_index, cx| project_index.all_summaries(cx))?
                .await?;

            commands_for_summaries(&summaries, &original_prompt, &cx).await
        });

        // As a convenience, append /auto's argument to the end of the prompt
        // so you don't have to write it again.
        let original_prompt = argument.to_string();

        cx.background_executor().spawn(async move {
            let commands = task.await?;
            let mut prompt = String::new();

            log::info!(
                "Translating this response into slash-commands: {:?}",
                commands
            );

            for command in commands {
                prompt.push('/');
                prompt.push_str(&command.name);
                prompt.push(' ');
                prompt.push_str(&command.arg);
                prompt.push('\n');
            }

            prompt.push('\n');
            prompt.push_str(&original_prompt);

            Ok(SlashCommandOutput {
                text: prompt,
                sections: Vec::new(),
                run_commands_in_text: true,
            }
            .to_event_stream())
        })
    }
}

const PROMPT_INSTRUCTIONS_BEFORE_SUMMARY: &str = include_str!("prompt_before_summary.txt");
const PROMPT_INSTRUCTIONS_AFTER_SUMMARY: &str = include_str!("prompt_after_summary.txt");

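/// Assembles the full prompt sent to the model: the "before" instructions, the file
/// summaries serialized as JSON, the "after" instructions, and finally the user's
/// original /auto prompt.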
fn summaries_prompt(summaries: &[FileSummary], original_prompt: &str) -> String {
    let json_summaries = serde_json::to_string(summaries).unwrap();

    format!("{PROMPT_INSTRUCTIONS_BEFORE_SUMMARY}\n{json_summaries}\n{PROMPT_INSTRUCTIONS_AFTER_SUMMARY}\n{original_prompt}")
}

/// The slash commands that the model is told about, and which we look for in the inference response.
const SUPPORTED_SLASH_COMMANDS: &[&str] = &["search", "file"];

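/// A slash command the model asked us to run: its name (without the leading slash) and its argument.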
#[derive(Debug, Clone)]
struct CommandToRun {
    name: String,
    arg: String,
}

/// Given the pre-indexed file summaries for this project, as well as the original prompt
/// string passed to `/auto`, get a list of slash commands to run, along with their arguments.
///
/// The prompt's output does not include the slashes (to reduce the chance that it makes a mistake),
/// so taking one of these returned Strings and turning it into a real slash-command-with-argument
/// involves prepending a slash to it.
///
/// This function will validate that each of the returned lines begins with one of SUPPORTED_SLASH_COMMANDS.
/// Any other lines it encounters will be discarded, with a warning logged.
async fn commands_for_summaries(
    summaries: &[FileSummary],
    original_prompt: &str,
    cx: &AsyncAppContext,
) -> Result<Vec<CommandToRun>> {
    if summaries.is_empty() {
        log::warn!("Inferring no context because there were no summaries available.");
        return Ok(Vec::new());
    }

    // Use the globally configured model to translate the summaries into slash-commands,
    // because Qwen2-7B-Instruct has not done a good job at that task.
    let Some(model) = cx.update(|cx| LanguageModelRegistry::read_global(cx).active_model())? else {
        log::warn!("Can't infer context because there's no active model.");
        return Ok(Vec::new());
    };
    // Only go up to 90% of the actual max token count, to reduce the chances of
    // exceeding the token count due to inaccuracies in the token counting heuristic.
    let max_token_count = (model.max_token_count() * 9) / 10;

    // Rather than recursing (which would require this async function to use a pinned box),
    // we use an explicit stack of arguments and answers for when we need to "recurse."
    let mut stack = vec![summaries];
    let mut final_response = Vec::new();
    let mut prompts = Vec::new();

    // TODO: We only need to create multiple requests because we currently
    // don't have the ability to tell whether a CompletionProvider::complete response
    // was a "too many tokens in this request" error. If we had that, then
    // we could try the request once, instead of having to make separate requests
    // to check the token count and then afterwards run the actual prompt.
    let make_request = |prompt: String| LanguageModelRequest {
        messages: vec![LanguageModelRequestMessage {
            role: Role::User,
            content: vec![prompt.into()],
            // Nothing in here will benefit from caching
            cache: false,
        }],
        tools: Vec::new(),
        stop: Vec::new(),
        temperature: None,
    };

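    // Build one prompt per batch of summaries. Whenever a batch's estimated token count
    // exceeds the budget, split it in half and push both halves back onto the stack.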
    while let Some(current_summaries) = stack.pop() {
        // The split can result in one slice being empty and the other having one element.
        // Whenever that happens, skip the empty one.
        if current_summaries.is_empty() {
            continue;
        }

        log::info!(
            "Inferring prompt context using {} file summaries",
            current_summaries.len()
        );

        let prompt = summaries_prompt(&current_summaries, original_prompt);
        let start = std::time::Instant::now();
        // Per OpenAI, 1 token ~= 4 chars in English (we go with 4.5 to overestimate a bit, because failed API requests cost a lot of perf)
        // Verifying this against an actual model.count_tokens() confirms that it's usually within ~5% of the correct answer, whereas
        // getting the correct answer from tiktoken takes hundreds of milliseconds (compared to this arithmetic being ~free).
        // source: https://help.openai.com/en/articles/4936856-what-are-tokens-and-how-to-count-them
        let token_estimate = prompt.len() * 2 / 9;
        let duration = start.elapsed();
        log::info!(
            "Time taken to count tokens for prompt of length {:?}B: {:?}",
            prompt.len(),
            duration
        );

        if token_estimate < max_token_count {
            prompts.push(prompt);
        } else if current_summaries.len() == 1 {
            log::warn!("Inferring context for a single file's summary failed because the prompt's token length exceeded the model's token limit.");
        } else {
            log::info!(
                "Context inference using file summaries resulted in a prompt containing {token_estimate} tokens, which exceeded the model's max of {max_token_count}. Retrying as two separate prompts, each including half the number of summaries.",
            );
            let (left, right) = current_summaries.split_at(current_summaries.len() / 2);
            stack.push(right);
            stack.push(left);
        }
    }

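    // Stream all of the completions concurrently on the background executor, parsing each
    // response line into a `CommandToRun` and funneling the results through a channel.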
    let all_start = std::time::Instant::now();

    let (tx, rx) = channel::bounded(1024);

    let completion_streams = prompts
        .into_iter()
        .map(|prompt| {
            let request = make_request(prompt.clone());
            let model = model.clone();
            let tx = tx.clone();
            let stream = model.stream_completion(request, &cx);

            (stream, tx)
        })
        .collect::<Vec<_>>();

    cx.background_executor()
        .spawn(async move {
            let futures = completion_streams
                .into_iter()
                .enumerate()
                .map(|(ix, (stream, tx))| async move {
                    let start = std::time::Instant::now();
                    let events = stream.await?;
                    log::info!("Time taken for awaiting /auto chunk stream #{ix}: {:?}", start.elapsed());

                    let completion: String = events
                        .filter_map(|event| async {
                            if let Ok(LanguageModelCompletionEvent::Text(text)) = event {
                                Some(text)
                            } else {
                                None
                            }
                        })
                        .collect()
                        .await;

                    log::info!("Time taken for all /auto chunks to come back for #{ix}: {:?}", start.elapsed());

                    for line in completion.split('\n') {
                        if let Some(first_space) = line.find(' ') {
                            let command = &line[..first_space].trim();
                            let arg = &line[first_space..].trim();

                            tx.send(CommandToRun {
                                name: command.to_string(),
                                arg: arg.to_string(),
                            })
                            .await?;
                        } else if !line.trim().is_empty() {
                            // All slash-commands currently supported in context inference need a space for the argument.
                            log::warn!(
                                "Context inference returned a non-blank line that contained no spaces (meaning no argument for the slash command): {:?}",
                                line
                            );
                        }
                    }

                    anyhow::Ok(())
                })
                .collect::<Vec<_>>();

            let _ = futures::future::try_join_all(futures).await.log_err();

            let duration = all_start.elapsed();
            eprintln!("All futures completed in {:?}", duration);
        })
        .await;

    drop(tx); // Close the channel so that rx.collect() won't hang. This is safe because all futures have completed.
    let results = rx.collect::<Vec<_>>().await;
    eprintln!(
        "Finished collecting from the channel with {} results",
        results.len()
    );
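    // Filter the model's suggestions down to supported, non-duplicate commands.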
    for command in results {
        // Don't return empty or duplicate commands
        if !command.name.is_empty()
            && !final_response
                .iter()
                .any(|cmd: &CommandToRun| cmd.name == command.name && cmd.arg == command.arg)
        {
            if SUPPORTED_SLASH_COMMANDS
                .iter()
                .any(|supported| &command.name == supported)
            {
                final_response.push(command);
            } else {
                log::warn!(
                    "Context inference returned an unrecognized slash command: {:?}",
                    command
                );
            }
        }
    }

    // Sort the commands by name (reversed just so that /search appears before /file)
    final_response.sort_by(|cmd1, cmd2| cmd1.name.cmp(&cmd2.name).reverse());

    Ok(final_response)
}