1use crate::prompts::base::{PromptArguments, PromptTemplate};
2use std::fmt::Write;
3use std::{ops::Range, path::PathBuf};
4
5use gpui::{AsyncAppContext, Model};
6use language::{Anchor, Buffer};
7
/// A code excerpt captured from a buffer, carried into prompt templates
/// together with its originating file path and (lowercased) language name.
// Debug derived so snippets show up usefully in logs/assertions; all fields
// (Option<PathBuf>, Option<String>, String) are Debug themselves.
#[derive(Clone, Debug)]
pub struct PromptCodeSnippet {
    // Path of the backing file; None for unsaved/untitled buffers.
    path: Option<PathBuf>,
    // Lowercased language name; None when the buffer has no language.
    language_name: Option<String>,
    // The snippet text itself.
    content: String,
}
14
15impl PromptCodeSnippet {
16 pub fn new(
17 buffer: Model<Buffer>,
18 range: Range<Anchor>,
19 cx: &mut AsyncAppContext,
20 ) -> anyhow::Result<Self> {
21 let (content, language_name, file_path) = buffer.update(cx, |buffer, _| {
22 let snapshot = buffer.snapshot();
23 let content = snapshot.text_for_range(range.clone()).collect::<String>();
24
25 let language_name = buffer
26 .language()
27 .map(|language| language.name().to_string().to_lowercase());
28
29 let file_path = buffer.file().map(|file| file.path().to_path_buf());
30
31 (content, language_name, file_path)
32 })?;
33
34 anyhow::Ok(PromptCodeSnippet {
35 path: file_path,
36 language_name,
37 content,
38 })
39 }
40}
41
42impl ToString for PromptCodeSnippet {
43 fn to_string(&self) -> String {
44 let path = self
45 .path
46 .as_ref()
47 .map(|path| path.to_string_lossy().to_string())
48 .unwrap_or("".to_string());
49 let language_name = self.language_name.clone().unwrap_or("".to_string());
50 let content = self.content.clone();
51
52 format!("The below code snippet may be relevant from file: {path}\n```{language_name}\n{content}\n```")
53 }
54}
55
/// Prompt template that renders repository code snippets as context.
/// Stateless: all inputs arrive through `PromptArguments` in `generate`.
pub struct RepositoryContext {}
57
impl PromptTemplate for RepositoryContext {
    /// Assembles a prompt section from `args.snippets`, enforcing a fixed
    /// per-snippet token cap and, when `max_token_length` is `Some`, an
    /// overall token budget across all admitted snippets.
    ///
    /// Returns the assembled prompt and its total token count as measured by
    /// `args.model`; errors bubble up from the model's token counting.
    fn generate(
        &self,
        args: &PromptArguments,
        max_token_length: Option<usize>,
    ) -> anyhow::Result<(String, usize)> {
        // Snippets whose rendered form exceeds this are dropped entirely.
        const MAXIMUM_SNIPPET_TOKEN_COUNT: usize = 500;
        // NOTE(review): this preamble is prepended to *every* snippet below,
        // so it repeats once per admitted snippet — confirm that is intended
        // rather than a single preamble for the whole section.
        let template = "You are working inside a large repository, here are a few code snippets that may be useful.";
        let mut prompt = String::new();

        // `None` budget means unlimited: every under-cap snippet is kept.
        let mut remaining_tokens = max_token_length;
        let separator_token_length = args.model.count_tokens("\n")?;
        for snippet in &args.snippets {
            // Render this snippet as "<template><snippet text>\n".
            let mut snippet_prompt = template.to_string();
            let content = snippet.to_string();
            writeln!(snippet_prompt, "{content}").unwrap();

            let token_count = args.model.count_tokens(&snippet_prompt)?;
            if token_count <= MAXIMUM_SNIPPET_TOKEN_COUNT {
                if let Some(tokens_left) = remaining_tokens {
                    if tokens_left >= token_count {
                        writeln!(prompt, "{snippet_prompt}").unwrap();
                        // Deduct the snippet plus one separator, saturating at
                        // zero when the separator alone would overdraw.
                        remaining_tokens = if tokens_left >= (token_count + separator_token_length)
                        {
                            Some(tokens_left - token_count - separator_token_length)
                        } else {
                            Some(0)
                        };
                    }
                    // An over-budget snippet is skipped but the loop continues,
                    // so a later, smaller snippet may still fit.
                } else {
                    // Unlimited budget: admit every snippet under the cap.
                    writeln!(prompt, "{snippet_prompt}").unwrap();
                }
            }
        }

        // Re-measure the final prompt rather than trusting the running budget.
        let total_token_count = args.model.count_tokens(&prompt)?;
        anyhow::Ok((prompt, total_token_count))
    }
}