added dumb truncation strategies to file_context and generate

Created by KCaverly

Change summary

crates/ai/src/templates/base.rs         | 26 ++------------------------
crates/ai/src/templates/file_context.rs | 12 +++++-------
crates/ai/src/templates/generate.rs     | 11 +++++------
3 files changed, 12 insertions(+), 37 deletions(-)

Detailed changes

crates/ai/src/templates/base.rs

@@ -1,4 +1,3 @@
-use anyhow::anyhow;
 use std::cmp::Reverse;
 use std::ops::Range;
 use std::sync::Arc;
@@ -96,36 +95,15 @@ impl PromptChain {
 
         let mut prompts = vec!["".to_string(); sorted_indices.len()];
         for idx in sorted_indices {
-            let (priority, template) = &self.templates[idx];
-
-            // If PromptPriority is marked as mandatory, we ignore the tokens outstanding
-            // However, if a prompt is generated in excess of the available tokens,
-            // we raise an error outlining that a mandatory prompt has exceeded the available
-            // balance
-            let template_tokens = if let Some(template_tokens) = tokens_outstanding {
-                match priority {
-                    &PromptPriority::Mandatory => None,
-                    _ => Some(template_tokens),
-                }
-            } else {
-                None
-            };
+            let (_, template) = &self.templates[idx];
 
             if let Some((template_prompt, prompt_token_count)) =
-                template.generate(&self.args, template_tokens).log_err()
+                template.generate(&self.args, tokens_outstanding).log_err()
             {
                 if template_prompt != "" {
                     prompts[idx] = template_prompt;
 
                     if let Some(remaining_tokens) = tokens_outstanding {
-                        if prompt_token_count > remaining_tokens
-                            && priority == &PromptPriority::Mandatory
-                        {
-                            return Err(anyhow!(
-                                "mandatory template added in excess of model capacity"
-                            ));
-                        }
-
                         let new_tokens = prompt_token_count + seperator_tokens;
                         tokens_outstanding = if remaining_tokens > new_tokens {
                             Some(remaining_tokens - new_tokens)

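With the mandatory-priority special case removed, every template in the chain now receives the remaining token budget and is expected to fit itself into it (the templates below do so by truncating). A minimal sketch of how the loop threads a shrinking budget through successive templates, with the chain and template types simplified to closures for illustration:

```rust
// Sketch only: each "template" takes an optional token budget and returns
// a prompt plus its token count, mirroring PromptTemplate::generate above.
fn generate_chain(
    templates: &[Box<dyn Fn(Option<usize>) -> (String, usize)>],
    mut tokens_outstanding: Option<usize>,
    separator_tokens: usize,
) -> Vec<String> {
    let mut prompts = Vec::new();
    for template in templates {
        let (prompt, token_count) = template(tokens_outstanding);
        if !prompt.is_empty() {
            prompts.push(prompt);
            // Shrink the budget by what this template used plus the separator.
            if let Some(remaining) = tokens_outstanding {
                tokens_outstanding =
                    Some(remaining.saturating_sub(token_count + separator_tokens));
            }
        }
    }
    prompts
}
```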
crates/ai/src/templates/file_context.rs

@@ -1,4 +1,3 @@
-use anyhow::anyhow;
 use language::ToOffset;
 
 use crate::templates::base::PromptArguments;
@@ -13,12 +12,6 @@ impl PromptTemplate for FileContext {
         args: &PromptArguments,
         max_token_length: Option<usize>,
     ) -> anyhow::Result<(String, usize)> {
-        if max_token_length.is_some() {
-            return Err(anyhow!(
-                "no truncation strategy established for file_context template"
-            ));
-        }
-
         let mut prompt = String::new();
 
         // Add Initial Preamble
@@ -84,6 +77,11 @@ impl PromptTemplate for FileContext {
             }
         }
 
+        // Really dumb truncation strategy
+        if let Some(max_tokens) = max_token_length {
+            prompt = args.model.truncate(&prompt, max_tokens)?;
+        }
+
         let token_count = args.model.count_tokens(&prompt)?;
         anyhow::Ok((prompt, token_count))
     }

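The new truncation path defers to the prompt model's `truncate` method to cut the assembled prompt down to the remaining budget. A minimal sketch of what such a "really dumb" strategy might look like, assuming a naive whitespace tokenizer in place of the model's real one (`SimpleModel` and its methods are hypothetical, for illustration only):

```rust
// Hypothetical stand-in for the prompt model; the real model would count and
// truncate with its actual tokenizer rather than splitting on whitespace.
struct SimpleModel;

impl SimpleModel {
    fn count_tokens(&self, text: &str) -> anyhow::Result<usize> {
        Ok(text.split_whitespace().count())
    }

    // "Really dumb" truncation: keep the first `max_tokens` tokens, drop the rest.
    fn truncate(&self, text: &str, max_tokens: usize) -> anyhow::Result<String> {
        Ok(text
            .split_whitespace()
            .take(max_tokens)
            .collect::<Vec<_>>()
            .join(" "))
    }
}

fn main() -> anyhow::Result<()> {
    let model = SimpleModel;
    let prompt = "You are an expert programmer. Rewrite the selection below.";
    let truncated = model.truncate(prompt, 5)?;
    assert!(model.count_tokens(&truncated)? <= 5);
    println!("{truncated}");
    Ok(())
}
```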
crates/ai/src/templates/generate.rs

@@ -18,12 +18,6 @@ impl PromptTemplate for GenerateInlineContent {
         args: &PromptArguments,
         max_token_length: Option<usize>,
     ) -> anyhow::Result<(String, usize)> {
-        if max_token_length.is_some() {
-            return Err(anyhow!(
-                "no truncation strategy established for generating inline content template"
-            ));
-        }
-
         let Some(user_prompt) = &args.user_prompt else {
             return Err(anyhow!("user prompt not provided"));
         };
@@ -88,6 +82,11 @@ impl PromptTemplate for GenerateInlineContent {
             _ => {}
         }
 
+        // Really dumb truncation strategy
+        if let Some(max_tokens) = max_token_length {
+            prompt = args.model.truncate(&prompt, max_tokens)?;
+        }
+
         let token_count = args.model.count_tokens(&prompt)?;
 
         anyhow::Ok((prompt, token_count))
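
With both templates truncating instead of erroring, a template's `generate` now always produces a (possibly shortened) prompt when a budget is supplied. A sketch of the resulting contract, again using a whitespace-token approximation as a stand-in for the real model (names here are illustrative, not from the diff):

```rust
// Sketch of the new contract: None means "no limit, return everything";
// Some(n) means "truncate to at most n tokens" rather than returning an error.
fn build_prompt(body: &str, max_token_length: Option<usize>) -> (String, usize) {
    let mut prompt = format!("Rewrite the following code:\n{body}");
    if let Some(max_tokens) = max_token_length {
        prompt = prompt
            .split_whitespace()
            .take(max_tokens)
            .collect::<Vec<_>>()
            .join(" ");
    }
    let token_count = prompt.split_whitespace().count();
    (prompt, token_count)
}

fn main() {
    let (_, full_tokens) = build_prompt("let x = 1 + 2;", None);
    let (cut, cut_tokens) = build_prompt("let x = 1 + 2;", Some(4));
    assert!(cut_tokens <= 4);
    assert!(cut_tokens <= full_tokens);
    println!("{cut}");
}
```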