Put back logic for passing data collection choice to cloud (#49426)

Created by Max Brunsfeld

Release Notes:

- N/A

Change summary

crates/edit_prediction/src/edit_prediction.rs       | 4 ++++
crates/edit_prediction/src/edit_prediction_tests.rs | 1 +
crates/edit_prediction/src/mercury.rs               | 1 +
crates/edit_prediction/src/ollama.rs                | 2 ++
crates/edit_prediction/src/prediction.rs            | 1 +
crates/edit_prediction/src/sweep_ai.rs              | 1 +
crates/edit_prediction/src/zeta2.rs                 | 5 +++++
crates/edit_prediction_cli/src/format_prompt.rs     | 1 +
crates/zeta_prompt/src/zeta_prompt.rs               | 6 ++++++
9 files changed, 22 insertions(+)

Detailed changes

crates/edit_prediction/src/edit_prediction.rs 🔗

@@ -2209,6 +2209,10 @@ impl EditPredictionStore {
             .is_some_and(|watcher| watcher.is_project_open_source())
     }
 
+    pub(crate) fn is_data_collection_enabled(&self, cx: &App) -> bool {
+        self.data_collection_choice.is_enabled(cx)
+    }
+
     fn load_data_collection_choice() -> DataCollectionChoice {
         let choice = KEY_VALUE_STORE
             .read_kvp(ZED_PREDICT_DATA_COLLECTION_CHOICE)

crates/edit_prediction/src/edit_prediction_tests.rs 🔗

@@ -1707,6 +1707,7 @@ async fn test_edit_prediction_basic_interpolation(cx: &mut TestAppContext) {
             excerpt_ranges: None,
             preferred_model: None,
             in_open_source_repo: false,
+            can_collect_data: false,
         },
         buffer_snapshotted_at: Instant::now(),
         response_received_at: Instant::now(),

crates/edit_prediction/src/mercury.rs 🔗

@@ -100,6 +100,7 @@ impl Mercury {
                 excerpt_ranges: None,
                 preferred_model: None,
                 in_open_source_repo: false,
+                can_collect_data: false,
             };
 
             let prompt = build_prompt(&inputs);

crates/edit_prediction/src/ollama.rs 🔗

@@ -172,6 +172,7 @@ impl Ollama {
                     excerpt_ranges: None,
                     preferred_model: None,
                     in_open_source_repo: false,
+                    can_collect_data: false,
                 };
 
                 (prompt, stop_tokens, Some(editable_offset_range), inputs)
@@ -201,6 +202,7 @@ impl Ollama {
                     excerpt_ranges: None,
                     preferred_model: None,
                     in_open_source_repo: false,
+                    can_collect_data: false,
                 };
 
                 let prefix = inputs.cursor_excerpt[..inputs.cursor_offset_in_excerpt].to_string();

crates/edit_prediction/src/prediction.rs 🔗

@@ -161,6 +161,7 @@ mod tests {
                 excerpt_ranges: None,
                 preferred_model: None,
                 in_open_source_repo: false,
+                can_collect_data: false,
             },
             buffer_snapshotted_at: Instant::now(),
             response_received_at: Instant::now(),

crates/edit_prediction/src/zeta2.rs 🔗

@@ -66,6 +66,8 @@ pub fn request_prediction_with_zeta2(
         && events.iter().all(|event| event.in_open_source_repo())
         && related_files.iter().all(|file| file.in_open_source_repo);
 
+    let can_collect_data = is_open_source && store.is_data_collection_enabled(cx);
+
     let request_task = cx.background_spawn({
         async move {
             let zeta_version = raw_config
@@ -83,6 +85,7 @@ pub fn request_prediction_with_zeta2(
                 zeta_version,
                 preferred_model,
                 is_open_source,
+                can_collect_data,
             );
 
             if prompt_input_contains_special_tokens(&prompt_input, zeta_version) {
@@ -265,6 +268,7 @@ pub fn zeta2_prompt_input(
     zeta_format: ZetaFormat,
     preferred_model: Option<EditPredictionModelKind>,
     is_open_source: bool,
+    can_collect_data: bool,
 ) -> (std::ops::Range<usize>, zeta_prompt::ZetaPromptInput) {
     let cursor_point = cursor_offset.to_point(snapshot);
 
@@ -309,6 +313,7 @@ pub fn zeta2_prompt_input(
         excerpt_ranges: Some(excerpt_ranges),
         preferred_model,
         in_open_source_repo: is_open_source,
+        can_collect_data,
     };
     (editable_offset_range, prompt_input)
 }

crates/edit_prediction_cli/src/format_prompt.rs 🔗

@@ -100,6 +100,7 @@ pub async fn run_format_prompt(
                     .captured_prompt_input
                     .as_ref()
                     .map_or(false, |input| input.in_open_source_repo),
+                can_collect_data: false,
             };
             let prompt = format_zeta_prompt(&input, version);
             let prefill = zeta_prompt::get_prefill(&input, version);

crates/zeta_prompt/src/zeta_prompt.rs 🔗

@@ -65,6 +65,8 @@ pub struct ZetaPromptInput {
     pub preferred_model: Option<EditPredictionModelKind>,
     #[serde(default)]
     pub in_open_source_repo: bool,
+    #[serde(default)]
+    pub can_collect_data: bool,
 }
 
 #[derive(
@@ -1105,6 +1107,7 @@ mod tests {
             excerpt_ranges: None,
             preferred_model: None,
             in_open_source_repo: false,
+            can_collect_data: false,
         }
     }
 
@@ -1481,6 +1484,7 @@ mod tests {
             excerpt_ranges: None,
             preferred_model: None,
             in_open_source_repo: false,
+            can_collect_data: false,
         };
 
         let prompt = zeta1::format_zeta1_from_input(&input, 15..41, 0..excerpt.len());
@@ -1535,6 +1539,7 @@ mod tests {
             excerpt_ranges: None,
             preferred_model: None,
             in_open_source_repo: false,
+            can_collect_data: false,
         };
 
         let prompt = zeta1::format_zeta1_from_input(&input, 0..28, 0..28);
@@ -1584,6 +1589,7 @@ mod tests {
             excerpt_ranges: None,
             preferred_model: None,
             in_open_source_repo: false,
+            can_collect_data: false,
         };
 
         let prompt = zeta1::format_zeta1_from_input(&input, editable_range, context_range);