Detailed changes
@@ -44,8 +44,10 @@ jobs:
BRANCH: ${{ inputs.branch }}
COMMIT: ${{ inputs.commit }}
CHANNEL: ${{ inputs.channel }}
- GIT_COMMITTER_NAME: Zed Zippy
- GIT_COMMITTER_EMAIL: hi@zed.dev
+ GIT_AUTHOR_NAME: zed-zippy[bot]
+ GIT_AUTHOR_EMAIL: 234243425+zed-zippy[bot]@users.noreply.github.com
+ GIT_COMMITTER_NAME: zed-zippy[bot]
+ GIT_COMMITTER_EMAIL: 234243425+zed-zippy[bot]@users.noreply.github.com
GITHUB_TOKEN: ${{ steps.generate-token.outputs.token }}
defaults:
run:
@@ -34,13 +34,14 @@ jobs:
echo "Checking compliance for $TAG"
echo "tag=$TAG" >> "$GITHUB_OUTPUT"
- id: run-compliance-check
- name: compliance_check::scheduled_compliance_check::run_compliance_check
+ name: release::add_compliance_steps::run_compliance_check
run: |
cargo xtask compliance "$LATEST_TAG" --branch main --report-path "compliance-report-${GITHUB_REF_NAME}.md"
env:
- LATEST_TAG: ${{ steps.determine-version.outputs.tag }}
GITHUB_APP_ID: ${{ secrets.ZED_ZIPPY_APP_ID }}
GITHUB_APP_KEY: ${{ secrets.ZED_ZIPPY_APP_PRIVATE_KEY }}
+ LATEST_TAG: ${{ steps.determine-version.outputs.tag }}
+ continue-on-error: true
- name: '@actions/upload-artifact compliance-report-${GITHUB_REF_NAME}.md'
if: always()
uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4
@@ -307,7 +307,7 @@ jobs:
cache: rust
path: ~/.rustup
- id: run-compliance-check
- name: release::run_compliance_check
+ name: release::add_compliance_steps::run_compliance_check
run: |
cargo xtask compliance "$GITHUB_REF_NAME" --report-path "compliance-report-${GITHUB_REF_NAME}.md"
env:
@@ -328,7 +328,7 @@ jobs:
STATUS="✅ Compliance check passed for $COMPLIANCE_TAG"
MESSAGE=$(printf "%s\n\nReport: %s" "$STATUS" "$ARTIFACT_URL")
else
- STATUS="❌ Compliance check failed for $COMPLIANCE_TAG"
+ STATUS="❌ Preliminary compliance check failed for $COMPLIANCE_TAG (but this can still be fixed while the builds are running!)"
MESSAGE=$(printf "%s\n\nReport: %s\nPRs needing review: %s" "$STATUS" "$ARTIFACT_URL" "https://github.com/zed-industries/zed/pulls?q=is%3Apr+is%3Aclosed+label%3A%22PR+state%3Aneeds+review%22")
fi
@@ -340,6 +340,8 @@ jobs:
COMPLIANCE_OUTCOME: ${{ steps.run-compliance-check.outcome }}
COMPLIANCE_TAG: ${{ github.ref_name }}
ARTIFACT_URL: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}#artifacts
+ outputs:
+ outcome: ${{ steps.run-compliance-check.outcome }}
timeout-minutes: 60
bundle_linux_aarch64:
needs:
@@ -641,6 +643,7 @@ jobs:
validate_release_assets:
needs:
- upload_release_assets
+ - compliance_check
runs-on: namespace-profile-2x4-ubuntu-2404
steps:
- name: release::validate_release_assets
@@ -673,13 +676,12 @@ jobs:
cache: rust
path: ~/.rustup
- id: run-compliance-check
- name: release::run_compliance_check
+ name: release::add_compliance_steps::run_compliance_check
run: |
cargo xtask compliance "$GITHUB_REF_NAME" --report-path "compliance-report-${GITHUB_REF_NAME}.md"
env:
GITHUB_APP_ID: ${{ secrets.ZED_ZIPPY_APP_ID }}
GITHUB_APP_KEY: ${{ secrets.ZED_ZIPPY_APP_PRIVATE_KEY }}
- continue-on-error: true
- name: '@actions/upload-artifact compliance-report-${GITHUB_REF_NAME}.md'
if: always()
uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4
@@ -689,7 +691,7 @@ jobs:
if-no-files-found: error
overwrite: true
- name: send_compliance_slack_notification
- if: always()
+ if: failure() || needs.compliance_check.outputs.outcome != 'success'
run: |
if [ "$COMPLIANCE_OUTCOME" == "success" ]; then
STATUS="✅ Compliance check passed for $COMPLIANCE_TAG"
@@ -487,10 +487,18 @@ where
let s: Option<String> = Option::deserialize(deserializer)?;
match s {
Some(json_string) => {
+ // The devcontainer metadata label can be either a JSON array (e.g. from
+ // image-based devcontainers) or a single JSON object (e.g. from
+ // docker-compose-based devcontainers created by the devcontainer CLI).
+ // Handle both formats.
let parsed: Vec<HashMap<String, serde_json_lenient::Value>> =
- serde_json_lenient::from_str(&json_string).map_err(|e| {
- log::error!("Error deserializing metadata: {e}");
- serde::de::Error::custom(e)
+ serde_json_lenient::from_str(&json_string).or_else(|_| {
+ let single: HashMap<String, serde_json_lenient::Value> =
+ serde_json_lenient::from_str(&json_string).map_err(|e| {
+ log::error!("Error deserializing metadata: {e}");
+ serde::de::Error::custom(e)
+ })?;
+ Ok(vec![single])
})?;
Ok(Some(parsed))
}
@@ -936,6 +944,30 @@ mod test {
assert_eq!(target_dir.unwrap(), "/workspaces/cli/".to_string());
}
+ #[test]
+ fn should_deserialize_object_metadata_from_docker_compose_container() {
+ // The devcontainer CLI writes metadata as a bare JSON object (not an array)
+ // when there is only one metadata entry (e.g. docker-compose with no features).
+ // See https://github.com/devcontainers/cli/issues/1054
+ let given_config = r#"
+ {
+ "Id": "dc4e7b8ff4bf",
+ "Config": {
+ "Labels": {
+ "devcontainer.metadata": "{\"remoteUser\":\"ubuntu\"}"
+ }
+ }
+ }
+ "#;
+ let config = serde_json_lenient::from_str::<DockerInspect>(given_config).unwrap();
+
+ assert!(config.config.labels.metadata.is_some());
+ let metadata = config.config.labels.metadata.unwrap();
+ assert_eq!(metadata.len(), 1);
+ assert!(metadata[0].contains_key("remoteUser"));
+ assert_eq!(metadata[0]["remoteUser"], "ubuntu");
+ }
+
#[test]
fn should_deserialize_docker_compose_config() {
let given_config = r#"
@@ -1690,12 +1690,16 @@ impl EditPredictionStore {
settled_editable_region,
ts_error_count_before_prediction,
ts_error_count_after_prediction,
- edit_bytes_predicted_new = kept_rate_result.predicted_new_chars,
- edit_bytes_final_new = kept_rate_result.final_new_chars,
+ edit_bytes_candidate_new = kept_rate_result.candidate_new_chars,
+ edit_bytes_reference_new = kept_rate_result.reference_new_chars,
+ edit_bytes_candidate_deleted = kept_rate_result.candidate_deleted_chars,
+ edit_bytes_reference_deleted = kept_rate_result.reference_deleted_chars,
edit_bytes_kept = kept_rate_result.kept_chars,
+ edit_bytes_correctly_deleted = kept_rate_result.correctly_deleted_chars,
edit_bytes_discarded = kept_rate_result.discarded_chars,
edit_bytes_context = kept_rate_result.context_chars,
edit_bytes_kept_rate = kept_rate_result.kept_rate,
+ edit_bytes_recall_rate = kept_rate_result.recall_rate,
example,
e2e_latency = e2e_latency.as_millis(),
);
@@ -13,12 +13,33 @@ pub enum TokenAnnotation {
#[allow(dead_code)]
#[derive(Debug, Clone)]
pub struct KeptRateResult {
- pub predicted_new_chars: usize,
- pub final_new_chars: usize,
+ /// Characters newly introduced by the candidate
+ pub candidate_new_chars: usize,
+ /// Characters newly introduced by the reference
+ pub reference_new_chars: usize,
+ /// Characters from `base` that are deleted by the candidate.
+ pub candidate_deleted_chars: usize,
+ /// Characters from `base` that are deleted by the reference.
+ pub reference_deleted_chars: usize,
+ /// Candidate new characters that are also present in the reference.
pub kept_chars: usize,
+ /// Base characters deleted by both the candidate and the reference.
+ pub correctly_deleted_chars: usize,
+ /// Candidate new characters that are not kept in the reference.
pub discarded_chars: usize,
+ /// Candidate characters treated as unchanged context
pub context_chars: usize,
+ /// Fraction of candidate edit characters that match the reference edit.
+ ///
+ /// This includes both kept newly introduced characters and correctly
+ /// deleted base characters.
pub kept_rate: f64,
+ /// Fraction of reference edit characters covered by the candidate edit.
+ ///
+ /// This includes both kept newly introduced characters and correctly
+ /// deleted base characters.
+ pub recall_rate: f64,
+ /// Per-token classification for candidate tokens used by tests.
#[cfg(test)]
pub token_annotations: Vec<TokenAnnotation>,
}
@@ -188,89 +209,127 @@ fn analyze_masked_tokens<'a>(tokens: &[&'a str], mask: &[bool]) -> (Vec<&'a str>
(unmasked_tokens, unmasked_chars, masked_chars)
}
-fn should_bail_for_dirty_final(base: &str, predicted: &str, final_text: &str) -> bool {
- let predicted_delta_chars = predicted.len().abs_diff(base.len());
- let final_delta_chars = final_text.len().abs_diff(base.len());
- predicted_delta_chars.abs_diff(final_delta_chars) > MAX_DIRTY_LENGTH_DELTA_CHARS
+fn count_unmasked_chars(tokens: &[&str], mask: &[bool]) -> usize {
+ tokens
+ .iter()
+ .zip(mask.iter())
+ .filter_map(|(&token, &is_masked)| (!is_masked).then_some(token.len()))
+ .sum()
+}
+
+fn should_bail_for_dirty_final(base: &str, candidate: &str, reference: &str) -> bool {
+ let candidate_delta_chars = candidate.len().abs_diff(base.len());
+ let reference_delta_chars = reference.len().abs_diff(base.len());
+ candidate_delta_chars.abs_diff(reference_delta_chars) > MAX_DIRTY_LENGTH_DELTA_CHARS
}
-pub fn compute_kept_rate(base: &str, predicted: &str, final_text: &str) -> KeptRateResult {
- if base == predicted && predicted == final_text {
- let predicted_tokens = tokenize(predicted);
- let context_chars = predicted_tokens.iter().map(|token| token.len()).sum();
+pub fn compute_kept_rate(base: &str, candidate: &str, reference: &str) -> KeptRateResult {
+ if base == candidate && candidate == reference {
+ let candidate_tokens = tokenize(candidate);
+ let context_chars = candidate_tokens.iter().map(|token| token.len()).sum();
return KeptRateResult {
- predicted_new_chars: 0,
- final_new_chars: 0,
+ candidate_new_chars: 0,
+ reference_new_chars: 0,
+ candidate_deleted_chars: 0,
+ reference_deleted_chars: 0,
kept_chars: 0,
+ correctly_deleted_chars: 0,
discarded_chars: 0,
context_chars,
kept_rate: 1.0,
+ recall_rate: 1.0,
#[cfg(test)]
- token_annotations: vec![TokenAnnotation::Context; predicted_tokens.len()],
+ token_annotations: vec![TokenAnnotation::Context; candidate_tokens.len()],
};
}
- if should_bail_for_dirty_final(base, predicted, final_text) {
- let predicted_new_chars = predicted.len().abs_diff(base.len());
- let final_new_chars = final_text.len().abs_diff(base.len());
+ if should_bail_for_dirty_final(base, candidate, reference) {
+ let candidate_new_chars = candidate.len().abs_diff(base.len());
+ let reference_new_chars = reference.len().abs_diff(base.len());
return KeptRateResult {
- predicted_new_chars,
- final_new_chars,
+ candidate_new_chars,
+ reference_new_chars,
+ candidate_deleted_chars: 0,
+ reference_deleted_chars: 0,
kept_chars: 0,
- discarded_chars: predicted_new_chars,
+ correctly_deleted_chars: 0,
+ discarded_chars: candidate_new_chars,
context_chars: 0,
kept_rate: 0.0,
+ recall_rate: 0.0,
#[cfg(test)]
- token_annotations: vec![TokenAnnotation::Discarded; tokenize(predicted).len()],
+ token_annotations: vec![TokenAnnotation::Discarded; tokenize(candidate).len()],
};
}
let base_tokens = tokenize(base);
- let predicted_tokens = tokenize(predicted);
- let final_tokens = tokenize(final_text);
-
- let pred_base_mask = lcs_keep_mask(&predicted_tokens, &base_tokens);
- let (pred_final_mask, final_pred_mask) = lcs_keep_masks(&predicted_tokens, &final_tokens);
- let context_mask: Vec<bool> = pred_base_mask
+ let candidate_tokens = tokenize(candidate);
+ let reference_tokens = tokenize(reference);
+
+ let (candidate_base_mask, base_candidate_mask) =
+ lcs_keep_masks(&candidate_tokens, &base_tokens);
+ let (candidate_reference_mask, reference_candidate_mask) =
+ lcs_keep_masks(&candidate_tokens, &reference_tokens);
+ let context_mask: Vec<bool> = candidate_base_mask
.iter()
- .zip(pred_final_mask.iter())
- .map(|(&in_base, &in_final)| in_base && in_final)
+ .zip(candidate_reference_mask.iter())
+ .map(|(&in_base, &in_reference)| in_base && in_reference)
.collect();
- let (stripped_predicted, predicted_new_chars, context_chars) =
- analyze_masked_tokens(&predicted_tokens, &context_mask);
+ let (stripped_candidate, candidate_new_chars, context_chars) =
+ analyze_masked_tokens(&candidate_tokens, &context_mask);
- let final_base_mask = lcs_keep_mask(&final_tokens, &base_tokens);
- let final_context_mask: Vec<bool> = final_base_mask
+ let (reference_base_mask, base_reference_mask) =
+ lcs_keep_masks(&reference_tokens, &base_tokens);
+ let reference_context_mask: Vec<bool> = reference_base_mask
.iter()
- .zip(final_pred_mask.iter())
- .map(|(&in_base, &in_predicted)| in_base && in_predicted)
+ .zip(reference_candidate_mask.iter())
+ .map(|(&in_base, &in_candidate)| in_base && in_candidate)
.collect();
- let (stripped_final, final_new_chars, _) =
- analyze_masked_tokens(&final_tokens, &final_context_mask);
+ let (stripped_reference, reference_new_chars, _) =
+ analyze_masked_tokens(&reference_tokens, &reference_context_mask);
- let keep_mask = lcs_keep_mask(&stripped_predicted, &stripped_final);
+ let keep_mask = lcs_keep_mask(&stripped_candidate, &stripped_reference);
- let kept_chars: usize = stripped_predicted
+ let kept_chars: usize = stripped_candidate
.iter()
.zip(keep_mask.iter())
.filter_map(|(&token, &is_kept)| is_kept.then_some(token.len()))
.sum();
- let discarded_chars = predicted_new_chars - kept_chars;
+ let candidate_deleted_chars = count_unmasked_chars(&base_tokens, &base_candidate_mask);
+ let reference_deleted_chars = count_unmasked_chars(&base_tokens, &base_reference_mask);
+ let correctly_deleted_chars: usize = base_tokens
+ .iter()
+ .zip(base_candidate_mask.iter().zip(base_reference_mask.iter()))
+ .filter_map(|(&token, (&in_candidate, &in_reference))| {
+ (!in_candidate && !in_reference).then_some(token.len())
+ })
+ .sum();
+
+ let discarded_chars = candidate_new_chars - kept_chars;
+ let matched_edit_chars = kept_chars + correctly_deleted_chars;
+ let candidate_edit_chars = candidate_new_chars + candidate_deleted_chars;
+ let reference_edit_chars = reference_new_chars + reference_deleted_chars;
- let kept_rate = if predicted_new_chars == 0 {
- if final_new_chars == 0 { 1.0 } else { 0.0 }
+ let kept_rate = if candidate_edit_chars == 0 {
+ if reference_edit_chars == 0 { 1.0 } else { 0.0 }
} else {
- kept_chars as f64 / predicted_new_chars as f64
+ matched_edit_chars as f64 / candidate_edit_chars as f64
+ };
+
+ let recall_rate = if reference_edit_chars == 0 {
+ if candidate_edit_chars == 0 { 1.0 } else { 0.0 }
+ } else {
+ matched_edit_chars as f64 / reference_edit_chars as f64
};
#[cfg(test)]
let token_annotations = {
- let mut token_annotations = Vec::with_capacity(predicted_tokens.len());
+ let mut token_annotations = Vec::with_capacity(candidate_tokens.len());
let mut new_index = 0;
- for (token_index, _token) in predicted_tokens.iter().enumerate() {
+ for (token_index, _token) in candidate_tokens.iter().enumerate() {
if context_mask[token_index] {
token_annotations.push(TokenAnnotation::Context);
} else {
@@ -288,12 +347,16 @@ pub fn compute_kept_rate(base: &str, predicted: &str, final_text: &str) -> KeptR
};
KeptRateResult {
- predicted_new_chars,
- final_new_chars,
+ candidate_new_chars,
+ reference_new_chars,
+ candidate_deleted_chars,
+ reference_deleted_chars,
kept_chars,
+ correctly_deleted_chars,
discarded_chars,
context_chars,
kept_rate,
+ recall_rate,
#[cfg(test)]
token_annotations,
}
@@ -327,7 +390,8 @@ mod test_kept_rate {
fn test_rate_extremes() {
let no_change = compute_kept_rate("foo bar", "foo bar", "foo bar");
assert!((no_change.kept_rate - 1.0).abs() < 1e-6);
- assert_eq!(no_change.predicted_new_chars, 0);
+ assert!((no_change.recall_rate - 1.0).abs() < 1e-6);
+ assert_eq!(no_change.candidate_new_chars, 0);
assert!(
no_change
.token_annotations
@@ -337,15 +401,17 @@ mod test_kept_rate {
let accepted = compute_kept_rate("old", "new", "new");
assert!((accepted.kept_rate - 1.0).abs() < 1e-6);
+ assert!((accepted.recall_rate - 1.0).abs() < 1e-6);
let discarded = compute_kept_rate("old", "old", "new");
assert!((discarded.kept_rate - 0.0).abs() < 1e-6);
+ assert!((discarded.recall_rate - 0.0).abs() < 1e-6);
}
#[test]
fn test_pure_addition() {
let kept = compute_kept_rate("", "brand new line\n", "brand new line\n");
- assert_eq!(kept.kept_chars, kept.predicted_new_chars);
+ assert_eq!(kept.kept_chars, kept.candidate_new_chars);
assert!(
kept.token_annotations
.iter()
@@ -354,26 +420,28 @@ mod test_kept_rate {
let discarded =
compute_kept_rate("", "brand new line\n", "something completely different\n");
- assert!(discarded.kept_chars < discarded.predicted_new_chars);
+ assert!(discarded.kept_chars < discarded.candidate_new_chars);
}
#[test]
fn test_decoy_when_base_excluded() {
let base = " decoy.when(mock_sync_hardware_api.sp()).then_return(SpeedStatus.IDLE)\n";
- let predicted = " decoy.when(mock_sync_module_hardware.speed_status).then_return(SpeedStatus.IDLE)\n";
- let final_text = " decoy.when(mock_sync_module_hardware.speed_status).then_return(SpeedStatus.IDLE)\n";
- let result = compute_kept_rate(base, predicted, final_text);
+ let candidate = " decoy.when(mock_sync_module_hardware.speed_status).then_return(SpeedStatus.IDLE)\n";
+ let reference = " decoy.when(mock_sync_module_hardware.speed_status).then_return(SpeedStatus.IDLE)\n";
+ let result = compute_kept_rate(base, candidate, reference);
let expected_new = "mock_sync_module_hardware".len() + "speed_status".len();
- assert_eq!(result.predicted_new_chars, expected_new);
+ assert_eq!(result.candidate_new_chars, expected_new);
+ assert!(result.correctly_deleted_chars > 0);
assert!((result.kept_rate - 1.0).abs() < 1e-6);
+ assert!((result.recall_rate - 1.0).abs() < 1e-6);
}
#[test]
fn test_missing_deletion() {
let base = " fn select_next_edit(&mut self, _: &NextEdit, _: &mut Window, cx: &mut Context<Self>) {\n epr\n";
- let predicted = " fn select_next_edit(&mut self, _: &NextEdit, _: &mut Window, cx: &mut Context<Self>) {\n epr\neprintln!(\"\");\n";
- let final_text = " fn select_next_edit(&mut self, _: &NextEdit, _: &mut Window, cx: &mut Context<Self>) {\n eprintln!(\"\");\n";
- let result = compute_kept_rate(base, predicted, final_text);
+ let candidate = " fn select_next_edit(&mut self, _: &NextEdit, _: &mut Window, cx: &mut Context<Self>) {\n epr\neprintln!(\"\");\n";
+ let reference = " fn select_next_edit(&mut self, _: &NextEdit, _: &mut Window, cx: &mut Context<Self>) {\n eprintln!(\"\");\n";
+ let result = compute_kept_rate(base, candidate, reference);
assert!(
result.kept_rate < 0.85,
"expected kept_rate < 0.85, got {}",
@@ -385,7 +453,12 @@ mod test_kept_rate {
#[test]
fn test_empty_prediction() {
let result = compute_kept_rate("old line\n", "", "new line\n");
- assert!((result.kept_rate - 0.0).abs() < 1e-6);
+ assert_eq!(result.candidate_new_chars, 0);
+ assert!(result.candidate_deleted_chars > 0);
+ assert!(result.correctly_deleted_chars > 0);
+ assert!(result.correctly_deleted_chars < result.candidate_deleted_chars);
+ assert!(result.kept_rate > 0.0 && result.kept_rate < 1.0);
+ assert!(result.recall_rate > 0.0 && result.recall_rate < 1.0);
}
#[test]
@@ -399,24 +472,25 @@ mod test_kept_rate {
#[test]
fn test_bails_for_dirty_final() {
let base = "fn example() {\n work();\n}\n";
- let predicted = "fn example() {\n work();\n predicted();\n}\n";
- let final_text = format!(
+ let candidate = "fn example() {\n work();\n predicted();\n}\n";
+ let reference = format!(
"fn example() {{\n work();\n {}\n}}\n",
"settled();\n ".repeat(MAX_DIRTY_LENGTH_DELTA_CHARS / 8 + 64)
);
- let result = compute_kept_rate(base, predicted, &final_text);
+ let result = compute_kept_rate(base, candidate, &reference);
assert_eq!(result.kept_rate, 0.0);
+ assert_eq!(result.recall_rate, 0.0);
assert_eq!(result.kept_chars, 0);
- assert_eq!(result.discarded_chars, result.predicted_new_chars);
+ assert_eq!(result.discarded_chars, result.candidate_new_chars);
}
#[test]
fn test_eprintln_token_alignment() {
let base = " fn select_next_edit(&mut self, _: &NextEdit, _: &mut Window, cx: &mut Context<Self>) {\n epr\n";
- let predicted = " fn select_next_edit(&mut self, _: &NextEdit, _: &mut Window, cx: &mut Context<Self>) {\n eprintln!(\"hello world!\");\n";
- let final_text = " fn select_next_edit(&mut self, _: &NextEdit, _: &mut Window, cx: &mut Context<Self>) {\n eprintln!(\"\");\n";
- let result = compute_kept_rate(base, predicted, final_text);
+ let candidate = " fn select_next_edit(&mut self, _: &NextEdit, _: &mut Window, cx: &mut Context<Self>) {\n eprintln!(\"hello world!\");\n";
+ let reference = " fn select_next_edit(&mut self, _: &NextEdit, _: &mut Window, cx: &mut Context<Self>) {\n eprintln!(\"\");\n";
+ let result = compute_kept_rate(base, candidate, reference);
assert!(result.discarded_chars > 0);
assert!(result.kept_chars > 0);
assert!(result.kept_rate > 0.0 && result.kept_rate < 1.0);
@@ -427,14 +501,18 @@ mod test_kept_rate {
#[test]
fn test_annotations_rename() {
let base = " foo(old_name)\n";
- let predicted = " foo(new_name)\n";
- let final_text = " foo(new_name)\n";
- let result = compute_kept_rate(base, predicted, final_text);
-
- assert_eq!(result.predicted_new_chars, "new_name".len());
- assert_eq!(result.token_annotations.len(), tokenize(predicted).len());
-
- for (&token, &annotation) in tokenize(predicted).iter().zip(&result.token_annotations) {
+ let candidate = " foo(new_name)\n";
+ let reference = " foo(new_name)\n";
+ let result = compute_kept_rate(base, candidate, reference);
+
+ assert_eq!(result.candidate_new_chars, "new_name".len());
+ assert_eq!(result.candidate_deleted_chars, "old_name".len());
+ assert_eq!(result.reference_deleted_chars, "old_name".len());
+ assert_eq!(result.correctly_deleted_chars, "old_name".len());
+ assert!((result.recall_rate - 1.0).abs() < 1e-6);
+ assert_eq!(result.token_annotations.len(), tokenize(candidate).len());
+
+ for (&token, &annotation) in tokenize(candidate).iter().zip(&result.token_annotations) {
if token == "new_name" {
assert_eq!(annotation, TokenAnnotation::Kept);
} else {
@@ -446,12 +524,12 @@ mod test_kept_rate {
#[test]
fn test_annotations_eprintln_coloring() {
let base = " fn select_next_edit(&mut self, _: &NextEdit, _: &mut Window, cx: &mut Context<Self>) {\n epr\n";
- let predicted = " fn select_next_edit(&mut self, _: &NextEdit, _: &mut Window, cx: &mut Context<Self>) {\n eprintln!(\"hello world!\");\n";
- let final_text = " fn select_next_edit(&mut self, _: &NextEdit, _: &mut Window, cx: &mut Context<Self>) {\n eprintln!(\"\");\n";
- let result = compute_kept_rate(base, predicted, final_text);
- let predicted_tokens = tokenize(predicted);
+ let candidate = " fn select_next_edit(&mut self, _: &NextEdit, _: &mut Window, cx: &mut Context<Self>) {\n eprintln!(\"hello world!\");\n";
+ let reference = " fn select_next_edit(&mut self, _: &NextEdit, _: &mut Window, cx: &mut Context<Self>) {\n eprintln!(\"\");\n";
+ let result = compute_kept_rate(base, candidate, reference);
+ let candidate_tokens = tokenize(candidate);
- let eprintln_index = predicted_tokens
+ let eprintln_index = candidate_tokens
.iter()
.position(|&token| token == "eprintln")
.expect("eprintln token not found");
@@ -485,12 +563,15 @@ mod test_kept_rate {
#[test]
fn test_repetitive_tokens_remain_discarded() {
let base = "foo + foo + foo + foo + foo\n".repeat(16);
- let predicted = "foo + foo + prediction_token + foo + foo\n".repeat(16);
- let final_text = "foo + foo + kept_token + foo + foo\n".repeat(16);
- let result = compute_kept_rate(&base, &predicted, &final_text);
+ let candidate = "foo + foo + prediction_token + foo + foo\n".repeat(16);
+ let reference = "foo + foo + kept_token + foo + foo\n".repeat(16);
+ let result = compute_kept_rate(&base, &candidate, &reference);
assert_eq!(result.kept_chars, 0);
- assert_eq!(result.discarded_chars, result.predicted_new_chars);
- assert_eq!(result.predicted_new_chars, "prediction_token".len() * 16);
+ assert_eq!(result.correctly_deleted_chars, "foo".len() * 16);
+ assert_eq!(result.discarded_chars, result.candidate_new_chars);
+ assert_eq!(result.candidate_new_chars, "prediction_token".len() * 16);
+ assert!(result.kept_rate > 0.0);
+ assert!(result.recall_rate > 0.0);
}
}
@@ -6,6 +6,7 @@ use edit_prediction_types::{
DataCollectionState, EditPredictionDelegate, EditPredictionDiscardReason,
EditPredictionIconSet, SuggestionDisplayType,
};
+use feature_flags::FeatureFlagAppExt;
use gpui::{App, Entity, prelude::*};
use language::{Buffer, ToPoint as _};
use project::Project;
@@ -73,6 +74,24 @@ impl EditPredictionDelegate for ZedEditPredictionDelegate {
self.store
.read(cx)
.is_file_open_source(&self.project, file, cx);
+
+ if let Some(organization_configuration) = self
+ .store
+ .read(cx)
+ .user_store
+ .read(cx)
+ .current_organization_configuration()
+ {
+ if !organization_configuration
+ .edit_prediction
+ .is_feedback_enabled
+ {
+ return DataCollectionState::Disabled {
+ is_project_open_source,
+ };
+ }
+ }
+
if self.store.read(cx).data_collection_choice.is_enabled(cx) {
DataCollectionState::Enabled {
is_project_open_source,
@@ -89,6 +108,29 @@ impl EditPredictionDelegate for ZedEditPredictionDelegate {
}
}
+ fn can_toggle_data_collection(&self, cx: &App) -> bool {
+ if cx.is_staff() {
+ return false;
+ }
+
+ if let Some(organization_configuration) = self
+ .store
+ .read(cx)
+ .user_store
+ .read(cx)
+ .current_organization_configuration()
+ {
+ if !organization_configuration
+ .edit_prediction
+ .is_feedback_enabled
+ {
+ return false;
+ }
+ }
+
+ true
+ }
+
fn toggle_data_collection(&mut self, cx: &mut App) {
self.store.update(cx, |store, cx| {
store.toggle_data_collection_choice(cx);
@@ -187,6 +187,14 @@ pub struct ExampleScore {
#[serde(default, skip_serializing_if = "Option::is_none")]
pub kept_rate: Option<f64>,
#[serde(default, skip_serializing_if = "Option::is_none")]
+ pub recall_rate: Option<f64>,
+ #[serde(default, skip_serializing_if = "Option::is_none")]
+ pub kept_chars: Option<usize>,
+ #[serde(default, skip_serializing_if = "Option::is_none")]
+ pub correctly_deleted_chars: Option<usize>,
+ #[serde(default, skip_serializing_if = "Option::is_none")]
+ pub discarded_chars: Option<usize>,
+ #[serde(default, skip_serializing_if = "Option::is_none")]
pub cumulative_logprob: Option<f64>,
#[serde(default, skip_serializing_if = "Option::is_none")]
pub avg_logprob: Option<f64>,
@@ -85,6 +85,10 @@ pub async fn run_scoring(
inserted_tokens: 0,
deleted_tokens: 0,
kept_rate: None,
+ recall_rate: None,
+ kept_chars: None,
+ correctly_deleted_chars: None,
+ discarded_chars: None,
cumulative_logprob: None,
avg_logprob: None,
};
@@ -187,9 +191,20 @@ pub async fn run_scoring(
prediction.actual_cursor.as_ref(),
);
- let kept_rate = best_expected_text.map(|final_text| {
- metrics::compute_kept_rate(original_text, &actual_text, final_text).kept_rate
- });
+ let (kept_rate, recall_rate, kept_chars, correctly_deleted_chars, discarded_chars) =
+ best_expected_text
+ .map(|reference_text| {
+ let result =
+ metrics::compute_kept_rate(original_text, &actual_text, reference_text);
+ (
+ Some(result.kept_rate),
+ Some(result.recall_rate),
+ Some(result.kept_chars),
+ Some(result.correctly_deleted_chars),
+ Some(result.discarded_chars),
+ )
+ })
+ .unwrap_or((None, None, None, None, None));
scores.push(ExampleScore {
delta_chr_f: best_delta_chr_f_metrics.score as f32,
@@ -211,6 +226,10 @@ pub async fn run_scoring(
inserted_tokens: token_changes.inserted_tokens,
deleted_tokens: token_changes.deleted_tokens,
kept_rate,
+ recall_rate,
+ kept_chars,
+ correctly_deleted_chars,
+ discarded_chars,
cumulative_logprob: prediction.cumulative_logprob,
avg_logprob: prediction.avg_logprob,
});
@@ -277,6 +296,11 @@ pub fn print_report(examples: &[Example], verbose: bool) {
let mut isolated_whitespace_count: usize = 0;
let mut kept_rate_sum: f64 = 0.0;
let mut kept_rate_count: usize = 0;
+ let mut kept_chars_total: usize = 0;
+ let mut correctly_deleted_chars_total: usize = 0;
+ let mut discarded_chars_total: usize = 0;
+ let mut recall_rate_sum: f64 = 0.0;
+ let mut recall_rate_count: usize = 0;
let mut patch_inserted_tokens: Vec<usize> = Vec::new();
let mut patch_deleted_tokens: Vec<usize> = Vec::new();
let mut predictions_with_patch: usize = 0;
@@ -369,11 +393,24 @@ pub fn print_report(examples: &[Example], verbose: bool) {
isolated_whitespace_count += 1;
}
- // Accumulate kept rate metrics
+ // Accumulate kept and recall rate metrics
if let Some(kr) = score.kept_rate {
kept_rate_sum += kr;
kept_rate_count += 1;
}
+ if let Some(kept_chars) = score.kept_chars {
+ kept_chars_total += kept_chars;
+ }
+ if let Some(correctly_deleted_chars) = score.correctly_deleted_chars {
+ correctly_deleted_chars_total += correctly_deleted_chars;
+ }
+ if let Some(discarded_chars) = score.discarded_chars {
+ discarded_chars_total += discarded_chars;
+ }
+ if let Some(rr) = score.recall_rate {
+ recall_rate_sum += rr;
+ recall_rate_count += 1;
+ }
// Accumulate token change metrics (only for predictions that produced a patch)
let has_patch = example
@@ -504,13 +541,24 @@ pub fn print_report(examples: &[Example], verbose: bool) {
println!("Isolated whitespace changes: {}", isolated_ws_str);
}
- // Print kept rate metrics
+ // Print kept and recall rate metrics
if kept_rate_count > 0 {
let avg_kept_rate = kept_rate_sum / kept_rate_count as f64;
println!(
- "Kept rate: {:.1}% avg ({} evaluated)",
+ "Kept rate: {:.1}% avg ({} evaluated, kept chars: {}, correctly deleted chars: {}, discarded chars: {})",
avg_kept_rate * 100.0,
- kept_rate_count
+ kept_rate_count,
+ kept_chars_total,
+ correctly_deleted_chars_total,
+ discarded_chars_total
+ );
+ }
+ if recall_rate_count > 0 {
+ let avg_recall_rate = recall_rate_sum / recall_rate_count as f64;
+ println!(
+ "Recall rate: {:.1}% avg ({} evaluated)",
+ avg_recall_rate * 100.0,
+ recall_rate_count
);
}
@@ -618,6 +666,14 @@ pub struct SummaryJson {
pub isolated_whitespace_rate: Option<f32>,
#[serde(skip_serializing_if = "Option::is_none")]
pub avg_kept_rate: Option<f64>,
+ #[serde(skip_serializing_if = "Option::is_none")]
+ pub avg_recall_rate: Option<f64>,
+ #[serde(skip_serializing_if = "Option::is_none")]
+ pub total_kept_chars: Option<usize>,
+ #[serde(skip_serializing_if = "Option::is_none")]
+ pub total_correctly_deleted_chars: Option<usize>,
+ #[serde(skip_serializing_if = "Option::is_none")]
+ pub total_discarded_chars: Option<usize>,
}
pub fn compute_summary(examples: &[Example]) -> SummaryJson {
@@ -645,6 +701,14 @@ pub fn compute_summary(examples: &[Example]) -> SummaryJson {
let mut isolated_whitespace_count: usize = 0;
let mut kept_rate_sum: f64 = 0.0;
let mut kept_rate_count: usize = 0;
+ let mut kept_chars_total: usize = 0;
+ let mut kept_chars_count: usize = 0;
+ let mut correctly_deleted_chars_total: usize = 0;
+ let mut correctly_deleted_chars_count: usize = 0;
+ let mut discarded_chars_total: usize = 0;
+ let mut discarded_chars_count: usize = 0;
+ let mut recall_rate_sum: f64 = 0.0;
+ let mut recall_rate_count: usize = 0;
for example in examples {
for (score_idx, score) in example.score.iter().enumerate() {
@@ -685,11 +749,27 @@ pub fn compute_summary(examples: &[Example]) -> SummaryJson {
isolated_whitespace_count += 1;
}
- // Accumulate kept rate metrics
+ // Accumulate kept and recall rate metrics
if let Some(kr) = score.kept_rate {
kept_rate_sum += kr;
kept_rate_count += 1;
}
+ if let Some(kept_chars) = score.kept_chars {
+ kept_chars_total += kept_chars;
+ kept_chars_count += 1;
+ }
+ if let Some(correctly_deleted_chars) = score.correctly_deleted_chars {
+ correctly_deleted_chars_total += correctly_deleted_chars;
+ correctly_deleted_chars_count += 1;
+ }
+ if let Some(discarded_chars) = score.discarded_chars {
+ discarded_chars_total += discarded_chars;
+ discarded_chars_count += 1;
+ }
+ if let Some(rr) = score.recall_rate {
+ recall_rate_sum += rr;
+ recall_rate_count += 1;
+ }
// Accumulate cursor metrics
if let Some(exact_match) = score.cursor_exact_match {
@@ -771,6 +851,30 @@ pub fn compute_summary(examples: &[Example]) -> SummaryJson {
None
};
+ let avg_recall_rate = if recall_rate_count > 0 {
+ Some(recall_rate_sum / recall_rate_count as f64)
+ } else {
+ None
+ };
+
+ let total_kept_chars = if kept_chars_count > 0 {
+ Some(kept_chars_total)
+ } else {
+ None
+ };
+
+ let total_correctly_deleted_chars = if correctly_deleted_chars_count > 0 {
+ Some(correctly_deleted_chars_total)
+ } else {
+ None
+ };
+
+ let total_discarded_chars = if discarded_chars_count > 0 {
+ Some(discarded_chars_total)
+ } else {
+ None
+ };
+
SummaryJson {
total_examples: total_scores,
avg_delta_chr_f,
@@ -804,6 +908,10 @@ pub fn compute_summary(examples: &[Example]) -> SummaryJson {
wrong_editable_region_rate,
isolated_whitespace_rate,
avg_kept_rate,
+ avg_recall_rate,
+ total_kept_chars,
+ total_correctly_deleted_chars,
+ total_discarded_chars,
}
}
@@ -168,6 +168,10 @@ pub trait EditPredictionDelegate: 'static + Sized {
None
}
+ fn can_toggle_data_collection(&self, _cx: &App) -> bool {
+ true
+ }
+
fn toggle_data_collection(&mut self, _cx: &mut App) {}
fn is_enabled(
&self,
@@ -209,6 +213,7 @@ pub trait EditPredictionDelegateHandle {
fn icons(&self, cx: &App) -> EditPredictionIconSet;
fn data_collection_state(&self, cx: &App) -> DataCollectionState;
fn usage(&self, cx: &App) -> Option<EditPredictionUsage>;
+ fn can_toggle_data_collection(&self, cx: &App) -> bool;
fn toggle_data_collection(&self, cx: &mut App);
fn is_refreshing(&self, cx: &App) -> bool;
fn refresh(
@@ -265,6 +270,10 @@ where
self.read(cx).usage(cx)
}
+ fn can_toggle_data_collection(&self, cx: &App) -> bool {
+ self.read(cx).can_toggle_data_collection(cx)
+ }
+
fn toggle_data_collection(&self, cx: &mut App) {
self.update(cx, |this, cx| this.toggle_data_collection(cx))
}
@@ -790,7 +790,7 @@ impl EditPredictionButton {
.toggleable(IconPosition::Start, data_collection.is_enabled())
.icon(icon_name)
.icon_color(icon_color)
- .disabled(cx.is_staff())
+ .disabled(!provider.can_toggle_data_collection(cx))
.documentation_aside(DocumentationSide::Left, move |cx| {
let (msg, label_color, icon_name, icon_color) = match (is_open_source, is_collecting) {
(true, true) => (
@@ -22,7 +22,7 @@ use util::ResultExt;
use workspace::notifications::DetachAndPromptErr;
use workspace::{ModalView, Workspace};
-use crate::{branch_picker, git_panel::show_error_toast, resolve_active_repository};
+use crate::{branch_picker, git_panel::show_error_toast};
actions!(
branch_picker,
@@ -59,7 +59,7 @@ pub fn open(
cx: &mut Context<Workspace>,
) {
let workspace_handle = workspace.weak_handle();
- let repository = resolve_active_repository(workspace, cx);
+ let repository = workspace.project().read(cx).active_repository(cx);
workspace.toggle_modal(window, cx, |window, cx| {
BranchList::new(
@@ -582,7 +582,7 @@ fn open_with_tab(
cx: &mut Context<Workspace>,
) {
let workspace_handle = workspace.weak_handle();
- let repository = crate::resolve_active_repository(workspace, cx);
+ let repository = workspace.project().read(cx).active_repository(cx);
workspace.toggle_modal(window, cx, |window, cx| {
GitPicker::new(workspace_handle, repository, tab, rems(34.), window, cx)
@@ -281,33 +281,6 @@ fn open_modified_files(
}
}
-/// Resolves the repository for git operations, respecting the workspace's
-/// active worktree override from the project dropdown.
-pub fn resolve_active_repository(workspace: &Workspace, cx: &App) -> Option<Entity<Repository>> {
- let project = workspace.project().read(cx);
- workspace
- .active_worktree_override()
- .and_then(|override_id| {
- project
- .worktree_for_id(override_id, cx)
- .and_then(|worktree| {
- let worktree_abs_path = worktree.read(cx).abs_path();
- let git_store = project.git_store().read(cx);
- git_store
- .repositories()
- .values()
- .filter(|repo| {
- let repo_path = &repo.read(cx).work_directory_abs_path;
- *repo_path == worktree_abs_path
- || worktree_abs_path.starts_with(repo_path.as_ref())
- })
- .max_by_key(|repo| repo.read(cx).work_directory_abs_path.as_os_str().len())
- .cloned()
- })
- })
- .or_else(|| project.active_repository(cx))
-}
-
pub fn git_status_icon(status: FileStatus) -> impl IntoElement {
GitStatusIcon::new(status)
}
@@ -2,7 +2,6 @@ use crate::{
conflict_view::ConflictAddon,
git_panel::{GitPanel, GitPanelAddon, GitStatusEntry},
git_panel_settings::GitPanelSettings,
- resolve_active_repository,
};
use agent_settings::AgentSettings;
use anyhow::{Context as _, Result, anyhow};
@@ -205,7 +204,7 @@ impl ProjectDiff {
"Action"
}
);
- let intended_repo = resolve_active_repository(workspace, cx);
+ let intended_repo = workspace.project().read(cx).active_repository(cx);
let existing = workspace
.items_of_type::<Self>(cx)
@@ -2708,7 +2707,7 @@ mod tests {
}
#[gpui::test]
- async fn test_deploy_at_respects_worktree_override(cx: &mut TestAppContext) {
+ async fn test_deploy_at_respects_active_repository_selection(cx: &mut TestAppContext) {
init_test(cx);
let fs = FakeFs::new(cx.executor());
@@ -2759,9 +2758,12 @@ mod tests {
let workspace = multi_workspace.read_with(cx, |mw, _| mw.workspace().clone());
cx.run_until_parked();
- // Select project A via the dropdown override and open the diff.
+ // Select project A explicitly and open the diff.
workspace.update(cx, |workspace, cx| {
- workspace.set_active_worktree_override(Some(worktree_a_id), cx);
+ let git_store = workspace.project().read(cx).git_store().clone();
+ git_store.update(cx, |git_store, cx| {
+ git_store.set_active_repo_for_worktree(worktree_a_id, cx);
+ });
});
cx.focus(&workspace);
cx.update(|window, cx| {
@@ -2776,9 +2778,12 @@ mod tests {
assert_eq!(paths_a.len(), 1);
assert_eq!(*paths_a[0], *"a.txt");
- // Switch the override to project B and re-run the diff action.
+ // Switch the explicit active repository to project B and re-run the diff action.
workspace.update(cx, |workspace, cx| {
- workspace.set_active_worktree_override(Some(worktree_b_id), cx);
+ let git_store = workspace.project().read(cx).git_store().clone();
+ git_store.update(cx, |git_store, cx| {
+ git_store.set_active_repo_for_worktree(worktree_b_id, cx);
+ });
});
cx.focus(&workspace);
cx.update(|window, cx| {
@@ -594,16 +594,49 @@ impl GitStore {
pub fn is_local(&self) -> bool {
matches!(self.state, GitStoreState::Local { .. })
}
+
+ fn set_active_repo_id(&mut self, repo_id: RepositoryId, cx: &mut Context<Self>) {
+ if self.active_repo_id != Some(repo_id) {
+ self.active_repo_id = Some(repo_id);
+ cx.emit(GitStoreEvent::ActiveRepositoryChanged(Some(repo_id)));
+ }
+ }
+
pub fn set_active_repo_for_path(&mut self, project_path: &ProjectPath, cx: &mut Context<Self>) {
if let Some((repo, _)) = self.repository_and_path_for_project_path(project_path, cx) {
- let id = repo.read(cx).id;
- if self.active_repo_id != Some(id) {
- self.active_repo_id = Some(id);
- cx.emit(GitStoreEvent::ActiveRepositoryChanged(Some(id)));
- }
+ self.set_active_repo_id(repo.read(cx).id, cx);
}
}
+ pub fn set_active_repo_for_worktree(
+ &mut self,
+ worktree_id: WorktreeId,
+ cx: &mut Context<Self>,
+ ) {
+ let Some(worktree) = self
+ .worktree_store
+ .read(cx)
+ .worktree_for_id(worktree_id, cx)
+ else {
+ return;
+ };
+ let worktree_abs_path = worktree.read(cx).abs_path();
+ let Some(repo_id) = self
+ .repositories
+ .values()
+ .filter(|repo| {
+ let repo_path = &repo.read(cx).work_directory_abs_path;
+ *repo_path == worktree_abs_path || worktree_abs_path.starts_with(repo_path.as_ref())
+ })
+ .max_by_key(|repo| repo.read(cx).work_directory_abs_path.as_os_str().len())
+ .map(|repo| repo.read(cx).id)
+ else {
+ return;
+ };
+
+ self.set_active_repo_id(repo_id, cx);
+ }
+
pub fn shared(&mut self, project_id: u64, client: AnyProtoClient, cx: &mut Context<Self>) {
match &mut self.state {
GitStoreState::Remote {
@@ -168,22 +168,20 @@ fn get_open_folders(workspace: &Workspace, cx: &App) -> Vec<OpenFolderEntry> {
return Vec::new();
}
- let active_worktree_id = workspace.active_worktree_override().or_else(|| {
- if let Some(repo) = project.active_repository(cx) {
- let repo = repo.read(cx);
- let repo_path = &repo.work_directory_abs_path;
- for worktree in project.visible_worktrees(cx) {
- let worktree_path = worktree.read(cx).abs_path();
- if worktree_path == *repo_path || worktree_path.starts_with(repo_path.as_ref()) {
- return Some(worktree.read(cx).id());
- }
- }
- }
+ let active_worktree_id = if let Some(repo) = project.active_repository(cx) {
+ let repo = repo.read(cx);
+ let repo_path = &repo.work_directory_abs_path;
+ project.visible_worktrees(cx).find_map(|worktree| {
+ let worktree_path = worktree.read(cx).abs_path();
+ (worktree_path == *repo_path || worktree_path.starts_with(repo_path.as_ref()))
+ .then(|| worktree.read(cx).id())
+ })
+ } else {
project
.visible_worktrees(cx)
.next()
.map(|wt| wt.read(cx).id())
- });
+ };
let mut all_paths: Vec<PathBuf> = visible_worktrees
.iter()
@@ -1118,7 +1116,10 @@ impl PickerDelegate for RecentProjectsDelegate {
let worktree_id = folder.worktree_id;
if let Some(workspace) = self.workspace.upgrade() {
workspace.update(cx, |workspace, cx| {
- workspace.set_active_worktree_override(Some(worktree_id), cx);
+ let git_store = workspace.project().read(cx).git_store().clone();
+ git_store.update(cx, |git_store, cx| {
+ git_store.set_active_repo_for_worktree(worktree_id, cx);
+ });
});
}
cx.emit(DismissEvent);
@@ -761,7 +761,8 @@ impl RemoteConnection for DockerExecConnection {
const TILDE_PREFIX: &'static str = "~/";
if working_dir.starts_with(TILDE_PREFIX) {
let working_dir = working_dir.trim_start_matches("~").trim_start_matches("/");
- parsed_working_dir = Some(format!("$HOME/{working_dir}"));
+ parsed_working_dir =
+ Some(format!("{}/{}", self.remote_dir_for_server, working_dir));
} else {
parsed_working_dir = Some(working_dir);
}
@@ -33,7 +33,6 @@ use onboarding_banner::OnboardingBanner;
use project::{Project, git_store::GitStoreEvent, trusted_worktrees::TrustedWorktrees};
use remote::RemoteConnectionOptions;
use settings::Settings;
-use settings::WorktreeId;
use std::sync::Arc;
use std::time::Duration;
@@ -377,27 +376,13 @@ impl TitleBar {
cx.notify()
}),
);
- subscriptions.push(
- cx.subscribe(&project, |this, _, event: &project::Event, cx| {
- if let project::Event::BufferEdited = event {
- // Clear override when user types in any editor,
- // so the title bar reflects the project they're actually working in
- this.clear_active_worktree_override(cx);
- cx.notify();
- }
- }),
- );
+
subscriptions.push(cx.observe(&active_call, |this, _, cx| this.active_call_changed(cx)));
subscriptions.push(cx.observe_window_activation(window, Self::window_activation_changed));
subscriptions.push(
- cx.subscribe(&git_store, move |this, _, event, cx| match event {
- GitStoreEvent::ActiveRepositoryChanged(_) => {
- // Clear override when focus-derived active repo changes
- // (meaning the user focused a file from a different project)
- this.clear_active_worktree_override(cx);
- cx.notify();
- }
- GitStoreEvent::RepositoryUpdated(_, _, true) => {
+ cx.subscribe(&git_store, move |_, _, event, cx| match event {
+ GitStoreEvent::ActiveRepositoryChanged(_)
+ | GitStoreEvent::RepositoryUpdated(_, _, true) => {
cx.notify();
}
_ => {}
@@ -451,20 +436,11 @@ impl TitleBar {
}
/// Returns the worktree to display in the title bar.
- /// - If there's an override set on the workspace, use that (if still valid)
- /// - Otherwise, derive from the active repository
+ /// - Prefer the worktree owning the project's active repository
/// - Fall back to the first visible worktree
pub fn effective_active_worktree(&self, cx: &App) -> Option<Entity<project::Worktree>> {
let project = self.project.read(cx);
- if let Some(workspace) = self.workspace.upgrade() {
- if let Some(override_id) = workspace.read(cx).active_worktree_override() {
- if let Some(worktree) = project.worktree_for_id(override_id, cx) {
- return Some(worktree);
- }
- }
- }
-
if let Some(repo) = project.active_repository(cx) {
let repo = repo.read(cx);
let repo_path = &repo.work_directory_abs_path;
@@ -480,28 +456,6 @@ impl TitleBar {
project.visible_worktrees(cx).next()
}
- pub fn set_active_worktree_override(
- &mut self,
- worktree_id: WorktreeId,
- cx: &mut Context<Self>,
- ) {
- if let Some(workspace) = self.workspace.upgrade() {
- workspace.update(cx, |workspace, cx| {
- workspace.set_active_worktree_override(Some(worktree_id), cx);
- });
- }
- cx.notify();
- }
-
- fn clear_active_worktree_override(&mut self, cx: &mut Context<Self>) {
- if let Some(workspace) = self.workspace.upgrade() {
- workspace.update(cx, |workspace, cx| {
- workspace.clear_active_worktree_override(cx);
- });
- }
- cx.notify();
- }
-
fn get_repository_for_worktree(
&self,
worktree: &Entity<project::Worktree>,
@@ -1325,7 +1325,6 @@ pub struct Workspace {
bottom_dock: Entity<Dock>,
right_dock: Entity<Dock>,
panes: Vec<Entity<Pane>>,
- active_worktree_override: Option<WorktreeId>,
panes_by_item: HashMap<EntityId, WeakEntity<Pane>>,
active_pane: Entity<Pane>,
last_active_center_pane: Option<WeakEntity<Pane>>,
@@ -1758,7 +1757,6 @@ impl Workspace {
modal_layer,
toast_layer,
titlebar_item: None,
- active_worktree_override: None,
notifications: Notifications::default(),
suppressed_notifications: HashSet::default(),
left_dock,
@@ -2951,27 +2949,6 @@ impl Workspace {
self.titlebar_item.clone()
}
- /// Returns the worktree override set by the user (e.g., via the project dropdown).
- /// When set, git-related operations should use this worktree instead of deriving
- /// the active worktree from the focused file.
- pub fn active_worktree_override(&self) -> Option<WorktreeId> {
- self.active_worktree_override
- }
-
- pub fn set_active_worktree_override(
- &mut self,
- worktree_id: Option<WorktreeId>,
- cx: &mut Context<Self>,
- ) {
- self.active_worktree_override = worktree_id;
- cx.notify();
- }
-
- pub fn clear_active_worktree_override(&mut self, cx: &mut Context<Self>) {
- self.active_worktree_override = None;
- cx.notify();
- }
-
/// Call the given callback with a workspace whose project is local or remote via WSL (allowing host access).
///
/// If the given workspace has a local project, then it will be passed
@@ -39,8 +39,16 @@ fn run_cherry_pick(
.add_env(("BRANCH", branch.to_string()))
.add_env(("COMMIT", commit.to_string()))
.add_env(("CHANNEL", channel.to_string()))
- .add_env(("GIT_COMMITTER_NAME", "Zed Zippy"))
- .add_env(("GIT_COMMITTER_EMAIL", "hi@zed.dev"))
+ .add_env(("GIT_AUTHOR_NAME", "zed-zippy[bot]"))
+ .add_env((
+ "GIT_AUTHOR_EMAIL",
+ "<234243425+zed-zippy[bot]@users.noreply.github.com>",
+ ))
+ .add_env(("GIT_COMMITTER_NAME", "zed-zippy[bot]"))
+ .add_env((
+ "GIT_COMMITTER_EMAIL",
+ "<234243425+zed-zippy[bot]@users.noreply.github.com>",
+ ))
.add_env(("GITHUB_TOKEN", token))
}
@@ -1,14 +1,10 @@
-use gh_workflow::{Event, Job, Run, Schedule, Step, Workflow, WorkflowDispatch};
-use indoc::formatdoc;
+use gh_workflow::{Event, Job, Schedule, Workflow, WorkflowDispatch};
use crate::tasks::workflows::{
- release::{
- COMPLIANCE_REPORT_PATH, COMPLIANCE_STEP_ID, ComplianceContext,
- add_compliance_notification_steps,
- },
+ release::{ComplianceContext, add_compliance_steps},
runners,
steps::{self, CommonJobConditions, named},
- vars::{self, StepOutput},
+ vars::StepOutput,
};
pub fn compliance_check() -> Workflow {
@@ -37,31 +33,20 @@ fn scheduled_compliance_check() -> steps::NamedJob {
let tag_output = StepOutput::new(&determine_version_step, "tag");
- fn run_compliance_check(tag: &StepOutput) -> Step<Run> {
- named::bash(
- formatdoc! {r#"
- cargo xtask compliance "$LATEST_TAG" --branch main --report-path "{COMPLIANCE_REPORT_PATH}"
- "#,
- }
- )
- .id(COMPLIANCE_STEP_ID)
- .add_env(("LATEST_TAG", tag.to_string()))
- .add_env(("GITHUB_APP_ID", vars::ZED_ZIPPY_APP_ID))
- .add_env(("GITHUB_APP_KEY", vars::ZED_ZIPPY_APP_PRIVATE_KEY))
- }
-
let job = Job::default()
.with_repository_owner_guard()
.runs_on(runners::LINUX_SMALL)
.add_step(steps::checkout_repo().with_full_history())
.add_step(steps::cache_rust_dependencies_namespace())
- .add_step(determine_version_step)
- .add_step(run_compliance_check(&tag_output));
-
- named::job(add_compliance_notification_steps(
- job,
- ComplianceContext::Scheduled {
- tag_source: tag_output,
- },
- ))
+ .add_step(determine_version_step);
+
+ named::job(
+ add_compliance_steps(
+ job,
+ ComplianceContext::Scheduled {
+ tag_source: tag_output,
+ },
+ )
+ .0,
+ )
}
@@ -6,7 +6,7 @@ use crate::tasks::workflows::{
run_tests,
runners::{self, Arch, Platform},
steps::{self, FluentBuilder, NamedJob, dependant_job, named, release_job},
- vars::{self, StepOutput, assets},
+ vars::{self, JobOutput, StepOutput, assets},
};
const CURRENT_ACTION_RUN_URL: &str =
@@ -22,7 +22,7 @@ pub(crate) fn release() -> Workflow {
let check_scripts = run_tests::check_scripts();
let create_draft_release = create_draft_release();
- let compliance = compliance_check();
+ let (non_blocking_compliance_run, job_output) = compliance_check();
let bundle = ReleaseBundleJobs {
linux_aarch64: bundle_linux(
@@ -58,7 +58,10 @@ pub(crate) fn release() -> Workflow {
};
let upload_release_assets = upload_release_assets(&[&create_draft_release], &bundle);
- let validate_release_assets = validate_release_assets(&[&upload_release_assets]);
+ let validate_release_assets = validate_release_assets(
+ &[&upload_release_assets, &non_blocking_compliance_run],
+ job_output,
+ );
let auto_release_preview = auto_release_preview(&[&validate_release_assets]);
@@ -93,7 +96,10 @@ pub(crate) fn release() -> Workflow {
.add_job(windows_clippy.name, windows_clippy.job)
.add_job(check_scripts.name, check_scripts.job)
.add_job(create_draft_release.name, create_draft_release.job)
- .add_job(compliance.name, compliance.job)
+ .add_job(
+ non_blocking_compliance_run.name,
+ non_blocking_compliance_run.job,
+ )
.map(|mut workflow| {
for job in bundle.into_jobs() {
workflow = workflow.add_job(job.name, job.job);
@@ -156,25 +162,65 @@ pub(crate) const COMPLIANCE_STEP_ID: &str = "run-compliance-check";
const NEEDS_REVIEW_PULLS_URL: &str = "https://github.com/zed-industries/zed/pulls?q=is%3Apr+is%3Aclosed+label%3A%22PR+state%3Aneeds+review%22";
pub(crate) enum ComplianceContext {
- Release,
+ Release { non_blocking_outcome: JobOutput },
ReleaseNonBlocking,
Scheduled { tag_source: StepOutput },
}
-pub(crate) fn add_compliance_notification_steps(
+impl ComplianceContext {
+ fn tag_source(&self) -> Option<&StepOutput> {
+ match self {
+ ComplianceContext::Scheduled { tag_source } => Some(tag_source),
+ _ => None,
+ }
+ }
+}
+
+pub(crate) fn add_compliance_steps(
job: gh_workflow::Job,
context: ComplianceContext,
-) -> gh_workflow::Job {
+) -> (gh_workflow::Job, StepOutput) {
+ fn run_compliance_check(context: &ComplianceContext) -> (Step<Run>, StepOutput) {
+ let job = named::bash(
+ formatdoc! {r#"
+ cargo xtask compliance {target} --report-path "{COMPLIANCE_REPORT_PATH}"
+ "#,
+ target = if context.tag_source().is_some() { r#""$LATEST_TAG" --branch main"# } else { r#""$GITHUB_REF_NAME""# },
+ }
+ )
+ .id(COMPLIANCE_STEP_ID)
+ .add_env(("GITHUB_APP_ID", vars::ZED_ZIPPY_APP_ID))
+ .add_env(("GITHUB_APP_KEY", vars::ZED_ZIPPY_APP_PRIVATE_KEY))
+ .when_some(context.tag_source(), |step, tag_source| {
+ step.add_env(("LATEST_TAG", tag_source.to_string()))
+ })
+ .when(
+ matches!(
+ context,
+ ComplianceContext::Scheduled { .. } | ComplianceContext::ReleaseNonBlocking
+ ),
+ |step| step.continue_on_error(true),
+ );
+
+ let result = StepOutput::new_unchecked(&job, "outcome");
+ (job, result)
+ }
+
let upload_step = upload_artifact(COMPLIANCE_REPORT_PATH)
.if_condition(Expression::new("always()"))
- .when(matches!(context, ComplianceContext::Release), |step| {
- step.add_with(("overwrite", true))
- });
+ .when(
+ matches!(context, ComplianceContext::Release { .. }),
+ |step| step.add_with(("overwrite", true)),
+ );
let (success_prefix, failure_prefix) = match context {
- ComplianceContext::Release | ComplianceContext::ReleaseNonBlocking => {
+ ComplianceContext::Release { .. } => {
("✅ Compliance check passed", "❌ Compliance check failed")
}
+ ComplianceContext::ReleaseNonBlocking => (
+ "✅ Compliance check passed",
+ "❌ Preliminary compliance check failed (but this can still be fixed while the builds are running!)",
+ ),
ComplianceContext::Scheduled { .. } => (
"✅ Scheduled compliance check passed",
"⚠️ Scheduled compliance check failed",
@@ -198,7 +244,17 @@ pub(crate) fn add_compliance_notification_steps(
let notification_step = Step::new("send_compliance_slack_notification")
.run(&script)
- .if_condition(Expression::new("always()"))
+ .if_condition(match &context {
+ ComplianceContext::Release {
+ non_blocking_outcome,
+ } => Expression::new(format!(
+ "failure() || {prior_outcome} != 'success'",
+ prior_outcome = non_blocking_outcome.expr()
+ )),
+ ComplianceContext::Scheduled { .. } | ComplianceContext::ReleaseNonBlocking => {
+ Expression::new("always()")
+ }
+ })
.add_env(("SLACK_WEBHOOK", vars::SLACK_WEBHOOK_WORKFLOW_FAILURES))
.add_env((
"COMPLIANCE_OUTCOME",
@@ -206,8 +262,8 @@ pub(crate) fn add_compliance_notification_steps(
))
.add_env((
"COMPLIANCE_TAG",
- match context {
- ComplianceContext::Release | ComplianceContext::ReleaseNonBlocking => {
+ match &context {
+ ComplianceContext::Release { .. } | ComplianceContext::ReleaseNonBlocking => {
Context::github().ref_name().to_string()
}
ComplianceContext::Scheduled { tag_source } => tag_source.to_string(),
@@ -218,21 +274,21 @@ pub(crate) fn add_compliance_notification_steps(
format!("{CURRENT_ACTION_RUN_URL}#artifacts"),
));
- job.add_step(upload_step).add_step(notification_step)
-}
+ let (compliance_step, check_result) = run_compliance_check(&context);
-fn run_compliance_check() -> Step<Run> {
- named::bash(formatdoc! {r#"
- cargo xtask compliance "$GITHUB_REF_NAME" --report-path "{COMPLIANCE_REPORT_PATH}"
- "#,
- })
- .id(COMPLIANCE_STEP_ID)
- .add_env(("GITHUB_APP_ID", vars::ZED_ZIPPY_APP_ID))
- .add_env(("GITHUB_APP_KEY", vars::ZED_ZIPPY_APP_PRIVATE_KEY))
- .continue_on_error(true)
+ (
+ job.add_step(compliance_step)
+ .add_step(upload_step)
+ .add_step(notification_step)
+ .when(
+ matches!(context, ComplianceContext::ReleaseNonBlocking),
+ |step| step.outputs([("outcome".to_string(), check_result.to_string())]),
+ ),
+ check_result,
+ )
}
-fn compliance_check() -> NamedJob {
+fn compliance_check() -> (NamedJob, JobOutput) {
let job = release_job(&[])
.runs_on(runners::LINUX_SMALL)
.add_step(
@@ -240,16 +296,17 @@ fn compliance_check() -> NamedJob {
.with_full_history()
.with_ref(Context::github().ref_()),
)
- .add_step(steps::cache_rust_dependencies_namespace())
- .add_step(run_compliance_check());
+ .add_step(steps::cache_rust_dependencies_namespace());
+
+ let (compliance_job, check_result) =
+ add_compliance_steps(job, ComplianceContext::ReleaseNonBlocking);
+ let compliance_job = named::job(compliance_job);
+ let check_result = check_result.as_job_output(&compliance_job);
- named::job(add_compliance_notification_steps(
- job,
- ComplianceContext::ReleaseNonBlocking,
- ))
+ (compliance_job, check_result)
}
-fn validate_release_assets(deps: &[&NamedJob]) -> NamedJob {
+fn validate_release_assets(deps: &[&NamedJob], context_check_result: JobOutput) -> NamedJob {
let expected_assets: Vec<String> = assets::all().iter().map(|a| format!("\"{a}\"")).collect();
let expected_assets_json = format!("[{}]", expected_assets.join(", "));
@@ -279,13 +336,17 @@ fn validate_release_assets(deps: &[&NamedJob]) -> NamedJob {
.with_full_history()
.with_ref(Context::github().ref_()),
)
- .add_step(steps::cache_rust_dependencies_namespace())
- .add_step(run_compliance_check());
+ .add_step(steps::cache_rust_dependencies_namespace());
- named::job(add_compliance_notification_steps(
- job,
- ComplianceContext::Release,
- ))
+ named::job(
+ add_compliance_steps(
+ job,
+ ComplianceContext::Release {
+ non_blocking_outcome: context_check_result,
+ },
+ )
+ .0,
+ )
}
fn auto_release_preview(deps: &[&NamedJob]) -> NamedJob {
@@ -167,7 +167,7 @@ impl StepOutput {
.run
.as_ref()
.is_none_or(|run_command| run_command.contains(name)),
- "Step Output name {name} must occur at least once in run command with ID {step_id}!"
+ "Step output with name '{name}' must occur at least once in run command with ID {step_id}!"
);
Self { name, step_id }