Detailed changes
@@ -3969,6 +3969,45 @@ mod tests {
);
}
+ #[gpui::test]
+ async fn test_streaming_edit_preserves_blank_line_after_trailing_newline_replacement(
+ cx: &mut TestAppContext,
+ ) {
+ let file_content = "before\ntarget\n\nafter\n";
+ let old_text = "target\n";
+ let new_text = "one\ntwo\ntarget\n";
+ let expected = "before\none\ntwo\ntarget\n\nafter\n";
+
+ let (tool, _project, _action_log, _fs, _thread) =
+ setup_test(cx, json!({"file.rs": file_content})).await;
+ let (sender, input) = ToolInput::<StreamingEditFileToolInput>::test();
+ let (event_stream, _receiver) = ToolCallEventStream::test();
+ let task = cx.update(|cx| tool.clone().run(input, event_stream, cx));
+
+ sender.send_final(json!({
+ "display_description": "description",
+ "path": "root/file.rs",
+ "mode": "edit",
+ "edits": [{"old_text": old_text, "new_text": new_text}]
+ }));
+
+ let result = task.await;
+
+ let StreamingEditFileToolOutput::Success {
+ new_text: final_text,
+ ..
+ } = result.unwrap()
+ else {
+ panic!("expected success");
+ };
+
+ pretty_assertions::assert_eq!(
+ final_text,
+ expected,
+ "Edit should preserve a single blank line before `after`"
+ );
+ }
+
#[gpui::test]
async fn test_streaming_reject_created_file_deletes_it(cx: &mut TestAppContext) {
let (tool, _project, action_log, fs, _thread) = setup_test(cx, json!({"dir": {}})).await;
@@ -78,7 +78,7 @@ impl ToolEditParser {
if partial.new_text.is_some() {
// new_text appeared, so old_text is done — emit everything.
let start = state.old_text_emitted_len.min(old_text.len());
- let chunk = old_text[start..].to_string();
+ let chunk = normalize_done_chunk(old_text[start..].to_string());
state.old_text_done = true;
state.old_text_emitted_len = old_text.len();
events.push(ToolEditEvent::OldTextChunk {
@@ -87,7 +87,8 @@ impl ToolEditParser {
done: true,
});
} else {
- let safe_end = safe_emit_end(old_text);
+ let safe_end = safe_emit_end_for_edit_text(old_text);
+
if safe_end > state.old_text_emitted_len {
let chunk = old_text[state.old_text_emitted_len..safe_end].to_string();
state.old_text_emitted_len = safe_end;
@@ -104,7 +105,8 @@ impl ToolEditParser {
if let Some(new_text) = &partial.new_text
&& !state.new_text_done
{
- let safe_end = safe_emit_end(new_text);
+ let safe_end = safe_emit_end_for_edit_text(new_text);
+
if safe_end > state.new_text_emitted_len {
let chunk = new_text[state.new_text_emitted_len..safe_end].to_string();
state.new_text_emitted_len = safe_end;
@@ -160,7 +162,7 @@ impl ToolEditParser {
if !state.old_text_done {
let start = state.old_text_emitted_len.min(edit.old_text.len());
- let chunk = edit.old_text[start..].to_string();
+ let chunk = normalize_done_chunk(edit.old_text[start..].to_string());
state.old_text_done = true;
state.old_text_emitted_len = edit.old_text.len();
events.push(ToolEditEvent::OldTextChunk {
@@ -172,7 +174,7 @@ impl ToolEditParser {
if !state.new_text_done {
let start = state.new_text_emitted_len.min(edit.new_text.len());
- let chunk = edit.new_text[start..].to_string();
+ let chunk = normalize_done_chunk(edit.new_text[start..].to_string());
state.new_text_done = true;
state.new_text_emitted_len = edit.new_text.len();
events.push(ToolEditEvent::NewTextChunk {
@@ -252,6 +254,22 @@ fn safe_emit_end(text: &str) -> usize {
}
}
+fn safe_emit_end_for_edit_text(text: &str) -> usize {
+ let safe_end = safe_emit_end(text);
+ if safe_end > 0 && text.as_bytes()[safe_end - 1] == b'\n' {
+ safe_end - 1
+ } else {
+ safe_end
+ }
+}
+
+fn normalize_done_chunk(mut chunk: String) -> String {
+ if chunk.ends_with('\n') {
+ chunk.pop();
+ }
+ chunk
+}
+
#[cfg(test)]
mod tests {
use super::*;
@@ -337,6 +355,69 @@ mod tests {
);
}
+ #[test]
+ fn test_done_chunks_strip_trailing_newline() {
+ let mut parser = ToolEditParser::default();
+
+ let events = parser.finalize_edits(&[Edit {
+ old_text: "before\n".into(),
+ new_text: "after\n".into(),
+ }]);
+ assert_eq!(
+ events.as_slice(),
+ &[
+ ToolEditEvent::OldTextChunk {
+ edit_index: 0,
+ chunk: "before".into(),
+ done: true,
+ },
+ ToolEditEvent::NewTextChunk {
+ edit_index: 0,
+ chunk: "after".into(),
+ done: true,
+ },
+ ]
+ );
+ }
+
+ #[test]
+ fn test_partial_edit_chunks_hold_back_trailing_newline() {
+ let mut parser = ToolEditParser::default();
+
+ let events = parser.push_edits(&[PartialEdit {
+ old_text: Some("before\n".into()),
+ new_text: Some("after\n".into()),
+ }]);
+ assert_eq!(
+ events.as_slice(),
+ &[
+ ToolEditEvent::OldTextChunk {
+ edit_index: 0,
+ chunk: "before".into(),
+ done: true,
+ },
+ ToolEditEvent::NewTextChunk {
+ edit_index: 0,
+ chunk: "after".into(),
+ done: false,
+ },
+ ]
+ );
+
+ let events = parser.finalize_edits(&[Edit {
+ old_text: "before\n".into(),
+ new_text: "after\n".into(),
+ }]);
+ assert_eq!(
+ events.as_slice(),
+ &[ToolEditEvent::NewTextChunk {
+ edit_index: 0,
+ chunk: "".into(),
+ done: true,
+ }]
+ );
+ }
+
#[test]
fn test_multiple_edits_sequential() {
let mut parser = ToolEditParser::default();
@@ -858,22 +939,16 @@ mod tests {
);
// Next partial: the fixer corrects the escape to \n.
- // The held-back byte was wrong, but we never emitted it. Now the
- // correct newline at that position is emitted normally.
+ // Because edit text also holds back a trailing newline, nothing new
+ // is emitted yet.
let events = parser.push_edits(&[PartialEdit {
old_text: Some("hello,\n".into()),
new_text: None,
}]);
- assert_eq!(
- events.as_slice(),
- &[ToolEditEvent::OldTextChunk {
- edit_index: 0,
- chunk: "\n".into(),
- done: false,
- }]
- );
+ assert!(events.is_empty());
- // Continue normally.
+ // Continue normally. The held-back newline is emitted together with the
+ // next content once it is no longer trailing.
let events = parser.push_edits(&[PartialEdit {
old_text: Some("hello,\nworld".into()),
new_text: None,
@@ -882,7 +957,7 @@ mod tests {
events.as_slice(),
&[ToolEditEvent::OldTextChunk {
edit_index: 0,
- chunk: "world".into(),
+ chunk: "\nworld".into(),
done: false,
}]
);
@@ -919,7 +994,7 @@ mod tests {
},
ToolEditEvent::NewTextChunk {
edit_index: 0,
- chunk: "LINE1\n".into(),
+ chunk: "LINE1".into(),
done: false,
},
]
@@ -933,7 +1008,7 @@ mod tests {
events.as_slice(),
&[ToolEditEvent::NewTextChunk {
edit_index: 0,
- chunk: "LINE2\nLINE3".into(),
+ chunk: "\nLINE2\nLINE3".into(),
done: false,
}]
);
@@ -23,6 +23,7 @@ test-support = [
"workspace/test-support",
"agent/test-support",
]
+audio = ["dep:audio"]
unit-eval = []
[dependencies]
@@ -38,7 +39,7 @@ heapless.workspace = true
assistant_text_thread.workspace = true
assistant_slash_command.workspace = true
assistant_slash_commands.workspace = true
-audio.workspace = true
+audio = { workspace = true, optional = true }
base64.workspace = true
buffer_diff.workspace = true
chrono.workspace = true
@@ -13,6 +13,7 @@ use agent_servers::AgentServerDelegate;
use agent_servers::{AgentServer, GEMINI_TERMINAL_AUTH_METHOD_ID};
use agent_settings::{AgentProfileId, AgentSettings};
use anyhow::{Result, anyhow};
+#[cfg(feature = "audio")]
use audio::{Audio, Sound};
use buffer_diff::BufferDiff;
use client::zed_urls;
@@ -2278,6 +2279,7 @@ impl ConversationView {
window: &mut Window,
cx: &mut Context<Self>,
) {
+ #[cfg(feature = "audio")]
self.play_notification_sound(window, cx);
self.show_notification(caption, icon, window, cx);
}
@@ -7,55 +7,44 @@
# Or use the helper script:
# crates/eval_cli/script/build-linux
-FROM rust:1.93.1-bookworm AS builder
+FROM rust:1.93 AS builder
WORKDIR /app
-# Install build dependencies (subset of script/linux needed for headless GPUI).
+# Pre-install the toolchain specified in rust-toolchain.toml so it is cached.
+RUN rustup toolchain install 1.93 --profile minimal \
+ --component rustfmt --component clippy --component rust-analyzer --component rust-src \
+ --target wasm32-wasip2 --target wasm32-unknown-unknown --target x86_64-unknown-linux-musl --target x86_64-unknown-linux-gnu
+
+# Install build tools. cmake + build-essential are needed for vendored C
+# libraries (libgit2-sys, zstd-sys, libsqlite3-sys). No audio/GUI -dev
+# packages required — eval-cli runs headless with those features disabled.
+#
+# cargo-zigbuild cross-compiles against a specific glibc version (2.31 =
+# Debian Bullseye / Ubuntu Focal) so the resulting binary is portable to
+# any Linux distro with glibc >= 2.31.
RUN apt-get update && apt-get install -y --no-install-recommends \
cmake \
- clang \
- g++ \
- libasound2-dev \
- libfontconfig-dev \
- libgit2-dev \
- libglib2.0-dev \
- libssl-dev \
- libwayland-dev \
- libx11-xcb-dev \
- libxkbcommon-x11-dev \
- libzstd-dev \
- libsqlite3-dev \
build-essential \
curl \
+ xz-utils \
&& rm -rf /var/lib/apt/lists/*
-# Install wild linker for faster linking (built from source to match bookworm's glibc).
-RUN cargo install --locked wild-linker --version 0.8.0 --root /usr/local
+RUN mkdir -p /opt/zig \
+ && curl -fsSL https://ziglang.org/download/0.15.2/zig-x86_64-linux-0.15.2.tar.xz \
+ | tar -xJ -C /opt/zig --strip-components=1 \
+ && ln -s /opt/zig/zig /usr/local/bin/zig
-# Download WASI SDK (needed by some dependencies).
-ARG TARGETARCH
-RUN mkdir -p /app/target && \
- WASI_ARCH=$([ "$TARGETARCH" = "arm64" ] && echo "arm64" || echo "x86_64") && \
- curl -L "https://github.com/WebAssembly/wasi-sdk/releases/download/wasi-sdk-25/wasi-sdk-25.0-${WASI_ARCH}-linux.tar.gz" \
- | tar -xz -C /app/target && \
- mv /app/target/wasi-sdk-25.0-${WASI_ARCH}-linux /app/target/wasi-sdk
-
-# Pre-install the toolchain specified in rust-toolchain.toml so it is cached.
-RUN rustup toolchain install 1.93 --profile minimal \
- --component rustfmt --component clippy --component rust-analyzer --component rust-src \
- --target wasm32-wasip2 --target wasm32-unknown-unknown --target x86_64-unknown-linux-musl
+RUN cargo install --locked cargo-zigbuild
COPY . .
-ENV CC=clang CXX=clang++
-ENV RUSTFLAGS="-C linker=clang -C link-arg=--ld-path=wild"
-
RUN --mount=type=cache,target=/usr/local/cargo/registry \
--mount=type=cache,target=/usr/local/cargo/git \
--mount=type=cache,target=/app/target \
- cargo build --release --package eval_cli && \
- cp /app/target/release/eval-cli /eval-cli && \
+ cargo zigbuild --release --package eval_cli \
+ --target x86_64-unknown-linux-gnu.2.31 && \
+ cp /app/target/x86_64-unknown-linux-gnu/release/eval-cli /eval-cli && \
strip /eval-cli
FROM scratch
@@ -1,8 +1,10 @@
#!/usr/bin/env bash
#
# Build eval-cli for x86_64 Linux from any host (macOS, Linux, etc.)
-# using Docker. The resulting binary is placed at the path printed on
-# completion (default: target/eval-cli).
+# using Docker + cargo-zigbuild. Targets glibc 2.31 (Debian Bullseye /
+# Ubuntu Focal) so the binary is portable to any modern Linux distro.
+# The resulting binary is placed at the path printed on completion
+# (default: target/eval-cli).
#
# Usage:
# crates/eval_cli/script/build-linux [--output PATH]
@@ -36,7 +38,7 @@ cd "$REPO_ROOT"
IMAGE_TAG="eval-cli-builder"
-echo "Building eval-cli for x86_64-unknown-linux-gnu..."
+echo "Building eval-cli for x86_64-unknown-linux-gnu (glibc >= 2.31)..."
echo " Repo root: $REPO_ROOT"
echo " Output: $OUTPUT"
echo ""
@@ -22,7 +22,7 @@ import os
import shlex
from pathlib import Path
-from harbor.agents.installed.base import BaseInstalledAgent, ExecInput
+from harbor.agents.installed.base import BaseInstalledAgent, with_prompt_template
from harbor.environments.base import BaseEnvironment
from harbor.models.agent.context import AgentContext
@@ -51,12 +51,143 @@ class ZedAgent(BaseInstalledAgent):
def name() -> str:
return "zed"
- @property
- def _install_agent_template_path(self) -> Path:
- return Path(__file__).parent / "install.sh.j2"
+ async def _detect_workdir(self, environment: BaseEnvironment) -> str:
+ """Detect the repo working directory inside the container.
+
+ Checks, in order:
+ 1. Explicit ``EVAL_CLI_WORKDIR`` extra-env override
+ 2. ``/app`` (SWE-bench Pro)
+ 3. ``/testbed`` (SWE-bench Verified)
+ 4. ``/repo``
+ 5. First git repo found under ``/`` (max depth 3)
+ """
+ override = self._extra_env.get("EVAL_CLI_WORKDIR")
+ if override:
+ return override
+
+ result = await self.exec_as_agent(
+ environment,
+ command=(
+ "for d in /app /testbed /repo; do "
+ ' if [ -d "$d/.git" ]; then echo "$d"; exit 0; fi; '
+ "done; "
+ "find / -maxdepth 3 -name .git -type d 2>/dev/null "
+ '| head -1 | sed "s|/.git$||"'
+ ),
+ )
+ workdir = result.stdout.strip()
+ if not workdir:
+ raise RuntimeError(
+ "Could not find a git repository in the container. "
+ "Set EVAL_CLI_WORKDIR explicitly via --ae EVAL_CLI_WORKDIR=/path/to/repo"
+ )
+ return workdir
+
+ async def install(self, environment: BaseEnvironment) -> None:
+ await self.exec_as_root(
+ environment,
+ command=(
+ "apt-get update && "
+ "apt-get install -y --no-install-recommends "
+ "ca-certificates "
+ "curl "
+ "git"
+ ),
+ env={"DEBIAN_FRONTEND": "noninteractive"},
+ )
+
+ await self.exec_as_root(
+ environment,
+ command=(
+ "curl -fsSL https://deb.nodesource.com/setup_22.x | bash - && "
+ "apt-get install -y --no-install-recommends nodejs"
+ ),
+ env={"DEBIAN_FRONTEND": "noninteractive"},
+ )
+
+ # Pre-install default LSPs so Zed doesn't have to download them at
+ # runtime. Each gets its own subdirectory under $ZED_DATA_DIR/languages.
+ await self.exec_as_agent(
+ environment,
+ command=(
+ "set -euo pipefail; "
+ 'ZED_DATA_DIR="${XDG_DATA_HOME:-$HOME/.local/share}/zed"; '
+ # basedpyright (Python - default type checker)
+ 'BASEDPYRIGHT_DIR="$ZED_DATA_DIR/languages/basedpyright"; '
+ 'mkdir -p "$BASEDPYRIGHT_DIR"; '
+ 'npm install --prefix "$BASEDPYRIGHT_DIR" --save-exact basedpyright; '
+ # typescript-language-server (TypeScript/JS - default LSP)
+ 'TSSERVER_DIR="$ZED_DATA_DIR/languages/typescript-language-server"; '
+ 'mkdir -p "$TSSERVER_DIR"; '
+ 'npm install --prefix "$TSSERVER_DIR" --save-exact typescript typescript-language-server; '
+ # vtsls (VS Code TypeScript language features)
+ 'VTSLS_DIR="$ZED_DATA_DIR/languages/vtsls"; '
+ 'mkdir -p "$VTSLS_DIR"; '
+ 'npm install --prefix "$VTSLS_DIR" --save-exact @vtsls/language-server typescript; '
+ # tailwindcss-language-server
+ 'TAILWIND_DIR="$ZED_DATA_DIR/languages/tailwindcss-language-server"; '
+ 'mkdir -p "$TAILWIND_DIR"; '
+ 'npm install --prefix "$TAILWIND_DIR" --save-exact @tailwindcss/language-server'
+ ),
+ )
- async def setup(self, environment: BaseEnvironment) -> None:
- await environment.exec(command="mkdir -p /installed-agent")
+ # eslint LSP (downloaded from zed-industries/vscode-eslint GitHub release,
+ # then compiled — this mirrors what Zed does at runtime).
+ await self.exec_as_agent(
+ environment,
+ command=(
+ "set -euo pipefail; "
+ 'ZED_DATA_DIR="${XDG_DATA_HOME:-$HOME/.local/share}/zed"; '
+ 'ESLINT_DIR="$ZED_DATA_DIR/languages/eslint/vscode-eslint-2.4.4"; '
+ 'mkdir -p "$ESLINT_DIR"; '
+ 'curl -fsSL "https://github.com/zed-industries/vscode-eslint/archive/refs/tags/release/2.4.4.tar.gz" '
+ '| tar -xz -C "$ESLINT_DIR"; '
+ 'mv "$ESLINT_DIR"/vscode-eslint-release-2.4.4 "$ESLINT_DIR/vscode-eslint"; '
+ 'cd "$ESLINT_DIR/vscode-eslint" && npm install && npm run compile'
+ ),
+ )
+
+ # gopls (Go - default LSP). Only install when Go is present in the
+ # container (i.e. Go-related SWE-bench tasks).
+ await self.exec_as_agent(
+ environment,
+ command=(
+ "if command -v go >/dev/null 2>&1; then "
+ "go install golang.org/x/tools/gopls@latest; "
+ "fi"
+ ),
+ )
+
+ await self.exec_as_agent(
+ environment,
+ command=(
+ "curl -LsSf https://astral.sh/uv/install.sh | sh && "
+ '. "$HOME/.local/bin/env"'
+ ),
+ )
+
+ agent_home_result = await self.exec_as_agent(
+ environment,
+ command='printf %s "$HOME"',
+ )
+ agent_home = agent_home_result.stdout.strip()
+ if not agent_home:
+ raise RuntimeError("Could not determine agent home directory")
+
+ await self.exec_as_root(
+ environment,
+ command=(
+ f"ln -sf {shlex.quote(agent_home + '/.local/bin/uv')} /usr/local/bin/uv && "
+ f"ln -sf {shlex.quote(agent_home + '/.local/bin/uvx')} /usr/local/bin/uvx"
+ ),
+ )
+
+ # Install a modern ruff so `ruff server` works without --preview.
+ # This also makes it available as a CLI tool for the agent.
+ await self.exec_as_agent(
+ environment,
+ command=('export PATH="$HOME/.local/bin:$PATH" && uv tool install ruff'),
+ )
if self._binary_path:
binary = Path(self._binary_path)
@@ -69,18 +200,29 @@ class ZedAgent(BaseInstalledAgent):
source_path=binary,
target_path="/usr/local/bin/eval-cli",
)
- await environment.exec(command="chmod +x /usr/local/bin/eval-cli")
-
- await super().setup(environment)
+ await self.exec_as_root(
+ environment,
+ command="chmod +x /usr/local/bin/eval-cli && eval-cli --help",
+ )
+ return
- @property
- def _template_variables(self) -> dict[str, str]:
- variables = super()._template_variables
- if self._binary_path:
- variables["binary_uploaded"] = "true"
if self._download_url:
- variables["download_url"] = self._download_url
- return variables
+ await self.exec_as_root(
+ environment,
+ command=(
+ f"curl -fsSL {shlex.quote(self._download_url)} "
+ "-o /usr/local/bin/eval-cli && "
+ "chmod +x /usr/local/bin/eval-cli && "
+ "eval-cli --help"
+ ),
+ )
+ return
+
+ raise ValueError(
+ "No eval-cli binary provided. "
+ "Either pass binary_path=/path/to/target/release/eval-cli "
+ "or set download_url=<url> (or the EVAL_CLI_DOWNLOAD_URL env var)."
+ )
def populate_context_post_run(self, context: AgentContext) -> None:
result_data = None
@@ -131,18 +273,27 @@ class ZedAgent(BaseInstalledAgent):
return env
- def create_run_agent_commands(self, instruction: str) -> list[ExecInput]:
+ @with_prompt_template
+ async def run(
+ self, instruction: str, environment: BaseEnvironment, context: AgentContext
+ ) -> None:
escaped_instruction = shlex.quote(instruction)
env = self._get_api_env()
- parts = ["eval-cli", "--workdir /testbed", "--output-dir /logs/agent"]
+ workdir = await self._detect_workdir(environment)
+
+ parts = [
+ "eval-cli",
+ f"--workdir {shlex.quote(workdir)}",
+ "--output-dir /logs/agent",
+ ]
if self.model_name:
- parts.append(f"--model {self.model_name}")
+ parts.append(f"--model {shlex.quote(self.model_name)}")
timeout = self._extra_env.get("EVAL_CLI_TIMEOUT")
if timeout:
- parts.append(f"--timeout {timeout}")
+ parts.append(f"--timeout {shlex.quote(timeout)}")
staff = self._extra_env.get("EVAL_CLI_STAFF")
if staff and staff.lower() == "false":
@@ -161,18 +312,20 @@ class ZedAgent(BaseInstalledAgent):
parts.append(f"--instruction {escaped_instruction}")
- eval_cli_command = (
- " ".join(parts) + " 2>&1 | stdbuf -oL tee /logs/agent/eval-cli.txt"
+ await self.exec_as_agent(
+ environment,
+ command=(
+ " ".join(parts) + " 2>&1 | stdbuf -oL tee /logs/agent/eval-cli.txt"
+ ),
+ env=env,
)
- patch_command = (
- "cd /testbed && "
- "git add -A && "
- "git diff --cached HEAD > /logs/agent/patch.diff && "
- 'echo "Patch size: $(wc -c < /logs/agent/patch.diff) bytes"'
+ await self.exec_as_agent(
+ environment,
+ command=(
+ "git add -A && "
+ "git diff --cached HEAD > /logs/agent/patch.diff && "
+ 'echo "Patch size: $(wc -c < /logs/agent/patch.diff) bytes"'
+ ),
+ cwd=workdir,
)
-
- return [
- ExecInput(command=eval_cli_command, env=env),
- ExecInput(command=patch_command),
- ]
@@ -1,55 +0,0 @@
-#!/bin/bash
-set -euo pipefail
-
-# Install runtime dependencies needed by the eval-cli binary (dynamically linked
-# against glibc + these shared libraries from its GPUI/terminal/language stacks).
-apt-get update
-apt-get install -y --no-install-recommends \
- ca-certificates \
- curl \
- git \
- libasound2 \
- libfontconfig1 \
- libglib2.0-0 \
- libsqlite3-0 \
- libssl3 \
- libwayland-client0 \
- libx11-xcb1 \
- libxkbcommon-x11-0 \
- libzstd1
-
-# Install Node.js 22 LTS (needed by language servers like basedpyright).
-curl -fsSL https://deb.nodesource.com/setup_22.x | bash -
-apt-get install -y --no-install-recommends nodejs
-
-# Preinstall basedpyright in Zed's language server cache to avoid first-run npm install latency.
-ZED_DATA_DIR="${XDG_DATA_HOME:-$HOME/.local/share}/zed"
-BASEDPYRIGHT_DIR="$ZED_DATA_DIR/languages/basedpyright"
-mkdir -p "$BASEDPYRIGHT_DIR"
-npm install --prefix "$BASEDPYRIGHT_DIR" --save-exact basedpyright
-
-# Install uv (needed for running Python tests in SWE-bench tasks).
-curl -LsSf https://astral.sh/uv/install.sh | sh
-. "$HOME/.local/bin/env"
-ln -sf "$HOME/.local/bin/uv" /usr/local/bin/uv
-ln -sf "$HOME/.local/bin/uvx" /usr/local/bin/uvx
-
-{% if binary_uploaded is defined %}
-# Binary was uploaded directly via setup() — just verify it works.
-eval-cli --help
-{% elif download_url is defined %}
-curl -fsSL "{{ download_url }}" -o /usr/local/bin/eval-cli
-chmod +x /usr/local/bin/eval-cli
-eval-cli --help
-{% else %}
-echo "ERROR: No eval-cli binary provided."
-echo ""
-echo "Either pass binary_path= to upload a local build:"
-echo " --ae binary_path=/path/to/target/release/eval-cli"
-echo ""
-echo "Or set download_url= / EVAL_CLI_DOWNLOAD_URL:"
-echo " --ae download_url=https://example.com/eval-cli"
-exit 1
-{% endif %}
-
-echo "INSTALL_SUCCESS"
@@ -697,7 +697,7 @@ impl Asset for ImageAssetLoader {
Ok(Arc::new(RenderImage::new(data)))
} else {
svg_renderer
- .render_single_frame(&bytes, 1.0, true)
+ .render_single_frame(&bytes, 1.0)
.map_err(Into::into)
}
}
@@ -2108,7 +2108,7 @@ impl Image {
ImageFormat::Ico => frames_for_image(&self.bytes, image::ImageFormat::Ico)?,
ImageFormat::Svg => {
return svg_renderer
- .render_single_frame(&self.bytes, 1.0, false)
+ .render_single_frame(&self.bytes, 1.0)
.map_err(Into::into);
}
};
@@ -2190,6 +2190,30 @@ impl From<String> for ClipboardString {
}
}
+#[cfg(test)]
+mod image_tests {
+ use super::*;
+ use std::sync::Arc;
+
+ #[test]
+ fn test_svg_image_to_image_data_converts_to_bgra() {
+ let image = Image::from_bytes(
+ ImageFormat::Svg,
+ br##"<svg xmlns="http://www.w3.org/2000/svg" width="1" height="1">
+<rect width="1" height="1" fill="#38BDF8"/>
+</svg>"##
+ .to_vec(),
+ );
+
+ let render_image = image.to_image_data(SvgRenderer::new(Arc::new(()))).unwrap();
+ let bytes = render_image.as_bytes(0).unwrap();
+
+ for pixel in bytes.chunks_exact(4) {
+ assert_eq!(pixel, &[0xF8, 0xBD, 0x38, 0xFF]);
+ }
+ }
+}
+
#[cfg(all(test, any(target_os = "linux", target_os = "freebsd")))]
mod tests {
use super::*;
@@ -150,7 +150,6 @@ impl SvgRenderer {
&self,
bytes: &[u8],
scale_factor: f32,
- to_brga: bool,
) -> Result<Arc<RenderImage>, usvg::Error> {
self.render_pixmap(
bytes,
@@ -161,10 +160,8 @@ impl SvgRenderer {
image::ImageBuffer::from_raw(pixmap.width(), pixmap.height(), pixmap.take())
.unwrap();
- if to_brga {
- for pixel in buffer.chunks_exact_mut(4) {
- swap_rgba_pa_to_bgra(pixel);
- }
+ for pixel in buffer.chunks_exact_mut(4) {
+ swap_rgba_pa_to_bgra(pixel);
}
let mut image = RenderImage::new(SmallVec::from_const([Frame::new(buffer)]));
@@ -188,6 +188,14 @@ unsafe fn build_classes() {
sel!(mouseDragged:),
handle_view_event as extern "C" fn(&Object, Sel, id),
);
+ decl.add_method(
+ sel!(rightMouseDragged:),
+ handle_view_event as extern "C" fn(&Object, Sel, id),
+ );
+ decl.add_method(
+ sel!(otherMouseDragged:),
+ handle_view_event as extern "C" fn(&Object, Sel, id),
+ );
decl.add_method(
sel!(scrollWheel:),
handle_view_event as extern "C" fn(&Object, Sel, id),
@@ -67,6 +67,7 @@ pub fn adapt_schema_to_format(
if let Value::Object(obj) = json {
obj.remove("$schema");
obj.remove("title");
+ obj.remove("description");
}
match format {
@@ -105,9 +106,12 @@ fn adapt_to_json_schema_subset(json: &mut Value) -> Result<()> {
);
}
- const KEYS_TO_REMOVE: [(&str, fn(&Value) -> bool); 5] = [
+ const KEYS_TO_REMOVE: [(&str, fn(&Value) -> bool); 6] = [
("format", |value| value.is_string()),
- ("additionalProperties", |value| value.is_boolean()),
+ // Gemini doesn't support `additionalProperties` in any form (boolean or schema object)
+ ("additionalProperties", |_| true),
+ // Gemini doesn't support `propertyNames`
+ ("propertyNames", |_| true),
("exclusiveMinimum", |value| value.is_number()),
("exclusiveMaximum", |value| value.is_number()),
("optional", |value| value.is_boolean()),
@@ -234,6 +238,28 @@ mod tests {
"format": {},
})
);
+
+ // additionalProperties as an object schema is also unsupported by Gemini
+ let mut json = json!({
+ "type": "object",
+ "properties": {
+ "name": { "type": "string" }
+ },
+ "additionalProperties": { "type": "string" },
+ "propertyNames": { "pattern": "^[A-Za-z]+$" }
+ });
+
+ adapt_to_json_schema_subset(&mut json).unwrap();
+
+ assert_eq!(
+ json,
+ json!({
+ "type": "object",
+ "properties": {
+ "name": { "type": "string" }
+ }
+ })
+ );
}
#[test]
@@ -106,7 +106,7 @@ impl CachedMermaidDiagram {
let svg_string = mermaid_rs_renderer::render(&contents.contents)?;
let scale = contents.scale as f32 / 100.0;
svg_renderer
- .render_single_frame(svg_string.as_bytes(), scale, true)
+ .render_single_frame(svg_string.as_bytes(), scale)
.map_err(|error| anyhow::anyhow!("{error}"))
})
.await;
@@ -325,7 +325,6 @@ mod tests {
.render_single_frame(
br#"<svg xmlns="http://www.w3.org/2000/svg" width="1" height="1"></svg>"#,
1.0,
- true,
)
.unwrap()
})
@@ -20,7 +20,7 @@ action_log.workspace = true
agent.workspace = true
agent-client-protocol.workspace = true
agent_settings.workspace = true
-agent_ui.workspace = true
+agent_ui = { workspace = true, features = ["audio"] }
anyhow.workspace = true
chrono.workspace = true
collections.workspace = true
@@ -110,7 +110,7 @@ impl SvgPreviewView {
let renderer = cx.svg_renderer();
let content = buffer.read(cx).snapshot();
let background_task = cx.background_spawn(async move {
- renderer.render_single_frame(content.text().as_bytes(), SCALE_FACTOR, true)
+ renderer.render_single_frame(content.text().as_bytes(), SCALE_FACTOR)
});
self._refresh = cx.spawn_in(window, async move |this, cx| {
@@ -68,7 +68,7 @@ activity_indicator.workspace = true
agent.workspace = true
agent-client-protocol.workspace = true
agent_settings.workspace = true
-agent_ui.workspace = true
+agent_ui = { workspace = true, features = ["audio"] }
anyhow.workspace = true
askpass.workspace = true
assets.workspace = true