# agent.py

"""Harbor agent wrapper for Zed's eval-cli binary.

Usage:
    # Build eval-cli locally first:
    cargo build --release -p eval_cli

    # Run via Harbor with a local binary:
    harbor run -d "dataset@version" \
        --agent-import-path zed_eval.agent:ZedAgent \
        --ae binary_path=/path/to/target/release/eval-cli \
        --agent-model anthropic/claude-sonnet-4-6-latest

    # Or with a download URL (for CI):
    harbor run -d "dataset@version" \
        --agent-import-path zed_eval.agent:ZedAgent \
        --ae download_url=https://example.com/eval-cli \
        --agent-model anthropic/claude-sonnet-4-6-latest
"""
 19
 20import json
 21import os
 22import shlex
 23from pathlib import Path
 24
 25from harbor.agents.installed.base import BaseInstalledAgent, ExecInput
 26from harbor.environments.base import BaseEnvironment
 27from harbor.models.agent.context import AgentContext
 28
 29
 30class ZedAgent(BaseInstalledAgent):
 31    """Runs Zed's headless AI agent (eval-cli) to solve tasks.
 32
 33    The eval-cli binary boots a headless GPUI application and uses the same
 34    NativeAgent + AcpThread pipeline as the production Zed editor, driving
 35    the full agentic loop (tool calls, subagents, retries) without a GUI.
 36    """
 37
 38    def __init__(
 39        self,
 40        logs_dir: Path,
 41        binary_path: str | None = None,
 42        download_url: str | None = None,
 43        *args,
 44        **kwargs,
 45    ):
 46        super().__init__(logs_dir, *args, **kwargs)
 47        self._binary_path = binary_path
 48        self._download_url = download_url or os.environ.get("EVAL_CLI_DOWNLOAD_URL")
 49
 50    @staticmethod
 51    def name() -> str:
 52        return "zed"
 53
 54    @property
 55    def _install_agent_template_path(self) -> Path:
 56        return Path(__file__).parent / "install.sh.j2"
 57
 58    async def setup(self, environment: BaseEnvironment) -> None:
 59        await environment.exec(command="mkdir -p /installed-agent")
 60
 61        if self._binary_path:
 62            binary = Path(self._binary_path)
 63            if not binary.exists():
 64                raise FileNotFoundError(
 65                    f"eval-cli binary not found at {binary}. "
 66                    "Build it with: cargo build --release -p eval_cli"
 67                )
 68            await environment.upload_file(
 69                source_path=binary,
 70                target_path="/usr/local/bin/eval-cli",
 71            )
 72            await environment.exec(command="chmod +x /usr/local/bin/eval-cli")
 73
 74        await super().setup(environment)
 75
 76    @property
 77    def _template_variables(self) -> dict[str, str]:
 78        variables = super()._template_variables
 79        if self._binary_path:
 80            variables["binary_uploaded"] = "true"
 81        if self._download_url:
 82            variables["download_url"] = self._download_url
 83        return variables
 84
 85    def populate_context_post_run(self, context: AgentContext) -> None:
 86        result_data = None
 87        for json_file in self.logs_dir.rglob("result.json"):
 88            try:
 89                result_data = json.loads(json_file.read_text())
 90                break
 91            except (json.JSONDecodeError, OSError):
 92                continue
 93
 94        if result_data is None:
 95            self.logger.warning("Could not find or parse result.json from eval-cli")
 96            return
 97
 98        if result_data.get("input_tokens") is not None:
 99            context.n_input_tokens = result_data["input_tokens"]
100        if result_data.get("output_tokens") is not None:
101            context.n_output_tokens = result_data["output_tokens"]
102        if result_data.get("cache_read_input_tokens") is not None:
103            context.n_cache_tokens = result_data["cache_read_input_tokens"]
104
105        context.metadata = {
106            "status": result_data.get("status"),
107            "duration_secs": result_data.get("duration_secs"),
108            "model": result_data.get("model"),
109        }
110
111    def _get_api_env(self) -> dict[str, str]:
112        env: dict[str, str] = {}
113        if not self.model_name or "/" not in self.model_name:
114            return env
115
116        provider = self.model_name.split("/", 1)[0]
117        provider_env_map = {
118            "anthropic": "ANTHROPIC_API_KEY",
119            "openai": "OPENAI_API_KEY",
120            "google": "GEMINI_API_KEY",
121            "gemini": "GEMINI_API_KEY",
122            "deepseek": "DEEPSEEK_API_KEY",
123            "mistral": "MISTRAL_API_KEY",
124        }
125
126        env_var = provider_env_map.get(provider)
127        if env_var:
128            api_key = os.environ.get(env_var, "")
129            if api_key:
130                env[env_var] = api_key
131
132        return env
133
134    def create_run_agent_commands(self, instruction: str) -> list[ExecInput]:
135        escaped_instruction = shlex.quote(instruction)
136        env = self._get_api_env()
137
138        parts = ["eval-cli", "--workdir /testbed", "--output-dir /logs/agent"]
139
140        if self.model_name:
141            parts.append(f"--model {self.model_name}")
142
143        timeout = self._extra_env.get("EVAL_CLI_TIMEOUT")
144        if timeout:
145            parts.append(f"--timeout {timeout}")
146
147        parts.append(f"--instruction {escaped_instruction}")
148
149        eval_cli_command = " ".join(parts) + " 2>&1 | stdbuf -oL tee /logs/agent/eval-cli.txt"
150
151        patch_command = (
152            "cd /testbed && "
153            "git add -A && "
154            "git diff --cached HEAD > /logs/agent/patch.diff && "
155            "echo \"Patch size: $(wc -c < /logs/agent/patch.diff) bytes\""
156        )
157
158        return [
159            ExecInput(command=eval_cli_command, env=env),
160            ExecInput(command=patch_command),
161        ]