Detailed changes
@@ -13,7 +13,10 @@ CLI that dispatches specialized AI research subagents for web research and git r
```bash
bun run dev # Run CLI via bun (development)
bun run build # Build to dist/
-bun run typecheck # TypeScript check (also: bun run lint)
+bun run typecheck # TypeScript check
+bun run lint # Biome check + typecheck
+bun run format # Auto-format with Biome
+bun run check # Biome check with auto-fix
```
Run `bun test` to execute the test suite.
@@ -97,7 +100,7 @@ Custom `system_prompt_path` in config replaces the built prompt entirely.
## Gotchas
-1. **Grep requires ripgrep** - `createGrepTool` checks for `rg` at tool creation time and throws if missing
+1. **Grep requires ripgrep** - `createGrepTool` checks for `rg` at execution time and returns an error message if missing
2. **Web tools require credentials** - `KAGI_SESSION_TOKEN` and `TABSTACK_API_KEY` via env or config
3. **Shallow clones by default** - repo mode uses `--depth 1 --filter=blob:limit=5m` unless `--full`
4. **Pre-fetch injection** - web command with `-u URL` wraps content in `<attached_content>` XML tags; content ≤50KB is inlined, larger content is stored in workspace
@@ -0,0 +1,48 @@
+{
+ "$schema": "https://biomejs.dev/schemas/2.3.14/schema.json",
+ "vcs": {
+ "enabled": true,
+ "clientKind": "git",
+ "useIgnoreFile": true
+ },
+ "files": {
+ "ignoreUnknown": false
+ },
+ "formatter": {
+ "enabled": true,
+ "indentStyle": "space",
+ "indentWidth": 2
+ },
+ "linter": {
+ "enabled": true,
+ "rules": {
+ "recommended": true,
+ "suspicious": {
+ "noExplicitAny": "off",
+ "noTemplateCurlyInString": "off"
+ },
+ "complexity": {
+ "useLiteralKeys": "off"
+ },
+ "style": {
+ "noParameterAssign": "off",
+ "useTemplate": "off",
+ "noNonNullAssertion": "off"
+ }
+ }
+ },
+ "javascript": {
+ "formatter": {
+ "quoteStyle": "double",
+ "semicolons": "always"
+ }
+ },
+ "assist": {
+ "enabled": true,
+ "actions": {
+ "source": {
+ "organizeImports": "on"
+ }
+ }
+ }
+}
@@ -14,6 +14,7 @@
"toml": "^3.0.0",
},
"devDependencies": {
+ "@biomejs/biome": "^2.3.14",
"@types/node": "^25.5.0",
"bun-types": "^1.3.11",
"typescript": "^5.7.3",
@@ -93,6 +94,24 @@
"@babel/runtime": ["@babel/runtime@7.28.6", "", {}, "sha512-05WQkdpL9COIMz4LjTxGpPNCdlpyimKppYNoJ5Di5EUObifl8t4tuLuUBBZEpoLYOmfvIWrsp9fCl0HoPRVTdA=="],
+ "@biomejs/biome": ["@biomejs/biome@2.4.12", "", { "optionalDependencies": { "@biomejs/cli-darwin-arm64": "2.4.12", "@biomejs/cli-darwin-x64": "2.4.12", "@biomejs/cli-linux-arm64": "2.4.12", "@biomejs/cli-linux-arm64-musl": "2.4.12", "@biomejs/cli-linux-x64": "2.4.12", "@biomejs/cli-linux-x64-musl": "2.4.12", "@biomejs/cli-win32-arm64": "2.4.12", "@biomejs/cli-win32-x64": "2.4.12" }, "bin": { "biome": "bin/biome" } }, "sha512-Rro7adQl3NLq/zJCIL98eElXKI8eEiBtoeu5TbXF/U3qbjuSc7Jb5rjUbeHHcquDWeSf3HnGP7XI5qGrlRk/pA=="],
+
+ "@biomejs/cli-darwin-arm64": ["@biomejs/cli-darwin-arm64@2.4.12", "", { "os": "darwin", "cpu": "arm64" }, "sha512-BnMU4Pc3ciEVteVpZ0BK33MLr7X57F5w1dwDLDn+/iy/yTrA4Q/N2yftidFtsA4vrDh0FMXDpacNV/Tl3fbmng=="],
+
+ "@biomejs/cli-darwin-x64": ["@biomejs/cli-darwin-x64@2.4.12", "", { "os": "darwin", "cpu": "x64" }, "sha512-x9uJ0bI1rJsWICp3VH8w/5PnAVD3A7SqzDpbrfoUQX1QyWrK5jSU4fRLo/wSgGeplCivbxBRKmt5Xq4/nWvq8A=="],
+
+ "@biomejs/cli-linux-arm64": ["@biomejs/cli-linux-arm64@2.4.12", "", { "os": "linux", "cpu": "arm64" }, "sha512-tOwuCuZZtKi1jVzbk/5nXmIsziOB6yqN8c9r9QM0EJYPU6DpQWf11uBOSCfFKKM4H3d9ZoarvlgMfbcuD051Pw=="],
+
+ "@biomejs/cli-linux-arm64-musl": ["@biomejs/cli-linux-arm64-musl@2.4.12", "", { "os": "linux", "cpu": "arm64" }, "sha512-FhfpkAAlKL6kwvcVap0Hgp4AhZmtd3YImg0kK1jd7C/aSoh4SfsB2f++yG1rU0lr8Y5MCFJrcSkmssiL9Xnnig=="],
+
+ "@biomejs/cli-linux-x64": ["@biomejs/cli-linux-x64@2.4.12", "", { "os": "linux", "cpu": "x64" }, "sha512-8pFeAnLU9QdW9jCIslB/v82bI0lhBmz2ZAKc8pVMFPO0t0wAHsoEkrUQUbMkIorTRIjbqyNZHA3lEXavsPWYSw=="],
+
+ "@biomejs/cli-linux-x64-musl": ["@biomejs/cli-linux-x64-musl@2.4.12", "", { "os": "linux", "cpu": "x64" }, "sha512-dwTIgZrGutzhkQCuvHynCkyW6hJxUuyZqKKO0YNfaS2GUoRO+tOvxXZqZB6SkWAOdfZTzwaw8IEdUnIkHKHoew=="],
+
+ "@biomejs/cli-win32-arm64": ["@biomejs/cli-win32-arm64@2.4.12", "", { "os": "win32", "cpu": "arm64" }, "sha512-B0DLnx0vA9ya/3v7XyCaP+/lCpnbWbMOfUFFve+xb5OxyYvdHaS55YsSddr228Y+JAFk58agCuZTsqNiw2a6ig=="],
+
+ "@biomejs/cli-win32-x64": ["@biomejs/cli-win32-x64@2.4.12", "", { "os": "win32", "cpu": "x64" }, "sha512-yMckRzTyZ83hkk8iDFWswqSdU8tvZxspJKnYNh7JZr/zhZNOlzH13k4ecboU6MurKExCe2HUkH75pGI/O2JwGA=="],
+
"@google/genai": ["@google/genai@1.40.0", "", { "dependencies": { "google-auth-library": "^10.3.0", "protobufjs": "^7.5.4", "ws": "^8.18.0" }, "peerDependencies": { "@modelcontextprotocol/sdk": "^1.25.2" }, "optionalPeers": ["@modelcontextprotocol/sdk"] }, "sha512-fhIww8smT0QYRX78qWOiz/nIQhHMF5wXOrlXvj33HBrz3vKDBb+wibLcEmTA+L9dmPD4KmfNr7UF3LDQVTXNjA=="],
"@isaacs/cliui": ["@isaacs/cliui@8.0.2", "", { "dependencies": { "string-width": "^5.1.2", "string-width-cjs": "npm:string-width@^4.2.0", "strip-ansi": "^7.0.1", "strip-ansi-cjs": "npm:strip-ansi@^6.0.1", "wrap-ansi": "^8.1.0", "wrap-ansi-cjs": "npm:wrap-ansi@^7.0.0" } }, "sha512-O8jcjabXaleOG9DQ0+ARXWZBTfnP4WNAqzuiJK7ll44AmxGKv/J2M4TPjxjY3znBCfvBXFzucm1twdyFybFqEA=="],
@@ -15,8 +15,10 @@
"scripts": {
"dev": "bun src/cli/index.ts",
"build": "bun build src/cli/index.ts --outdir dist --target=node",
- "start": "bun dist/cli/index.js",
- "lint": "bun run --silent typecheck",
+ "start": "node dist/index.js",
+ "format": "biome format --write .",
+ "check": "biome check --write .",
+ "lint": "biome check . && bun run --silent typecheck",
"test": "bun test",
"typecheck": "tsc --noEmit",
"prepublishOnly": "bun run build"
@@ -31,6 +33,7 @@
"toml": "^3.0.0"
},
"devDependencies": {
+ "@biomejs/biome": "^2.3.14",
"@types/node": "^25.5.0",
"bun-types": "^1.3.11",
"typescript": "^5.7.3"
@@ -3,12 +3,9 @@
// SPDX-License-Identifier: GPL-3.0-or-later
import { getModel, type Model } from "@mariozechner/pi-ai";
-import {
- type CustomModelConfig,
- type RumiloConfig,
-} from "../config/schema.js";
-import { ConfigError } from "../util/errors.js";
+import type { CustomModelConfig, RumiloConfig } from "../config/schema.js";
import { resolveHeaders } from "../util/env.js";
+import { ConfigError } from "../util/errors.js";
export function resolveModel(
modelString: string,
@@ -16,14 +13,18 @@ export function resolveModel(
): Model<any> {
const colonIndex = modelString.indexOf(":");
if (colonIndex === -1) {
- throw new ConfigError(`Invalid model format: "${modelString}". Expected provider:model (e.g. anthropic:claude-sonnet-4-20250514)`);
+ throw new ConfigError(
+ `Invalid model format: "${modelString}". Expected provider:model (e.g. anthropic:claude-sonnet-4-20250514)`,
+ );
}
const provider = modelString.slice(0, colonIndex);
const modelName = modelString.slice(colonIndex + 1);
if (!provider || !modelName) {
- throw new ConfigError(`Invalid model format: "${modelString}". Expected provider:model (e.g. anthropic:claude-sonnet-4-20250514)`);
+ throw new ConfigError(
+ `Invalid model format: "${modelString}". Expected provider:model (e.g. anthropic:claude-sonnet-4-20250514)`,
+ );
}
// Handle custom models
@@ -35,7 +36,10 @@ export function resolveModel(
return getModel(provider as any, modelName);
}
-function resolveCustomModel(modelName: string, config: RumiloConfig): Model<any> {
+function resolveCustomModel(
+ modelName: string,
+ config: RumiloConfig,
+): Model<any> {
if (!config.custom_models) {
throw new ConfigError(
`No custom models defined in config. Add a [custom_models.${modelName}] section to use custom:${modelName}`,
@@ -94,9 +98,7 @@ function buildCustomModel(config: CustomModelConfig): Model<any> {
return model;
}
-function convertCompatConfig(
- compat: CustomModelConfig["compat"],
-): any {
+function convertCompatConfig(compat: CustomModelConfig["compat"]): any {
if (!compat) {
throw new Error("Compat config is expected to be defined");
}
@@ -108,7 +110,8 @@ function convertCompatConfig(
supportsUsageInStreaming: compat.supports_usage_in_streaming,
maxTokensField: compat.max_tokens_field,
requiresToolResultName: compat.requires_tool_result_name,
- requiresAssistantAfterToolResult: compat.requires_assistant_after_tool_result,
+ requiresAssistantAfterToolResult:
+ compat.requires_assistant_after_tool_result,
requiresThinkingAsText: compat.requires_thinking_as_text,
requiresMistralToolIds: compat.requires_mistral_tool_ids,
thinkingFormat: compat.thinking_format,
@@ -9,7 +9,7 @@ export interface RepoPromptContext {
export function buildRepoPrompt(ctx: RepoPromptContext): string {
const historyGuidance = ctx.hasHistory
- ? "\nGit history is available. Blame and log can reveal why code exists and how it evolved. Use them when the question involves \"why\" or \"when,\" or when current code is confusing without context."
+ ? '\nGit history is available. Blame and log can reveal why code exists and how it evolved. Use them when the question involves "why" or "when," or when current code is confusing without context.'
: "";
const historyEnv = ctx.hasHistory
@@ -2,12 +2,16 @@
//
// SPDX-License-Identifier: GPL-3.0-or-later
-import { Agent, type AgentEvent, type AgentTool } from "@mariozechner/pi-agent-core";
-import { getEnvApiKey, type AssistantMessage } from "@mariozechner/pi-ai";
+import {
+ Agent,
+ type AgentEvent,
+ type AgentTool,
+} from "@mariozechner/pi-agent-core";
+import { type AssistantMessage, getEnvApiKey } from "@mariozechner/pi-ai";
import type { RumiloConfig } from "../config/schema.js";
-import { resolveModel } from "./model-resolver.js";
-import { AgentError } from "../util/errors.js";
import { resolveConfigValue } from "../util/env.js";
+import { AgentError } from "../util/errors.js";
+import { resolveModel } from "./model-resolver.js";
export interface AgentRunOptions {
model: string;
@@ -32,7 +36,9 @@ export interface AgentRunResult {
* names, `$VAR` references, and `!shell` commands).
* 2. pi-ai’s built-in env-var lookup (`ANTHROPIC_API_KEY`, `OPENAI_API_KEY`, etc.).
*/
-export function buildGetApiKey(config: RumiloConfig): (provider: string) => string | undefined {
+export function buildGetApiKey(
+ config: RumiloConfig,
+): (provider: string) => string | undefined {
return (provider: string) => {
if (config.custom_models) {
for (const model of Object.values(config.custom_models)) {
@@ -46,7 +52,10 @@ export function buildGetApiKey(config: RumiloConfig): (provider: string) => stri
};
}
-export async function runAgent(query: string, options: AgentRunOptions): Promise<AgentRunResult> {
+export async function runAgent(
+ query: string,
+ options: AgentRunOptions,
+): Promise<AgentRunResult> {
const agent = new Agent({
initialState: {
systemPrompt: options.systemPrompt,
@@ -74,20 +83,27 @@ export async function runAgent(query: string, options: AgentRunOptions): Promise
// Check if the last assistant message indicates an error
if (last?.stopReason === "error") {
- throw new AgentError(last.errorMessage ?? "Agent stopped with an unknown error");
+ throw new AgentError(
+ last.errorMessage ?? "Agent stopped with an unknown error",
+ );
}
const text = last?.content
- ?.filter((content): content is Extract<typeof content, { type: "text" }> => content.type === "text")
+ ?.filter(
+ (content): content is Extract<typeof content, { type: "text" }> =>
+ content.type === "text",
+ )
.map((content) => content.text)
.join("")
.trim();
if (text === undefined || text === "") {
- throw new AgentError("Agent completed without producing a text response");
+ throw new AgentError("Agent returned no text response");
}
- const requestCount = agent.state.messages.filter((msg) => msg.role === "assistant").length;
+ const requestCount = agent.state.messages.filter(
+ (msg) => msg.role === "assistant",
+ ).length;
return {
message: text,
@@ -4,90 +4,125 @@
import { spawnSync } from "node:child_process";
import { relative } from "node:path";
-import { Type } from "@sinclair/typebox";
import type { AgentTool } from "@mariozechner/pi-agent-core";
-import { resolveToCwd, ensureWorkspacePath } from "./path-utils.js";
-import { DEFAULT_MAX_BYTES, formatSize, truncateHead } from "../../util/truncate.js";
-import { ToolInputError } from "../../util/errors.js";
+import { Type } from "@sinclair/typebox";
+import {
+ DEFAULT_MAX_BYTES,
+ formatSize,
+ truncateHead,
+} from "../../util/truncate.js";
+import { ensureWorkspacePath, resolveToCwd } from "./path-utils.js";
const DEFAULT_LIMIT = 1000;
const FindSchema = Type.Object({
- pattern: Type.String({ description: "Glob pattern to match files, e.g. '*.ts', '**/*.json'" }),
- path: Type.Optional(Type.String({ description: "Directory to search in (default: current directory)" })),
- limit: Type.Optional(Type.Number({ description: "Maximum number of results (default: 1000)" })),
+ pattern: Type.String({
+ description: "Glob pattern to match files, e.g. '*.ts', '**/*.json'",
+ }),
+ path: Type.Optional(
+ Type.String({
+ description: "Directory to search in (default: current directory)",
+ }),
+ ),
+ limit: Type.Optional(
+ Type.Number({ description: "Maximum number of results (default: 1000)" }),
+ ),
});
export const createFindTool = (workspacePath: string): AgentTool => {
- const fdResult = spawnSync("which", ["fd"], { encoding: "utf-8" });
- const fdPath = fdResult.stdout?.trim();
- if (!fdPath) throw new ToolInputError("find requires fd to be installed");
-
return {
name: "find",
label: "Find Files",
- description: `Search for files matching a glob pattern. Returns up to ${DEFAULT_LIMIT} results and ${DEFAULT_MAX_BYTES / 1024}KB of output. Respects .gitignore.`,
+ description: `Search for files by glob pattern using fd. Returns up to ${DEFAULT_LIMIT} results and ${DEFAULT_MAX_BYTES / 1024} KB of output. Respects .gitignore.`,
parameters: FindSchema as any,
execute: async (_toolCallId: string, params: any) => {
+ let fdPath: string | undefined;
+ for (const name of ["fd", "fdfind"]) {
+ const result = spawnSync("which", [name], { encoding: "utf-8" });
+ if (result.status === 0 && result.stdout?.trim()) {
+ fdPath = result.stdout.trim();
+ break;
+ }
+ }
+ if (!fdPath) {
+ return {
+ content: [
+ {
+ type: "text",
+ text: "fd is not installed. Please tell the user to install fd-find to use file search functionality.",
+ },
+ ],
+ details: { error: "fd_not_found" },
+ };
+ }
+
const searchDir: string = params.path || ".";
const effectiveLimit = params.limit ?? DEFAULT_LIMIT;
const searchPath = resolveToCwd(searchDir, workspacePath);
ensureWorkspacePath(workspacePath, searchPath);
- const args = [
- "--glob",
- "--color=never",
- "--hidden",
- "--max-results",
- String(effectiveLimit),
- params.pattern,
- searchPath,
- ];
+ const args = [
+ "--glob",
+ "--color=never",
+ "--hidden",
+ "--absolute-path",
+ "--max-results",
+ String(effectiveLimit),
+ params.pattern,
+ searchPath,
+ ];
- const result = spawnSync(fdPath, args, {
- encoding: "utf-8",
- maxBuffer: 10 * 1024 * 1024,
- });
+ const result = spawnSync(fdPath, args, {
+ encoding: "utf-8",
+ maxBuffer: 10 * 1024 * 1024,
+ });
- const rawOutput = result.stdout?.trim() ?? "";
- if (!rawOutput) {
- return {
- content: [{ type: "text", text: "No files found matching pattern" }],
- details: { pattern: params.pattern, path: searchDir, matches: 0 },
- };
- }
+ const rawOutput = result.stdout?.trim() ?? "";
+ if (!rawOutput) {
+ return {
+ content: [{ type: "text", text: "No files found matching pattern" }],
+ details: { pattern: params.pattern, path: searchDir, matches: 0 },
+ };
+ }
- const lines = rawOutput.split("\n").map((line) => {
- const isDir = line.endsWith("/");
- const rel = relative(searchPath, line);
- return isDir && !rel.endsWith("/") ? `${rel}/` : rel;
- });
+ const lines = rawOutput.split("\n").map((line) => {
+ const isDir = line.endsWith("/");
+ const rel = relative(searchPath, line);
+ return isDir && !rel.endsWith("/") ? `${rel}/` : rel;
+ });
- const relativized = lines.join("\n");
- const truncated = truncateHead(relativized, { maxLines: Number.MAX_SAFE_INTEGER });
+ const relativized = lines.join("\n");
+ const truncated = truncateHead(relativized, {
+ maxLines: Number.MAX_SAFE_INTEGER,
+ });
- const notices: string[] = [];
- if (lines.length >= effectiveLimit) {
- notices.push(`Result limit reached (${effectiveLimit}). Narrow your pattern for complete results.`);
- }
- if (truncated.truncatedBy === "bytes") {
- const droppedBytes = truncated.totalBytes - truncated.outputBytes;
- notices.push(`Output truncated by ${formatSize(droppedBytes)} to fit ${formatSize(DEFAULT_MAX_BYTES)} limit.`);
- }
+ const notices: string[] = [];
+ if (lines.length >= effectiveLimit) {
+ notices.push(
+ `Result limit reached (${effectiveLimit}). Narrow your pattern for complete results.`,
+ );
+ }
+ if (truncated.truncatedBy === "bytes") {
+ const droppedBytes = truncated.totalBytes - truncated.outputBytes;
+ notices.push(
+ `Output truncated by ${formatSize(droppedBytes)} to fit ${formatSize(DEFAULT_MAX_BYTES)} limit.`,
+ );
+ }
- const output = notices.length > 0
- ? `${truncated.content}\n\n${notices.join("\n")}`
- : truncated.content;
+ const output =
+ notices.length > 0
+ ? `${truncated.content}\n\n${notices.join("\n")}`
+ : truncated.content;
- return {
- content: [{ type: "text", text: output }],
- details: {
- pattern: params.pattern,
- path: searchDir,
- matches: lines.length,
- ...(truncated.truncated && { truncatedBy: truncated.truncatedBy }),
- },
- };
+ return {
+ content: [{ type: "text", text: output }],
+ details: {
+ pattern: params.pattern,
+ path: searchDir,
+ matches: lines.length,
+ ...(truncated.truncated && { truncatedBy: truncated.truncatedBy }),
+ },
+ };
},
};
};
@@ -2,8 +2,8 @@
//
// SPDX-License-Identifier: GPL-3.0-or-later
-import { Type } from "@sinclair/typebox";
import type { AgentTool } from "@mariozechner/pi-agent-core";
+import { Type } from "@sinclair/typebox";
import simpleGit from "simple-git";
import { ToolInputError } from "../../../util/errors.js";
import { formatSize, truncateHead } from "../../../util/truncate.js";
@@ -19,12 +19,19 @@ const BlameSchema = Type.Object({
export const createGitBlameTool = (workspacePath: string): AgentTool => ({
name: "git_blame",
label: "Git Blame",
- description: "Show line-by-line commit attribution for a file.",
+ description: "Blame a file to see commit attribution.",
parameters: BlameSchema as any,
execute: async (_toolCallId: string, params: any) => {
if (!String(params.path ?? "").trim()) {
throw new ToolInputError("path must be a non-empty string");
}
+ if (
+ String(params.path ?? "")
+ .trim()
+ .startsWith("-")
+ ) {
+ throw new ToolInputError("path must not start with '-'");
+ }
const git = simpleGit(workspacePath);
const raw = await git.raw(["blame", "--", params.path]);
const truncation = truncateHead(raw);
@@ -36,7 +43,10 @@ export const createGitBlameTool = (workspacePath: string): AgentTool => ({
return {
content: [{ type: "text", text }],
- details: { path: params.path, ...(truncation.truncated ? { truncation } : {}) },
+ details: {
+ path: params.path,
+ ...(truncation.truncated ? { truncation } : {}),
+ },
};
},
});
@@ -2,8 +2,8 @@
//
// SPDX-License-Identifier: GPL-3.0-or-later
-import { Type } from "@sinclair/typebox";
import type { AgentTool } from "@mariozechner/pi-agent-core";
+import { Type } from "@sinclair/typebox";
import simpleGit from "simple-git";
import { ToolInputError } from "../../../util/errors.js";
@@ -24,6 +24,13 @@ export const createGitCheckoutTool = (workspacePath: string): AgentTool => ({
if (!String(params.ref ?? "").trim()) {
throw new ToolInputError("ref must be a non-empty string");
}
+ if (
+ String(params.ref ?? "")
+ .trim()
+ .startsWith("-")
+ ) {
+ throw new ToolInputError("ref must not start with '-'");
+ }
const git = simpleGit(workspacePath);
await git.checkout(params.ref);
@@ -2,8 +2,8 @@
//
// SPDX-License-Identifier: GPL-3.0-or-later
-import { Type } from "@sinclair/typebox";
import type { AgentTool } from "@mariozechner/pi-agent-core";
+import { Type } from "@sinclair/typebox";
import simpleGit from "simple-git";
import { ToolInputError } from "../../../util/errors.js";
import { formatSize, truncateHead } from "../../../util/truncate.js";
@@ -21,7 +21,7 @@ const DiffSchema = Type.Object({
export const createGitDiffTool = (workspacePath: string): AgentTool => ({
name: "git_diff",
label: "Git Diff",
- description: "Show diff between refs, or between a ref and the working tree. Omit both refs for unstaged changes.",
+ description: "Show diff between refs or working tree.",
parameters: DiffSchema as any,
execute: async (_toolCallId: string, params: any) => {
const git = simpleGit(workspacePath);
@@ -30,12 +30,33 @@ export const createGitDiffTool = (workspacePath: string): AgentTool => ({
if (params.ref && !String(params.ref).trim()) {
throw new ToolInputError("ref must be a non-empty string");
}
+ if (
+ String(params.ref ?? "")
+ .trim()
+ .startsWith("-")
+ ) {
+ throw new ToolInputError("ref must not start with '-'");
+ }
if (params.ref2 && !String(params.ref2).trim()) {
throw new ToolInputError("ref2 must be a non-empty string");
}
+ if (
+ String(params.ref2 ?? "")
+ .trim()
+ .startsWith("-")
+ ) {
+ throw new ToolInputError("ref2 must not start with '-'");
+ }
if (params.path && !String(params.path).trim()) {
throw new ToolInputError("path must be a non-empty string");
}
+ if (
+ String(params.path ?? "")
+ .trim()
+ .startsWith("-")
+ ) {
+ throw new ToolInputError("path must not start with '-'");
+ }
if (params.ref) args.push(params.ref);
if (params.ref2) args.push(params.ref2);
if (params.path) args.push("--", params.path);
@@ -50,7 +71,10 @@ export const createGitDiffTool = (workspacePath: string): AgentTool => ({
return {
content: [{ type: "text", text }],
- details: { path: params.path ?? null, ...(truncation.truncated ? { truncation } : {}) },
+ details: {
+ path: params.path ?? null,
+ ...(truncation.truncated ? { truncation } : {}),
+ },
};
},
});
@@ -2,9 +2,9 @@
//
// SPDX-License-Identifier: GPL-3.0-or-later
-export { createGitLogTool } from "./log.js";
-export { createGitShowTool } from "./show.js";
export { createGitBlameTool } from "./blame.js";
+export { createGitCheckoutTool } from "./checkout.js";
export { createGitDiffTool } from "./diff.js";
+export { createGitLogTool } from "./log.js";
export { createGitRefsTool } from "./refs.js";
-export { createGitCheckoutTool } from "./checkout.js";
+export { createGitShowTool } from "./show.js";
@@ -2,8 +2,8 @@
//
// SPDX-License-Identifier: GPL-3.0-or-later
-import { Type } from "@sinclair/typebox";
import type { AgentTool } from "@mariozechner/pi-agent-core";
+import { Type } from "@sinclair/typebox";
import simpleGit from "simple-git";
import { ToolInputError } from "../../../util/errors.js";
@@ -14,18 +14,31 @@ import { ToolInputError } from "../../../util/errors.js";
const DEFAULT_LOG_LIMIT = 20;
const LogSchema = Type.Object({
- path: Type.Optional(Type.String({ description: "Filter to commits touching this path" })),
- author: Type.Optional(Type.String({ description: "Filter by author name/email" })),
- since: Type.Optional(Type.String({ description: "Commits after this date (e.g., 2024-01-01)" })),
- until: Type.Optional(Type.String({ description: "Commits before this date" })),
- n: Type.Optional(Type.Number({ description: "Maximum number of commits (default: 20)" })),
- oneline: Type.Optional(Type.Boolean({ description: "Compact one-line format (default: false)" })),
+ path: Type.Optional(
+ Type.String({ description: "Filter to commits touching this path" }),
+ ),
+ author: Type.Optional(
+ Type.String({ description: "Filter by author name/email" }),
+ ),
+ since: Type.Optional(
+ Type.String({ description: "Commits after this date (e.g., 2024-01-01)" }),
+ ),
+ until: Type.Optional(
+ Type.String({ description: "Commits before this date" }),
+ ),
+ n: Type.Optional(
+ Type.Number({ description: "Maximum number of commits (default: 20)" }),
+ ),
+ oneline: Type.Optional(
+ Type.Boolean({ description: "Compact one-line format (default: false)" }),
+ ),
});
export const createGitLogTool = (workspacePath: string): AgentTool => ({
name: "git_log",
label: "Git Log",
- description: "View commit history with optional path, author, and date filters.",
+ description:
+ "View commit history. Supports filtering by path, author, date range, and count.",
parameters: LogSchema as any,
execute: async (_toolCallId: string, params: any) => {
const git = simpleGit(workspacePath);
@@ -37,10 +50,20 @@ export const createGitLogTool = (workspacePath: string): AgentTool => ({
}
options.push("-n", String(Math.floor(limit)));
if (params.oneline) options.push("--oneline");
+ if (
+ String(params.path ?? "")
+ .trim()
+ .startsWith("-")
+ ) {
+ throw new ToolInputError("path must not start with '-'");
+ }
if (params.author !== undefined) {
if (!String(params.author).trim()) {
throw new ToolInputError("author must be a non-empty string");
}
+ if (String(params.author).trim().startsWith("-")) {
+ throw new ToolInputError("author must not start with '-'");
+ }
options.push(`--author=${params.author}`);
}
if (params.since !== undefined) {
@@ -56,7 +79,9 @@ export const createGitLogTool = (workspacePath: string): AgentTool => ({
options.push(`--until=${params.until}`);
}
- const result = await git.log(options.concat(params.path ? ["--", params.path] : []));
+ const result = await git.log(
+ options.concat(params.path ? ["--", params.path] : []),
+ );
const text = result.all
.map((entry) =>
@@ -2,8 +2,8 @@
//
// SPDX-License-Identifier: GPL-3.0-or-later
-import { Type } from "@sinclair/typebox";
import type { AgentTool } from "@mariozechner/pi-agent-core";
+import { Type } from "@sinclair/typebox";
import simpleGit from "simple-git";
import { formatSize, truncateHead } from "../../../util/truncate.js";
@@ -22,7 +22,7 @@ const RefsSchema = Type.Object({
export const createGitRefsTool = (workspacePath: string): AgentTool => ({
name: "git_refs",
label: "Git Refs",
- description: "List branches, tags, or remotes.",
+ description: "List branches, tags, or remote-tracking branches.",
parameters: RefsSchema as any,
execute: async (_toolCallId: string, params: any) => {
const git = simpleGit(workspacePath);
@@ -37,7 +37,7 @@ export const createGitRefsTool = (workspacePath: string): AgentTool => ({
} else if (params.type === "remotes") {
raw = await git.raw(["branch", "-r"]);
} else {
- raw = await git.raw(["branch", "-a"]);
+ raw = await git.raw(["branch"]);
}
const truncation = truncateHead(raw);
@@ -48,7 +48,10 @@ export const createGitRefsTool = (workspacePath: string): AgentTool => ({
return {
content: [{ type: "text", text }],
- details: { ...baseDetails, ...(truncation.truncated ? { truncation } : {}) },
+ details: {
+ ...baseDetails,
+ ...(truncation.truncated ? { truncation } : {}),
+ },
};
},
});
@@ -2,8 +2,8 @@
//
// SPDX-License-Identifier: GPL-3.0-or-later
-import { Type } from "@sinclair/typebox";
import type { AgentTool } from "@mariozechner/pi-agent-core";
+import { Type } from "@sinclair/typebox";
import simpleGit from "simple-git";
import { ToolInputError } from "../../../util/errors.js";
import { formatSize, truncateHead } from "../../../util/truncate.js";
@@ -19,12 +19,19 @@ const ShowSchema = Type.Object({
export const createGitShowTool = (workspacePath: string): AgentTool => ({
name: "git_show",
label: "Git Show",
- description: "Show commit message and diff for a given ref.",
+ description: "Show details for a commit or object.",
parameters: ShowSchema as any,
execute: async (_toolCallId: string, params: any) => {
if (!String(params.ref ?? "").trim()) {
throw new ToolInputError("ref must be a non-empty string");
}
+ if (
+ String(params.ref ?? "")
+ .trim()
+ .startsWith("-")
+ ) {
+ throw new ToolInputError("ref must not start with '-'");
+ }
const git = simpleGit(workspacePath);
const raw = await git.show([params.ref]);
const truncation = truncateHead(raw);
@@ -36,7 +43,10 @@ export const createGitShowTool = (workspacePath: string): AgentTool => ({
return {
content: [{ type: "text", text }],
- details: { ref: params.ref, ...(truncation.truncated ? { truncation } : {}) },
+ details: {
+ ref: params.ref,
+ ...(truncation.truncated ? { truncation } : {}),
+ },
};
},
});
@@ -3,12 +3,12 @@
// SPDX-License-Identifier: GPL-3.0-or-later
import { spawn, spawnSync } from "node:child_process";
-import { createInterface } from "node:readline";
import { readFileSync, statSync } from "node:fs";
-import { relative, basename } from "node:path";
-import { Type } from "@sinclair/typebox";
+import { basename, relative } from "node:path";
+import { createInterface } from "node:readline";
import type { AgentTool } from "@mariozechner/pi-agent-core";
-import { resolveToCwd, ensureWorkspacePath } from "./path-utils.js";
+import { Type } from "@sinclair/typebox";
+import { ToolInputError } from "../../util/errors.js";
import {
DEFAULT_MAX_BYTES,
formatSize,
@@ -16,35 +16,62 @@ import {
truncateHead,
truncateLine,
} from "../../util/truncate.js";
-import { ToolInputError } from "../../util/errors.js";
+import { ensureWorkspacePath, resolveToCwd } from "./path-utils.js";
const DEFAULT_LIMIT = 100;
const GrepSchema = Type.Object({
- pattern: Type.String({ description: "Search pattern (regex or literal string)" }),
- path: Type.Optional(Type.String({ description: "Directory or file to search (default: current directory)" })),
- glob: Type.Optional(Type.String({ description: "Filter files by glob pattern, e.g. '*.ts'" })),
- ignoreCase: Type.Optional(Type.Boolean({ description: "Case-insensitive search (default: false)" })),
+ pattern: Type.String({
+ description: "Search pattern (regex or literal string)",
+ }),
+ path: Type.Optional(
+ Type.String({
+ description: "Directory or file to search (default: current directory)",
+ }),
+ ),
+ glob: Type.Optional(
+ Type.String({ description: "Filter files by glob pattern, e.g. '*.ts'" }),
+ ),
+ ignoreCase: Type.Optional(
+ Type.Boolean({ description: "Case-insensitive search (default: false)" }),
+ ),
literal: Type.Optional(
- Type.Boolean({ description: "Treat pattern as literal string instead of regex (default: false)" }),
+ Type.Boolean({
+ description:
+ "Treat pattern as literal string instead of regex (default: false)",
+ }),
+ ),
+ context: Type.Optional(
+ Type.Number({
+ description: "Lines of context around each match (default: 0)",
+ }),
+ ),
+ limit: Type.Optional(
+ Type.Number({ description: "Maximum matches to return (default: 100)" }),
),
- context: Type.Optional(Type.Number({ description: "Lines of context around each match (default: 0)" })),
- limit: Type.Optional(Type.Number({ description: "Maximum matches to return (default: 100)" })),
});
export const createGrepTool = (workspacePath: string): AgentTool => {
- const rgResult = spawnSync("which", ["rg"], { encoding: "utf-8" });
- if (rgResult.status !== 0) {
- throw new ToolInputError("grep requires ripgrep (rg) to be installed");
- }
- const rgPath = rgResult.stdout.trim();
-
return {
name: "grep",
label: "Grep",
- description: `Search file contents for a pattern. Returns up to ${DEFAULT_LIMIT} matches or ${DEFAULT_MAX_BYTES / 1024}KB, whichever is reached first. Lines over ${GREP_MAX_LINE_LENGTH} chars are truncated. Respects .gitignore.`,
+ description: `Search file contents for a pattern using ripgrep. Returns up to ${DEFAULT_LIMIT} matches or ${DEFAULT_MAX_BYTES / 1024}KB (whichever is hit first). Lines truncated to ${GREP_MAX_LINE_LENGTH} chars. Respects .gitignore.`,
parameters: GrepSchema as any,
execute: async (_toolCallId: string, params: any) => {
+ const rgResult = spawnSync("which", ["rg"], { encoding: "utf-8" });
+ if (rgResult.status !== 0) {
+ return {
+ content: [
+ {
+ type: "text",
+ text: "ripgrep (rg) is not installed. Please tell the user to install ripgrep to use search functionality.",
+ },
+ ],
+ details: { error: "rg_not_found" },
+ };
+ }
+ const rgPath = rgResult.stdout.trim();
+
const searchDir: string | undefined = params.path;
const searchPath = resolveToCwd(searchDir || ".", workspacePath);
ensureWorkspacePath(workspacePath, searchPath);
@@ -52,143 +79,163 @@ export const createGrepTool = (workspacePath: string): AgentTool => {
try {
isDirectory = statSync(searchPath).isDirectory();
} catch {
- throw new ToolInputError(`Path does not exist or is not accessible: ${searchDir || "."}`);
- }
-
- const effectiveLimit: number = Math.max(1, Math.floor(params.limit ?? DEFAULT_LIMIT));
- const contextLines: number = Math.max(0, Math.floor(params.context ?? 0));
-
- const args: string[] = ["--json", "--line-number", "--color=never", "--hidden"];
- if (params.ignoreCase) args.push("--ignore-case");
- if (params.literal) args.push("--fixed-strings");
- if (params.glob) args.push("--glob", params.glob);
- args.push(params.pattern, searchPath);
-
- return new Promise((resolve, reject) => {
- const child = spawn(rgPath, args, { stdio: ["ignore", "pipe", "pipe"] });
-
- const rl = createInterface({ input: child.stdout! });
- const fileCache = new Map<string, string[]>();
-
- interface MatchEntry {
- filePath: string;
- lineNumber: number;
+ throw new ToolInputError(
+ `Path does not exist or is not accessible: ${searchDir || "."}`,
+ );
}
- const matches: MatchEntry[] = [];
- let stderrData = "";
- let limitReached = false;
+ const effectiveLimit: number = Math.max(
+ 1,
+ Math.floor(params.limit ?? DEFAULT_LIMIT),
+ );
+ const contextLines: number = Math.max(0, Math.floor(params.context ?? 0));
+
+ const args: string[] = [
+ "--json",
+ "--line-number",
+ "--color=never",
+ "--hidden",
+ ];
+ if (params.ignoreCase) args.push("--ignore-case");
+ if (params.literal) args.push("--fixed-strings");
+ if (params.glob) args.push("--glob", params.glob);
+ args.push(params.pattern, searchPath);
+
+ return new Promise((resolve, reject) => {
+ const child = spawn(rgPath, args, {
+ stdio: ["ignore", "pipe", "pipe"],
+ });
- child.stderr!.on("data", (chunk: Buffer) => {
- stderrData += chunk.toString();
- });
+ const rl = createInterface({ input: child.stdout! });
+ const fileCache = new Map<string, string[]>();
- rl.on("line", (line: string) => {
- if (matches.length >= effectiveLimit) {
- if (!limitReached) {
- limitReached = true;
- child.kill();
- }
- return;
+ interface MatchEntry {
+ filePath: string;
+ lineNumber: number;
}
- try {
- const parsed = JSON.parse(line);
- if (parsed.type === "match") {
- const filePath: string = parsed.data.path.text;
- const lineNumber: number = parsed.data.line_number;
- matches.push({ filePath, lineNumber });
- }
- } catch {
- // ignore malformed JSON lines
- }
- });
+ const matches: MatchEntry[] = [];
+ let stderrData = "";
+ let limitReached = false;
- child.on("close", (code: number | null) => {
- if (code !== 0 && code !== 1 && !limitReached) {
- reject(new Error(`rg exited with code ${code}: ${stderrData.trim()}`));
- return;
- }
+ child.stderr?.on("data", (chunk: Buffer) => {
+ stderrData += chunk.toString();
+ });
- if (matches.length === 0) {
- resolve({
- content: [{ type: "text", text: "No matches found" }],
- details: { matches: 0 },
- });
- return;
- }
+ rl.on("line", (line: string) => {
+ if (matches.length >= effectiveLimit) {
+ if (!limitReached) {
+ limitReached = true;
+ child.kill();
+ }
+ return;
+ }
- const outputLines: string[] = [];
- let anyLineTruncated = false;
-
- for (const match of matches) {
- let lines: string[];
- if (fileCache.has(match.filePath)) {
- lines = fileCache.get(match.filePath)!;
- } else {
- try {
- lines = readFileSync(match.filePath, "utf-8").split(/\r?\n/);
- fileCache.set(match.filePath, lines);
- } catch {
- lines = [];
+ try {
+ const parsed = JSON.parse(line);
+ if (parsed.type === "match") {
+ const filePath: string = parsed.data.path.text;
+ const lineNumber: number = parsed.data.line_number;
+ matches.push({ filePath, lineNumber });
}
+ } catch {
+ // ignore malformed JSON lines
}
+ });
- const relPath = isDirectory
- ? relative(searchPath, match.filePath)
- : basename(match.filePath);
+ child.on("close", (code: number | null) => {
+ if (code !== 0 && code !== 1 && !limitReached) {
+ reject(
+ new Error(`rg exited with code ${code}: ${stderrData.trim()}`),
+ );
+ return;
+ }
- const startLine = Math.max(0, match.lineNumber - 1 - contextLines);
- const endLine = Math.min(lines.length, match.lineNumber + contextLines);
+ if (matches.length === 0) {
+ resolve({
+ content: [{ type: "text", text: "No matches found" }],
+ details: { matches: 0 },
+ });
+ return;
+ }
- for (let i = startLine; i < endLine; i++) {
- const lineContent = lines[i] ?? "";
- const { text: truncatedContent, wasTruncated } = truncateLine(lineContent);
- if (wasTruncated) anyLineTruncated = true;
+ const outputLines: string[] = [];
+ let anyLineTruncated = false;
- const lineNum = i + 1;
- if (lineNum === match.lineNumber) {
- outputLines.push(`${relPath}:${lineNum}: ${truncatedContent}`);
+ for (const match of matches) {
+ let lines: string[];
+ if (fileCache.has(match.filePath)) {
+ lines = fileCache.get(match.filePath)!;
} else {
- outputLines.push(`${relPath}-${lineNum}- ${truncatedContent}`);
+ try {
+ lines = readFileSync(match.filePath, "utf-8").split(/\r?\n/);
+ fileCache.set(match.filePath, lines);
+ } catch {
+ lines = [];
+ }
+ }
+
+ const relPath = isDirectory
+ ? relative(searchPath, match.filePath)
+ : basename(match.filePath);
+
+ const startLine = Math.max(0, match.lineNumber - 1 - contextLines);
+ const endLine = Math.min(
+ lines.length,
+ match.lineNumber + contextLines,
+ );
+
+ for (let i = startLine; i < endLine; i++) {
+ const lineContent = lines[i] ?? "";
+ const { text: truncatedContent, wasTruncated } =
+ truncateLine(lineContent);
+ if (wasTruncated) anyLineTruncated = true;
+
+ const lineNum = i + 1;
+ if (lineNum === match.lineNumber) {
+ outputLines.push(`${relPath}:${lineNum}: ${truncatedContent}`);
+ } else {
+ outputLines.push(`${relPath}-${lineNum}- ${truncatedContent}`);
+ }
}
}
- }
- const rawOutput = outputLines.join("\n");
- const truncation = truncateHead(rawOutput, { maxLines: Number.MAX_SAFE_INTEGER });
+ const rawOutput = outputLines.join("\n");
+ const truncation = truncateHead(rawOutput, {
+ maxLines: Number.MAX_SAFE_INTEGER,
+ });
- const notices: string[] = [];
- if (limitReached) {
- notices.push(
- `${effectiveLimit} matches limit reached. Use limit=${effectiveLimit * 2} for more, or refine pattern`,
- );
- }
- if (truncation.truncated) {
- notices.push(`${formatSize(DEFAULT_MAX_BYTES)} limit reached`);
- }
- if (anyLineTruncated) {
- notices.push(
- `Some lines truncated to ${GREP_MAX_LINE_LENGTH} chars. Use read tool to see full lines`,
- );
- }
+ const notices: string[] = [];
+ if (limitReached) {
+ notices.push(
+ `${effectiveLimit} matches limit reached. Use limit=${effectiveLimit * 2} for more, or refine pattern`,
+ );
+ }
+ if (truncation.truncated) {
+ notices.push(`${formatSize(DEFAULT_MAX_BYTES)} limit reached`);
+ }
+ if (anyLineTruncated) {
+ notices.push(
+ `Some lines truncated to ${GREP_MAX_LINE_LENGTH} chars. Use read tool to see full lines`,
+ );
+ }
- let output = truncation.content;
- if (notices.length > 0) {
- output += `\n\n[${notices.join(". ")}]`;
- }
+ let output = truncation.content;
+ if (notices.length > 0) {
+ output += `\n\n[${notices.join(". ")}]`;
+ }
- resolve({
- content: [{ type: "text", text: output }],
- details: {
- matches: matches.length,
- ...(limitReached ? { matchLimitReached: effectiveLimit } : {}),
- ...(truncation.truncated ? { truncation } : {}),
- ...(anyLineTruncated ? { linesTruncated: true } : {}),
- },
+ resolve({
+ content: [{ type: "text", text: output }],
+ details: {
+ matches: matches.length,
+ ...(limitReached ? { matchLimitReached: effectiveLimit } : {}),
+ ...(truncation.truncated ? { truncation } : {}),
+ ...(anyLineTruncated ? { linesTruncated: true } : {}),
+ },
+ });
});
});
- });
},
};
};
@@ -11,8 +11,8 @@ export interface ToolBundle {
tools: AgentTool<any>[];
}
-export { ensureWorkspacePath } from "./path-utils.js";
-export { createReadTool } from "./read.js";
+export { createFindTool } from "./find.js";
export { createGrepTool } from "./grep.js";
export { createLsTool } from "./ls.js";
-export { createFindTool } from "./find.js";
+export { ensureWorkspacePath } from "./path-utils.js";
+export { createReadTool } from "./read.js";
@@ -4,16 +4,28 @@
import { existsSync, readdirSync, statSync } from "node:fs";
import { join } from "node:path";
-import { Type } from "@sinclair/typebox";
import type { AgentTool } from "@mariozechner/pi-agent-core";
-import { resolveToCwd, ensureWorkspacePath } from "./path-utils.js";
-import { DEFAULT_MAX_BYTES, formatSize, truncateHead } from "../../util/truncate.js";
+import { Type } from "@sinclair/typebox";
+import {
+ DEFAULT_MAX_BYTES,
+ formatSize,
+ truncateHead,
+} from "../../util/truncate.js";
+import { ensureWorkspacePath, resolveToCwd } from "./path-utils.js";
const DEFAULT_LIMIT = 500;
const LsSchema = Type.Object({
- path: Type.Optional(Type.String({ description: "Directory to list (default: current directory)" })),
- limit: Type.Optional(Type.Number({ description: "Maximum number of entries to return (default: 500)" })),
+ path: Type.Optional(
+ Type.String({
+ description: "Directory to list (default: current directory)",
+ }),
+ ),
+ limit: Type.Optional(
+ Type.Number({
+ description: "Maximum number of entries to return (default: 500)",
+ }),
+ ),
});
export const createLsTool = (workspacePath: string): AgentTool => ({
@@ -35,7 +47,9 @@ export const createLsTool = (workspacePath: string): AgentTool => ({
}
const entries = readdirSync(resolved);
- entries.sort((a, b) => a.localeCompare(b, undefined, { sensitivity: "base" }));
+ entries.sort((a, b) =>
+ a.localeCompare(b, undefined, { sensitivity: "base" }),
+ );
const effectiveLimit = params.limit ?? DEFAULT_LIMIT;
const limited = entries.slice(0, effectiveLimit);
@@ -59,11 +73,15 @@ export const createLsTool = (workspacePath: string): AgentTool => ({
}
const rawOutput = lines.join("\n");
- const truncation = truncateHead(rawOutput, { maxLines: Number.MAX_SAFE_INTEGER });
+ const truncation = truncateHead(rawOutput, {
+ maxLines: Number.MAX_SAFE_INTEGER,
+ });
const notices: string[] = [];
if (entryLimitReached) {
- notices.push(`Entry limit reached: showing ${effectiveLimit} of ${entries.length} entries.`);
+ notices.push(
+ `Entry limit reached: showing ${effectiveLimit} of ${entries.length} entries.`,
+ );
}
if (truncation.truncated) {
notices.push(
@@ -9,66 +9,70 @@ import { ToolInputError } from "../../util/errors.js";
const UNICODE_SPACES = /[\u00A0\u2000-\u200A\u202F\u205F\u3000]/g;
function normalizeAtPrefix(filePath: string): string {
- return filePath.startsWith("@") ? filePath.slice(1) : filePath;
+ return filePath.startsWith("@") ? filePath.slice(1) : filePath;
}
function fileExists(path: string): boolean {
- try {
- accessSync(path, constants.F_OK);
- return true;
- } catch {
- return false;
- }
+ try {
+ accessSync(path, constants.F_OK);
+ return true;
+ } catch {
+ return false;
+ }
}
function tryMacOSScreenshotPath(filePath: string): string {
- return filePath.replace(/ (AM|PM)\./g, "\u202F$1.");
+ return filePath.replace(/ (AM|PM)\./g, "\u202F$1.");
}
function tryNFDVariant(filePath: string): string {
- return filePath.normalize("NFD");
+ return filePath.normalize("NFD");
}
function tryCurlyQuoteVariant(filePath: string): string {
- return filePath.replace(/'/g, "\u2019");
+ return filePath.replace(/'/g, "\u2019");
}
export function expandPath(filePath: string): string {
- let result = filePath.replace(UNICODE_SPACES, " ");
- result = normalizeAtPrefix(result);
+ let result = filePath.replace(UNICODE_SPACES, " ");
+ result = normalizeAtPrefix(result);
- // NOTE: tilde expansion is intentionally omitted.
- // In a workspace-sandboxed context, expanding ~ to the user's home
- // directory would bypass workspace containment. Tildes are treated
- // as literal path characters.
+ // NOTE: tilde expansion is intentionally omitted.
+ // In a workspace-sandboxed context, expanding ~ to the user's home
+ // directory would bypass workspace containment. Tildes are treated
+ // as literal path characters.
- return result;
+ return result;
}
export function resolveToCwd(filePath: string, cwd: string): string {
- const expanded = expandPath(filePath);
- if (isAbsolute(expanded)) {
- return expanded;
- }
- return resolvePath(cwd, expanded);
+ const expanded = expandPath(filePath);
+ if (isAbsolute(expanded)) {
+ return expanded;
+ }
+ return resolvePath(cwd, expanded);
}
export function resolveReadPath(filePath: string, cwd: string): string {
- const resolved = resolveToCwd(filePath, cwd);
-
- if (fileExists(resolved)) {
- return resolved;
- }
-
- const variants = [tryNFDVariant, tryMacOSScreenshotPath, tryCurlyQuoteVariant];
- for (const variant of variants) {
- const candidate = variant(resolved);
- if (candidate !== resolved && fileExists(candidate)) {
- return candidate;
- }
- }
-
- return resolved;
+ const resolved = resolveToCwd(filePath, cwd);
+
+ if (fileExists(resolved)) {
+ return resolved;
+ }
+
+ const variants = [
+ tryNFDVariant,
+ tryMacOSScreenshotPath,
+ tryCurlyQuoteVariant,
+ ];
+ for (const variant of variants) {
+ const candidate = variant(resolved);
+ if (candidate !== resolved && fileExists(candidate)) {
+ return candidate;
+ }
+ }
+
+ return resolved;
}
/**
@@ -78,40 +82,49 @@ export function resolveReadPath(filePath: string, cwd: string): string {
* exist yet while still catching symlink escapes in any ancestor directory.
*/
function safeRealpath(p: string): string {
- try {
- return realpathSync(p);
- } catch (err: any) {
- if (err?.code === "ENOENT") {
- const parent = dirname(p);
- if (parent === p) {
- // filesystem root — nothing more to resolve
- return p;
- }
- const realParent = safeRealpath(parent);
- const tail = p.slice(parent.length);
- return realParent + tail;
- }
- throw err;
- }
+ try {
+ return realpathSync(p);
+ } catch (err: any) {
+ if (err?.code === "ENOENT") {
+ const parent = dirname(p);
+ if (parent === p) {
+ // filesystem root — nothing more to resolve
+ return p;
+ }
+ const realParent = safeRealpath(parent);
+ const tail = p.slice(parent.length);
+ return realParent + tail;
+ }
+ throw err;
+ }
}
-export function ensureWorkspacePath(workspacePath: string, targetPath: string): string {
- const resolved = resolvePath(workspacePath, targetPath);
-
- // Quick textual check first (catches the common case cheaply)
- const root = workspacePath.endsWith(sep) ? workspacePath : `${workspacePath}${sep}`;
- if (resolved !== workspacePath && !resolved.startsWith(root)) {
- throw new ToolInputError(`Path escapes workspace: ${targetPath}`);
- }
-
- // Resolve symlinks to catch symlink-based escapes
- const realWorkspace = safeRealpath(workspacePath);
- const realTarget = safeRealpath(resolved);
- const realRoot = realWorkspace.endsWith(sep) ? realWorkspace : `${realWorkspace}${sep}`;
-
- if (realTarget !== realWorkspace && !realTarget.startsWith(realRoot)) {
- throw new ToolInputError(`Path escapes workspace via symlink: ${targetPath}`);
- }
-
- return resolved;
+export function ensureWorkspacePath(
+ workspacePath: string,
+ targetPath: string,
+): string {
+ const resolved = resolvePath(workspacePath, targetPath);
+
+ // Quick textual check first (catches the common case cheaply)
+ const root = workspacePath.endsWith(sep)
+ ? workspacePath
+ : `${workspacePath}${sep}`;
+ if (resolved !== workspacePath && !resolved.startsWith(root)) {
+ throw new ToolInputError(`Path escapes workspace: ${targetPath}`);
+ }
+
+ // Resolve symlinks to catch symlink-based escapes
+ const realWorkspace = safeRealpath(workspacePath);
+ const realTarget = safeRealpath(resolved);
+ const realRoot = realWorkspace.endsWith(sep)
+ ? realWorkspace
+ : `${realWorkspace}${sep}`;
+
+ if (realTarget !== realWorkspace && !realTarget.startsWith(realRoot)) {
+ throw new ToolInputError(
+ `Path escapes workspace via symlink: ${targetPath}`,
+ );
+ }
+
+ return resolved;
}
@@ -3,86 +3,100 @@
// SPDX-License-Identifier: GPL-3.0-or-later
import { readFile, stat } from "node:fs/promises";
-import { Type } from "@sinclair/typebox";
import type { AgentTool } from "@mariozechner/pi-agent-core";
-import { resolveReadPath, ensureWorkspacePath } from "./path-utils.js";
-import { DEFAULT_MAX_BYTES, DEFAULT_MAX_LINES, formatSize, truncateHead } from "../../util/truncate.js";
+import { Type } from "@sinclair/typebox";
import { ToolInputError } from "../../util/errors.js";
+import {
+ DEFAULT_MAX_BYTES,
+ DEFAULT_MAX_LINES,
+ formatSize,
+ truncateHead,
+} from "../../util/truncate.js";
+import { ensureWorkspacePath, resolveReadPath } from "./path-utils.js";
const MAX_READ_BYTES = 5 * 1024 * 1024;
const ReadSchema = Type.Object({
- path: Type.String({ description: "File path relative to workspace root" }),
- offset: Type.Optional(Type.Number({ description: "1-based starting line (default: 1)" })),
- limit: Type.Optional(Type.Number({ description: "Maximum lines to return" })),
+ path: Type.String({ description: "Path relative to workspace root" }),
+ offset: Type.Optional(
+ Type.Number({ description: "1-based starting line (default: 1)" }),
+ ),
+ limit: Type.Optional(Type.Number({ description: "Maximum lines to return" })),
});
export const createReadTool = (workspacePath: string): AgentTool => ({
- name: "read",
- label: "Read File",
- description: `Read a file's contents. Returns up to ${DEFAULT_MAX_LINES} lines or ${DEFAULT_MAX_BYTES / 1024}KB, whichever is reached first. Use offset and limit to paginate large files.`,
- parameters: ReadSchema as any,
- execute: async (_toolCallId: string, params: any) => {
- const absolutePath = resolveReadPath(params.path, workspacePath);
- ensureWorkspacePath(workspacePath, absolutePath);
- const fileStats = await stat(absolutePath);
-
- if (fileStats.size > MAX_READ_BYTES) {
- throw new ToolInputError(`File too large (>5MB): ${params.path}`);
- }
-
- const raw = await readFile(absolutePath, "utf8");
- const allLines = raw.split("\n");
- const totalFileLines = allLines.length;
-
- const startLine = params.offset ? Math.max(0, params.offset - 1) : 0;
- const startLineDisplay = startLine + 1;
-
- if (startLine >= allLines.length) {
- throw new Error(`Offset ${params.offset} is beyond end of file (${allLines.length} lines total)`);
- }
-
- let selectedContent: string;
- let userLimitedLines: number | undefined;
- if (params.limit !== undefined) {
- const endLine = Math.min(startLine + params.limit, allLines.length);
- selectedContent = allLines.slice(startLine, endLine).join("\n");
- userLimitedLines = endLine - startLine;
- } else {
- selectedContent = allLines.slice(startLine).join("\n");
- }
-
- const truncation = truncateHead(selectedContent);
-
- let output: string;
-
- if (truncation.firstLineExceedsLimit) {
- const firstLineSize = formatSize(Buffer.byteLength(allLines[startLine] ?? "", "utf-8"));
- output = `[Line ${startLineDisplay} is ${firstLineSize}, exceeds ${formatSize(DEFAULT_MAX_BYTES)} limit. Use a local file viewer to extract that single line by number.]`;
- } else if (truncation.truncated) {
- const endLineDisplay = startLineDisplay + truncation.outputLines - 1;
- const nextOffset = endLineDisplay + 1;
-
- output = truncation.content;
-
- if (truncation.truncatedBy === "lines") {
- output += `\n\n[Showing lines ${startLineDisplay}-${endLineDisplay} of ${totalFileLines}. Use offset=${nextOffset} to continue.]`;
- } else {
- output += `\n\n[Showing lines ${startLineDisplay}-${endLineDisplay} of ${totalFileLines} (${formatSize(DEFAULT_MAX_BYTES)} limit). Use offset=${nextOffset} to continue.]`;
- }
- } else if (userLimitedLines !== undefined && startLine + userLimitedLines < allLines.length) {
- const remaining = allLines.length - (startLine + userLimitedLines);
- const nextOffset = startLine + userLimitedLines + 1;
-
- output = truncation.content;
- output += `\n\n[${remaining} more lines in file. Use offset=${nextOffset} to continue.]`;
- } else {
- output = truncation.content;
- }
-
- return {
- content: [{ type: "text", text: output }],
- details: { truncation },
- };
- },
+ name: "read",
+ label: "Read File",
+ description: `Read a file's contents. Output is truncated to ${DEFAULT_MAX_LINES} lines or ${DEFAULT_MAX_BYTES / 1024}KB (whichever is hit first). Use offset/limit for large files.`,
+ parameters: ReadSchema as any,
+ execute: async (_toolCallId: string, params: any) => {
+ const absolutePath = resolveReadPath(params.path, workspacePath);
+ ensureWorkspacePath(workspacePath, absolutePath);
+ const fileStats = await stat(absolutePath);
+
+ if (fileStats.size > MAX_READ_BYTES) {
+ throw new ToolInputError(`File exceeds 5MB limit: ${params.path}`);
+ }
+
+ const raw = await readFile(absolutePath, "utf8");
+ const allLines = raw.split("\n");
+ const totalFileLines = allLines.length;
+
+ const startLine = params.offset ? Math.max(0, params.offset - 1) : 0;
+ const startLineDisplay = startLine + 1;
+
+ if (startLine >= allLines.length) {
+ throw new Error(
+ `Offset ${params.offset} is beyond end of file (${allLines.length} lines total)`,
+ );
+ }
+
+ let selectedContent: string;
+ let userLimitedLines: number | undefined;
+ if (params.limit !== undefined) {
+ const endLine = Math.min(startLine + params.limit, allLines.length);
+ selectedContent = allLines.slice(startLine, endLine).join("\n");
+ userLimitedLines = endLine - startLine;
+ } else {
+ selectedContent = allLines.slice(startLine).join("\n");
+ }
+
+ const truncation = truncateHead(selectedContent);
+
+ let output: string;
+
+ if (truncation.firstLineExceedsLimit) {
+ const firstLineSize = formatSize(
+ Buffer.byteLength(allLines[startLine] ?? "", "utf-8"),
+ );
+ output = `[Line ${startLineDisplay} is ${firstLineSize}, exceeds ${formatSize(DEFAULT_MAX_BYTES)} limit. Use a local file viewer to extract that single line by number.]`;
+ } else if (truncation.truncated) {
+ const endLineDisplay = startLineDisplay + truncation.outputLines - 1;
+ const nextOffset = endLineDisplay + 1;
+
+ output = truncation.content;
+
+ if (truncation.truncatedBy === "lines") {
+ output += `\n\n[Showing lines ${startLineDisplay}-${endLineDisplay} of ${totalFileLines}. Use offset=${nextOffset} to continue.]`;
+ } else {
+ output += `\n\n[Showing lines ${startLineDisplay}-${endLineDisplay} of ${totalFileLines} (${formatSize(DEFAULT_MAX_BYTES)} limit). Use offset=${nextOffset} to continue.]`;
+ }
+ } else if (
+ userLimitedLines !== undefined &&
+ startLine + userLimitedLines < allLines.length
+ ) {
+ const remaining = allLines.length - (startLine + userLimitedLines);
+ const nextOffset = startLine + userLimitedLines + 1;
+
+ output = truncation.content;
+ output += `\n\n[${remaining} more lines in file. Use offset=${nextOffset} to continue.]`;
+ } else {
+ output = truncation.content;
+ }
+
+ return {
+ content: [{ type: "text", text: output }],
+ details: { truncation },
+ };
+ },
});
@@ -2,8 +2,8 @@
//
// SPDX-License-Identifier: GPL-3.0-or-later
-import { Type } from "@sinclair/typebox";
import type { AgentTool } from "@mariozechner/pi-agent-core";
+import { Type } from "@sinclair/typebox";
import Tabstack from "@tabstack/sdk";
import { FetchError, ToolInputError } from "../../util/errors.js";
@@ -14,18 +14,18 @@ export interface WebFetchResult {
const FetchSchema = Type.Object({
url: Type.String({ description: "URL to fetch" }),
- nocache: Type.Optional(Type.Boolean({ description: "Bypass cache and fetch fresh content" })),
+ nocache: Type.Optional(Type.Boolean({ description: "Force fresh fetch" })),
});
export function createWebFetchTool(apiKey: string): AgentTool {
return {
name: "web_fetch",
label: "Web Fetch",
- description: "Fetch a URL and return its content as markdown.",
+ description: "Fetch a URL and return markdown using Tabstack.",
parameters: FetchSchema as any,
execute: async (_toolCallId: string, params: any) => {
if (!apiKey) {
- throw new ToolInputError("Web fetch is not configured");
+ throw new ToolInputError("Missing Tabstack API key");
}
const client = new Tabstack({ apiKey });
@@ -2,8 +2,8 @@
//
// SPDX-License-Identifier: GPL-3.0-or-later
-import { Type } from "@sinclair/typebox";
import type { AgentTool } from "@mariozechner/pi-agent-core";
+import { Type } from "@sinclair/typebox";
import { search } from "kagi-ken";
import { FetchError, ToolInputError } from "../../util/errors.js";
@@ -14,18 +14,21 @@ const SearchSchema = Type.Object({
export const createWebSearchTool = (sessionToken: string): AgentTool => ({
name: "web_search",
label: "Web Search",
- description: "Search the web. Returns structured results with titles, URLs, and snippets.",
+ description: "Search the web using Kagi (session token required).",
parameters: SearchSchema as any,
execute: async (_toolCallId: string, params: any) => {
if (!sessionToken) {
- throw new ToolInputError("Web search is not configured");
+ throw new ToolInputError("Missing Kagi session token");
}
try {
const result = await search(params.query, sessionToken);
return {
content: [{ type: "text", text: JSON.stringify(result, null, 2) }],
- details: { query: params.query, resultCount: result?.data?.length ?? 0 },
+ details: {
+ query: params.query,
+ resultCount: result?.data?.length ?? 0,
+ },
};
} catch (error: any) {
throw new FetchError(
@@ -3,24 +3,24 @@
// SPDX-License-Identifier: GPL-3.0-or-later
import { readFile } from "node:fs/promises";
-import { expandHomePath } from "../../util/path.js";
-import { applyConfigOverrides, loadConfig } from "../../config/loader.js";
-import { createWorkspace } from "../../workspace/manager.js";
-import { createGrepTool } from "../../agent/tools/grep.js";
-import { createReadTool } from "../../agent/tools/read.js";
-import { createLsTool } from "../../agent/tools/ls.js";
+import simpleGit from "simple-git";
+import { buildRepoPrompt } from "../../agent/prompts/repo.js";
+import { runAgent } from "../../agent/runner.js";
import { createFindTool } from "../../agent/tools/find.js";
-import { createGitLogTool } from "../../agent/tools/git/log.js";
-import { createGitShowTool } from "../../agent/tools/git/show.js";
import { createGitBlameTool } from "../../agent/tools/git/blame.js";
+import { createGitCheckoutTool } from "../../agent/tools/git/checkout.js";
import { createGitDiffTool } from "../../agent/tools/git/diff.js";
+import { createGitLogTool } from "../../agent/tools/git/log.js";
import { createGitRefsTool } from "../../agent/tools/git/refs.js";
-import { createGitCheckoutTool } from "../../agent/tools/git/checkout.js";
-import { runAgent } from "../../agent/runner.js";
-import { buildRepoPrompt } from "../../agent/prompts/repo.js";
-import { createEventLogger, printUsageSummary } from "../output.js";
+import { createGitShowTool } from "../../agent/tools/git/show.js";
+import { createGrepTool } from "../../agent/tools/grep.js";
+import { createLsTool } from "../../agent/tools/ls.js";
+import { createReadTool } from "../../agent/tools/read.js";
+import { applyConfigOverrides, loadConfig } from "../../config/loader.js";
import { CloneError } from "../../util/errors.js";
-import simpleGit from "simple-git";
+import { expandHomePath } from "../../util/path.js";
+import { createWorkspace } from "../../workspace/manager.js";
+import { createEventLogger, printUsageSummary } from "../output.js";
export interface RepoCommandOptions {
query: string;
@@ -32,14 +32,21 @@ export interface RepoCommandOptions {
cleanup: boolean;
}
-export async function runRepoCommand(options: RepoCommandOptions): Promise<void> {
+export async function runRepoCommand(
+ options: RepoCommandOptions,
+): Promise<void> {
const { config } = await loadConfig();
const overrides = applyConfigOverrides(config, {
- defaults: { model: options.model ?? config.defaults.model, cleanup: options.cleanup },
+ defaults: {
+ model: options.model ?? config.defaults.model,
+ cleanup: options.cleanup,
+ },
repo: { model: options.model ?? config.repo.model },
});
- const workspace = await createWorkspace({ cleanup: overrides.defaults.cleanup });
+ const workspace = await createWorkspace({
+ cleanup: overrides.defaults.cleanup,
+ });
try {
const logger = createEventLogger({ verbose: options.verbose });
@@ -58,7 +65,18 @@ export async function runRepoCommand(options: RepoCommandOptions): Promise<void>
if (!options.full) {
const depth = overrides.repo.default_depth ?? 1;
const blobLimit = overrides.repo.blob_limit ?? "5m";
- cloneArgs.push("--depth", String(depth), `--filter=blob:limit=${blobLimit}`);
+ cloneArgs.push(
+ "--depth",
+ String(depth),
+ `--filter=blob:limit=${blobLimit}`,
+ );
+ }
+
+ if (options.uri.startsWith("-")) {
+ throw new CloneError(options.uri, "URI must not start with '-'");
+ }
+ if (options.ref?.startsWith("-")) {
+ throw new CloneError(options.ref, "Ref must not start with '-'");
}
try {
@@ -99,6 +117,10 @@ export async function runRepoCommand(options: RepoCommandOptions): Promise<void>
process.stdout.write(result.message + "\n");
printUsageSummary(result.usage as any, result.requestCount);
} finally {
- await workspace.cleanup();
+ try {
+ await workspace.cleanup();
+ } catch {
+ // cleanup is best-effort; don't mask the original error
+ }
}
}
@@ -4,20 +4,20 @@
import { readFile } from "node:fs/promises";
import { basename } from "node:path";
-import { expandHomePath } from "../../util/path.js";
-import { applyConfigOverrides, loadConfig } from "../../config/loader.js";
-import { createWorkspace } from "../../workspace/manager.js";
-import { writeWorkspaceFile } from "../../workspace/content.js";
+import { buildWebPrompt } from "../../agent/prompts/web.js";
+import { runAgent } from "../../agent/runner.js";
+import { createFindTool } from "../../agent/tools/find.js";
import { createGrepTool } from "../../agent/tools/grep.js";
-import { createReadTool } from "../../agent/tools/read.js";
import { createLsTool } from "../../agent/tools/ls.js";
-import { createFindTool } from "../../agent/tools/find.js";
+import { createReadTool } from "../../agent/tools/read.js";
import { createWebFetchTool } from "../../agent/tools/web-fetch.js";
import { createWebSearchTool } from "../../agent/tools/web-search.js";
-import { runAgent } from "../../agent/runner.js";
-import { buildWebPrompt } from "../../agent/prompts/web.js";
-import { createEventLogger, printUsageSummary } from "../output.js";
+import { applyConfigOverrides, loadConfig } from "../../config/loader.js";
import { ConfigError, FetchError } from "../../util/errors.js";
+import { expandHomePath } from "../../util/path.js";
+import { writeWorkspaceFile } from "../../workspace/content.js";
+import { createWorkspace } from "../../workspace/manager.js";
+import { createEventLogger, printUsageSummary } from "../output.js";
const INJECT_THRESHOLD = 50 * 1024;
@@ -32,27 +32,38 @@ export interface WebCommandOptions {
export async function runWebCommand(options: WebCommandOptions): Promise<void> {
const { config } = await loadConfig();
const overrides = applyConfigOverrides(config, {
- defaults: { model: options.model ?? config.defaults.model, cleanup: options.cleanup },
+ defaults: {
+ model: options.model ?? config.defaults.model,
+ cleanup: options.cleanup,
+ },
web: { model: options.model ?? config.web.model },
});
- const workspace = await createWorkspace({ cleanup: overrides.defaults.cleanup });
+ const workspace = await createWorkspace({
+ cleanup: overrides.defaults.cleanup,
+ });
try {
const logger = createEventLogger({ verbose: options.verbose });
const kagiSession =
- overrides.web.kagi_session_token ?? overrides.defaults.kagi_session_token ?? process.env["KAGI_SESSION_TOKEN"];
+ overrides.web.kagi_session_token ??
+ overrides.defaults.kagi_session_token ??
+ process.env["KAGI_SESSION_TOKEN"];
const tabstackKey =
overrides.web.tabstack_api_key ??
overrides.defaults.tabstack_api_key ??
process.env["TABSTACK_API_KEY"];
if (!kagiSession) {
- throw new ConfigError("Web search requires KAGI_SESSION_TOKEN (set via environment or config)");
+ throw new ConfigError(
+ "Web search requires KAGI_SESSION_TOKEN (set via environment or config)",
+ );
}
if (!tabstackKey) {
- throw new ConfigError("Web fetch requires TABSTACK_API_KEY (set via environment or config)");
+ throw new ConfigError(
+ "Web fetch requires TABSTACK_API_KEY (set via environment or config)",
+ );
}
let systemPrompt = buildWebPrompt({
@@ -63,9 +74,11 @@ export async function runWebCommand(options: WebCommandOptions): Promise<void> {
systemPrompt = await readFile(expandHomePath(promptPath), "utf8");
}
+ const webFetchTool = createWebFetchTool(tabstackKey);
+
const tools = [
createWebSearchTool(kagiSession),
- createWebFetchTool(tabstackKey),
+ webFetchTool,
createReadTool(workspace.path),
createGrepTool(workspace.path),
createLsTool(workspace.path),
@@ -74,20 +87,46 @@ export async function runWebCommand(options: WebCommandOptions): Promise<void> {
let seededContext = "";
if (options.url) {
- const fetchTool = createWebFetchTool(tabstackKey);
+ let parsedUrl: URL;
+ try {
+ parsedUrl = new URL(options.url);
+ } catch {
+ throw new FetchError(options.url, "Invalid URL");
+ }
+ if (parsedUrl.protocol !== "http:" && parsedUrl.protocol !== "https:") {
+ throw new FetchError(
+ options.url,
+ "Only http and https URLs are supported",
+ );
+ }
+
try {
- const result = await fetchTool.execute("prefetch", { url: options.url, nocache: false });
+ const result = await webFetchTool.execute("prefetch", {
+ url: options.url,
+ nocache: false,
+ });
const text = result.content
- .map((block) => (block.type === "text" ? block.text ?? "" : ""))
+ .map((block) => (block.type === "text" ? (block.text ?? "") : ""))
.join("");
if (text.length <= INJECT_THRESHOLD) {
seededContext = text;
} else {
- const filename = `web/${basename(new URL(options.url).pathname) || "index"}.md`;
+ let rawBasename: string;
+ try {
+ rawBasename =
+ decodeURIComponent(basename(parsedUrl.pathname)) || "index";
+ } catch {
+ rawBasename = basename(parsedUrl.pathname) || "index";
+ }
+ const safeBasename = rawBasename
+ .replace(/[^a-zA-Z0-9._-]+/g, "_")
+ .replace(/^\.{1,2}$/, "index");
+ const filename = `web/${safeBasename}.md`;
await writeWorkspaceFile(workspace.path, filename, text);
-      seededContext = `Content from ${options.url} was too large to include directly. It has been saved to ${filename} in your workspace.`;
+      seededContext = `Fetched content stored at ${filename}`;
}
} catch (error: any) {
+ if (error instanceof FetchError) throw error;
throw new FetchError(options.url, error?.message ?? String(error));
}
}
@@ -107,6 +146,10 @@ export async function runWebCommand(options: WebCommandOptions): Promise<void> {
process.stdout.write(result.message + "\n");
printUsageSummary(result.usage as any, result.requestCount);
} finally {
- await workspace.cleanup();
+ try {
+ await workspace.cleanup();
+ } catch {
+ // cleanup is best-effort; don't mask the original error
+ }
}
}
@@ -1,16 +1,13 @@
-#!/usr/bin/env bun
+#!/usr/bin/env node
// SPDX-FileCopyrightText: Amolith <amolith@secluded.site>
//
// SPDX-License-Identifier: GPL-3.0-or-later
-import { runWebCommand } from "./commands/web.js";
-import { runRepoCommand } from "./commands/repo.js";
import { RumiloError } from "../util/errors.js";
+import { runRepoCommand } from "./commands/repo.js";
+import { runWebCommand } from "./commands/web.js";
import { parseArgs } from "./parse-args.js";
-import { readFileSync } from "node:fs";
-import { join } from "node:path";
-import { fileURLToPath } from "node:url";
const VERSION = "0.1.0";
@@ -19,7 +16,12 @@ async function main() {
// Handle version flag coming before any command (e.g., rumilo -v)
// When short flags come right after process.argv, they end up in 'command'
- if (command && (command === "-v" || command === "--version") && Object.keys(options).length === 0 && positional.length === 0) {
+ if (
+ command &&
+ (command === "-v" || command === "--version") &&
+ Object.keys(options).length === 0 &&
+ positional.length === 0
+ ) {
console.log(`rumilo v${VERSION}`);
process.exit(0);
}
@@ -27,12 +29,22 @@ async function main() {
const actualCommand = command?.startsWith("-") ? undefined : command;
// Handle version/short version as flag (before command) or as command
- if (options["version"] || actualCommand === "version" || actualCommand === "v") {
+ if (
+ options["version"] ||
+ actualCommand === "version" ||
+ actualCommand === "v"
+ ) {
console.log(`rumilo v${VERSION}`);
process.exit(0);
}
- if (!actualCommand || actualCommand === "help" || actualCommand === "--help" || actualCommand === "-h" || options["help"]) {
+ if (
+ !actualCommand ||
+ actualCommand === "help" ||
+ actualCommand === "--help" ||
+ actualCommand === "-h" ||
+ options["help"]
+ ) {
console.log(`Rúmilo v${VERSION} — dispatch AI research subagents
Commands:
@@ -61,7 +73,10 @@ Configuration:
if (command === "web") {
const query = positional.join(" ");
if (!query) {
- throw new RumiloError("Missing query. Usage: rumilo web <query>", "CLI_ERROR");
+ throw new RumiloError(
+ "Missing query. Usage: rumilo web <query>",
+ "CLI_ERROR",
+ );
}
await runWebCommand({
@@ -78,10 +93,16 @@ Configuration:
const query = positional.join(" ");
const uri = options["uri"] ? String(options["uri"]) : undefined;
if (!uri) {
- throw new RumiloError("Missing repository URI. Usage: rumilo repo -u <uri> <query>", "CLI_ERROR");
+ throw new RumiloError(
+ "Missing repo URI. Usage: rumilo repo -u <uri> <query>",
+ "CLI_ERROR",
+ );
}
if (!query) {
- throw new RumiloError("Missing query. Usage: rumilo repo -u <uri> <query>", "CLI_ERROR");
+ throw new RumiloError(
+ "Missing query. Usage: rumilo repo -u <uri> <query>",
+ "CLI_ERROR",
+ );
}
await runRepoCommand({
@@ -49,14 +49,24 @@ export function createEventLogger(options: OutputOptions) {
}
export function printUsageSummary(
- usage: { cost?: { total?: number }; totalTokens?: number; output?: number; input?: number } | undefined,
+ usage:
+ | {
+ cost?: { total?: number };
+ totalTokens?: number;
+ output?: number;
+ input?: number;
+ }
+ | undefined,
requestCount?: number,
) {
if (!usage) return;
const tokens = usage.totalTokens ?? (usage.output ?? 0) + (usage.input ?? 0);
const rawCost = usage.cost?.total;
- const cost = typeof rawCost === "number" && !isNaN(rawCost) && rawCost > 0 ? rawCost : undefined;
+ const cost =
+ typeof rawCost === "number" && !Number.isNaN(rawCost) && rawCost > 0
+ ? rawCost
+ : undefined;
let line = `\nusage: ${tokens} tokens`;
if (requestCount !== undefined && requestCount > 0) {
@@ -39,7 +39,7 @@ export function parseArgs(args: string[]): ParsedArgs {
} else if (arg.startsWith("-")) {
const short = arg.slice(1);
if (short === "u") {
- if (rest[i + 1] && !rest[i + 1]!.startsWith("-")) {
+ if (rest[i + 1] && !rest[i + 1]?.startsWith("-")) {
options["uri"] = rest[i + 1] as string;
i += 1;
}
@@ -5,18 +5,21 @@
import { readFile } from "node:fs/promises";
import { resolve } from "node:path";
import { Value } from "@sinclair/typebox/value";
+import toml from "toml";
+import { ConfigError } from "../util/errors.js";
import { defaultConfig } from "./defaults.js";
-import { ConfigSchema, PartialConfigSchema } from "./schema.js";
import type { RumiloConfig } from "./schema.js";
-import { ConfigError } from "../util/errors.js";
-import toml from "toml";
+import { ConfigSchema, PartialConfigSchema } from "./schema.js";
export interface LoadedConfig {
config: RumiloConfig;
path?: string;
}
-function mergeConfig(base: RumiloConfig, override: Partial<RumiloConfig>): RumiloConfig {
+function mergeConfig(
+ base: RumiloConfig,
+ override: Partial<RumiloConfig>,
+): RumiloConfig {
return {
defaults: {
...base.defaults,
@@ -37,15 +40,15 @@ function mergeConfig(base: RumiloConfig, override: Partial<RumiloConfig>): Rumil
};
}
-function validatePartialConfig(parsed: unknown): asserts parsed is Partial<RumiloConfig> {
+function validatePartialConfig(
+ parsed: unknown,
+): asserts parsed is Partial<RumiloConfig> {
if (!Value.Check(PartialConfigSchema, parsed)) {
const errors = [...Value.Errors(PartialConfigSchema, parsed)];
const details = errors
.map((e) => ` ${e.path}: ${e.message} (got ${JSON.stringify(e.value)})`)
.join("\n");
- throw new ConfigError(
- `Invalid config:\n${details}`,
- );
+ throw new ConfigError(`Invalid config:\n${details}`);
}
}
@@ -55,15 +58,14 @@ function validateFullConfig(config: unknown): asserts config is RumiloConfig {
const details = errors
.map((e) => ` ${e.path}: ${e.message} (got ${JSON.stringify(e.value)})`)
.join("\n");
- throw new ConfigError(
- `Invalid merged config:\n${details}`,
- );
+ throw new ConfigError(`Invalid merged config:\n${details}`);
}
}
export async function loadConfig(): Promise<LoadedConfig> {
const base = structuredClone(defaultConfig);
- const configHome = process.env["XDG_CONFIG_HOME"] || `${process.env["HOME"] ?? ""}/.config`;
+ const configHome =
+ process.env["XDG_CONFIG_HOME"] || `${process.env["HOME"] ?? ""}/.config`;
const configPath = resolve(configHome, "rumilo", "config.toml");
try {
@@ -2,7 +2,13 @@
//
// SPDX-License-Identifier: GPL-3.0-or-later
-import { Kind, Type, type Static, type TObject, type TProperties } from "@sinclair/typebox";
+import {
+ Kind,
+ type Static,
+ type TObject,
+ type TProperties,
+ Type,
+} from "@sinclair/typebox";
const CustomModelSchema = Type.Object({
provider: Type.String(),
@@ -28,13 +34,20 @@ const CustomModelSchema = Type.Object({
supports_developer_role: Type.Optional(Type.Boolean()),
supports_reasoning_effort: Type.Optional(Type.Boolean()),
supports_usage_in_streaming: Type.Optional(Type.Boolean()),
- max_tokens_field: Type.Optional(Type.Union([Type.Literal("max_tokens"), Type.Literal("max_completion_tokens")])),
+ max_tokens_field: Type.Optional(
+ Type.Union([
+ Type.Literal("max_tokens"),
+ Type.Literal("max_completion_tokens"),
+ ]),
+ ),
requires_tool_result_name: Type.Optional(Type.Boolean()),
requires_assistant_after_tool_result: Type.Optional(Type.Boolean()),
requires_thinking_as_text: Type.Optional(Type.Boolean()),
requires_mistral_tool_ids: Type.Optional(Type.Boolean()),
- thinking_format: Type.Optional(Type.Union([Type.Literal("openai"), Type.Literal("zai")])),
- })
+ thinking_format: Type.Optional(
+ Type.Union([Type.Literal("openai"), Type.Literal("zai")]),
+ ),
+ }),
),
});
@@ -65,7 +78,7 @@ export function partialObject<T extends TProperties>(schema: TObject<T>) {
const partial: Record<string, unknown> = {};
for (const [key, value] of Object.entries(schema.properties)) {
const v = value as any;
- const inner = v[Kind] === 'Object' && v.properties ? partialObject(v) : v;
+ const inner = v[Kind] === "Object" && v.properties ? partialObject(v) : v;
partial[key] = Type.Optional(inner as any);
}
return Type.Object(partial as any);
@@ -29,7 +29,7 @@ export function resolveConfigValue(value: string): string | undefined {
// Bare name β check as env var first, then use as literal
const envValue = process.env[value];
- return envValue || value;
+ return envValue ?? value;
}
/**
@@ -58,7 +58,7 @@ export function resolveHeaders(
const resolved: Record<string, string> = {};
for (const [key, value] of Object.entries(headers)) {
const resolvedValue = resolveConfigValue(value);
- if (resolvedValue) {
+ if (resolvedValue !== undefined) {
resolved[key] = resolvedValue;
}
}
@@ -111,13 +111,10 @@ export function truncateHead(
};
}
-function truncateStringToBytesFromEnd(
- str: string,
- maxBytes: number,
-): string {
+function truncateStringToBytesFromEnd(str: string, maxBytes: number): string {
const buf = Buffer.from(str, "utf-8");
if (buf.length <= maxBytes) return str;
- let end = buf.length;
+ const end = buf.length;
let start = end - maxBytes;
// Walk forward to a valid UTF-8 boundary
while (start < end && (buf[start]! & 0xc0) === 0x80) {
@@ -156,7 +153,11 @@ export function truncateTail(
let outputBytesCount = 0;
let truncatedBy: "lines" | "bytes" = "lines";
- for (let i = lines.length - 1; i >= 0 && outputLinesArr.length < maxLines; i--) {
+ for (
+ let i = lines.length - 1;
+ i >= 0 && outputLinesArr.length < maxLines;
+ i--
+ ) {
const line = lines[i]!;
const lineBytes =
Buffer.byteLength(line, "utf-8") + (outputLinesArr.length > 0 ? 1 : 0);
@@ -210,5 +211,8 @@ export function truncateLine(
if (line.length <= maxChars) {
return { text: line, wasTruncated: false };
}
- return { text: `${line.slice(0, maxChars)}... [truncated]`, wasTruncated: true };
+ return {
+ text: `${line.slice(0, maxChars)}... [truncated]`,
+ wasTruncated: true,
+ };
}
@@ -2,7 +2,7 @@
//
// SPDX-License-Identifier: GPL-3.0-or-later
-import { mkdtemp, rm, chmod } from "node:fs/promises";
+import { chmod, mkdtemp, rm } from "node:fs/promises";
import { tmpdir } from "node:os";
import { join } from "node:path";
import { WorkspaceError } from "../util/errors.js";
@@ -16,7 +16,9 @@ export interface WorkspaceOptions {
cleanup: boolean;
}
-export async function createWorkspace(options: WorkspaceOptions): Promise<Workspace> {
+export async function createWorkspace(
+ options: WorkspaceOptions,
+): Promise<Workspace> {
try {
const root = await mkdtemp(join(tmpdir(), "rumilo-"));
await chmod(root, 0o700);
@@ -2,11 +2,15 @@
//
// SPDX-License-Identifier: GPL-3.0-or-later
-import { describe, test, expect, beforeAll, afterAll } from "bun:test";
-import { AgentError } from "../src/util/errors.js";
-import { expandEnvVars, resolveConfigValue, resolveHeaders } from "../src/util/env.js";
+import { afterAll, beforeAll, describe, expect, test } from "bun:test";
import { buildGetApiKey } from "../src/agent/runner.js";
import type { RumiloConfig } from "../src/config/schema.js";
+import {
+ expandEnvVars,
+ resolveConfigValue,
+ resolveHeaders,
+} from "../src/util/env.js";
+import { AgentError } from "../src/util/errors.js";
const stubConfig: RumiloConfig = {
defaults: { model: "anthropic:test", cleanup: true },
@@ -54,7 +58,8 @@ describe("resolveConfigValue", () => {
});
afterAll(() => {
- if (saved["RUMILO_TEST_KEY"] === undefined) delete process.env["RUMILO_TEST_KEY"];
+ if (saved["RUMILO_TEST_KEY"] === undefined)
+ delete process.env["RUMILO_TEST_KEY"];
else process.env["RUMILO_TEST_KEY"] = saved["RUMILO_TEST_KEY"];
});
@@ -71,7 +76,9 @@ describe("resolveConfigValue", () => {
});
test("treats unknown name as literal", () => {
- expect(resolveConfigValue("sk-literal-key-12345")).toBe("sk-literal-key-12345");
+ expect(resolveConfigValue("sk-literal-key-12345")).toBe(
+ "sk-literal-key-12345",
+ );
});
test("returns undefined for unknown $VAR", () => {
@@ -134,7 +141,8 @@ describe("resolveHeaders", () => {
});
afterAll(() => {
- if (saved["RUMILO_HDR_KEY"] === undefined) delete process.env["RUMILO_HDR_KEY"];
+ if (saved["RUMILO_HDR_KEY"] === undefined)
+ delete process.env["RUMILO_HDR_KEY"];
else process.env["RUMILO_HDR_KEY"] = saved["RUMILO_HDR_KEY"];
});
@@ -2,7 +2,7 @@
//
// SPDX-License-Identifier: GPL-3.0-or-later
-import { describe, test, expect } from "bun:test";
+import { describe, expect, test } from "bun:test";
import { parseArgs } from "../src/cli/parse-args.js";
describe("CLI --key=value parsing (issue #6)", () => {
@@ -22,7 +22,13 @@ describe("CLI --key=value parsing (issue #6)", () => {
});
test("--key value (space-separated) works", () => {
- const result = parseArgs(["node", "script", "web", "--model", "openai:gpt-4"]);
+ const result = parseArgs([
+ "node",
+ "script",
+ "web",
+ "--model",
+ "openai:gpt-4",
+ ]);
expect(result.options["model"]).toBe("openai:gpt-4");
});
});
@@ -35,7 +41,13 @@ describe("CLI -u short flag (issue #7)", () => {
});
test("-u with valid URL works normally", () => {
- const result = parseArgs(["node", "script", "web", "-u", "https://example.com"]);
+ const result = parseArgs([
+ "node",
+ "script",
+ "web",
+ "-u",
+ "https://example.com",
+ ]);
expect(result.options["uri"]).toBe("https://example.com");
});
@@ -2,12 +2,12 @@
//
// SPDX-License-Identifier: GPL-3.0-or-later
-import { describe, test, expect, beforeEach, afterEach } from "bun:test";
-import { mkdtempSync, writeFileSync, rmSync, mkdirSync } from "node:fs";
+import { afterEach, beforeEach, describe, expect, test } from "bun:test";
+import { mkdirSync, mkdtempSync, rmSync, writeFileSync } from "node:fs";
import { tmpdir } from "node:os";
import { join } from "node:path";
-import { ConfigError } from "../src/util/errors.js";
import { loadConfig } from "../src/config/loader.js";
+import { ConfigError } from "../src/util/errors.js";
describe("loadConfig - ConfigError rethrown directly (issue #10)", () => {
let configDir: string;
@@ -2,15 +2,15 @@
//
// SPDX-License-Identifier: GPL-3.0-or-later
-import { describe, test, expect, beforeEach, afterEach } from "bun:test";
-import { mkdtempSync, writeFileSync, rmSync } from "node:fs";
+import { afterEach, beforeEach, describe, expect, test } from "bun:test";
+import { mkdtempSync, rmSync, writeFileSync } from "node:fs";
import { tmpdir } from "node:os";
import { join } from "node:path";
import { Type } from "@sinclair/typebox";
import { Value } from "@sinclair/typebox/value";
-import { ConfigError } from "../src/util/errors.js";
import { loadConfig } from "../src/config/loader.js";
import { partialObject } from "../src/config/schema.js";
+import { ConfigError } from "../src/util/errors.js";
describe("config validation", () => {
let configDir: string;
@@ -39,10 +39,7 @@ describe("config validation", () => {
});
test("rejects defaults.model with wrong type (number instead of string)", async () => {
- writeFileSync(
- configPath,
- `[defaults]\nmodel = 42\ncleanup = true\n`,
- );
+ writeFileSync(configPath, `[defaults]\nmodel = 42\ncleanup = true\n`);
await expect(loadConfig()).rejects.toThrow(ConfigError);
await expect(loadConfig()).rejects.toThrow(/defaults\/model/);
});
@@ -57,19 +54,13 @@ describe("config validation", () => {
});
test("rejects repo.default_depth with wrong type (string instead of number)", async () => {
- writeFileSync(
- configPath,
- `[repo]\ndefault_depth = "deep"\n`,
- );
+ writeFileSync(configPath, `[repo]\ndefault_depth = "deep"\n`);
await expect(loadConfig()).rejects.toThrow(ConfigError);
await expect(loadConfig()).rejects.toThrow(/repo\/default_depth/);
});
test("rejects repo.default_depth below minimum (0)", async () => {
- writeFileSync(
- configPath,
- `[repo]\ndefault_depth = 0\n`,
- );
+ writeFileSync(configPath, `[repo]\ndefault_depth = 0\n`);
await expect(loadConfig()).rejects.toThrow(ConfigError);
await expect(loadConfig()).rejects.toThrow(/default_depth/);
});
@@ -119,10 +110,7 @@ describe("config validation", () => {
});
test("error message includes path and expected type for diagnostics", async () => {
- writeFileSync(
- configPath,
- `[defaults]\nmodel = 42\ncleanup = true\n`,
- );
+ writeFileSync(configPath, `[defaults]\nmodel = 42\ncleanup = true\n`);
try {
await loadConfig();
throw new Error("should have thrown");
@@ -169,7 +157,9 @@ describe("partialObject deep-partial behavior", () => {
});
test("rejects wrong type inside nested object", () => {
- const result = Value.Check(PartialNested, { inner: { port: "not-a-number" } });
+ const result = Value.Check(PartialNested, {
+ inner: { port: "not-a-number" },
+ });
expect(result).toBe(false);
});
@@ -204,7 +194,11 @@ describe("partialObject deep-partial behavior", () => {
expect(Value.Check(PartialDeep, {})).toBe(true);
expect(Value.Check(PartialDeep, { level1: {} })).toBe(true);
expect(Value.Check(PartialDeep, { level1: { level2: {} } })).toBe(true);
- expect(Value.Check(PartialDeep, { level1: { level2: { value: 42 } } })).toBe(true);
- expect(Value.Check(PartialDeep, { level1: { level2: { value: "nope" } } })).toBe(false);
+ expect(
+ Value.Check(PartialDeep, { level1: { level2: { value: 42 } } }),
+ ).toBe(true);
+ expect(
+ Value.Check(PartialDeep, { level1: { level2: { value: "nope" } } }),
+ ).toBe(false);
});
});
@@ -2,7 +2,7 @@
//
// SPDX-License-Identifier: GPL-3.0-or-later
-import { describe, test, expect, beforeEach, afterEach } from "bun:test";
+import { afterEach, beforeEach, describe, expect, test } from "bun:test";
import { expandHomePath } from "../src/util/path.js";
describe("expandHomePath", () => {
@@ -2,7 +2,7 @@
//
// SPDX-License-Identifier: GPL-3.0-or-later
-import { describe, test, expect, beforeAll, afterAll } from "bun:test";
+import { afterAll, beforeAll, describe, expect, test } from "bun:test";
import { mkdtempSync, rmSync, writeFileSync } from "node:fs";
import { tmpdir } from "node:os";
import { join } from "node:path";
@@ -34,22 +34,30 @@ afterAll(() => {
describe("git_log validation - dead code fix (issue #12)", () => {
test("whitespace-only author throws ToolInputError", async () => {
const tool = createGitLogTool(workDir);
- await expect(tool.execute("id", { author: " " })).rejects.toThrow(ToolInputError);
+ await expect(tool.execute("id", { author: " " })).rejects.toThrow(
+ ToolInputError,
+ );
});
test("empty-string author throws ToolInputError", async () => {
const tool = createGitLogTool(workDir);
- await expect(tool.execute("id", { author: "" })).rejects.toThrow(ToolInputError);
+ await expect(tool.execute("id", { author: "" })).rejects.toThrow(
+ ToolInputError,
+ );
});
test("empty-string since throws ToolInputError", async () => {
const tool = createGitLogTool(workDir);
- await expect(tool.execute("id", { since: " " })).rejects.toThrow(ToolInputError);
+ await expect(tool.execute("id", { since: " " })).rejects.toThrow(
+ ToolInputError,
+ );
});
test("empty-string until throws ToolInputError", async () => {
const tool = createGitLogTool(workDir);
- await expect(tool.execute("id", { until: " " })).rejects.toThrow(ToolInputError);
+ await expect(tool.execute("id", { until: " " })).rejects.toThrow(
+ ToolInputError,
+ );
});
test("valid author is accepted", async () => {
@@ -2,16 +2,16 @@
//
// SPDX-License-Identifier: GPL-3.0-or-later
-import { describe, test, expect, beforeAll, afterAll } from "bun:test";
+import { afterAll, beforeAll, describe, expect, test } from "bun:test";
import { mkdtempSync, rmSync, writeFileSync } from "node:fs";
import { tmpdir } from "node:os";
import { join } from "node:path";
import simpleGit from "simple-git";
-import { createGitShowTool } from "../src/agent/tools/git/show.js";
-import { createGitDiffTool } from "../src/agent/tools/git/diff.js";
import { createGitBlameTool } from "../src/agent/tools/git/blame.js";
+import { createGitDiffTool } from "../src/agent/tools/git/diff.js";
import { createGitLogTool } from "../src/agent/tools/git/log.js";
-import { DEFAULT_MAX_LINES, DEFAULT_MAX_BYTES } from "../src/util/truncate.js";
+import { createGitShowTool } from "../src/agent/tools/git/show.js";
+import { DEFAULT_MAX_BYTES, DEFAULT_MAX_LINES } from "../src/util/truncate.js";
function textOf(result: any): string {
return result.content[0].text;
@@ -30,7 +30,10 @@ beforeAll(async () => {
// Create a large file for truncation tests
const largeLine = "x".repeat(100);
- const largeContent = Array.from({ length: 3000 }, (_, i) => `${i}: ${largeLine}`).join("\n");
+ const largeContent = Array.from(
+ { length: 3000 },
+ (_, i) => `${i}: ${largeLine}`,
+ ).join("\n");
writeFileSync(join(workDir, "large.txt"), largeContent);
await git.add("large.txt");
await git.commit("add large file");
@@ -54,14 +57,16 @@ describe("git_show truncation (issue #8)", () => {
const tool = createGitShowTool(workDir);
// The first commit has the large file diff, which should exceed truncation limits
const logs = await git.log();
- const firstCommitHash = logs.all[logs.all.length - 1]!.hash;
+ const firstCommitHash = logs.all[logs.all.length - 1]?.hash;
const result = await tool.execute("call-1", { ref: firstCommitHash });
const text = textOf(result);
// Output should be bounded - not return all 3000+ lines raw
const lines = text.split("\n");
expect(lines.length).toBeLessThanOrEqual(DEFAULT_MAX_LINES + 5); // small margin for notice
- expect(Buffer.byteLength(text, "utf-8")).toBeLessThanOrEqual(DEFAULT_MAX_BYTES + 500); // margin for notice
+ expect(Buffer.byteLength(text, "utf-8")).toBeLessThanOrEqual(
+ DEFAULT_MAX_BYTES + 500,
+ ); // margin for notice
expect(text).toContain("[truncated");
});
@@ -78,18 +83,24 @@ describe("git_diff truncation (issue #8)", () => {
test("truncates large diff output", async () => {
const tool = createGitDiffTool(workDir);
const logs = await git.log();
- const firstCommitHash = logs.all[logs.all.length - 1]!.hash;
- const secondCommitHash = logs.all[logs.all.length - 2]!.hash;
+ const firstCommitHash = logs.all[logs.all.length - 1]?.hash;
+ const secondCommitHash = logs.all[logs.all.length - 2]?.hash;
// Diff between first commit (large file add) and second commit
- const result = await tool.execute("call-3", { ref: firstCommitHash, ref2: secondCommitHash });
- const text = textOf(result);
+ const result = await tool.execute("call-3", {
+ ref: firstCommitHash,
+ ref2: secondCommitHash,
+ });
+ const _text = textOf(result);
// The diff won't be huge (only counter.txt changes), so let's create a proper large diff scenario
// Instead, diff from the first commit to HEAD which has many changes but also large.txt unchanged
// Better: modify large.txt to create a big diff
// Actually, let's just verify the mechanism works by checking the first commit via show already.
// For diff specifically, create a modified version of large.txt
const largeLine2 = "y".repeat(100);
- const largeContent2 = Array.from({ length: 3000 }, (_, i) => `${i}: ${largeLine2}`).join("\n");
+ const largeContent2 = Array.from(
+ { length: 3000 },
+ (_, i) => `${i}: ${largeLine2}`,
+ ).join("\n");
writeFileSync(join(workDir, "large.txt"), largeContent2);
const result2 = await tool.execute("call-3b", { ref: "HEAD" });
const text2 = textOf(result2);
@@ -108,7 +119,9 @@ describe("git_blame truncation (issue #8)", () => {
const text = textOf(result);
const lines = text.split("\n");
expect(lines.length).toBeLessThanOrEqual(DEFAULT_MAX_LINES + 5);
- expect(Buffer.byteLength(text, "utf-8")).toBeLessThanOrEqual(DEFAULT_MAX_BYTES + 500);
+ expect(Buffer.byteLength(text, "utf-8")).toBeLessThanOrEqual(
+ DEFAULT_MAX_BYTES + 500,
+ );
expect(text).toContain("[truncated");
});
});
@@ -2,7 +2,7 @@
//
// SPDX-License-Identifier: GPL-3.0-or-later
-import { describe, test, expect } from "bun:test";
+import { describe, expect, test } from "bun:test";
import { resolveModel } from "../src/agent/model-resolver.js";
import type { RumiloConfig } from "../src/config/schema.js";
import { ConfigError } from "../src/util/errors.js";
@@ -50,7 +50,7 @@ describe("resolveModel - colon handling (issue #5)", () => {
const config: RumiloConfig = {
...stubConfig,
custom_models: {
- "simple": {
+ simple: {
id: "simple-id",
name: "simple",
api: "openai",
@@ -69,6 +69,8 @@ describe("resolveModel - colon handling (issue #5)", () => {
});
test("rejects model string without colon", () => {
- expect(() => resolveModel("nocodelimiter", stubConfig)).toThrow(ConfigError);
+ expect(() => resolveModel("nocodelimiter", stubConfig)).toThrow(
+ ConfigError,
+ );
});
});
@@ -2,7 +2,7 @@
//
// SPDX-License-Identifier: GPL-3.0-or-later
-import { describe, test, expect, mock } from "bun:test";
+import { describe, expect, mock, test } from "bun:test";
import { FetchError, ToolInputError } from "../src/util/errors.js";
// Mock kagi-ken so the search function throws, exercising the FetchError wrapping
@@ -17,11 +17,15 @@ import { createWebSearchTool } from "../src/agent/tools/web-search.js";
describe("web_search error handling (issue #11)", () => {
test("missing session token throws ToolInputError", async () => {
const tool = createWebSearchTool("");
- await expect(tool.execute("id", { query: "test" })).rejects.toThrow(ToolInputError);
+ await expect(tool.execute("id", { query: "test" })).rejects.toThrow(
+ ToolInputError,
+ );
});
test("search API failure is wrapped as FetchError", async () => {
const tool = createWebSearchTool("invalid-token-xxx");
- await expect(tool.execute("id", { query: "test query" })).rejects.toThrow(FetchError);
+ await expect(tool.execute("id", { query: "test query" })).rejects.toThrow(
+ FetchError,
+ );
});
});
@@ -2,12 +2,12 @@
//
// SPDX-License-Identifier: GPL-3.0-or-later
-import { describe, test, expect, beforeEach, afterEach } from "bun:test";
-import { readdirSync, mkdtempSync } from "node:fs";
+import { afterEach, beforeEach, describe, expect, test } from "bun:test";
+import { execSync } from "node:child_process";
+import { mkdtempSync, readdirSync } from "node:fs";
import { rm } from "node:fs/promises";
import { tmpdir } from "node:os";
import { join } from "node:path";
-import { execSync } from "node:child_process";
import { ConfigError } from "../src/util/errors.js";
/**
@@ -80,10 +80,19 @@ describe("repo command — workspace cleanup on early failure", () => {
execSync("git init --bare", { cwd: localRepo, stdio: "ignore" });
// Create a temporary work clone to add a commit (bare repos need content)
const workClone = mkdtempSync(join(tmpdir(), "rumilo-test-work-"));
- execSync(`git clone ${localRepo} work`, { cwd: workClone, stdio: "ignore" });
+ execSync(`git clone ${localRepo} work`, {
+ cwd: workClone,
+ stdio: "ignore",
+ });
const workDir = join(workClone, "work");
- execSync("git config user.email test@test.com && git config user.name Test && git config commit.gpgsign false", { cwd: workDir, stdio: "ignore" });
- execSync("echo hello > README.md && git add . && git commit -m init", { cwd: workDir, stdio: "ignore" });
+ execSync(
+ "git config user.email test@test.com && git config user.name Test && git config commit.gpgsign false",
+ { cwd: workDir, stdio: "ignore" },
+ );
+ execSync("echo hello > README.md && git add . && git commit -m init", {
+ cwd: workDir,
+ stdio: "ignore",
+ });
execSync("git push", { cwd: workDir, stdio: "ignore" });
// Clean up work clone
execSync(`rm -rf ${workClone}`, { stdio: "ignore" });
@@ -127,7 +136,7 @@ describe("repo command β workspace cleanup on early failure", () => {
query: "test",
uri: localRepo,
ref: "nonexistent-ref-abc123",
- full: true, // full clone for local bare repo compatibility
+ full: true, // full clone for local bare repo compatibility
verbose: false,
cleanup: true,
});
@@ -2,325 +2,355 @@
//
// SPDX-License-Identifier: GPL-3.0-or-later
-import { describe, test, expect, beforeAll, afterAll } from "bun:test";
-import { mkdtemp, rm, writeFile, mkdir } from "node:fs/promises";
+import { afterAll, beforeAll, describe, expect, test } from "bun:test";
import { symlinkSync } from "node:fs";
+import { mkdir, mkdtemp, rm, writeFile } from "node:fs/promises";
import { tmpdir } from "node:os";
-import { join, resolve } from "node:path";
+import { join } from "node:path";
import { ensureWorkspacePath } from "../src/agent/tools/index.js";
-import { expandPath, resolveToCwd, resolveReadPath } from "../src/agent/tools/path-utils.js";
+import { expandPath, resolveToCwd } from "../src/agent/tools/path-utils.js";
import { writeWorkspaceFile } from "../src/workspace/content.js";
let workspace: string;
beforeAll(async () => {
- workspace = await mkdtemp(join(tmpdir(), "rumilo-test-"));
- await mkdir(join(workspace, "subdir"), { recursive: true });
- await writeFile(join(workspace, "hello.txt"), "hello");
- await writeFile(join(workspace, "subdir", "nested.txt"), "nested");
+ workspace = await mkdtemp(join(tmpdir(), "rumilo-test-"));
+ await mkdir(join(workspace, "subdir"), { recursive: true });
+ await writeFile(join(workspace, "hello.txt"), "hello");
+ await writeFile(join(workspace, "subdir", "nested.txt"), "nested");
});
afterAll(async () => {
- await rm(workspace, { recursive: true, force: true });
+ await rm(workspace, { recursive: true, force: true });
});
// ——— ensureWorkspacePath ————————————————————————————————————————————
describe("ensureWorkspacePath", () => {
- test("allows workspace root itself", () => {
- const result = ensureWorkspacePath(workspace, ".");
- expect(result).toBe(workspace);
- });
-
- test("allows a relative child path", () => {
- const result = ensureWorkspacePath(workspace, "hello.txt");
- expect(result).toBe(join(workspace, "hello.txt"));
- });
-
- test("allows nested relative path", () => {
- const result = ensureWorkspacePath(workspace, "subdir/nested.txt");
- expect(result).toBe(join(workspace, "subdir", "nested.txt"));
- });
-
- test("rejects .. traversal escaping workspace", () => {
- expect(() => ensureWorkspacePath(workspace, "../../../etc/passwd")).toThrow("Path escapes workspace");
- });
-
- test("rejects absolute path outside workspace", () => {
- expect(() => ensureWorkspacePath(workspace, "/etc/passwd")).toThrow("Path escapes workspace");
- });
-
- test("allows absolute path inside workspace", () => {
- const absInside = join(workspace, "hello.txt");
- const result = ensureWorkspacePath(workspace, absInside);
- expect(result).toBe(absInside);
- });
+ test("allows workspace root itself", () => {
+ const result = ensureWorkspacePath(workspace, ".");
+ expect(result).toBe(workspace);
+ });
+
+ test("allows a relative child path", () => {
+ const result = ensureWorkspacePath(workspace, "hello.txt");
+ expect(result).toBe(join(workspace, "hello.txt"));
+ });
+
+ test("allows nested relative path", () => {
+ const result = ensureWorkspacePath(workspace, "subdir/nested.txt");
+ expect(result).toBe(join(workspace, "subdir", "nested.txt"));
+ });
+
+ test("rejects .. traversal escaping workspace", () => {
+ expect(() => ensureWorkspacePath(workspace, "../../../etc/passwd")).toThrow(
+ "Path escapes workspace",
+ );
+ });
+
+ test("rejects absolute path outside workspace", () => {
+ expect(() => ensureWorkspacePath(workspace, "/etc/passwd")).toThrow(
+ "Path escapes workspace",
+ );
+ });
+
+ test("allows absolute path inside workspace", () => {
+ const absInside = join(workspace, "hello.txt");
+ const result = ensureWorkspacePath(workspace, absInside);
+ expect(result).toBe(absInside);
+ });
});
// ——— expandPath: tilde must NOT escape workspace ————————————————————
describe("expandPath - tilde handling for workspace sandboxing", () => {
- test("tilde alone must not expand to homedir", () => {
- const result = expandPath("~");
- expect(result).toBe("~");
- });
-
- test("tilde-prefixed path must not expand to homedir", () => {
- const result = expandPath("~/secret");
- expect(result).not.toContain("/home");
- expect(result).not.toContain("/Users");
- expect(result).toBe("~/secret");
- });
+ test("tilde alone must not expand to homedir", () => {
+ const result = expandPath("~");
+ // After fix, ~ should remain literal (not expand to homedir)
+ expect(result).toBe("~");
+ });
+
+ test("tilde-prefixed path must not expand to homedir", () => {
+ const result = expandPath("~/secret");
+ expect(result).not.toContain("/home");
+ expect(result).not.toContain("/Users");
+ // Should stay as literal path
+ expect(result).toBe("~/secret");
+ });
});
// ——— resolveToCwd: must stay within workspace ———————————————————————
describe("resolveToCwd - workspace containment", () => {
- test("resolves relative path within workspace", () => {
- const result = resolveToCwd("hello.txt", workspace);
- expect(result).toBe(join(workspace, "hello.txt"));
- });
-
- test("resolves '.' to workspace root", () => {
- const result = resolveToCwd(".", workspace);
- expect(result).toBe(workspace);
- });
+ test("resolves relative path within workspace", () => {
+ const result = resolveToCwd("hello.txt", workspace);
+ expect(result).toBe(join(workspace, "hello.txt"));
+ });
+
+ test("resolves '.' to workspace root", () => {
+ const result = resolveToCwd(".", workspace);
+ expect(result).toBe(workspace);
+ });
});
// ——— Tool-level containment (read tool) —————————————————————————————
describe("read tool - workspace containment", () => {
- let readTool: any;
-
- beforeAll(async () => {
- const { createReadTool } = await import("../src/agent/tools/read.js");
- readTool = createReadTool(workspace);
- });
-
- test("reads file inside workspace", async () => {
- const result = await readTool.execute("id", { path: "hello.txt" });
- expect(result.content[0].text).toBe("hello");
- });
-
- test("rejects traversal via ..", async () => {
- await expect(readTool.execute("id", { path: "../../etc/passwd" })).rejects.toThrow(
- /escapes workspace/i,
- );
- });
-
- test("rejects absolute path outside workspace", async () => {
- await expect(readTool.execute("id", { path: "/etc/passwd" })).rejects.toThrow(
- /escapes workspace/i,
- );
- });
-
- test("tilde path stays within workspace (no homedir expansion)", async () => {
- await expect(readTool.execute("id", { path: "~/.bashrc" })).rejects.toThrow(/ENOENT/);
- });
+ let readTool: any;
+
+ beforeAll(async () => {
+ const { createReadTool } = await import("../src/agent/tools/read.js");
+ readTool = createReadTool(workspace);
+ });
+
+ test("reads file inside workspace", async () => {
+ const result = await readTool.execute("id", { path: "hello.txt" });
+ expect(result.content[0].text).toBe("hello");
+ });
+
+ test("rejects traversal via ..", async () => {
+ await expect(
+ readTool.execute("id", { path: "../../etc/passwd" }),
+ ).rejects.toThrow(/escapes workspace/i);
+ });
+
+ test("rejects absolute path outside workspace", async () => {
+ await expect(
+ readTool.execute("id", { path: "/etc/passwd" }),
+ ).rejects.toThrow(/escapes workspace/i);
+ });
+
+ test("tilde path stays within workspace (no homedir expansion)", async () => {
+ // With tilde expansion removed, ~/foo resolves to <workspace>/~/foo
+ // which is safely inside the workspace. It will fail with ENOENT,
+ // NOT succeed in reading the user's homedir file.
+ await expect(readTool.execute("id", { path: "~/.bashrc" })).rejects.toThrow(
+ /ENOENT/,
+ );
+ });
});
// ——— Tool-level containment (ls tool) ———————————————————————————————
describe("ls tool - workspace containment", () => {
- let lsTool: any;
-
- beforeAll(async () => {
- const { createLsTool } = await import("../src/agent/tools/ls.js");
- lsTool = createLsTool(workspace);
- });
-
- test("lists workspace root", async () => {
- const result = await lsTool.execute("id", {});
- expect(result.content[0].text).toContain("hello.txt");
- });
-
- test("rejects traversal via ..", async () => {
- await expect(lsTool.execute("id", { path: "../../" })).rejects.toThrow(
- /escapes workspace/i,
- );
- });
-
- test("rejects absolute path outside workspace", async () => {
- await expect(lsTool.execute("id", { path: "/tmp" })).rejects.toThrow(
- /escapes workspace/i,
- );
- });
+ let lsTool: any;
+
+ beforeAll(async () => {
+ const { createLsTool } = await import("../src/agent/tools/ls.js");
+ lsTool = createLsTool(workspace);
+ });
+
+ test("lists workspace root", async () => {
+ const result = await lsTool.execute("id", {});
+ expect(result.content[0].text).toContain("hello.txt");
+ });
+
+ test("rejects traversal via ..", async () => {
+ await expect(lsTool.execute("id", { path: "../../" })).rejects.toThrow(
+ /escapes workspace/i,
+ );
+ });
+
+ test("rejects absolute path outside workspace", async () => {
+ await expect(lsTool.execute("id", { path: "/tmp" })).rejects.toThrow(
+ /escapes workspace/i,
+ );
+ });
});
// ——— Tool-level containment (grep tool) —————————————————————————————
describe("grep tool - workspace containment", () => {
- let grepTool: any;
-
- beforeAll(async () => {
- const { createGrepTool } = await import("../src/agent/tools/grep.js");
- grepTool = createGrepTool(workspace);
- });
-
- test("searches within workspace", async () => {
- const result = await grepTool.execute("id", { pattern: "hello", literal: true });
- expect(result.content[0].text).toContain("hello");
- });
-
- test("rejects traversal via ..", async () => {
- await expect(
- grepTool.execute("id", { pattern: "root", path: "../../etc" }),
- ).rejects.toThrow(/escapes workspace/i);
- });
-
- test("rejects absolute path outside workspace", async () => {
- await expect(
- grepTool.execute("id", { pattern: "root", path: "/etc" }),
- ).rejects.toThrow(/escapes workspace/i);
- });
+ let grepTool: any;
+
+ beforeAll(async () => {
+ const { createGrepTool } = await import("../src/agent/tools/grep.js");
+ grepTool = createGrepTool(workspace);
+ });
+
+ test("searches within workspace", async () => {
+ const result = await grepTool.execute("id", {
+ pattern: "hello",
+ literal: true,
+ });
+ expect(result.content[0].text).toContain("hello");
+ });
+
+ test("rejects traversal via ..", async () => {
+ await expect(
+ grepTool.execute("id", { pattern: "root", path: "../../etc" }),
+ ).rejects.toThrow(/escapes workspace/i);
+ });
+
+ test("rejects absolute path outside workspace", async () => {
+ await expect(
+ grepTool.execute("id", { pattern: "root", path: "/etc" }),
+ ).rejects.toThrow(/escapes workspace/i);
+ });
});
// ——— Tool-level containment (find tool) —————————————————————————————
describe("find tool - workspace containment", () => {
- let findTool: any;
-
- beforeAll(async () => {
- const { createFindTool } = await import("../src/agent/tools/find.js");
- findTool = createFindTool(workspace);
- });
-
- test("finds files in workspace", async () => {
- const result = await findTool.execute("id", { pattern: "*.txt" });
- expect(result.content[0].text).toContain("hello.txt");
- });
-
- test("rejects traversal via ..", async () => {
- await expect(
- findTool.execute("id", { pattern: "*", path: "../../" }),
- ).rejects.toThrow(/escapes workspace/i);
- });
-
- test("rejects absolute path outside workspace", async () => {
- await expect(
- findTool.execute("id", { pattern: "*", path: "/tmp" }),
- ).rejects.toThrow(/escapes workspace/i);
- });
+ let findTool: any;
+
+ beforeAll(async () => {
+ const { createFindTool } = await import("../src/agent/tools/find.js");
+ findTool = createFindTool(workspace);
+ });
+
+ test("finds files in workspace", async () => {
+ const result = await findTool.execute("id", { pattern: "*.txt" });
+ expect(result.content[0].text).toContain("hello.txt");
+ });
+
+ test("rejects traversal via ..", async () => {
+ await expect(
+ findTool.execute("id", { pattern: "*", path: "../../" }),
+ ).rejects.toThrow(/escapes workspace/i);
+ });
+
+ test("rejects absolute path outside workspace", async () => {
+ await expect(
+ findTool.execute("id", { pattern: "*", path: "/tmp" }),
+ ).rejects.toThrow(/escapes workspace/i);
+ });
});
// ——— writeWorkspaceFile containment (Issue #4) ——————————————————————
describe("writeWorkspaceFile - workspace containment", () => {
- test("writes file inside workspace", async () => {
- const result = await writeWorkspaceFile(workspace, "output.txt", "data");
- expect(result.filePath).toBe(join(workspace, "output.txt"));
- });
-
- test("writes nested file inside workspace", async () => {
- const result = await writeWorkspaceFile(workspace, "a/b/c.txt", "deep");
- expect(result.filePath).toBe(join(workspace, "a", "b", "c.txt"));
- });
-
- test("rejects traversal via ..", async () => {
- await expect(
- writeWorkspaceFile(workspace, "../../../tmp/evil.txt", "pwned"),
- ).rejects.toThrow(/escapes workspace/i);
- });
-
- test("absolute path via join stays inside workspace", async () => {
- const result = await writeWorkspaceFile(workspace, "/tmp/evil.txt", "safe");
- expect(result.filePath).toBe(join(workspace, "tmp", "evil.txt"));
- });
-
- test("tilde path via join stays inside workspace", async () => {
- const result = await writeWorkspaceFile(workspace, "~/evil.txt", "safe");
- expect(result.filePath).toBe(join(workspace, "~", "evil.txt"));
- });
+ test("writes file inside workspace", async () => {
+ const result = await writeWorkspaceFile(workspace, "output.txt", "data");
+ expect(result.filePath).toBe(join(workspace, "output.txt"));
+ });
+
+ test("writes nested file inside workspace", async () => {
+ const result = await writeWorkspaceFile(workspace, "a/b/c.txt", "deep");
+ expect(result.filePath).toBe(join(workspace, "a", "b", "c.txt"));
+ });
+
+ test("rejects traversal via ..", async () => {
+ await expect(
+ writeWorkspaceFile(workspace, "../../../tmp/evil.txt", "pwned"),
+ ).rejects.toThrow(/escapes workspace/i);
+ });
+
+ test("absolute path via join stays inside workspace", async () => {
+ // path.join(workspace, "/tmp/evil.txt") => "<workspace>/tmp/evil.txt"
+    // This is actually inside the workspace — join concatenates, doesn't replace.
+ const result = await writeWorkspaceFile(workspace, "/tmp/evil.txt", "safe");
+ expect(result.filePath).toBe(join(workspace, "tmp", "evil.txt"));
+ });
+
+ test("tilde path via join stays inside workspace", async () => {
+ // With no tilde expansion, ~/evil.txt joins as <workspace>/~/evil.txt
+ const result = await writeWorkspaceFile(workspace, "~/evil.txt", "safe");
+ expect(result.filePath).toBe(join(workspace, "~", "evil.txt"));
+ });
});
// ——— Symlink containment ————————————————————————————————————————————
describe("symlink containment", () => {
- let symlinkWorkspace: string;
- let outsideDir: string;
-
- beforeAll(async () => {
- symlinkWorkspace = await mkdtemp(join(tmpdir(), "rumilo-symlink-test-"));
- outsideDir = await mkdtemp(join(tmpdir(), "rumilo-outside-"));
-
- // Create a regular file inside workspace
- await writeFile(join(symlinkWorkspace, "legit.txt"), "safe content");
-
- // Create a subdirectory inside workspace
- await mkdir(join(symlinkWorkspace, "subdir"), { recursive: true });
- await writeFile(join(symlinkWorkspace, "subdir", "inner.txt"), "inner content");
-
- // Create a file outside workspace
- await writeFile(join(outsideDir, "secret.txt"), "secret content");
-
- // Symlink inside workspace pointing outside
- symlinkSync(outsideDir, join(symlinkWorkspace, "escape-link"));
-
- // Symlink inside workspace pointing to file outside
- symlinkSync(join(outsideDir, "secret.txt"), join(symlinkWorkspace, "secret-link.txt"));
-
- // Symlink inside workspace pointing to a file inside workspace (benign)
- symlinkSync(join(symlinkWorkspace, "legit.txt"), join(symlinkWorkspace, "good-link.txt"));
-
- // Nested symlink escape: subdir/nested-escape -> outsideDir
- symlinkSync(outsideDir, join(symlinkWorkspace, "subdir", "nested-escape"));
- });
-
- afterAll(async () => {
- await rm(symlinkWorkspace, { recursive: true, force: true });
- await rm(outsideDir, { recursive: true, force: true });
- });
-
- test("rejects symlink directory pointing outside workspace", () => {
- expect(() =>
- ensureWorkspacePath(symlinkWorkspace, "escape-link/secret.txt"),
- ).toThrow(/escapes workspace via symlink/);
- });
-
- test("rejects symlink file pointing outside workspace", () => {
- expect(() =>
- ensureWorkspacePath(symlinkWorkspace, "secret-link.txt"),
- ).toThrow(/escapes workspace via symlink/);
- });
-
- test("allows symlink pointing within workspace", () => {
- const result = ensureWorkspacePath(symlinkWorkspace, "good-link.txt");
- expect(result).toBe(join(symlinkWorkspace, "good-link.txt"));
- });
-
- test("rejects nested symlink escape (subdir/nested-escape)", () => {
- expect(() =>
- ensureWorkspacePath(symlinkWorkspace, "subdir/nested-escape/secret.txt"),
- ).toThrow(/escapes workspace via symlink/);
- });
-
- test("rejects symlink escape via directory symlink alone", () => {
- expect(() =>
- ensureWorkspacePath(symlinkWorkspace, "escape-link"),
- ).toThrow(/escapes workspace via symlink/);
- });
-
- test("handles non-existent file in real directory (write target)", () => {
-    // File doesn't exist but parent is a real dir inside workspace — should pass
- const result = ensureWorkspacePath(symlinkWorkspace, "subdir/new-file.txt");
- expect(result).toBe(join(symlinkWorkspace, "subdir", "new-file.txt"));
- });
-
- test("rejects non-existent file under symlink-escaped parent", () => {
-    // Parent is a symlink pointing outside — even though target file doesn't exist
- expect(() =>
- ensureWorkspacePath(symlinkWorkspace, "escape-link/new-file.txt"),
- ).toThrow(/escapes workspace via symlink/);
- });
-
- test("writeWorkspaceFile rejects path through symlink escape", async () => {
- await expect(
- writeWorkspaceFile(symlinkWorkspace, "escape-link/evil.txt", "pwned"),
- ).rejects.toThrow(/escapes workspace via symlink/);
- });
-
- test("writeWorkspaceFile allows normal nested write", async () => {
- const result = await writeWorkspaceFile(symlinkWorkspace, "new-dir/file.txt", "ok");
- expect(result.filePath).toBe(join(symlinkWorkspace, "new-dir", "file.txt"));
- });
+ let symlinkWorkspace: string;
+ let outsideDir: string;
+
+ beforeAll(async () => {
+ symlinkWorkspace = await mkdtemp(join(tmpdir(), "rumilo-symlink-test-"));
+ outsideDir = await mkdtemp(join(tmpdir(), "rumilo-outside-"));
+
+ // Create a regular file inside workspace
+ await writeFile(join(symlinkWorkspace, "legit.txt"), "safe content");
+
+ // Create a subdirectory inside workspace
+ await mkdir(join(symlinkWorkspace, "subdir"), { recursive: true });
+ await writeFile(
+ join(symlinkWorkspace, "subdir", "inner.txt"),
+ "inner content",
+ );
+
+ // Create a file outside workspace
+ await writeFile(join(outsideDir, "secret.txt"), "secret content");
+
+ // Symlink inside workspace pointing outside
+ symlinkSync(outsideDir, join(symlinkWorkspace, "escape-link"));
+
+ // Symlink inside workspace pointing to file outside
+ symlinkSync(
+ join(outsideDir, "secret.txt"),
+ join(symlinkWorkspace, "secret-link.txt"),
+ );
+
+ // Symlink inside workspace pointing to a file inside workspace (benign)
+ symlinkSync(
+ join(symlinkWorkspace, "legit.txt"),
+ join(symlinkWorkspace, "good-link.txt"),
+ );
+
+ // Nested symlink escape: subdir/nested-escape -> outsideDir
+ symlinkSync(outsideDir, join(symlinkWorkspace, "subdir", "nested-escape"));
+ });
+
+ afterAll(async () => {
+ await rm(symlinkWorkspace, { recursive: true, force: true });
+ await rm(outsideDir, { recursive: true, force: true });
+ });
+
+ test("rejects symlink directory pointing outside workspace", () => {
+ expect(() =>
+ ensureWorkspacePath(symlinkWorkspace, "escape-link/secret.txt"),
+ ).toThrow(/escapes workspace via symlink/);
+ });
+
+ test("rejects symlink file pointing outside workspace", () => {
+ expect(() =>
+ ensureWorkspacePath(symlinkWorkspace, "secret-link.txt"),
+ ).toThrow(/escapes workspace via symlink/);
+ });
+
+ test("allows symlink pointing within workspace", () => {
+ const result = ensureWorkspacePath(symlinkWorkspace, "good-link.txt");
+ expect(result).toBe(join(symlinkWorkspace, "good-link.txt"));
+ });
+
+ test("rejects nested symlink escape (subdir/nested-escape)", () => {
+ expect(() =>
+ ensureWorkspacePath(symlinkWorkspace, "subdir/nested-escape/secret.txt"),
+ ).toThrow(/escapes workspace via symlink/);
+ });
+
+ test("rejects symlink escape via directory symlink alone", () => {
+ expect(() => ensureWorkspacePath(symlinkWorkspace, "escape-link")).toThrow(
+ /escapes workspace via symlink/,
+ );
+ });
+
+ test("handles non-existent file in real directory (write target)", () => {
+    // File doesn't exist but parent is a real dir inside workspace — should pass
+ const result = ensureWorkspacePath(symlinkWorkspace, "subdir/new-file.txt");
+ expect(result).toBe(join(symlinkWorkspace, "subdir", "new-file.txt"));
+ });
+
+ test("rejects non-existent file under symlink-escaped parent", () => {
+    // Parent is a symlink pointing outside — even though target file doesn't exist
+ expect(() =>
+ ensureWorkspacePath(symlinkWorkspace, "escape-link/new-file.txt"),
+ ).toThrow(/escapes workspace via symlink/);
+ });
+
+ test("writeWorkspaceFile rejects path through symlink escape", async () => {
+ await expect(
+ writeWorkspaceFile(symlinkWorkspace, "escape-link/evil.txt", "pwned"),
+ ).rejects.toThrow(/escapes workspace via symlink/);
+ });
+
+ test("writeWorkspaceFile allows normal nested write", async () => {
+ const result = await writeWorkspaceFile(
+ symlinkWorkspace,
+ "new-dir/file.txt",
+ "ok",
+ );
+ expect(result.filePath).toBe(join(symlinkWorkspace, "new-dir", "file.txt"));
+ });
});