feat: add web search (#1565)

Authored by Kujtim Hoxha

Change summary

go.mod                                                     |   2 
internal/agent/agentic_fetch_tool.go                       |  56 +
internal/agent/templates/agentic_fetch.md                  |  59 +-
internal/agent/templates/agentic_fetch_prompt.md.tpl       |  51 +
internal/agent/tools/fetch_helpers.go                      |  90 +++
internal/agent/tools/fetch_types.go                        |  15 
internal/agent/tools/search.go                             | 183 ++++++++
internal/agent/tools/web_search.go                         |  51 ++
internal/agent/tools/web_search.md                         |  18 
internal/tui/components/chat/messages/renderer.go          |  39 +
internal/tui/components/chat/messages/tool.go              |   4 
internal/tui/components/dialogs/permissions/permissions.go |   9 
12 files changed, 505 insertions(+), 72 deletions(-)

Detailed changes

go.mod

@@ -55,6 +55,7 @@ require (
 	github.com/tidwall/sjson v1.2.5
 	github.com/zeebo/xxh3 v1.0.2
 	golang.org/x/mod v0.30.0
+	golang.org/x/net v0.47.0
 	golang.org/x/sync v0.18.0
 	golang.org/x/text v0.31.0
 	gopkg.in/natefinch/lumberjack.v2 v2.2.1
@@ -164,7 +165,6 @@ require (
 	golang.org/x/crypto v0.45.0 // indirect
 	golang.org/x/exp v0.0.0-20250620022241-b7579e27df2b // indirect
 	golang.org/x/image v0.27.0 // indirect
-	golang.org/x/net v0.47.0 // indirect
 	golang.org/x/oauth2 v0.33.0 // indirect
 	golang.org/x/sys v0.38.0 // indirect
 	golang.org/x/term v0.37.0 // indirect

internal/agent/agentic_fetch_tool.go

@@ -27,10 +27,6 @@ type agenticFetchValidationResult struct {
 
 // validateAgenticFetchParams validates the tool call parameters and extracts required context values.
 func validateAgenticFetchParams(ctx context.Context, params tools.AgenticFetchParams) (agenticFetchValidationResult, error) {
-	if params.URL == "" {
-		return agenticFetchValidationResult{}, errors.New("url is required")
-	}
-
 	if params.Prompt == "" {
 		return agenticFetchValidationResult{}, errors.New("prompt is required")
 	}
@@ -75,6 +71,14 @@ func (c *coordinator) agenticFetchTool(_ context.Context, client *http.Client) (
 				return fantasy.NewTextErrorResponse(err.Error()), nil
 			}
 
+			// Determine description based on mode.
+			var description string
+			if params.URL != "" {
+				description = fmt.Sprintf("Fetch and analyze content from URL: %s", params.URL)
+			} else {
+				description = "Search the web and analyze results"
+			}
+
 			p := c.permissions.Request(
 				permission.CreatePermissionRequest{
 					SessionID:   validationResult.SessionID,
@@ -82,7 +86,7 @@ func (c *coordinator) agenticFetchTool(_ context.Context, client *http.Client) (
 					ToolCallID:  call.ID,
 					ToolName:    tools.AgenticFetchToolName,
 					Action:      "fetch",
-					Description: fmt.Sprintf("Fetch and analyze content from URL: %s", params.URL),
+					Description: description,
 					Params:      tools.AgenticFetchPermissionsParams(params),
 				},
 			)
@@ -91,36 +95,43 @@ func (c *coordinator) agenticFetchTool(_ context.Context, client *http.Client) (
 				return fantasy.ToolResponse{}, permission.ErrorPermissionDenied
 			}
 
-			content, err := tools.FetchURLAndConvert(ctx, client, params.URL)
-			if err != nil {
-				return fantasy.NewTextErrorResponse(fmt.Sprintf("Failed to fetch URL: %s", err)), nil
-			}
-
 			tmpDir, err := os.MkdirTemp(c.cfg.Options.DataDirectory, "crush-fetch-*")
 			if err != nil {
 				return fantasy.NewTextErrorResponse(fmt.Sprintf("Failed to create temporary directory: %s", err)), nil
 			}
 			defer os.RemoveAll(tmpDir)
 
-			hasLargeContent := len(content) > tools.LargeContentThreshold
 			var fullPrompt string
 
-			if hasLargeContent {
-				tempFile, err := os.CreateTemp(tmpDir, "page-*.md")
+			if params.URL != "" {
+				// URL mode: fetch the URL content first.
+				content, err := tools.FetchURLAndConvert(ctx, client, params.URL)
 				if err != nil {
-					return fantasy.NewTextErrorResponse(fmt.Sprintf("Failed to create temporary file: %s", err)), nil
+					return fantasy.NewTextErrorResponse(fmt.Sprintf("Failed to fetch URL: %s", err)), nil
 				}
-				tempFilePath := tempFile.Name()
 
-				if _, err := tempFile.WriteString(content); err != nil {
+				hasLargeContent := len(content) > tools.LargeContentThreshold
+
+				if hasLargeContent {
+					tempFile, err := os.CreateTemp(tmpDir, "page-*.md")
+					if err != nil {
+						return fantasy.NewTextErrorResponse(fmt.Sprintf("Failed to create temporary file: %s", err)), nil
+					}
+					tempFilePath := tempFile.Name()
+
+					if _, err := tempFile.WriteString(content); err != nil {
+						tempFile.Close()
+						return fantasy.NewTextErrorResponse(fmt.Sprintf("Failed to write content to file: %s", err)), nil
+					}
 					tempFile.Close()
-					return fantasy.NewTextErrorResponse(fmt.Sprintf("Failed to write content to file: %s", err)), nil
-				}
-				tempFile.Close()
 
-				fullPrompt = fmt.Sprintf("%s\n\nThe web page from %s has been saved to: %s\n\nUse the view and grep tools to analyze this file and extract the requested information.", params.Prompt, params.URL, tempFilePath)
+					fullPrompt = fmt.Sprintf("%s\n\nThe web page from %s has been saved to: %s\n\nUse the view and grep tools to analyze this file and extract the requested information.", params.Prompt, params.URL, tempFilePath)
+				} else {
+					fullPrompt = fmt.Sprintf("%s\n\nWeb page URL: %s\n\n<webpage_content>\n%s\n</webpage_content>", params.Prompt, params.URL, content)
+				}
 			} else {
-				fullPrompt = fmt.Sprintf("%s\n\nWeb page URL: %s\n\n<webpage_content>\n%s\n</webpage_content>", params.Prompt, params.URL, content)
+				// Search mode: let the sub-agent search and fetch as needed.
+				fullPrompt = fmt.Sprintf("%s\n\nUse the web_search tool to find relevant information. Break down the question into smaller, focused searches if needed. After searching, use web_fetch to get detailed content from the most relevant results.", params.Prompt)
 			}
 
 			promptOpts := []prompt.Option{
@@ -148,10 +159,13 @@ func (c *coordinator) agenticFetchTool(_ context.Context, client *http.Client) (
 			}
 
 			webFetchTool := tools.NewWebFetchTool(tmpDir, client)
+			webSearchTool := tools.NewWebSearchTool(client)
 			fetchTools := []fantasy.AgentTool{
 				webFetchTool,
+				webSearchTool,
 				tools.NewGlobTool(tmpDir),
 				tools.NewGrepTool(tmpDir),
+				tools.NewSourcegraphTool(client),
 				tools.NewViewTool(c.lspClients, c.permissions, tmpDir),
 			}
 

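Editor's note: the tool now dispatches on whether `url` is set, both for the permission description and for the sub-agent prompt. A minimal sketch of the two call shapes, using `AgenticFetchParams` from `fetch_types.go` (the values are illustrative):

	urlMode := tools.AgenticFetchParams{
		URL:    "https://example.com/docs",
		Prompt: "Summarize the installation steps",
	}
	searchMode := tools.AgenticFetchParams{
		// No URL: the sub-agent uses web_search, then web_fetch, on its own.
		Prompt: "What changed in the latest Go release?",
	}

In URL mode the page is fetched up front (spilling to a temp file past LargeContentThreshold); in search mode the fetch is deferred entirely to the sub-agent's new web_search and web_fetch tools.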
internal/agent/templates/agentic_fetch.md

@@ -1,12 +1,12 @@
-Fetches content from a specified URL and processes it using an AI model to extract information or answer questions.
+Fetches content from a URL or searches the web, then processes it using an AI model to extract information or answer questions.
 
 <when_to_use>
 Use this tool when you need to:
-- Extract specific information from a webpage (e.g., "get pricing info")
-- Answer questions about web content (e.g., "what does this article say about X?")
+- Search the web for information (omit the url parameter)
+- Extract specific information from a webpage (provide a url)
+- Answer questions about web content
 - Summarize or analyze web pages
-- Find specific data within large pages
-- Interpret or process web content with AI
+- Research topics by searching and following links
 
 DO NOT use this tool when:
 - You just need raw content without analysis (use fetch instead - faster and cheaper)
@@ -15,37 +15,50 @@ DO NOT use this tool when:
 </when_to_use>
 
 <usage>
-- Takes a URL and a prompt as input
-- Fetches the URL content, converts HTML to markdown
-- Processes the content with the prompt using a small, fast model
-- Returns the model's response about the content
-- Use this tool when you need to retrieve and analyze web content
+- Provide a prompt describing what information you want to find or extract (required)
+- Optionally provide a URL to fetch and analyze specific content
+- If no URL is provided, the agent will search the web to find relevant information
+- The tool spawns a sub-agent with web_search, web_fetch, and analysis tools
+- Returns the agent's response about the content
 </usage>
 
-<usage_notes>
+<parameters>
+- prompt: What information you want to find or extract (required)
+- url: The URL to fetch content from (optional - if not provided, agent will search the web)
+</parameters>
 
+<usage_notes>
 - IMPORTANT: If an MCP-provided web fetch tool is available, prefer using that tool instead of this one, as it may have fewer restrictions. All MCP-provided tools start with "mcp_".
-- The URL must be a fully-formed valid URL
-- HTTP URLs will be automatically upgraded to HTTPS
-- The prompt should describe what information you want to extract from the page
-- This tool is read-only and does not modify any files
-- Results will be summarized if the content is very large
-- For very large pages, the content will be saved to a temporary file and the agent will have access to grep/view tools to analyze it
-- When a URL redirects to a different host, the tool will inform you and provide the redirect URL. You should then make a new fetch request with the redirect URL to fetch the content.
-- This tool uses AI processing and costs more tokens than the simple fetch tool
-  </usage_notes>
+- When using URL mode: The URL must be a fully-formed valid URL. HTTP URLs will be automatically upgraded to HTTPS.
+- When searching: Just provide the prompt describing what you want to find - the agent will search and fetch relevant pages.
+- The sub-agent can perform multiple searches and fetch multiple pages to gather comprehensive information.
+- This tool is read-only and does not modify any files.
+- Results will be summarized if the content is very large.
+- This tool uses AI processing and costs more tokens than the simple fetch tool.
+</usage_notes>
 
 <limitations>
-- Max response size: 5MB
+- Max response size: 5MB per page
 - Only supports HTTP and HTTPS protocols
 - Cannot handle authentication or cookies
 - Some websites may block automated requests
 - Uses additional tokens for AI processing
+- Search results depend on DuckDuckGo availability
 </limitations>
 
 <tips>
-- Be specific in your prompt about what information you want to extract
+- Be specific in your prompt about what information you want
+- For research tasks, omit the URL and let the agent search and follow relevant links
 - For complex pages, ask the agent to focus on specific sections
-- The agent has access to grep and view tools when analyzing large pages
+- The agent has access to web_search, web_fetch, grep, and view tools
 - If you just need raw content, use the fetch tool instead to save tokens
 </tips>
+
+<examples>
+Search for information:
+- prompt: "What are the main new features in the latest Python release?"
+
+Fetch and analyze a URL:
+- url: "https://docs.python.org/3/whatsnew/3.12.html"
+- prompt: "Summarize the key changes in Python 3.12"
+</examples>

internal/agent/templates/agentic_fetch_prompt.md.tpl

@@ -1,18 +1,38 @@
-You are a web content analysis agent for Crush. Your task is to analyze web page content and extract the information requested by the user.
+You are a web content analysis agent for Crush. Your task is to analyze web content, search results, or web pages to extract the information requested by the user.
 
 <rules>
-1. You should be concise and direct in your responses
+1. Be concise and direct in your responses
 2. Focus only on the information requested in the user's prompt
 3. If the content is provided in a file path, use the grep and view tools to efficiently search through it
-4. When relevant, quote specific sections from the page to support your answer
+4. When relevant, quote specific sections from the content to support your answer
 5. If the requested information is not found, clearly state that
 6. Any file paths you use MUST be absolute
-7. **IMPORTANT**: If you need information from a linked page to answer the question, use the web_fetch tool to follow that link
-8. After fetching a link, analyze the content yourself to extract what's needed
-9. Don't hesitate to follow multiple links if necessary to get complete information
-10. **CRITICAL**: At the end of your response, include a "Sources" section listing ALL URLs that were useful in answering the question
+7. **IMPORTANT**: If you need information from a linked page or search result, use the web_fetch tool to get that content
+8. **IMPORTANT**: If you need to search for more information, use the web_search tool
+9. After fetching a link, analyze the content yourself to extract what's needed
+10. Don't hesitate to follow multiple links or perform multiple searches if necessary to get complete information
+11. **CRITICAL**: At the end of your response, include a "Sources" section listing ALL URLs that were useful in answering the question
 </rules>
 
+<search_strategy>
+When searching for information:
+
+1. **Break down complex questions** - If the user's question has multiple parts, search for each part separately
+2. **Use specific, targeted queries** - Prefer multiple small searches over one broad search
+   - Bad: "Python 3.12 new features performance improvements async changes"
+   - Good: First "Python 3.12 new features", then "Python 3.12 performance improvements", then "Python 3.12 async changes"
+3. **Iterate and refine** - If initial results aren't helpful, try different search terms or more specific queries
+4. **Search for different aspects** - For comprehensive answers, search for different angles of the topic
+5. **Follow up on promising results** - When you find a good source, fetch it and look for links to related information
+
+Example workflow for "What are the pros and cons of using Rust vs Go for web services?":
+- Search 1: "Rust web services advantages"
+- Search 2: "Go web services advantages"
+- Search 3: "Rust vs Go performance comparison"
+- Search 4: "Rust vs Go developer experience"
+- Then fetch the most relevant results from each search
+</search_strategy>
+
 <response_format>
 Your response should be structured as follows:
 
@@ -24,7 +44,7 @@ Your response should be structured as follows:
 - [URL 3 that was useful]
 ...
 
-Only include URLs that actually contributed information to your answer. The main URL is always included. Add any additional URLs you fetched that provided relevant information.
+Only include URLs that actually contributed information to your answer. Include the main URL or search results that were helpful. Add any additional URLs you fetched that provided relevant information.
 </response_format>
 
 <env>
@@ -33,9 +53,20 @@ Platform: {{.Platform}}
 Today's date: {{.Date}}
 </env>
 
+<web_search_tool>
+You have access to a web_search tool that allows you to search the web:
+- Provide a search query and optionally max_results (default: 10)
+- The tool returns search results with titles, URLs, and snippets
+- After getting search results, use web_fetch to get full content from relevant URLs
+- **Prefer multiple focused searches over single broad searches**
+- Keep queries short and specific (3-6 words is often ideal)
+- If results aren't relevant, try rephrasing with different keywords
+- Don't be afraid to do 3-5+ searches to thoroughly answer a complex question
+</web_search_tool>
+
 <web_fetch_tool>
-You have access to a web_fetch tool that allows you to fetch additional web pages:
-- Use it when you need to follow links from the current page
+You have access to a web_fetch tool that allows you to fetch web pages:
+- Use it when you need to follow links from search results or the current page
 - Provide just the URL (no prompt parameter)
 - The tool will fetch and return the content (or save to a file if large)
 - YOU must then analyze that content to answer the user's question

internal/agent/tools/fetch_helpers.go

@@ -8,12 +8,17 @@ import (
 	"fmt"
 	"io"
 	"net/http"
+	"regexp"
 	"strings"
 	"unicode/utf8"
 
 	md "github.com/JohannesKaufmann/html-to-markdown"
+	"golang.org/x/net/html"
 )
 
+// BrowserUserAgent is a realistic browser User-Agent for better compatibility.
+const BrowserUserAgent = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36"
+
 // FetchURLAndConvert fetches a URL and converts HTML content to markdown.
 func FetchURLAndConvert(ctx context.Context, client *http.Client, url string) (string, error) {
 	req, err := http.NewRequestWithContext(ctx, "GET", url, nil)
@@ -21,7 +26,10 @@ func FetchURLAndConvert(ctx context.Context, client *http.Client, url string) (s
 		return "", fmt.Errorf("failed to create request: %w", err)
 	}
 
-	req.Header.Set("User-Agent", "crush/1.0")
+	// Use realistic browser headers for better compatibility.
+	req.Header.Set("User-Agent", BrowserUserAgent)
+	req.Header.Set("Accept", "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8")
+	req.Header.Set("Accept-Language", "en-US,en;q=0.5")
 
 	resp, err := client.Do(req)
 	if err != nil {
@@ -49,11 +57,13 @@ func FetchURLAndConvert(ctx context.Context, client *http.Client, url string) (s
 
 	// Convert HTML to markdown for better AI processing.
 	if strings.Contains(contentType, "text/html") {
-		markdown, err := ConvertHTMLToMarkdown(content)
+		// Remove noisy elements before conversion.
+		cleanedHTML := removeNoisyElements(content)
+		markdown, err := ConvertHTMLToMarkdown(cleanedHTML)
 		if err != nil {
 			return "", fmt.Errorf("failed to convert HTML to markdown: %w", err)
 		}
-		content = markdown
+		content = cleanupMarkdown(markdown)
 	} else if strings.Contains(contentType, "application/json") || strings.Contains(contentType, "text/json") {
 		// Format JSON for better readability.
 		formatted, err := FormatJSON(content)
@@ -66,11 +76,79 @@ func FetchURLAndConvert(ctx context.Context, client *http.Client, url string) (s
 	return content, nil
 }
 
+// removeNoisyElements removes script, style, nav, header, footer, and other
+// noisy elements from HTML to improve content extraction.
+func removeNoisyElements(htmlContent string) string {
+	doc, err := html.Parse(strings.NewReader(htmlContent))
+	if err != nil {
+		// If parsing fails, return original content.
+		return htmlContent
+	}
+
+	// Elements to remove entirely.
+	noisyTags := map[string]bool{
+		"script":   true,
+		"style":    true,
+		"nav":      true,
+		"header":   true,
+		"footer":   true,
+		"aside":    true,
+		"noscript": true,
+		"iframe":   true,
+		"svg":      true,
+	}
+
+	var removeNodes func(*html.Node)
+	removeNodes = func(n *html.Node) {
+		var toRemove []*html.Node
+
+		for c := n.FirstChild; c != nil; c = c.NextSibling {
+			if c.Type == html.ElementNode && noisyTags[c.Data] {
+				toRemove = append(toRemove, c)
+			} else {
+				removeNodes(c)
+			}
+		}
+
+		for _, node := range toRemove {
+			n.RemoveChild(node)
+		}
+	}
+
+	removeNodes(doc)
+
+	var buf bytes.Buffer
+	if err := html.Render(&buf, doc); err != nil {
+		return htmlContent
+	}
+
+	return buf.String()
+}
+
+// cleanupMarkdown removes excessive whitespace and blank lines from markdown.
+func cleanupMarkdown(content string) string {
+	// Collapse multiple blank lines into at most two.
+	multipleNewlines := regexp.MustCompile(`\n{3,}`)
+	content = multipleNewlines.ReplaceAllString(content, "\n\n")
+
+	// Remove trailing whitespace from each line.
+	lines := strings.Split(content, "\n")
+	for i, line := range lines {
+		lines[i] = strings.TrimRight(line, " \t")
+	}
+	content = strings.Join(lines, "\n")
+
+	// Trim leading/trailing whitespace.
+	content = strings.TrimSpace(content)
+
+	return content
+}
+
 // ConvertHTMLToMarkdown converts HTML content to markdown format.
-func ConvertHTMLToMarkdown(html string) (string, error) {
+func ConvertHTMLToMarkdown(htmlContent string) (string, error) {
 	converter := md.NewConverter("", true, nil)
 
-	markdown, err := converter.ConvertString(html)
+	markdown, err := converter.ConvertString(htmlContent)
 	if err != nil {
 		return "", err
 	}
@@ -80,7 +158,7 @@ func ConvertHTMLToMarkdown(html string) (string, error) {
 
 // FormatJSON formats JSON content with proper indentation.
 func FormatJSON(content string) (string, error) {
-	var data interface{}
+	var data any
 	if err := json.Unmarshal([]byte(content), &data); err != nil {
 		return "", err
 	}

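Editor's note: the markdown tidy pass is deterministic enough to trace by hand. A tiny worked example of `cleanupMarkdown` (in-package; the input string is made up for illustration):

	in := "# Title\n\n\n\nBody line   \n"
	out := cleanupMarkdown(in)
	// The `\n{3,}` regexp collapses the four newlines to two, trailing
	// spaces are trimmed per line, and the whole string is trimmed:
	// out == "# Title\n\nBody line"

For HTML responses the full pipeline is now strip (removeNoisyElements) → convert (ConvertHTMLToMarkdown) → tidy (cleanupMarkdown).
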
internal/agent/tools/fetch_types.go

@@ -6,18 +6,21 @@ const AgenticFetchToolName = "agentic_fetch"
 // WebFetchToolName is the name of the web_fetch tool.
 const WebFetchToolName = "web_fetch"
 
+// WebSearchToolName is the name of the web_search tool for sub-agents.
+const WebSearchToolName = "web_search"
+
 // LargeContentThreshold is the size threshold for saving content to a file.
 const LargeContentThreshold = 50000 // 50KB
 
 // AgenticFetchParams defines the parameters for the agentic fetch tool.
 type AgenticFetchParams struct {
-	URL    string `json:"url" description:"The URL to fetch content from"`
-	Prompt string `json:"prompt" description:"The prompt to run on the fetched content"`
+	URL    string `json:"url,omitempty" description:"The URL to fetch content from (optional - if not provided, the agent will search the web)"`
+	Prompt string `json:"prompt" description:"The prompt describing what information to find or extract"`
 }
 
 // AgenticFetchPermissionsParams defines the permission parameters for the agentic fetch tool.
 type AgenticFetchPermissionsParams struct {
-	URL    string `json:"url"`
+	URL    string `json:"url,omitempty"`
 	Prompt string `json:"prompt"`
 }
 
@@ -26,6 +29,12 @@ type WebFetchParams struct {
 	URL string `json:"url" description:"The URL to fetch content from"`
 }
 
+// WebSearchParams defines the parameters for the web_search tool.
+type WebSearchParams struct {
+	Query      string `json:"query" description:"The search query to find information on the web"`
+	MaxResults int    `json:"max_results,omitempty" description:"Maximum number of results to return (default: 10, max: 20)"`
+}
+
 // FetchParams defines the parameters for the simple fetch tool.
 type FetchParams struct {
 	URL     string `json:"url" description:"The URL to fetch content from"`

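Editor's note: because `URL` now carries `omitempty`, search-mode tool calls serialize without a `url` key at all. A quick in-package sketch (assumes `encoding/json` is imported):

	b, _ := json.Marshal(AgenticFetchParams{Prompt: "find pricing info"})
	// string(b) == `{"prompt":"find pricing info"}` — no "url" field emitted
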
internal/agent/tools/search.go

@@ -0,0 +1,183 @@
+package tools
+
+import (
+	"context"
+	"fmt"
+	"io"
+	"net/http"
+	"net/url"
+	"slices"
+	"strings"
+
+	"golang.org/x/net/html"
+)
+
+// SearchResult represents a single search result from DuckDuckGo.
+type SearchResult struct {
+	Title    string
+	Link     string
+	Snippet  string
+	Position int
+}
+
+// searchDuckDuckGo performs a web search using DuckDuckGo's HTML endpoint.
+func searchDuckDuckGo(ctx context.Context, client *http.Client, query string, maxResults int) ([]SearchResult, error) {
+	if maxResults <= 0 {
+		maxResults = 10
+	}
+
+	formData := url.Values{}
+	formData.Set("q", query)
+	formData.Set("b", "")
+	formData.Set("kl", "")
+
+	req, err := http.NewRequestWithContext(ctx, "POST", "https://html.duckduckgo.com/html", strings.NewReader(formData.Encode()))
+	if err != nil {
+		return nil, fmt.Errorf("failed to create request: %w", err)
+	}
+
+	req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
+	req.Header.Set("User-Agent", BrowserUserAgent)
+
+	resp, err := client.Do(req)
+	if err != nil {
+		return nil, fmt.Errorf("failed to execute search: %w", err)
+	}
+	defer resp.Body.Close()
+
+	if resp.StatusCode != http.StatusOK {
+		return nil, fmt.Errorf("search failed with status code: %d", resp.StatusCode)
+	}
+
+	body, err := io.ReadAll(resp.Body)
+	if err != nil {
+		return nil, fmt.Errorf("failed to read response: %w", err)
+	}
+
+	return parseSearchResults(string(body), maxResults)
+}
+
+// parseSearchResults extracts search results from DuckDuckGo HTML response.
+func parseSearchResults(htmlContent string, maxResults int) ([]SearchResult, error) {
+	doc, err := html.Parse(strings.NewReader(htmlContent))
+	if err != nil {
+		return nil, fmt.Errorf("failed to parse HTML: %w", err)
+	}
+
+	var results []SearchResult
+	var traverse func(*html.Node)
+
+	traverse = func(n *html.Node) {
+		if n.Type == html.ElementNode && n.Data == "div" && hasClass(n, "result") {
+			result := extractResult(n)
+			if result != nil && result.Link != "" && !strings.Contains(result.Link, "y.js") {
+				result.Position = len(results) + 1
+				results = append(results, *result)
+				if len(results) >= maxResults {
+					return
+				}
+			}
+		}
+		for c := n.FirstChild; c != nil && len(results) < maxResults; c = c.NextSibling {
+			traverse(c)
+		}
+	}
+
+	traverse(doc)
+	return results, nil
+}
+
+// hasClass checks if an HTML node has a specific class.
+func hasClass(n *html.Node, class string) bool {
+	for _, attr := range n.Attr {
+		if attr.Key == "class" {
+			return slices.Contains(strings.Fields(attr.Val), class)
+		}
+	}
+	return false
+}
+
+// extractResult extracts a search result from a result div node.
+func extractResult(n *html.Node) *SearchResult {
+	result := &SearchResult{}
+
+	var traverse func(*html.Node)
+	traverse = func(node *html.Node) {
+		if node.Type == html.ElementNode {
+			// Look for title link.
+			if node.Data == "a" && hasClass(node, "result__a") {
+				result.Title = getTextContent(node)
+				for _, attr := range node.Attr {
+					if attr.Key == "href" {
+						result.Link = cleanDuckDuckGoURL(attr.Val)
+						break
+					}
+				}
+			}
+			// Look for snippet.
+			if node.Data == "a" && hasClass(node, "result__snippet") {
+				result.Snippet = getTextContent(node)
+			}
+		}
+		for c := node.FirstChild; c != nil; c = c.NextSibling {
+			traverse(c)
+		}
+	}
+
+	traverse(n)
+	return result
+}
+
+// getTextContent extracts all text content from a node and its children.
+func getTextContent(n *html.Node) string {
+	var text strings.Builder
+	var traverse func(*html.Node)
+
+	traverse = func(node *html.Node) {
+		if node.Type == html.TextNode {
+			text.WriteString(node.Data)
+		}
+		for c := node.FirstChild; c != nil; c = c.NextSibling {
+			traverse(c)
+		}
+	}
+
+	traverse(n)
+	return strings.TrimSpace(text.String())
+}
+
+// cleanDuckDuckGoURL extracts the actual URL from DuckDuckGo's redirect URL.
+func cleanDuckDuckGoURL(rawURL string) string {
+	if strings.HasPrefix(rawURL, "//duckduckgo.com/l/?uddg=") {
+		// Extract the actual URL from the redirect.
+		if idx := strings.Index(rawURL, "uddg="); idx != -1 {
+			encoded := rawURL[idx+5:]
+			if ampIdx := strings.Index(encoded, "&"); ampIdx != -1 {
+				encoded = encoded[:ampIdx]
+			}
+			decoded, err := url.QueryUnescape(encoded)
+			if err == nil {
+				return decoded
+			}
+		}
+	}
+	return rawURL
+}
+
+// formatSearchResults formats search results for LLM consumption.
+func formatSearchResults(results []SearchResult) string {
+	if len(results) == 0 {
+		return "No results were found for your search query. This could be due to DuckDuckGo's bot detection or the query returned no matches. Please try rephrasing your search or try again in a few minutes."
+	}
+
+	var sb strings.Builder
+	sb.WriteString(fmt.Sprintf("Found %d search results:\n\n", len(results)))
+
+	for _, result := range results {
+		sb.WriteString(fmt.Sprintf("%d. %s\n", result.Position, result.Title))
+		sb.WriteString(fmt.Sprintf("   URL: %s\n", result.Link))
+		sb.WriteString(fmt.Sprintf("   Summary: %s\n\n", result.Snippet))
+	}
+
+	return sb.String()
+}

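Editor's note: these helpers are unexported, so any caller lives in package tools. A rough sketch of the happy path (the query and result count are illustrative; assumes a context.Context `ctx` in scope):

	func exampleSearch(ctx context.Context) error {
		// Query DuckDuckGo's HTML endpoint and cap at 5 results.
		results, err := searchDuckDuckGo(ctx, http.DefaultClient, "golang html parser", 5)
		if err != nil {
			return fmt.Errorf("search failed: %w", err)
		}
		fmt.Print(formatSearchResults(results))
		return nil
	}

The redirect cleaner is also easy to exercise directly:

	cleanDuckDuckGoURL("//duckduckgo.com/l/?uddg=https%3A%2F%2Fgo.dev%2F&rut=abc")
	// => "https://go.dev/"
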
internal/agent/tools/web_search.go

@@ -0,0 +1,51 @@
+package tools
+
+import (
+	"context"
+	_ "embed"
+	"net/http"
+	"time"
+
+	"charm.land/fantasy"
+)
+
+//go:embed web_search.md
+var webSearchToolDescription []byte
+
+// NewWebSearchTool creates a web search tool for sub-agents (no permissions needed).
+func NewWebSearchTool(client *http.Client) fantasy.AgentTool {
+	if client == nil {
+		client = &http.Client{
+			Timeout: 30 * time.Second,
+			Transport: &http.Transport{
+				MaxIdleConns:        100,
+				MaxIdleConnsPerHost: 10,
+				IdleConnTimeout:     90 * time.Second,
+			},
+		}
+	}
+
+	return fantasy.NewParallelAgentTool(
+		WebSearchToolName,
+		string(webSearchToolDescription),
+		func(ctx context.Context, params WebSearchParams, call fantasy.ToolCall) (fantasy.ToolResponse, error) {
+			if params.Query == "" {
+				return fantasy.NewTextErrorResponse("query is required"), nil
+			}
+
+			maxResults := params.MaxResults
+			if maxResults <= 0 {
+				maxResults = 10
+			}
+			if maxResults > 20 {
+				maxResults = 20
+			}
+
+			results, err := searchDuckDuckGo(ctx, client, params.Query, maxResults)
+			if err != nil {
+				return fantasy.NewTextErrorResponse("Failed to search: " + err.Error()), nil
+			}
+
+			return fantasy.NewTextResponse(formatSearchResults(results)), nil
+		})
+}

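Editor's note: callers may pass nil and still get a working tool; the constructor falls back to a pooled client with a 30-second timeout. A sketch of wiring it into a sub-agent's tool list, mirroring agentic_fetch_tool.go:

	subTools := []fantasy.AgentTool{
		tools.NewWebSearchTool(nil), // nil => default pooled client, 30s timeout
	}
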
internal/agent/tools/web_search.md

@@ -0,0 +1,18 @@
+Searches the web using DuckDuckGo and returns search results.
+
+<usage>
+- Provide a search query to find information on the web
+- Returns a list of search results with titles, URLs, and snippets
+- Use this to find relevant web pages, then use web_fetch to get full content
+</usage>
+
+<parameters>
+- query: The search query string (required)
+- max_results: Maximum number of results to return (default: 10, max: 20)
+</parameters>
+
+<tips>
+- Use specific, targeted search queries for better results
+- After getting results, use web_fetch to get the full content of relevant pages
+- Combine multiple searches to gather comprehensive information
+</tips>

internal/tui/components/chat/messages/renderer.go

@@ -191,6 +191,7 @@ func init() {
 	registry.register(tools.FetchToolName, func() renderer { return simpleFetchRenderer{} })
 	registry.register(tools.AgenticFetchToolName, func() renderer { return agenticFetchRenderer{} })
 	registry.register(tools.WebFetchToolName, func() renderer { return webFetchRenderer{} })
+	registry.register(tools.WebSearchToolName, func() renderer { return webSearchRenderer{} })
 	registry.register(tools.GlobToolName, func() renderer { return globRenderer{} })
 	registry.register(tools.GrepToolName, func() renderer { return grepRenderer{} })
 	registry.register(tools.LSToolName, func() renderer { return lsRenderer{} })
@@ -636,15 +637,17 @@ type agenticFetchRenderer struct {
 	baseRenderer
 }
 
-// Render displays the fetched URL with prompt parameter and nested tool calls
+// Render displays the fetched URL or web search with prompt parameter and nested tool calls
 func (fr agenticFetchRenderer) Render(v *toolCallCmp) string {
 	t := styles.CurrentTheme()
 	var params tools.AgenticFetchParams
 	var args []string
 	if err := fr.unmarshalParams(v.call.Input, &params); err == nil {
-		args = newParamBuilder().
-			addMain(params.URL).
-			build()
+		if params.URL != "" {
+			args = newParamBuilder().
+				addMain(params.URL).
+				build()
+		}
 	}
 
 	prompt := params.Prompt
@@ -731,6 +734,30 @@ func (wfr webFetchRenderer) Render(v *toolCallCmp) string {
 	})
 }
 
+// -----------------------------------------------------------------------------
+//  Web search renderer
+// -----------------------------------------------------------------------------
+
+// webSearchRenderer handles web search with query display
+type webSearchRenderer struct {
+	baseRenderer
+}
+
+// Render displays a compact view of web_search with just the query
+func (wsr webSearchRenderer) Render(v *toolCallCmp) string {
+	var params tools.WebSearchParams
+	var args []string
+	if err := wsr.unmarshalParams(v.call.Input, &params); err == nil {
+		args = newParamBuilder().
+			addMain(params.Query).
+			build()
+	}
+
+	return wsr.renderWithParams(v, "Search", args, func() string {
+		return renderMarkdownContent(v, v.result.Content)
+	})
+}
+
 // -----------------------------------------------------------------------------
 //  Download renderer
 // -----------------------------------------------------------------------------
@@ -1260,7 +1287,9 @@ func prettifyToolName(name string) string {
 	case tools.AgenticFetchToolName:
 		return "Agentic Fetch"
 	case tools.WebFetchToolName:
-		return "Fetching"
+		return "Fetch"
+	case tools.WebSearchToolName:
+		return "Search"
 	case tools.GlobToolName:
 		return "Glob"
 	case tools.GrepToolName:

internal/tui/components/chat/messages/tool.go

@@ -301,7 +301,9 @@ func (m *toolCallCmp) formatParametersForCopy() string {
 		var params tools.AgenticFetchParams
 		if json.Unmarshal([]byte(m.call.Input), &params) == nil {
 			var parts []string
-			parts = append(parts, fmt.Sprintf("**URL:** %s", params.URL))
+			if params.URL != "" {
+				parts = append(parts, fmt.Sprintf("**URL:** %s", params.URL))
+			}
 			if params.Prompt != "" {
 				parts = append(parts, fmt.Sprintf("**Prompt:** %s", params.Prompt))
 			}

internal/tui/components/dialogs/permissions/permissions.go

@@ -389,7 +389,7 @@ func (p *permissionDialogCmp) renderHeader() string {
 	case tools.AgenticFetchToolName:
 		headerParts = append(headerParts,
 			baseStyle.Render(strings.Repeat(" ", p.width)),
-			t.S().Muted.Width(p.width).Bold(true).Render("URL"),
+			t.S().Muted.Width(p.width).Bold(true).Render("Web"),
 		)
 	case tools.ViewToolName:
 		params := p.permission.Params.(tools.ViewPermissionsParams)
@@ -605,7 +605,12 @@ func (p *permissionDialogCmp) generateAgenticFetchContent() string {
 	t := styles.CurrentTheme()
 	baseStyle := t.S().Base.Background(t.BgSubtle)
 	if pr, ok := p.permission.Params.(tools.AgenticFetchPermissionsParams); ok {
-		content := fmt.Sprintf("URL: %s\n\nPrompt: %s", pr.URL, pr.Prompt)
+		var content string
+		if pr.URL != "" {
+			content = fmt.Sprintf("URL: %s\n\nPrompt: %s", pr.URL, pr.Prompt)
+		} else {
+			content = fmt.Sprintf("Prompt: %s", pr.Prompt)
+		}
 		finalContent := baseStyle.
 			Padding(1, 2).
 			Width(p.contentViewPort.Width()).