Detailed changes
@@ -1,17 +1,25 @@
---
version: 1.4.1
title: Line ending should reject lone carriage return
-file: ./test/ghall_test.gleam
-test_name: parse_line_ending_fails_on_lone_carriage_return_test
---
-Input: \r (lone carriage return)
Number of errors: 2
-Error 1:
- Reason: Expected("\"\\n\"", "\r")
- Span: (row_start: 1, col_start: 1, row_end: 1, col_end: 2)
- Context: []
-Error 2:
- Reason: Expected("expected '\r\n' in keyword '\r\n'", "\r")
- Span: (row_start: 1, col_start: 1, row_end: 1, col_end: 2)
- Context: []
+
+Input: \r
+Error 1 [in "unix_line_ending"]: Expected "\n", but got "\r"
+Call chain:
+ ↳ called from "end_of_line"
+1 │ \r
+ ─┬
+ └─ error location
+
+Input: \r
+Error 2 [in "match_chars(\r\n)"]: Expected expected '
+' in keyword '
+', but got "\r"
+Call chain:
+ ↳ called from "exact_string('\r\n')"
+ ↳ called from "consume_exact_string('\r\n')"
+ ↳ called from "end_of_line"
+1 │ \r
+ ─┬
+ └─ error location
@@ -1,11 +1,9 @@
---
version: 1.4.1
title: Multiple line endings should all parse as EndOfLine
-file: ./test/ghall_test.gleam
-test_name: parse_multiple_line_endings_test
---
-Input: \n\r\n\n (Unix, Windows, Unix line endings)
-Parsed 3 line endings:
+Input: \n\r\n\n
+Parsed 3 node(s):
1. EndOfLine
2. EndOfLine
3. EndOfLine
@@ -1,21 +1,72 @@
---
version: 1.4.1
title: Quasi lexer spans with multiline input
-file: ./test/ghall_test.gleam
-test_name: quasi_lexer_off_by_one_test
---
-Token 0: 'l' at Span(row_start: 1, col_start: 1, row_end: 1, col_end: 2)
-Token 1: 'e' at Span(row_start: 1, col_start: 2, row_end: 1, col_end: 3)
-Token 2: 't' at Span(row_start: 1, col_start: 3, row_end: 1, col_end: 4)
-Token 3: ' ' at Span(row_start: 1, col_start: 4, row_end: 1, col_end: 5)
-Token 4: 'x' at Span(row_start: 1, col_start: 5, row_end: 1, col_end: 6)
-Token 5: '1' at Span(row_start: 1, col_start: 6, row_end: 1, col_end: 7)
-Token 6: ' ' at Span(row_start: 1, col_start: 7, row_end: 1, col_end: 8)
-Token 7: '=' at Span(row_start: 1, col_start: 8, row_end: 1, col_end: 9)
-Token 8: '
-' at Span(row_start: 1, col_start: 9, row_end: 2, col_end: 1)
-Token 9: ' ' at Span(row_start: 2, col_start: 1, row_end: 2, col_end: 2)
-Token 10: ' ' at Span(row_start: 2, col_start: 2, row_end: 2, col_end: 3)
-Token 11: 'e' at Span(row_start: 2, col_start: 3, row_end: 2, col_end: 4)
-Token 12: '1' at Span(row_start: 2, col_start: 4, row_end: 2, col_end: 5)
+Input: let x1 =\n  e1
+
+1 │ let x1 =
+ ┬
+ └─ Token 0: 'l'
+2 │ e1
+
+1 │ let x1 =
+ ┬
+ └─ Token 1: 'e'
+2 │ e1
+
+1 │ let x1 =
+ ┬
+ └─ Token 2: 't'
+2 │ e1
+
+1 │ let x1 =
+ ┬
+ └─ Token 3: ' '
+2 │ e1
+
+1 │ let x1 =
+ ┬
+ └─ Token 4: 'x'
+2 │ e1
+
+1 │ let x1 =
+ ┬
+ └─ Token 5: '1'
+2 │ e1
+
+1 │ let x1 =
+ ┬
+ └─ Token 6: ' '
+2 │ e1
+
+1 │ let x1 =
+ ┬
+ └─ Token 7: '='
+2 │ e1
+
+1 │ let x1 =
+ │ ┬
+2 │ e1
+ ┬
+ └─ Token 8: '
+'
+
+1 │ let x1 =
+2 │ e1
+ ┬
+ └─ Token 9: ' '
+
+1 │ let x1 =
+2 │ e1
+ ┬
+ └─ Token 10: ' '
+
+1 │ let x1 =
+2 │ e1
+ ┬
+ └─ Token 11: 'e'
+
+1 │ let x1 =
+2 │ e1
+ ┬
+ └─ Token 12: '1'
@@ -0,0 +1,25 @@
+---
+version: 1.4.1
+title: Visual error demo: shows escaped chars, spans, and parser context
+---
+Number of errors: 2
+
+Input: let x = 42\r
+Error 1 [in "unix_line_ending"]: Expected "\n", but got "\r"
+Call chain:
+ ↳ called from "end_of_line"
+1 │ let x = 42\r
+ ─┬
+ └─ error location
+
+Input: let x = 42\r
+Error 2 [in "match_chars(\r\n)"]: Expected expected '
+' in keyword '
+', but got "\r"
+Call chain:
+ ↳ called from "exact_string('\r\n')"
+ ↳ called from "consume_exact_string('\r\n')"
+ ↳ called from "end_of_line"
+1 │ let x = 42\r
+ ─┬
+ └─ error location
@@ -6,49 +6,78 @@ import node.{
ValidNonAscii,
}
-pub fn exact_string(expected: String, node: Node) -> Parser(Node, String, ctx) {
- use _ <- do(string.to_graphemes(expected) |> match_chars(expected))
- return(node)
+pub fn exact_string(
+ expected: String,
+ node: Node,
+) -> Parser(Node, String, String) {
+ nibble.in(
+ {
+ use _ <- do(string.to_graphemes(expected) |> match_chars(expected))
+ return(node)
+ },
+ "exact_string('" <> expected <> "')",
+ )
}
-pub fn consume_exact_string(expected: String) -> Parser(Nil, String, ctx) {
- use _ <- nibble.do(exact_string(expected, node.Let))
- // NOTE: doesn't matter which constructor
-  return(Nil)
+pub fn consume_exact_string(expected: String) -> Parser(Nil, String, String) {
+  nibble.in(
+    {
+      use _ <- nibble.do(exact_string(expected, node.Let))
+      // NOTE: doesn't matter which constructor
+ return(Nil)
+ },
+ "consume_exact_string('" <> expected <> "')",
+ )
}
-fn match_chars(chars: List(String), context: String) -> Parser(Nil, String, ctx) {
- case chars {
- [] -> return(Nil)
-
- [first, ..rest] -> {
- use _ <- do(
- nibble.take_map(
- "expected '" <> first <> "' in keyword '" <> context <> "'",
- fn(tok) {
- case tok == first {
- True -> Some(Nil)
- False -> None
- }
- },
- ),
- )
- match_chars(rest, context)
- }
- }
+fn match_chars(
+ chars: List(String),
+ context: String,
+) -> Parser(Nil, String, String) {
+ nibble.in(
+ {
+ case chars {
+ [] -> return(Nil)
+
+ [first, ..rest] -> {
+ use _ <- do(
+ nibble.take_map(
+ "expected '" <> first <> "' in keyword '" <> context <> "'",
+ fn(tok) {
+ case tok == first {
+ True -> Some(Nil)
+ False -> None
+ }
+ },
+ ),
+ )
+ match_chars(rest, context)
+ }
+ }
+ },
+ "match_chars(" <> context <> ")",
+ )
}
-pub fn let_keyword() -> Parser(Node, String, ctx) {
- exact_string("let", node.Let)
+pub fn let_keyword() -> Parser(Node, String, String) {
+ nibble.in(exact_string("let", node.Let), "let_keyword")
}
-pub fn end_of_line() -> Parser(Node, String, ctx) {
- use _ <- nibble.do(
- nibble.one_of([nibble.token("\n"), consume_exact_string("\r\n")]),
- )
-  return(EndOfLine)
+pub fn end_of_line() -> Parser(Node, String, String) {
+  nibble.in(
+    {
+      use _ <- nibble.do(
+        nibble.one_of([
+          nibble.in(nibble.token("\n"), "unix_line_ending"),
+          consume_exact_string("\r\n"),
+        ]),
+      )
+ return(EndOfLine)
+ },
+ "end_of_line",
+ )
}
pub fn tab() -> Parser(Node, String, ctx) {
@@ -8,6 +8,7 @@ import nibble/lexer.{Span, Token}
import node
import parser
import quasi_lexer
+import snapshot_helpers
pub fn main() -> Nil {
gleeunit.main()
@@ -31,27 +32,11 @@ pub fn quasi_lexer_off_by_one_test() {
 let input = "let x1 =\n  e1"
let tokens = quasi_lexer.chars() |> quasi_lexer.run(on: input)
- let snap =
- tokens
- |> list.index_map(fn(token, index) {
- let Token(Span(rs, cs, re, ce), lexeme, _) = token
- "Token "
- <> int.to_string(index)
- <> ": '"
- <> lexeme
- <> "' at Span(row_start: "
- <> int.to_string(rs)
- <> ", col_start: "
- <> int.to_string(cs)
- <> ", row_end: "
- <> int.to_string(re)
- <> ", col_end: "
- <> int.to_string(ce)
- <> ")\n"
- })
- |> list.fold("", fn(acc, line) { acc <> line })
-
- birdie.snap(snap, title: "Quasi lexer spans with multiline input")
+ snapshot_helpers.snap_lexer_output(
+ input,
+ tokens,
+ "Quasi lexer spans with multiline input",
+ )
}
pub fn parse_let_successfully_test() {
@@ -66,7 +51,7 @@ pub fn parse_let_failing_test() {
let tokens = quasi_lexer.chars() |> quasi_lexer.run(on: input)
let parser = parser.exact_string("let", node.Let)
let assert Error(error) = nibble.run(tokens, parser)
- let assert [nibble.DeadEnd(Span(_, cs, _, _), Expected(msg, got: got), [])] =
+ let assert [nibble.DeadEnd(Span(_, cs, _, _), Expected(msg, got: got), _)] =
error
let snap =
@@ -103,38 +88,11 @@ pub fn parse_line_ending_fails_on_lone_carriage_return_test() {
let parser = parser.end_of_line()
let assert Error(error) = nibble.run(tokens, parser)
- let snap =
- "Input: \\r (lone carriage return)\n"
- <> "Number of errors: "
- <> int.to_string(list.length(error))
- <> "\n"
- <> {
- error
- |> list.index_map(fn(dead_end, idx) {
- let nibble.DeadEnd(Span(rs, cs, re, ce), reason, context) = dead_end
- "Error "
- <> int.to_string(idx + 1)
- <> ":\n"
- <> " Reason: "
- <> string.inspect(reason)
- <> "\n"
- <> " Span: (row_start: "
- <> int.to_string(rs)
- <> ", col_start: "
- <> int.to_string(cs)
- <> ", row_end: "
- <> int.to_string(re)
- <> ", col_end: "
- <> int.to_string(ce)
- <> ")\n"
- <> " Context: "
- <> string.inspect(context)
- <> "\n"
- })
- |> string.join("")
- }
-
- birdie.snap(snap, title: "Line ending should reject lone carriage return")
+ snapshot_helpers.snap_parse_error(
+ input,
+ error,
+ "Line ending should reject lone carriage return",
+ )
}
pub fn parse_line_ending_fails_on_other_chars_test() {
@@ -165,22 +123,30 @@ pub fn parse_multiple_line_endings_test() {
}
let assert Ok(nodes) = nibble.run(tokens, parser)
- let snap =
- "Input: \\n\\r\\n\\n (Unix, Windows, Unix line endings)\n"
- <> "Parsed "
- <> int.to_string(list.length(nodes))
- <> " line endings:\n"
- <> {
- nodes
- |> list.index_map(fn(n, idx) {
- " " <> int.to_string(idx + 1) <> ". " <> string.inspect(n) <> "\n"
- })
- |> string.join("")
- }
-
- birdie.snap(
- snap,
- title: "Multiple line endings should all parse as EndOfLine",
+ snapshot_helpers.snap_parse_success(
+ input,
+ nodes,
+ "Multiple line endings should all parse as EndOfLine",
+ )
+}
+
+pub fn demo_visual_error_rendering_test() {
+ let input = "let x = 42\r"
+ let tokens = quasi_lexer.chars() |> quasi_lexer.run(on: input)
+
+ // Try to parse "let" followed by a line ending
+ let parser = {
+ use _ <- nibble.do(parser.let_keyword())
+ use _ <- nibble.do(parser.exact_string(" x = 42", node.Let))
+ parser.end_of_line()
+ }
+
+ let assert Error(errors) = nibble.run(tokens, parser)
+
+ snapshot_helpers.snap_parse_error(
+ input,
+ errors,
+ "Visual error demo: shows escaped chars, spans, and parser context",
)
}
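The tests above exercise snap_lexer_output, snap_parse_error, and snap_parse_success; the helper module added below also exposes snap_parse_result for Result values. A minimal, hypothetical sketch of how such a test could look (the test name and snapshot title are illustrative and not part of this change; it assumes the test module's existing imports):

pub fn parse_let_result_sketch_test() {
  let input = "let"
  let tokens = quasi_lexer.chars() |> quasi_lexer.run(on: input)
  let result = nibble.run(tokens, parser.let_keyword())

  // Ok values go through the supplied formatter; Error values reuse the
  // same dead-end rendering, so both outcomes land in one snapshot.
  snapshot_helpers.snap_parse_result(
    input,
    result,
    "Sketch: let keyword via snap_parse_result",
    string.inspect,
  )
}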
@@ -0,0 +1,404 @@
+import birdie
+import gleam/int
+import gleam/list
+import gleam/string
+import nibble
+import nibble/lexer.{type Span, type Token, Span, Token}
+import node.{type Node}
+
+/// Format a Span as a readable string (kept for compatibility with the
+/// Span(...) text used in the old snapshots)
+pub fn format_span(span: Span) -> String {
+ let Span(rs, cs, re, ce) = span
+ "Span(row_start: "
+ <> int.to_string(rs)
+ <> ", col_start: "
+ <> int.to_string(cs)
+ <> ", row_end: "
+ <> int.to_string(re)
+ <> ", col_end: "
+ <> int.to_string(ce)
+ <> ")"
+}
+
+/// Format a single Token with visual span rendering
+pub fn format_token(input: String, token: Token(a), index: Int) -> String {
+ let Token(span, lexeme, _) = token
+ let label = "Token " <> int.to_string(index) <> ": '" <> lexeme <> "'"
+ visual_single_span(input, span, label, 0)
+}
+
+/// Format a list of tokens with visual span rendering
+pub fn format_tokens(input: String, tokens: List(Token(a))) -> String {
+ tokens
+ |> list.index_map(fn(token, index) {
+ let Token(span, lexeme, _) = token
+ #(span, "Token " <> int.to_string(index) <> ": '" <> lexeme <> "'")
+ })
+ |> visual_multiple_spans(input, _, 1)
+}
+
+/// Format error reason in a readable way
+fn format_error_reason(reason: nibble.Error(tok)) -> String {
+ case reason {
+ nibble.BadParser(msg) -> "Bad parser: " <> msg
+ nibble.Custom(msg) -> "Custom error: " <> msg
+ nibble.EndOfInput -> "Unexpected end of input"
+ nibble.Expected(expected, got: got) ->
+ "Expected " <> expected <> ", but got " <> string.inspect(got)
+ nibble.Unexpected(tok) -> "Unexpected token: " <> string.inspect(tok)
+ }
+}
+
+/// Format a single DeadEnd error with visual span rendering and input context
+pub fn format_dead_end(
+ input: String,
+ dead_end: nibble.DeadEnd(tok, ctx),
+ index: Int,
+) -> String {
+ let nibble.DeadEnd(span, reason, context) = dead_end
+
+ // Extract the parser name (innermost parser that failed)
+ let parser_name = case context {
+ [#(_, name), ..] -> " [in " <> string.inspect(name) <> "]"
+ [] -> ""
+ }
+
+ let error_header =
+ "Error "
+ <> int.to_string(index + 1)
+ <> parser_name
+ <> ": "
+ <> format_error_reason(reason)
+
+ // Show full parser call stack if there are multiple levels
+ let context_info = case context {
+ [] | [_] -> ""
+ _ -> {
+ let call_chain =
+ context
+ |> list.drop(1) // Skip the first one since it's in the header
+ |> list.map(fn(ctx_item) {
+ let #(_, parser_name) = ctx_item
+ " ↳ called from " <> string.inspect(parser_name)
+ })
+ |> string.join("\n")
+ "\nCall chain:\n" <> call_chain
+ }
+ }
+
+ // Show input with each error for clarity
+ "Input: "
+ <> escape_string(input)
+ <> "\n"
+ <> error_header
+ <> context_info
+ <> "\n"
+ <> visual_single_span(input, span, "error location", 1)
+}
+
+/// Format a list of DeadEnd errors with visual rendering
+pub fn format_dead_ends(
+ input: String,
+ errors: List(nibble.DeadEnd(tok, ctx)),
+) -> String {
+ let count_header = "Number of errors: " <> int.to_string(list.length(errors))
+
+ case list.is_empty(errors) {
+ True -> count_header
+ False -> {
+ count_header
+ <> "\n\n"
+ <> {
+ errors
+ |> list.index_map(fn(dead_end, idx) { format_dead_end(input, dead_end, idx) })
+ |> string.join("\n\n")
+ }
+ }
+ }
+}
+
+/// Format a single Node with index
+pub fn format_node(node: Node, index: Int) -> String {
+ " "
+ <> int.to_string(index + 1)
+ <> ". "
+ <> string.inspect(node)
+ <> "\n"
+}
+
+/// Format a list of Nodes
+pub fn format_nodes(nodes: List(Node)) -> String {
+ "Parsed "
+ <> int.to_string(list.length(nodes))
+ <> " node(s):\n"
+ <> {
+ nodes
+ |> list.index_map(fn(n, idx) { format_node(n, idx) })
+ |> string.join("")
+ }
+}
+
+/// Create a snapshot of lexer output showing input and tokens
+pub fn snap_lexer_output(
+ input: String,
+ tokens: List(Token(a)),
+ title: String,
+) -> Nil {
+ let snap =
+ "Input: "
+ <> escape_string(input)
+ <> "\n\n"
+ <> format_tokens(input, tokens)
+
+ birdie.snap(snap, title: title)
+}
+
+/// Create a snapshot of a parse error showing input and errors
+pub fn snap_parse_error(
+ input: String,
+ errors: List(nibble.DeadEnd(tok, ctx)),
+ title: String,
+) -> Nil {
+ // Input is shown with each error, so no need to show it at the top level
+ let snap = format_dead_ends(input, errors)
+
+ birdie.snap(snap, title: title)
+}
+
+/// Create a snapshot of successful parse showing input and result
+pub fn snap_parse_success(
+ input: String,
+ nodes: List(Node),
+ title: String,
+) -> Nil {
+ let snap =
+ "Input: "
+ <> escape_string(input)
+ <> "\n"
+ <> format_nodes(nodes)
+
+ birdie.snap(snap, title: title)
+}
+
+/// Create a snapshot for a Result type (success or error)
+pub fn snap_parse_result(
+ input: String,
+ result: Result(a, List(nibble.DeadEnd(tok, ctx))),
+ title: String,
+ format_success: fn(a) -> String,
+) -> Nil {
+ let snap = case result {
+ Ok(value) -> {
+ "Input: "
+ <> escape_string(input)
+ <> "\n\nSuccess:\n"
+ <> format_success(value)
+ }
+ // Input is shown with each error, so no need to show it at the top level
+ Error(errors) -> format_dead_ends(input, errors)
+ }
+
+ birdie.snap(snap, title: title)
+}
+
+/// Escape special characters in strings for readable snapshots
+fn escape_string(s: String) -> String {
+ s
+ |> string.replace("\\", "\\\\")
+ |> string.replace("\n", "\\n")
+ |> string.replace("\r", "\\r")
+ |> string.replace("\t", "\\t")
+}
+
+// ============================================================================
+// Visual Span Rendering Functions
+// ============================================================================
+
+/// Split input into indexed lines (1-based)
+fn split_into_lines(input: String) -> List(#(Int, String)) {
+ input
+ |> string.split("\n")
+ |> list.index_map(fn(line, idx) { #(idx + 1, line) })
+}
+
+/// Calculate the width needed for the gutter (line numbers)
+fn calculate_gutter_width(max_line_num: Int) -> Int {
+ int.to_string(max_line_num)
+ |> string.length()
+}
+
+/// Render a line with its gutter (line number and separator)
+fn render_line_with_gutter(
+ line_num: Int,
+ content: String,
+ gutter_width: Int,
+) -> String {
+ let line_str = int.to_string(line_num)
+ let padding = string.repeat(" ", gutter_width - string.length(line_str))
+ padding <> line_str <> " │ " <> make_visible(content)
+}
+
+/// Render a continuation marker in the gutter
+fn render_continuation_gutter(gutter_width: Int) -> String {
+ string.repeat(" ", gutter_width) <> " │"
+}
+
+/// Make invisible characters visible by escaping them
+/// This ensures users can see what they're working with in visual spans
+fn make_visible(s: String) -> String {
+ s
+ |> string.replace("\\", "\\\\")
+ |> string.replace("\r", "\\r")
+ |> string.replace("\n", "\\n")
+ |> string.replace("\t", "\\t")
+}
+
+/// Calculate visible column position accounting for escaped characters
+/// Maps original column position to position in the escaped/visible string
+fn calculate_visual_column(line_content: String, original_col: Int) -> Int {
+ line_content
+ |> string.to_graphemes()
+ |> list.take(original_col - 1)
+ |> list.fold(0, fn(acc, char) {
+ case char {
+ "\\" -> acc + 2 // Displayed as \\
+ "\r" -> acc + 2 // Displayed as \r
+ "\n" -> acc + 2 // Displayed as \n
+ "\t" -> acc + 2 // Displayed as \t
+ _ -> acc + 1
+ }
+ })
+ |> fn(pos) { pos + 1 } // Add 1 because columns are 1-indexed
+}
+
+/// Generate a marker line with box-drawing characters
+/// Returns a string like " ───┬───" for a span
+fn visual_span_marker(
+ gutter_width: Int,
+ start_col: Int,
+ end_col: Int,
+) -> String {
+ let gutter = string.repeat(" ", gutter_width + 3)
+ let before = string.repeat(" ", start_col - 1)
+ let marker_width = end_col - start_col
+
+ case marker_width {
+ 0 -> gutter <> before <> "┬"
+ 1 -> gutter <> before <> "┬"
+ _ -> {
+ let half = marker_width / 2
+ let left = string.repeat("─", half)
+ let right = string.repeat("─", marker_width - half - 1)
+ gutter <> before <> left <> "┬" <> right
+ }
+ }
+}
+
+/// Generate a label line with box-drawing characters
+/// Returns a string like " └─ label text"
+fn visual_span_label(gutter_width: Int, col: Int, label: String) -> String {
+ let gutter = string.repeat(" ", gutter_width + 3)
+ let before = string.repeat(" ", col - 1)
+ gutter <> before <> "└─ " <> label
+}
+
+/// Get the center column of a span for label positioning
+fn span_center_col(span: Span) -> Int {
+ let Span(_, cs, _, ce) = span
+ cs + { ce - cs } / 2
+}
+
+/// Render a single span visually with context lines
+pub fn visual_single_span(
+ input: String,
+ span: Span,
+ label: String,
+ context_lines: Int,
+) -> String {
+ let Span(row_start, col_start, row_end, col_end) = span
+ let lines = split_into_lines(input)
+ let max_line = list.length(lines)
+ let gutter_width = calculate_gutter_width(max_line)
+
+ // Determine which lines to show
+ let first_line = int.max(1, row_start - context_lines)
+ let last_line = int.min(max_line, row_end + context_lines)
+
+ // Build the output
+ let content_lines =
+ lines
+ |> list.filter(fn(line) {
+ let #(num, _) = line
+ num >= first_line && num <= last_line
+ })
+ |> list.flat_map(fn(line) {
+ let #(num, content) = line
+ let line_str = render_line_with_gutter(num, content, gutter_width)
+
+ // Add markers for lines within the span
+ case num >= row_start && num <= row_end {
+ True -> {
+ case num == row_start, num == row_end {
+ // Single line span
+ True, True -> {
+ let visual_start = calculate_visual_column(content, col_start)
+ let visual_end = calculate_visual_column(content, col_end)
+ let marker = visual_span_marker(gutter_width, visual_start, visual_end)
+ let visual_center = visual_start + { visual_end - visual_start } / 2
+ let label_line =
+ visual_span_label(gutter_width, visual_center, label)
+ [line_str, marker, label_line]
+ }
+ // First line of multi-line span
+ True, False -> {
+ let visual_start = calculate_visual_column(content, col_start)
+ let visible_len = string.length(make_visible(content))
+ let marker = visual_span_marker(gutter_width, visual_start, visible_len + 1)
+ [line_str, render_continuation_gutter(gutter_width) <> " " <> marker]
+ }
+ // Last line of multi-line span
+ False, True -> {
+ let visual_end = calculate_visual_column(content, col_end)
+ let marker = visual_span_marker(gutter_width, 1, visual_end)
+ let label_line =
+ visual_span_label(gutter_width, visual_end / 2, label)
+ [line_str, marker, label_line]
+ }
+ // Middle line of multi-line span
+ False, False -> {
+ let visible_len = string.length(make_visible(content))
+ let marker = visual_span_marker(gutter_width, 1, visible_len + 1)
+ [line_str, render_continuation_gutter(gutter_width) <> " " <> marker]
+ }
+ }
+ }
+ False -> [line_str]
+ }
+ })
+
+ string.join(content_lines, "\n")
+}
+
+/// Render multiple spans visually (stacked vertically for same-line spans)
+pub fn visual_multiple_spans(
+ input: String,
+ spans: List(#(Span, String)),
+ context_lines: Int,
+) -> String {
+ case spans {
+ [] -> ""
+ [single] -> {
+ let #(span, label) = single
+ visual_single_span(input, span, label, context_lines)
+ }
+ _ -> {
+ // For now, render each span separately and join with blank lines
+ spans
+ |> list.map(fn(span_label) {
+ let #(span, label) = span_label
+ visual_single_span(input, span, label, context_lines)
+ })
+ |> string.join("\n\n")
+ }
+ }
+}
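For reference, the visual renderer above can also be invoked directly. A rough sketch (the module and chosen values are illustrative only), using the same context_lines of 1 that format_tokens passes, so it should reproduce roughly the Token 4 block from the lexer snapshot earlier in this change:

import gleam/io
import nibble/lexer.{Span}
import snapshot_helpers

pub fn main() -> Nil {
  // Spans are 1-based with an exclusive end column, as in the lexer
  // snapshots above: 'x' in "let x1 =" sits at row 1, columns 5..6.
  let rendered =
    snapshot_helpers.visual_single_span(
      "let x1 =\n  e1",
      Span(1, 5, 1, 6),
      "Token 4: 'x'",
      1,
    )
  io.println(rendered)
}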